text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
# Iris clustering using k-means Let's play initialy only with the sepal features - Load the database ``` from sklearn import datasets from sklearn.cluster import KMeans import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline iris = datasets.load_iris() X = iris.data[:, 2:4] # use only 'petal length and width' y_iris = iris.target ``` - Lets plot the distribution ``` plt.scatter(X[:, 0], X[:, 1], c=y_iris, cmap="viridis") from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X) X = scaler.transform(X) plt.scatter(X[:, 0], X[:, 1], c=y_iris, cmap="viridis") ``` # k-means - Unsupervised Learning **We are using a lebeled dataset just to confirm the clusters make sense.** Note that categorical data 'y' will not be used. **It means that, to the algorithm and in a real situation, data will be like below:** ``` plt.scatter(X[:, 0], X[:, 1], c='black', cmap="viridis") ``` - Let's create the KMeans cluster with 2, 3 and 4 centroids, fit the data and then plot the results ``` km2 = KMeans(n_clusters=2).fit(X) km3 = KMeans(n_clusters=3).fit(X) km4 = KMeans(n_clusters=4).fit(X) plt.figure(figsize=(15, 5)) plt.subplot(131) plt.scatter(X[:, 0], X[:, 1], c=km2.labels_, cmap="viridis") plt.title("K=2, Inertia=%.2f" % km2.inertia_) c2 = km2.cluster_centers_ plt.scatter(c2[:, 0], c2[:, 1], c='red', s=200, alpha=0.5) plt.subplot(132) plt.scatter(X[:, 0], X[:, 1], c=km3.labels_, cmap="viridis") plt.title("K=3, Inertia=%.2f" % km3.inertia_) c3 = km3.cluster_centers_ plt.scatter(c3[:, 0], c3[:, 1], c='red', s=200, alpha=0.5) plt.subplot(133) plt.scatter(X[:, 0], X[:, 1], c=km4.labels_, cmap="viridis") plt.title("K=4, Inertia=%.2f" % km4.inertia_) c4 = km4.cluster_centers_ plt.scatter(c4[:, 0], c4[:, 1], c='red', s=200, alpha=0.5) ``` - Now, try to find the best K using Elbown Curve ``` import numpy as np X = iris.data[:, 2:4] # use only 'petal length and sepal width' y_iris = iris.target k = range(1, 10) kmeans = [KMeans(n_clusters=i) for 
i in k] score = [kmeans[i].fit(X).inertia_ for i in range(len(kmeans))] plt.plot(k,score) plt.xlabel('Number of Clusters') plt.ylabel('Score') plt.title('Elbow Curve') plt.show() ``` It seems that the best bet is between 2 and 3. No great improvement after 4. **Note that k-means does not know the target classes, yet it achieves a high similarity between the real classes and the clusters found when k=3:** ``` plt.figure(figsize=(15, 5)) plt.subplot(131) plt.title("Data without labels") plt.scatter(X[:, 0], X[:, 1]) plt.subplot(132) plt.title("Real species") plt.scatter(X[:, 0], X[:, 1], c=y_iris) plt.subplot(133) plt.title("k-means Clusters") plt.scatter(X[:, 0], X[:, 1], c=km3.labels_) ```
github_jupyter
# Pulse Nulling: Example Notebook This notebook will serve as an example of how to use the pulse nulling feature of the `Pulse Signal Simulator`. ``` # Start by importing the packages we will need for the simulation. import psrsigsim as pss # Additional necessary packages import numpy as np import matplotlib.pyplot as plt # helpful magic lines %matplotlib inline ``` We define a plotting convenience function for later. ``` # Define a function for easier plotting later on/throughout the testing def plotsignal(signals, nbins=2048): # signals can be a list of multiple signals to overplot for ii in range(len(signals)): # Define the x axis phases = np.linspace(0.0, len(signals[ii]), len(signals[ii]))/nbins # now plot it plt.plot(phases, signals[ii], label="signal %s" % (ii)) plt.xlim([0.0, np.max(phases)]) plt.xlabel("Pulse Phase") plt.ylabel("Arb. Flux") plt.show() plt.close() ``` Now we will define some example simulation parameters. The warning generated below may be ignored. ``` # define the required filterbank signal parameters f0 = 1380 # center observing frequecy in MHz bw = 800.0 # observation MHz Nf = 2 # number of frequency channels F0 = np.double(1.0) # pulsar frequency in Hz f_samp = F0*2048*10**-6 # sample rate of data in MHz, here 2048 bins across the pulse subintlen = 1.0 # desired length of fold-mode subintegration in seconds # Now we define our signal null_signal = pss.signal.FilterBankSignal(fcent = f0, bandwidth = bw, Nsubband=Nf,\ sample_rate=f_samp, fold=True, sublen=subintlen) ``` Now we define an example Gaussian pulse shape. Details on defining a pulse shape from a data array may be found in the example notebook in the docs. ``` prof = pss.pulsar.GaussProfile(peak=0.5, width=0.05, amp=1.0) ``` Now we define an example pulsar. 
``` # Define the necessary parameters period = np.double(1.0)/F0 # seconds flux = 0.1 # Jy psr_name = "J0000+0000" # Define the pulsar object pulsar = pss.pulsar.Pulsar(period=period, Smean=flux, profiles=prof, name=psr_name) ``` Now we actually make the pulsar signal. Note that if the observation length is very long all the data will be saved in memory which may crash the computer or slow it down significantly. ``` # Define the observation time, in seconds ObsTime = 3.0 # seconds # make the pulses pulsar.make_pulses(null_signal, tobs = ObsTime) ``` Now let's take a look at what the signals look like. ``` # We plot just the first frequency channel, but all pulses simulated plotsignal([null_signal.data[0,:]]) ``` Now we can disperse the simuated data if desired. Note that this is not required, and if you only want to simulate a single frequency channel or simulate coherently dedispersed data, the data does not have to be dispersed. ``` # First define the dispersion measure dm = 10.0 # pc cm^-3 # Now define the ISM class ism_ob = pss.ism.ISM() # Now we give the ISM class the signal and disperse the data ism_ob.disperse(null_signal, dm) # If we plot the same pulses as above, you can see that the phase of the pulse has # been shfited due to the dispersion plotsignal([null_signal.data[0,:]]) ``` This is where the pulses should be nulled if desired. This can be run easily by giving the pulsar object only the signal class and the null fraction as a value between 0 and 1. The simulator will null as close to the null fraction as desired, and will round to the closest integer number of pulses to null based on the input nulling fraction, e.g. if 5 pulses are simulated and the nulling fraction is 0.5, it will round to null 3 pulses. Additionally, currently only the ability to null the pulses randomly is implemented. Here we will put in a nulling fraction of 33%. 
``` pulsar.null(null_signal, 0.34) # and plot the signal to show the null plotsignal([null_signal.data[0,:]]) ``` We can also add radiometer noise from some observing telescope. This should only be run AFTER the pulsar nulling, but is not required. For our example, we will use the L-band feed for the Arecibo telescope. Note that here since we have set the pulsar flux very high we can easily see the single pulses above the noise. ``` # We define the telescope object tscope = pss.telescope.telescope.Arecibo() # Now add radiometer noise; ignore the output here, the noise is added directly to the signal output = tscope.observe(null_signal, pulsar, system="Lband_PUPPI", noise=True) # and plot the signal to show the added noise plotsignal([null_signal.data[0,:]]) ``` Now we can save the data in a `PSRCHIVE pdv` format. This is done with the `txtfile` class. The save function will dump a new file for every 100 pulses that it writes to the text file. We start by initializing the `txtfile` object. The only input needed here is the `path` variable, which will tell the simulator where to save the data. All files saved will have "_\#.txt" added to the end of the `path` variable. ``` txtfile = pss.io.TxtFile(path="PsrSigSim_Simulated_Pulsar.ar") # Now we call the saving function. Note that depending on the length of the simulated data this may take awhile # the two inputs are the signal and the pulsar objects used to simulate the data. txtfile.save_psrchive_pdv(null_signal, pulsar) ``` And that's all that there should be to it. Let us know if you have any questions moving forward, or if something is not working as it should be. ### Note about randomly generated pulses and noise `PsrSigSim` uses `numpy.random` under the hood in order to generate the radio pulses and various types of noise. 
If a user desires or requires that this randomly generated data is reproducible we recommend using a call the seed generator native to `Numpy` before calling the function that produces the random noise/pulses. Newer versions of `Numpy` are moving toward slightly different [functionality/syntax](https://numpy.org/doc/stable/reference/random/index.html), but is essentially used in the same way. ``` numpy.random.seed(1776) pulsar_1.make_pulses(signal_1, tobs=obslen) ```
github_jupyter
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> $$ \newcommand{\set}[1]{\left\{#1\right\}} \newcommand{\abs}[1]{\left\lvert#1\right\rvert} \newcommand{\norm}[1]{\left\lVert#1\right\rVert} \newcommand{\inner}[2]{\left\langle#1,#2\right\rangle} \newcommand{\bra}[1]{\left\langle#1\right|} \newcommand{\ket}[1]{\left|#1\right\rangle} \newcommand{\braket}[2]{\left\langle#1|#2\right\rangle} \newcommand{\ketbra}[2]{\left|#1\right\rangle\left\langle#2\right|} \newcommand{\angleset}[1]{\left\langle#1\right\rangle} $$ # Motivation _prepared by Israel Gelover_ Currently we have a paradigm of **Classical Computing** whose implementation is based on electronics, basically transistors on integrated circuits. The objective of large companies that design processors is precisely to integrate more transistors on smaller circuits. This trend is known as **Moore's Law**, which originally predicted that the number of transistors that can be integrated in a circuit would double every 2 years. Later there was an adjustment to this forecast and the time was reduced to 18 months. The Quantum Computing paradigm has several advantages over classical computing. For example, the possibility of modeling quantum systems naturally, just as Richard Feynmann proposed; task optimization, that is, solving problems that would take too long for a classical computer; etc. However, in this section we will focus on motivation: **Why do we need quantum computing?** On one hand, due to the limitations of the physical implementation that we have for clasical computing, bit operations using voltage, i.e. the limitations of the implementation based on electronics. And on the other hand, the problems inherent to the classical paradigm. ## Problems due to Implementation ### <a name="remark_1_1">Remark 1.1</a> Computational power is measured in FLOPS (Floating point operations per second). 
Let's analyze how computational power has evolved throughout history. | Year | Technology | Size | # Transistors | FLOPS | |:-----|:--------------------|:--------|:--------------|:-------| | 1995 | Vacuum tubes | ~10 cm | ~10 | ~10^3 | | 2005 | Integrated circuits | ~100 nm | ~10^8 | ~10^13 | | 2018 | Integrated circuits | ~12 nm | ~10^10 | ~10^15 | - Moore's Law (1965): The number of transistors that can be integrated in a circuit doubles every 2 years. - Moore's Law (1975): The number of transistors that can be integrated in a circuit doubles every 18 months. - Around 2000, it was noted that this growth started to slow down, so in 2007 an expiration date was proposed for Moore's law, this was predicted between years 2017-2022. - Moore's Law is still in effect but... - In short time, atomic sizes will be reached and quantum effects will be dominant. - Thermal noise increases as the size decreases and the density of the components increases. - Heat generation is increasing and its dissipation is increasingly difficult. - Electronics inherently generates heat and the process of erasing information generates heat as well. The entropy of a system is closely related to the information that can be stored in that system and deleting information increases the entropy of the system, that is, it generates heat. ## Problems of the Classical Paradigm ### <a name="remark_1_2">Remark 1.2</a> Problems of the Classical Paradigm. - Limitations of AI (Machine Learning & Deep Learning): They are not multitasking, they are designed to solve limited tasks. They do not explain, explainability is currently an open problem. They need to be "massively parallel", but this conflicts with the next bullet point. - It is sequential (it is not parallel), although there are processors with several cores and different tasks can be parallelized, the classical paradigm itself is sequential. 
There are limitations of this parallelization, for example, two nested functions cannot be parallelized, and this is not a hardware problem, but rather an implementation problem due to the classical paradigm itself. In summary: **We need to find an alternative.** In order to aleviate the problems due to implementation that we just listed, there are several proposals for alternative paradigms to perform computation. To name a few: bacterial computation, probabilistic computation, analog computation, spintronics (spin transport electronics) or spin electronics, etc. However, Quantum Computing is one that currently is most spread amomg the scientific community and even with a lot of financial help from large companies such as IBM. ## Advantages of Quantum Computing - Quantum Computing is inherently parallel computation. We will delve into this point later, but this intrinsic parallelism is due to the superposition of states. At the end of a quantum algorithm, a measurement will only recover one state, however the operations carried out by the algorithm are carried out with each of the elements of the superposition. - No heat dissipation. The entropy of the system is not increased since information is not erased. Information is maintained in quantum mechanics, it can be transformed by unitary operators but not erased. - Natural simulation of quantum systems. As Feynmann proposed it, this is due to the very nature of the quantum paradigm, since quantum systems are used to model another quantum system, basically what we have is a scale model of the initial problem. ## NISQ Era (Noisy Intermediate-Scale Quantum) All the quantum computers that have been built so far serve very specific purposes, there is no such thing as a multipurpose quantum computer. We are speaking of devices that use properties of quantum mechanics to perform operations very quickly. 
A clear example of this is the quantum computer created in China that recently claimed quantum supremacy, this machine is designed to solve a specific problem and serves exclusively to solve that problem. What is currently being sought is to create a multipurpose quantum computer, however there are a number of technological challenges. - Control of the evolution of the components. The basic unit of information in quantum computing is a two-level quantum state that we call _qubit_. There are different ways of modeling these qubits, with a photon, with an electron, with an anion, etc; and regardless of the physical system used to model them, it is necessary to control their temporal evolution. This control in the evolution of the components essentially limits the number of qubits a quamtum computer can have. Currently, around 100 qubits can be controlled for a certain period of time (which is sufficient to perform operations moderately efficiently). - Decoherence. It is the information decay due to interaction with the environment. For example, a qubit can be encoded with the spin of a photon and sent through a fiber optic cable, but regardless of the quality of the fiber optic, as the photon advances, the probability of measuring the information that was encoded in it decreases until it becomes random. Therefore, it is necessary to process this information, that is, to execute the algorithm that has been programmed, in a sufficiently short time to avoid this decay. These two previous points mark the era of quantum computing that we are in, which is known as the NISQ (Noisy Intermediate-Scale Quantum) Era. Intermediate-Scale refers to the size of quantum computers that will be available in the medium term of the next few years, with a number of qubits between 50 and a few hundred. Noisy emphasizes the fact that we will have imperfect control over qubits, this represents serious limitations on what these devices will be able to achieve in the near future. 
All algorithms developed in this era must take into account these limitations. ## Additional benefits What else do we get from having this smallest unit of quantum information (the two-level quantum state that we call a qubit)? - Teleportation. The quantum information contained in a qubit is encoded in classical bits that are sent through a traditional or classical channel, to later recreate the information quantum, that is, recreate the qubit. - Superdense coding. This is essentially the reverse process of teleportation, that is, two classical bits are encoded into a single qubit, to be recovered later. - Quantum cryptography. The RSA algorithm is based on factoring very large integers into their prime factors, which is very expensive in terms classical computing. Shor's algorithm offers a much faster way to do this factoring, which has serious implications for classical cryptography.
github_jupyter
# Construction $$ \newcommand{\sumN}{\sum_{n = 1}^N} \newcommand{\sumn}{\sum_n} \newcommand{\bx}{\mathbf{x}} \newcommand{\bbeta}{\boldsymbol{\beta}} \newcommand{\btheta}{\boldsymbol{\theta}} \newcommand{\bbetahat}{\boldsymbol{\hat{\beta}}} \newcommand{\bthetahat}{\boldsymbol{\hat{\theta}}} \newcommand{\dadb}[2]{\frac{\partial #1}{\partial #2}} \newcommand{\by}{\mathbf{y}} \newcommand{\bX}{\mathbf{X}} $$ This section demonstrates how to construct a linear regression model using only `numpy`. To do this, we generate a class named `LinearRegression`. We use this class to train the model and make future predictions. The first method in the `LinearRegression` class is `fit()`, which takes care of estimating the $\bbeta$ parameters. This simply consists of calculating $$ \bbetahat = \left(\bX^\top \bX\right)^{-1}\bX^\top \by $$ The `fit` method also makes in-sample predictions with $\hat{\by} = \bX \bbetahat$ and calculates the training loss with $$ \mathcal{L}(\bbetahat) = \frac{1}{2}\sumN \left(y_n - \hat{y}_n \right)^2. $$ The second method is `predict()`, which forms out-of-sample predictions. Given a test set of predictors $\bX'$, we can form fitted values with $\hat{\by}' = \bX' \bbetahat$. 
``` import numpy as np import matplotlib.pyplot as plt import seaborn as sns class LinearRegression: def fit(self, X, y, intercept = False): # record data and dimensions if intercept == False: # add intercept (if not already included) ones = np.ones(len(X)).reshape(len(X), 1) # column of ones X = np.concatenate((ones, X), axis = 1) self.X = np.array(X) self.y = np.array(y) self.N, self.D = self.X.shape # estimate parameters XtX = np.dot(self.X.T, self.X) XtX_inverse = np.linalg.inv(XtX) Xty = np.dot(self.X.T, self.y) self.beta_hats = np.dot(XtX_inverse, Xty) # make in-sample predictions self.y_hat = np.dot(self.X, self.beta_hats) # calculate loss self.L = .5*np.sum((self.y - self.y_hat)**2) def predict(self, X_test, intercept = True): # form predictions self.y_test_hat = np.dot(X_test, self.beta_hats) ``` Let's try out our `LinearRegression` class with some data. Here we use the {doc}`Boston housing </content/appendix/data>` dataset from `sklearn.datasets`. The target variable in this dataset is median neighborhood home value. The predictors are all continuous and represent factors possibly related to the median home value, such as average rooms per house. Hit "Click to show" to see the code that loads this data. ``` from sklearn import datasets boston = datasets.load_boston() X = boston['data'] y = boston['target'] ``` With the class built and the data loaded, we are ready to run our regression model. This is as simple as instantiating the model and applying `fit()`, as shown below. ``` model = LinearRegression() # instantiate model model.fit(X, y, intercept = False) # fit model ``` Let's then see how well our fitted values model the true target values. The closer the points lie to the 45-degree line, the more accurate the fit. The model seems to do reasonably well; our predictions definitely follow the true values quite well, although we would like the fit to be a bit tighter. ```{note} Note the handful of observations with $y = 50$ exactly. 
This is due to censorship in the data collection process. It appears neighborhoods with average home values above \$50,000 were assigned a value of 50 even. ``` ``` fig, ax = plt.subplots() sns.scatterplot(model.y, model.y_hat) ax.set_xlabel(r'$y$', size = 16) ax.set_ylabel(r'$\hat{y}$', rotation = 0, size = 16, labelpad = 15) ax.set_title(r'$y$ vs. $\hat{y}$', size = 20, pad = 10) sns.despine() ```
github_jupyter
# Convergent Cross Mapping This notebook and package is reproducing the results from [ Detecting Causality in Complex Ecosystems](http://science.sciencemag.org/content/338/6106/496) ``` import sys import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set_style('ticks') sns.set_context(context='paper',font_scale=1.5) %matplotlib inline %load_ext autoreload %autoreload 2 #alter the line below to correspond to your file system #nonlinpy_dir = '/Users/nickc/Documents/skccm' #sys.path.append('../') import skccm as ccm import skccm.data as data ``` ## Work Flow A coupled logistic map. Here is what we are going to do: 1. Generate the time series 2. Calculate the mutual information of the time series 3. Embed the time series (not going to explore embedding dimension, just lag) 4. Analyze forecast skill for a range of library lengths ### 1. Generate the time series ``` rx1 = 3.72 #determines chaotic behavior of the x1 series rx2 = 3.72 #determines chaotic behavior of the x2 series b12 = 0.2 #Influence of x1 on x2 b21 = 0.01 #Influence of x2 on x1 ts_length = 1000 x1,x2 = data.coupled_logistic(rx1,rx2,b12,b21,ts_length) fig,ax = plt.subplots(nrows=2,sharex=True,sharey=True) ax[0].plot(x1[0:100]) ax[1].plot(x2[0:100]) ax[0].set_yticks([.1,.3,.5,.7,.9]) ax[1].set_xlabel('Time') sns.despine() #fig.savefig('../figures/train_test_split/coupled_logistic.png',bbox_inches='tight') ``` ### 2. Calculate the mutual information ``` e1 = ccm.Embed(x1) e2 = ccm.Embed(x2) mi1 = e1.mutual_information(10) mi2 = e2.mutual_information(10) fig,ax = plt.subplots(nrows=2,sharex=True,sharey=True) ax[0].plot(np.arange(1,11),mi1) ax[1].plot(np.arange(1,11),mi2) ax[1].set_xlabel('Lag') sns.despine() #fig.savefig('../figures/mutual_info.png',bbox_inches='tight') ``` ### 3. 
Embed the time series ``` lag = 1 embed = 2 X1 = e1.embed_vectors_1d(lag,embed) X2 = e2.embed_vectors_1d(lag,embed) fig,ax = plt.subplots(ncols=2,sharey=True,sharex=True,figsize=(10,4)) ax[0].scatter(X1[:,0],X1[:,1]) ax[1].scatter(X2[:,0],X2[:,1]) ax[0].set_xlabel('X1(t)') ax[0].set_ylabel('X1(t-1)') ax[1].set_xlabel('X2(t)') ax[1].set_ylabel('X2(t-1)') sns.despine() #fig.savefig('../figures/x_embedded.png',bbox_inches='tight') ``` ### 4. Forecast skill as a function of library length Same legend as above. ``` CCM = ccm.CCM() ``` ### Split it into a testing set and training set ``` from skccm.utilities import train_test_split x1tr, x1te, x2tr, x2te = train_test_split(X1,X2, percent=.75) len_tr = len(x1tr) lib_lens = np.linspace(10, len_tr/2, dtype='int') lib_lens CCM.fit(x1tr,x2tr) x1p, x2p = CCM.predict(x1te, x2te,lib_lengths=lib_lens) sc1,sc2 = CCM.score() fig,ax = plt.subplots() ax.plot(lib_lens,sc1,label='X1 xmap X2') ax.plot(lib_lens,sc2, label='X2 xmap X1') ax.set_xlabel('Library Length') ax.set_ylabel('Forecast Skill') sns.despine() #fig.savefig('../figures/train_test_split/xmap_lib_len.png',bbox_inches='tight') ``` ## Reproducing the plot from the paper The paper explores different values for b12 and b21 in Figure 3B. 
``` rx1 = 3.7 #determines chaotic behavior of the x1 series rx2 = 3.7 #determines chaotic behavior of the x2 series ts_length = 1000 #variables for embedding lag = 1 embed = 2 CCM = ccm.CCM() #intitiate the ccm object #store values num_b = 20 #number of bs to test sc1_store = np.empty((num_b,num_b)) sc2_store = np.empty((num_b,num_b)) #values over which to test max_b = .4 #maximum b values b_range = np.linspace(0,max_b,num=num_b) #loop through b values for b12 and b21 for ii,b12 in enumerate(b_range): for jj, b21 in enumerate(b_range): x1,x2 = data.coupled_logistic(rx1,rx2,b12,b21,ts_length) e1 = ccm.Embed(x1) e2 = ccm.Embed(x2) X1 = e1.embed_vectors_1d(lag,embed) X2 = e2.embed_vectors_1d(lag,embed) x1tr, x1te, x2tr, x2te = train_test_split(X1,X2, percent=.75) CCM.fit(x1tr,x2tr) x1p, x2p = CCM.predict(x1te, x2te,lib_lengths=[500]) #only one prediction sc1,sc2 = CCM.score() sc1_store[ii,jj] = sc1[0] sc2_store[ii,jj] = sc2[0] sc_diff = sc2_store-sc1_store fig,ax = plt.subplots() v = np.linspace(-1.6,1.6,21) cax = ax.contourf(b_range,b_range,sc_diff,v,cmap='magma') fig.colorbar(cax,ticks=v[::2]) ax.set_xlabel('B12') ax.set_ylabel('B21') #fig.savefig('../figures/train_test_split/xmap_changingB.png',bbox_inches='tight') ``` ## Randomly forced ``` rx2 = 3.72 #determines chaotic behavior of the x2 series b12 = .2 #Influence of x1 on x2 ts_length = 1000 x1,x2 = data.driven_rand_logistic(rx2,b12,ts_length) fig,ax = plt.subplots(nrows=2,sharex=True,sharey=True) ax[0].plot(x1[0:100]) ax[1].plot(x2[0:100]) ax[0].set_yticks([.1,.3,.5,.7,.9]) ax[1].set_xlabel('Time') sns.despine() e1 = ccm.Embed(x1) e2 = ccm.Embed(x2) mi1 = e1.mutual_information(10) mi2 = e2.mutual_information(10) fig,ax = plt.subplots(nrows=2,sharex=True,sharey=True) ax[0].plot(np.arange(1,11),mi1) ax[1].plot(np.arange(1,11),mi2) ax[1].set_xlabel('Lag') sns.despine() lag = 1 embed = 3 X1 = e1.embed_vectors_1d(lag,embed) X2 = e2.embed_vectors_1d(lag,embed) fig,ax = 
plt.subplots(ncols=2,sharey=True,sharex=True,figsize=(10,4)) ax[0].scatter(X1[:,0],X1[:,1]) ax[1].scatter(X2[:,0],X2[:,1]) ax[0].set_xlabel('X1(t)') ax[0].set_ylabel('X1(t-1)') ax[1].set_xlabel('X2(t)') ax[1].set_ylabel('X2(t-1)') sns.despine() CCM = ccm.CCM() lib_lens = np.linspace(10,ts_length/2,dtype='int') x1tr, x1te, x2tr, x2te = train_test_split(X1,X2, percent=.75) CCM.fit(x1tr,x2tr) x1p, x2p = CCM.predict(x1te, x2te,lib_lengths=lib_lens) #only one prediction sc1,sc2 = CCM.score() plt.plot(sc1) plt.plot(sc2) ``` ## Periodic Forcing ``` rx2 = 3.72 #determines chaotic behavior of the x2 series b12 = .5 #Influence of x1 on x2 ts_length = 1000 x1,x2 = data.driving_sin(rx2,b12,ts_length) fig,ax = plt.subplots(nrows=2,sharex=True) ax[0].plot(x1[0:100]) ax[1].plot(x2[0:100]) ax[1].set_xlabel('Time') sns.despine() em_x1 = ccm.Embed(x1) em_x2 = ccm.Embed(x2) mi1 = em_x1.mutual_information(10) mi2 = em_x2.mutual_information(10) fig,ax = plt.subplots(nrows=2,sharex=True,sharey=True) ax[0].plot(np.arange(1,11),mi1) ax[1].plot(np.arange(1,11),mi2) ax[1].set_xlabel('Lag') sns.despine() lag1 = 4 lag2 = 1 embed = 3 X1 = em_x1.embed_vectors_1d(lag1,embed) X2 = em_x2.embed_vectors_1d(lag2,embed) fig,ax = plt.subplots(ncols=2,sharey=True,sharex=True,figsize=(10,4)) ax[0].scatter(X1[:,0],X1[:,1]) ax[1].scatter(X2[:,0],X2[:,1]) ax[0].set_xlabel('X1(t)') ax[0].set_ylabel('X1(t-1)') ax[1].set_xlabel('X2(t)') ax[1].set_ylabel('X2(t-1)') sns.despine() CCM = ccm.CCM() print('X1 Shape:', X1.shape) print('X2 Shape:', X2.shape) ``` Need to make them the same length before they can be used. This could be corrected in future versions. 
``` X2 = X2[0:len(X1)] lib_lens = np.linspace(10,ts_length/2,dtype='int') x1tr, x1te, x2tr, x2te = train_test_split(X1,X2, percent=.75) CCM.fit(x1tr,x2tr) x1p, x2p = CCM.predict(x1te, x2te,lib_lengths=lib_lens) #only one prediction sc1,sc2 = CCM.score() plt.plot(sc1) plt.plot(sc2) x1p[-1].shape x1te.shape plt.plot(x1p[-1][:,0]) plt.plot(x1te[:,0]) plt.plot(x2te[:,0],alpha=.5) plt.plot(x2p[-1][:,0]) ``` ## Lagged Coupled Logistic ``` rx1 = 3.72 #determines chaotic behavior of the x1 series rx2 = 3.72 #determines chaotic behavior of the x2 series b12 = 0.01 #Influence of x1 on x2 b21 = 0.3 #Influence of x2 on x1 ts_length = 8000 x1,x2 = data.lagged_coupled_logistic(rx1,rx2,b12,b21,ts_length) fig,ax = plt.subplots(nrows=2,sharex=True) ax[0].plot(x1[0:100]) ax[1].plot(x2[0:100]) ax[1].set_xlabel('Time') sns.despine() em_x1 = ccm.Embed(x1) em_x2 = ccm.Embed(x2) mi1 = em_x1.mutual_information(10) mi2 = em_x2.mutual_information(10) fig,ax = plt.subplots(nrows=2,sharex=True,sharey=True) ax[0].plot(np.arange(1,11),mi1) ax[1].plot(np.arange(1,11),mi2) ax[1].set_xlabel('Lag') sns.despine() lag1 = 2 lag2 = 2 embed = 3 X1 = em_x1.embed_vectors_1d(lag1,embed) X2 = em_x2.embed_vectors_1d(lag2,embed) fig,ax = plt.subplots(ncols=2,sharey=True,sharex=True,figsize=(10,4)) ax[0].scatter(X1[:,0],X1[:,1]) ax[1].scatter(X2[:,0],X2[:,1]) ax[0].set_xlabel('X1(t)') ax[0].set_ylabel('X1(t-1)') ax[1].set_xlabel('X2(t)') ax[1].set_ylabel('X2(t-1)') sns.despine() CCM = ccm.CCM() lib_lens = np.linspace(10,ts_length/2,dtype='int') x1tr, x1te, x2tr, x2te = train_test_split(X1,X2, percent=.75) CCM.fit(x1tr,x2tr) x1p, x2p = CCM.predict(x1te, x2te,lib_lengths=lib_lens) #only one prediction sc1,sc2 = CCM.score() plt.plot(lib_lens,sc1) plt.plot(lib_lens,sc2) ``` ## Random Linearly Increasing ``` ts_length=1000 x1 = np.random.randn(ts_length,) + np.linspace(0,25,ts_length) x2 = np.random.randn(ts_length,) + np.linspace(0,10,ts_length) plt.plot(x1) plt.plot(x2) em_x1 = ccm.Embed(x1) em_x2 = ccm.Embed(x2) 
mi1 = em_x1.mutual_information(20) mi2 = em_x2.mutual_information(20) fig,ax = plt.subplots(nrows=2,sharex=True) ax[0].plot(np.arange(1,21),mi1) ax[1].plot(np.arange(1,21),mi2) ax[1].set_xlabel('Lag') sns.despine() lag1 = 2 lag2 = 2 embed = 8 X1 = em_x1.embed_vectors_1d(lag1,embed) X2 = em_x2.embed_vectors_1d(lag2,embed) fig,ax = plt.subplots(ncols=2,sharey=True,sharex=True,figsize=(10,4)) ax[0].scatter(X1[:,0],X1[:,1]) ax[1].scatter(X2[:,0],X2[:,1]) ax[0].set_xlabel('X1(t)') ax[0].set_ylabel('X1(t-1)') ax[1].set_xlabel('X2(t)') ax[1].set_ylabel('X2(t-1)') sns.despine() CCM = ccm.CCM() lib_lens = np.linspace(10,ts_length/2,dtype='int') x1tr, x1te, x2tr, x2te = train_test_split(X1,X2, percent=.75) CCM.fit(x1tr,x2tr) x1p, x2p = CCM.predict(x1te, x2te,lib_lengths=lib_lens) #only one prediction sc1,sc2 = CCM.score() plt.plot(lib_lens,sc1) plt.plot(lib_lens,sc2) ``` ### Cosine and Sine Waves ``` ts_length=500 x1 = np.random.randn(ts_length,) + 10*np.sin(np.linspace(0,10*np.pi,ts_length)) x2 = np.random.randn(ts_length,) + 20*np.cos(np.linspace(0,10*np.pi,ts_length)) #x1[x1<0] = np.random.randn(np.sum(x1<0),) #x2[x2<0] = np.random.randn(np.sum(x2<0),) plt.plot(x1) plt.plot(x2,alpha=.5) em_x1 = ccm.Embed(x1) em_x2 = ccm.Embed(x2) mi1 = em_x1.mutual_information(100) mi2 = em_x2.mutual_information(100) fig,ax = plt.subplots(nrows=2,sharex=True,sharey=True) ax[0].plot(np.arange(1,101),mi1) ax[1].plot(np.arange(1,101),mi2) ax[1].set_xlabel('Lag') sns.despine() lag1 = 20 lag2 = 20 embed = 2 X1 = em_x1.embed_vectors_1d(lag1,embed) X2 = em_x2.embed_vectors_1d(lag2,embed) fig,ax = plt.subplots(ncols=2,sharey=True,sharex=True,figsize=(10,4)) ax[0].scatter(X1[:,0],X1[:,1]) ax[1].scatter(X2[:,0],X2[:,1]) ax[0].set_xlabel('X1(t)') ax[0].set_ylabel('X1(t-1)') ax[1].set_xlabel('X2(t)') ax[1].set_ylabel('X2(t-1)') sns.despine() CCM = ccm.CCM() lib_lens = np.linspace(10,ts_length/2,dtype='int') x1tr, x1te, x2tr, x2te = train_test_split(X1,X2, percent=.75) CCM.fit(x1tr,x2tr) x1p, x2p = 
CCM.predict(x1te, x2te,lib_lengths=lib_lens) #only one prediction sc1,sc2 = CCM.score() plt.plot(lib_lens,sc1) plt.plot(lib_lens,sc2) ``` ### Lorenz ``` X = data.lorenz() fig,ax = plt.subplots(3,figsize=(10,5),sharex=True) ax[0].plot(X[:,0]) ax[1].plot(X[:,1]) ax[2].plot(X[:,2]) sns.despine() e1 = ccm.Embed(X[:,0]) e2 = ccm.Embed(X[:,2]) mi1 = e1.mutual_information(100) mi2 = e2.mutual_information(100) plt.plot(mi1) plt.plot(mi2) lag = 18 embed = 3 X1 = e1.embed_vectors_1d(lag,embed) X2 = e2.embed_vectors_1d(lag,embed) fig,ax = plt.subplots(ncols=2,sharey=True,sharex=True,figsize=(10,4)) ax[0].scatter(X1[:,0],X1[:,1]) ax[1].scatter(X2[:,0],X2[:,1]) ax[0].set_xlabel('X1(t)') ax[0].set_ylabel('X1(t-1)') ax[1].set_xlabel('X2(t)') ax[1].set_ylabel('X2(t-1)') sns.despine() CCM = ccm.CCM() lib_lens = np.linspace(10,ts_length/2,dtype='int') x1tr, x1te, x2tr, x2te = train_test_split(X1,X2, percent=.75) CCM.fit(x1tr,x2tr) x1p, x2p = CCM.predict(x1te, x2te,lib_lengths=lib_lens) #only one prediction sc1,sc2 = CCM.score() plt.plot(sc1) plt.plot(sc2) ```
github_jupyter
### **Section I: Setup** #### Importing all libraries ``` import numpy as np import pandas as pd import tensorflow as tf import matplotlib.pyplot as plt import os, sys from glob import glob from mpl_toolkits.mplot3d import Axes3D import random from PIL import Image from tqdm import tqdm from tqdm.notebook import tqdm_notebook import tensorflow import shutil import datetime import tensorflow_hub as hub from tensorflow.keras.models import load_model ``` #### Mounting google drive and changing directory ``` from google.colab import drive drive._mount('/content/drive') os.chdir('/content/drive/MyDrive/ROB535_Perception_project') print(os.getcwd()) df = pd.read_csv("classes.csv") df.head() ``` ### **Section II: EDA and Directory structure** #### Reference functions for EDA ``` #Demo code to read data def rot(n): """ Taking a rotation vector n and returning the resultant rotation matrix """ n = np.asarray(n).flatten() assert(n.size == 3) theta = np.linalg.norm(n) if theta: n /= theta K = np.array([[0, -n[2], n[1]], [n[2], 0, -n[0]], [-n[1], n[0], 0]]) return np.identity(3) + np.sin(theta) * K + (1 - np.cos(theta)) * K @ K else: return np.identity(3) def get_bbox(p0, p1): """ Input: * p0, p1 (3) Corners of a bounding box represented in the body frame. Output: * v (3, 8) Vertices of the bounding box represented in the body frame. * e (2, 14) Edges of the bounding box. The first 2 edges indicate the `front` side of the box. 
""" v = np.array([ [p0[0], p0[0], p0[0], p0[0], p1[0], p1[0], p1[0], p1[0]], [p0[1], p0[1], p1[1], p1[1], p0[1], p0[1], p1[1], p1[1]], [p0[2], p1[2], p0[2], p1[2], p0[2], p1[2], p0[2], p1[2]]]) e = np.array([ [2, 3, 0, 0, 3, 3, 0, 1, 2, 3, 4, 4, 7, 7], [7, 6, 1, 2, 1, 2, 4, 5, 6, 7, 5, 6, 5, 6]], dtype=np.uint8) return v, e #Printing list of classes from classes.csv file classes = ( 'Unknown', 'Compacts', 'Sedans', 'SUVs', 'Coupes', 'Muscle', 'SportsClassics', 'Sports', 'Super', 'Motorcycles', 'OffRoad', 'Industrial', 'Utility', 'Vans', 'Cycles', 'Boats', 'Helicopters', 'Planes', 'Service', 'Emergency', 'Military', 'Commercial', 'Trains') #Getting all images in files train_iles = sorted(glob('trainval/*/*_image.jpg')) #Taking a random image and printing it idx = np.random.randint(0, len(train_iles)) snapshot = train_iles[idx] img = plt.imread(snapshot) print(snapshot) print(img.shape) #Getting point cloud for each image xyz = np.fromfile(snapshot.replace('_image.jpg', '_cloud.bin'), dtype=np.float32) xyz = xyz.reshape([3, -1]) #Getting the camera projection matrix 'M' for each image proj = np.fromfile(snapshot.replace('_image.jpg', '_proj.bin'), dtype=np.float32) proj.resize([3, 4]) #Getting bounding box for each image try: bbox = np.fromfile(snapshot.replace('_image.jpg', '_bbox.bin'), dtype=np.float32) except FileNotFoundError: print('[*] bbox not found.') bbox = np.array([], dtype=np.float32) bbox = bbox.reshape([-1, 11]) uv = proj @ np.vstack([xyz, np.ones_like(xyz[0, :])]) uv = uv / uv[2, :] dist = np.linalg.norm(xyz, axis=0) fig1 = plt.figure(1, figsize=(10, 7)) ax1 = fig1.add_subplot(1, 1, 1) ax1.imshow(img) ax1.scatter(uv[0, :], uv[1, :], c=dist, marker='+', s=1) ax1.axis('scaled') fig1.tight_layout() fig2 = plt.figure(2, figsize=(8, 8)) ax2 = Axes3D(fig2) ax2.set_xlabel('x') ax2.set_ylabel('y') ax2.set_zlabel('z') step = 5 ax2.scatter( xyz[0, ::step], xyz[1, ::step], xyz[2, ::step], c=dist[::step], marker='.', s=1 ) colors = ['C{:d}'.format(i) for i in 
range(10)] for k, b in enumerate(bbox): R = rot(b[0:3]) t = b[3:6] sz = b[6:9] vert_3D, edges = get_bbox(-sz / 2, sz / 2) vert_3D = R @ vert_3D + t[:, np.newaxis] vert_2D = proj @ np.vstack([vert_3D, np.ones(vert_3D.shape[1])]) vert_2D = vert_2D / vert_2D[2, :] clr = colors[np.mod(k, len(colors))] for e in edges.T: ax1.plot(vert_2D[0, e], vert_2D[1, e], color=clr) ax2.plot(vert_3D[0, e], vert_3D[1, e], vert_3D[2, e], color=clr) c = classes[int(b[9])] ignore_in_eval = bool(b[10]) if ignore_in_eval: ax2.text(t[0], t[1], t[2], c, color='r') else: ax2.text(t[0], t[1], t[2], c) ax2.auto_scale_xyz([-40, 40], [-40, 40], [0, 80]) ax2.view_init(elev=-30, azim=-90) for e in np.identity(3): ax2.plot([0, e[0]], [0, e[1]], [0, e[2]], color=e) plt.show() def img2classname(img_path): """ Obtain image class name based on input image path Using classes.csv file """ bbox = np.fromfile(img_path.replace('_image.jpg', '_bbox.bin'), dtype=np.float32) bbox = bbox.reshape([-1, 11]) class_id = int(bbox[:,-2]) return id2name_dict[class_id] #Plotting multiple images in the test set # Set up matplotlib fig, and size it to fit 4x4 pics def plot_multiple_images(nrows, ncols, image_path_files): ncols = ncols nrows = nrows fig = plt.gcf() fig.set_size_inches(14, 10) idx_list = random.sample(range(1, len(image_path_files)), 20) car_pix = [image_path_files[idx] for idx in idx_list] for i, img_path in tqdm_notebook(enumerate(car_pix)): # Set up subplot; subplot indices start at 1 sp = plt.subplot(nrows, ncols, i + 1) sp.axis('Off') # Don't show axes (or gridlines) img = plt.imread(img_path) plt.imshow(img) plt.show() plot_multiple_images(4, 5, train_iles) #Creating mapping dictionaries for output label class and class names id2label_dict = {id:int(df[df.class_id == id].label) for id in df.class_id} label2classes_dict = {0: "Unknown_and_Others", 1: "Cars", 2: "Other_modes_of_transport"} #Reference functions to copy images into their respective directories def img2classid(img_path): bbox = 
np.fromfile(img_path.replace('_image.jpg', '_bbox.bin'), dtype=np.float32) bbox = bbox.reshape([-1, 11]) class_id = int(bbox[:,-2]) return id2label_dict[class_id] #Creating training and testing datasets def create_train_dataset(img_path_file, resize_dim): data_list = [] label_list = [] for fname in tqdm_notebook(img_path_file): arr = np.array(Image.open(fname).resize((resize_dim, resize_dim))) data_list.append(arr) label_list.append(img2classid(fname)) return np.array(data_list), np.array(label_list) #Printing training data and label shape print("The number of images in the dataset = {}".format(train_X.shape[0])) print("Size of each training dataset = {}".format(train_X.shape)) print("Size of each image = {}".format(train_X.shape[1:])) assert train_X.shape[0] == train_y.shape[0] #Getting the class distribution dictionary def class_counts(img_file_path): counts = {} for fname in tqdm_notebook(img_file_path): name = img2classname(fname) if name not in counts: counts[name] = 1 else: counts[name] += 1 return counts #Obtaining the distribution of classes in training set class_distribution = class_counts(train_files) #Plotting bar graph of class distributions in Training set class_names = list(class_distribution.keys()) num_class = list(class_distribution.values()) fig, ax = plt.subplots(figsize =(16, 9)) # Horizontal Bar Plot ax.barh(class_names, num_class) # Remove axes splines for s in ['top', 'bottom', 'left', 'right']: ax.spines[s].set_visible(False) # Remove x, y Ticks ax.xaxis.set_ticks_position('none') ax.yaxis.set_ticks_position('none') # Add padding between axes and labels ax.xaxis.set_tick_params(pad = 5) ax.yaxis.set_tick_params(pad = 10) # Add x, y gridlines ax.grid(b = True, color ='grey', linestyle ='-.', linewidth = 0.5, alpha = 0.2) for i in ax.patches: plt.text(i.get_width()+1, i.get_y()+0.25, str(round((i.get_width()), 2)), fontsize = 10, fontweight ='bold', color ='grey') # Add Plot Title ax.set_title('Classes distribution in Training set', 
fontsize=12) plt.show() ``` #### Creating Image directory structure for using ImageDataGenerator class ``` #Reference class categorisation for pre processing into directories """unknown/ Boats/ helicopters/ planes/ trains -- Unknown and Others compacts/ sedans/ suvs/ coupes/ muscle/ sportsclassics/ sports/ super -- Cars Motorcycles/ cycles -- 2 Wheeler Offroad -- Offroad Industrial -- Industrial Utility, Vans -- Utility Service/ Emergency/ Military -- Service Commercial -- Commercial""" #Creating directory structure for ImageDataGenerator class try: os.mkdir('Preprocessed_Task1') base_dir = os.path.join(os.getcwd(), 'Preprocessed_Task1') #Creating classes directory within training train_unknown_dir = os.path.join(base_dir, "Unknown_and_Others") train_car_dir = os.path.join(base_dir, "Cars") train_2wheeler_dir = os.path.join(base_dir, "2_Wheeler") train_offroad_dir = os.path.join(base_dir, "Offroad") train_industrial_dir = os.path.join(base_dir, "Industrial") train_utility_dir = os.path.join(base_dir, "Utility") train_service_dir = os.path.join(base_dir, "Service") train_commercial_dir = os.path.join(base_dir, "Commercial") os.mkdir(train_unknown_dir) os.mkdir(train_car_dir) os.mkdir(train_2wheeler_dir) os.mkdir(train_offroad_dir) os.mkdir(train_industrial_dir) os.mkdir(train_utility_dir) os.mkdir(train_service_dir) os.mkdir(train_commercial_dir) except OSError as error: print(error) print('Successfully created directory structure!!') #Creating mapping dictionaries for output label class and class names id2label_dict = {id:int(df[df.class_id == id].label) for id in df.class_id} id2name_dict = {id:df[df.class_id == id].class_name.tolist()[0] for id in df.class_id} #Reference functions created def img2classid(img_path): """ Getting image class id for classes.csv file based on input image path """ bbox = np.fromfile(img_path.replace('_image.jpg', '_bbox.bin'), dtype=np.float32) bbox = bbox.reshape([-1, 11]) class_id = int(bbox[:,-2]) return id2label_dict[class_id] def 
select_class_dir(image_file, class_name): """ Uses the shutil module to copy files in the image folder Copied into respective directories based on class name """ if class_name == "Compacts" or class_name == "Sedans" or class_name == "SUVs" or class_name == "Coupes" or class_name == "Muscle" or class_name == "SportsClassics" or class_name == "Sports" or class_name == "Super": shutil.copy(image_file, train_car_dir) #Renaming the file on the file os.rename(os.path.join(train_car_dir, image_file[-14:]), os.path.join(train_car_dir, image_file[-14:-4] + str(datetime.datetime.now())[11:]+'.jpg')) elif class_name == "Unknown" or class_name == "Boats" or class_name == "Helicopters" or class_name == "Planes" or class_name == "Trains": shutil.copy(image_file, train_unknown_dir) #Renaming the file on the file os.rename(os.path.join(train_unknown_dir, image_file[-14:]), os.path.join(train_unknown_dir, image_file[-14:-4] + str(datetime.datetime.now())[11:]+'.jpg')) elif class_name == "Motorcycles" or class_name == "Cycles": shutil.copy(image_file, train_2wheeler_dir) #Renaming the file on the file os.rename(os.path.join(train_2wheeler_dir, image_file[-14:]), os.path.join(train_2wheeler_dir, image_file[-14:-4] + str(datetime.datetime.now())[11:]+'.jpg')) elif class_name == "OffRoad": shutil.copy(image_file, train_offroad_dir) #Renaming the file on the file os.rename(os.path.join(train_offroad_dir, image_file[-14:]), os.path.join(train_offroad_dir, image_file[-14:-4] + str(datetime.datetime.now())[11:]+'.jpg')) elif class_name == "Industrial": shutil.copy(image_file, train_industrial_dir) #Renaming the file on the file os.rename(os.path.join(train_industrial_dir, image_file[-14:]), os.path.join(train_industrial_dir, image_file[-14:-4] + str(datetime.datetime.now())[11:]+'.jpg')) elif class_name == "Commercial": shutil.copy(image_file, train_commercial_dir) #Renaming the file on the file os.rename(os.path.join(train_commercial_dir, image_file[-14:]), 
os.path.join(train_commercial_dir, image_file[-14:-4] + str(datetime.datetime.now())[11:]+'.jpg')) elif class_name == "Utility" or class_name == "Vans": shutil.copy(image_file, train_utility_dir) #Renaming the file on the file os.rename(os.path.join(train_utility_dir, image_file[-14:]), os.path.join(train_utility_dir, image_file[-14:-4] + str(datetime.datetime.now())[11:]+'.jpg')) elif class_name == "Service" or class_name == "Emergency" or class_name == "Military": shutil.copy(image_file, train_service_dir) #Renaming the file on the file os.rename(os.path.join(train_service_dir, image_file[-14:]), os.path.join(train_service_dir, image_file[-14:-4] + str(datetime.datetime.now())[11:]+'.jpg')) #Copying files from the trainval folder to each subfolder depending upon class label def copy_data_into_dir(img_path_file): """ References the above functions to create the image directory structure """ for fname in tqdm_notebook(img_path_file): class_name = img2classname(fname) select_class_dir(fname, class_name) #Running the copying images function copy_data_into_dir(train_files) print('Successfully copied all files into respective directories') base_dir = os.path.join(os.getcwd(), 'Preprocessed_Task1') train_unknown_dir = os.path.join(base_dir, "Unknown_and_Others") train_car_dir = os.path.join(base_dir, "Cars") train_2wheeler_dir = os.path.join(base_dir, "2_Wheeler") train_offroad_dir = os.path.join(base_dir, "Offroad") train_industrial_dir = os.path.join(base_dir, "Industrial") train_utility_dir = os.path.join(base_dir, "Utility") train_service_dir = os.path.join(base_dir, "Service") train_commercial_dir = os.path.join(base_dir, "Commercial") #Printing results to crosscheck successful file transfer print("The number of Class: {} = {}".format("Unknown_and_Others", len(os.listdir(train_unknown_dir)))) print("The number of Class: {} = {}".format("Cars", len(os.listdir(train_car_dir)))) print("The number of Class: {} = {}".format("2_Wheeler", 
len(os.listdir(train_2wheeler_dir)))) print("The number of Class: {} = {}".format("Offroad", len(os.listdir(train_offroad_dir)))) print("The number of Class: {} = {}".format("Industrial", len(os.listdir(train_industrial_dir)))) print("The number of Class: {} = {}".format("Utility", len(os.listdir(train_utility_dir)))) print("The number of Class: {} = {}".format("Service", len(os.listdir(train_service_dir)))) print("The number of Class: {} = {}".format("Commercial", len(os.listdir(train_commercial_dir)))) ``` ### **Section III: Defining Data Loaders in Tensorflow** #### Checking GPU and Ram availability ``` #Checking GPU availablity gpu_info = !nvidia-smi gpu_info = '\n'.join(gpu_info) if gpu_info.find('failed') >= 0: print('Not connected to a GPU') else: print(gpu_info) from psutil import virtual_memory ram_gb = virtual_memory().total / 1e9 print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb)) if ram_gb < 20: print('Not using a high-RAM runtime') else: print('You are using a high-RAM runtime!') ``` #### Loading images from directories using ImageDataGenerator class ``` #Creating ImageDataGenerators for training, validation sets base_dir = os.path.join(os.getcwd(), "Preprocessed_Task1") from tensorflow.keras.preprocessing.image import ImageDataGenerator res_size = 224 classes_ls = ["Unknown_and_Others", "Cars", "2_Wheeler", "Offroad", "Industrial", "Utility", "Service", "Commercial"] train_datagen = ImageDataGenerator(height_shift_range=0.15, fill_mode='nearest', horizontal_flip=True, rescale=1.0/255., validation_split=0.15) train_generator = train_datagen.flow_from_directory( base_dir, target_size=(res_size, res_size), color_mode='rgb', classes = classes_ls, class_mode='categorical', batch_size=64, shuffle=True, subset="training", interpolation='bilinear') val_generator = train_datagen.flow_from_directory( base_dir, target_size=(res_size, res_size), color_mode='rgb', classes = classes_ls, class_mode='categorical', batch_size=64, shuffle=True, 
subset="validation", interpolation='bilinear')
```
### **Section IV: Training models using Transfer Learning**
#### Using Transfer Learning with ResNet50v2 model
```
#Loading a ResNet50v2 model
from tensorflow.keras import layers
from tensorflow.keras import Model
res_size = 224
from tensorflow.keras.applications.resnet_v2 import ResNet50V2
res_model = ResNet50V2(include_top = False, weights='imagenet', input_shape=(res_size, res_size, 3))
# Freeze the first 130 layers; only the deeper blocks are fine-tuned.
for layer in res_model.layers[:130]:
    layer.trainable = False
last_layer = res_model.get_layer("conv5_block3_2_relu")
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output
# Flatten the output layer to 1 dimension
#x = layers.Flatten()(last_output)
x = layers.GlobalAveragePooling2D()(last_output)
x = tf.keras.layers.BatchNormalization()(x)
#x = tf.keras.layers.Dropout(rate = 0.2)(x)
# Add a fully connected layer
x = layers.Dense(256, activation='relu', use_bias=True, kernel_initializer = tf.keras.initializers.HeNormal(), bias_initializer='zeros')(x)
x = tf.keras.layers.BatchNormalization()(x)
# Add a fully connected layer
x = layers.Dense(128, activation='relu', use_bias=True, kernel_initializer = tf.keras.initializers.HeNormal(), bias_initializer='zeros')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = layers.Dense(32, activation='relu', use_bias=True, kernel_initializer = tf.keras.initializers.HeNormal(), bias_initializer='zeros')(x)
# Add a final sigmoid layer for classification
# NOTE(review): len(os.listdir(base_dir)) counts directory entries under the
# preprocessed folder (the 8 class dirs) -- any stray file changes the head size.
x = layers.Dense(len(os.listdir(base_dir)), activation = "softmax")(x)
resnetv2_model = Model(res_model.input, x)
resnetv2_model.summary()
#Compiling the model
from tensorflow.keras.optimizers import Adam
opt = Adam(learning_rate=5e-3)
loss = tensorflow.keras.losses.CategoricalCrossentropy()
resnetv2_model.compile(optimizer = opt, loss = loss, metrics = ["accuracy"])
# LR schedule and best-checkpoint saving, both keyed on validation accuracy.
lor = tf.keras.callbacks.ReduceLROnPlateau( monitor='val_accuracy', factor=0.3, patience=3, verbose=1, mode='auto', min_delta=0.05, min_lr=0)
checkpoint = tf.keras.callbacks.ModelCheckpoint( "training_ResNetV2-{epoch:03d}.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only = False, save_freq='epoch')
history = resnetv2_model.fit( train_generator, validation_data = val_generator, epochs = 7, verbose = 1, shuffle=True, callbacks = [lor, checkpoint])
```
#### Testing model performance
```
def create_test_dataset(test_img_path_file, resize_dim):
    """
    Creates a numpy file in format (test_num, Res50 input, Res50 input, 3)
    Used to test the model performance later
    """
    data_list = []
    for fname in tqdm_notebook(test_img_path_file):
        arr = np.array(Image.open(fname).resize((resize_dim, resize_dim)))
        data_list.append(arr)
    return data_list

res_size = 224
test_files = sorted(glob('test/*/*_image.jpg'))
res_test_X = create_test_dataset(test_files, res_size)
np.save("res_test_x.npy", res_test_X)

#Creating test_set for EfficientNetB4
# NOTE(review): this redefines create_test_dataset with an identical body;
# only the resize dimension passed in differs.
def create_test_dataset(test_img_path_file, resize_dim):
    data_list = []
    for fname in tqdm_notebook(test_img_path_file):
        arr = np.array(Image.open(fname).resize((resize_dim, resize_dim)))
        data_list.append(arr)
    return data_list

effv2s_size = 384
test_files = sorted(glob('test/*/*_image.jpg'))
effv2s_test_X = create_test_dataset(test_files, effv2s_size)
np.save("effv2s_test_x.npy", effv2s_test_X)

#Testing performance
#Loading test dataset images
from tensorflow.keras.preprocessing.image import ImageDataGenerator
test_X = np.array(np.load("res_test_x.npy"))
test_datagen = ImageDataGenerator(rescale=1.0/255.)
test_generator = test_datagen.flow(test_X, batch_size = 32, shuffle=False) #Loading resnetv2 model from tensorflow.keras.models import load_model checkpoint_path = "/content/drive/MyDrive/ROB535_Perception_project/training_2-048.h5" test_resnetv2101_model = load_model(checkpoint_path) test_files = sorted(glob('test/*/*_image.jpg')) #Obtain predictions from the test data print("Dimensions of the predictions array: {}".format(predictions.shape)) assert predictions.shape[0] == np.array(test_X).shape[0] test_files = sorted(glob('test/*/*_image.jpg')) np.save("test_files.npy", np.array(test_files)) print("File saved") ``` #### Post processing and submitting csv file ``` #Transforming labels to appropriate classes for csv submission def convert_label(label): """ Tranforming label to match final output class """ if label == 3 or label == 4 or label == 5: return 2 elif label == 6 or label == 7: return 0 else: return label predictions_ls = [convert_label(label) for label in np.argmax(predictions, axis=1).tolist()] #Creating a dataframe and saving it as output test_file = [fname[5:-10] for fname in test_files] data = {'guid/image': test_file, 'label': predictions_ls} output_df = pd.DataFrame(data) output_df.to_csv("ResNetv2101_preprocessed_Team11.csv", index=False) from tensorflow.keras.models import load_model test_files = sorted(glob('test/*/*_image.jpg')) def test_model_performance_with_hub(model_path, csv_file_name): """ Saves an apt Kaggle competition submission csv file baed on tf model and csv file name """ checkpoint_path = model_path test_model = load_model(checkpoint_path, custom_objects={'KerasLayer':hub.KerasLayer}) #Obtain predictions from the test data predictions = test_model.predict(test_generator) print("Dimensions of the predictions array: {}".format(predictions.shape)) assert predictions.shape[0] == np.array(test_X).shape[0] predictions_ls = [convert_label(label) for label in np.argmax(predictions, axis=1).tolist()] #Creating a dataframe and saving it as 
output test_file = [fname[5:-10] for fname in test_files] data = {'guid/image': test_file, 'label': predictions_ls} output_df = pd.DataFrame(data) output_df.to_csv(csv_file_name, index=False) print("File_downloaded as .csv!!") test_model_performance(model_path = "/content/drive/MyDrive/ROB535_Perception_project/Efficientv2B0_training-049.h5", csv_file_name = "Effv2B0_Team11.csv") ``` #### Evaluating ResNet50v2 model performance of validation set ``` #Loading resnetv2 model from tensorflow.keras.models import load_model checkpoint_path = "/content/drive/MyDrive/ROB535_Perception_project/training_ResNetV2-046.h5" test_resnetv50_model = load_model(checkpoint_path) val_loss, val_accuracy = test_resnetv502_model.evaluate(val_generator, verbose=1) ``` #### Complete pipeline -- Training the ResNet50 v2 model with small learning rates -- Tuning model performance ``` #Loading resnetv2 model from tensorflow.keras.models import load_model checkpoint_path = "/content/drive/MyDrive/ROB535_Perception_project/training_ResNetV2-046.h5" test_resnetv250_model = load_model(checkpoint_path) base_dir2 = os.path.join(os.getcwd(), "Preprocessed_Task1") from tensorflow.keras.preprocessing.image import ImageDataGenerator res_size = 224 classes_ls = ["Unknown_and_Others", "Cars", "2_Wheeler", "Offroad", "Industrial", "Utility", "Service", "Commercial"] train_datagen = ImageDataGenerator(height_shift_range=0.15, fill_mode='nearest', horizontal_flip=True, rescale=1.0/255., validation_split=0.15, width_shift_range=0.1, brightness_range=(0.3, 0.7), zoom_range=0.1, rotation_range = 10, shear_range=0.1) train_generator = train_datagen.flow_from_directory( base_dir2, target_size=(res_size, res_size), color_mode='rgb', classes = classes_ls, class_mode='categorical', batch_size=64, shuffle=True, subset="training", interpolation='lanczos') val_generator = train_datagen.flow_from_directory( base_dir2, target_size=(res_size, res_size), color_mode='rgb', classes = classes_ls, class_mode='categorical', 
batch_size=64, shuffle=True, subset="validation", interpolation='lanczos') from tensorflow.keras.optimizers import Adam opt = Adam(learning_rate=3e-6) loss = tensorflow.keras.losses.CategoricalCrossentropy() with tf.device('/gpu:0'): test_resnetv250_model.compile(optimizer = opt, loss = loss, metrics = ["accuracy"]) lor = tf.keras.callbacks.ReduceLROnPlateau( monitor='val_accuracy', factor=0.25, patience=3, verbose=1, mode='auto', min_delta=0.01, min_lr=1e-13) checkpoint = tf.keras.callbacks.ModelCheckpoint( "ResNetv250_training/training-{epoch:03d}-{val_loss:.4f}-{val_accuracy:.4f}.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only = False, save_freq='epoch') history2 = test_resnetv250_model.fit( train_generator, validation_data = val_generator, epochs = 25, verbose = 1, shuffle=True, callbacks = [lor, checkpoint]) #Transforming labels to appropriate classes for csv submission def convert_label(label): if label == 3 or label == 4 or label == 5: return 2 elif label == 6 or label == 7: return 0 else: return label def test_model_performance_without_hub(model_path, csv_file_name): checkpoint_path = model_path test_model = load_model(checkpoint_path) #Obtain predictions from the test data predictions = test_model.predict(test_generator) print("Dimensions of the predictions array: {}".format(predictions.shape)) assert predictions.shape[0] == np.array(test_X).shape[0] predictions_ls = [convert_label(label) for label in np.argmax(predictions, axis=1).tolist()] #Creating a dataframe and saving it as output test_file = [fname[5:-10] for fname in test_files] data = {'guid/image': test_file, 'label': predictions_ls} output_df = pd.DataFrame(data) output_df.to_csv(csv_file_name, index=False) print("File_downloaded as .csv!!") test_model_performance_without_hub("/content/drive/MyDrive/ROB535_Perception_project/ResNetv250_training/training-023-1.3107-0.7323.h5", "ResNetv250_small_lr_Team11.csv") #Transforming labels to appropriate classes for csv 
submission
def convert_label(label):
    # Map the 8 coarse training labels onto the 3 submission labels.
    if label == 3 or label == 4 or label == 5:
        return 2
    elif label == 6 or label == 7:
        return 0
    else:
        return label

def sub_model_label(label):
    # Subset-model labels: 0 (Offroad) maps to submission label 2.
    if label == 0:
        return 2
    else:
        return label

classes_ls = ["Unknown_and_Others", "Cars", "2_Wheeler", "Offroad", "Industrial", "Utility", "Service", "Commercial"]
os.chdir("/content/drive/MyDrive/ROB535_Task1")

#Defining custom test generator with soft voting
def custom_augmentation_testgen(test_npy_file, saved_full_model_path, saved_subset_model_path1, saved_subset_model_path2, test_files, csv_file_name):
    """
    Inputs:
    test_npy_file: Input numpy file which contains test images in form (batch, size, size, channel_size)
    Use np.load("test_npy_file") to use it for custom generator
    saved_full_model_path: Full 8 classes classifier model path
    saved_subset_model_path1: Cars, Offroad and Utility class classfier model path
    Preferred when class label is Cars or Utility
    saved_subset_model_path2: Second Cars, Offroad and Utility class classfier model path
    Preferred when class label is Offroad
    test_files: Test files loaded using glob from the directory -- Used to create column in DataFrame
    csv_file_name: Output csv file name to store the submission
    Output: Submission csv file to upload on Kaggle
    """
    #Aggregating predictions on each test image
    prediction_ls = []
    #Used for prioritizing between stacked Image Classifier and Object Detection model -- 0/1
    flag_label = []
    #Loading saved full model
    checkpoint_path = saved_full_model_path
    test_model_1 = load_model(checkpoint_path, custom_objects={'KerasLayer':hub.KerasLayer})
    #Loading saved subset model 1 -- Trained on Cars, Offroad and Utility class only
    #Preferred when class is Cars or Utility
    checkpoint_path2 = saved_subset_model_path1
    test_model_2 = load_model(checkpoint_path2, custom_objects={'KerasLayer':hub.KerasLayer})
    #Loading saved subset model 2 -- Trained on Cars, Offroad and Utility class only
    #Preferred when class is Offroad
    checkpoint_path3 = saved_subset_model_path2
    test_model_3 = load_model(checkpoint_path3, custom_objects={'KerasLayer':hub.KerasLayer})
    for img in tqdm_notebook(test_npy_file):
        #Stacking 6 rescaled images - the original, random shift 0.1*x & 0.1*y, random rotation 10 degrees,
        # random brightness, random zoom 0.1 and random shear - intensity 8
        batch_img = np.vstack((img[np.newaxis, :]/255,
                               tf.keras.preprocessing.image.random_shift( img, wrg = 0.1, hrg = 0.1, row_axis=0, col_axis=1, channel_axis=2)[np.newaxis, :]/255,
                               tf.keras.preprocessing.image.random_rotation( img, rg = 10, row_axis=0, col_axis=1, channel_axis=2)[np.newaxis, :]/255,
                               tf.keras.preprocessing.image.random_brightness( img, brightness_range = (0.3,0.7))[np.newaxis, :]/255,
                               tf.keras.preprocessing.image.random_zoom( img, zoom_range = (0.1,0.1), row_axis=0, col_axis=1, channel_axis=2)[np.newaxis, :]/255,
                               tf.keras.preprocessing.image.random_shear( img, intensity = 8, row_axis=0, col_axis=1, channel_axis=2)[np.newaxis, :]/255 ))
        #Checking whether the batch_img is valid or not
        assert batch_img.shape[0] == 6
        #Obtain predictions from the batch Image data
        predictions = test_model_1.predict(batch_img)
        assert predictions.shape[0] == batch_img.shape[0]
        #Performing soft voting based on probabilities -- single classifier
        final_label = np.argmax(np.average(predictions, axis=0))
        img_class = classes_ls[final_label]
        #Building stacking classifier if predicted is either Cars, Offroad or Utility class
        if img_class == "Cars" or img_class == "Utility":
            #Predicting using seperate subset classifier 1 trained on these 3 classes
            pred = test_model_2.predict(batch_img)
            assert pred.shape[0] == batch_img.shape[0]
            #Implementing soft voting and appending the predicted label
            label = np.argmax(np.average(pred, axis=0))
            prediction_ls.append(sub_model_label(label))
            flag_label.append(1)
        elif img_class == "Offroad":
            #Predicting using seperate subset classifier 1 trained on these 3 classes
            pred = test_model_3.predict(batch_img)
            assert pred.shape[0] == batch_img.shape[0]
            #Implementing soft voting and appending the predicted label
            label = np.argmax(np.average(pred, axis=0))
            prediction_ls.append(sub_model_label(label))
            flag_label.append(1)
        else:
            prediction_ls.append(convert_label(final_label))
            flag_label.append(0)
    #Creating a dataframe and saving it as output
    assert len(prediction_ls) == test_npy_file.shape[0]
    #Based on submission regulations
    image_file = [fname[5:-10] for fname in test_files]
    data = {'guid/image': image_file, 'label': prediction_ls, 'flag':flag_label}
    #Creating the pandas DataFrame and saving it
    output_df = pd.DataFrame(data)
    output_df.to_csv(csv_file_name, index=False)
    print("File_downloaded as .csv!!")

#Running the function
test_npy_file = np.load("effv2s_test_x.npy") #Contains each image of dim (384,384,3) -- Input for EfficientV2s model
model_path = "Models/training-017-0.9035-0.7473.h5"
model_path2 = "Models/training-001-0.7265-0.7889.h5"
model_path3 = "Models/training-016-0.7580-0.7930.h5"
test_files = sorted(glob('test/*/*_image.jpg'))
file_name = "Stacked_EfficientV2s_softVoting_Team11_flag.csv"
custom_augmentation_testgen(test_npy_file = test_npy_file, saved_full_model_path = model_path, saved_subset_model_path1 = model_path2, saved_subset_model_path2 = model_path3, test_files = test_files, csv_file_name = file_name)

#Printing results
base_dir = os.path.join(os.getcwd(), 'Preprocessed_subset_Task1')
train_car_dir = os.path.join(base_dir, "Cars")
train_offroad_dir = os.path.join(base_dir, "Offroad")
train_utility_dir = os.path.join(base_dir, "Utility")
#Printing results to crosscheck successful file transfer
print("The number of Class: {} = {}".format("Cars", len(os.listdir(train_car_dir))))
print("The number of Class: {} = {}".format("Offroad", len(os.listdir(train_offroad_dir))))
print("The number of Class: {} = {}".format("Utility", len(os.listdir(train_utility_dir))))
```
### **Section V: Developing sub classifiers based on Error Analysis**
#### Training a seperate classifier on Cars,
Utility and Commerical vehicles
```
#Creating directory strucuture to run ImageDataGenerator
#Creating directory structure for ImageDataGenerator class
try:
    os.mkdir('Preprocessed_subset_Task1')
    base_dir = os.path.join(os.getcwd(), 'Preprocessed_subset_Task1')
    #Creating classes directory within training
    train_car_dir = os.path.join(base_dir, "Cars")
    train_offroad_dir = os.path.join(base_dir, "Offroad")
    train_utility_dir = os.path.join(base_dir, "Utility")
    os.mkdir(train_car_dir)
    os.mkdir(train_offroad_dir)
    os.mkdir(train_utility_dir)
except OSError as error:
    print(error)
print('Successfully created directory structure!!')

# Copy an image into the subset directory matching its fine-grained class;
# only Cars/Offroad/Utility families are kept, everything else is skipped.
def select_class_subset_dir(image_file, class_name):
    if class_name == "Compacts" or class_name == "Sedans" or class_name == "SUVs" or class_name == "Coupes" or class_name == "Muscle" or class_name == "SportsClassics" or class_name == "Sports" or class_name == "Super":
        shutil.copy(image_file, train_car_dir)
        #Renaming the file on the file
        os.rename(os.path.join(train_car_dir, image_file[-14:]), os.path.join(train_car_dir, image_file[-14:-4] + str(datetime.datetime.now())[11:]+'.jpg'))
    elif class_name == "OffRoad":
        shutil.copy(image_file, train_offroad_dir)
        #Renaming the file on the file
        os.rename(os.path.join(train_offroad_dir, image_file[-14:]), os.path.join(train_offroad_dir, image_file[-14:-4] + str(datetime.datetime.now())[11:]+'.jpg'))
    elif class_name == "Utility" or class_name == "Vans":
        shutil.copy(image_file, train_utility_dir)
        #Renaming the file on the file
        os.rename(os.path.join(train_utility_dir, image_file[-14:]), os.path.join(train_utility_dir, image_file[-14:-4] + str(datetime.datetime.now())[11:]+'.jpg'))

#Copying files from the trainval folder to each subfolder depending upon class label
def copy_data_subset_dir(img_path_file):
    for fname in tqdm_notebook(img_path_file):
        class_name = img2classname(fname)
        select_class_subset_dir(fname, class_name)

#Running the copying images function
copy_data_subset_dir(train_files)
print('Successfully copied all files into respective directories')

#Training EfficientNetV2s on the subset preprocessed dataset
base_dir2 = os.path.join(os.getcwd(), "Preprocessed_subset_Task1")
from tensorflow.keras.preprocessing.image import ImageDataGenerator
effv2s_size = 384
classes_ls = ["Offroad","Cars","Utility"]
train_datagen = ImageDataGenerator(height_shift_range=0.15, fill_mode='nearest', horizontal_flip=True, rescale=1.0/255., validation_split=0.15, width_shift_range=0.15, brightness_range=(0.3, 0.7), zoom_range=0.1, rotation_range = 10, shear_range=0.1)
train_generator = train_datagen.flow_from_directory( base_dir2, target_size=(effv2s_size, effv2s_size), color_mode='rgb', classes = classes_ls, class_mode='categorical', batch_size=16, shuffle=True, subset="training", interpolation='lanczos')
val_generator = train_datagen.flow_from_directory( base_dir2, target_size=(effv2s_size, effv2s_size), color_mode='rgb', classes = classes_ls, class_mode='categorical', batch_size=16, shuffle=True, subset="validation", interpolation='lanczos')

#Loading the EfficientNetv2s model
effv2s_size = 384
do_fine_tuning = True
base_model = hub.KerasLayer("https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_s/classification/2", trainable = do_fine_tuning)
effv2s_model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(effv2s_size, effv2s_size,3)),
    base_model,
    # Add a fully connected layer
    tf.keras.layers.Dense(512, activation='relu', use_bias=True, kernel_initializer = tf.keras.initializers.HeNormal(), bias_initializer='zeros'),
    # Add another fully connected layer
    tf.keras.layers.Dense(256, activation='relu', use_bias=True, kernel_initializer = tf.keras.initializers.HeNormal(), bias_initializer='zeros'),
    # Add another fully connected layer
    tf.keras.layers.Dense(64, activation='relu', use_bias=True, kernel_initializer = tf.keras.initializers.HeNormal(), bias_initializer='zeros'),
    # Add a final sigmoid layer for classification
    tf.keras.layers.Dense(3, activation = "softmax")])
effv2s_model.build((None, effv2s_size, effv2s_size, 3))

# Resume training from a previously saved subset checkpoint.
from tensorflow.keras.models import load_model
checkpoint_path = "EfficientV2s_subset_training/training-013-0.7522-0.7899.h5"
effv2s_model = load_model(checkpoint_path, custom_objects={'KerasLayer':hub.KerasLayer})

#Compiling the model
from tensorflow.keras.optimizers import Adam
opt = Adam(learning_rate=1e-4)
loss = tensorflow.keras.losses.CategoricalCrossentropy()
effv2s_model.compile(optimizer = opt, loss = loss, metrics = ["accuracy"])
lor = tf.keras.callbacks.ReduceLROnPlateau( monitor='val_accuracy', factor=0.25, patience=3, verbose=1, mode='auto', min_delta=0.01, min_lr=1e-12)
checkpoint = tf.keras.callbacks.ModelCheckpoint( "EfficientV2s_subset_training/training-{epoch:03d}-{val_loss:.4f}-{val_accuracy:.4f}.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only = False, save_freq='epoch')
history3 = effv2s_model.fit( train_generator, validation_data = val_generator, epochs = 30, verbose = 1, shuffle=True, callbacks = [lor, checkpoint] )

# Plot training vs validation accuracy for the run above.
acc = history3.history['accuracy']
val_acc = history3.history['val_accuracy']
loss = history3.history['loss']
val_loss = history3.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
```
### **Section VI: Experimentation**
```
from tensorflow.keras.models import load_model
def re_train_model(checkpoint_path, lr):
    # Reload a saved model and recompile it with a fresh Adam optimizer at lr.
    new_model = load_model(checkpoint_path)
    opt = Adam(learning_rate = lr)
    loss = tensorflow.keras.losses.CategoricalCrossentropy()
    new_model.compile(optimizer = opt, loss = loss, metrics = ["accuracy"])
    return new_model

path = "/content/drive/MyDrive/ROB535_Perception_project/ResNetV2_model(5).h5"
resv2_model3 = re_train_model(path, 9e-7)

#Loading in data with the generators
train_generator =
train_datagen.flow_from_directory( base_dir, target_size=(res_size,res_size), color_mode='rgb', classes = ['Unknown_and_Others', 'Cars', 'Other_modes_of_transport'], class_mode='categorical', batch_size=64, shuffle=True, subset="training", interpolation='bilinear') val_generator = train_datagen.flow_from_directory( base_dir, target_size=(res_size,res_size), color_mode='rgb', classes = ['Unknown_and_Others', 'Cars', 'Other_modes_of_transport'], class_mode='categorical', batch_size=64, shuffle=True, subset="validation", interpolation='bilinear') lor3 = tf.keras.callbacks.ReduceLROnPlateau( monitor='val_accuracy', factor=0.3, patience=2, verbose=1, mode='auto', min_delta=0.002, min_lr=0) checkpoint3 = tf.keras.callbacks.ModelCheckpoint( "ResNetV2_model(6).h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only = False, save_freq='epoch') history4 = resv2_model3.fit( train_generator, validation_data = val_generator, epochs = 15, verbose = 1, shuffle=True, callbacks = [lor3, checkpoint3]) import matplotlib.pyplot as plt acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(acc)+1) plt.plot(epochs, acc, 'r', label='Training accuracy') plt.plot(epochs, val_acc, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.legend(loc=0) plt.figure() plt.show() test_files = sorted(glob('test/*/*_image.jpg')) #Obtain predictions from the test data predictions = resnetv2_model.predict(test_generator) print("Dimensions of the predictions array: {}".format(predictions.shape)) assert predictions.shape[0] == np.array(test_X).shape[0] #Creating a dataframe and saving it as output test_file = [fname[5:-10] for fname in test_files] data = {'guid/image': test_file, 'label': np.argmax(predictions, axis=1).tolist()} output_df = pd.DataFrame(data) output_df.to_csv("Update_Team11.csv", index=False) #Loading a new model 
resnetv2_model_iter2 = tf.keras.models.load_model('ResNetV2_model.h5') resnetv2_model_iter2.summary() from tensorflow.keras.optimizers import Adam opt = Adam(learning_rate=5e-4) loss = tensorflow.keras.losses.CategoricalCrossentropy() resnetv2_model_iter2.compile(optimizer = opt, loss = loss, metrics = ["accuracy"]) lor = tf.keras.callbacks.ReduceLROnPlateau( monitor='val_accuracy', factor=0.2, patience=2, verbose=1, mode='auto', min_delta=0.001, min_lr=0) checkpoint1 = tf.keras.callbacks.ModelCheckpoint( "ResNetV2_model_iter2.h5", monitor='val_accuracy', verbose=1, save_best_only=False, save_weights_only = False, mode='auto', save_freq='epoch') history_2 = resnetv2_model_iter2.fit( train_generator, validation_data = val_generator, epochs = 10, verbose = 1, shuffle=True, callbacks = [lor, checkpoint1]) #Loading test dataset images test_X = np.array(np.load("test_xupd.npy")) test_datagen = ImageDataGenerator(rescale=1.0/255.) test_generator = test_datagen.flow(test_X, batch_size = 32, shuffle=False) import matplotlib.pyplot as plt acc = history_2.history['accuracy'] val_acc = history_2.history['val_accuracy'] loss = history_2.history['loss'] val_loss = history_2.history['val_loss'] epochs = range(1, len(acc)+1) plt.plot(epochs, acc, 'r', label='Training accuracy') plt.plot(epochs, val_acc, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.legend(loc=0) plt.figure() plt.show() ```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt from IPython.display import Image %matplotlib inline Image('djikstra.png') ``` Consider the example problem we discussed in the theory session (easy to verify!). We will implement from scratch Djikstra algorithm to find the cost of traversal from a source node to all other nodes in a given connected graph. Note that we are not finding the shortest path itself. But that will follow ``` class Graph(object): def __init__(self, nodes, edges): self.nodes = nodes self.adjacency = -np.ones([nodes, nodes]) self.shortest_path_set = [False] * nodes #populate the adjacency matrix from edges # format of edges = (node1, node2, edge_cost) for node1, node2, cost in edges: self.adjacency[node1, node2] = cost # dist = 1D array of all distances to source # check if node is not already in the shortest path set # output = closest node # minimum entry in dist, corresponding entry in # self.shortest_path_set must be False def min_cost_index(self, dist): return np.argmin(np.array(dist) + 1000*np.array(self.shortest_path_set)) def dijkstra(self, src): #initialize distance array dist = [1000] * self.nodes dist[src] = 0 for _ in range(self.nodes): #dist = [0, 1000, 1000, ..., 1000] #shortest_path_set = [1, 0, 0, ..., 0] #dist = [0, 8, 5, 2, 1000,...] #shortest_path_set = [1, 0, 0, 1, 0, ...] 
i = self.min_cost_index(dist) # Store min distance vertex in shortest path tree self.shortest_path_set[i] = True # Update dist value of the neighbors of selected node # Two conditions to check for each neighbor # (a) not in shortest path tree (b) cost is now lowered # first get neighbor list from adjacency matrix all_nodes = self.adjacency[i,:] # loop over neighbor list to check for other 2 conditions # if satisfied, change dist[j] for j, edge_cost in enumerate(all_nodes): if edge_cost > 0 and not self.shortest_path_set[j]: # valid neighbor if dist[i] + edge_cost < dist[j]: dist[j] = dist[i] + edge_cost return dist nodes = 7 # (node_A, node_B, edge_cost) edges = [(0, 1, 8), (0, 2, 5), (0, 3, 2), (1, 4, 2), (2,1,1), (2, 5, 3), \ (3, 5, 8), (4, 5, 7), (5, 4, 7), (4, 6, 1), (6, 5, 4) ] g = Graph(nodes, edges) for node, dist in enumerate(g.dijkstra(0)): print(f"Node {node} is at distance {dist}") g.adjacency ``` ## A* Let us now modify the graph to accept the 2D co-ordinates of the node. We will use Euclidean distance as the heuristic ``` Image('astar.png') from google.colab import drive drive.mount('/content/drive') node_coords = [(0, 0),(2,2),(1,2),(1,0),(3,3),(3,2), (4,2)] # Function to calculate euclidean distance # (x1, y1), (x2, y2) given def euclidean(node1, node2): x1, y1 = node1 x2, y2 = node2 return np.sqrt((x1-x2)**2+(y1-y2)**2) class Graph(object): def __init__(self, nodes, edges, coords, weight=1.0, heuristic=euclidean): self.nodes = nodes self.adjacency = np.zeros([nodes, nodes]) self.shortest_path_set = [False] * nodes self.heuristic = heuristic self.coords = coords self.weight = weight # weight of heuristic #populate the adjacency matrix from edges # edges = (node1, node2, edge_cost) for node1, node2, cost in edges: self.adjacency[node1, node2] = cost # Input: 1-D distance array to source, destination (x, y) # output: next node to be selected # remember criteria is source_cost + weight * heuristic_destination # node should not be in shortest_path_set 
def min_astar_distance(self, dist, dest_coords): heuristic_cost = np.array([self.heuristic(n, dest_coords) for n in self.coords]) src_cost = np.array(dist) costs = src_cost + self.weight*heuristic_cost + 1000 *np.array(self.shortest_path_set) return np.argmin(costs) def astar(self, src, dest): #initialize distance array dist = [1000] * self.nodes dist[src] = 0 #get the destination (x,y) dest_coords = self.coords[dest] i = 0 #predecessors = [] for _ in range(self.nodes): #previous_i = i i = self.min_astar_distance(dist, dest_coords) # Store min distance vertex in shortest path tree self.shortest_path_set[i] = True #predecessors[i] = previous_i # Update dist value of the neighbors of selected node # Two conditions to check for each neighbor # (a) not in shortest path tree (b) cost is now lowered # first get neighbor list from adjacency matrix all_nodes = self.adjacency[i,:] neighbours = [i for i, cost in enumerate(all_nodes) if cost>0] # loop over neighbor list to check for other 2 conditions # if satisfied, change dist[j] for j, edge_cost in enumerate(all_nodes): if edge_cost > 0 and not self.shortest_path_set[j]: # valid neighbor if dist[i] + edge_cost < dist[j]: dist[j] = dist[i] + edge_cost #for j, nbr_cost in enumerate(neighbors): # find heuristic cost from all nodes to destination # use list comprehension heuristic_cost = [] for i in self.coords: cost = euclidean(i, dest_coords) heuristic_cost.append(cost) # predecessors of nodes return dist, heuristic_cost ``` # New Section # New Section ``` nodes = 7 # (node_A, node_B, edge_cost) edges = [(0, 1, 8), (0, 2, 5), (0, 3, 2), (1, 4, 2), (2, 5, 3), \ (3, 5, 8), (4, 5, 7), (5, 4, 7), (5, 6, 1), (6, 5, 4) ] node_coords = [(0, 0),(2,2),(1,2),(1,0),(3,3),(3,2),(4,2)] g = Graph(nodes, edges, node_coords) cost, heuristic = g.astar(0, 6) for node, (dist, heur) in enumerate(zip(cost, heuristic)): print(f"Node {node} is at distance {dist}") print(f"Node {node} heuristic is {heur}") ``` ### Notice that this is a very simple 
implementation to get the costs of all nodes to the source node. We can make 2 changes

1. We did not get the predecessors of each node. The predecessors list is what will help us determine the path. Can you change the code to print out the predecessors as well?
2. In general we have to calculate only the path to the destination (not all nodes) as it is computationally expensive. What do you think should be the convergence criterion? Use it to find the shortest path to Node 5 instead of Node 6

Feel free to experiment with other heuristics like (a) L-1 norm (b) number of edges
```
# Function to calculate euclidean distance
# (x1, y1), (x2, y2) given
def euclidean(node1, node2):
    x1, y1 = node1
    x2, y2 = node2
    return np.sqrt((x1-x2)**2+(y1-y2)**2)

class Graph(object):
    def __init__(self, nodes, edges, coords, weight=1.0, heuristic=euclidean):
        self.nodes = nodes
        self.adjacency = np.zeros([nodes, nodes])
        self.shortest_path_set = [False] * nodes
        self.heuristic = heuristic
        self.coords = coords
        self.weight = weight  # weight of heuristic

        # populate the adjacency matrix from edges
        # edges = (node1, node2, edge_cost)
        for node1, node2, cost in edges:
            self.adjacency[node1, node2] = cost

    # Input: 1-D distance array to source, destination (x, y)
    # output: next node to be selected
    # criterion is source_cost + weight * heuristic_to_destination;
    # nodes already in shortest_path_set are pushed out of contention
    # by the +1000 penalty
    def min_astar_distance(self, dist, dest_coords):
        heuristic_cost = np.array([self.heuristic(n, dest_coords) for n in self.coords])
        src_cost = np.array(dist)
        costs = src_cost + self.weight*heuristic_cost + 1000*np.array(self.shortest_path_set)
        return np.argmin(costs)

    def astar(self, src, dest):
        # initialize distance array
        dist = [1000] * self.nodes
        dist[src] = 0
        # get the destination (x, y)
        dest_coords = self.coords[dest]

        # Convergence criterion (answer to question 2 above): stop as soon as
        # the *destination* enters the shortest-path set.  The previous
        # version looped on the hard-coded index `shortest_path_set[1]`,
        # which only worked by accident for one particular graph.
        while not self.shortest_path_set[dest]:
            i = self.min_astar_distance(dist, dest_coords)
            # Store min distance vertex in shortest path tree
            self.shortest_path_set[i] = True

            # Update dist value of the neighbors of the selected node.
            # Two conditions to check for each neighbor:
            # (a) not in shortest path tree (b) cost is now lowered
            # first get the neighbor row from the adjacency matrix
            all_nodes = self.adjacency[i, :]
            for j, edge_cost in enumerate(all_nodes):
                if edge_cost > 0 and not self.shortest_path_set[j]:  # valid neighbor
                    if dist[i] + edge_cost < dist[j]:
                        dist[j] = dist[i] + edge_cost

        # heuristic cost from every node to the destination
        # (computed once, after the search; uses its own loop variable so the
        # node index `i` above is no longer shadowed)
        heuristic_cost = [euclidean(c, dest_coords) for c in self.coords]

        return dist, heuristic_cost

nodes = 7
# (node_A, node_B, edge_cost)
edges = [(0, 1, 8), (0, 2, 5), (0, 3, 2), (1, 4, 2), (2, 5, 3), \
         (3, 5, 8), (4, 5, 7), (5, 4, 7), (5, 6, 1), (6, 5, 4)]
node_coords = [(0, 0), (2, 2), (1, 2), (1, 0), (3, 3), (3, 2), (4, 2)]

g = Graph(nodes, edges, node_coords)
cost, heuristic = g.astar(0, 6)
for node, (dist, heur) in enumerate(zip(cost, heuristic)):
    print(f"Node {node} is at distance {dist}")
    print(f"Node {node} heuristic is {heur}")
```
github_jupyter
# Classification of quantum states with high dimensional entanglement This notebook for saving the transpiled circuits. These are saved as qasm strings in a dictionay. The first section on creating circuits for the simulator before transpilation for real device was maintained. ## Circuits and computations Version compatible with 1st and 2d pilot studies (not tested on 1st pilot study) ``` import numpy as np import copy from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, Aer, execute, transpile, assemble from qiskit.tools.visualization import * from qiskit.ignis.mitigation.measurement import (complete_meas_cal, tensored_meas_cal, CompleteMeasFitter, TensoredMeasFitter) import json from scipy.signal import savgol_filter import time from qiskit.tools.monitor import job_monitor from o_utils import ora # classifier utilities from o_plot import opl # utilities for result plot from c_utils import new_cut # circuit building utilities def json_dic_loader(dic_name): f = open(data_directory+dic_name+'.json') return json.load(f) ###markdown for safety on demo def json_dic_dumper(dic, dic_name): with open(data_directory+dic_name+'.json', 'w') as f: json.dump(dic,f) ``` ## Set up the simulator and layout for 5 qubits ``` simulator = Aer.get_backend('qasm_simulator') #specify the layout of the devices used_qubits = 5 qubit_list = [0,1,2,3,4] #short_version = False #program_name="QAD" # 1st pilot project GHZ Psi+ / W Phi+ program_name="AL2" # 2d pilot project W Psi+ / Wbar Phi+ Flag_char = "DS" # this for a mix of two types of separable states if len(Flag_char) >= 2: unique_char = "M" else: unique_char = Flag_char # These dictionaries for the devices used in the study if program_name == "QAD": fidelity_dic = {'ibmq_athens': 0.925110, 'ibmq_valencia': 0.809101, 'ibmq_ourense': 0.802380,"ibmqx2": 0.627392, 'ibmq_santiago': 0.919399, 'ibmq_vigo': 0.908840, 'ibmq_lima':0.771835, 'ideal_device': 1.0} data_directory = "data_files/" elif program_name == "AL2": 
fidelity_dic = {'ibmq_athens': 0.910145, 'ibmq_valencia': 0.794262, 'ibmq_ourense': 0.818974, "ibmqx2": 0.359528, 'ibmq_santiago': 0.900024, 'ibmq_vigo': 0.841831, 'ibmq_quito': 0.839985, 'ibmq_lima':0.771835, 'ibmq_belem':0.842281,'ideal_device': 1.0} data_directory = "data2_files/" QV_dic = {'ibmq_athens': 32.0, 'ibmq_valencia': 16.0, 'ibmq_ourense': 8.0,"ibmqx2": 8.0, 'ibmq_santiago': 32.0, 'ibmq_vigo': 16.0, 'ideal_device': np.inf, 'ibmq_quito': 16.0, 'ibmq_lima': "Lim",'ibmq_belem':16.0} dev_dic = {'ibmq_santiago': "San",'ibmq_athens': "Ath", 'ibmq_valencia': "Val", 'ibmq_vigo': 'Vig','ibmq_ourense': "Our", "ibmqx2": 'Yor', 'ibmq_quito': "Qui", 'ibmq_lima': "Lim", 'ibmq_belem': "Bel",'ideal_device': "Ide" } # specify the device: here first the ideal noise-free device project_device = 'ideal_device' device_name = dev_dic[project_device] # specify the nb of id gates between state creation and measurements # zero for the ideal device id_gates = 0 str_nb_id = str(id_gates) zfilled = str_nb_id.zfill(4-len(str_nb_id)) # tail of the file names for RAM storage mitig_name = program_name + "_" + device_name project_name = mitig_name + "_" + unique_char + zfilled print(mitig_name) print(project_name) # establish the result label list # meas_calibs will be used for mitigation in the real device section qr = QuantumRegister(used_qubits) meas_calibs, label_list = complete_meas_cal(qubit_list=qubit_list, qr=qr, circlabel='mcal') nb_labels=len(label_list) print(nb_labels,label_list) len(meas_calibs) # permutation list # here it is simple to write down the list, # but a version using itertools will be wellcome for >5 qubits projects if used_qubits == 5: q_perm = [[0, 1, 2, 3, 4], [0, 1, 3, 2, 4], [0, 1, 4, 2, 3], [0, 2, 3, 1, 4], [0, 2, 4, 1, 3], [0, 3, 4, 1, 2], [1, 2, 3, 0, 4], [1, 2, 4, 0, 3], [1, 3, 4, 0, 2], [2, 3, 4, 0, 1]] else: print("work in progress - meanwhile please provide the list of permutations") ``` ## Create the quantum states ``` # define the two subsets of 
10 separable states if program_name == "QAD": state_1a = ["W","Phi+"] state_1b = ["GHZ","Psi+"] elif program_name == "ALT" or "AL2": state_1a = ["W","Psi+"] state_1b = ["Wbar","Phi+"] l_states = state_1a+state_1b l_states # version 20 circuits for demonstration # (in the version run on real devices: two batches of 10 circuits, "shallow" and "deep") # these circuits limited to state creation are ready to be saved # for ultimately building circuits adapted to noisy simulator and real devices # as option, these circuits will include a row of id gates between creation and measurements circ_ori = [] for i_s in range(0,len(l_states),2): for perm in q_perm: mycircuit = QuantumCircuit(used_qubits, used_qubits) mycircuit = new_cut.circuit_builder(mycircuit, perm, l_states[i_s],l_states[i_s+1]) circ_ori.append(mycircuit) # add measurement section to the circuit set newly created: nb_states = len(circ_ori) circ_ideal = copy.deepcopy(circ_ori) for i_state in range(nb_states): new_cut.add_barrier_and_measure(circ_ideal[i_state],qubit_list) ideal_dic = {} ``` ## Obtain result distributions on noise free simulator ``` # execute on noise free simulator s_sim = 12000 job_simul = execute(circ_ideal, backend=simulator, shots=s_sim) tot_results_simul = job_simul.result() # establish a dictionary of count results on noise free simulator: # (this step is only useful if ram storage is performed) void_counts = dict(zip(label_list, np.zeros(2**used_qubits))) tot_results_sim_dic = {} for i_state in range(nb_states): counts_simul = copy.deepcopy(void_counts) counts_simul.update(tot_results_simul.get_counts(i_state)) ideal_dic[str(i_state)]=counts_simul ``` #markdown for security json_dic_dumper(ideal_dic,"ideal_dic_"+project_name) Example of circuit for separable state of the first type for project 2 : $|W\rangle\otimes|\Psi^+\rangle$ ``` i_state_test = 0 print(device_name, "circuit #",i_state_test) circ_ideal[i_state_test].draw(output='mpl') print(device_name, "circuit #",i_state_test) 
plot_histogram(ideal_dic[str(i_state_test)], legend=['noise free simulation'], color = "b", figsize=(10.,5.)) ``` Example of circuit for separable state of the second type for project 2 : $|W\rangle^{\otimes X}\otimes|\Phi^+\rangle$ ``` i_state_test = 10 print(device_name, "circuit #",i_state_test) circ_ideal[i_state_test].draw(output='mpl') print(device_name, "circuit #",i_state_test) plot_histogram(ideal_dic[str(i_state_test)], legend=['noise free simulation'], color = "b", figsize=(10.,5.)) ``` ### Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier ``` # try loading the dictionary of results if its creation was skipped if len(ideal_dic) == 0: ideal_dic = json_dic_loader("ideal_dic_"+project_name) nb_states = len(ideal_dic) nb_labels = len(list(ideal_dic.values())[0]) s_sim = sum(list(ideal_dic.values())[0].values()) ``` # Real device section ``` from qiskit import IBMQ IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') provider.backends() project_device = 'ibmq_quito' # you may choice here a different backend device_name = dev_dic[project_device] mitig_name = program_name + "_" + device_name print(mitig_name) #determine here the backend device = provider.get_backend(project_device) # the backend names are listed here above properties = device.properties() coupling_map = device.configuration().coupling_map ``` ### Transpile the basic circuits for running on real device In this demo, these are not the circuits which were actually run on real devices (not the same transpiler seed). The optimization level is set to 2 instead of 3 in real experiments, for speed and also because at this moment there is a transpiler error occuring for ibmqx2: 'Maximum iteration reached. 
max_iteration=1000' ``` id_gates = 0 str_nb_id = str(id_gates) zfilled = str_nb_id.zfill(4-len(str_nb_id)) project_name = mitig_name + "_" + unique_char + zfilled print(project_name) # transpile verbose = True summary_dic = {} seed_transpiler_list = list(range(nb_states)) real_circs = [] start_time = time.strftime('%d/%m/%Y %H:%M:%S') print("Start at DMY: ",start_time) for i_state in list(range(nb_states)): # prepare circuit to be transpiled circuit = copy.deepcopy(circ_ori[i_state]) if id_gates > 0: circuit.barrier() for id_gates_index in range(id_gates): for index, value in enumerate(qubit_list): circuit.id(value) new_cut.add_barrier_and_measure(circuit, qubit_list) summary = [] depth_list = [] Q_state_opt_new = transpile(circuit, backend=device, coupling_map = coupling_map, seed_transpiler=seed_transpiler_list[i_state], optimization_level=2, initial_layout=qubit_list) summary_dic[i_state] = {"depth": Q_state_opt_new.depth(), 'circuit':Q_state_opt_new} real_circs.append(Q_state_opt_new) if verbose: print("circuit %2i" % i_state,"length",summary_dic[i_state]["depth"], "DMY: ",time.strftime('%d/%m/%Y %H:%M:%S')) end_time = time.strftime('%d/%m/%Y %H:%M:%S') print("Completed at DMY: ",end_time) ``` Start at DMY: 23/02/2021 15:27:39 circuit 0 length 54 DMY: 23/02/2021 15:27:39 circuit 1 length 67 DMY: 23/02/2021 15:27:39 circuit 2 length 59 DMY: 23/02/2021 15:27:40 circuit 3 length 57 DMY: 23/02/2021 15:27:40 circuit 4 length 69 DMY: 23/02/2021 15:27:40 circuit 5 length 56 DMY: 23/02/2021 15:27:41 circuit 6 length 54 DMY: 23/02/2021 15:27:41 circuit 7 length 60 DMY: 23/02/2021 15:27:41 circuit 8 length 53 DMY: 23/02/2021 15:27:41 circuit 9 length 58 DMY: 23/02/2021 15:27:41 circuit 10 length 58 DMY: 23/02/2021 15:27:42 circuit 11 length 70 DMY: 23/02/2021 15:27:42 circuit 12 length 60 DMY: 23/02/2021 15:27:42 circuit 13 length 55 DMY: 23/02/2021 15:27:42 circuit 14 length 56 DMY: 23/02/2021 15:27:42 circuit 15 length 53 DMY: 23/02/2021 15:27:43 circuit 16 length 59 
DMY: 23/02/2021 15:27:43 circuit 17 length 56 DMY: 23/02/2021 15:27:43 circuit 18 length 73 DMY: 23/02/2021 15:27:43 circuit 19 length 72 DMY: 23/02/2021 15:27:43 Completed at DMY: 23/02/2021 15:27:43 ``` qasm_circuit_dic = {} for i_state in list(range(nb_states)): qasm_circuit_dic[str(i_state)] = real_circs[i_state].qasm() ``` #### unmarkdown this one only for storing new circuit sets! json_dic_dumper(qasm_circuit_dic,"circuit_"+ project_name) ``` circuit_dic = json_dic_loader("circuit_"+ project_name) real_circs = [] for i_state in list(range(nb_states)): real_circs.append(QuantumCircuit().from_qasm_str(circuit_dic[str(i_state)])) i_state_test = 11 print(project_device, "circuit #",i_state_test, "circuit length:",real_circs[i_state_test].depth()) # you may want to skip this if large nb of id gates before measurement real_circs[i_state_test].draw(output='mpl') #check a circuit on noise-free simulator job_simul = execute(real_circs[i_state_test], backend=simulator, shots=s_sim) print(project_device, "circuit #",i_state_test, "on noise free simulator") plot_histogram(job_simul.result().get_counts(), legend=['noise free simulation'], color = "b", figsize=(10.,5.)) from pprint import pprint as pp_pprint for i_state_test in range(20): print(i_state_test, "depth",real_circs[i_state_test].depth(), "size", real_circs[i_state_test].size(), "cx",real_circs[i_state_test].num_nonlocal_gates(), json.dumps(real_circs[i_state_test].count_ops())) pp_pprint(json.dumps(real_circs[2].count_ops())) ```
github_jupyter
Online Convolutional Dictionary Learning with Spatial Mask ========================================================== This example demonstrates the use of [dictlrn.onlinecdl.OnlineConvBPDNMaskDictLearn](http://sporco.rtfd.org/en/latest/modules/sporco.dictlrn.onlinecdl.html#sporco.dictlrn.onlinecdl.OnlineConvBPDNMaskDictLearn) for learning a convolutional dictionary from a set of training images. The dictionary is learned using the online dictionary learning algorithm proposed in [[33]](http://sporco.rtfd.org/en/latest/zreferences.html#id33). ``` from __future__ import print_function from builtins import input import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40 import numpy as np from sporco.dictlrn import onlinecdl from sporco import util from sporco import signal from sporco import cuda from sporco import plot plot.config_notebook_plotting() ``` Load training images. ``` exim = util.ExampleImages(scaled=True, zoom=0.5, gray=True) S1 = exim.image('barbara.png', idxexp=np.s_[10:522, 100:612]) S2 = exim.image('kodim23.png', idxexp=np.s_[:, 60:572]) S3 = exim.image('monarch.png', idxexp=np.s_[:, 160:672]) S4 = exim.image('sail.png', idxexp=np.s_[:, 210:722]) S5 = exim.image('tulips.png', idxexp=np.s_[:, 30:542]) S = np.dstack((S1, S2, S3, S4, S5)) ``` Highpass filter training images. ``` npd = 16 fltlmbd = 5 sl, sh = signal.tikhonov_filter(S, fltlmbd, npd) ``` Create random mask and apply to highpass filtered training image set. ``` np.random.seed(12345) frc = 0.25 W = signal.rndmask(S.shape, frc, dtype=np.float32) shw = W * sh ``` Construct initial dictionary. ``` D0 = np.random.randn(8, 8, 32) ``` Set regularization parameter and options for dictionary learning solver. 
``` lmbda = 0.1 opt = onlinecdl.OnlineConvBPDNMaskDictLearn.Options({ 'Verbose': True, 'ZeroMean': False, 'eta_a': 10.0, 'eta_b': 20.0, 'DataType': np.float32, 'CBPDN': {'rho': 3.0, 'AutoRho': {'Enabled': False}, 'RelaxParam': 1.8, 'RelStopTol': 1e-4, 'MaxMainIter': 100, 'FastSolve': False, 'DataType': np.float32}}) if cuda.device_count() > 0: opt['CUDA_CBPDN'] = True ``` Create solver object and solve. ``` d = onlinecdl.OnlineConvBPDNMaskDictLearn(D0, lmbda, opt) iter = 50 d.display_start() for it in range(iter): img_index = np.random.randint(0, sh.shape[-1]) d.solve(shw[..., [img_index]], W[..., [img_index]]) d.display_end() D1 = d.getdict() print("OnlineConvBPDNMaskDictLearn solve time: %.2fs" % d.timer.elapsed('solve')) ``` Display initial and final dictionaries. ``` D1 = D1.squeeze() fig = plot.figure(figsize=(14, 7)) plot.subplot(1, 2, 1) plot.imview(util.tiledict(D0), title='D0', fig=fig) plot.subplot(1, 2, 2) plot.imview(util.tiledict(D1), title='D1', fig=fig) fig.show() ``` Get iterations statistics from solver object and plot functional value. ``` its = d.getitstat() fig = plot.figure(figsize=(7, 7)) plot.plot(np.vstack((its.DeltaD, its.Eta)).T, xlbl='Iterations', lgnd=('Delta D', 'Eta'), fig=fig) fig.show() ```
github_jupyter
## Linear Model to Classify Iris Dataset ``` from sklearn.datasets import load_iris import numpy as np import keras np.random.seed(10) ``` ### Loading the Iris Dataset ``` iris = load_iris() print(iris.keys()) X = iris['data'] # array([[5.1, 3.5, 1.4, 0.2], [4.9, 3. , 1.4, 0.2], ... ]) Y = iris['target'] # array([0, 1, 2, 0, ... ]) names = iris['target_names'] #['setosa', 'versicolor', 'virginica'] feature_names = iris['feature_names'] # ['sepal length (cm)', # 'sepal width (cm)', # 'petal length (cm)', # 'petal width (cm)'] # To Track a few sample points isamples = np.random.randint(len(Y), size = (5)) # array([ 9, 125, 15, 64, 113]) <-- random samples (example) print(X.shape, Y.shape) print(X[isamples]) print(Y[isamples]) ``` ### Categorial One-Hot Encoding ``` # Convert lables to categorial one-hot encoding Ny = len(np.unique(Y)) # Ny = 3 Y = keras.utils.to_categorical(Y[:], num_classes = Ny) # Y is np.ndarray now print("X:", X[isamples, :]) print("Y:", Y[isamples]) ``` ### Train Test Split (randomly into 80% - 20%) ``` from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.20, random_state = 1) print(X_train.shape) print(X_test.shape) print(Y_train.shape) print(Y_test.shape) ``` ### Data Normalization: Zero-Mean & Unit-Variance ``` from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) # computes mean and std X_train = scaler.transform(X_train) # x = (x-mean)/std X_test = scaler.transform(X_test) print(X_train) ``` ### Least Square Solution ``` # Training with XW = Y addlcol = lambda A: np.concatenate((A, np.ones((A.shape[0], 1))), axis = 1) Ns, Nx = X_train.shape # 120, 4 # XX = np.concatenate((X_train, np.ones((Ns, 1))), axis = 1) XX = addlcol(X_train) print(XX.shape) YY = Y_train print(YY.shape) W = np.linalg.inv(XX.T.dot(XX)).dot(XX.T.dot(YY)) print(W) def evaluate(X, W, Yd): ''' X is np.array (Nsamples, 
Nfeats); Yd is np.array (Nsamples, Nonehot) ''' x = addlcol(X) yd = np.argmax(Yd, axis = 1) y = np.argmax(x.dot(W), axis = 1) print("CM:") print(confusion_matrix(yd, y)) evaluate(X_train, W, Y_train) evaluate(X_test, W, Y_test) ``` Since we aren't able to separate the data in the lower-dimensional space, we now take the data to a higher dimension. ``` addSqlcol = lambda A: np.concatenate((A, A**2, np.ones((A.shape[0], 1))), axis = 1) Ns, Nx = X_train.shape # 120, 4 XX = addSqlcol(X_train) print(XX.shape) YY = Y_train print(YY.shape) W = np.linalg.inv(XX.T.dot(XX)).dot(XX.T.dot(YY)) print(W) def evaluate(X, W, Yd): ''' X is np.array (Nsamples, Nfeats); Yd is np.array (Nsamples, Nonehot) ''' x = addSqlcol(X) yd = np.argmax(Yd, axis = 1) y = np.argmax(x.dot(W), axis = 1) print("CM:") print(confusion_matrix(yd, y)) evaluate(X_train, W, Y_train) evaluate(X_test, W, Y_test) ``` Thus we obtained very high accuracy on the Iris dataset, and we also see that the data is mostly linearly separable in the higher-dimensional space. ### Minimum Norm Solution ### For Under-determined System ``` addlcol = lambda A: np.concatenate((A, np.ones((A.shape[0], 1))), axis = 1) ind = np.random.choice(range(X_train.shape[0]), size = 12, replace = False) XX = X_train[ind, :] XX = addlcol(XX) YY = Y_train[ind, :] W = XX.T.dot(np.linalg.inv(XX.dot(XX.T)).T.dot(YY)) # <--- X' (X X')^(-1) Y print(W, XX.shape, YY.shape) def evaluate(X, W, Yd): ''' X is np.array (Nsamples, Nfeats); Yd is np.array (Nsamples, Nonehot) ''' x = addlcol(X) yd = np.argmax(Yd, axis = 1) y = np.argmax(x.dot(W), axis = 1) print("CM:") print(confusion_matrix(yd, y)) evaluate(X_train[ind, :], W, YY) evaluate(X_test, W, Y_test) ``` Since we didn't use the entire dataset in the minimum-norm solution $(n=12)$, we don't get high accuracy. 
### For Under-determined System, with Square Input Features too ``` addSqlcol = lambda A: np.concatenate((A, A**2, np.ones((A.shape[0], 1))), axis = 1) ind = np.random.choice(range(X_train.shape[0]), size = 25, replace = False) XX = X_train[ind, :] XX = addSqlcol(XX) YY = Y_train[ind, :] W = XX.T.dot(np.linalg.inv(XX.dot(XX.T)).T.dot(YY)) # <--- X' (X X')^(-1) Y print(W, XX.shape, YY.shape) def evaluate(X, W, Yd): ''' X is np.array (Nsamples, Nfeats); Yd is np.array (Nsamples, Nonehot) ''' x = addSqlcol(X) yd = np.argmax(Yd, axis = 1) y = np.argmax(x.dot(W), axis = 1) print("CM:") print(confusion_matrix(yd, y)) evaluate(X_train[ind, :], W, YY) evaluate(X_test, W, Y_test) ``` Since we didn't use the entire dataset in the minimum-norm solution $(n=25)$, we don't get high accuracy. There isn't any reason to use the minimum-norm solution here (since for the Iris dataset we already have plenty of data). Thus the least-squares / pseudo-inverse solution gives a higher accuracy.
github_jupyter
## Make your own Rick Sanchez (bot) with Transformers and DialoGPT fine-tuning ``` from google.colab import drive drive.mount('/content/drive/') !pip -q install transformers ``` Let's move to the desired folder in which we will store all our data. ``` import os os.chdir("/content/drive/My Drive/Colab Notebooks") from transformers import AutoModelWithLMHead, AutoTokenizer import torch tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small") model = AutoModelWithLMHead.from_pretrained("microsoft/DialoGPT-small") # Let's chat for 5 lines for step in range(5): # encode the new user input, add the eos_token and return a tensor in Pytorch new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt') # append the new user input tokens to the chat history bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids # generated a response while limiting the total chat history to 1000 tokens chat_history_ids = model.generate( bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id ) # pretty print last ouput tokens from bot print("DialoGPT: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))) ``` ## Model initial configuration ``` """ Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa). GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned using a masked language modeling (MLM) loss. 
""" import glob import logging import os import pickle import random import re import shutil from typing import Dict, List, Tuple import pandas as pd import numpy as np import torch from sklearn.model_selection import train_test_split from torch.nn.utils.rnn import pad_sequence from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm.notebook import tqdm, trange from pathlib import Path from transformers import ( MODEL_WITH_LM_HEAD_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, get_linear_schedule_with_warmup, ) try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter # Configs logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) # Args to allow for easy convertion of python script to notebook class Args(): def __init__(self): self.output_dir = 'output-small' self.model_type = 'gpt2' self.model_name_or_path = 'microsoft/DialoGPT-small' self.config_name = 'microsoft/DialoGPT-small' self.tokenizer_name = 'microsoft/DialoGPT-small' self.cache_dir = 'cached' self.block_size = 512 self.do_train = True self.do_eval = True self.evaluate_during_training = False self.per_gpu_train_batch_size = 4 self.per_gpu_eval_batch_size = 4 self.gradient_accumulation_steps = 1 self.learning_rate = 5e-5 self.weight_decay = 0.0 self.adam_epsilon = 1e-8 self.max_grad_norm = 1.0 self.num_train_epochs = 10 self.max_steps = -1 self.warmup_steps = 0 self.logging_steps = 1000 self.save_steps = 3500 self.save_total_limit = None self.eval_all_checkpoints = False self.no_cuda = False self.overwrite_output_dir = True self.overwrite_cache = True self.should_continue = False self.seed = 42 self.local_rank = -1 self.fp16 = False self.fp16_opt_level = 'O1' args = Args() 
``` ## Prepare Dataset ``` !pip install kaggle !mkdir ~/.kaggle !cp kaggle.json ~/.kaggle/kaggle.json !kaggle datasets download andradaolteanu/rickmorty-scripts -f RickAndMortyScripts.csv %cd Script/ !ls # Let's look at original dataset data = pd.read_csv('RickAndMortyScripts.csv') data.head(10) contexted = [] # context window of size 7 n = 7 for i in data[data.name == CHARACTER_NAME].index: if i < n: continue row = [] prev = i - 1 - n # we additionally substract 1, so row will contain current responce and 7 previous responces for j in range(i, prev, -1): row.append(data.line[j]) contexted.append(row) columns = ['response', 'context'] columns = columns + ['context/' + str(i) for i in range(n - 1)] df = pd.DataFrame.from_records(contexted, columns=columns) len(contexted) columns = ['response', 'context'] columns = columns + ['context/'+str(i) for i in range(n-1)] columns df = pd.DataFrame.from_records(contexted, columns=columns) df.head(5) trn_df, val_df = train_test_split(df, test_size = 0.1) trn_df.head() def construct_conv(row, tokenizer, eos = True): flatten = lambda l: [item for sublist in l for item in sublist] conv = list(reversed([tokenizer.encode(x) + [tokenizer.eos_token_id] for x in row])) conv = flatten(conv) return conv class ConversationDataset(Dataset): def __init__(self, tokenizer: PreTrainedTokenizer, args, df, block_size=512): block_size = block_size - (tokenizer.model_max_length - tokenizer.max_len_single_sentence) directory = args.cache_dir cached_features_file = os.path.join( directory, args.model_type + "_cached_lm_" + str(block_size) ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) with open(cached_features_file, "rb") as handle: self.examples = pickle.load(handle) else: logger.info("Creating features from dataset file at %s", directory) self.examples = [] for _, row in df.iterrows(): conv = construct_conv(row, tokenizer) self.examples.append(conv) 
logger.info("Saving features into cached file %s", cached_features_file) with open(cached_features_file, "wb") as handle: pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL) def __len__(self): return len(self.examples) def __getitem__(self, item): return torch.tensor(self.examples[item], dtype=torch.long) # Cacheing and storing of data/checkpoints def load_and_cache_examples(args, tokenizer, df_trn, df_val, evaluate=False): return ConversationDataset(tokenizer, args, df_val if evaluate else df_trn) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]: ordering_and_checkpoint_path = [] glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix))) for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path) if regex_match and regex_match.groups(): ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] return checkpoints_sorted def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None: if not args.save_total_limit: return if args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime) if len(checkpoints_sorted) <= args.save_total_limit: return number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info("Deleting older checkpoint [{}] due to 
args.save_total_limit".format(checkpoint)) shutil.rmtree(checkpoint) ``` ## Training and Evaluating ``` def train(args, train_dataset, model: PreTrainedModel, tokenizer: PreTrainedTokenizer) -> Tuple[int, float]: """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) def collate(examples: List[torch.Tensor]): if tokenizer._pad_token is None: return pad_sequence(examples, batch_first=True) return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate, drop_last = True ) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs model = model.module if hasattr(model, "module") else model # Take care of distributed/parallel training model.resize_token_embeddings(len(tokenizer)) # add_special_tokens_(model, tokenizer) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if ( args.model_name_or_path and 
os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(os.path.join(args.model_name_or_path, "scheduler.pt")) ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if args.model_name_or_path and os.path.exists(args.model_name_or_path): try: # set global_step to gobal_step of last saved checkpoint from model path checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] global_step = int(checkpoint_suffix) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: logger.info(" Starting fine-tuning.") tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) set_seed(args) # Added here for reproducibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue inputs, labels = (batch, batch) if inputs.shape[1] > 1024: continue inputs = inputs.to(args.device) labels = labels.to(args.device) model.train() outputs = model(inputs, labels=labels) loss = 
outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: checkpoint_prefix = "checkpoint" # Save model checkpoint output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step)) os.makedirs(output_dir, exist_ok=True) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) _rotate_checkpoints(args, checkpoint_prefix) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) 
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step # Evaluation of some model def evaluate(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, df_trn, df_val, prefix="") -> Dict: # Loop to handle MNLI double evaluation (matched, mis-matched) eval_output_dir = args.output_dir eval_dataset = load_and_cache_examples(args, tokenizer, df_trn, df_val, evaluate=True) os.makedirs(eval_output_dir, exist_ok=True) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly def collate(examples: List[torch.Tensor]): if tokenizer._pad_token is None: return pad_sequence(examples, batch_first=True) return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id) eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate, drop_last = True ) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 model.eval() for batch in tqdm(eval_dataloader, desc="Evaluating"): inputs, labels = (batch, batch) inputs = inputs.to(args.device) labels = labels.to(args.device) with torch.no_grad(): outputs = model(inputs, labels=labels) lm_loss = outputs[0] eval_loss += lm_loss.mean().item() nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps perplexity = torch.exp(torch.tensor(eval_loss)) result = {"perplexity": perplexity} output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result # Main runner def main(df_trn, df_val): args = Args() if args.should_continue: sorted_checkpoints = _sorted_checkpoints(args) if len(sorted_checkpoints) == 0: raise ValueError("Used --should_continue but no checkpoint was found in --output_dir.") else: args.model_name_or_path = sorted_checkpoints[-1] if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir and not args.should_continue ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup CUDA, GPU & distributed training device = torch.device("cuda") args.n_gpu = torch.cuda.device_count() args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) config = AutoConfig.from_pretrained(args.config_name, cache_dir=args.cache_dir) tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir) model = AutoModelWithLMHead.from_pretrained( args.model_name_or_path, from_tf=False, config=config, cache_dir=args.cache_dir, ) model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, df_trn, df_val, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained() if args.do_train: # Create output directory if needed os.makedirs(args.output_dir, exist_ok=True) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = AutoModelWithLMHead.from_pretrained(args.output_dir) tokenizer = AutoTokenizer.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" model = AutoModelWithLMHead.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, df_trn, df_val, prefix=prefix) result = dict((k + "_{}".format(global_step), v) for k, v in result.items()) results.update(result) return results main(trn_df, val_df) ``` ## Chatting with the bot ``` tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-small') model = AutoModelWithLMHead.from_pretrained('output-small') # Let's chat for 5 lines for step in range(5): # encode the new user input, add the eos_token and return a tensor in Pytorch new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt') # print(new_user_input_ids) # append the new user input tokens to the chat history bot_input_ids = 
torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids # generated a response while limiting the total chat history to 1000 tokens, chat_history_ids = model.generate( bot_input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id, no_repeat_ngram_size=3, do_sample=True, top_k=100, top_p=0.7, temperature = 0.8 ) # pretty print last ouput tokens from bot print("RickBot: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))) ```
github_jupyter
``` import cv2 import os from matplotlib import pyplot as plt import numpy as np from skimage import morphology, filters, measure, restoration from skimage.io import imread def custom_imread(path): a = plt.imread(path) # a = cv2.cvtColor(a, cv2.COLOR_BGR2GRAY) return a ``` ### Important definitions ``` # Path to cage pics (depth not currently used) cagePath = os.path.join(os.path.expanduser('~'),'Downloads','samples','subtract_data','no_mouse','pic') # Path to mouse images (pics and depth and normalized) mousePath = os.path.join(os.path.expanduser('~'),'Downloads','samples','subtract_data','mouse') # Frame number to use FRAME_CAGE = 1 FRAME_MOUSE = 1329 ``` Get the images ``` cageIm = [] mouseIm = [] mouseDepth = [] mouseNorm = [] for stream in range(4): file = 's_' + str(stream+1) + '_f_' + str(FRAME_MOUSE) + '.png' mouseIm.append(custom_imread(mousePath+'/pic_'+file)) mouseNorm.append(custom_imread(mousePath+'/depth_normalized_' + file)) mouseDepth.append(custom_imread(mousePath+'/depth_' + file)) file = 'pic_s_' + str(stream+1) + '_f_' + str(FRAME_CAGE) + '.png' cageIm.append(custom_imread(cagePath+'/'+file)) def plot_helper(ax, im, title=None, cmap='gray'): if title: ax.set_title(title) ax.imshow(im, cmap=cmap) def filter_subt(im): # im = restoration.denoise_nl_means(im, patch_size=9, h=0.3, patch_distance=7, multichannel=False) im = morphology.opening(im, np.ones((3,3))) return im def filter_mask(im): im = morphology.closing(im, np.ones((3,3))) return im masks = [] f,axes = plt.subplots(ncols=5, nrows=len(mouseIm), figsize=(20,20)) for i,im in enumerate(mouseIm): plot_helper(axes[i][0], im, 'original') # For convenience, compare filtered and not filtered. for j,do_filter in enumerate([False, True]): subt = np.abs(im - cageIm[i]) if do_filter: subt = filter_subt(subt) otsu = filters.threshold_otsu(subt) # A very low threshold means there's no significant bright spots, i.e. no mouse if otsu > 0.01: mask = subt > otsu else: mask = subt * 0. 
# Only want the mask of the filtered if do_filter: masks.append(mask) plot_helper(axes[i][j*2+1], subt, 'subtracted\n(filter = {})'.format(do_filter)) plot_helper(axes[i][j*2+2], mask, 'mask\n(filter = {})'.format(do_filter)) plt.show() f,axes = plt.subplots(ncols=3, nrows=len(mouseIm), figsize=(20,20)) for i in range(len(mouseDepth)): axes[i][0].imshow(mouseNorm[i], cmap = 'gray') axes[i][1].imshow(masks[i], cmap = 'gray') masked = mouseNorm[i] * masks[i] axes[i][2].imshow(masked, cmap = 'gray') # FIX: 'frame' was undefined (NameError); FRAME_MOUSE is the frame number these images were loaded with plt.imsave(mousePath + '/../out/depth_s_' + str(i+1) + '_f_' + str(FRAME_MOUSE) + '.png', masked, cmap = 'gray') plt.imsave(mousePath + '/../out/depth_normalized_s_' + str(i+1) + '_f_' + str(FRAME_MOUSE) + '.png', masked, cmap = 'gray', vmin=0, vmax=1) plt.show() ```
github_jupyter
... ***CURRENTLY UNDER DEVELOPMENT*** ... ## Obtain Intraseasonal Weather Types (IWT, at daily scale) following the methodology explained in Anderson et al. (2019) inputs required: * Daily values of Madden-Julian Oscillation (MJO) parameters (rmm1, rmm2, phase, mjo) in this notebook: * Obtain MJO categories (25) based on rmm1, rmm2, and phase * Fit the autoregressive logistic model with a markov order 3 and seasonality * Time-series simulation of *n* simulations of 1000 years of the 25 categories * Randomly obtain pairs of rmm1, rmm2 and phase from the simulated time-series ### Workflow: <div> <img src="resources/nb01_04.png" width="300px"> </div> Intra-seasonal Weather Types (**IWTs**) are representative of the **Madden-Julian Oscillation (MJO)**, which is a broad region of anomalous atmospheric circulation and convective precipitation anomalies that propagates eastward around the equator on one to two-month timescales (Madden & Julian, 1972) and exhibits correlations with relevant coastal climatology such as tropical cyclone genesis (Slade & Malony, 2013) and surface wind wave anomalies (Marshal et al., 2015, Godoi et al., 2019) ``` #!/usr/bin/env python # -*- coding: utf-8 -*- # common import os import os.path as op from datetime import date, timedelta, datetime # pip import numpy as np import xarray as xr # DEV: override installed teslakit import sys sys.path.insert(0, op.join(os.path.abspath(''), '..', '..', '..')) # teslakit from teslakit.database import Database from teslakit.mjo import MJO_Categories, MJO_Phases from teslakit.alr import ALR_WRP from teslakit.util.operations import GetRepeatedValues from teslakit.plotting.mjo import Plot_MJO_phases, Plot_MJO_Categories ``` ## Database and Site parameters ``` # -------------------------------------- # Teslakit database p_data = r'/Users/nico/Projects/TESLA-kit/TeslaKit/data' db = Database(p_data) # set site db.SetSite('ROI') # -------------------------------------- # load data and set parameters MJO_hist = 
db.Load_MJO_hist() # historical MJO # MJO ALR parameters alr_markov_order = 3 alr_seasonality = [2, 4, 8] # Simulation num_sims = 100 d1_sim = np.datetime64('2000-01-01').astype(datetime) d2_sim = np.datetime64('3000-01-01').astype(datetime) ``` ## MJO phases and categories It is common practice in the MJO literature to separate the longitudinal location of the center of convection into eight longitudinal phases (Wheeler & Hendon, 2004). This convention was preserved in a daily index in this study, intended to be a proxy for intra-seasonal MJO oscillations by clustering the two leading PCs (henceforth referred to as IPC1 and IPC2) of outgoing longwave radiation into eight pre-defined longitudinal phases and further separating into three categories of low, medium, and high convection strength (analogous to conventions in Lafleur et al., (2015)) (Figure 4). A separate cluster was created for times when the location of the MJO is considered to have low certainty (when the vector magnitude of PC1 and PC2 is less than 1 (Wheeler & Hendon, 2004)). Altogether, the 25 clusters of Intra-seasonal Weather Types (IWTs) effectively create categorical MJO. ``` # -------------------------------------- # Calculate MJO categories (25 used) rmm1 = MJO_hist['rmm1'] rmm2 = MJO_hist['rmm2'] phase = MJO_hist['phase'] categ, d_rmm_categ = MJO_Categories(rmm1, rmm2, phase) MJO_hist['categ'] = (('time',), categ) print(MJO_hist) # plot MJO phases Plot_MJO_phases(rmm1, rmm2, phase); # plot MJO categories Plot_MJO_Categories(rmm1, rmm2, categ); ``` ## Autoregressive Logistic Regression Synthetic time series of the MJO are obtained with a Markov chain of the predefined IWT categorical states (statistically significant to the third order) and subsequent sampling from joint distributions of IPC1 and IPC2 within each cluster. 
When consecutive days in the synthetic record are sampled from the same categorical state, the randomly picked EOF pairs are ordered to preserve counterclockwise propagation of the MJO around the globe in a consistent direction. ``` # -------------------------------------- # Autoregressive Logistic Regression - fit model # MJO historical data for fitting bmus_fit = xr.Dataset( { 'bmus' :(('time',), MJO_hist.categ.values[:]), }, {'time' : MJO_hist.time.values[:]} ) # ALR terms d_terms_settings = { 'mk_order' : alr_markov_order, 'constant' : True, 'seasonality': (True, alr_seasonality), } # ALR wrapper ALRW = ALR_WRP(db.paths.site.MJO.alrw) ALRW.SetFitData(25, bmus_fit, d_terms_settings) # ALR model fitting ALRW.FitModel(max_iter=10000) # show fit report ALRW.Report_Fit() # -------------------------------------- # Autoregressive Logistic Regression - simulate # simulation dates dates_sim = [d1_sim + timedelta(days=i) for i in range((d2_sim-d1_sim).days+1)] # launch simulation ALR_sim = ALRW.Simulate(num_sims, dates_sim) # -------------------------------------- # MJO rmm1, rmm2, phase generation # solve each ALR simulation l_MJO_sim = [] for s in ALR_sim.n_sim: evbmus_sim = ALR_sim.sel(n_sim=s).evbmus_sims.values[:] # Generate rmm1 and rmm2 simulated values rmm12_sim = np.empty((len(evbmus_sim), 2)) * np.nan mjo_sim = np.empty(len(evbmus_sim)) * np.nan phase_sim = np.empty(len(evbmus_sim)) * np.nan categs = np.unique(evbmus_sim) for c in categs: c_ix = np.where(evbmus_sim==c)[0] # select random values for rmm1, rmm2 options = d_rmm_categ['cat_{0}'.format(int(c))] r = np.random.randint(options.shape[0], size=len(c_ix)) rmm12_sim[c_ix,:] = options[r,:] # calculate mjo and phase mjo_sim = np.sqrt(rmm12_sim[:,0]**2 + rmm12_sim[:,1]**2) phase_sim, degrees_sim = MJO_Phases(rmm12_sim[:,0], rmm12_sim[:,1]) # internally reorder days with same category (counter-clockwise phase ascend) l_ad = GetRepeatedValues(evbmus_sim) for s,e in l_ad: # get sort index by MJO phase value ixs 
= np.argsort(degrees_sim[s:e]) # sort mjo rmm12_sim[s:e,0] = rmm12_sim[s:e,0][ixs] rmm12_sim[s:e,1] = rmm12_sim[s:e,1][ixs] mjo_sim[s:e] = mjo_sim[s:e][ixs] phase_sim[s:e] = phase_sim[s:e][ixs] # append simulated PCs l_MJO_sim.append( xr.Dataset( { 'mjo' :(('time',), mjo_sim), 'phase' :(('time',), phase_sim), 'rmm1' :(('time',), rmm12_sim[:,0]), 'rmm2' :(('time',), rmm12_sim[:,1]), 'evbmus_sims' :(('time',), evbmus_sim), }, {'time' : dates_sim} ) ) # concatenate simulations MJO_sim = xr.concat(l_MJO_sim, 'n_sim') # store simulated MJO db.Save_MJO_sim(MJO_sim) ``` ## Validation Synthetic and historical MJO categories comparison: - Perpetual Year - Cluster Transition Probabilities ``` # show simulation report ALRW.Report_Sim(); ```
github_jupyter
# L'ensemble de Mandelbrot **TODO** * dans la definition, ajouter le developpement sur une dizaine d'itérations de 2 ou 3 points comme exemple illustratif du calcul (ecrire z_i ou |z_i| ou les 2 ?) * dans la definition, ajouter une representation graphique (code source caché) pour un niveau d'itération donné (ex. 50) pour avoir un exemple binaire, plus coherent avec la definition: soit un point est dans l'ensemble, soit il ne l'est pas * tweet "Faire des maths (et du Python) en s'amusant: l'ensemble de Mandelbrot" * à la fin du document, ajouter une section exploration ou on incite le lecteur a explorer en zoomant sur la representation graphique, en donnant des exemples illustrés et en rappelant la propriete autoreplicative a toute echelle des fractales (ne pas oublier d'introduire en rappelant que l'ens de Mandelbrot est une fractale...) ``` %matplotlib inline import matplotlib matplotlib.rcParams['figure.figsize'] = (10, 10) ``` ## Définition Soit la suite $\{z_i\}$ de nombres complexes définie par $$ z_{i+1} = z^2_i + c $$ avec $z_0 = 0$ et avec $c \in \mathbb C$ une constante fixée. L'ensemble de Mandelbrot est l'ensemble de tous les nombres $c$ pour lesquels cette suite converge ; la suite tend vers l'infini pour les nombres $c$ n'appartenant pas à l'ensemble de Mandelbrot (i.e. $\lim_{i \to +\infty}{|z_i|} = +\infty$ où $|z_i|$ est le module de $z_i$). Ci-dessous, l'ensemble de Mandelbrot est représenté graphiquement dans le plan complexe. Référence: *Toutes les mathématiques et les bases de l'informatique*, H. Stöcker, Dunod, p.696 ``` import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import axes3d from matplotlib import cm EPSILON_MAX = 2. 
NUM_IT_MAX = 64 Z_INIT = complex(0, 0) def mandelbrot_version1(x, y): it = 0 z = Z_INIT c = complex(x, y) # Rem: abs(z) = |z| = math.sqrt(pow(z.imag,2) + pow(z.real,2)) while it < NUM_IT_MAX and abs(z) <= EPSILON_MAX: z = z**2 + c it += 1 return 1 if it == NUM_IT_MAX else 0 REAL_RANGE = np.linspace(-2.0, 1.0, 800).tolist() IMAG_RANGE = np.linspace(-1.2, 1.2, 800).tolist() # Définie un ensemble de points c et vérifie leur appartenance à l'ensemble de Mandelbrot xgrid, ygrid = np.meshgrid(REAL_RANGE, IMAG_RANGE) data = np.array([mandelbrot_version1(x, y) for y in IMAG_RANGE for x in REAL_RANGE]).reshape(len(IMAG_RANGE), len(REAL_RANGE)) # Génère l'image # (cmap alternatifs: summer, magma, gist_gray, gist_yarg, gist_heat, Blues, coolwarm, copper) fig, ax = plt.subplots() ax.imshow(data, extent=[xgrid.min(), xgrid.max(), ygrid.min(), ygrid.max()], interpolation="none", cmap=cm.gray_r) ax.set_axis_off() # Ajoute un titre à l'image et nome les axes ax.set_title("Ensemble de Mandelbrot") plt.show() ``` ## Une implémentation Python Note: ce script Python peut également être téléchargé [ici](https://raw.githubusercontent.com/jeremiedecock/snippets/master/python/matplotlib/mandelbrot/mandelbrot.py). Commençons par importer les paquets requis : ``` import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import axes3d from matplotlib import cm ``` Puis définissons l'ensemble de Mandelbrot par itérations successives : ``` EPSILON_MAX = 2. 
NUM_IT_MAX = 32 Z_INIT = complex(0, 0) def mandelbrot_version1(x, y): it = 0 z = Z_INIT c = complex(x, y) # Rem: abs(z) = |z| = math.sqrt(pow(z.imag,2) + pow(z.real,2)) while it < NUM_IT_MAX and abs(z) <= EPSILON_MAX: z = z**2 + c it += 1 return 1 if it == NUM_IT_MAX else 0 def mandelbrot_version2(x, y): it = 0 z = Z_INIT c = complex(x, y) # Rem: abs(z) = |z| = math.sqrt(pow(z.imag,2) + pow(z.real,2)) while it < NUM_IT_MAX and abs(z) <= EPSILON_MAX: z = z**2 + c it += 1 return it ``` `mandelbrot_version1` définie l'ensemble de Mandelbrot ; `mandelbrot_version2` est une fonction alternative qui permet de voir à quelle vitesse la suite diverge (la fonction retroune une valeur d'autant plus petite que le nombre complexe $c = x + yi$ fait diverger la suite rapidement). Nous pouvons maintenant représenter graphiquement l'ensemble de Mandelbrot dans le plan complexe (plus la suite diverge vite plus le point image du nombre complexe $c=x+yi$ est claire) : ``` REAL_RANGE = np.linspace(-2.0, 1.0, 800).tolist() IMAG_RANGE = np.linspace(-1.2, 1.2, 800).tolist() # Définie un ensemble de points c et vérifie leur appartenance à l'ensemble de Mandelbrot xgrid, ygrid = np.meshgrid(REAL_RANGE, IMAG_RANGE) data = np.array([mandelbrot_version2(x, y) for y in IMAG_RANGE for x in REAL_RANGE]).reshape(len(IMAG_RANGE), len(REAL_RANGE)) # Génère l'image fig, ax = plt.subplots() ax.imshow(data, extent=[xgrid.min(), xgrid.max(), ygrid.min(), ygrid.max()], interpolation="bicubic", cmap=cm.Blues) # Ajoute un titre à l'image et nome les axes ax.set_title("Ensemble de Mandelbrot") ax.set_xlabel("Re(c)") ax.set_ylabel("Im(c)") plt.show() ``` Nous pouvons aussi représenter cet ensemble en 3 dimensions pour mieux mettre en évidence l'aspect itératif du processus de construction de l'ensemble de Mandelbrot. 
``` REAL_RANGE = np.arange(-2.0, 1.0, 0.05).tolist() IMAG_RANGE = np.arange(-1.2, 1.2, 0.05).tolist() # Définie un ensemble de points c et vérifie leur appartenance à l'ensemble de Mandelbrot xgrid, ygrid = np.meshgrid(REAL_RANGE, IMAG_RANGE) data = np.array([mandelbrot_version2(x, y) for y in IMAG_RANGE for x in REAL_RANGE]).reshape(len(IMAG_RANGE), len(REAL_RANGE)) # Génère la figure fig = plt.figure() ax = axes3d.Axes3D(fig) ax.plot_surface(xgrid, ygrid, data, cmap=cm.jet, rstride=1, cstride=1, color='b', shade=True) # Ajoute un titre à l'image et nome les axes plt.title("Ensemble de Mandelbrot") ax.set_xlabel("Re(c)") ax.set_ylabel("Im(c)") ax.set_zlabel("Itérations") plt.show() ```
github_jupyter
``` # load image from func.utils import get_model_output_id_wnid_class_dict # get mapping: format: {"Model Ouput ID": ["WNID", "Class"]} from func.utils import get_imagenet_id_wnid_class_dict # get mapping: format: {"ImageNet ID": ["WNID", "class"]}, e.g. {..."233": ['n02106382', 'Bouvier_des_Flandres'], ...} from func.utils import map_model_id_to_imagenet_id, map_imagenet_id_to_model_id # mapping funcs from func.utils import save_obj, load_obj from func.saliency_maps import conduct_saliency_map_method, GuidedBackprop, VanillaBackprop, SmoothGrad, GradCAM, GuidedGradCAM, IntegratedGradients, GradientxInput from func.responsible_regions import load_responsible_regions_from_given_path, X_y_preparation, process_cat_saliency_map from func.concept_classifier import get_linear_classifier, get_xgb_classifier, prediction from func.show import load_feature_saliency_map_and_resized_img_for_show, show_concept_region_on_img # from func.receptive_field import receptive_field, get_rfs_from_a_mask, show_rf_in_org_img, show_some_rfs_randomly, show_some_rfs_order, get_rfs_from_a_mask_with_order import numpy as np import matplotlib.pyplot as plt import copy import os import gc import torch from torchvision import models os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]="0" # get the dict of ImageNet ID, WNID and class name # format: {"ImageNet ID": ["WNID", "class"]}, e.g. 
{..."233": ['n02106382', 'Bouvier_des_Flandres'], ...} imagenet_id_label=get_imagenet_id_wnid_class_dict(matfilepath = "imagenet_info/ILSVRC2012_meta.mat") # get the dict of model output ID, WNID and class name # format: {"Model Ouput ID": ["WNID", "Class"]} modeloutput_id_label=get_model_output_id_wnid_class_dict(jsonfilepath = "imagenet_info/imagenet_label_index.json") # get dict map model output ID to ImageNet ID map_dict_model2imagenet=map_model_id_to_imagenet_id(imagenet_id_label, modeloutput_id_label) # get ImageNet ID to dict map model output ID map_dict_imagenet2model=map_imagenet_id_to_model_id(imagenet_id_label, modeloutput_id_label) ``` ### show imagenet classes ``` # only 1~1000 is valid for idx in imagenet_id_label: print(str(idx)+": "+str(imagenet_id_label[idx])) ``` ### imagenet parent and child ``` imagenet_class_parent_and_child_dict = load_obj("imagenet_info/imagenet_class_dict") # e.g. imagenet_class_parent_and_child_dict[1095] ``` ### Responisble regions ``` # set the path to load feature maps and saliency maps of a model feature_maps_and_saliency_maps_path = "output/feature_maps_and_saliency_maps_vgg19" # set ID_groups and layers ID_groups = [[1082,1095,1845]] layers = ["features.30"] # set the path to save responisble regions responisble_regions_save_path = "output/responisble_regions" if not os.path.exists(responisble_regions_save_path): os.mkdir(responisble_regions_save_path) IDs_str = [] for ID_group in ID_groups: IDs_str.append(str(ID_group)) IDs_str = np.array(IDs_str) for ID_group in ID_groups: for ID in ID_group: print(ID, imagenet_class_parent_and_child_dict[ID]['words'], imagenet_class_parent_and_child_dict[ID]['gloss']) for idx_layer, layer in enumerate(layers): chosen_layer = layer save_layer_path = os.path.join(responisble_regions_save_path, chosen_layer) if not os.path.exists(save_layer_path): os.mkdir(save_layer_path) X_dict = {} for ID_group in ID_groups: for idx, ID in enumerate(ID_group): print("loading data", chosen_layer, 
ID, imagenet_class_parent_and_child_dict[ID]['words'], imagenet_class_parent_and_child_dict[ID]['gloss']) if ID not in X_dict.keys(): X_pos_current, X_neg_current = load_responsible_regions_from_given_path(os.path.join(feature_maps_and_saliency_maps_path, "result_of_ID_"+str(ID)), layer = chosen_layer, pic_size = 14) X_dict[ID] = {} X_dict[ID]["foreground"] = X_pos_current X_dict[ID]["background"] = X_neg_current for idx_ID, ID_group in enumerate(ID_groups): which_ID_group = idx_ID save_layer_ID_group_path = os.path.join(save_layer_path, IDs_str[which_ID_group]) if not os.path.exists(save_layer_ID_group_path): os.mkdir(save_layer_ID_group_path) flag = True idx = 0 print("\nFor "+save_layer_ID_group_path+"/X_y.npz, the labels: ",end="\n\n") for position_in_group, ID in enumerate(ID_group): X_pos_current = X_dict[ID]['foreground'] X_neg_current = X_dict[ID]['background'] print(str(idx)+" presents concept "+str(ID)+" which is "+str(imagenet_class_parent_and_child_dict[ID]), end="\n\n") if flag: X = X_pos_current y = np.ones(X_pos_current.shape[0]) * idx X_neg = X_neg_current flag = False idx+=1 else: X = np.concatenate((X, X_pos_current)) y = np.concatenate((y, np.ones(X_pos_current.shape[0]) * idx)) X_neg = np.concatenate((X_neg, X_neg_current)) idx+=1 print(str(idx)+" presents background info") X_neg = X_neg[np.random.randint(X_neg.shape[0], size=int(X.shape[0]/len(ID_group))),:] X = np.concatenate((X, X_neg)) y = np.concatenate((y, np.ones(X_neg.shape[0]) * (idx))) np.savez_compressed(save_layer_ID_group_path+"/X_y.npz", X=X, y=y) ``` ### Concept classifier and Shap values ``` chosen_layer = str(layers[0]) print("chosen layer: "+chosen_layer) chosen_ID_group = str(ID_groups[0]) print("chosen_ID_group: "+chosen_ID_group) X_y = np.load(os.path.join(responisble_regions_save_path, chosen_layer, chosen_ID_group, "X_y.npz")) target_obj_loc = 1 y = copy.deepcopy(X_y["y"]) print("target is "+str(chosen_ID_group.split(",")[target_obj_loc])+" | 
"+imagenet_class_parent_and_child_dict[int(chosen_ID_group.split(",")[target_obj_loc])]['words']) print("we will train a concept classifier to distinguish the target from the others in "+str(chosen_ID_group)) y[y!=target_obj_loc] = -1 y[y==target_obj_loc] = 1 y[y!=1] = 0 # train the concept classifier coef, clf, Validation_score = get_linear_classifier(X_y["X"], y, classifier = "SGD") bst = get_xgb_classifier(X_y["X"],y) bst.set_param({"predictor": "gpu_predictor"}) explainer = shap.TreeExplainer(bst) shap_values = explainer.shap_values(X_y["X"]) importance_arr_mean = [] importance_arr_median = [] importance_arr_max = [] importance_arr_min = [] for i in range(shap_values.shape[1]): shap_vals_of_one_channel = np.abs(shap_values[:,i]) #xgb_shap_values[:,i] importance_arr_mean.append(np.mean(shap_vals_of_one_channel)) importance_arr_median.append(np.median(shap_vals_of_one_channel)) importance_arr_max.append(np.max(shap_vals_of_one_channel)) importance_arr_min.append(np.min(shap_vals_of_one_channel)) importance_arr_mean = np.array(importance_arr_mean) importance_arr_median = np.array(importance_arr_median) importance_arr_max = np.array(importance_arr_max) importance_arr_min = np.array(importance_arr_min) # plot shap vals sort_locs = np.argsort(importance_arr_mean)[::-1] sort_locs_str = [] for i in sort_locs: sort_locs_str.append(str(i)) N_subplot = 7 len_of_one_sub = int(len(sort_locs)/N_subplot+1) sort_locs_sub = [] sort_locs_str_sub = [] for i in range(N_subplot): start_loc = np.clip(i*len_of_one_sub,0,len(sort_locs)) end_loc = np.clip((i+1)*len_of_one_sub,0,len(sort_locs)) sort_locs_sub.append(sort_locs[start_loc:end_loc]) sort_locs_str_sub.append(sort_locs_str[start_loc:end_loc]) fig, axs = plt.subplots(1,N_subplot,figsize=(20,20)) for i in range(N_subplot): axs[i].barh(sort_locs_str_sub[i][::-1],importance_arr_mean[sort_locs_sub[i]][::-1]) axs[i].set_xlim(0, np.max(importance_arr_mean)+0.05) axs[i].set_xlabel('Shapley') # plt.savefig("shap_bar_plot.svg", # 
bbox_inches='tight', dpi=100, pad_inches=0.0) ``` ### Show the results of concept classifier ``` ID_for_show = 1095 print(ID_for_show, imagenet_class_parent_and_child_dict[ID_for_show]['words']) resized_img_for_show_cat, feature_map_for_show_cat, saliency_map_for_show_cat = \ load_feature_saliency_map_and_resized_img_for_show(feature_maps_and_saliency_maps_path+"/result_of_ID_"+str(ID_for_show), chosen_layer) resized_img_for_show_cat = np.array(resized_img_for_show_cat, dtype=int) grayscale_saliency_map = process_cat_saliency_map(saliency_map_for_show_cat, num_of_pic_of_a_row = 10, mode="norm") concept_map = prediction(clf, feature_map_for_show_cat, is_predict_proba = True) show_concept_region_on_img(resized_img_for_show_cat, concept_map[:,:,1]) ```
github_jupyter
# database_tools: Set of tools to connect to the data base, put and get data from them. ``` import psycopg2 from psycopg2 import sql import pandas as pd from datetime import datetime import numpy as np from ifis_tools import auxiliar as aux def DataBaseConnect(user = "iihr_student", password = "iihr.student", host = "s-iihr51.iihr.uiowa.edu", port = "5435", database = "research_environment"): '''Connect to the database that hsa stored the usgs information''' con = psycopg2.connect(user = user, password = password, host = host, port = port, database = database) return con def SQL_read_USGS_Streamflow(usgs_id, date1, date2, schema = 'pers_nico', table = 'data_usgs', time_name = 'unix_time', data_name = 'val', usgs_name = 'usgs_id'): '''Read streamflow data from IIHR database "research_environment" and returns it as a pandas.DataFrame element. Parameters: - usgs_id: code of the usgs. - date1: initial date of the query. - date2: final date of the query. Optional: - schema: where to obtain data in the databse. - table: master table with the usgs data. - time_name: the name of the column that has the time. - data_name: the name of the column that has the data. - usgs_name: the name of the column that has the id of the usgs stations. Returns: - pandas.DataFrame containing the streamflow data.''' #make the connection con = DataBaseConnect(user = 'nicolas', password = '10A28Gir0') #Work with dates and usgs id date1 = str(aux.__datetime2unix__(date1)) date2 = str(aux.__datetime2unix__(date2)) if type(usgs_id) is not str: usgs_id = str(usgs_id) #make the querty query = sql.SQL("SELECT "+time_name+", "+data_name+" FROM "+schema+"."+table+" WHERE "+time_name+" BETWEEN "+date1+" and "+date2+" AND "+usgs_name+"='"+usgs_id+"'") #Make the consult. 
Data = pd.read_sql(query, con, index_col='unix_time',parse_dates={'unix_time':{'unit':'s'}}) con.close() return Data #SQL Query to obtain the data from per_felipe.pois_adv_geom def SQL_USGS_at_IFIS(): '''Return the list of the usgs stations in the IFIS system and the linkID where they belong.''' #make the connection con = DataBaseConnect(user = 'nicolas', password = '10A28Gir0') #Query for the stations query = sql.SQL("SELECT foreign_id,link_id FROM pers_felipe.pois_adv_geom where type in (2,3) and foreign_id like '0%' AND link_id < 620000") #make the consult cur = con.cursor() cur.execute(query) L = cur.fetchall() cur.close() con.close() #Obtains a dictionary in which stations are the key DicUSGSinIFIS = {} for l in L: DicUSGSinIFIS.update({l[0]:l[1]}) return DicUSGSinIFIS def SQL_USGS_at_MATC(): '''Return the list of stations that are in the databse pers_nico (matc).''' #make the connection con = DataBaseConnect(user = 'nicolas', password = '10A28Gir0') #Make the query query = sql.SQL("SELECT DISTINCT(usgs_id) FROM pers_nico.data_usgs_2008") cur = con.cursor() cur.execute(query) L = cur.fetchall() cur.close() con.close() return [l[0] for l in L] def SQL_Get_linkArea(linkID): '''Obtains the up area for a link ID''' #The query and the obtentions con = DataBaseConnect('nicolas','10A28Gir0') cur = con.cursor() q = sql.SQL("SELECT upstream_area FROM pers_felipe.pois_adv_geom WHERE link_id = "+str(linkID)) cur.execute(q) A = cur.fetchall() cur.close() con.close() return A[0][0]*2.583 def SQL_Read_MeanRainfall(link_id, date1, date2, schema = 'pers_nico', table = 's4mrain', time_name = 'unix_time', data_name = 'rain', linkid_name = 'link_id'): '''Read streamflow data from IIHR database "research_environment" and returns it as a pandas.DataFrame element. Parameters: - usgs_id: code of the usgs. - date1: initial date of the query. - date2: final date of the query. Optional: - schema: where to obtain data in the databse. - table: master table with the usgs data. 
- time_name: the name of the column that has the time. - data_name: the name of the column that has the data. - usgs_name: the name of the column that has the id of the usgs stations. Returns: - pandas.DataFrame containing the streamflow data.''' #make the connection con = DataBaseConnect(user = 'nicolas', password = '10A28Gir0') #Work with dates and usgs id date1 = str(aux.__datetime2unix__(date1)) date2 = str(aux.__datetime2unix__(date2)) if type(link_id) is not str: link_id = str(link_id) #make the querty query = sql.SQL("SELECT "+time_name+", "+data_name+" FROM "+schema+"."+table+" WHERE "+time_name+" BETWEEN "+date1+" and "+date2+" AND "+linkid_name+"='"+link_id+"'") #Make the consult. Data = pd.read_sql(query, con, index_col='unix_time',parse_dates={'unix_time':{'unit':'s'}}) con.close() #Organize rainfall Data = Data.sort_index() Dates = pd.date_range(Data.index[0], Data.index[-1], freq='1h') Rain = pd.Series(np.zeros(Dates.size), Dates) Rain[Data.index] = Data['rain'].values Rain[Rain>1000] = 0.0 return Rain def SQL_Get_MeanRainfall(linkID, date1, date2): '''Obtains the mean rainfall for the watershed associated to a given linkID. Parameters: - linkID: linkID of the outlet of the basin. - date1: initial date (YYYY-MM-DD HH:MM). - date2: end date (YYYY-MM-DD HH:MM). 
Returns: - Rainfall: Pandas series with the mean rainfall in the basin.''' #SEt the connection con = DataBaseConnect(user='nicolas', password='10A28Gir0', database='rt_precipitation') #Transform dates to unix unix1 = str(aux.__datetime2unix__(date1)) unix2 = str(aux.__datetime2unix__(date2)) linkID = str(linkID) #Set the query and obtains data q = sql.SQL("WITH subbasin AS (SELECT nodeX.link_id AS link_id FROM students.env_master_km AS nodeX, students.env_master_km AS parentX WHERE (nodeX.left BETWEEN parentX.left AND parentX.right) AND parentX.link_id = "+str(linkID)+"), uparea as (SELECT up_area FROM students.env_master_km WHERE link_id= "+str(linkID)+"), lut as (SELECT x, y FROM env_lookup_hrap_lid_v4 WHERE link_id IN (SELECT * FROM subbasin) group by x, y) SELECT unix_time, sum(val)/(SELECT count(*) FROM lut) as rain FROM stage_4.data WHERE grid_x IN (SELECT x FROM lut) AND grid_y IN (SELECT y from lut) AND unix_time between "+unix1+" AND "+unix2+" group by unix_time order by unix_time;") Data = pd.read_sql(q, con, index_col='unix_time',parse_dates={'unix_time':{'unit':'s'}}) #close connection con.close() #Pos process data dates = pd.date_range(date1, date2, freq='1h') Rain = pd.Series(np.zeros(dates.size), dates) Rain[Data.index] = Data['rain'] return Rain ```
github_jupyter
# T11 Example project with tips and tricks <span style='background : yellow' > Hint and tips are provided with yellow background.</span> ## Project : Extract local field potential (LFP) from extracellular recording in the hippocampus You are provided with a raw extracellular recording from the hippocampus of a awake behaving rat. The trace has been recorded with a silicon probe (32 channel) and you will be analyzing the signal from a single channel (the probe delivers in total 32 of such recordings). The signal contains the local field potential (LFP) as well as spiking signals. Since both occur at different frequencies, it is possible to separate them through appropriate signal filtering using spectral analysis. Your task is to extract frequency content of the LFP from the raw trace. The frequency content of the LFP trace can then be further analyzed and be linked to specific behaviors. ### Objective of the project Study the frequencies contained in the LFP signal. For that purpose, calculate the spectrogram showing the frequency content as a function of time. The frequency range of interest ranges from 0 to 100 Hz. Characterize which frequencies are contained in the recording, and whether these signals change over time. Which frequencies show prominent dynamics during the recording period, i.e., which frequencies show dynamics changes in spectral power over time? <span style='background : yellow' > Using the information provided, try to learn as much as possible about the scientific and technical background of the project. * have a in depth look at the publication provided with the project (if any), here : [Gabrielle Girardeau et al. 
(2017) <i>Nature Neurosciene</i>](https://www.nature.com/articles/nn.4637) * search for project keywords, here : "hippocampus", "silcon probe", "local field potential", "spectral analysis", "spectrogram" * look back at the lecture and turorial which dealt with the technique to be used in the project, here : "Course 6 : Spectral analysis of analog signals" </span> ### Details about the recording The data has been recorded in the lab of [Gabrielle Girardeau](https://girardeaulab.org/). The entire trace is 6.3 hours long with running on track at the beginning, middle and end and homecage inbetween with sleep and quiet wakefulness. The electrode has been placed in the hippocampus in the CA1 pyramidal cell layer. The sampling rate of the recording is 20 kHz (or 20 000 Hz). To avoid issues with handling the large data file, a part of the entire recording was extracted for this project. #### The data Find below the code on how to load the data. <span style='background : yellow' > Try to first understand in detail the structure of the data. What is contained in the data structure and what are the properties of the data. </span> ``` import numpy as np import matplotlib.pyplot as plt # if you use qt (instead of inline what we used in the course) you can zoom in on the data which is useful for large traces %matplotlib qt data = np.load('data-for-project_3.npy') print(np.shape(data)) print(data[0]) print(data[1]) # look in more detail at the time array samplingRate = 1./np.mean(np.diff(data[0])) print('The sampling rate is : ', samplingRate, ' Hz') print('The total duration of the recording is ', data[0,-1], ' s. Or ', data[0,-1]/60 ,' min') # look in more detail at the data array print('max :',np.max(data[1])) print('min :',np.min(data[1])) print('mean : ', np.mean(data[1])) ``` #### Plotting Plot the raw data! <span style='background : yellow' >As always, plotting the data provide immediate and useful insights. 
Generally, plot data and results helps to develop a better understanding. I would recommend plotting in an external window, since this allows to zoom into the data. This is in particular useful for large data-sets. Don't forget to include figures in the jupyter-notebook for the project presentation and for illustrating the results obtained. * zoom into the data and explore features at many different time points, here : check out the interval [70,90] sec * try to get a first rough estimate about the project question through visual inspection, here : what is the frequency present in the signal * if data-set is too large, plot and inspect smaller sections of the data * figures can be implemented with the code : ![Data Figure](Figure_data.png) </span> ``` plt.plot(data[0],data[1]) # plot the first 1000 entries of the time series plt.xlabel('time (s)') plt.show() ``` #### Analysis <span style='background : yellow' >Using the course material, try to understand the analysis to perform and how to implement it. * Pick the right lecture and read up about the analyis method. Maybe read more extensive explanations on the internet about the analysis, here : [Spectral analysis on Wikipedia, for exmaple](https://en.wikipedia.org/wiki/Spectral_density_estimation) * Use example code snippets from the tutorial to start programming, here : "T06 : Calculating the spectrogram of a signal" * Go through the code and try to understand the code. * You could also start from a code snippet you find on the internet. There are many possible ways to realize similar analysis. The difficulty is to find what suits best your problem. 
</span> ``` import scipy.signal as sg # constructing the signal ################# fs = 1000 #1 time = np.linspace(0,2,fs*2) #2 y=sg.chirp(time,400,1,0,'linear') #3 # computing the spetrogram ################ windLength = 128 #4 overl = windLength-1 #5 wind=np.hanning(windLength) #6 #wind=np.kaiser(windLength,0) #7 f, tt, Sxx =sg.spectrogram(y,fs,wind,len(wind),overl) #8 # producing the figure ################ fig=plt.figure(figsize=(10,10)) #9 ax0 = plt.subplot(2,1,1) #10 ax0.plot(time,y) #11 ax0.set_xlabel('time (s)') #12 ax0.set_ylabel('signal') #13 ax1 = plt.subplot(2,1,2) #14 im = ax1.pcolormesh(tt,f,Sxx) #15 fig.colorbar(im, ax=ax1) #16 ax1.set_xlabel('time (s)') #17 ax1.set_ylabel('freqeuncy (Hz)') #18 plt.show() #19 ``` <span style='background : yellow' >Adapt the code for the project. In other words, start from the tutorial code and adapt it * Change the values and fill in the project specifics. * It is useful and good practise to separate **computations**: the actual implementation of the analysis and **visualization** : the figure generation. </span> **Computation** ``` import scipy.signal as sg # constructing the signal ################# fs = 20000 # computing the spetrogram ################ windLength = 128 #4 overl = windLength-1 #5 wind=np.hanning(windLength) #6 f, tt, Sxx =sg.spectrogram(data[1],fs,wind,len(wind),overl) #8 #f, tt, Sxx =sg.spectrogram(data[1], fs, window='hanning',nperseg=1024, noverlap=1024 - 100,detrend=False, scaling='spectrum') ``` **Figure generation** ``` # producing the figure ################ fig=plt.figure(figsize=(10,10)) #9 ax1 = plt.subplot(1,1,1) #14 im = ax1.pcolormesh(tt,f,Sxx) #15 fig.colorbar(im, ax=ax1) #16 ax1.set_xlabel('time (s)') #17 ax1.set_ylabel('freqeuncy (Hz)') #18 plt.show() #19 ``` <span style='background : red' >The above code crashes the jupyter-notebook!</span> It also aborts when executing the code in `ipython` for example. 
There probably exists a memory problem
#10 fSpaceSignal = np.fft.fft(exampleSlice) #11 fBase = np.linspace(0,nyquist,np.floor(len(exampleSlice)/2)+1) #12 halfTheSignal = fSpaceSignal[:len(fBase)] #13 complexConjugate = np.conj(halfTheSignal) #14 powe = np.abs(halfTheSignal*complexConjugate) #15 plt.plot(fBase,powe/max(powe)) plt.show() print(fBase[:20]) ``` Here in particular, we need to change the resolution of the frequency axis. This can be done by changing the window length. ``` windowLength = 2**16 slices = util.view_as_windows(data[1], window_shape=(windowLength,), step=100) hanningWindow = np.hanning(windowLength) fs = 20000 exampleSlice = slices[1]*hanningWindow nyquist = fs/2. #10 fSpaceSignal = np.fft.fft(exampleSlice) #11 fBase = np.linspace(0,nyquist,np.floor(len(exampleSlice)/2)+1) #12 halfTheSignal = fSpaceSignal[:len(fBase)] #13 complexConjugate = np.conj(halfTheSignal) #14 powe = np.abs(halfTheSignal*complexConjugate) #15 print(fBase) print(powe) ``` #### Further optimize We are interested in the frequency range up to 100 Hz, so lets restrict the output to that. ``` mask = fBase < 100 plt.plot(fBase[mask],powe[mask]/max(powe[mask])) plt.show() ``` The specific unit used for the ratio is the decibel, `20*log10` (amplitude ratio). ``` S = 20 * np.log10(powe[mask] / np.max(powe[mask])) plt.plot(fBase[mask],S) plt.show() ``` #### Extend the analysis to the entire data-set Use the above developped code and apply it to the entire data-set. <span style='background : yellow' >Sometimes, the computation can take minutes or even hours, when the data-set is large or the computations heavy. Be patient. You can try to let the analysis run over night. </span> * Copy all relevant code in one window. That way you avoid surprises of a stray variable which has been initialized with a strange value somewhere else. For the above problem, let's write a loop which calculates spetral power for all slices. 
``` import pdb fs = 20000 windowLength = 2**16 import scipy.signal as sg slices = util.view_as_windows(data[1], window_shape=(windowLength,), step=int(windowLength*0.9)) nSlices = len(slices) hanningWindow = np.hanning(windowLength) nyquist = fs/2. mask = fBase < 100 spectra = [] for i in range(nSlices): exampleSlice = slices[i]*hanningWindow #10 fSpaceSignal = np.fft.fft(exampleSlice) #11 fBase = np.linspace(0,nyquist,np.floor(len(exampleSlice)/2)+1) #12 halfTheSignal = fSpaceSignal[:len(fBase)] #13 complexConjugate = np.conj(halfTheSignal) #14 powe = np.abs(halfTheSignal*complexConjugate) #15 #S = 20 * np.log10(powe[mask] / np.max(powe[mask])) #S = powe[mask] / np.max(powe[mask]) spectra.append(powe[mask]) frequencies = fBase[mask] spectrum = np.asarray(spectra) psd = spectrum/np.max(spectrum) np.max(spectrum) ``` #### Figure generation Show your results in figure with captions and labels. <span style='background : yellow' > Producing good representations of results in form of figures takes time and effort. Try to find the visualization which best conveys your result. Certain measures have standard representations (e.g. frequency content is shown in spectrograms, spikes in raster plots, etc.).</span> ``` plt.imshow(psd.T, origin='lower', cmap='viridis', extent=(0, 30*60, 0, 100),aspect='auto',interpolation='nearest') plt.colorbar() plt.xlabel('time (sec)') plt.ylabel('frequency (Hz)') ``` #### Refine and interpret results The specific objective for this project was to find frequency ranges which exhibit dynamics over time. Here, it looks like as if theta power is present only for certain time intervals. Let's extract the mean power in the theta range and display it over time. 
<span style='background : yellow' >Going an additional step after obtaining the main result provides often great insight, doesn't take much effort and increases the quality/impact of your work.</span> ``` plt.imshow(psd.T, origin='lower', cmap='viridis', extent=(0, 30*60, 0, 100),aspect='auto',interpolation='nearest') plt.axhline(y=5,ls='--',color='black') plt.axhline(y=10,ls='--',color='black') plt.colorbar() plt.xlabel('time (sec)') plt.ylabel('frequency (Hz)') thetaMask = (frequencies>6)&(frequencies<9) print(frequencies[thetaMask]) timeEntries = np.shape(psd.T)[1] time = np.linspace(0,30,timeEntries) plt.plot(time,np.mean((psd.T)[thetaMask],axis=0)) plt.xlabel('time (min)') plt.ylabel('mean theta power (dB)') ``` #### Discussion <span style='background : yellow' >Finish with a disucssion which contains a summary of your results and an interpretation. What does it mean? What would be further steps?</span>
github_jupyter
``` import numpy as np import pandas as pd from IPython.display import display from sklearn.model_selection import train_test_split trf = pd.read_csv('train_features.csv') #train data tef = pd.read_csv('test_features.csv') #test data ttn = pd.read_csv('train_targets_nonscored.csv') #train label template tts = pd.read_csv('train_targets_scored.csv') #train label with values (has a lot of variance) def show_df(df, n=4): display(df.head(n=n)) print("Train Features") show_df(trf) print("Train Targets Scored") show_df(tts) import matplotlib.pyplot as plt %matplotlib inline print(trf.shape) print(trf.sig_id.nunique()) print(trf.cp_type.unique()) print(trf.cp_type.value_counts()) print(trf.cp_time.value_counts()) print(trf.cp_dose.value_counts()) print(tts.sum()[1:].sort_values()) gs = trf[:1][[col for col in trf.columns if 'g-' in col]].values.reshape(-1, 1) plt.plot(gs) plt.plot(sorted(gs)) trf['c-0'].plot(kind='hist') # !pip install iterative-stratification from iterstrat.ml_stratifiers import MultilabelStratifiedKFold as mkfold if __name__ == "__main__": tts.loc[:, 'kfold'] = -1 tts = tts.sample(frac=1).reset_index(drop=True) targets = tts.drop("sig_id", axis=1).values mskf = mkfold(n_splits=5) for fold_, (trn_, val_) in enumerate(mskf.split(X=tts, y=targets)): tts.loc[val_, "kfold"] = fold_ tts.to_csv("train_folds.csv", index=False) import torch import torch.nn as nn from torch.utils.data import Dataset, DataLoader DEVCE = "cpu" EPOCHS = 100 class MoADataset(Dataset): def __init__ (self, data, target): self.dataset = data self.feature = target def __len__(self): return self.dataset.shape[0] def __getitem__(self, item): return { "x": torch.tensor(self.dataset[item, :], dtype=torch.float), "y": torch.tensor(self.dataset[item, :], dtype=torch.float), } class Engine: def __init__ (self, model, optimizer, device): self.model = model self.optimizer = optimizer self.device = device def loss_fn(target, outputs): loss_n = nn.BCEWithLogitsLoss() return loss_n(outputs, targets) 
def train(self, data_loader): self.model.train() final_loss = 0 for data in data_loader: self.optimizer.zero_grad() inputs = data["x"].to(self.device) targets = data["y"].to(self.device) outputs = self.model(inputs) loss = self.loss_fn(targets, outputs) self.optimizer.step() final_loss += loss.item() return final_loss/len(data_loader) #avg loss for len_dataloader per epoch length def validate(self, data_loader): self.model.eval() final_loss = 0 for data in data_loader: inputs = data["x"].to(self.device) targets = data["y"].to(self.device) outputs = self.model(inputs) loss = self.loss_fn(targets, outputs) final_loss += loss.item() return final_loss/len(data_loader) #avg loss for len_dataloader per epoch length def add_dummies(data, column): ohe = pd.get_dummies(data[column]) ohe_columns = [f"{column}_{c}" for c in ohe.columns] ohe.columns = ohe_columns data = data.drop(column, axis=1) data = data.join(ohe) return data def process_data(df): df = add_dummies(df, "cp_time") df = add_dummies(df, "cp_dose") df = add_dummies(df, "cp_type") return df def run_training(fold): df = pd.read_csv("train_features.csv") df = process_data(df) folds = pd.read_csv("train_folds.csv") targets = folds.drop(["sig_id", "kfold"], axis=1).columns features = df.drop(["sig_id"], axis=1).column df = df.merge(folds, on="sig_id", how="left") train_df = df[df.fold != fold].reset_index(drop=True) valid_df = df[df.fold == fold].reset_index(drop=True) x_train = train_df[features].to_array() x_valid = valid_df[features].to_array() y_train = train_df[targets].to_array() y_valid = valid[targets].to_array() train_dataset = MoADataset(x_train, y_train) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1024, num_workers = 8) # model = Model(...) 
model.to(DEVICE) optimizer = torch.optim.Adam(model.parameters(), lr=3e-4) eng = utils.Engine(model, optimizer, DEVICE) for _ in range(EPOCHS): train_loss = eng.train(train_loss) # valid_loss = class Model(nn.Module): def __init__ (self, num_features, num_targets): super().__init__() self.model = nn.Sequential (nn.Linear(num_features, 1024), nn.BatchNorm1d(1024), nn.Dropout(0.3), nn.PReLU(), nn.Linear(1024, 1024), nn.BatchNorm1d(1024), nn.Dropout(0.3), nn.PReLU(), nn.Linear(1024, num_targets)) def forward(self, x): return self.model(x) train_features = pd.concat([trf, pd.get_dummies(trf['cp_time'], prefix='cp_time')], axis=1) train_features = pd.concat([train_features, pd.get_dummies(train_features['cp_dose'], prefix='cp_dose')], axis=1) train_features = pd.concat([train_features, pd.get_dummies(train_features['cp_type'], prefix='cp_type')], axis=1) train_features = train_features.drop(['cp_type', 'cp_time', 'cp_dose'], axis=1) show_df(train_features, 4) # !pip install pytorch-lightning import pytorch_lightning as pl class MoADataModule(pl.LightningDataModule): def __init__(self, hparams, data, targets): super().__init__() self.hparams = hparams self.data = data self.targets = targets def prepare_data(self): pass def setup(self, stage=None): train_data, valid_data, train_targets, valid_targets = train_test_split(self.data, self.targets, test_size=0.1, random_state=42) self.train_dataset = MoADataset(dataset=train_data.iloc[:, 1:].values, targets=train_targets.iloc[:, 1:].values) self.valid_dataset = MoADataset(dataset=valid_data.iloc[:, 1:].values, targets=valid_targets.iloc[:, 1:].values) def train_dataloader(self): train_loader = torch.utils.data.DataLoader( self.train_dataset, batch_size=1024, num_workers=0, shuffle=True, ) return train_loader def val_dataloader(self): valid_loader = torch.utils.data.DataLoader( self.valid_dataset, batch_size=1024, num_workers=0, shuffle=False, ) return valid_loader def test_dataloader(self): return None class 
LitMoA(pl.LightningModule): def __init__(self, hparams, model): super(LitMoA, self).__init__() self.hparams = hparams self.model = model self.criterion = nn.BCEWithLogitsLoss() def forward(self, x): return self.model(x) def configure_optimizers(self): optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, threshold=0.00001, mode="min", verbose=True) return ([optimizer], [{'scheduler': scheduler, 'interval': 'epoch', 'monitor': 'valid_loss'}]) def training_step(self, batch, batch_idx): data = batch['x'] target = batch['y'] out = self(data) loss = self.criterion(out, target) logs = {'train_loss': loss} return {'loss': loss, 'log': logs, 'progress_bar': logs} def training_epoch_end(self, outputs): avg_loss = torch.stack([x['loss'] for x in outputs]).mean() logs = {'train_loss': avg_loss} return {'log': logs, 'progress_bar': logs} def validation_step(self, batch, batch_idx): data = batch['x'] target = batch['y'] out = self(data) loss = self.criterion(out, target) logs = {'valid_loss': loss} return {'loss': loss, 'log': logs, 'progress_bar': logs} def validation_epoch_end(self, outputs): avg_loss = torch.stack([x['loss'] for x in outputs]).mean() logs = {'valid_loss': avg_loss} return {'log': logs, 'progress_bar': logs} trainer = pl.Trainer(gpus=0, max_epochs = 5, weights_summary='full') net = Model(879, 206) model = LitMoA(hparams = {}, model=net) dm = MoADataModule(hparams = {}, data=train_features, targets=tts) trainer.fit(model, dm) ```
github_jupyter
# Create an interactive Parallel Plot To demonstrate the use of the interactive parallel plot, we use a project already loaded into the CKG database. ``` import pandas as pd from ckg.report_manager import project, dataset, report from ckg.analytics_core.viz import viz as plots import networkx as nx from networkx.readwrite import json_graph from plotly.offline import init_notebook_mode, iplot import plotly.graph_objs as go from scipy.stats import zscore init_notebook_mode(connected=True) %matplotlib inline import ipywidgets as widgets from ipywidgets import interact, interact_manual ``` #### We create a new project object and load the respective data and report ``` my_project = project.Project(identifier='P0000001', datasets={}, report={}) my_project.load_project_data() my_project.load_project_report() ``` #### We can now access to all the results for each data type ``` my_project.list_datasets() ``` #### We will use the results from the proteomics analyses. We access the dataset 'proteomics' for further analysis ``` proteomics_dataset = my_project.get_dataset('proteomics') ``` #### The available analysis for this dataset are: ``` my_project.get_dataset('proteomics').list_dataframes() ``` #### We can access the different dataframes like this: ``` my_project.get_dataset('proteomics').get_dataframe('go annotation') ``` #### In this case, we will use the the processed dataframe with transformed and imputed LFQ intensities. We then normalize the data using Z Score. ``` proteomics_dataset = my_project.get_dataset('proteomics') processed_df = proteomics_dataset.get_dataframe('processed') processed_df.head() processed_df = processed_df.drop(['sample', 'subject'], axis=1).set_index('group').apply(zscore).reset_index() ``` #### In order to find clusters of proteins, we access the report and the protein-protein correlation network as a dictionary. 
``` proteomics_report = my_project.get_dataset('proteomics').report proteomics_report.list_plots() correlation_net_dict = proteomics_report.get_plot('13~correlation_correlation~network')[0] ``` #### To convert the dictionary into a network, we access the json version within the dictionary and convert it using the networkX package. ``` correlation_net = json_graph.node_link_graph(correlation_net_dict['net_json']) ``` #### Now that we have a network with proteins colored by cluster, we can convert this information into a dataframe to be used in this Jupyter Notebook. ``` correlation_df = pd.DataFrame.from_dict(correlation_net.nodes(data=True)) correlation_df = correlation_df[0].to_frame().join(correlation_df[1].apply(pd.Series)) correlation_df.columns = ['identifier', 'degree', 'radius', 'color', 'cluster'] ``` #### Since the correlation network was generated using cut-off , not all the proteins in the processed dataframe are part of a cluster, therefore we filter the processed dataframe and keep only the proteins that are present in the correlation clusters. ``` min_val = processed_df._get_numeric_data().min().min().round() max_val = processed_df._get_numeric_data().max().max().round() processed_df = processed_df[list(correlation_df.identifier) + ['group']] ``` #### Ready! To build the parallel plot, we create a dictionary with the clusters and respectives colors, and filter the processed dataframe to include only the proteins in a specific cluster. Using the Jupyter Widgets **interact** function, we can make the plot interactive and allow the visualization of a cluster selected by the user. 
``` from IPython.core.display import display, HTML @interact def plot_parallel_plot(cluster=correlation_df.cluster.unique()): cluster_colors = dict(zip(correlation_df.cluster, correlation_df.color)) clusters = correlation_df.groupby('cluster') identifiers = clusters.get_group(cluster)['identifier'].tolist() title= "Parallel plot cluster: {}".format(cluster) df = processed_df.set_index('group')[identifiers].reset_index() figure = plots.get_parallel_plot(df, identifier=cluster, args={'color':cluster_colors[cluster],'group':'group', 'title':title, 'zscore':False}) display(HTML("<p>{}</p>".format(",".join(identifiers)))) iplot(figure.figure) ```
github_jupyter
``` # !wget https://f000.backblazeb2.com/file/malaya-speech-model/data/audio-iium.zip # !wget https://f000.backblazeb2.com/file/malaya-speech-model/collections/shuffled-iium.json # !unzip audio-iium.zip -d iium # !wget https://f000.backblazeb2.com/file/malaya-speech-model/data/audio-wattpad.zip # !wget https://f000.backblazeb2.com/file/malaya-speech-model/collections/transcript-wattpad.json # !unzip audio-wattpad.zip -d wattpad # !wget https://f000.backblazeb2.com/file/malaya-speech-model/data/text-audiobook.tar.gz # !wget https://f000.backblazeb2.com/file/malaya-speech-model/data/testset-audiobook.tar.gz # !tar -zxf text-audiobook.tar.gz # !tar -xf testset-audiobook.tar.gz import os from glob import glob from tqdm import tqdm import json import unicodedata import re import itertools vocabs = [" ", "a", "e", "n", "i", "t", "o", "u", "s", "k", "r", "l", "h", "d", "m", "g", "y", "b", "p", "w", "c", "f", "j", "v", "z", "0", "1", "x", "2", "q", "5", "3", "4", "6", "9", "8", "7"] def preprocessing_text(string): string = unicodedata.normalize('NFC', string.lower()) string = ''.join([c if c in vocabs else ' ' for c in string]) string = re.sub(r'[ ]+', ' ', string).strip() string = ( ''.join(''.join(s)[:2] for _, s in itertools.groupby(string)) ) return string base_directory = '/home/husein/speech-bahasa' wattpad = [] wavs = glob('wattpad/audio-wattpad/*wav') with open('transcript-wattpad.json') as fopen: transcript = json.load(fopen) for i in tqdm(wavs): index = i.split('/')[-1].replace('.wav','') text = transcript[int(index)] wattpad.append((i, text)) iium = [] wavs = glob('iium/audio-iium/*wav') with open('shuffled-iium.json') as fopen: transcript = json.load(fopen) for i in tqdm(wavs): index = i.split('/')[-1].replace('.wav','') text = transcript[int(index)] iium.append((i, text)) khalil = glob(f'{base_directory}/tolong-sebut/*.wav') mas = glob(f'{base_directory}/sebut-perkataan-woman/*.wav') husein = glob(f'{base_directory}/sebut-perkataan-man/*.wav') len(khalil), 
len(mas), len(husein) khalils = [] for i in tqdm(khalil[-int(len(khalil) * 0.05):]): try: t = i.split('/')[-1].replace('.wav','') text = f'tolong sebut {t}' khalils.append((i, text)) except Exception as e: print(e) mass = [] for i in tqdm(mas[-int(len(mas) * 0.05):]): try: t = i.split('/')[-1].replace('.wav','') text = f'sebut perkataan {t}' mass.append((i, text)) except Exception as e: print(e) huseins = [] for i in tqdm(husein[-int(len(husein) * 0.05):]): try: t = i.split('/')[-1].replace('.wav','') text = f'sebut perkataan {t}' huseins.append((i, text)) except Exception as e: print(e) wikipedia = [] wavs = glob(f'{base_directory}/streaming/*wav') for i in tqdm(wavs[-int(len(wavs) * 0.05):]): text = os.path.split(i)[1].replace('.wav', '') wikipedia.append((i, text)) len(wikipedia) news = [] wavs = glob(f'{base_directory}/news/audio/*wav') with open(f'{base_directory}/transcript-news.json') as fopen: transcript_news = json.load(fopen) for i in tqdm(wavs[-int(len(wavs) * 0.05):]): index = i.split('/')[-1].replace('.wav','') text = transcript_news[int(index)] news.append((i, text)) import pandas as pd df = pd.read_csv(f'{base_directory}/haqkiem/metadata.csv', header = None, sep = '|') txts = df.values.tolist() haqkiem = [] for f in tqdm(txts[-int(len(txts) * 0.05):]): text = f[1] text = text.split('.,,')[0] f = f[0] r = f'{base_directory}/haqkiem/{f}.wav' haqkiem.append((r, text)) audios = wattpad + iium + khalils + mass + wikipedia + news + haqkiem + huseins audios, texts = zip(*audios) processed_text = [preprocessing_text(t) for t in texts] from sklearn.utils import shuffle audios, processed_text = shuffle(audios, processed_text) with open('bahasa-asr-test.json', 'w') as fopen: json.dump({'X': audios, 'Y':processed_text}, fopen) import json with open('bahasa-asr-test.json') as fopen: data = json.load(fopen) 6000000 / 16000 # import malaya_speech # tokenizer = malaya_speech.subword.load('transducer.subword') # tokenizer # malaya_speech.subword.decode(tokenizer, [0, 
2, 133, 875]) # from pydub import AudioSegment # import numpy as np # sr = 16000 # def mp3_to_wav(file, sr = sr): # audio = AudioSegment.from_file(file) # audio = audio.set_frame_rate(sr).set_channels(1) # sample = np.array(audio.get_array_of_samples()) # return malaya_speech.astype.int_to_float(sample), sr # def generator(maxlen = 18, min_length_text = 2): # for i in tqdm(range(len(audios))): # try: # if audios[i].endswith('.mp3'): # wav_data, _ = mp3_to_wav(audios[i]) # else: # wav_data, _ = malaya_speech.load(audios[i]) # if (len(wav_data) / sr) > maxlen: # print(f'skipped audio too long {audios[i]}') # continue # if len(processed_text[i]) < min_length_text: # print(f'skipped text too short {audios[i]}') # continue # yield { # 'waveforms': wav_data.tolist(), # 'waveform_lens': [len(wav_data)], # 'targets': malaya_speech.subword.encode(tokenizer, processed_text[i], add_blank = False), # } # except Exception as e: # print(e) # generator = generator() # import os # import tensorflow as tf # os.system('rm bahasa-asr-test/data/*') # DATA_DIR = os.path.expanduser('bahasa-asr-test/data') # tf.gfile.MakeDirs(DATA_DIR) # shards = [{'split': 'dev', 'shards': 10}] # import malaya_speech.train as train # train.prepare_dataset(generator, DATA_DIR, shards, prefix = 'bahasa-asr') ```
github_jupyter
## 1 - Analisando Dados de Pacientes Estamos estudando inflamação em pacientes que receberam um novo tratamento para artrite, precisamos analisar a primeira dúzia de conjuntos de dados. Os conjuntos de dados são armazenados em valores separados por vírgula no formato CSV: cada linha contém informações para um único paciente, e as colunas representam dias sucessivos. As primeiras fileiras do nosso primeiro arquivo se parecem com isto: 0,0,1,3,1,2,4,7,8,3,3,3,10,5,7,4,7,7,12,18,6,13,11,11,7,7,4,6,8,8,4,4,5,7,3,4,2,3,0,0 0,1,2,1,2,1,3,2,2,6,10,11,5,9,4,4,7,16,8,6,18,4,12,5,12,7,11,5,11,3,3,5,4,4,5,5,1,1,0,1 0,1,1,3,3,2,6,2,5,9,5,7,4,5,4,15,5,11,9,10,19,14,12,17,7,12,11,7,4,2,10,5,4,2,2,3,2,2,1,1 0,0,2,0,4,2,2,1,6,7,10,7,9,13,8,8,15,10,10,7,17,4,4,7,6,15,6,4,9,11,3,5,6,3,3,4,2,3,2,1 0,1,1,3,3,1,3,5,2,4,4,7,6,5,3,10,8,10,6,17,9,14,9,7,13,9,12,6,7,7,9,6,3,2,2,4,2,0,1,1 Nós queremos: * carregar esses dados na memória, * calcular a inflamação média por dia em todos os pacientes, e * traçar o resultado. #### Objetivos da atividade: - Explicar o que é uma biblioteca e para que as bibliotecas são usadas. - Importar uma biblioteca Python e usar as funções que ela contém. - Ler dados tabulares de um arquivo em um programa. - Atribuir valores a variáveis. - Selecionar valores individuais e subsecções de dados. - Executar operações em matrizes de dados. - Plotar gráficos simples a partir de dados. ### Armazenando Dados no Computador Nesta lição, aprenderemos a manipular o conjunto de dados de inflamação com o Python. Mas antes de discutir como lidar com muitos dados, mostraremos como armazenar um único valor no computador. A linha abaixo atribui o valor 55 a variável weight_kg (Sem precisar declarar o tipo). Quando terminar de digitar, pressione $Shift + Enter$ que o notebook executará o comando. 
``` aula = 'visao computacional' weight_kg = 55 ``` Uma vez que a variável tem um valor, podemos imprimi-la: ``` print (weight_kg) ``` e fazer operações aritméticas com ela: ``` print ('peso em libras: '+str(2.2 * weight_kg)) ``` Também podemos alterar o valor de uma variável, atribuindo-lhe um novo: ``` weight_kg = 57.5 print ('peso em quilogramas:', weight_kg) ``` Como mostra o exemplo acima,podemos imprimir várias coisas ao mesmo tempo, separando-as com vírgulas. Se imaginarmos a variável como um post-it com um nome escrito nele, a atribuição é como colar a nota em um valor particular: <img src="files/img/python-sticky-note-variables-01.svg" alt="Variables as Sticky Notes" /> Isso significa que atribuir um valor a uma variável *não* altera os valores de outras variáveis. Por exemplo, vamos armazenar o peso do sujeito em libras em uma variável: ``` weight_lb = 2.2 * weight_kg print ('weight in kilograms:', weight_kg, 'and in pounds:', weight_lb) ``` <img src="files/img/python-sticky-note-variables-02.svg" alt="Creating Another Variable" /> e então mudar a variável `weight_kg`: ``` weight_kg = 100.0 print ('weight in kilograms is now:', weight_kg, 'and weight in pounds is still:', weight_lb) ``` <img src="files/img/python-sticky-note-variables-03.svg" alt="Updating a Variable" /> Uma vez que `weight_lb` não "se lembra" de onde seu valor veio, ela não é atualizada automaticamente quando `weight_kg` muda. >#### DICA >Você pode usar o comando% whos a qualquer momento para ver quais variáveis você criou e quais módulos você carregou na memória do computador. Como este é um comando IPython, ele só funcionará se você estiver em um terminal IPython ou no Notebook Jupyter. ``` %whos ``` ### Carregando os Dados Palavras são úteis, mas o que é mais útil ainda são as frases e histórias que podemos construir com elas. 
Da mesma forma, enquanto muitas ferramentas poderosas são incorporadas a lingaugens como o Python, ainda mais utilidades nas [bibliotecas](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#library) são construídas. Para carregar nossos dados de inflamações, precisamos [importar](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#import) uma biblioteca chamada NumPy que sabe operar em matrizes: ``` import numpy ``` Importar uma biblioteca é como tirar um equipamento de laboratório de um armário de armazenamento e configurando-o no banco. Uma vez que está pronto, podemos pedir à biblioteca que leia nosso arquivo de dados para nós: ``` numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',') ``` A expressão `numpy.loadtxt (...)` é uma [chamada de função](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#function-call) que pede ao Python para executar a função `loadtxt` que pertence à biblioteca `numpy`. Esta [notação com ponto](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#dotted-notation) é usada em todos os lugares em Python para se referir às partes de coisas como `todo.parte`. `numpy.loadtxt` tem dois [parâmetros](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#parameter): o nome do arquivo que queremos ler, e o [delimitador](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#delimiter) que separa valores em uma linha. Estes dois precisam ser cadeias de caracteres (ou [strings](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#string) para abreviar), então os colocamos entre aspas. O notebook executa nosso comando. Como não dissemos a ele para fazer algo com a saída da função, o notebook exibe isso. Nesse caso, essa saída é o dado que acabamos de carregar. Por padrão, apenas algumas linhas e colunas são mostradas (com `...` para omitir elementos ao exibir grandes arrays). 
Para economizar espaço, Python exibe números como `1.` em vez de` 1.0` quando não há nada interessante após o ponto decimal. Nossa chamada para `numpy.loadtxt` leu nosso arquivo, mas não salvou os dados na memória. Para fazer isso, nós precisamos [atribuir](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#assignment) a matriz para uma [variável](http://swcarpentry.github.io/python -novice-inflammation-2.7 / reference.html # variable). Uma variável é apenas um nome para um valor, como `x`,` current_temperature` ou `subject_id`. Podemos criar uma nova variável simplesmente atribuindo-lhe um valor usando `=`, sem precisar declarar o seu tipo. Vamos rodar `numpy.loadtxt` novamente e salvar seu resultado: ``` data = numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',') ``` Esta declaração não produz qualquer saída porque a atribuição não exibe nada. Se quisermos verificar se nossos dados foram carregados, podemos imprimir o valor da variável: ``` print (data) ``` #### Exercícios 1. Desenhe diagramas mostrando quais variáveis se referem aos valores após cada declaração no seguinte programa: ~~~python mass = 47.5 age = 122 mass = mass * 2.0 age = age - 20 ~~~ 2. O que o programa a seguir imprime? ~~~python first, second = 'Grace', 'Hopper' third, fourth = second, first print (third, fourth) ~~~ ``` first, second= 'Grace','Hopper' third, fourth= second, first print (third, fourth) mass= 47.5 age=122 mass= mass*2.0 age=age-20 print (mass) print (age) ``` ### Manipulando os Dados Agora que nossos dados estão na memória, podemos começar a fazer coisas com eles. Primeiro, vamos perguntar a qual [tipo](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#data-type) de coisa `data` se refere: ``` print (data.dtype) ``` O resultado nos diz que `data` atualmente se refere a uma matriz N-dimensional criada pela biblioteca NumPy. 
Podemos ver como que é o [formato/shape](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.htm#shape) desta forma: ``` print (data.shape) ``` Isso nos diz que `data` possui 60 linhas e 40 colunas. `data.shape` é um [membro](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.htm#member) de `data`, isto é, um valor que é armazenado como parte de um valor maior. Usamos a mesma notação pontilhada para os membros de valores que usamos para as funções em bibliotecas porque eles têm o mesmo relacionamento parte-todo. Se quisermos obter um único valor da matriz, devemos fornecer um [índice](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.htm#index) entre colchetes, assim como fazemos em matemática: ``` print ('first value in data:', data[0, 0]) print ('middle value in data:', data[15, 2]) ``` A expressão `data [30, 20]` pode não surpreendê-lo, mas `data [0, 0]` pode. Linguagens de programação como Fortran e MATLAB começam a contar às 1, porque isso é o que os seres humanos fizeram há milhares de anos. Idiomas na família C (incluindo C ++, Java, Perl e Python) contagem de 0 porque isso é mais simples para os computadores fazerem. Como um resultado, se tivermos um M&times;N array em Python, seus índices vão de 0 a M-1 no primeiro eixo e 0 a N-1 no segundo. Demora um pouco de acostumar, mas uma maneira de lembrar a regra é que O índice é o número de passos que temos que tomar desde o início para obter o item que queremos. <img src="files/img/python-zero-index.png" alt="Indexação do Python" /> > #### Nota > > O que também pode surpreender você é que quando o Python exibe uma matriz, > mostra o elemento com índice `[0, 0]` no canto superior esquerdo > em vez da inferior esquerda. > Isso é consistente com a forma como os matemáticos desenham matrizes, > mas diferente das coordenadas cartesianas. > Os índices são (linha, coluna) em vez de (coluna, linha) pelo mesmo motivo. 
Um índice como `[30, 20]` seleciona um único elemento de uma matriz, mas também podemos selecionar seções inteiras. Por exemplo, podemos selecionar os primeiros dez dias (colunas) de valores para as primeiras quatro (linhas) pacientes como este: ``` print (data[:4, 10:]) ``` O [intervalo](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.htm#slice) `0: 4` significa: "Comece no índice 0 e vá até, mas não incluído, o índice 4." Novamente, o até-mas-nao-incluso pode levar um tempo para acostumar, mas a regra é que a diferença entre os limites superior e inferior é o número de valores no intervalo. Não temos que começar, necessariamente, intervalos em 0: ``` print (data[5:10, 0:10]) ``` e não precisamos acessar todos os valores do intervalo --- se fornecemos um [passo](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.htm#stride), Python pega os valores espaçados com o `passo` definido: ``` print (data[0:10:3, 0:10:2]) ``` Aqui, nós pegamos as linhas 0, 3, 6 e 9, e as colunas 0, 2, 4, 6 e 8. (Novamente, sempre incluímos o limite inferior, mas pare quando alcançamos ou cruzamos o limite superior.) Também não precisamos incluir o limite superior e inferior do intervalo. Se não incluímos o limite inferior, Python usa 0 por padrão; se não incluímos o superior, o intervalo corre até o final do eixo, e se não incluímos nenhum (isto é, se usarmos apenas ':' por conta própria) O intervalo inclui tudo: ``` small = data[:3, 36:] print ('small is:') print (small) ``` Arrays também sabem como executar operações matemáticas comuns em seus valores. Se queremos encontrar a inflamação média para todos os pacientes em todos os dias, por exemplo, podemos pedir a matriz pelo seu valor médio: ``` print (data.mean()) numpy.zeros((3,10,10)) ``` `mean` é um [método](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.htm#method) da matriz, isto é, uma função que lhe pertence da mesma forma que o `shape` do membro faz. 
Se as variáveis são substantivos, os métodos são verbos: Eles são o que o assunto em questão sabe como fazer. É por isso que `data.shape` não precisa ser chamado (é só uma coisa) mas `data.mean ()` faz (é uma ação). É também por isso que precisamos de parênteses vazios para `data.mean ()`: mesmo quando não passamos em nenhum parâmetro, parênteses são como dizer ao Python para ir e fazer algo por nós. As matrizes numPy têm muitos métodos úteis: ``` print ('maximum inflammation:', data.max()) print ('minimum inflammation:', data.min()) print ('standard deviation:', data.std()) ``` Ao analisar dados, muitas vezes queremos olhar para estatísticas parciais, como o valor máximo por paciente ou o valor médio por dia. Uma maneira de fazer isso é selecionar os dados que queremos para criar uma nova matriz temporária, e então fazer o cálculo: ``` patient_0 = data[0, :] # 0 on the first axis, everything on the second print ('maximum inflammation for patient 0:', patient_0.max()) ``` Na verdade, não precisamos armazenar a linha em uma variável própria. Em vez disso, podemos combinar a seleção e o método de chamada: ``` print ('maximum inflammation for patient 2:', data[2, :].max()) ``` #### Exercício Uma subseção de uma matriz é chamada de [fatia](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#slice). Podemos pegar fatias de strings de caracteres também: ``` element = 'oxygen' print ('first three characters:', element[:3]) print ('last three characters:', element[3:]) ``` 1. Qual é o valor de `element [: 4]`?      E `element [4:]`?      Ou `element [:]`? 1. O que é `element [-1]`?      O que é `element [-2]`?      Dadas essas respostas,      explique o que `element [1: -1]` faz. 1. A expressão `element [3: 3]` produz uma [string vazia](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#empty-string),      ou seja, uma string que não contém caracteres.      
Se `data` contém nossa matriz de dados de pacientes,      O que `data [3: 3, 4: 4]` produz?      E quanto a `data [3: 3,:]`? ### Plotando O matemático Richard Hamming já disse, "O objetivo da computação é gerar insights, não números" e a melhor maneira de obter estes insights é visualizandos os dados. A visualização merece uma leitura inteira (ou curso) própria, mas podemos explorar alguns recursos da biblioteca `matplotlib` do Python aqui. Primeiro, digamos ao IPython Notebook que queremos que nossos gráficos sejam exibidos inline, em vez de em uma janela de visualização separada: ``` %matplotlib inline ``` O `%` no início da linha indica que este é um comando para o notebook, em vez de uma declaração em Python. A seguir, vamos importar o módulo `pyplot` de ` matplotlib` e usar duas de suas funções para criar e exibir um mapa de calor de nossos dados: ``` from matplotlib import pyplot pyplot.imshow(data[2:,:38], cmap="inferno") pyplot.show() ``` As regiões azuis neste mapa de calor são valores baixos, enquanto o vermelho mostra valores altos. Como podemos ver, a inflamação aumenta e cai em um período de 40 dias. Vamos dar uma olhada na inflamação média ao longo do tempo: ``` data.shape ave_inflammation = data.mean(axis=0) pyplot.plot(ave_inflammation) pyplot.show() ``` Aqui, colocamos a média por dia em todos os pacientes na variável `ave_inflammation`, então pedimos o `pyplot` para criar e exibir um gráfico de linha desses valores. O resultado é aproximadamente um aumento linear e queda,o que é suspeito: com base em outros estudos, esperamos um aumento mais acentuado e queda mais lenta. Vamos dar uma olhada em outras duas estatísticas: ``` print ('inflamação máxima por dia') pyplot.plot(data.max(axis=0)) pyplot.show() print ('inflamação mínima por dia') pyplot.plot(data.min(axis=0)) pyplot.show() ``` O valor máximo aumenta e cai perfeitamente, enquanto o mínimo parece ser uma função de etapas. 
Nenhum resultado parece particularmente provável, então, há um erro em nossos cálculos ou algo está errado com nossos dados. #### Exercício 1. Crie um gráfico que mostre o desvio padrão (numpy.std) dos dados de inflamação para cada dia em todos os pacientes. ``` from matplotlib import pyplot as plt ''' #axis = 1 refere-se as colunas, enquanto axis = 0 as linhas pyplot.plot( data.std(axis=0) ) pyplot.show() #Explicação: Executa a função numpy.std() para cada linha de cada coluna... 0 < 40 d = numpy.array([[1 , 2 , 3], [4 , 5 , 7]]) d.sum(axis=0) ''' plt.title("Mapa de calor") plt.xlabel('Dias') plt.imshow(data, cmap="inferno") plt.figure(figsize=(15, 5)) plt.subplot(1, 4, 1) plt.title('Desvio Padrão') plt.xlabel('Dias') plt.plot(data.std(axis = 0), color="red") plt.subplot(1, 4, 2) plt.title('Inflamação Média') plt.xlabel('Dias') plt.plot(data.mean(axis = 0)) plt.subplot(1, 4, 3) plt.title('Inflamação Máxima') plt.xlabel('Dias') plt.plot(data.max(axis = 0)) plt.subplot(1, 4, 4) plt.title('Inflamação Mínima') plt.xlabel('Dias') plt.plot(data.min(axis = 0)) plt.show() ``` ### Resumindo É muito comum criar um [alias](http://swcarpentry.github.io/python-novice-inflammation-2.7/reference.html#alias) (alias renomeação de bilioteca) para uma biblioteca ao importá-la para reduzir a quantidade de digitação que temos que fazer. Aqui estão os nossos três gráficos lado a lado usando aliases para `numpy` e` pyplot`: ``` import numpy as np from matplotlib import pyplot as plt data = np.loadtxt(fname='data/inflammation-01.csv', delimiter=',') plt.figure(figsize=(10.0, 3.0)) plt.subplot(1, 3, 1) plt.ylabel('average') plt.plot(data.mean(0)) plt.subplot(1, 3, 2) plt.ylabel('max') plt.plot(data.max(0)) plt.subplot(1, 3, 3) plt.ylabel('min') plt.plot(data.min(0)) plt.tight_layout() plt.show() ``` As duas primeiras linhas recarregam nossas bibliotecas como `np` e `plt`, que são os alias que a maioria dos programadores de Python usam. 
A chamada para `loadtxt` lê nossos dados; o resto do programa diz à biblioteca de plotagem o quão grande queremos que a figura seja, que estamos criando três sub-gráficos, o que desenhar para cada um, e que queremos um layout mais compacto. (Perversamente, se excluirmos essa chamada para `plt.tight_layout ()`, os gráficos na verdade ficarão espremidos, mais próximos uns dos outros.) #### Exercício 1. Modifique o programa para exibir os três gráficos em cima um do outro em vez de lado a lado. ``` plt.figure(figsize=(5.0, 10.0)) plt.subplot(3, 1, 1) plt.ylabel('average') plt.plot(data.mean(0)) plt.subplot(3, 1, 2) plt.ylabel('max') plt.plot(data.max(0)) plt.subplot(3, 1, 3) plt.ylabel('min') plt.plot(data.min(0)) plt.tight_layout() plt.show() ``` #### Pontos Principais * Importar uma biblioteca para um programa usando `import nome_da_biblioteca`. * Usar a biblioteca `numpy` para trabalhar com arrays em Python. * Usar `variável = valor` para atribuir um valor a uma variável e gravá-la na memória. * As variáveis são criadas sob demanda sempre que um valor lhes é atribuído. * Usar `print (alguma_coisa)` para exibir o valor de `alguma_coisa`. * A expressão `array.shape` dá a forma de uma matriz. * Usar `array [x, y]` para selecionar um único elemento de uma matriz. * Os índices de matriz começam em 0, não 1. * Usar `low: high` para especificar uma fatia que inclui os índices de `low` até `high-1`. * Toda a indexação e corte que funciona em arrays também funciona em strings. * Usar `# algum tipo de explicação` para adicionar comentários aos programas. * Usar `array.mean ()`, `array.max ()` e `array.min ()` para calcular estatísticas simples. * Usar `array.mean (axis = 0)` ou `array.mean (axis = 1)` para calcular estatísticas no eixo especificado. * Usar a biblioteca `pyplot` de `matplotlib` para criar visualizações simples. #### Próximos passos Nosso trabalho até agora nos convenceu de que algo está errado com nosso primeiro arquivo de dados. 
Gostaríamos de verificar os outros 11 da mesma maneira, mas digitar repetidamente os mesmos comandos é tedioso e propenso a erros. Como os computadores não ficam entediados (até onde sabemos), devemos criar uma maneira de fazer uma análise completa com um único comando, e então descobrir como repetir esse passo uma vez para cada arquivo. Essas operações são os assuntos das próximas duas lições.
github_jupyter
# Data Preparation Part 1 (10 points) Define and prepare your class variables. Use proper variable representations (int, float, one-hot, etc.). Use pre-processing methods (as needed) for dimensionality reduction, scaling, etc. Remove variables that are not needed/useful for the analysis. ## Library Imports ``` # Base Imports import pandas as pd import numpy as np import time from matplotlib import pyplot as plt from matplotlib.ticker import MaxNLocator import seaborn as sns %matplotlib inline # Pre-Processing from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import OneHotEncoder from sklearn.impute import SimpleImputer from sklearn.compose import ColumnTransformer # Metrics and Evaluation from sklearn import metrics from sklearn.metrics import classification_report from sklearn.metrics import plot_confusion_matrix from sklearn.metrics import plot_roc_curve # Train/ Test Split from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.pipeline import Pipeline # Estimators from sklearn.naive_bayes import MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier # Hyper Parameter Tuning from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV ``` ## Load Data ``` df = pd.read_csv('../../../../../../data/cardio_train.csv', delimiter=';') # set id as index df.set_index("id", inplace=True) # copy original data df_clean = df.copy(deep=True) # drop duplicates df_clean.drop_duplicates(inplace=True) # %%time # Convert age into years df_clean['age'] = (df_clean['age'] / 365).round().astype('int') # re-encode gender to male (1) and female (0) df_clean['gender'] = np.where((df_clean.gender 
== 2), 1, 0) # compute the body mass index based on weight and height df_clean['bmi'] = df_clean['weight'] / (df_clean['height']/100)**2 # create a BMI group df_clean['bmiGrp'] = np.where((df_clean.bmi < 18.5), 1, 0) df_clean['bmiGrp'] = np.where((df_clean.bmi >= 18.5) & (df_clean.bmi < 25), 2, df_clean.bmiGrp) df_clean['bmiGrp'] = np.where((df_clean.bmi >= 25) & (df_clean.bmi < 30), 3, df_clean.bmiGrp) df_clean['bmiGrp'] = np.where((df_clean.bmi >= 30), 4, df_clean.bmiGrp) # bin blood pressure groups based on the api hi/ lo variables df_clean['bp'] = np.where((df_clean.ap_hi < 120) & (df_clean.ap_lo < 80), 1, 0) df_clean['bp'] = np.where((df_clean.ap_hi >= 120) & (df_clean.ap_hi < 130) & (df_clean.ap_lo < 80), 2, df_clean.bp) df_clean['bp'] = np.where((df_clean.ap_hi >= 130) & (df_clean.ap_hi < 140) | ((df_clean.ap_lo >= 80) & (df_clean.ap_lo < 90)), 3, df_clean.bp) df_clean['bp'] = np.where((df_clean.ap_hi >= 140) | (df_clean.ap_lo >= 90), 4, df_clean.bp) df_clean['bp'] = np.where((df_clean.ap_hi > 180) | (df_clean.ap_lo > 120), 5, df_clean.bp) ``` # Data Preparation Part 2 (5 points) Describe the final dataset that is used for classification/regression (include a description of any newly formed variables you created). ``` # New Feature Model X_cols = ['age', 'gender', 'bmiGrp', 'bp', 'cholesterol', 'gluc', 'smoke', 'alco', 'active'] # Store feature matrix X = df_clean[X_cols] #.to_numpy() # Store response vector y = df_clean['cardio'] #.to_numpy() # Full Model # X_cols = ['age', 'gender', 'height', 'weight', 'ap_hi', 'ap_lo', 'cholesterol', 'gluc', 'smoke', 'alco', 'active'] ``` # Modeling and Evaluation 1 (10 points) Choose and explain your evaluation metrics that you will use (i.e., accuracy, precision, recall, F-measure, or any metric we have discussed). Why are the measure(s) appropriate for analyzing the results of your modeling? Give a detailed explanation backing up any assertions. 
# Modeling and Evaluation 2 (10 points) Choose the method you will use for dividing your data into training and testing splits (i.e., are you using Stratified 10-fold cross validation? Why?). Explain why your chosen method is appropriate or use more than one method as appropriate. For example, if you are using time series data then you should be using continuous training and testing sets across time. ``` numeric_features = ['age', 'cholesterol', 'bp', 'bmiGrp', 'gluc'] categorical_features = ['gender', 'smoke', 'alco', 'active'] numeric_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(missing_values=np.nan, strategy="median"))]) categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(missing_values=np.nan, strategy="median")), ('onehot', OneHotEncoder(handle_unknown='ignore'))]) preprocessor = ColumnTransformer( transformers=[ ('num', numeric_transformer, numeric_features), ('cat', categorical_transformer, categorical_features)]) clf1 = MultinomialNB(alpha=1.0) clf2 = KNeighborsClassifier(n_neighbors=49) clf3 = RandomForestClassifier(random_state=1, n_estimators=1000, min_samples_split=5, min_samples_leaf=4, max_features='sqrt', bootstrap=True) clf4 = GradientBoostingClassifier(random_state=1, n_estimators=4, min_samples_split=0.2, min_samples_leaf=0.1, max_features=7, max_depth=26.0, loss='exponential') clf5 = LogisticRegression(random_state=1, penalty='l2', C=.01) clf6 = DecisionTreeClassifier(random_state=1, min_samples_split=2, min_samples_leaf=4, max_features='log2', criterion='entropy', class_weight=None) pipe1 = Pipeline([['preprocessor', preprocessor], # ['rs', RobustScaler()], <<< ValueError: Negative values in data passed to MultinomialNB (input X) ['clf', clf1]]) pipe2 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['clf', clf2]]) pipe3 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['clf', clf3]]) pipe4 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['clf', clf4]]) pipe5 
= Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['clf', clf5]]) pipe6 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['clf', clf6]]) clf_labels = ['Naive Bayes', 'KNN', 'Random Forest', 'Gradient Boosting', 'Logistic Regression', 'Decision Tree'] # Note n_jobs below. Setting it to -1 will create cv number of threads print('10-fold cross validation:\n') for clf, label in zip([pipe1, pipe2, pipe3, pipe4, pipe5, pipe6], clf_labels): scores = cross_val_score(estimator=clf, X=X, y=y, cv=10, scoring='roc_auc', n_jobs=-1) print("ROC AUC: %0.3f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label)) ``` # Modeling and Evaluation 3 Create three different classification/regression models for each task (e.g., random forest, KNN, and SVM for task one and the same or different algorithms for task two). Two modeling techniques must be new (but the third could be SVM or logistic regression). Adjust parameters as appropriate to increase generalization performance using your chosen metric. You must investigate different parameters of the algorithms! 
## Grid Search ``` numeric_features = ['age', 'cholesterol', 'bp', 'bmiGrp', 'gluc'] categorical_features = ['gender', 'smoke', 'alco', 'active'] numeric_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(missing_values=np.nan, strategy="median"))]) categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(missing_values=np.nan, strategy="median")), ('onehot', OneHotEncoder(handle_unknown='ignore'))]) preprocessor = ColumnTransformer( transformers=[ ('num', numeric_transformer, numeric_features), ('cat', categorical_transformer, categorical_features)]) clf1 = MultinomialNB() clf2 = KNeighborsClassifier() clf3 = RandomForestClassifier(random_state=1) clf4 = GradientBoostingClassifier(random_state=1) clf5 = LogisticRegression(random_state=1) clf6 = DecisionTreeClassifier(random_state=1) pipe1 = Pipeline([['preprocessor', preprocessor], # ['rs', RobustScaler()], <<< ValueError: Negative values in data passed to MultinomialNB (input X) ['clf', clf1]]) pipe2 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['clf', clf2]]) pipe3 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['clf', clf3]]) pipe4 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['clf', clf4]]) pipe5 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['clf', clf5]]) pipe6 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['clf', clf6]]) model_params = { # "multinomialnb": { # "model": pipe1, # "params": { # "clf__alpha": [0.0001, 0.001, 0.01, 0.1, 1.0] # } # }, # "kneighborsClassifier": { # "model": pipe2, # "params": { # "clf__n_neighbors": np.arange(5,51), # "clf__weights": ["uniform", "distance"] # } # }, # "randomforestclassifier": { # "model": pipe3, # "params": { # "clf__n_estimators": [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000], # # "clf__criterion": ['gini','entropy'], # "clf__max_features": ['auto', 'sqrt'], # "clf__min_samples_split": [2, 5, 10], # "clf__min_samples_leaf": 
[1, 2, 4], # "clf__bootstrap": [True, False] # } # }, # "gradientboostingclassifier": { # "model": pipe4, # "params": { # "clf__loss": ['deviance', 'exponential'], # "clf__n_estimators": [1, 2, 4, 8, 16, 32, 64, 100, 200], # "clf__max_depth": np.linspace(1, 32, 32, endpoint=True), # "clf__min_samples_split": np.linspace(0.1, 1.0, 10, endpoint=True), # "clf__min_samples_leaf": np.linspace(0.1, 0.5, 5, endpoint=True), # "clf__max_features": list(range(1,X.shape[1])) # } # }, # "logisticregression": { # "model": pipe5, # "params": { # "clf__C": [.01, .1, 1, 5, 10, 25, 50], # "clf__penalty": ["l1", "l2"] # } # }, # "decisiontreeclassifier": { # "model": pipe6, # "params": { # "clf__criterion": ['gini','entropy'], # "clf__splitter": ['best', 'random'], # "clf__min_samples_split": [2, 5, 10], # "clf__min_samples_leaf": [1, 2, 4], # "clf__max_features": ['auto', 'sqrt', 'log2'], # "clf__class_weight": [None, 'balanced'] # } # } } scores = [] for model_name, mp in model_params.items(): start = time.time() # clf = GridSearchCV(estimator = mp["model"], param_grid=mp["params"], cv=3, scoring="roc_auc", n_jobs=-1) clf = RandomizedSearchCV(estimator = mp["model"], param_distributions=mp["params"], cv=10, scoring="roc_auc", n_jobs=-1) clf.fit(X, y) elapsed_time = (time.time() - start) scores.append({"Model": model_name, "Best ROC AUC": clf.best_score_, # Mean cross-validated score of the best_estimator "Best Params": clf.best_params_, "results": clf.cv_results_, "Cross Validation Time": elapsed_time, "Best Estimator": clf.best_estimator_ }) # Although not pretty, it's quick and easy to read print('10 Fold Cross Validation Scores:') for model in scores: print() for key, value in model.items(): if key == 'Best Estimator': print("Prediction Accuracy",': ',value.score(X, y)) elif key == 'results': print('Mean Fit Time: ', value['mean_fit_time'].mean()) print('Mean Score Time: ', value['mean_score_time'].mean()) else: print(key,': ',value) # 10 Fold Cross Validation Scores: # Model : 
multinomialnb # Best ROC AUC : 0.7032399438887057 # Best Params : {'clf__alpha': 1.0} # Mean Fit Time: 0.11702154636383058 # Mean Score Time: 0.014475626945495604 # Cross Validation Time : 1.922368049621582 # Prediction Accuracy : 0.6558677260775123 # Model : kneighborsClassifier # Best ROC AUC : 0.7768214074917433 # Best Params : {'clf__weights': 'uniform', 'clf__n_neighbors': 49} # Mean Fit Time: 2.8226044678688047 # Mean Score Time: 1.6014243602752685 # Cross Validation Time : 114.64539361000061 # Prediction Accuracy : 0.728035326397622 # 10 Fold Cross Validation Scores: # Model : randomforestclassifier # Best ROC AUC : 0.7769019017622767 # Best Params : {'clf__n_estimators': 1000, 'clf__min_samples_split': 5, 'clf__min_samples_leaf': 4, 'clf__max_features': 'sqrt', 'clf__bootstrap': True} # Mean Fit Time: 31.808455479145056 # Mean Score Time: 1.0880742049217225 # Cross Validation Time : 868.0742781162262 # Prediction Accuracy : 0.7426117526008917 # Model : gradientboostingclassifier # Best ROC AUC : 0.7709950123343386 # Best Params : {'clf__n_estimators': 4, 'clf__min_samples_split': 0.2, 'clf__min_samples_leaf': 0.1, 'clf__max_features': 7, 'clf__max_depth': 26.0, 'clf__loss': 'exponential'} # Mean Fit Time: 0.7776849865913391 # Mean Score Time: 0.017364499568939207 # Cross Validation Time : 20.681216716766357 # Prediction Accuracy : 0.7102435120612781 # Model : logisticregression # Best ROC AUC : 0.7683033417244554 # Best Params : {'clf__penalty': 'l2', 'clf__C': 0.01} # Mean Fit Time: 0.21944808721542355 # Mean Score Time: 0.006357955932617188 # Cross Validation Time : 6.326923131942749 # Prediction Accuracy : 0.7112581456499372 # Model : decisiontreeclassifier # Best ROC AUC : 0.7671939853551971 # Best Params : {'clf__splitter': 'random', 'clf__min_samples_split': 2, 'clf__min_samples_leaf': 4, 'clf__max_features': 'log2', 'clf__criterion': 'entropy', 'clf__class_weight': None} # Mean Fit Time: 0.24330010652542114 # Mean Score Time: 0.016144764423370362 # 
Cross Validation Time : 7.108668088912964 # Prediction Accuracy : 0.7343231965245227 ``` # Modeling and Evaluation 4 (10 Points) Analyze the results using your chosen method of evaluation. Use visualizations of the results to bolster the analysis. Explain any visuals and analyze why they are interesting to someone that might use this model. # Modeling and Evaluation 5 (10 Points) Discuss the advantages of each model for each classification task, if any. If there are not advantages, explain why. Is any model better than another? Is the difference significant with 95% confidence? Use proper statistical comparison methods. You must use statistical comparison techniques—be sure they are appropriate for your chosen method of validation as discussed in unit 7 of the course. # Modeling and Evaluation 6 (10 Points) Which attributes from your analysis are most important? Use proper methods discussed in class to evaluate the importance of different attributes. Discuss the results and hypothesize about why certain attributes are more important than others for a given classification task. # Deployment (5 Points) How useful is your model for interested parties (i.e., the companies or organizations that might want to use it for prediction)? How would you measure the model's value if it was used by these parties? How would your deploy your model for interested parties? What other data should be collected? How often would the model need to be updated, etc.? # Exceptional Work (10 points) You have free reign to provide additional analyses. One idea: grid search parameters in a parallelized fashion and visualize the performances across attributes. Which parameters are most significant for making a good model for each classification algorithm? 
``` X_train, X_test, y_train, y_test =\ train_test_split(X, y, test_size=0.2, random_state=1, stratify=y) numeric_features = ['age', 'cholesterol', 'bp', 'bmiGrp', 'gluc'] categorical_features = ['gender', 'smoke', 'alco', 'active'] numeric_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(missing_values=np.nan, strategy="median"))]) categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(missing_values=np.nan, strategy="median")), ('onehot', OneHotEncoder(handle_unknown='ignore'))]) preprocessor = ColumnTransformer( transformers=[ ('num', numeric_transformer, numeric_features), ('cat', categorical_transformer, categorical_features)]) nb = MultinomialNB(alpha=1.0) knn = KNeighborsClassifier(n_neighbors=49) rf = RandomForestClassifier(random_state=1, n_estimators=1000, min_samples_split=5, min_samples_leaf=4, max_features='sqrt', bootstrap=True) gb = GradientBoostingClassifier(random_state=1, n_estimators=4, min_samples_split=0.2, min_samples_leaf=0.1, max_features=7, max_depth=26.0, loss='exponential') logreg = LogisticRegression(random_state=1, penalty='l2', C=.01) dt = DecisionTreeClassifier(random_state=1, min_samples_split=2, min_samples_leaf=4, max_features='log2', criterion='entropy', class_weight=None) pipe1 = Pipeline([['preprocessor', preprocessor], # ['rs', RobustScaler()], ['classifer', nb]]) pipe2 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['classifer', knn]]) pipe3 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['classifer', rf]]) pipe4 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['classifer', gb]]) pipe5 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['classifer', logreg]]) pipe6 = Pipeline([['preprocessor', preprocessor], ['rs', RobustScaler()], ['classifer', dt]]) fit1 = pipe1.fit(X_train, y_train) fit2 = pipe2.fit(X_train, y_train) fit3 = pipe3.fit(X_train, y_train) fit4 = pipe4.fit(X_train, y_train) fit5 = pipe5.fit(X_train, y_train) fit6 
= pipe6.fit(X_train, y_train) ``` ## Naive Bayes ``` y_pred = fit1.predict(X_test) print(classification_report(y_test, y_pred, target_names=["non-smoker", "smoker"])) fig = plt.figure(1, figsize=(20, 5)) chart_1 = fig.add_subplot(121) chart_2 = fig.add_subplot(122) # Pass Fitted Model, and our test sets, see how they do plot_confusion_matrix(pipe1, X_test, y_test, normalize='true', ax=chart_1) chart_1.set_title('Confusion Matrix') plot_roc_curve(pipe1, X, y, ax=chart_2) chart_2.set_title('ROC Curve') plt.show() ``` ## KNN ``` y_pred = fit2.predict(X_test) print(classification_report(y_test, y_pred, target_names=["non-smoker", "smoker"])) fig = plt.figure(1, figsize=(20, 5)) chart_1 = fig.add_subplot(121) chart_2 = fig.add_subplot(122) # Pass Fitted Model, and our test sets, see how they do plot_confusion_matrix(pipe2, X_test, y_test, normalize='true', ax=chart_1) chart_1.set_title('Confusion Matrix') plot_roc_curve(pipe2, X, y, ax=chart_2) chart_2.set_title('ROC Curve') plt.show() ``` ## Random Forest ``` y_pred = fit3.predict(X_test) print(classification_report(y_test, y_pred, target_names=["non-smoker", "smoker"])) fig = plt.figure(1, figsize=(20, 5)) chart_1 = fig.add_subplot(121) chart_2 = fig.add_subplot(122) # Pass Fitted Model, and our test sets, see how they do plot_confusion_matrix(pipe3, X_test, y_test, normalize='true', ax=chart_1) chart_1.set_title('Confusion Matrix') plot_roc_curve(pipe3, X, y, ax=chart_2) chart_2.set_title('ROC Curve') plt.show() ``` ## Gradient Boosting ``` y_pred = fit4.predict(X_test) print(classification_report(y_test, y_pred, target_names=["non-smoker", "smoker"])) fig = plt.figure(1, figsize=(20, 5)) chart_1 = fig.add_subplot(121) chart_2 = fig.add_subplot(122) # Pass Fitted Model, and our test sets, see how they do plot_confusion_matrix(pipe4, X_test, y_test, normalize='true', ax=chart_1) chart_1.set_title('Confusion Matrix') plot_roc_curve(pipe4, X, y, ax=chart_2) chart_2.set_title('ROC Curve') plt.show() ``` ## Logistic 
Regression ``` y_pred = fit5.predict(X_test) print(classification_report(y_test, y_pred, target_names=["non-smoker", "smoker"])) fig = plt.figure(1, figsize=(20, 5)) chart_1 = fig.add_subplot(121) chart_2 = fig.add_subplot(122) # Pass Fitted Model, and our test sets, see how they do plot_confusion_matrix(pipe5, X_test, y_test, normalize='true', ax=chart_1) chart_1.set_title('Confusion Matrix') plot_roc_curve(pipe5, X, y, ax=chart_2) chart_2.set_title('ROC Curve') plt.show() ``` ## Decision Tree ``` y_pred = fit6.predict(X_test) print(classification_report(y_test, y_pred, target_names=["non-smoker", "smoker"])) fig = plt.figure(1, figsize=(20, 5)) chart_1 = fig.add_subplot(121) chart_2 = fig.add_subplot(122) # Pass Fitted Model, and our test sets, see how they do plot_confusion_matrix(pipe6, X_test, y_test, normalize='true', ax=chart_1) chart_1.set_title('Confusion Matrix') plot_roc_curve(pipe6, X, y, ax=chart_2) chart_2.set_title('ROC Curve') plt.show() from mlxtend.evaluate import paired_ttest_5x2cv t, p = paired_ttest_5x2cv(estimator1=gb, estimator2=rf, X=X_train, y=y_train, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) ```
github_jupyter
``` %pwd %cd /Users/elin/Documents/cam/PWML/HowQuicklyCanWeGetBackToThePub/tti import os import numpy as np import pandas as pd from tqdm.notebook import trange from tqdm import tqdm import importlib import pickle import tti_explorer from tti_explorer import config, utils from tti_explorer.case import simulate_case, CaseFactors from tti_explorer.contacts import EmpiricalContactsSimulator from tti_explorer.strategies import TTIFlowModel, RETURN_KEYS import matplotlib.pyplot as plt %matplotlib inline importlib.reload(tti_explorer) importlib.reload(tti_explorer.config) importlib.reload(tti_explorer.utils) importlib.reload(tti_explorer.case) importlib.reload(tti_explorer.strategies) importlib.reload(tti_explorer.contacts) def print_doc(func): print(func.__doc__) name = 'S3_test_based_TTI' rng = np.random.RandomState(0) case_config = config.get_case_config("delve") #case_config = config.get_case_config("delve_mostly_old") #case_config = config.get_case_config("delve_mostly_twenties") contacts_config = config.get_contacts_config("delve") policy_config = config.get_strategy_configs("delve", name)[name] factor_config = utils.get_sub_dictionary(policy_config, config.DELVE_CASE_FACTOR_KEYS) strategy_config = utils.get_sub_dictionary(policy_config, config.DELVE_STRATEGY_FACTOR_KEYS) def load_csv(pth): return np.loadtxt(pth, dtype=int, skiprows=1, delimiter=",") path_to_bbc_data = os.path.join("data", "processed") child_no_school = load_csv(os.path.join(path_to_bbc_data, "childnoschool.csv")) child_school = load_csv(os.path.join(path_to_bbc_data, "childschool.csv")) university = load_csv(os.path.join(path_to_bbc_data, "university.csv")) twenties = load_csv(os.path.join(path_to_bbc_data, "twenties.csv")) thirties_to_fifties = load_csv(os.path.join(path_to_bbc_data, "thirtiestofifties.csv")) fifties_to_seventies = load_csv(os.path.join(path_to_bbc_data, "fiftiestoseventies.csv")) seventy_plus = load_csv(os.path.join(path_to_bbc_data, "seventyplus.csv")) double_dose = False 
vaccine_strategy = 'gov' #Vaccine strategies are: none, all, gov, young_inc_children, # young_exc_children simulate_contacts = EmpiricalContactsSimulator(child_no_school, child_school, university, twenties, thirties_to_fifties, fifties_to_seventies, seventy_plus, double_dose, vaccine_strategy, rng) tti_model = TTIFlowModel(rng, **strategy_config) n_seperate_starting_cases = 1000 n_repetitions = 10 outputs = list() death_rates = [] for i in tqdm(range(n_seperate_starting_cases)): old_probs = [] for i in range(n_repetitions): case = simulate_case(rng, p_for_categories_continued=old_probs, **case_config) case_factors = CaseFactors.simulate_from(rng, case, **factor_config) (contacts, old_probs, death_rate) = simulate_contacts(case, double_dose, vaccine_strategy, None, **contacts_config) res = tti_model(case, contacts, case_factors) outputs.append(res) death_rates.append(death_rate) to_show = [ RETURN_KEYS.base_r, RETURN_KEYS.reduced_r ] x = pd.DataFrame(outputs).mean(0).loc[to_show].to_frame().to_numpy().flatten() base_r = x[0] effective_r = x[1] relevant_death_rate_sum = sum(n for n in death_rates if n != -1) relevant_death_rate_count = sum(1 for n in death_rates if n != -1) dr = relevant_death_rate_sum / relevant_death_rate_count print("Average Mortality Rate For Contacts: " + str(round(dr*100,2)) + "%") print("Base R: " + str(round(base_r, 3))) print("Effective R: " + str(round(effective_r, 3))) #New variant n_seperate_starting_cases = 100 n_repetitions = 10 effective_R_rates = list() base_R_values = list() death_rates = [] av_mortality_rates = [] def find_R(variant, dose, vaccine, vaccine_efficacy): R_values = list() contacts_config = config.get_contacts_config("delve") contacts_config = dict(home_sar=contacts_config.get("home_sar") * variant, work_sar=contacts_config.get("work_sar") * variant, other_sar=contacts_config.get("other_sar") * variant, period=10, asymp_factor=0.5) for i in range(n_seperate_starting_cases): old_probs = [] for i in range(n_repetitions): 
case = simulate_case(rng, p_for_categories_continued=old_probs, **case_config) case_factors = CaseFactors.simulate_from(rng, case, **factor_config) (contacts, old_probs, death_rate) = simulate_contacts(case, dose, vaccine, vaccine_efficacy, **contacts_config) res = tti_model(case, contacts, case_factors) R_values.append(res) death_rates.append(death_rate) results_list = pd.DataFrame(R_values).mean(0).loc[to_show] effective_R_rates.append(results_list.get("Effective R")) base_R_values.append(results_list.get("Base R")) relevant_death_rate_sum = sum(n for n in death_rates if n != -1) relevant_death_rate_count = sum(1 for n in death_rates if n != -1) if(relevant_death_rate_count == 0): dr = 0 else: dr = relevant_death_rate_sum / relevant_death_rate_count av_mortality_rates.append(dr) # New variant effects on R for i in tqdm(np.linspace(1, 2, 100)): find_R(i, False, 'none', None) fig, ax = plt.subplots() ax.plot(np.linspace(0, 1, 100), base_R_values, color = "red", label="Base R Rates") ax.plot(np.linspace(0, 1, 100), effective_R_rates, color = "orange", label="Effective R Rates") ax.set_xlabel("N501Y Increased transmission rate") ax.set_ylabel("R rate") ax2 = ax.twinx() ax2.set_ylabel("Mortality Rate") ax2.plot(np.linspace(0, 1, 100), av_mortality_rates, color = "blue", label="Mortality Rates") fig.tight_layout() plt.figure() plt.show() contacts_config = config.get_contacts_config("delve") base_r_rates = [] effective_r_rates = [] av_mortality_rates = [] to_show = [ RETURN_KEYS.base_r, RETURN_KEYS.reduced_r ] for immunity in tqdm(np.linspace(0,0.99,100)): n_seperate_starting_cases = 100 n_repetitions = 10 outputs = list() death_rates = [] for i in range(n_seperate_starting_cases): old_probs = [] for i in range(n_repetitions): case = simulate_case(rng, p_for_categories_continued=old_probs, **case_config) case_factors = CaseFactors.simulate_from(rng, case, **factor_config) (contacts, old_probs, death_rate) = simulate_contacts(case, None, immunity, None, 
**contacts_config) res = tti_model(case, contacts, case_factors) outputs.append(res) death_rates.append(death_rate) x = pd.DataFrame(outputs).mean(0).loc[to_show].to_frame().to_numpy().flatten() base_r_rates.append(x[0]) effective_r_rates.append(x[1]) relevant_death_rate_sum = sum(n for n in death_rates if n != -1) relevant_death_rate_count = sum(1 for n in death_rates if n != -1) if(relevant_death_rate_count == 0): dr = 0 else: dr = relevant_death_rate_sum / relevant_death_rate_count av_mortality_rates.append(dr) with open('results/immunity_br.pkl', 'wb+') as f: pickle.dump(base_r_rates, f) with open('results/immunity_er.pkl', 'wb+') as f: pickle.dump(effective_r_rates, f) with open('results/immunity_dr.pkl', 'wb+') as f: pickle.dump(av_mortality_rates, f) fig, ax = plt.subplots() ax.plot(np.linspace(0,.99,100), base_r_rates, color = "red", label="Base R Rates") ax.plot(np.linspace(0,.99,100), effective_r_rates, color = "orange", label="Effective R Rates") ax.set_xlabel("Population Immunity") ax.set_ylabel("R rate") ax2 = ax.twinx() ax2.set_ylabel("Mortality Rate") ax2.plot(np.linspace(0,.99,100), av_mortality_rates, color = "blue", label="Mortality Rates") #ax2.set_ylim(0, 0.1) fig.tight_layout() plt.figure() plt.show() # vaccine efficacy vaccine_strategy = 'all' #Vaccine strategies are: none, all, gov, young_inc_children, # young_exc_children base_r_rates = [] effective_r_rates = [] av_mortality_rates = [] to_show = [ RETURN_KEYS.base_r, RETURN_KEYS.reduced_r ] for vaccine_efficacy in tqdm(np.linspace(0.2,0.99,100)): n_seperate_starting_cases = 100 n_repetitions = 10 outputs = list() death_rates = [] for i in range(n_seperate_starting_cases): old_probs = [] for i in range(n_repetitions): case = simulate_case(rng, p_for_categories_continued=old_probs, **case_config) case_factors = CaseFactors.simulate_from(rng, case, **factor_config) (contacts, old_probs, death_rate) = simulate_contacts(case, False, vaccine_strategy, vaccine_efficacy, **contacts_config) res = 
tti_model(case, contacts, case_factors) outputs.append(res) death_rates.append(death_rate) x = pd.DataFrame(outputs).mean(0).loc[to_show].to_frame().to_numpy().flatten() base_r_rates.append(x[0]) effective_r_rates.append(x[1]) relevant_death_rate_sum = sum(n for n in death_rates if n != -1) relevant_death_rate_count = sum(1 for n in death_rates if n != -1) if(relevant_death_rate_count == 0): dr = 0 else: dr = relevant_death_rate_sum / relevant_death_rate_count av_mortality_rates.append(dr) with open('results/efficacy_br.pkl', 'wb+') as f: pickle.dump(base_r_rates, f) with open('results/efficacy_er.pkl', 'wb+') as f: pickle.dump(effective_r_rates, f) with open('results/efficacy_dr.pkl', 'wb+') as f: pickle.dump(av_mortality_rates, f) fig, ax = plt.subplots() ax.plot(np.linspace(0.2,0.99,100), base_r_rates, color = "red", label="Base R Rates") ax.plot(np.linspace(0.2,0.99,100), effective_r_rates, color = "orange", label="Effective R Rates") ax.set_xlabel("Vaccine Efficacy") ax.set_ylabel("R rate") ax2 = ax.twinx() ax2.set_ylabel("Mortality Rate") ax2.plot(np.linspace(0.2,0.99,100), av_mortality_rates, color = "blue", label="Mortality Rates") #ax2.set_ylim(0, 0.1) fig.tight_layout() plt.figure() plt.show() configs = [(False, 'none'), (True, '30s_prioritised'), (True, 'equal'), (True, 'gov'), (True, 'young_inc_children'), (True, 'young_exc_children'), (False, '30s_prioritised'), (False, 'equal'), (False, 'gov'), (False, 'young_inc_children'), (False, 'young_exc_children')] to_show = [ RETURN_KEYS.base_r, RETURN_KEYS.reduced_r ] f = open('results/vaccine_methods2.txt', 'w+') for c in configs: double_dose = c[0] vaccine_strategy = c[1] n_seperate_starting_cases = 200 n_repetitions = 20 outputs = list() death_rates = [] for i in range(n_seperate_starting_cases): old_probs = [] for i in range(n_repetitions): case = simulate_case(rng, p_for_categories_continued=old_probs, **case_config) case_factors = CaseFactors.simulate_from(rng, case, **factor_config) (contacts, 
old_probs, death_rate) = simulate_contacts(case, double_dose, vaccine_strategy, None, **contacts_config) res = tti_model(case, contacts, case_factors) outputs.append(res) death_rates.append(death_rate) x = pd.DataFrame(outputs).mean(0).loc[to_show].to_frame().to_numpy().flatten() print() l1 = ("CONFIG: (Double Dose: " + str(c[0]) + ", Strategy: " + str(c[1]) + ")\n") l2 = ("Base R Rate: " + str(round(x[0], 2)) + "\n") l3 = ("Effective R Rate: " + str(round(x[1], 2)) + "\n") relevant_death_rate_sum = sum(n for n in death_rates if n != -1) relevant_death_rate_count = sum(1 for n in death_rates if n != -1) dr = relevant_death_rate_sum / relevant_death_rate_count l4 = ("Mortality Rate: " + str(round(dr, 4)) + "\n\n") f.writelines([l1, l2, l3, l4]) print(l1) print(l2) print(l3) print(l4) f.close() to_show = [ RETURN_KEYS.base_r, RETURN_KEYS.reduced_r ] def return_base_r_rate(vaccine_distribution): contacts_config = config.get_contacts_config("delve") n_seperate_starting_cases = 40 n_repetitions = 5 outputs = list() death_rates = [] for i in range(n_seperate_starting_cases): old_probs = [] for i in range(n_repetitions): case = simulate_case(rng, p_for_categories_continued=old_probs, **case_config) case_factors = CaseFactors.simulate_from(rng, case, **factor_config) (contacts, old_probs, death_rate) = simulate_contacts(case, double_dose, None, vaccine_distribution, None, **contacts_config) res = tti_model(case, contacts, case_factors) outputs.append(res) death_rates.append(death_rate) x = pd.DataFrame(outputs).mean(0).loc[to_show].to_frame().to_numpy().flatten() return x[0] def return_effective_r_rate(vaccine_distribution): n_seperate_starting_cases = 100 n_repetitions = 10 outputs = list() death_rates = [] for i in range(n_seperate_starting_cases): old_probs = [] for i in range(n_repetitions): case = simulate_case(rng, p_for_categories_continued=old_probs, **case_config) case_factors = CaseFactors.simulate_from(rng, case, **factor_config) (contacts, old_probs, 
death_rate) = simulate_contacts(case, double_dose, None, vaccine_distribution, None, **contacts_config) res = tti_model(case, contacts, case_factors) outputs.append(res) death_rates.append(death_rate) x = pd.DataFrame(outputs).mean(0).loc[to_show].to_frame().to_numpy().flatten() return x[1] def return_mortality_rate(vaccine_distribution): n_seperate_starting_cases = 500 n_repetitions = 10 outputs = list() death_rates = [] for i in range(n_seperate_starting_cases): old_probs = [] for i in range(n_repetitions): case = simulate_case(rng, p_for_categories_continued=old_probs, **case_config) case_factors = CaseFactors.simulate_from(rng, case, **factor_config) (contacts, old_probs, death_rate) = simulate_contacts(case, double_dose, None, vaccine_distribution, None, **contacts_config) res = tti_model(case, contacts, case_factors) outputs.append(res) death_rates.append(death_rate) x = pd.DataFrame(outputs).mean(0).loc[to_show].to_frame().to_numpy().flatten() relevant_death_rate_sum = sum(n for n in death_rates if n != -1) relevant_death_rate_count = sum(1 for n in death_rates if n != -1) dr = relevant_death_rate_sum / relevant_death_rate_count return dr from emukit.core import ContinuousParameter, ParameterSpace from emukit.sensitivity.monte_carlo import ModelFreeMonteCarloSensitivity np.random.seed(10) def test_base_r(x): to_return = np.zeros(x.shape[0]) for i, y in enumerate(x): to_return[i] = return_base_r_rate(y) return to_return def test_effective_r(x): to_return = np.zeros(x.shape[0]) for i, y in enumerate(x): to_return[i] = return_effective_r_rate(y) return to_return def test_mortality(x): to_return = np.zeros(x.shape[0]) for i, y in enumerate(x): to_return[i] = return_mortality_rate(y) return to_return variable_domain = (0, 1) space = ParameterSpace([ContinuousParameter('child no school', variable_domain[0], variable_domain[1]), ContinuousParameter('children school', variable_domain[0], variable_domain[1]), ContinuousParameter('uni', variable_domain[0], 
variable_domain[1]), ContinuousParameter('18-30 no uni', variable_domain[0], variable_domain[1]), ContinuousParameter('30-50', variable_domain[0], variable_domain[1]), ContinuousParameter('50-70', variable_domain[0], variable_domain[1]), ContinuousParameter('70+', variable_domain[0], variable_domain[1])]) n = 1000 # Number of MC samples sens = ModelFreeMonteCarloSensitivity(test_base_r, space) main_effects_base_r, total_effects_base_r, _ = sens.compute_effects(num_monte_carlo_points = n) print(total_effects_base_r) print() print(main_effects_base_r) variable_domain = (0, 1) space = ParameterSpace([ContinuousParameter('child no school', variable_domain[0], variable_domain[1]), ContinuousParameter('children school', variable_domain[0], variable_domain[1]), ContinuousParameter('uni', variable_domain[0], variable_domain[1]), ContinuousParameter('18-30 no uni', variable_domain[0], variable_domain[1]), ContinuousParameter('30-50', variable_domain[0], variable_domain[1]), ContinuousParameter('50-70', variable_domain[0], variable_domain[1]), ContinuousParameter('70+', variable_domain[0], variable_domain[1])]) n = 100 # Number of MC samples sens = ModelFreeMonteCarloSensitivity(test_effective_r, space) main_effects_effective_r, total_effects_effective_r, _ = sens.compute_effects(num_monte_carlo_points = n) print(total_effects_effective_r) print() print(main_effects_effective_r) variable_domain = (0, 1) space = ParameterSpace([ContinuousParameter('child no school', variable_domain[0], variable_domain[1]), ContinuousParameter('children school', variable_domain[0], variable_domain[1]), ContinuousParameter('uni', variable_domain[0], variable_domain[1]), ContinuousParameter('18-30 no uni', variable_domain[0], variable_domain[1]), ContinuousParameter('30-50', variable_domain[0], variable_domain[1]), ContinuousParameter('50-70', variable_domain[0], variable_domain[1]), ContinuousParameter('70+', variable_domain[0], variable_domain[1])]) n = 100 # Number of MC samples sens = 
ModelFreeMonteCarloSensitivity(test_mortality, space) main_effects_mortality, total_effects_mortality, _ = sens.compute_effects(num_monte_carlo_points = n) print(total_effects_mortality) print() print(main_effects_mortality) ```
github_jupyter
```
%matplotlib inline
```


# Compute source space connectivity and visualize it using a circular graph


This example computes the all-to-all connectivity between 68 regions in
source space based on dSPM inverse solutions and a FreeSurfer cortical
parcellation. The connectivity is visualized using a circular graph which
is ordered based on the locations of the regions.

```
# Authors: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#          Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#          Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt

import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.connectivity import spectral_connectivity
from mne.viz import circular_layout, plot_connectivity_circle

print(__doc__)

# Paths into the MNE sample dataset (downloaded on first use).
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'

# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)

# Add a bad channel
raw.info['bads'] += ['MEG 2443']

# Pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
                       exclude='bads')

# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0),
                    reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6))

# Compute inverse solution and for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0  # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
                            pick_ori="normal", return_generator=True)

# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels = mne.read_labels_from_annot('sample', parc='aparc',
                                    subjects_dir=subjects_dir)
label_colors = [label.color for label in labels]

# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
                                         return_generator=True)

# Now we are ready to compute the connectivity in the alpha band. Notice
# from the status messages, how mne-python: 1) reads an epoch from the raw
# file, 2) applies SSP and baseline correction, 3) computes the inverse to
# obtain a source estimate, 4) averages the source estimate to obtain a
# time series for each label, 5) includes the label time series in the
# connectivity computation, and then moves to the next epoch. This
# behaviour is because we are using generators and allows us to
# compute connectivity in computationally efficient manner where the amount
# of memory (RAM) needed is independent from the number of epochs.
fmin = 8.
fmax = 13.
sfreq = raw.info['sfreq']  # the sampling frequency
con_methods = ['pli', 'wpli2_debiased']
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
    label_ts, method=con_methods, mode='multitaper', sfreq=sfreq, fmin=fmin,
    fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1)

# con is a 3D array, get the connectivity for the first (and only) freq. band
# for each method
con_res = dict()
for method, c in zip(con_methods, con):
    con_res[method] = c[:, :, 0]

# Now, we visualize the connectivity using a circular graph layout
# First, we reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]

lh_labels = [name for name in label_names if name.endswith('lh')]

# Get the y-location of the label
# (mean y of the label's vertex positions, i.e. anterior-posterior axis)
label_ypos = list()
for name in lh_labels:
    idx = label_names.index(name)
    ypos = np.mean(labels[idx].pos[:, 1])
    ypos = np.mean(labels[idx].pos[:, 1]) if False else ypos  # keep original value
    label_ypos.append(ypos)

# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]

# For the right hemi
# (mirror each left label name: strip the 'lh' suffix, append 'rh')
rh_labels = [label[:-2] + 'rh' for label in lh_labels]

# Save the plot order and create a circular layout
node_order = list()
node_order.extend(lh_labels[::-1])  # reverse the order
node_order.extend(rh_labels)

node_angles = circular_layout(label_names, node_order, start_pos=90,
                              group_boundaries=[0, len(label_names) / 2])

# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 300 strongest connections.
plot_connectivity_circle(con_res['pli'], label_names, n_lines=300,
                         node_angles=node_angles, node_colors=label_colors,
                         title='All-to-All Connectivity left-Auditory '
                               'Condition (PLI)')
plt.savefig('circle.png', facecolor='black')

# Plot connectivity for both methods in the same plot
fig = plt.figure(num=None, figsize=(8, 4), facecolor='black')
no_names = [''] * len(label_names)
for ii, method in enumerate(con_methods):
    plot_connectivity_circle(con_res[method], no_names, n_lines=300,
                             node_angles=node_angles, node_colors=label_colors,
                             title=method, padding=0, fontsize_colorbar=6,
                             fig=fig, subplot=(1, 2, ii + 1))

plt.show()
```
github_jupyter
#Mock Data There have been some models created already around some simplistic data sets, I want to create a more complex mock dataset to hopefully give a test model a run for its money. The data points are based off the intake survey, so if anyone has any suggestions on that, I'm all ears! ^_^ I was thinking about using the following features for the model to consider: - skills - mentee_load - career_capable - availability All of the features of note for matching are housed within the Mentor Class. Let me know if anyone else can think of any other relevant data points we should be using in our matching criteria. ## Update After our latest Stakeholder meeting, we decided to clean up the mock data a little bit and remove some previous features and add some different ones. The mock data now has a more streamline approach for the model that will be coming. ``` # The following is basically source data, the real fun starts in the next code cell male_first_name = ( "Liam", "Noah", "Oliver", "Elijah", "William", "James", "Benjamin", "Lucas", "Henry", "Alexander", "Mason", "Michael", "Ethan", "Daniel", "Jacob", "Logan", "Jackson", "Levi", "Sebastian", "Mateo", "Jack", "Owen", "Theodore", "Aiden", "Samuel", "Joseph", "John", "David", "Wyatt", "Matthew", "Luke", "Asher", "Carter", "Julian", "Grayson", "Leo", "Jayden", "Gabriel", "Isaac", "Lincoln", "Anthony", "Hudson", "Dylan", "Ezra", "Thomas", "Charles", "Christopher", "Jaxon", "Maverick", "Josiah", "Isaiah", "Andrew", "Elias", "Joshua", "Nathan", "Caleb", "Ryan", "Adrian", "Miles", "Eli", "Nolan", "Christian", "Aaron", "Cameron", "Ezekiel", "Colton", "Luca", "Landon", "Hunter", "Jonathan", "Santiago", "Axel", "Easton", "Cooper", "Jeremiah", "Angel", "Roman", "Connor", "Jameson", "Robert", "Greyson", "Jordan", "Ian", "Carson", "Jaxson", "Leonardo", "Nicholas", "Dominic", "Austin", "Everett", "Brooks", "Xavier", "Kai", "Jose", "Parker", "Adam", "Jace", "Wesley", "Kayden", "Silas", "Bennett", "Declan", "Waylon", "Weston", 
"Evan", "Emmett", "Micah", "Ryder", "Beau", "Damian", "Brayden", "Gael", "Rowan", "Harrison", "Bryson", "Sawyer", "Amir", "Kingston", "Jason", "Giovanni", "Vincent", "Ayden", "Chase", "Myles", "Diego", "Nathaniel", "Legend", "Jonah", "River", "Tyler", "Cole", "Braxton", "George", "Milo", "Zachary", "Ashton", "Luis", "Jasper", "Kaiden", "Adriel", "Gavin", "Bentley", "Calvin", "Zion", "Juan", "Maxwell", "Max", "Ryker", "Carlos", "Emmanuel", "Jayce", "Lorenzo", "Ivan", "Jude", "August", "Kevin", "Malachi", "Elliott", "Rhett", "Archer", "Karter", "Arthur", "Luka", "Elliot", "Thiago", "Brandon", "Camden", "Justin", "Jesus", "Maddox", "King", "Theo", "Enzo", "Matteo", "Emiliano", "Dean", "Hayden", "Finn", "Brody", "Antonio", "Abel", "Alex", "Tristan", "Graham", "Zayden", "Judah", "Xander", "Miguel", "Atlas", "Messiah", "Barrett", "Tucker", "Timothy", "Alan", "Edward", "Leon", "Dawson", "Eric", "Ace", "Victor", "Abraham", "Nicolas", "Jesse", "Charlie", "Patrick", "Walker", "Joel", "Richard", "Beckett", "Blake", "Alejandro", "Avery", "Grant", "Peter", "Oscar", "Matias", "Amari", "Lukas", "Andres", "Arlo", "Colt", "Adonis", "Kyrie", "Steven", "Felix", "Preston", "Marcus", "Holden", "Emilio", "Remington", "Jeremy", "Kaleb", "Brantley", "Bryce", "Mark", "Knox", "Israel", "Phoenix", "Kobe", "Nash", "Griffin", "Caden", "Kenneth", "Kyler", "Hayes", "Jax", "Rafael", "Beckham", "Javier", "Maximus", "Simon", "Paul", "Omar", "Kaden", "Kash", "Lane", "Bryan", "Riley", "Zane", "Louis", "Aidan", "Paxton", "Maximiliano", "Karson", "Cash", "Cayden", "Emerson", "Tobias", "Ronan", "Brian", "Dallas", "Bradley", "Jorge", "Walter", "Josue", "Khalil", "Damien", "Jett", "Kairo", "Zander", "Andre", "Cohen", "Crew", "Hendrix", "Colin", "Chance", "Malakai", "Clayton", "Daxton", "Malcolm", "Lennox", "Martin", "Jaden", "Kayson", "Bodhi", "Francisco", "Cody", "Erick", "Kameron", "Atticus", "Dante", "Jensen", "Cruz", "Finley", "Brady", "Joaquin", "Anderson", "Gunner", "Muhammad", "Zayn", "Derek", 
"Raymond", "Kyle", "Angelo", "Reid", "Spencer", "Nico", "Jaylen", "Jake", "Prince", "Manuel", "Ali", "Gideon", "Stephen", "Ellis", "Orion", "Rylan", "Eduardo", "Mario", "Rory", "Cristian", "Odin", "Tanner", "Julius", "Callum", "Sean", "Kane", "Ricardo", "Travis", "Wade", "Warren", "Fernando", "Titus", "Leonel", "Edwin", "Cairo", "Corbin", "Dakota", "Ismael", "Colson", "Killian", "Major", "Tate", "Gianni", "Elian", "Remy", "Lawson", "Niko", "Nasir", "Kade", "Armani", "Ezequiel", "Marshall", "Hector", "Desmond", "Kason", "Garrett", "Jared", "Cyrus", "Russell", "Cesar", "Tyson", "Malik", "Donovan", "Jaxton", "Cade", "Romeo", "Nehemiah", "Sergio", "Iker", "Caiden", "Jay", "Pablo", "Devin", "Jeffrey", "Otto", "Kamari", "Ronin", "Johnny", "Clark", "Ari", "Marco", "Edgar", "Bowen", "Jaiden", "Grady", "Zayne", "Sullivan", "Jayceon", "Sterling", "Andy", "Conor", "Raiden", "Royal", "Royce", "Solomon", "Trevor", "Winston", "Emanuel", "Finnegan", "Pedro", "Luciano", "Harvey", "Franklin", "Noel", "Troy", "Princeton", "Johnathan", "Erik", "Fabian", "Oakley", "Rhys", "Porter", "Hugo", "Frank", "Damon", "Kendrick", "Mathias", "Milan", "Peyton", "Wilder", "Callan", "Gregory", "Seth", "Matthias", "Briggs", "Ibrahim", "Roberto", "Conner", "Quinn", "Kashton", "Sage", "Santino", "Kolton", "Alijah", "Dominick", "Zyaire", "Apollo", "Kylo", "Reed", "Philip", "Kian", "Shawn", "Kaison", "Leonidas", "Ayaan", "Lucca", "Memphis", "Ford", "Baylor", "Kyson", "Uriel", "Allen", "Collin", "Ruben", "Archie", "Dalton", "Esteban", "Adan", "Forrest", "Alonzo", "Isaias", "Leland", "Jase", "Dax", "Kasen", "Gage", "Kamden", "Marcos", "Jamison", "Francis", "Hank", "Alexis", "Tripp", "Frederick", "Jonas", "Stetson", "Cassius", "Izaiah", "Eden", "Maximilian", "Rocco", "Tatum", "Keegan", "Aziel", "Moses", "Bruce", "Lewis", "Braylen", "Omari", "Mack", "Augustus", "Enrique", "Armando", "Pierce", "Moises", "Asa", "Shane", "Emmitt", "Soren", "Dorian", "Keanu", "Zaiden", "Raphael", "Deacon", "Abdiel", "Kieran", 
"Phillip", "Ryland", "Zachariah", "Casey", "Zaire", "Albert", "Baker", "Corey", "Kylan", "Denver", "Gunnar", "Jayson", "Drew", "Callen", "Jasiah", "Drake", "Kannon", "Braylon", "Sonny", "Bo", "Moshe", "Huxley", "Quentin", "Rowen", "Santana", "Cannon", "Kenzo", "Wells", "Julio", "Nikolai", "Conrad", "Jalen", "Makai", "Benson", "Derrick", "Gerardo", "Davis", "Abram", "Mohamed", "Ronald", "Raul", "Arjun", "Dexter", "Kaysen", "Jaime", "Scott", "Lawrence", "Ariel", "Skyler", "Danny", "Roland", "Chandler", "Yusuf", "Samson", "Case", "Zain", "Roy", "Rodrigo", "Sutton", "Boone", "Saint", "Saul", "Jaziel", "Hezekiah", "Alec", "Arturo", "Jamari", "Jaxtyn", "Julien", "Koa", "Reece", "Landen", "Koda", "Darius", "Sylas", "Ares", "Kyree", "Boston", "Keith", "Taylor", "Johan", "Edison", "Sincere", "Watson", "Jerry", "Nikolas", "Quincy", "Shepherd", "Brycen", "Marvin", "Dariel", "Axton", "Donald", "Bodie", "Finnley", "Onyx", "Rayan", "Raylan", "Brixton", "Colby", "Shiloh", "Valentino", "Layton", "Trenton", "Landyn", "Alessandro", "Ahmad", "Gustavo", "Ledger", "Ridge", "Ander", "Ahmed", "Kingsley", "Issac", "Mauricio", "Tony", "Leonard", "Mohammed", "Uriah", "Duke", "Kareem", "Lucian", "Marcelo", "Aarav", "Leandro", "Reign", "Clay", "Kohen", "Dennis", "Samir", "Ermias", "Otis", "Emir", "Nixon", "Ty", "Sam", "Fletcher", "Wilson", "Dustin", "Hamza", "Bryant", "Flynn", "Lionel", "Mohammad", "Cason", "Jamir", "Aden", "Dakari", "Justice", "Dillon", "Layne", "Zaid", "Alden", "Nelson", "Devon", "Titan", "Chris", "Khari", "Zeke", "Noe", "Alberto", "Roger", "Brock", "Rex", "Quinton", "Alvin", "Cullen", "Azariah", "Harlan", "Kellan", "Lennon", "Marcel", "Keaton", "Morgan", "Ricky", "Trey", "Karsyn", "Langston", "Miller", "Chaim", "Salvador", "Amias", "Tadeo", "Curtis", "Lachlan", "Amos", "Anakin", "Krew", "Tomas", "Jefferson", "Yosef", "Bruno", "Korbin", "Augustine", "Cayson", "Mathew", "Vihaan", "Jamie", "Clyde", "Brendan", "Jagger", "Carmelo", "Harry", "Nathanael", "Mitchell", "Darren", 
"Ray", "Jedidiah", "Jimmy", "Lochlan", "Bellamy", "Eddie", "Rayden", "Reese", "Stanley", "Joe", "Houston", "Douglas", "Vincenzo", "Casen", "Emery", "Joziah", "Leighton", "Marcellus", "Atreus", "Aron", "Hugh", "Musa", "Tommy", "Alfredo", "Junior", "Neil", "Westley", "Banks", "Eliel", "Melvin", "Maximo", "Briar", "Colten", "Lance", "Nova", "Trace", "Axl", "Ramon", "Vicente", "Brennan", "Caspian", "Remi", "Deandre", "Legacy", "Lee", "Valentin", "Ben", "Louie", "Westin", "Wayne", "Benicio", "Grey", "Zayd", "Gatlin", "Mekhi", "Orlando", "Bjorn", "Harley", "Alonso", "Rio", "Aldo", "Byron", "Eliseo", "Ernesto", "Talon", "Thaddeus", "Brecken", "Kace", "Kellen", "Enoch", "Kiaan", "Lian", "Creed", "Rohan", "Callahan", "Jaxxon", "Ocean", "Crosby", "Dash", "Gary", "Mylo", "Ira", "Magnus", "Salem", "Abdullah", "Kye", "Tru", "Forest", "Jon", "Misael", "Madden", "Braden", "Carl", "Hassan", "Emory", "Kristian", "Alaric", "Ambrose", "Dario", "Allan", "Bode", "Boden", "Juelz", "Kristopher", "Genesis", "Idris", "Ameer", "Anders", "Darian", "Kase", "Aryan", "Dane", "Guillermo", "Elisha", "Jakobe", "Thatcher", "Eugene", "Ishaan", "Larry", "Wesson", "Yehuda", "Alvaro", "Bobby", "Bronson", "Dilan", "Kole", "Kyro", "Tristen", "Blaze", "Brayan", "Jadiel", "Kamryn", "Demetrius", "Maurice", "Arian", "Kabir", "Rocky", "Rudy", "Randy", "Rodney", "Yousef", "Felipe", "Robin", "Aydin", "Dior", "Kaiser", "Van", "Brodie", "London", "Eithan", "Stefan", "Ulises", "Camilo", "Branson", "Jakari", "Judson", "Yahir", "Zavier", "Damari", "Jakob", "Jaxx", "Bentlee", "Cain", "Niklaus", "Rey", "Zahir", "Aries", "Blaine", "Kyng", "Castiel", "Henrik", "Joey", "Khalid", "Bear", "Graysen", "Jair", "Kylen", "Darwin", "Alfred", "Ayan", "Kenji", "Zakai", "Avi", "Cory", "Fisher", "Jacoby", "Osiris", "Harlem", "Jamal", "Santos", "Wallace", "Brett", "Fox", "Leif", "Maison", "Reuben", "Adler", "Zev", "Calum", "Kelvin", "Zechariah", "Bridger", "Mccoy", "Seven", "Shepard", "Azrael", "Leroy", "Terry", "Harold", "Mac", 
"Mordechai", "Ahmir", "Cal", "Franco", "Trent", "Blaise", "Coen", "Dominik", "Marley", "Davion", "Jeremias", "Riggs", "Jones", "Will", "Damir", "Dangelo", "Canaan", "Dion", "Jabari", "Landry", "Salvatore", "Kody", "Hakeem", "Truett", "Gerald", "Lyric", "Gordon", "Jovanni", "Kamdyn", "Alistair", "Cillian", "Foster", "Terrance", "Murphy", "Zyair", "Cedric", "Rome", "Abner", "Colter", "Dayton", "Jad", "Xzavier", "Rene", "Vance", "Duncan", "Frankie", "Bishop", "Davian", "Everest", "Heath", "Jaxen", "Marlon", "Maxton", "Reginald", "Harris", "Jericho", "Keenan", "Korbyn", "Wes", "Eliezer", "Jeffery", "Kalel", "Kylian", "Turner", "Willie", "Rogelio", "Ephraim", ) female_first_name = ( "Olivia", "Emma", "Ava", "Charlotte", "Sophia", "Amelia", "Isabella", "Mia", "Evelyn", "Harper", "Camila", "Gianna", "Abigail", "Luna", "Ella", "Elizabeth", "Sofia", "Emily", "Avery", "Mila", "Scarlett", "Eleanor", "Madison", "Layla", "Penelope", "Aria", "Chloe", "Grace", "Ellie", "Nora", "Hazel", "Zoey", "Riley", "Victoria", "Lily", "Aurora", "Violet", "Nova", "Hannah", "Emilia", "Zoe", "Stella", "Everly", "Isla", "Leah", "Lillian", "Addison", "Willow", "Lucy", "Paisley", "Natalie", "Naomi", "Eliana", "Brooklyn", "Elena", "Aubrey", "Claire", "Ivy", "Kinsley", "Audrey", "Maya", "Genesis", "Skylar", "Bella", "Aaliyah", "Madelyn", "Savannah", "Anna", "Delilah", "Serenity", "Caroline", "Kennedy", "Valentina", "Ruby", "Sophie", "Alice", "Gabriella", "Sadie", "Ariana", "Allison", "Hailey", "Autumn", "Nevaeh", "Natalia", "Quinn", "Josephine", "Sarah", "Cora", "Emery", "Samantha", "Piper", "Leilani", "Eva", "Everleigh", "Madeline", "Lydia", "Jade", "Peyton", "Brielle", "Adeline", "Vivian", "Rylee", "Clara", "Raelynn", "Melanie", "Melody", "Julia", "Athena", "Maria", "Liliana", "Hadley", "Arya", "Rose", "Reagan", "Eliza", "Adalynn", "Kaylee", "Lyla", "Mackenzie", "Alaia", "Isabelle", "Charlie", "Arianna", "Mary", "Remi", "Margaret", "Iris", "Parker", "Ximena", "Eden", "Ayla", "Kylie", "Elliana", 
"Josie", "Katherine", "Faith", "Alexandra", "Eloise", "Adalyn", "Amaya", "Jasmine", "Amara", "Daisy", "Reese", "Valerie", "Brianna", "Cecilia", "Andrea", "Summer", "Valeria", "Norah", "Ariella", "Esther", "Ashley", "Emerson", "Aubree", "Isabel", "Anastasia", "Ryleigh", "Khloe", "Taylor", "Londyn", "Lucia", "Emersyn", "Callie", "Sienna", "Blakely", "Kehlani", "Genevieve", "Alina", "Bailey", "Juniper", "Maeve", "Molly", "Harmony", "Georgia", "Magnolia", "Catalina", "Freya", "Juliette", "Sloane", "June", "Sara", "Ada", "Kimberly", "River", "Ember", "Juliana", "Aliyah", "Millie", "Brynlee", "Teagan", "Morgan", "Jordyn", "London", "Alaina", "Olive", "Rosalie", "Alyssa", "Ariel", "Finley", "Arabella", "Journee", "Hope", "Leila", "Alana", "Gemma", "Vanessa", "Gracie", "Noelle", "Marley", "Elise", "Presley", "Kamila", "Zara", "Amy", "Kayla", "Payton", "Blake", "Ruth", "Alani", "Annabelle", "Sage", "Aspen", "Laila", "Lila", "Rachel", "Trinity", "Daniela", "Alexa", "Lilly", "Lauren", "Elsie", "Margot", "Adelyn", "Zuri", "Brooke", "Sawyer", "Lilah", "Lola", "Selena", "Mya", "Sydney", "Diana", "Ana", "Vera", "Alayna", "Nyla", "Elaina", "Rebecca", "Angela", "Kali", "Alivia", "Raegan", "Rowan", "Phoebe", "Camilla", "Joanna", "Malia", "Vivienne", "Dakota", "Brooklynn", "Evangeline", "Camille", "Jane", "Nicole", "Catherine", "Jocelyn", "Julianna", "Lena", "Lucille", "Mckenna", "Paige", "Adelaide", "Charlee", "Mariana", "Myla", "Mckenzie", "Tessa", "Miriam", "Oakley", "Kailani", "Alayah", "Amira", "Adaline", "Phoenix", "Milani", "Annie", "Lia", "Angelina", "Harley", "Cali", "Maggie", "Hayden", "Leia", "Fiona", "Briella", "Journey", "Lennon", "Saylor", "Jayla", "Kaia", "Thea", "Adriana", "Mariah", "Juliet", "Oaklynn", "Kiara", "Alexis", "Haven", "Aniyah", "Delaney", "Gracelynn", "Kendall", "Winter", "Lilith", "Logan", "Amiyah", "Evie", "Alexandria", "Gracelyn", "Gabriela", "Sutton", "Harlow", "Madilyn", "Makayla", "Evelynn", "Gia", "Nina", "Amina", "Giselle", "Brynn", "Blair", 
"Amari", "Octavia", "Michelle", "Talia", "Demi", "Alaya", "Kaylani", "Izabella", "Fatima", "Tatum", "Makenzie", "Lilliana", "Arielle", "Palmer", "Melissa", "Willa", "Samara", "Destiny", "Dahlia", "Celeste", "Ainsley", "Rylie", "Reign", "Laura", "Adelynn", "Gabrielle", "Remington", "Wren", "Brinley", "Amora", "Lainey", "Collins", "Lexi", "Aitana", "Alessandra", "Kenzie", "Raelyn", "Elle", "Everlee", "Haisley", "Hallie", "Wynter", "Daleyza", "Gwendolyn", "Paislee", "Ariyah", "Veronica", "Heidi", "Anaya", "Cataleya", "Kira", "Avianna", "Felicity", "Aylin", "Miracle", "Sabrina", "Lana", "Ophelia", "Elianna", "Royalty", "Madeleine", "Esmeralda", "Joy", "Kalani", "Esme", "Jessica", "Leighton", "Ariah", "Makenna", "Nylah", "Viviana", "Camryn", "Cassidy", "Dream", "Luciana", "Maisie", "Stevie", "Kate", "Lyric", "Daniella", "Alicia", "Daphne", "Frances", "Charli", "Raven", "Paris", "Nayeli", "Serena", "Heaven", "Bianca", "Helen", "Hattie", "Averie", "Mabel", "Selah", "Allie", "Marlee", "Kinley", "Regina", "Carmen", "Jennifer", "Jordan", "Alison", "Stephanie", "Maren", "Kayleigh", "Angel", "Annalise", "Jacqueline", "Braelynn", "Emory", "Rosemary", "Scarlet", "Amanda", "Danielle", "Emelia", "Ryan", "Carolina", "Astrid", "Kensley", "Shiloh", "Maci", "Francesca", "Rory", "Celine", "Kamryn", "Zariah", "Liana", "Poppy", "Maliyah", "Keira", "Skyler", "Noa", "Skye", "Nadia", "Addilyn", "Rosie", "Eve", "Sarai", "Edith", "Jolene", "Maddison", "Meadow", "Charleigh", "Matilda", "Elliott", "Madelynn", "Holly", "Leona", "Azalea", "Katie", "Mira", "Ari", "Kaitlyn", "Danna", "Cameron", "Kyla", "Bristol", "Kora", "Armani", "Nia", "Malani", "Dylan", "Remy", "Maia", "Dior", "Legacy", "Alessia", "Shelby", "Maryam", "Sylvia", "Yaretzi", "Lorelei", "Madilynn", "Abby", "Helena", "Jimena", "Elisa", "Renata", "Amber", "Aviana", "Carter", "Emmy", "Haley", "Alondra", "Elaine", "Erin", "April", "Emely", "Imani", "Kennedi", "Lorelai", "Hanna", "Kelsey", "Aurelia", "Colette", "Jaliyah", "Kylee", 
"Macie", "Aisha", "Dorothy", "Charley", "Kathryn", "Adelina", "Adley", "Monroe", "Sierra", "Ailani", "Miranda", "Mikayla", "Alejandra", "Amirah", "Jada", "Jazlyn", "Jenna", "Jayleen", "Beatrice", "Kendra", "Lyra", "Nola", "Emberly", "Mckinley", "Myra", "Katalina", "Antonella", "Zelda", "Alanna", "Amaia", "Priscilla", "Briar", "Kaliyah", "Itzel", "Oaklyn", "Alma", "Mallory", "Novah", "Amalia", "Fernanda", "Alia", "Angelica", "Elliot", "Justice", "Mae", "Cecelia", "Gloria", "Ariya", "Virginia", "Cheyenne", "Aleah", "Jemma", "Henley", "Meredith", "Leyla", "Lennox", "Ensley", "Zahra", "Reina", "Frankie", "Lylah", "Nalani", "Reyna", "Saige", "Ivanna", "Aleena", "Emerie", "Ivory", "Leslie", "Alora", "Ashlyn", "Bethany", "Bonnie", "Sasha", "Xiomara", "Salem", "Adrianna", "Dayana", "Clementine", "Karina", "Karsyn", "Emmie", "Julie", "Julieta", "Briana", "Carly", "Macy", "Marie", "Oaklee", "Christina", "Malaysia", "Ellis", "Irene", "Anne", "Anahi", "Mara", "Rhea", "Davina", "Dallas", "Jayda", "Mariam", "Skyla", "Siena", "Elora", "Marilyn", "Jazmin", "Megan", "Rosa", "Savanna", "Allyson", "Milan", "Coraline", "Johanna", "Melany", "Chelsea", "Michaela", "Melina", "Angie", "Cassandra", "Yara", "Kassidy", "Liberty", "Lilian", "Avah", "Anya", "Laney", "Navy", "Opal", "Amani", "Zaylee", "Mina", "Sloan", "Romina", "Ashlynn", "Aliza", "Liv", "Malaya", "Blaire", "Janelle", "Kara", "Analia", "Hadassah", "Hayley", "Karla", "Chaya", "Cadence", "Kyra", "Alena", "Ellianna", "Katelyn", "Kimber", "Laurel", "Lina", "Capri", "Braelyn", "Faye", "Kamiyah", "Kenna", "Louise", "Calliope", "Kaydence", "Nala", "Tiana", "Aileen", "Sunny", "Zariyah", "Milana", "Giuliana", "Eileen", "Elodie", "Rayna", "Monica", "Galilea", "Journi", "Lara", "Marina", "Aliana", "Harmoni", "Jamie", "Holland", "Emmalyn", "Lauryn", "Chanel", "Tinsley", "Jessie", "Lacey", "Elyse", "Janiyah", "Jolie", "Ezra", "Marleigh", "Roselyn", "Lillie", "Louisa", "Madisyn", "Penny", "Kinslee", "Treasure", "Zaniyah", "Estella", 
"Jaylah", "Khaleesi", "Alexia", "Dulce", "Indie", "Maxine", "Waverly", "Giovanna", "Miley", "Saoirse", "Estrella", "Greta", "Rosalia", "Mylah", "Teresa", "Bridget", "Kelly", "Adalee", "Aubrie", "Lea", "Harlee", "Anika", "Itzayana", "Hana", "Kaisley", "Mikaela", "Naya", "Avalynn", "Margo", "Sevyn", "Florence", "Keilani", "Lyanna", "Joelle", "Kataleya", "Royal", "Averi", "Kallie", "Winnie", "Baylee", "Martha", "Pearl", "Alaiya", "Rayne", "Sylvie", "Brylee", "Jazmine", "Ryann", "Kori", "Noemi", "Haylee", "Julissa", "Celia", "Laylah", "Rebekah", "Rosalee", "Aya", "Bria", "Adele", "Aubrielle", "Tiffany", "Addyson", "Kai", "Bellamy", "Leilany", "Princess", "Chana", "Estelle", "Selene", "Sky", "Dani", "Thalia", "Ellen", "Rivka", "Amelie", "Andi", "Kynlee", "Raina", "Vienna", "Alianna", "Livia", "Madalyn", "Mercy", "Novalee", "Ramona", "Vada", "Berkley", "Gwen", "Persephone", "Milena", "Paula", "Clare", "Kairi", "Linda", "Paulina", "Kamilah", "Amoura", "Hunter", "Isabela", "Karen", "Marianna", "Sariyah", "Theodora", "Annika", "Kyleigh", "Nellie", "Scarlette", "Keyla", "Kailey", "Mavis", "Lilianna", "Rosalyn", "Sariah", "Tori", "Yareli", "Aubriella", "Bexley", "Bailee", "Jianna", "Keily", "Annabella", "Azariah", "Denisse", "Promise", "August", "Hadlee", "Halle", "Fallon", "Oakleigh", "Zaria", "Jaylin", "Paisleigh", "Crystal", "Ila", "Aliya", "Cynthia", "Giana", "Maleah", "Rylan", "Aniya", "Denise", "Emmeline", "Scout", "Simone", "Noah", "Zora", "Meghan", "Landry", "Ainhoa", "Lilyana", "Noor", "Belen", "Brynleigh", "Cleo", "Meilani", "Karter", "Amaris", "Frida", "Iliana", "Violeta", "Addisyn", "Nancy", "Denver", "Leanna", "Braylee", "Kiana", "Wrenley", "Barbara", "Khalani", "Aspyn", "Ellison", "Judith", "Robin", "Valery", "Aila", "Deborah", "Cara", "Clarissa", "Iyla", "Lexie", "Anais", "Kaylie", "Nathalie", "Alisson", "Della", "Addilynn", "Elsa", "Zoya", "Layne", "Marlowe", "Jovie", "Kenia", "Samira", "Jaylee", "Jenesis", "Etta", "Shay", "Amayah", "Avayah", "Egypt", "Flora", 
"Raquel", "Whitney", "Zola", "Giavanna", "Raya", "Veda", "Halo", "Paloma", "Nataly", "Whitley", "Dalary", "Drew", "Guadalupe", "Kamari", "Esperanza", "Loretta", "Malayah", "Natasha", "Stormi", "Ansley", "Carolyn", "Corinne", "Paola", "Brittany", "Emerald", "Freyja", "Zainab", "Artemis", "Jillian", "Kimora", "Zoie", "Aislinn", "Emmaline", "Ayleen", "Queen", "Jaycee", "Murphy", "Nyomi", "Elina", "Hadleigh", "Marceline", "Marisol", "Yasmin", "Zendaya", "Chandler", "Emani", "Jaelynn", "Kaiya", "Nathalia", "Violette", "Joyce", "Paityn", "Elisabeth", "Emmalynn", "Luella", "Yamileth", "Aarya", "Luisa", "Zhuri", "Araceli", "Harleigh", "Madalynn", "Melani", "Laylani", "Magdalena", "Mazikeen", "Belle", "Kadence", ) last_name = ( "Smith", "Johnson", "Williams", "Brown", "Jones", "Garcia", "Miller", "Davis", "Rodriguez", "Martinez", "Hernandez", "Lopez", "Gonzales", "Wilson", "Anderson", "Thomas", "Taylor", "Moore", "Jackson", "Martin", "Lee", "Perez", "Thompson", "White", "Harris", "Sanchez", "Clark", "Ramirez", "Lewis", "Robinson", "Walker", "Young", "Allen", "King", "Wright", "Scott", "Torres", "Nguyen", "Hill", "Flores", "Green", "Adams", "Nelson", "Baker", "Hall", "Rivera", "Campbell", "Mitchell", "Carter", "Roberts", "Gomez", "Phillips", "Evans", "Turner", "Diaz", "Parker", "Cruz", "Edwards", "Collins", "Reyes", "Stewart", "Morris", "Morales", "Murphy", "Cook", "Rogers", "Gutierrez", "Ortiz", "Morgan", "Cooper", "Peterson", "Bailey", "Reed", "Kelly", "Howard", "Ramos", "Kim", "Cox", "Ward", "Richardson", "Watson", "Brooks", "Chavez", "Wood", "James", "Bennet", "Gray", "Mendoza", "Ruiz", "Hughes", "Price", "Alvarez", "Castillo", "Sanders", "Patel", "Myers", "Long", "Ross", "Foster", "Jimenez", ) state_names = ( "Alaska", "Alabama", "Arkansas", "American Samoa", "Arizona", "California", "Colorado", "Connecticut", "District of Columbia", "Delaware", "Florida", "Georgia", "Guam", "Hawaii", "Iowa", "Idaho", "Illinois", "Indiana", "Kansas", "Kentucky", "Louisiana", 
"Massachusetts", "Maryland", "Maine", "Minnesota", "Missouri", "Mississippi", "Montana", "North Carolina", "North Dakota", "Nebraska", "New Hampshire", "New Jersey", "New Mexico", "Nevada", "New York", "Ohio", "Oklahoma", "Oregon", "Pennsylvania", "Puerto Rico", "Rhode Island", "South Carolina", "South Dakota", "Tennessee", "Texas", "Utah", "Virginia", "Virgin Islands", "Vermont", "Washington", "Wisconsin", "West Virginia", "Wyoming", "Michigan", ) convictions = ( "Misdemeanors", "Felony", "Wobbler", ) skills = ( "HTML", "CSS", "JavaScript", "Python", "Ruby", "C++", "Axios", "React", "Node.JS", "C#", "Django", "MongoDB", "SQL", "PostgreSQL", "NoSQL", "PHP", "Angular", "Microsoft Azure", "IOS", "Android" ) time_zones = ( "Pacific", "Mountain", "Central", "Eastern" ) purpose = ( "Technical", "Career Preparation" ) exp_level = ( "Beginner", "Intermediate", "Advanced", "Unsure" ) ``` Now that we have some source data out of the way, we can get to the fun part, creating the Mentor and Mentee classes that we will use to populate our DataFrame ``` import random as r import pandas as pd class Mentee: email_end = "@fake.com" def __init__(self): self.profile_id = "E" + str(r.randint(1000000, 70000000000000)) self.tech_or_career = r.sample(purpose, k=r.randint(1, 2)) self.skill_rank = r.sample(exp_level, k=1) self.first_name = r.choice(male_first_name) self.last_name = r.choice(last_name) self.full_name_mentee = self.first_name + " " + self.last_name self.time_zone = r.sample(time_zones, k=1) self.email = self.profile_id + self.last_name.lower() + self.email_end self.incarcerated = bool(r.randint(0, 1)) if self.incarcerated: self.list_convictions = r.choice(convictions) else: self.list_convictions = "None" self.skills = r.sample(skills, k=3) @classmethod def to_df(cls, num_rows): return pd.DataFrame(vars(cls()) for _ in range(num_rows)) class Mentor: def __init__(self): self.first_name = r.choice(male_first_name) self.last_name = r.choice(last_name) self.full_name_mentor = 
self.first_name + " " + self.last_name self.profile_id = "O" + str(hash(r.randint(2000000, 70000000000000000))) self.skills = sorted(r.sample(skills, k=r.randint(1, 4))) self.tech_or_career = r.sample(purpose, k=r.randint(1, 2)) self.skill_rank = sorted(r.sample(exp_level, k=r.randint(1, 4))) self.time_zone = r.sample(time_zones, k=1) @classmethod def to_df(cls, num_rows): return pd.DataFrame(vars(cls()) for _ in range(num_rows)) mentor = Mentor() mentor_df = mentor.to_df(40) mentor_df['key']=1 mentee = Mentee() mentee_df = mentee.to_df(200) mentee_df['key']=1 mentor_df mentee_df ``` Key Features (mentee): experience, tech_or_career, desired_skils, time_zone Key Features (mentor); skill_rank, career_capable, skills, time_zone ``` from sklearn.metrics.pairwise import cosine_similarity def clean_data(df): columns_needed = ['skills','skill_rank','time_zone','tech_or_career'] df = df[columns_needed] df = pd.concat([pd.get_dummies(df[c].apply(pd.Series).stack()).sum(level=0) for c in columns_needed], axis=1) return df clean_data(mentor_df).shape clean_data(mentee_df).shape cos_model_df = cosine_similarity(clean_data(mentee_df),clean_data(mentor_df)) cos_model_df.shape cos_model_df = pd.DataFrame(cos_model_df,index=mentee_df.full_name_mentee,columns=mentor_df.full_name_mentor) cos_model_df.mean() cos_model_df = cos_model_df.unstack().reset_index() cos_model_df cos_model_df = cos_model_df.sort_values(by = ['full_name_mentee', 0], ascending=False) Desmond_Roberts =cos_model_df[cos_model_df.full_name_mentee =='Desmond Roberts'] Desmond_Roberts ```
github_jupyter
## Text classification using Neural Networks The goal of this notebook is to learn to use Neural Networks for text classification. In this notebook, we will: - Train a shallow model with learning embeddings - Download pre-trained embeddings from Glove - Use these pre-trained embeddings However keep in mind: - Deep Learning can be better on text classification that simpler ML techniques, but only on very large datasets and well designed/tuned models. - We won't be using the most efficient (in terms of computing) techniques, as Keras is good for prototyping but rather inefficient for training small embedding models on text. - The following projects can replicate similar word embedding models much more efficiently: [word2vec](https://github.com/dav/word2vec) and [gensim's word2vec](https://radimrehurek.com/gensim/models/word2vec.html) (self-supervised learning only), [fastText](https://github.com/facebookresearch/fastText) (both supervised and self-supervised learning), [Vowpal Wabbit](https://github.com/JohnLangford/vowpal_wabbit/wiki) (supervised learning). - Plain shallow sparse TF-IDF bigrams features without any embedding and Logistic Regression or Multinomial Naive Bayes is often competitive in small to medium datasets. ## The BBC topic classification dataset The BBC provides some benchmark topic classification datasets in English at: http://mlg.ucd.ie/datasets/bbc.html. The raw text (encoded with the latin-1 character encoding) of the news can be downloaded as a ZIP archive: ``` import os import os.path as op import zipfile try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve BBC_DATASET_URL = "http://mlg.ucd.ie/files/datasets/bbc-fulltext.zip" zip_filename = BBC_DATASET_URL.rsplit('/', 1)[1] BBC_DATASET_FOLDER = 'bbc' if not op.exists(zip_filename): print("Downloading %s to %s..." 
% (BBC_DATASET_URL, zip_filename)) urlretrieve(BBC_DATASET_URL, zip_filename) if not op.exists(BBC_DATASET_FOLDER): with zipfile.ZipFile(zip_filename, 'r') as f: print("Extracting contents of %s..." % zip_filename) f.extractall('.') ``` Each of the five folders contains text files from one of the five topics: ``` target_names = sorted(folder for folder in os.listdir(BBC_DATASET_FOLDER) if op.isdir(op.join(BBC_DATASET_FOLDER, folder))) target_names ``` Let's randomly partition the text files in a training and test set while recording the target category of each file as an integer: ``` import numpy as np from sklearn.model_selection import train_test_split target = [] filenames = [] for target_id, target_name in enumerate(target_names): class_path = op.join(BBC_DATASET_FOLDER, target_name) for filename in sorted(os.listdir(class_path)): filenames.append(op.join(class_path, filename)) target.append(target_id) target = np.asarray(target, dtype=np.int32) target_train, target_test, filenames_train, filenames_test = train_test_split( target, filenames, test_size=200, random_state=0) len(target_train), len(filenames_train) len(target_test), len(filenames_test) ``` Let's check that text of some document have been loaded correctly: ``` idx = 0 with open(filenames_train[idx], 'rb') as f: print("class:", target_names[target_train[idx]]) print() print(f.read().decode('latin-1')[:500] + '...') size_in_bytes = sum([len(open(fn, 'rb').read()) for fn in filenames_train]) print("Training set size: %0.3f MB" % (size_in_bytes / 1e6)) ``` This dataset is small so we can preload it all in memory once and for all to simplify the notebook. ``` texts_train = [open(fn, 'rb').read().decode('latin-1') for fn in filenames_train] texts_test = [open(fn, 'rb').read().decode('latin-1') for fn in filenames_test] ``` ## A first baseline model For simple topic classification problems, one should always try a simple method first. 
In this case a good baseline is extracting TF-IDF normalized bag of bi-grams features and then use a simple linear classifier such as logistic regression. It's a very efficient method and should give us a strong baseline to compare our deep learning method against. ``` from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline text_classifier = make_pipeline( TfidfVectorizer(min_df=3, max_df=0.8, ngram_range=(1, 2)), LogisticRegression(multi_class="multinomial", solver="lbfgs"), ) %time _ = text_classifier.fit(texts_train, target_train) text_classifier.score(texts_test, target_test) ``` 6 classification errors on 200 test documents for a model fit in less than 10s. It's quite unlikely that we can significantly beat that baseline with a more complex deep learning based model. However let's try to reach a comparable level of accuracy with Embeddings-based models just for teaching purpose. ### Preprocessing text for the (supervised) CBOW model We will implement a simple classification model in Keras. Raw text requires (sometimes a lot of) preprocessing. The following cells uses Keras to preprocess text: - using a tokenizer. You may use different tokenizers (from scikit-learn, NLTK, custom Python function etc.). This converts the texts into sequences of indices representing the `20000` most frequent words - sequences have different lengths, so we pad them (add 0s at the end until the sequence is of length `1000`) - we convert the output classes as 1-hot encodings ``` from tensorflow.keras.preprocessing.text import Tokenizer MAX_NB_WORDS = 20000 # vectorize the text samples into a 2D integer tensor tokenizer = Tokenizer(num_words=MAX_NB_WORDS, char_level=False) tokenizer.fit_on_texts(texts_train) sequences = tokenizer.texts_to_sequences(texts_train) sequences_test = tokenizer.texts_to_sequences(texts_test) word_index = tokenizer.word_index print('Found %s unique tokens.' 
% len(word_index)) ``` Tokenized sequences are converted to list of token ids (with an integer code): ``` sequences[0][:10] ``` The tokenizer object stores a mapping (vocabulary) from word strings to token ids that can be inverted to reconstruct the original message (without formatting): ``` type(tokenizer.word_index), len(tokenizer.word_index) index_to_word = dict((i, w) for w, i in tokenizer.word_index.items()) " ".join([index_to_word[i] for i in sequences[0]]) ``` Let's have a closer look at the tokenized sequences: ``` seq_lens = [len(s) for s in sequences] print("average length: %0.1f" % np.mean(seq_lens)) print("max length: %d" % max(seq_lens)) %matplotlib inline import matplotlib.pyplot as plt plt.hist(seq_lens, bins=50); ``` Let's zoom on the distribution of regular sized posts. The vast majority of the posts have less than 1000 symbols: ``` plt.hist([l for l in seq_lens if l < 3000], bins=50); ``` Let's truncate and pad all the sequences to 1000 symbols to build the training set: ``` from tensorflow.keras.preprocessing.sequence import pad_sequences MAX_SEQUENCE_LENGTH = 1000 # pad sequences with 0s x_train = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH) x_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH) print('Shape of data tensor:', x_train.shape) print('Shape of data test tensor:', x_test.shape) from tensorflow.keras.utils import to_categorical y_train = to_categorical(target_train) print('Shape of label tensor:', y_train.shape) ``` ### A simple supervised CBOW model in Keras The following computes a very simple model, as described in [fastText](https://github.com/facebookresearch/fastText): <img src="images/fasttext.svg" style="width: 600px;" /> - Build an embedding layer mapping each word to a vector representation - Compute the vector representation of all words in each sequence and average them - Add a dense layer to output 20 classes (+ softmax) ``` from tensorflow.keras.layers import Dense, Input, Flatten from 
tensorflow.keras.layers import GlobalAveragePooling1D, Embedding from tensorflow.keras.models import Model from tensorflow.keras import optimizers EMBEDDING_DIM = 50 N_CLASSES = len(target_names) # input: a sequence of MAX_SEQUENCE_LENGTH integers sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32') embedding_layer = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH, trainable=True) embedded_sequences = embedding_layer(sequence_input) average = GlobalAveragePooling1D()(embedded_sequences) predictions = Dense(N_CLASSES, activation='softmax')(average) model = Model(sequence_input, predictions) model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=0.01), metrics=['acc']) model.fit(x_train, y_train, validation_split=0.1, epochs=10, batch_size=32) ``` **Exercices** - Compute model accuracy on test set ``` # %load solutions/accuracy.py ``` ### Building more complex models **Exercise** - From the previous template, build more complex models using: - **1d convolution and 1d maxpooling**. Note that you will still need a GloabalAveragePooling or Flatten after the convolutions as the final `Dense` layer expects a fixed size input; - **Recurrent neural networks through LSTM** (you will need to **reduce sequence length before using the LSTM layer**). <img src="images/unrolled_rnn_one_output_2.svg" style="width: 600px;" /> **Bonus** - You may try different architectures with: - more intermediate layers, combination of dense, conv, recurrent - different recurrent (GRU, RNN) - bidirectional LSTMs **Note**: The goal is to build working models rather than getting better test accuracy as this task is already very well solved by the simple model. Build your model, and verify that they converge to OK results. 
```
from tensorflow.keras.layers import Embedding, Dense, Input, Flatten
from tensorflow.keras.layers import Conv1D, LSTM, GRU
from tensorflow.keras.layers import MaxPooling1D, GlobalAveragePooling1D
from tensorflow.keras.models import Model

EMBEDDING_DIM = 50
N_CLASSES = len(target_names)

# input: a sequence of MAX_SEQUENCE_LENGTH integers
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')

embedding_layer = Embedding(MAX_NB_WORDS, EMBEDDING_DIM,
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=True)
embedded_sequences = embedding_layer(sequence_input)

# TODO
# NOTE(review): until the TODO above defines a new `predictions` head
# (conv/LSTM), the name `predictions` still refers to the output of the
# CBOW cell earlier in the notebook, so this Model silently reuses it.

model = Model(sequence_input, predictions)
model.compile(loss='categorical_crossentropy',
              optimizer='adam', metrics=['acc'])

# Load a reference solution into this cell:
# %load solutions/conv1d.py
# %load solutions/lstm.py

model.fit(x_train, y_train, validation_split=0.1,
          epochs=5, batch_size=32)

# Evaluate on the held-out test set (argmax over class probabilities).
# NOTE(review): `test_casses` looks like a typo for `test_classes`; the
# name is local to this cell so it is harmless.
output_test = model(x_test)
test_casses = np.argmax(output_test, axis=-1)
print("Test accuracy:", np.mean(test_casses == target_test))
```

### Loading pre-trained embeddings

The file `glove100K.100d.txt` is an extract of [Glove](http://nlp.stanford.edu/projects/glove/) Vectors, that were trained on english Wikipedia 2014 + Gigaword 5 (6B tokens).

We extracted the `100 000` most frequent words.
They have a dimension of `100`

```
embeddings_index = {}
embeddings_vectors = []

# Parse the GloVe text file: one word per line, followed by its 100
# float coordinates. Record word -> row index and collect the vectors.
with open('glove100K.100d.txt', 'rb') as f:
    word_idx = 0
    for line in f:
        values = line.decode('utf-8').split()
        word = values[0]
        vector = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = word_idx
        embeddings_vectors.append(vector)
        word_idx = word_idx + 1

# Inverse mapping: row index -> word.
inv_index = {v: k for k, v in embeddings_index.items()}
print("found %d different words in the file" % word_idx)

# Stack all embeddings in a large numpy array
glove_embeddings = np.vstack(embeddings_vectors)

# L2-normalized copy: rows have unit norm, so a dot product between two
# rows is directly their cosine similarity.
glove_norms = np.linalg.norm(glove_embeddings, axis=-1, keepdims=True)
glove_embeddings_normed = glove_embeddings / glove_norms
print(glove_embeddings.shape)

def get_emb(word):
    """Return the raw GloVe vector for `word`, or None if out of vocabulary."""
    idx = embeddings_index.get(word)
    if idx is None:
        return None
    else:
        return glove_embeddings[idx]

def get_normed_emb(word):
    """Return the unit-norm GloVe vector for `word`, or None if out of vocabulary."""
    idx = embeddings_index.get(word)
    if idx is None:
        return None
    else:
        return glove_embeddings_normed[idx]

get_emb("computer")
```

### Finding most similar words

**Exercice**

Build a function to find most similar words, given a word as query:
- lookup the vector for the query word in the Glove index;
- compute the cosine similarity between a word embedding and all other words;
- display the top 10 most similar words.
**Bonus**

Change your function so that it takes multiple words as input (by averaging them)

```
# %load solutions/most_similar.py

# Sanity checks on the similarity function (solution loaded above):
most_similar("cpu")

most_similar("pitt")

most_similar("jolie")
```

Predict the future better than tarot:

```
# Dot products of unit-norm vectors = cosine similarities.
np.dot(get_normed_emb('aniston'), get_normed_emb('pitt'))

np.dot(get_normed_emb('jolie'), get_normed_emb('pitt'))

most_similar("1")

# bonus: yangtze is a chinese river
most_similar(["river", "chinese"])
```

### Displaying vectors with t-SNE

```
from sklearn.manifold import TSNE

# Project the first 1000 word vectors down to 2D for visualization.
word_emb_tsne = TSNE(perplexity=30).fit_transform(glove_embeddings_normed[:1000])

%matplotlib inline
import matplotlib.pyplot as plt

plt.figure(figsize=(40, 40))
axis = plt.gca()
np.set_printoptions(suppress=True)
plt.scatter(word_emb_tsne[:, 0], word_emb_tsne[:, 1], marker=".", s=1)

# Label each projected point with its word (inv_index: row index -> word).
for idx in range(1000):
    plt.annotate(inv_index[idx],
                 xy=(word_emb_tsne[idx, 0], word_emb_tsne[idx, 1]),
                 xytext=(0, 0), textcoords='offset points')
plt.savefig("tsne.png")
plt.show()
```

### Using pre-trained embeddings in our model

We want to use these pre-trained embeddings for transfer learning. This process is rather similar than transfer learning in image recognition: the features learnt on words might help us bootstrap the learning process, and increase performance if we don't have enough training data.
- We initialize embedding matrix from the model with Glove embeddings: - take all words from our 20 Newsgroup vocabulary (`MAX_NB_WORDS = 20000`), and look up their Glove embedding - place the Glove embedding at the corresponding index in the matrix - if the word is not in the Glove vocabulary, we only place zeros in the matrix - We may fix these embeddings or fine-tune them ``` EMBEDDING_DIM = 100 # prepare embedding matrix nb_words_in_matrix = 0 nb_words = min(MAX_NB_WORDS, len(word_index)) embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM)) for word, i in word_index.items(): if i >= MAX_NB_WORDS: continue embedding_vector = get_emb(word) if embedding_vector is not None: # words not found in embedding index will be all-zeros. embedding_matrix[i] = embedding_vector nb_words_in_matrix = nb_words_in_matrix + 1 print("added %d words in the embedding matrix" % nb_words_in_matrix) ``` Build a layer with pre-trained embeddings: ``` pretrained_embedding_layer = Embedding( MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, ) ``` ### A model with pre-trained Embeddings Average word embeddings pre-trained with Glove / Word2Vec usually works surprisingly well. However, when averaging more than `10-15` words, the resulting vector becomes too noisy and classification performance is degraded. 
``` sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32') embedded_sequences = pretrained_embedding_layer(sequence_input) average = GlobalAveragePooling1D()(embedded_sequences) predictions = Dense(N_CLASSES, activation='softmax')(average) model = Model(sequence_input, predictions) # We don't want to fine-tune embeddings model.layers[1].trainable = False model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=0.01), metrics=['acc']) model.fit(x_train, y_train, validation_split=0.1, epochs=15, batch_size=32) ``` **Remarks:** - On this type of task, using pre-trained embeddings can degrade results as we train much less parameters and we average a large number pre-trained embeddings. - Pre-trained embeddings followed by global averaging prevents overfitting but can also cause some underfitting. - Using convolutions / LSTM should help counter the underfitting effect. - It is also advisable to treat separately pre-trained embeddings and words out of vocabulary. Pre-trained embeddings can be very useful when the training set is small and the individual text documents to classify are short: in this case there might be a single very important word in a test document that drives the label. If that word has never been seen in the training set but some synonyms were seen, the semantic similarity captured by the embedding will allow the model to generalized out of the restricted training set vocabulary. We did not observe this effect here because the document are long enough so that guessing the topic can be done redundantly. Shortening the documents to make the task more difficult could possibly highlight this benefit. ### Reality check On small/medium datasets, simpler classification methods usually perform better, and are much more efficient to compute. 
Here are two resources to go further: - Naive Bayes approach, using scikit-learn http://scikit-learn.org/stable/datasets/twenty_newsgroups.html - Alec Radford (OpenAI) gave a very interesting presentation, showing that you need a VERY large dataset to have real gains from GRU/LSTM in text classification https://www.slideshare.net/odsc/alec-radfordodsc-presentation However, when looking at features, one can see that classification using simple methods isn't very robust, and won't generalize well to slightly different domains (e.g. forum posts => emails) Note: Implementation in Keras for text is very slow due to python overhead and lack of hashing techniques. The fastText implementation https://github.com/facebookresearch/fasttext is much, much faster. ## Going further - Compare pre-trained embeddings vs specifically trained embeddings - Train your own wordvectors in any language using [gensim's word2vec](https://radimrehurek.com/gensim/models/word2vec.html) - Check [Keras Examples](https://github.com/fchollet/keras/tree/master/examples) on `imdb` sentiment analysis - Install fastText (Linux or macOS only, use the Linux VM if under Windows) and give it a try on the classification example in its repository. - Today, the **state-of-the-art text classification** can be achieved by **transfer learning from a language model** instead of using traditional word embeddings. See for instance: [ULMFit, Fine-tuned Language Models for Text Classification](https://arxiv.org/abs/1801.06146), [ELMO](https://allennlp.org/elmo), [GPT](https://blog.openai.com/language-unsupervised/), [BERT](https://arxiv.org/abs/1810.04805), [GPT-2](https://github.com/openai/gpt-2). The second notebook introduces how to train such a language model from unlabeled data.
github_jupyter
# Regression example California house-prices dataset. Predict median house value for California districts. More information about the dataset: * https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_california_housing.html#sklearn.datasets.fetch_california_housing * https://scikit-learn.org/stable/datasets/real_world.html#california-housing-dataset * http://lib.stat.cmu.edu/datasets/ ``` # Import packages import numpy as np import sklearn.datasets import sklearn.linear_model import sklearn.preprocessing # Make the results reproducible np.random.seed(42) # Load dataset data = sklearn.datasets.fetch_california_housing() # Split into training, validation, and test data sets n_train = int(data.data.shape[0] * 0.75 + 0.5) # Train on 75 % n_val = int(data.data.shape[0] * 0.15 + 0.5) # Validate on 15 % n_test = int(data.data.shape[0] * 0.10 + 0.5) # Test on 10 % X = data.data[:n_train, :] y = data.target[:n_train] X_val = data.data[n_train:n_train + n_val, :] y_val = data.target[n_train:n_train + n_val] # Note! Do not use (at all!) the test data until the very end! X_test = data.data[n_train + n_val:, :] y_test = data.target[n_train + n_val:] print(f"Training set size X : {X.shape}") print(f"Training set size y : {y.shape}") print(f"Validation set size X: {X_val.shape}") print(f"Validation set size y: {y_val.shape}") print(f"Test set size X : {X_test.shape}") print(f"Test set size y : {y_test.shape}") print(f"Feature names : {data.feature_names}") # Task1: Preprocess the data # - Try without preprocessing, try with different kinds. # - Evaluate and compare models on the validation data. # # Note that we fit the preprocessing function to the training data! # Then we apply the learned transformation to the validation and test data sets. 
# `sklearn.metrics` is used below but was never imported in the setup
# cell; it previously worked only because other sklearn imports pull the
# submodule in transitively. Make the dependency explicit.
import sklearn.metrics

# Standardize features: fit on the training data only, then apply the
# same transformation to validation and test data.
standard_scaler = sklearn.preprocessing.StandardScaler()
standard_scaler.fit(X)
X_ = standard_scaler.transform(X)
X_val_ = standard_scaler.transform(X_val)
X_test_ = standard_scaler.transform(X_test)

# Fit baseline model
model_baseline = sklearn.linear_model.LinearRegression(fit_intercept=True)
_ = model_baseline.fit(X_, y)

# Evaluate baseline model
yhat = model_baseline.predict(X_)
yhat_val = model_baseline.predict(X_val_)
mse = sklearn.metrics.mean_squared_error(y, yhat)
mse_val = sklearn.metrics.mean_squared_error(y_val, yhat_val)
print(f"Training data mean squared error : {mse:.3f}")
print(f"Validation data mean squared error: {mse_val:.3f}")

# Task 2: Find a better model
# - Try different regression methods
# - Evaluate them on the validation data
# - Beat the baseline model and select the best one you can find
# - You can look here for potential models to use:
#       https://scikit-learn.org/stable/modules/classes.html#module-sklearn.linear_model

# Exercise placeholder: replace this string with an actual estimator
# before running, otherwise `model.fit` raises AttributeError.
model = "... add your own regression model code here!"

# Note that we fit on the preprocessed data in X_
_ = model.fit(X_, y)

# Evaluate better model
yhat = model.predict(X_)
yhat_val = model.predict(X_val_)
mse = sklearn.metrics.mean_squared_error(y, yhat)
mse_val = sklearn.metrics.mean_squared_error(y_val, yhat_val)
print(f"Training data mean squared error : {mse:.3f}")
print(f"Validation data mean squared error: {mse_val:.3f}")

# Task 3: Determine the importance of the input variables

# ... your code here

# Evaluate the final model on the test data.
# This is only ever done once, and as the last thing we do.
# Training another model after this, based on the performance on the test data
# leads to biased results!
# Final, one-shot evaluation: predict on all three splits and report MSE.
yhat = model.predict(X_)
yhat_val = model.predict(X_val_)
yhat_test = model.predict(X_test_)

mse = sklearn.metrics.mean_squared_error(y, yhat)
mse_val = sklearn.metrics.mean_squared_error(y_val, yhat_val)
mse_test = sklearn.metrics.mean_squared_error(y_test, yhat_test)

# Test MSE is reported exactly once (see warning in the previous cell).
print(f"Training data mean squared error : {mse:.3f}")
print(f"Validation data mean squared error: {mse_val:.3f}")
print(f"Test data mean squared error : {mse_test:.3f}")
```
github_jupyter
# 9.6 目标检测数据集(皮卡丘) ``` %matplotlib inline import os import json import numpy as np import torch import torchvision from PIL import Image import sys sys.path.append("..") import d2lzh_pytorch as d2l print(torch.__version__) data_dir = '../../data/pikachu' ``` ## 9.6.1 下载数据集 请运行[脚本](https://github.com/ShusenTang/Dive-into-DL-PyTorch/blob/master/code/chapter09_computer-vision/9.6.0_prepare_pikachu.ipynb)准备好数据集。 ``` assert os.path.exists(os.path.join(data_dir, "train")) ``` ## 9.6.2 读取数据集 ``` # 本类已保存在d2lzh_pytorch包中方便以后使用 class PikachuDetDataset(torch.utils.data.Dataset): """皮卡丘检测数据集类""" def __init__(self, data_dir, part, image_size=(256, 256)): assert part in ["train", "val"] self.image_size = image_size self.image_dir = os.path.join(data_dir, part, "images") with open(os.path.join(data_dir, part, "label.json")) as f: self.label = json.load(f) self.transform = torchvision.transforms.Compose([ # 将 PIL 图片转换成位于[0.0, 1.0]的floatTensor, shape (C x H x W) torchvision.transforms.ToTensor()]) def __len__(self): return len(self.label) def __getitem__(self, index): image_path = str(index + 1) + ".png" cls = self.label[image_path]["class"] label = np.array([cls] + self.label[image_path]["loc"], dtype="float32")[None, :] PIL_img = Image.open(os.path.join(self.image_dir, image_path) ).convert('RGB').resize(self.image_size) img = self.transform(PIL_img) sample = { "label": label, # shape: (1, 5) [class, xmin, ymin, xmax, ymax] "image": img # shape: (3, *image_size) } return sample # 本函数已保存在d2lzh_pytorch包中方便以后使用 def load_data_pikachu(batch_size, edge_size=256, data_dir = '../../data/pikachu'): """edge_size:输出图像的宽和高""" image_size = (edge_size, edge_size) train_dataset = PikachuDetDataset(data_dir, 'train', image_size) val_dataset = PikachuDetDataset(data_dir, 'val', image_size) train_iter = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) val_iter = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False, 
num_workers=4) return train_iter, val_iter batch_size, edge_size = 32, 256 train_iter, _ = load_data_pikachu(batch_size, edge_size, data_dir) batch = iter(train_iter).next() print(batch["image"].shape, batch["label"].shape) ``` ## 9.6.3 图示数据 ``` imgs = batch["image"][0:10].permute(0,2,3,1) bboxes = batch["label"][0:10, 0, 1:] axes = d2l.show_images(imgs, 2, 5).flatten() for ax, bb in zip(axes, bboxes): d2l.show_bboxes(ax, [bb*edge_size], colors=['w']) ```
github_jupyter
<a href="https://colab.research.google.com/github/MoohShadox/MCNN_Training/blob/main/S%C3%A9ance_0_Pr%C3%A9requis_en_Alg%C3%A8bre_et_en_Optimisation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # La descente de gradient : un outil indispensable d'optimisation numérique ## Introduction Souvent en Machine Learning les modèles de décision dépendent de paramètres qui peuvent être """optimisés""" et donc pour trouver les meilleurs paramètres il faut souvent minimiser une fonction d'erreur. Cette fonction est généralement **différentiable** (elle admet une dérivée) et donc la descente de gradient est largement utilisés pour "fit" un réseau de neurones (le faire coller aux données). ## Principe de base Imaginez qu'on ait une fonction simple de $\mathcal{R}$ dans $\mathcal{R}$ : $$f(x) = (x-5)^2 + 3$$ Il est clair que le problème $z^* = min_{x \in \mathcal{R}}(f(x))$ admet comme solution optimale $x^*=5$ avec comme valeur $z^* =3$ Imaginons qu'on veuille résoudre numériquement le problème suivant La fonction a optimiser est différentiable (Polynôme) et sa dérivée est : $$f'(x) = 2(x-5)$$ D'abord essayons d'afficher ces deux fonctions pour trouver un moyen graphique de conçevoir un algorithme permettant de résoudre ce problème. ``` import matplotlib.pyplot as plt import numpy as np f = lambda x : (x-5)**2 + 3 f_p = lambda x : 2*(x-5) x_0 = +800 #Par exemple X = np.linspace(-1000,1000,1000) Y = f(X) plt.plot(X,Y) while (np.abs(f_p(x_0)) > 1e-10): plt.scatter(x_0,f(x_0)) plt.quiver(x_0,f(x_0),-f_p(x_0),0) x_0 = x_0 - 0.1*f_p(x_0) print("Fin avec x_0 = ",x_0) ``` ## Auto-diff : Calcul automatique du gradient d'une fonction Torch et Tensorflow proposent un mécanisme d'auto-differentiation qui permet de calculer le gradient d'une fonction représentée par un DAG (Direct Acyclique Graph) de manière automatique en utilisant la règle de composition (The Chain Rule). 
D'abord commençons par montrer comment on peut construire un DAG. ``` import torch t = torch.tensor(10, dtype=torch.float , requires_grad=True) t.shape import torch x_0 = -700.0 # Pour commencer définissons des tenseurs dans lesquels nous allons introduire nos données. x = torch.tensor(x_0) #Ensuite la fonction f définie précédemment doit être donnée par la feuille de l'arbre donc : y_1 = x - 5 y_2 = torch.pow(y_1,2) f_t = y_2 + 3 print("Contenu du tensor : " , f_t) print("Retour de la fonction pour le même x_0 ",f(x_0)) ``` Ensuite on va utiliser la librairie d'auto-differentiation pour calculer la dérivée de f_t en x_0 automatiquement. Pour ce faire un repéte le même processus en ajoutant un paramètre qui va spécifier a torch qu'il faut prévoire le calcul du gradient. ``` x = torch.tensor(x_0,requires_grad=True) y_1 = x - 1 y_2 = torch.pow(y_1,2) f_t = y_2 + 3 print("Contenu du tensor : " , f_t) !pip install torchviz from torchviz import make_dot make_dot(f_t) from torch.autograd import Variable x = torch.tensor(8).float() x_v = Variable(x,requires_grad=True) y_1 = x_v - 5 y_2 = torch.pow(y_1,2) f_t = y_2 + 3 print("x_v = ",x_v) print("y_1 = ",y_1) print("y_2 = ",y_2) print("f_t = ",f_t) f_t.backward() print("x_v = ",x_v.grad) print("y_1 = ",y_1.grad) print("y_2 = ",y_2.grad) print("f_t = ",f_t.grad) ``` Ici on va essayer de dérouler manuellement les différentes étapes de calcul du gradient de la loss en utilisant la chain rule. Pour rappel la chain rule fonctionne comme suit : - Je veux calculer la dérviée d'une fonction w.r.t x disons que je veuille calculer $\frac{\partial f}{\partial x}$ et f s'écrit comme $f(x) = g(y) $ avec y=h(x) c'est a dire avec une fonction qui ne s'écrit pas en x mais en une autre variable y qu'on obtient par une fonction sur x. On peut calculer le gradient progressivement grâce a la relation : $\frac{\partial f}{\partial x} = \frac{\partial f}{\partial y} * \frac{\partial y}{\partial x}$ ``` x = torch.tensor(8.) 
x_v = Variable(x,requires_grad=True) y_1 = x_v - 5 y_2 = torch.pow(y_1,2) f_t = y_2 + 3 y_1.retain_grad() y_2.retain_grad() f_t.retain_grad() f_t.backward() print("x_v = ",x_v.grad) # x_v = 8 #6 print("y_1 = ",y_1.grad) # y_1 = x_v - 5 = 3 --> dy_1/dx_v = 1 en remplaçant 8 ça donne 1 et en multipliant par 6 ça remonte 6 print("y_2 = ",y_2.grad) # y_2 = y_1**2 = 9 --> dy_2/d_y1 = 2y_1 en remplaçant en 3 on trouve 6 et on multuplie par 1 donc ça remonte 6 print("f_t = ",f_t.grad) # f_t = y_2 + 3 = 12 --> df_t/dy_2 = 1 en remplaçant en 9 on trouve 1 print("Vraie dérivée = ",f_p(8)) ``` A présent montrons comment on peut utiliser le gradient de la même manière que précédemment afin d'effectuer une descente de gradient. ``` x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) for i in range(0,1000): y_1 = x_v - 5 y_2 = torch.pow(y_1,2) f_t = y_2 + 3 f_t.backward() #x_v = x_v - 0.01 * x_v.grad #x_v -= 0.01 * x_v.grad with torch.no_grad(): x_v -= 0.01 * x_v.grad x_v.grad.zero_() print("x = ",x) ``` Pas trés pratique de devoir fixer un nombre d'itérations avant convergence.. Nous préférerons par conséquence partir sur une formulation tel que celle ci qui fixe comme condition de sortie que le gradient soit suffisament proche de 0 (ce qui indique qu'on est proche de l'optimum) ``` x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) it = 0 while (True): it += 1 y_1 = x_v - 5 y_2 = torch.pow(y_1,2) f_t = y_2 + 3 f_t.backward() with torch.no_grad(): x_v -= 0.01 * x_v.grad #A l'occasion 0.1 : Learning Rate if (np.abs(x_v.grad) < 1e-4): break x_v.grad.zero_() print("x_v = ",x_v) print(f"En {it} itérations ! ") ``` ## Quelques conseils de conception : Dans cette partie je vais essayer de détailler des conventions qui sont fréquemment utilisés quand on veut conçevoir des DAG en vu de se servir du module d'auto-differentiation. 
Nous allons utiliser chaque convention séquentiellement sur l'exemple précédent pour montrer comment nous pouvons améliorer la lisiblité du code et sa modularité. ### Séparer dans deux fonctions le calcul de la fonction a optimiser et le processus d'optimisation Pour faciliter la manipulation des DAG il est pratique de les considérer comme des objets qui sont construits par l'intermédiaire d'une fonction. Le processus d'optimisation également peut être modélisé par une fonction ce qui a pour aventage de permettre de le paramètrer plus facilement. ``` """ Cette fonction construit un DAG et va "brancher" le x que je lui donne en paramètre """ def get_f(x): y_1 = x - 5 y_2 = torch.pow(y_1,2) f_t = y_2 + 3 return f_t """ Et celle la déclenche l'optimisation a partir du x_0 que je lui donne en paramètre et renvoie la valeur optimale de x. Je peux donner la précision et le teau d'apprentissage en paramètres en leur donnant une valeur par défaut. Je peux également récupérer le nombre d'itérations avant convergence. """ def optimize(x_0, precision = 1e-4, alpha = 0.01): x_0 = float(x_0) #essayez de commenter cette ligne. x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) it = 0 while (True): it += 1 f_t = get_f(x_v) f_t.backward() with torch.no_grad(): x_v -= alpha * x_v.grad if (np.abs(x_v.grad) < precision): break x_v.grad.zero_() return x_v, it x_v, it = optimize(10) print("x_v = ",x_v) print(f"En seulement {it} itérations ! ") ``` ### Servez vous du fait que les fonctions soient des objets En python les fonctions sont également des objets qui peuvent être passés en paramètres. 
Petit exemple : ``` def composition(f, g, x): print("Composé : ",f(g(x))) return f(g(x)) def f(x): return x + 1 def g(x): return x**2 composition(f,f,2) def f1(x): y_1 = x - 1 y_2 = torch.pow(y_1,2) f_t = y_2 + 3 return f_t def f2(x): y_1 = x - 5 y_2 = torch.pow(y_1,4) f_t = y_2 + 3 return f_t #J'ai ajouté le maximum d'itérations je vous laisse deviner pourquoi (non je l'explique dans la partie sur comment débugger ) def optimize(x_0, func, precision = 1e-4, alpha = 0.01, max_it = 1000): x_0 = float(x_0) x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) it = 0 while (True): it += 1 f_t = func(x_v) f_t.backward() with torch.no_grad(): x_v -= alpha * x_v.grad if (np.abs(x_v.grad) < precision): break x_v.grad.zero_() if(it == max_it): break return x_v, it #A présent on peut donner en paramètre la fonction a optimiser. x_v, it = optimize(20.0,f1) print("optimum de f1 = ",x_v) print(f"En {it} itérations ! ") x_v, it = optimize(20.0,f2) print("optimum de f2 = ",x_v) print(f"En {it} itérations ! ") ``` ### Personne ne le sait mais on peut typer en Python A vrai dire le typage peut éviter a vos camarades de mal utiliser vos fonctions en plus de faciliter la relecture du code donc n'hésitez pas a vous en servir. 
``` #Trés utile pour préciser qu'on attend un certain genre de fonctions en paramètres #Donc pour preciser par exemple qu'un argument est une fonction qui prend en paramètres deux entiers et retourne un float on écrira : # func : Callable[[int, int], float] from typing import Callable def get_f(x : torch.tensor ) -> torch.tensor: y_1 = x - 1 y_2 = torch.pow(y_1,2) f_t = y_2 + 3 return f_t #Pas la peine de le mettre sur tout les paramètres juste sur ceux qui sont délicats def optimize(x_0 : torch.tensor , func:Callable[[torch.tensor], torch.tensor] , precision = 1e-4, alpha = 0.01, max_it = 1000) -> (torch.tensor, int): x_0 = float(x_0) x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) it = 0 while (True): it += 1 f_t = func(x_v) f_t.backward() with torch.no_grad(): x_v -= alpha * x_v.grad if (np.abs(x_v.grad) < precision): break x_v.grad.zero_() if(it == max_it): break return x_v, it ``` ### La documentation c'est vital C'est bien beau d'avoir paramètré et typé nos paramètres mais maintenant il faut qu'on puisse comprendre ce qui se passe quand on les change sans avoir a éplucher le code. De plus, quand on fait du dévelopemment on passe infiniment plus de temps a lire du code qu'a en écrire et particulièrement si on travaille en équipe. Ainsi faciliter la relecture de son code rapporte plus de temps a une équipe que d'écrire du code rapidement. ``` #Dans cette partie je vais écrire ce qui s'appelle une "docstring" les docstring suivent en général toujours le même format qui est en l'occurrence le suivant : """ [Un petit résumé de ce que fait la fonction dans les grandes lignes (pas la peine de mettre "cette fonction additionne si la fonction s'appelle add)] Params (même conseil les paramètres évidents il n'y a pas besoin de les expliquer): - param1 : son explication - param2 : son explication -... etc Returns : - premier retour : son explication - deuxième retour : son explication - etc... 
[Eventuellement des notes et des remarques] N'oubliez pas qu'une fonction qui effectue des modifications "in place" au lieu de retourner une copie modifiée est indiquée par le fait qu'elle se termine par un "_" (en général) """ def optimize(x_0 : torch.tensor , func:Callable[[torch.tensor], torch.tensor] , precision = 1e-4, alpha = 0.01, max_it = 1000) -> (torch.tensor, int): """ Compute a gradient descent. Params: - x_0: Initial point - func: Function to optimize (DAG). - alpha: Learning rate - max_it: Maximum of iterations Returns: - x_v: Optimum - it: Number of iterations before convergence. """ x_0 = float(x_0) x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) it = 0 while (True): it += 1 f_t = func(x_v) f_t.backward() with torch.no_grad(): x_v -= alpha * x_v.grad if (np.abs(x_v.grad) < precision): break x_v.grad.zero_() if(it == max_it): break return x_v, it help(optimize) ``` ### Pour ceux qui développent un **nouvel** outil : prévoir des utilitaires facilitant le suivi de l'évolution des paramètres Quand on développe une nouvel approche ce qui n'est généralement pas le cas (sauf si vous avez un PFE) il faut souvent fournir des courbes montrant l'efficacité de notre approche et donc prévoire certaines fonctions utiles pour ça tel que l'enregistrement de l'évolution du certains paramètres liés a au déroulement de l'algorithme. Ici typiquement il serais intéressant de voire l'allure de la courbe correspondant a l'évolution du gradient et de l'objectif au fil des itérations afin de l'inclure dans un rapport. ``` def optimize(x_0 : torch.tensor , func:Callable[[torch.tensor], torch.tensor] , precision = 1e-4, alpha = 0.01, max_it = 1000) -> (torch.tensor, int): """ Compute a gradient descent. Params: - x_0: Initial point - func: Function to optimize. - alpha: Learning rate - max_it: Maximum of iterations Returns: - x_v: Optimum - it: Number of iterations before convergence. 
""" #Par convention on appelle ça des "logs" logs = { "grad":[], "obj":[] } x_0 = float(x_0) x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) it = 0 while (True): it += 1 f_t = func(x_v) f_t.backward() with torch.no_grad(): x_v -= alpha * x_v.grad logs["grad"].append(float(x_v.grad.detach().numpy())) logs["obj"].append(float(f_t.detach().numpy())) if (np.abs(x_v.grad) < precision): break x_v.grad.zero_() if(it == max_it): break return x_v, it, logs x_v, it, logs = optimize(20.0,f1) print("optimum de f2 = ",x_v) print(f"En {it} itérations ! ") ``` Ces logs sont trés pratiques car ils permettent de dessiner des figures ``` plt.plot(np.arange(it), logs["grad"]) plt.xlabel("Itérations") plt.ylabel("Gradient") plt.title("Evolution du gradient en fonction du nombre d'itérations") plt.plot(np.arange(it), logs["obj"]) plt.xlabel("Itérations") plt.ylabel("Objectif") plt.title("Evolution de l'objectif en fonction du nombre d'itérations") ``` ### Pensez a regrouper les fonctionnalités en utilisant la programmation orientée objet Souvent Python est apprécié pour son paradigm fonctionnel mais le paradigm orienté objet peut dans plusieurs cas s'avérer utile. Dans l'exemple précédent je ne retourne les historiques que pour pouvoir afficher des figures et rien d'autre, une autre façon de voire le fonctionnement du code serrait d'inclure la fonction d'affichage au coté de la fonction d'optimisation au sein d'un objet et d'utiliser un attribut interne du dit objet pour stocker les différents historiques. ``` class Optimizer(): #C'est comme ça qu'on défini un constructeur. def __init__(self): self.logs = { "grad":[], "obj":[] } self.it = 0 self.obj = None def optimize(self,x_0 : torch.tensor , func:Callable[[torch.tensor], torch.tensor] , precision = 1e-4, alpha = 0.01, max_it = 1000) -> (torch.tensor, int): """ Compute a gradient descent. Params: - x_0: Initial point - func: Function to optimize. 
- alpha: Learning rate - max_it: Maximum of iterations Returns: - x_v: Optimum """ #Par convention on appelle ça des "logs" self.obj = func x_0 = float(x_0) x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) self.it = 0 while (True): self.it += 1 f_t = func(x_v) f_t.backward() with torch.no_grad(): x_v -= alpha * x_v.grad self.logs["grad"].append(float(x_v.grad.detach().numpy())) self.logs["obj"].append(float(f_t.detach().numpy())) if (np.abs(x_v.grad) < precision): break x_v.grad.zero_() if(self.it == max_it): break return x_v def plot_gradient_evol(self): plt.plot(np.arange(it), logs["grad"]) plt.xlabel("Itérations") plt.ylabel("Gradient") plt.title("Evolution du gradient en fonction du nombre d'itérations") return plt def plot_objectif_evol(self): plt.plot(np.arange(it), logs["obj"]) plt.xlabel("Itérations") plt.ylabel("Objectif") plt.title("Evolution de l'objectif en fonction du nombre d'itérations") return plt def plot_obj_func(self): X = torch.tensor(np.linspace(-40,40, 1000)) y = self.obj(X).detach().numpy() plt.plot(X,y) return plt op = Optimizer() x_v = op.optimize(10,f1) print("x_v = ",x_v) print(f"En seulement {op.it} itérations ! ") op.plot_obj_func() op.plot_gradient_evol() op.plot_objectif_evol() ``` ### N'en faites pas trop ! Le concept le plus important a retenir s'appelle KISS : Keep It Simple Stupid. Il ne faut compléxifier le code qu'en cas d'extème nécéssité, par exemple la pertinence de l'utilisation d'une classe dans les cellules précédentes en cas réel est très discutable, de même que de passer une fonction en paramètres. Le plus clair du temps la fonction a optimiser sera unique, et les courbes que vous chercherez a afficher vous les afficherez une seule fois et vous les enregistrerez pour votre rapport mais le fait de pouvoir afficher ce genre de courbes ne doit pas **nécéssairement** faire partie des fonctionnalités accessibles a la personne qui utilise votre code. 
Donc selon moi, le plus clair du temps vos collegues, supérieurs et enseignants s'attendrons a un produit final qui ressemble a ça : ``` def optimize(x_0 : torch.tensor , func:Callable[[torch.tensor], torch.tensor] , precision = 1e-4, alpha = 0.01, max_it = 1000) -> (torch.tensor, int): """ Compute a gradient descent. Params: - x_0: Initial point - func: Function to optimize. - alpha: Learning rate - max_it: Maximum of iterations Returns: - x_v: Optimum """ x_0 = float(x_0) x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) it = 0 while (True): it += 1 f_t = func(x_v) f_t.backward() with torch.no_grad(): x_v -= alpha * x_v.grad if (np.abs(x_v.grad) < precision): break x_v.grad.zero_() if(it == max_it): break return x_v ``` ## Quelques conseils de développement Dans cette partie je vais aborder certaines spécificités a connaitre quand on développe une solution sous pyTorch. ### Utiliser les optimizers de pytorch Comme vous pouvez vous en douter nous ne sommes pas les premiers a penser a développer une classe qui permet d'optimiser une fonction. Pytorch propose pour cela le module Optimizer qui contient plusieurs algorithmes de descente de gradient plus ou moins sophistiqués. ``` def optimize(x_0 : torch.tensor , func:Callable[[torch.tensor], torch.tensor] , precision = 1e-4, alpha = 0.01, max_it = 1000) -> (torch.tensor, int): """ Compute a gradient descent. Params: - x_0: Initial point - func: Function to optimize. - alpha: Learning rate - max_it: Maximum of iterations Returns: - x_v: Optimum - it: Number of iterations before convergence. """ x_0 = float(x_0) x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) #SGD : Stochastic Gradient Descent optimizer = torch.optim.SGD( [x_v] , lr=alpha) it = 0 for i in range(max_it): it += 1 f_t = func(x_v) ## Attention ces trois lignes vous les verrez jusqu'a l'indigestion donc comprenez les bien ! 
## === Etape 01 : Refaire monter le gradient jusqu'au variables qui sont dans l'optimizer f_t.backward() ## == Etape 02 : On effectue une étape d'optimisation, cette étape peut être aussi simple que celle qu'on faisait ou être plus sophistiquée (en utilisant un moment par exemple) optimizer.step() ## == Etape 03 : On efface les gradients pour qu'ils ne s'accumulent pas optimizer.zero_grad() ## ==== return x_v optimize(10.0, f1) def optimize(x_0 : torch.tensor , func:Callable[[torch.tensor], torch.tensor] , precision = 1e-4, alpha = 0.01, max_it = 1000) -> (torch.tensor, int): """ Compute a gradient descent. Params: - x_0: Initial point - func: Function to optimize. - alpha: Learning rate - max_it: Maximum of iterations Returns: - x_v: Optimum - it: Number of iterations before convergence. """ x_0 = float(x_0) x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) #Adam Optimizer optimizer = torch.optim.Adam([x_v], lr=1e-1) it = 0 for i in range(max_it): it += 1 f_t = func(x_v) f_t.backward() optimizer.step() optimizer.zero_grad() return x_v optimize(10.0, f1, max_it=4000) ``` ### Exécuter sur un GPU voire un TPU Il arrive qu'on soit fréiné dans nos recherches par le nombre de simulations qu'on peut exécuter a cause du fait notamment qu'elles peuvent prendre beaucoup de temps, l'une des solutions qu'il est possible d'envisager est d'éxécuter les opérations tensorielles sur un GPU ou sur un TPU pour qu'elles soient plus rapides. Etant donné que vous disposez quasi-tous d'un GPU sur votre ordinateur nous allons d'abord montrer comment une exécuter sur GPU est possible. 
``` import torch import numpy as np #D'abord on assigne a cette variable la chaine "cuda" si ce périphérique est accessible device_gpu = 'cuda' if torch.cuda.is_available() else 'cpu' #Maintenant définissons une deuxième chaine pour qu'on puisse comparer les performances device_cpu = "cpu" #On crée des matrices mat1 = torch.rand(10) mat2 = torch.rand(10) mat2 #C'est comme ça qu'on transfère une matrice sur un périphérique mat1 = mat1.to(device_gpu) mat1 mat2 = mat2.to(device_cpu) mat2 #Essayons de faire des opérations par exemple une multiplication #mat1@mat2 #comme tout le monde peut le deviner ça va pas marcher parce qu'ils ne sont pas sur le même périphérique #Donc on les déplace mat2 = mat2.to(device_gpu) m = mat1@mat2 m #Et si on essayais de le transformer en un tenseur numpy (je dis ça au hasard) #arr = np.array(m) Oh mais quel surprise ! ça marche pas ! arr = np.array(m.cpu()) #m.cpu() permet de copier le contenu du tenseur m sur le CPU arr ``` A présent afin d'expérimenter les différence de performances au niveau du temps d'exécution on va définir une fonction qui va effectuer un certain nombre d'opérations et qui aura comme paramètre un device sur lesquels les executer. ``` def simulation(device): mat1 = torch.rand((1000,1000)).to(device) ei = torch.eig(mat1) %timeit simulation("cpu") %timeit simulation("cuda") ``` Alors la ça se voit pas énormement mais en réalité quand on entrain des gros modèles ça fait une grande différence (pour plus d'informations regardez [ici](https://github.com/ilkarman/DeepLearningFrameworks) Maintenant examinons comment on peut exécuter des calculs sur un TPU. Déja une principale différence c'est que a part si vous êtes quelqu'un de chez Google a priori vous n'avez pas accès physiquement a un TPU donc il faut utiliser un TPU accessible en SaaS sur une plateforme Cloud (comme Collab). 
``` !pip install cloud-tpu-client==0.10 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.7-cp36-cp36m-linux_x86_64.whl import torch_xla.core.xla_model as xm device = xm.xla_device() %timeit simulation(device) ``` ## Quelques conseils sur comment investiger une erreur ``` """ A quoi sert le maximum d'itérations ? En général quand on code on n'est pas a l'abri des erreurs qui transforment la fonction objectif en une fonction non bornée ce qui fait que la boucle ne se termine jamais D'ou l'intéret de limiter son exécution ! Ainsi quand on tombe sur un cas comme le précedent ou l'algorithme se termine sur une valeur infini on peut investiger la raison de cela. """ X = torch.tensor(np.linspace(-1000,1000, 1000)) #je transforme en tenseur pour pouvoir utiliser la fonction précédente y = f2(X).detach().numpy() #Au passage c'est comme ça qu'on retransforme un tenseur pytorch en tenseur numpy, on verra plus tard le sens du detach plt.plot(X,y) #Ajouter de la verbosité ! def optimize(x_0, func, precision = 1e-4, alpha = 0.01, max_it = 1000, verbose = False): x_0 = float(x_0) x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) it = 0 while (True): it += 1 f_t = func(x_v) f_t.backward() with torch.no_grad(): x_v -= alpha * x_v.grad if (verbose): print("val: ", x_v) print("grad: ",x_v.grad) if (np.abs(x_v.grad) < precision): break x_v.grad.zero_() if(it == max_it): break return x_v, it x_v, it = optimize(20.0,f2,verbose = True,max_it = 10) x_v #Voire ajouter du visuel ! 
def optimize(x_0, func, precision = 1e-4, alpha = 0.01, max_it = 1000, verbose = False, visualize=False): x_0 = float(x_0) x = torch.tensor(x_0) x_v = Variable(x,requires_grad=True) it = 0 while (True): it += 1 f_t = func(x_v) f_t.backward() with torch.no_grad(): if (verbose): print("val: ", x_v) print("grad: ",x_v.grad) if (visualize): plt.scatter(x_v.detach().numpy(), f_t.detach().numpy()) if (np.abs(x_v.grad) < precision): break x_v -= alpha * x_v.grad x_v.grad.zero_() if(it == max_it): break return x_v, it X = torch.tensor(np.linspace(-40,40, 1000)) y = f2(X).detach().numpy() plt.plot(X,y) x_v, it = optimize(20.0,f2,verbose = True,max_it = 2,visualize=True) plt.show() ``` Ce phénomène est trés courant et s'appelle : **Divergence du gradient** Ce qui c'est passé c'est que j'étais a gauche de l'optimum et en métant a jour avec le gradient je me suis trop décalé a droite du coup le gradient était encore plus grand, et donc je suis allé encore plus a gauche ce qui a encore augmenté le gradient etc... La solution connue est : Diminuer le teau d'apprentissage. ``` x_v, it = optimize(20.0,f2,alpha=0.001,max_it = 100000) plt.show() print("optimum de f2 = ",x_v) print(f"En {it} itérations ! ") ``` # Gradient Descent : cas à plusieurs variables ### Concept de base Le mot dérivée partielles est un mot qui effraie beaucoup de gens alors qu'en réalité c'est juste l'extension du concept de dérivée a une fonction qui dépend de plusieurs paramètres. Reprenons la fonction précédente mais imaginons cette fois qu'elle prend deux paramètres. $$f(x, y) = (x-5)^2 + (y-3)^2 + 5$$ Première difficulté a envisager, comment représenter cette fonction ? Une façon simple est d'utiliser une "heatmap" c'est a dire une grille avec en abscisse les valeurs de x, en ordonnée celles de y et dont les couleurs des cases représenterons la valeur de la fonction f. 
``` #J'en profite pour montrer cette notation qui permet de définir facilement une fonction sur une seule ligne f_2 = lambda x,y : (x-5)**2 + (y-3)**2 + 5 #On initialise la grille grid = np.zeros((1000,1000)) for x in range(grid.shape[0]): for y in range(grid.shape[1]): grid[x][y] = f_2(x, y) #Plus c'est rouge plus c'est une grande valeur et plus c'est rouge plus c'est une petite valeur plt.imshow(grid, cmap="inferno") plt.scatter(5,3,c = "magenta") ``` Maintenant pour effectuer une descente de gradient il faut calculer les dérivées "partielles" c'est a dire les dérivées w.r.t (with respect to) chaque paramètre (en considérant tout les autres commet des constantes) et donc : $$\frac{\partial f}{\partial x} = 2(x-5)$$ Idem pour y $$\frac{\partial f}{\partial y} = 2(y-3)$$ Et donc le **gradient** de f est la fonction qui a chaque (x,y) associe le vecteur $$(2(x-5), 2(y-3))$$ qui correspond a la direction vers lequel f augmente localement (tel3a). ``` def f_2_p(x,y): return (2*(x-5), 2*(y-3)) #Et donc la descente de gradient fonctionne normalement x_0, y_0 = (500,800) f_2_p(x_0,y_0) ``` Donc a présent nous pouvons effectuer une descente de gradient comme précédemment : ``` x_0, y_0 = (800,800) x,y = x_0,y_0 plt.imshow(grid, cmap="inferno") plt.scatter(5,3,c = "blue") alpha = 0.1 for i in range(100): grad = f_2_p(x,y) plt.scatter(x,y,c="red") x = x - alpha*grad[0] y = y - alpha*grad[1] print(f"Finalement x= {x}, y = {y}") ``` ## Utilisation de Pytorch Bien évidemment on peut utiliser pytorch pour calculer automatiquement le gradient. 
``` def f(x,y): y_1 = x - 5 y_2 = y - 3 f_t = torch.pow(y_1, 2) + torch.pow(y_2, 2) + 5 return f_t x_0, y_0 = (800.0,800.0) x = Variable(torch.tensor(x_0),requires_grad=True) y = Variable(torch.tensor(y_0),requires_grad=True) f_t = f(x,y) make_dot(f_t) x_0, y_0 = (800.0,800.0) x = Variable(torch.tensor(x_0),requires_grad=True) y = Variable(torch.tensor(y_0),requires_grad=True) #On précise les deux variables optimizer = torch.optim.SGD([x,y], lr=1e-1) for i in range(100): f_t = f(x,y) f_t.backward() optimizer.step() optimizer.zero_grad() print(x,y) ``` ## Un dernier petit point conception : En réalité souvent la différence entre un tensor et une Variable est qu'une variable est généralement prévue pour contenir des paramètres a optimiser (ici typiquement x et y) alors que les variables tensors contiennent les données utilisées pour évaluer la fonction a optimiser (ici il n'y en a pas car f se calcule uniquement a partir de x et de y mais plus tard ça sera le dataset de test) Et les paramètres sont mises dans un vecteur (généralement appelé $\theta$ ) donc en l'occurrence plûtot que d'avoir deux paramètres il serait plus correct de créer un vecteur de deux éléments. ``` def f(theta): y_1 = theta[0] - 5 y_2 = theta[1] - 3 f_t = torch.pow(y_1, 2) + torch.pow(y_2, 2) + 5 return f_t theta_0 = torch.tensor([300. 
, 800.]) theta = Variable(theta_0,requires_grad=True) f_t = f(theta) make_dot(f_t) #On précise theta au lieu de x et y optimizer = torch.optim.SGD([theta], lr=1e-1) for i in range(100): f_t = f(theta) f_t.backward() optimizer.step() optimizer.zero_grad() print(theta) ``` # Recette : Comment définir un modèle Dans cette partie on va détailler les étapes a suivre dans la conception d'un modèle, ces étapes vont garentir que l'implémentation sous pytorch sera simple et efficace, on va les appliquer par la suite a deux cas concrêts : - **L'optimisation d'un modèle d'apprenitssage statistique par maximum de vraisemblance (Max Likelihood)** - **L'optimisation d'un modèle par minimisation d'un coût** Donc pour revenir a nos étapes, définir un modèle nécessite de passer par les étapes suivantes : 1. Définir les données, les paramètres 2. Préciser le calcul de la fonction a optimiser (faut qu'elle soit différentiable sinon la descente de gradient ne va pas fonctionner et il faudra se tourner vers un algorithme évolutionnaire cf mon dernier workshop) 3. Ecrire l'algorithme d'optimisation en précisant ces hyper-paramètres. C'est flou ? c'est normal c'est a ça que vont servir les examples. ## Maximum de vraisemblance pour le modèle le plus simple de l'univers ### Problématique Imaginons qu'on ait une pièce et qu'on veuille prédire les tirages qu'elle est suceptible de faire typiquement on veut répondre a la question : "combien de chances j'ai d'avoir pile et pile ?" Cette expérience (de jeter une pièce) admet deux résultats possibles : pile et face, donc on peut la modéliser en utilisant une distribution de bernouilli. ### Quelques notions en apprentissage statistique --- **Rappel** : Une distribution de bernouilli c'est une distribution paramètrée par un seul paramètre $\theta = [ p ]$ ce paramètre correspond a la probabilité d'obtenir le résultat 1. 
$$P(x=1, \theta = [p] ) = p$$ $$P(x=0, \theta = [p] ) = 1-p$$ --- On peut calculer la probabilité de notre ensemble de données en multipliant les probabilité de chaque tirage qui s'appelle **vraisemblance** de l'ensemble de donnée. $$\mathcal{L}(\theta) = P(x=X, \theta = [p] ) = \prod_{x_i \in X} P(x=x_i, \theta = [p] )$$ Et comme les produits on est pas méga fan parce que ça s'annule rapidement on aime bien passer au logarithme. $$log(\mathcal{L}(\theta)) = \sum_{x_i \in X} log(P(x=x_i, \theta = [p] ))$$ ### Et en pratique ça sert a quoi ? J'ai envie de conçevoir un modèle qui calcule la probabilité d'apparition d'un phénomène donné (ici un tirage de pièce), je choisi un modèle que je pense capable de modéliser le phénomène (ici un modèle de bernoulli) mais ce modèle va dépende de paramètre (ici la probabilité de faire pile) donc comment optimiser ce modèle ? **Solution** Avoir une base de ces phénomène me permet de calculer pour un $\theta$ donné la probabilité que le modéle ait généré cette base de donnée, cette probabilité s'appelle **vraisemblance** et si j'utilise la log-vraisemblance comme loss je peux optimiser mon $\theta$. Toujours un peu flou ? on va tester ça ! **Etape 01** La log vraisemblance dépend de deux choses : Les données (un tenseur) qui sont les tirages de pile ou face et les paramètres (un tenseur de variables) en l'occurrence un seul paramètre représentant la probabilité de tirer pile. **Etape 02** La fonction qu'on veut optimiser c'est la log-vraisemblance on veut l'augmenter donc on doit diminuer son négatif. 
``` #J'écris un vecteur de 0 et de 1 X = [1, 0, 1, 0, 1, 0, 0, 1] #Pour un theta donné la probabilité d'un x est facile a calculer def p(x,theta): p = theta[0] p2 = 1 - theta[0] pr1 = torch.pow(p,x) #Si x vaut 1 p^x vaut p sinon il vaut 1 pr2 = torch.pow(p,(1-x)) # Si x vaut 0 (1-x) vaut 1 et donc p^(1-x) vaut p sinon il vaut 1 return pr1*pr2 #Et donc la log-vraisemblance d'un échantillon de donnée est facile a calculer def Likelyhood(X,theta): p = theta[0] p2 = 1 - theta[0] P_X = torch.pow(p2,(1-X))*torch.pow(p,X) #S'applique sur un tenseur pareil que sur un scalaire return P_X theta = Variable(torch.tensor([0.2,]),requires_grad=True) Likelyhood(torch.tensor(X),theta) def logLikelihood(X,theta): datas = torch.tensor(X) return -torch.sum(torch.log(Likelyhood(datas,theta))) X = [1, 0, 1, 0, 1, 0, 0, 1] log_l = logLikelihood(X,theta) make_dot(log_l) #Quelqu'un peut dire pourquoi X est pas dans le graphe ? et qu'il est dans celui ci ? et lequel est plus correct def logLikelihood2(X,theta): datas = torch.tensor(X,requires_grad=True) return -torch.sum(torch.log(Likelyhood(datas,theta))) X = [1, 0, 1, 0, 1, 0, 0, 1] log_l = logLikelihood2(np.array(X).astype(float),theta) make_dot(log_l) ``` **Etape 03** On optimize ! C'est différentiable donc aucun souci de ce coté la. 
``` X = [1, 0, 1, 0, 1 , 1, 1, 1] def logLikelihood(X,theta): datas = torch.tensor(X) return -torch.sum(torch.log(Likelyhood(datas,theta))) theta = torch.rand((1,)) theta = Variable(theta, requires_grad=True) optimizer = torch.optim.SGD( [theta], lr=1e-3) for i in range(1000): log_l = logLikelihood(X,theta) log_l.backward() optimizer.step() optimizer.zero_grad() print(theta) X = np.random.randint(0,2,250) def logLikelihood(X,theta): datas = torch.tensor(X) return -torch.sum(torch.log(Likelyhood(datas,theta))) theta = torch.rand((1,)) theta = Variable(theta, requires_grad=True) optimizer = torch.optim.SGD([theta], lr=1e-3) for i in range(1000): log_l = logLikelihood(X,theta) log_l.backward() optimizer.step() optimizer.zero_grad() print(theta) print("Mean of X : ",X.mean()) ``` Donc pour faire bref vous pouvez modéliser n'importe quel phénomène par n'importe quel distribution qui vous viens en tête, vous pourrez toujours apprendre les paramètres de cette distribution en maximisant la vraisemblance par une descente de gradient. Ici le modèle est simple pour que vous puissiez facilement comprendre l'idée mais des modéles très sophistiqués existent dans la littérature voire [ici](https://towardsdatascience.com/probability-concepts-explained-maximum-likelihood-estimation-c7b4342fdbb1) si ça vous intéresse ## Approche par minimisation du coût la plus simple de l'univers : une regréssion linéaire Un modèle de regréssion linéaire tente de prédire une variable y a partir de plusieurs samples rangés dans une matrice X. Le nombre de colonnes de X représente le nombre d'attributs. Le nombre de lignes de X représente le nombre d'exemples. On va aborder successivement trois exemples : 1. Regression de données jouet un attribut (visualisable). 2. Regression de données jouet a plusieurs attributs. 
### Regression de données jouet avec un attribut ``` import numpy as np import matplotlib.pyplot as plt import torch from torch.autograd import Variable #On va commencer par une regression sur un seule attribut parce qu'on peut la visualiser facilement X = np.linspace(-10,10,50) X = torch.tensor(X) y = 3*X + 5 + 2*np.random.normal(0,1,X.shape) #La j'affiche le nuage de points plt.scatter(X,y) #La j'affiche la droite qui l'a généré et qu'on va essayer de reconstruire plt.plot(X,3*X + 5) #Rappel une droite c'est y = a.x +b donc ici il s'agit d'apprendre a et b donc un vecteur de variables a deux composante theta = [a,b] theta = torch.rand(2) theta = Variable(theta, requires_grad= True) theta #A présent il faut définir comment calculer les prédictions pour un vecteur de paramètres theta fixé et pour ce faire on va utiliser une astuce de calcul # [x,1]@[a,b].T donne ax + b donc on va ajouter une colonne de 1 et on aura X@theta qui générera une matrice colonne de prédictions #On génére autant de 1 qu'il n'y a de X o = torch.ones(X.shape[0]) #On les STACK (on ne les concatène pas) X_o = torch.stack([torch.tensor(X),o]) X_o.shape #On préférerais avoir 50 lignes et deux colonnes donc on transpose ça o = torch.ones(X.shape[0]) X_o = torch.stack([torch.tensor(X),o]).T #J'ai ajouté un .T X_o.shape #Donc la on peut facilement calculer la prédiction #Bon reflexe avant de faire une multiplication matricielle vérifier la forme de theta theta.shape #ça cloche parce que theta est un vecteur et X_o une matrice donc on va changer la shape avec view theta.view((2,1)) #J'ai mis (2,1) comme shape comme ça X_o@theta va donner (50,2)@(2,1) -> (50,1) -> (50) # testez ça torch.matmul(X_o,theta) y_p = torch.matmul(X_o.float(),theta) y_p import torch from torch.autograd import Variable #La loss qu'on va utiliser est la distance au sens des moindres carrés #On récapitule def loss(theta: torch.tensor,X : torch.tensor ,y_true: torch.tensor) -> torch.tensor: """ Compute the mean square error loss 
between the prediction and the real target Params: - theta: vector [a,b] of params - X: Data - y_true: labels Returns: -y_p: Predicted labels -loss: mean square loss """ y_p = torch.matmul(X.float(),theta) loss = torch.mean(torch.pow(y_p-y_true,2)) return y_p, loss #ça c'est des prétraitement theta = torch.rand(2) theta = Variable(theta, requires_grad= True) theta.view((2,1)) o = torch.ones(X.shape[0]) X_o = torch.stack([torch.tensor(X),o]).T #On test la fonction y_true = torch.tensor(y) loss(theta,X_o,y_true) #Boucle d'optimisation theta = torch.rand(2) theta = Variable(theta, requires_grad= True) theta.view((2,1)) o = torch.ones(X.shape[0]) X_o = torch.stack([torch.tensor(X),o]).T y_true = torch.tensor(y) optimizer = torch.optim.SGD([theta], lr=1e-2) for i in range(20000): _, mse_loss = loss(theta,X_o,y_true) #Osef des prédictions mse_loss.backward() optimizer.step() optimizer.zero_grad() theta.detach().numpy() #Voyons voire la droite qu'on obtient t = theta.detach().numpy() t = t.reshape((2,1)) X_o = X_o.reshape((50,2)) pred = X_o@t plt.plot(X,pred) plt.plot(X,3*X + 5, c="red") plt.scatter(X,y) ``` ### Regression de données jouet avec plusieurs attributs On génère plusieurs colonnes de données cette fois, ``` #On génère des données jouet N_LIGNES = 1000 N_COLONNES = 15 X = np.random.random((N_LIGNES,N_COLONNES)) X = torch.tensor(X) X.shape #On défini un theta_v qui va nous servir a générer un y theta_v = np.random.randint(0,40,N_COLONNES).reshape((N_COLONNES,1)) theta_v = torch.tensor(theta_v).float() print(theta_v.shape) print(theta_v.T) #On génère les y y_t = torch.matmul(X.float(),theta_v) y = y_t + 2*np.random.normal(0,1,y_t.shape) y.shape X= torch.tensor(X).float() y_true = torch.tensor(y).float() #On récapitule def loss(theta: torch.tensor,X : torch.tensor ,y_true: torch.tensor) -> torch.tensor: """ Compute the mean square error loss between the prediction and the real target Params: - theta: vector of params - X: Data - y_true: labels Returns: -y_p: 
Predicted labels -loss: mean square loss """ y_p = torch.matmul(X.float(),theta) loss = torch.mean(torch.pow(y_p-y_true,2)) return y_p, loss from IPython.display import clear_output, display theta = torch.rand(N_COLONNES).view((N_COLONNES,1)) theta = Variable(theta.float(), requires_grad= True) theta.view((N_COLONNES,1)) optimizer = torch.optim.Adam([theta], lr=1e-1) for i in range(10000): _, mse_loss = loss(theta,X,y_true) #Osef des prédictions mse_loss.backward() optimizer.step() optimizer.zero_grad() display(mse_loss) theta.detach().numpy() theta_v.T _, mse_loss = loss(theta_v,X,y_true) mse_loss ``` ## Utiliser des modules pour encapsuler un modèle prédictif ``` import torch.nn as nn class Regression(nn.Module): def __init__(self, nb_colonnes): super().__init__() self.theta = nn.Parameter(torch.randn(nb_colonnes, requires_grad=True, dtype=torch.float).view((nb_colonnes,1))) #A l'intérieur d'un module on utilie nn.Parametre pour définir un paramètre. self.biais = nn.Parameter(torch.randn(1)) def forward(self, x): #Cette méthode sert a préciser comment a partir d'un x de taille (? , NB_COLONNES) on peut générer un vecteur colonne (? 
, 1) de prédictions #Pour l'utiliser on appelle le module qu'on a construit comme si c'était une fonction return torch.matmul(x,self.theta) + self.biais #On check que ça marche comme prévu R = Regression(X.shape[1]) for x,y in zip(X,y): print(R(x).shape) break #On peut même print le modèle !pip install torchviz from torchviz import make_dot p = R(x) make_dot(p) R = Regression(X.shape[1]) losses = [] optimizer = torch.optim.SGD(R.parameters(), lr=1e-4) mse_loss = nn.MSELoss(reduction='mean') for epoch in range(1000): R.train() #ça c'est pour mettre le modèle en mode apprentissage for batch_x,batch_y in zip(X.float(),y.float()): p = R(batch_x) loss = mse_loss(batch_y, p) loss.backward() losses.append(loss.item()) optimizer.step() optimizer.zero_grad() R = Regression(X.shape[1]) losses = [] optimizer = torch.optim.SGD(R.parameters(), lr=1e-2) mse_loss = nn.MSELoss(reduction='mean') for epoch in range(1000): R.train() #ça c'est pour mettre le modèle en mode apprentissage for batch_x,batch_y in zip(X.float(),y.float()): p = R(batch_x) loss = mse_loss(batch_y.view((-1,)), p.view((-1,))) loss.backward() losses.append(loss.item()) optimizer.step() optimizer.zero_grad() print("Training loss : ",loss.item()) plt.plot(np.arange(len(losses)), losses) ``` # Et les réseaux de neurones dans tout ça ? Aprés toutes ces interludes, vous devez probablement vous demander le rapport entre tout ce qu'on a fait et les réseaux de neurones. Pour faire bref, ce qu'on a fait en dernier était en réalité un réseau de neurones, un réseau de neurones a un seul noeud. 
``` import torch.nn as nn def train_step_constructor(model,loss_f,optimizer): def train_step(batch_x, batch_y): p = model(batch_x) loss = loss_f(batch_y.view((-1,)), p.view((-1,))) loss.backward() optimizer.step() optimizer.zero_grad() return loss.item() return train_step class Regression_NN(nn.Module): def __init__(self, nb_colonnes): super().__init__() self.l1 = nn.Linear(nb_colonnes, 1, bias = True) #(prend un vecteur (?,nb_colonnes) et en renvoie un (?,1) correspondant a la prédiction) def forward(self, x): return self.l1(x) from torch.utils.data import Dataset, TensorDataset, DataLoader from tqdm.notebook import tqdm class Toy_Dataset(Dataset): def __init__(self, x_tensor, y_tensor): self.x = x_tensor self.y = y_tensor def __getitem__(self, index): return (self.x[index], self.y[index]) def __len__(self): return len(self.x) train_data = Toy_Dataset(X.float(), y_true.float()) y_true.shape def train_NN(alpha = 1e-5, epochs = 100, device = "cpu", batch_size = 100) -> (nn.Module, list, list): """ Perform the training on the x_tensor, y_tensor splitted. 
Params: - alpha : Learning rate Returns: - R: Trained Model - losses: Training Losses - val_losses: Validation Losses """ losses = [] val_losses = [] R = Regression_NN(X.shape[1]).to(device) optimizer = torch.optim.Adam(R.parameters(), lr=alpha) mse_loss = nn.MSELoss(reduction='mean') train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True) train_step = train_step_constructor(R, mse_loss, optimizer) outer = tqdm(total=epochs, desc='Epochs', position=0) for epoch in range(epochs): inner = tqdm(total=len(train_loader), desc=f'Batch {epoch+1}', position=1) for batch_x,batch_y in train_loader: inner.update(1) batch_x = batch_x.to(device) batch_y = batch_y.to(device) R.train() l = train_step(batch_x, batch_y) losses.append(l) inner.set_postfix({'loss': l}) outer.update(1) print("Model params: ",list(R.parameters())) return R, losses, val_losses R, losses, val_losses = train_NN(1e-1, 50, batch_size = 5) theta_v.T class Regression_DNN(nn.Module): def __init__(self, nb_colonnes): super().__init__() self.l1 = nn.Linear(nb_colonnes, 24, bias = True) self.l2 = nn.Linear(24, 24, bias = True) self.l3 = nn.Linear(24, 1, bias=True ) def forward(self, x): x = self.l1(x) x = self.l2(x) return self.l3(x) from tqdm.notebook import tqdm def train_DNN(alpha = 1e-5, epochs = 100, device = "cpu", batch_size = 100) -> (nn.Module, list, list): """ Perform the training on the x_tensor, y_tensor splitted. 
Params: - alpha : Learning rate Returns: - R: Trained Model - losses: Training Losses - val_losses: Validation Losses """ losses = [] val_losses = [] R = Regression_DNN(x_train_tensor.shape[1]).to(device) optimizer = torch.optim.Adam(R.parameters(), lr=alpha) mse_loss = nn.MSELoss(reduction='mean') train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True) train_step = train_step_constructor(R,mse_loss, optimizer) #ça c'est la barre du nombre d'epochs outer = tqdm(total=epochs, desc='Epochs', position=0) for epoch in range(epochs): #On crée une barre par epoch inner = tqdm(total=len(train_loader), desc=f'Batch {epoch+1}', position=1) for batch_x,batch_y in train_loader: #On ajoute une étape d'entrainement inner.update(1) batch_x = batch_x.to(device) batch_y = batch_y.to(device) R.train() l = train_step(batch_x, batch_y) losses.append(l) R.eval() pred_val = R(x_test_tensor.to(device)) val_loss = mse_loss(y_test_tensor.view((-1,)).to(device), pred_val.view((-1,)).to(device)) val_losses.append(val_loss.item()) #MAJ de l'affichage de l'epoch courant inner.set_postfix({'val_loss': val_loss.item(),'loss': l}) ###### #On ajoute le nombre d'epochs outer.update(1) return R, losses, val_losses R, losses, val_losses = train_DNN(1e-2, 50, batch_size = 64, device ="cuda") mse_loss = nn.MSELoss(reduction='mean') pred_val = R(x_test_tensor.to("cuda")).to("cpu") val_loss = mse_loss(y_test_tensor.view((-1,)), pred_val.view((-1,))) val_loss make_dot(val_loss) ``` Dernier petit point sur l'enregistrement d'un modèle. ``` torch.save(R.state_dict(), "out.net") !ls model = Regression_DNN(x_train_tensor.shape[1]) model.load_state_dict(torch.load("out.net")) model.eval() #Mode inférence pred_val = model(x_test_tensor) val_loss = mse_loss(y_test_tensor.view((-1,)), pred_val.view((-1,))) val_loss ```
github_jupyter
# Run TFLite Converter from Arachne Here, we explain how to use the TFLite Converter from Arachne especially focusing on controlling the tool behavior. ## Prepare a Model First, we have to prepare a model to be used in this tutorial. Here, we will use a ResNet-50 v2 model tuning for the `tf_flowers` dataset. ``` import tensorflow as tf import tensorflow_datasets as tfds # Initialize a model model = tf.keras.applications.resnet_v2.ResNet50V2(weights=None, classes=5) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=["accuracy"]) model.summary() # Load the tf_flowers dataset train_dataset, val_dataset = tfds.load( "tf_flowers", split=["train[:90%]", "train[90%:]"], as_supervised=True ) # Preprocess the datasets def preprocess_dataset(is_training=True): def _pp(image, label): if is_training: image = tf.image.resize(image, (280, 280)) image = tf.image.random_crop(image, (224, 224, 3)) image = tf.image.random_flip_left_right(image) else: image = tf.image.resize(image, (224, 224)) image = tf.keras.applications.imagenet_utils.preprocess_input(x=image, mode='tf') label = tf.one_hot(label, depth=5) return image, label return _pp def prepare_dataset(dataset, is_training=True): dataset = dataset.map(preprocess_dataset(is_training), num_parallel_calls=tf.data.AUTOTUNE) return dataset.batch(16).prefetch(tf.data.AUTOTUNE) train_dataset = prepare_dataset(train_dataset, True) val_dataset = prepare_dataset(val_dataset, False) # Training model.fit(train_dataset, validation_data=val_dataset, epochs=20) model.evaluate(val_dataset) model.save("/tmp/resnet50-v2.h5") ``` ## Run TFLite Converter from Arachne Now, let's convert the model into a TFLite model by Arachne. To use the TFLite Converter, we have to specify `+tools=tflite_converter` to `arachne.driver.cli`. Available options can be seen by adding `--help`. ``` %%bash python -m arachne.driver.cli +tools=tflite_converter --help ``` ### Convert with FP32 Precision First, we will start with the simplest case. 
You can convert a TF model into a TFLite model without post-training quantization (PTQ) with the following command. ``` %%bash python -m arachne.driver.cli +tools=tflite_converter model_file=/tmp/resnet50-v2.h5 output_path=/tmp/output_fp32.tar ``` To check the converted model, unpack the output TAR file and inspect the tflite model file with a model viewer such as Netron. ``` %%bash tar xf /tmp/output_fp32.tar -C /tmp ls /tmp/model_0.tflite ``` ### Convert with Dynamic-Range or FP16 Precision To convert with dynamic-range or FP16 precision, just set `dynamic_range` or `fp16` to the `tools.tflite_converter.ptq.method` option. ``` %%bash python -m arachne.driver.cli +tools=tflite_converter model_file=/tmp/resnet50-v2.h5 output_path=/tmp/output_dr.tar \ tools.tflite_converter.ptq.method=dynamic_range python -m arachne.driver.cli +tools=tflite_converter model_file=/tmp/resnet50-v2.h5 output_path=/tmp/output_fp16.tar \ tools.tflite_converter.ptq.method=fp16 ``` ### Convert with INT8 Precision To convert with INT8 precision, we need to calibrate or estimate the range of all floating-point tensors in the model. We provide an interface to feed the dataset to be used in the calibration. First, we have to prepare an NPY file that contains a list of `np.ndarray`, which is the dataset used for calibration. ``` import numpy as np calib_dataset = [] for image, label in val_dataset.unbatch().batch(1).take(100): calib_dataset.append(image.numpy()) np.save("/tmp/calib_dataset.npy", calib_dataset) ``` Next, specify `int8` to the `tools.tflite_converter.ptq.method` option and pass the NPY file to the `tools.tflite_converter.ptq.representative_dataset`.
``` %%bash python -m arachne.driver.cli +tools=tflite_converter model_file=/tmp/resnet50-v2.h5 output_path=/tmp/output_int8.tar \ tools.tflite_converter.ptq.method=int8 tools.tflite_converter.ptq.representative_dataset=/tmp/calib_dataset.npy ``` ## Run TFLite Converter from Arachne Python Interface The following code shows an example of using the TFLite Converter from Arachne Python interface. ``` from arachne.utils.model_utils import init_from_file, save_model from arachne.tools.tflite_converter import TFLiteConverter, TFLiteConverterConfig model_file_path = "/tmp/resnet50-v2.h5" input = init_from_file(model_file_path) cfg = TFLiteConverterConfig() # plz modify the config object to control the converter behavior # cfg.ptq.method = "FP16" output = TFLiteConverter.run(input, cfg) save_model(model=output, output_path="/tmp/output.tar") ```
github_jupyter
# C rate dependence Peter Attia, April 18 2020 This notebook has Butler-Volmer fits to C rate, which I realized later isn't valid. I kept the notebook here for reference. ``` import re from pathlib import Path import numpy as np import pandas as pd import scipy.stats from scipy.signal import savgol_filter from scipy.signal import find_peaks from scipy.optimize import curve_fit from scipy.constants import R from scipy.constants import physical_constants from scipy.stats import pearsonr import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib import rcParams from matplotlib.ticker import AutoMinorLocator from mpl_toolkits.axes_grid1.inset_locator import inset_axes from matplotlib.legend_handler import HandlerBase ``` Set plotting settings: ``` fig_width = 3.25 #ECS spec is 3.25" width fig_height = (3/4)*fig_width # standard ratio rcParams['lines.markersize'] = 5 rcParams['lines.linewidth'] = 1.0 rcParams['font.size'] = 7 rcParams['legend.fontsize'] = 7 rcParams['legend.frameon'] = False rcParams['font.sans-serif'] = 'Arial' rcParams['mathtext.fontset'] = 'custom' rcParams['mathtext.rm'] = 'Arial' rcParams['pdf.fonttype'] = 42 rcParams['ps.fonttype'] = 42 figpath = Path.cwd().parent / 'figures' ``` ## Load data Metadata: ``` metadata = pd.read_csv('masses.csv') metadata ``` ## Constant current data Raw data: ``` files_constcurr = list((Path.cwd() / 'constant current data').glob('*.txt')) files_constcurr ``` Parse and process: ``` all_data_constcurr = {} for file in files_constcurr: sub_dict = {} # C rate parsing C_rate = int(re.findall(r'\d+', file.name.split('_')[1])[0]) C_rate_string = file.name.split('_')[1].replace('over', '/') if 'Cover' in file.name: C_rate = 1 / C_rate # Logging print(f'{file.stem}: {C_rate}, {C_rate_string}') # Load data data = np.loadtxt(file, skiprows=1) # Get mass mass = metadata[metadata['File name'] == file.stem]['Mass (mg)'].iloc[0] / 1000 # mg -> g # Normalize capacity by mass data[:, 2] = data[:, 2] / mass # mAh ->
mAh/g # Index different cycle numbers first_lith_idx = np.where(data[:, 0] == 1)[0][0] second_cycle_idx = np.where(data[:, 0] == 2)[0][0] second_lith_idx = first_lith_idx + np.where(data[first_lith_idx:second_cycle_idx, 1] == np.max(data[first_lith_idx:second_cycle_idx, 1]))[0][0] try: third_cycle_idx = np.where(data[:, 0] == 3)[0][0] third_lith_idx = second_cycle_idx + np.where(data[second_cycle_idx:third_cycle_idx, 1] == np.max(data[second_cycle_idx:third_cycle_idx, 1]))[0][0] except IndexError: # 10C data has only 2 cycles third_lith_idx = second_cycle_idx + np.where(data[second_cycle_idx:, 1] == np.max(data[second_cycle_idx:, 1]))[0][0] # Extract (de)lith steps first_lith_data = data[:first_lith_idx] first_delith_data = data[first_lith_idx:second_lith_idx] second_lith_data = data[second_lith_idx:second_cycle_idx] second_delith_data = data[second_cycle_idx:third_lith_idx] # Get first-lithiation capacity above/below 0.5V idx_0pt5V = np.where(data[:, 1] < 0.5)[0][0] Qlith_cyc1_above0pt5 = np.max(first_lith_data[:idx_0pt5V]) Qlith_cyc1_below0pt5 = np.max(first_lith_data) - Qlith_cyc1_above0pt5 # Get first-lithiation time I = 200 * C_rate # 1C = 200 mAh/g tlith_cyc1 = np.max(first_lith_data) / I # t = Q/I; mAh/g / mA/g -> h # Get capacities for CE Qlith_cyc2 = np.max(second_lith_data) Qdelith_cyc2 = np.max(second_delith_data) CE_cyc2 = 100 * Qdelith_cyc2 / Qlith_cyc2 # Create dictionary sub_dict['First lith data'] = first_lith_data sub_dict['First delith data'] = first_delith_data sub_dict['Second lith data'] = second_lith_data sub_dict['Second delith data'] = second_delith_data sub_dict['C rate'] = C_rate sub_dict['C rate string'] = C_rate_string sub_dict['1st lith capacity above 0.5V'] = Qlith_cyc1_above0pt5 sub_dict['1st lith capacity below 0.5V'] = Qlith_cyc1_below0pt5 sub_dict['1st lith time'] = tlith_cyc1 sub_dict['2nd cycle CE'] = CE_cyc2 if C_rate_string != '10C': Q = first_lith_data[:,2] V = first_lith_data[:,1] # Filter and get dQ/dV Q_filt = 
savgol_filter(Q, 101, 1) V_filt = savgol_filter(V, 101, 1) dQdV = np.gradient(Q_filt, V_filt) # Get V_peak idx1pt2 = np.where(V_filt < 1.2)[0][0] idx0pt5 = np.where(V_filt < 0.5)[0][0] peak_idx, _ = find_peaks(-dQdV[idx1pt2:idx0pt5], distance=1e7) peak_idx = idx1pt2 + peak_idx[0] V_peak = V_filt[peak_idx] dQdV_peak = dQdV[peak_idx] sub_dict['V_filt'] = V_filt sub_dict['dQdV'] = dQdV sub_dict['V_peak'] = V_peak sub_dict['dQdV_peak'] = dQdV_peak # Append to main dictionary all_data_constcurr[file.stem] = sub_dict ``` Sort: ``` all_data_constcurr = {k: v for k, v in sorted(all_data_constcurr.items(), key=lambda item: item[1]['C rate'])} ``` Min/max V_peak: ``` for k, (key, value) in enumerate(all_data_constcurr.items()): if '10C' not in key: print(f"{key}: {value['V_peak']}") ``` ## Multistep data Raw data: ``` files_multistep = list((Path.cwd() / 'multistep current data').glob('*.txt')) files_multistep ``` Parse and process: ``` all_data_multistep = {} for file in files_multistep: sub_dict = {} # C rate parsing C_rate = int(re.findall(r'\d+', file.name.split('_')[1])[0]) C_rate_string = file.name.split('_')[1].replace('over', '/') + '(0.5V)-C/10' if C_rate_string == 'C/10(0.5V)-C/10': C_rate_string = 'C/10' if 'Cover' in file.name: C_rate = 1 / C_rate # Logging print(f'{file.stem}: {C_rate}, {C_rate_string}') # Load data data = np.loadtxt(file, skiprows=1) # Get mass mass = metadata[metadata['File name'] == file.stem]['Mass (mg)'].iloc[0] / 1000 # mg -> g # Normalize capacity by mass data[:, 2] = data[:, 2] / mass # mAh -> mAh/g # Index different cycle numbers first_lith_idx = np.where(data[:, 0] == 1)[0][0] second_cycle_idx = np.where(data[:, 0] == 2)[0][0] third_cycle_idx = np.where(data[:, 0] == 3)[0][0] second_lith_idx = first_lith_idx + np.where(data[first_lith_idx:second_cycle_idx, 1] == np.max(data[first_lith_idx:second_cycle_idx, 1]))[0][0] third_lith_idx = second_cycle_idx + np.where(data[second_cycle_idx:third_cycle_idx, 1] == 
np.max(data[second_cycle_idx:third_cycle_idx, 1]))[0][0] # Extract (de)lith steps first_lith_data = data[:first_lith_idx] first_delith_data = data[first_lith_idx:second_lith_idx] second_lith_data = data[second_lith_idx:second_cycle_idx] second_delith_data = data[second_cycle_idx:third_lith_idx] # Get first-lithiation capacity above/below 0.5V # I exclude the capacity above 0.5 V *after* the overpotential relaxation idx_0pt5V_above = np.where(data[:, 1] < 0.501)[0][0] idx_0pt5V_below = np.where(data[:, 1] < 0.499)[0][0] Qlith_cyc1_above0pt5 = np.max(first_lith_data[:idx_0pt5V_above]) Qlith_intermediate = first_lith_data[idx_0pt5V_below, 2] - first_lith_data[idx_0pt5V_above, 2] Qlith_cyc1_below0pt5 = np.max(first_lith_data) - Qlith_cyc1_above0pt5 - Qlith_intermediate # Get first-lithiation time. a = before split, b = after split Ia = 200 * C_rate # 1C = 200 mAh/g Ib = 200 * 0.1 # 1C = 200 mAh/g tlith_cyc1a = Qlith_cyc1_above0pt5 / Ia # t = Q/I; mAh/g / mA/g -> h tlith_cyc1b = (Qlith_intermediate + Qlith_cyc1_below0pt5) / Ib # t = Q/I; mAh/g / mA/g -> h # Get capacities for CE Qlith_cyc2 = np.max(second_lith_data) Qdelith_cyc2 = np.max(second_delith_data) CE_cyc2 = 100 * Qdelith_cyc2 / Qlith_cyc2 # Create dictionary sub_dict['First lith data'] = first_lith_data sub_dict['First delith data'] = first_delith_data sub_dict['Second lith data'] = second_lith_data sub_dict['Second delith data'] = second_delith_data sub_dict['C rate'] = C_rate sub_dict['C rate string'] = C_rate_string sub_dict['1st lith capacity above 0.5V'] = Qlith_cyc1_above0pt5 sub_dict['1st lith capacity below 0.5V'] = Qlith_cyc1_below0pt5 sub_dict['1st lith time'] = tlith_cyc1a + tlith_cyc1b sub_dict['2nd cycle CE'] = CE_cyc2 if C_rate_string != '10C(0.5V)-C/10': Q = first_lith_data[:idx_0pt5V_above,2] V = first_lith_data[:idx_0pt5V_above,1] # Filter and get dQ/dV Q_filt = savgol_filter(Q, 101, 1) V_filt = savgol_filter(V, 101, 1) dQdV = np.gradient(Q_filt, V_filt) # Get V_peak idx1pt2 = np.where(V_filt 
< 1.2)[0][0] peak_idx, _ = find_peaks(-dQdV[idx1pt2:], distance=1e7) peak_idx = idx1pt2 + peak_idx[0] V_peak = V_filt[peak_idx] dQdV_peak = dQdV[peak_idx] sub_dict['V_filt'] = V_filt sub_dict['dQdV'] = dQdV sub_dict['V_peak'] = V_peak sub_dict['dQdV_peak'] = dQdV_peak # Append to main dictionary all_data_multistep[file.stem] = sub_dict # Plot as sanity check plt.figure() plt.plot(first_lith_data[:, 2], first_lith_data[:, 1]) plt.plot(first_lith_data[idx_0pt5V_above, 2], first_lith_data[idx_0pt5V_above, 1], '.k') plt.plot(first_lith_data[idx_0pt5V_below, 2], first_lith_data[idx_0pt5V_below, 1], 'sk') ``` Sort: ``` all_data_multistep = {k: v for k, v in sorted(all_data_multistep.items(), key=lambda item: item[1]['C rate'])} ``` ## 10C supplementary figure We can see the peak in the second cycle of 10C ``` class AnyObjectHandler(HandlerBase): def create_artists(self, legend, orig_handle, x0, y0, width, height, fontsize, trans): l1 = plt.Line2D([x0,y0+width], [0.7*height,0.7*height], color=orig_handle[0]) l2 = plt.Line2D([x0,y0+width], [0.3*height,0.3*height], color=orig_handle[1]) return [l1, l2] fig, ax = plt.subplots(figsize=(fig_width, fig_height), nrows=1, ncols=1) colors_blue = cm.Blues(np.linspace(0.9, 0.3, 5))[:,0:3] colors_red = cm.Reds( np.linspace(0.9, 0.3, 5))[:,0:3] ax.set_xlabel('Capacity (mAh g$^{-1}$)') ax.set_ylabel('Voltage (V)') ax.set_xlim([0, 420]) ax.set_ylim([0, 1.2]) ax.xaxis.set_minor_locator(AutoMinorLocator()) ax.yaxis.set_minor_locator(AutoMinorLocator()) ax_inset0 = inset_axes(ax, width='100%', height='100%', bbox_to_anchor=(0.58, 0.375, 0.35, 0.35), bbox_transform=ax.transAxes, loc='upper left') ax_inset0.set_xlabel('Voltage (V)', labelpad=-0.1) ax_inset0.set_ylabel('dQ/dV') ax_inset0.xaxis.set_minor_locator(AutoMinorLocator()) ax_inset0.set_xlim([0.6, 1.0]) ax_inset0.set_ylim([-500, 250]) ax_inset0.get_yaxis().set_ticks([]) ax_inset0.axhline(0, color='tab:gray') lith1 = all_data_constcurr['cellC_10C_CF3']['First lith data'] delith1 = 
all_data_constcurr['cellC_10C_CF3']['First delith data'] lith2 = all_data_constcurr['cellC_10C_CF3']['Second lith data'] delith2 = all_data_constcurr['cellC_10C_CF3']['Second delith data'] ax.plot(lith1[:,2], lith1[:,1], color=colors_blue[0]) ax.plot(delith1[:,2], delith1[:,1], color=colors_red[0]) ax.plot(lith2[:,2], lith2[:,1], color=colors_blue[1]) ax.plot(delith2[:,2], delith2[:,1], color=colors_red[1]) # Filter and get dQ/dV filter_size = 101 Q_filt_lith1 = savgol_filter(lith1[:,2], filter_size, 1) V_filt_lith1 = savgol_filter(lith1[:,1], filter_size, 1) dQdV_lith1 = np.gradient(Q_filt_lith1, V_filt_lith1) Q_filt_delith1 = savgol_filter(delith1[:,2], filter_size, 1) V_filt_delith1 = savgol_filter(delith1[:,1], filter_size, 1) dQdV_delith1 = np.gradient(Q_filt_delith1, V_filt_delith1) Q_filt_lith2 = savgol_filter(lith2[:,2], filter_size, 1) V_filt_lith2 = savgol_filter(lith2[:,1], filter_size, 1) dQdV_lith2 = np.gradient(Q_filt_lith2, V_filt_lith2) Q_filt_delith2 = savgol_filter(delith2[:,2], filter_size, 1) V_filt_delith2 = savgol_filter(delith2[:,1], filter_size, 1) dQdV_delith2 = np.gradient(Q_filt_delith2, V_filt_delith2) ax_inset0.plot(V_filt_lith1, dQdV_lith1, color=colors_blue[0]) ax_inset0.plot(V_filt_delith1, dQdV_delith1, color=colors_red[0]) ax_inset0.plot(V_filt_lith2, dQdV_lith2, color=colors_blue[1]) ax_inset0.plot(V_filt_delith2, dQdV_delith2, color=colors_red[1]) ax.legend([(colors_blue[0], colors_red[0]), (colors_blue[1], colors_red[1])], ['Cycle 1', 'Cycle 2'], handler_map={tuple: AnyObjectHandler()}, loc='upper right', title='10C') plt.tight_layout() plt.savefig(figpath / 'C_rate_10C.eps', bbox_inches='tight', format='eps') ``` ## Create plot Create colors: ``` colors_constcurr = cm.Blues( np.linspace(0.4, 0.9, len(all_data_constcurr.items())))[:,0:3] colors_multistep = cm.Purples( np.linspace(0.4, 0.9, len(all_data_multistep.items())))[:,0:3] def make_legend(ax, loc): ax.plot(-1, 1, 'ok', label='Constant current') ax.plot(-1, 1, 'sk', 
label='Multistep current') ax.legend(loc=loc) fig, ax = plt.subplots(figsize=(fig_width*2, fig_height*3), nrows=3, ncols=2) ax = ax.ravel() # Set main axes labels for k, a in enumerate(ax): a.set_title(chr(97+k), loc='left', weight='bold') if k == 0 or k == 1: a.set_xlabel('Capacity (mAh g$^{-1}$)') else: a.set_xlabel('C rate/C rate before 0.5V') make_legend(a, 'lower left' if k != 2 else 'best') ax[0].set_ylabel('Voltage (V)') ax[1].set_ylabel('Voltage (V)') ax[2].set_ylabel(r'1$^{\rm st}$-lithiation capacity above 0.5 V (mAh g$^{-1}$)') ax[3].set_ylabel(r'1$^{\rm st}$-lithiation capacity below 0.5 V (mAh g$^{-1}$)') ax[4].set_ylabel(r'1$^{\rm st}$-lithiation time (hours)') ax[5].set_ylabel(r'2$^{\rm nd}$-cycle Coulombic efficiency (%)') ax[0].set_xlim([0, 800]) ax[1].set_xlim([0, 800]) ax[0].set_ylim([0, 1.2]) ax[1].set_ylim([0, 1.2]) ax[2].set_ylim([0, 450]) ax[3].set_ylim([0, 450]) ax[5].set_ylim([40, 100]) ax[0].xaxis.set_minor_locator(AutoMinorLocator()) ax[1].xaxis.set_minor_locator(AutoMinorLocator()) ax[0].yaxis.set_minor_locator(AutoMinorLocator()) ax[1].yaxis.set_minor_locator(AutoMinorLocator()) ax[2].yaxis.set_minor_locator(AutoMinorLocator()) ax[3].yaxis.set_minor_locator(AutoMinorLocator()) ax[5].yaxis.set_minor_locator(AutoMinorLocator()) ax[1].plot([0, 350], [0.5, 0.5], color='tab:red', lw=0.5, ls=':') ## Constant current cycling for k, (key, value) in enumerate(all_data_constcurr.items()): # Extract values first_lith_data = value['First lith data'] C_rate = value['C rate'] C_rate_string = value['C rate string'] CE_cyc2 = value['2nd cycle CE'] tlith_cyc1 = value['1st lith time'] Qlith_cyc1_above0pt5 = value['1st lith capacity above 0.5V'] Qlith_cyc1_below0pt5 = value['1st lith capacity below 0.5V'] Q = first_lith_data[:, 2] V = first_lith_data[:, 1] ax[0].plot(Q, V, color=colors_constcurr[k], label=C_rate_string) ax[2].semilogx(C_rate, Qlith_cyc1_above0pt5, 'o', color=colors_constcurr[k]) ax[3].semilogx(C_rate, Qlith_cyc1_below0pt5, 'o', 
color=colors_constcurr[k]) ax[4].loglog(C_rate, tlith_cyc1, 'o', color=colors_constcurr[k]) ax[5].semilogx(C_rate, CE_cyc2, 'o', color=colors_constcurr[k]) ax[0].legend(loc='upper right', title='Constant current') ## Multistep cycling for k, (key, value) in enumerate(all_data_multistep.items()): # Extract values first_lith_data = value['First lith data'] C_rate = value['C rate'] C_rate_string = value['C rate string'] CE_cyc2 = value['2nd cycle CE'] tlith_cyc1 = value['1st lith time'] Qlith_cyc1_above0pt5 = value['1st lith capacity above 0.5V'] Qlith_cyc1_below0pt5 = value['1st lith capacity below 0.5V'] Q = first_lith_data[:, 2] V = first_lith_data[:, 1] ax[1].plot(Q, V, color=colors_multistep[k], label=C_rate_string) ax[2].semilogx(C_rate, Qlith_cyc1_above0pt5, 's', color=colors_multistep[k]) ax[3].semilogx(C_rate, Qlith_cyc1_below0pt5, 's', color=colors_multistep[k]) ax[4].loglog(C_rate, tlith_cyc1, 's', color=colors_multistep[k]) ax[5].semilogx(C_rate, CE_cyc2, 's', color=colors_multistep[k]) ax[1].legend(loc='upper right', title='Multistep current') plt.tight_layout() plt.savefig(figpath / 'C_rate.eps', bbox_inches='tight', format='eps') ``` ## Correlation analysis ``` def make_legend(ax, loc): ax.plot(-100, 1, 'ok', label='Constant current') ax.plot(-100, 1, 'sk', label='Multistep current') ax.legend(loc=loc) Qlith_cyc1_above0pt5_list = [] Qlith_cyc1_below0pt5_list = [] CE_cyc2_list = [] fig, ax = plt.subplots(figsize=(fig_width, fig_height*2), nrows=2, ncols=1) ax = ax.ravel() ax[0].set_title('a', loc='left', weight='bold') ax[1].set_title('b', loc='left', weight='bold') ax[0].set_xlabel(r'1$^{\rm st}$-lithiation capacity above 0.5 V (mAh g$^{-1}$)') ax[1].set_xlabel(r'1$^{\rm st}$-lithiation capacity below 0.5 V (mAh g$^{-1}$)') ax[0].set_ylabel(r'2$^{\rm nd}$-cycle Coulombic efficiency (%)') ax[1].set_ylabel(r'2$^{\rm nd}$-cycle Coulombic efficiency (%)') ax[0].set_xlim([0, 450]) ax[1].set_xlim([0, 450]) ax[0].set_ylim([40, 100]) ax[1].set_ylim([40, 100]) 
ax[0].xaxis.set_minor_locator(AutoMinorLocator()) ax[1].xaxis.set_minor_locator(AutoMinorLocator()) ax[0].yaxis.set_minor_locator(AutoMinorLocator()) ax[1].yaxis.set_minor_locator(AutoMinorLocator()) make_legend(ax[0], 'lower right') make_legend(ax[1], 'lower right') for k, (key, value) in enumerate(all_data_constcurr.items()): Qlith_cyc1_above0pt5_list.append(value['1st lith capacity above 0.5V']) Qlith_cyc1_below0pt5_list.append(value['1st lith capacity below 0.5V']) CE_cyc2_list.append(value['2nd cycle CE']) ax[0].plot(Qlith_cyc1_above0pt5_list[-1], CE_cyc2_list[-1], 'o', color=colors_constcurr[k]) ax[1].plot(Qlith_cyc1_below0pt5_list[-1], CE_cyc2_list[-1], 'o', color=colors_constcurr[k]) for k, (key, value) in enumerate(all_data_multistep.items()): Qlith_cyc1_above0pt5_list.append(value['1st lith capacity above 0.5V']) Qlith_cyc1_below0pt5_list.append(value['1st lith capacity below 0.5V']) CE_cyc2_list.append(value['2nd cycle CE']) ax[0].plot(Qlith_cyc1_above0pt5_list[-1], CE_cyc2_list[-1], 's', color=colors_multistep[k]) ax[1].plot(Qlith_cyc1_below0pt5_list[-1], CE_cyc2_list[-1], 's', color=colors_multistep[k]) # Get correlations r_above = pearsonr(Qlith_cyc1_above0pt5_list, CE_cyc2_list)[0] r_below = pearsonr(Qlith_cyc1_below0pt5_list, CE_cyc2_list)[0] ax[0].legend(title='$r=%.2f$' % r_above) ax[1].legend(title='$r=%.2f$' % r_below, loc='lower right') plt.tight_layout() plt.savefig(figpath / 'C_rate_CE_corrlations.eps', bbox_inches='tight', format='eps') ``` ## dQ/dV and Bulter-Volmer fits Physical constants ``` n = 2 # number of electrons in reaction F = physical_constants['Faraday constant'][0] # C mol^-1 T = 273.15 + 30 # deg C f = n * F / (R * T) # V^-1. Note that n is incorporated into f here for simplicity ``` Butler-Volmer and Tafel equations. 
The log versions are better for fitting ``` def Butler_Volmer(x, i0, alpha, E0): return i0 * ( np.exp(-alpha*f*(x - E0)) - np.exp((1-alpha)*f*(x - E0)) ) def Tafel(x, i0, alpha, E0): return i0 * np.exp(alpha*f*(E0 - x)) def Butler_Volmer_log(x, i0, alpha, E0): return np.log( i0 * ( np.exp(-alpha*f*(x - E0)) - np.exp((1-alpha)*f*(x - E0)) ) ) def Tafel_log(x, i0, alpha, E0): return np.log( i0 * np.exp(alpha*f*(E0 - x)) ) ``` Plot: ``` def make_legend(ax, loc): ax.plot(-1, 1, 'ok', label='Constant current') ax.plot(-1, 1, 'sk', label='Multistep current') ax.legend(loc=loc) fig, ax = plt.subplots(figsize=(fig_width, fig_height*2), nrows=2, ncols=1) ax = ax.ravel() # Set main axes labels ax[0].set_title('a', loc='left', weight='bold') ax[1].set_title('b', loc='left', weight='bold') ax[0].set_xlabel('Voltage (V)') ax[0].set_ylabel('dQ/dV (mAh g$^{-1}$ V$^{-1}$)') ax[1].set_xlabel('Peak voltage from dQ/dV (V)') ax[1].set_ylabel('Current (C rate)') ax[0].set_xlim([0.5, 1.2]) ax[1].set_xlim([0.5, 1.2]) ax[0].set_ylim([-3000, 0]) #ax[1].set_ylim([0, 1.2]) ax[0].xaxis.set_minor_locator(AutoMinorLocator()) ax[1].xaxis.set_minor_locator(AutoMinorLocator()) ax[0].yaxis.set_minor_locator(AutoMinorLocator()) ax[0].axhline(0, color='tab:gray') make_legend(ax[1], 'upper right' if k != 2 else 'best') ## dQ/dV colors = cm.Blues( np.linspace(0.4, 0.9, len(all_data_constcurr.items())))[:,0:3] V_peaks = [] currents = [] for k, (key, value) in enumerate(all_data_constcurr.items()): # Extract values C_rate_string = value['C rate string'] C_rate = value['C rate'] Q = first_lith_data[:, 2] V = first_lith_data[:, 1] if C_rate == 10: continue # Get values V_filt = value['V_filt'] dQdV = value['dQdV'] V_peak = value['V_peak'] dQdV_peak = value['dQdV_peak'] # Peaks V_peaks.append(V_peak) currents.append(C_rate) # Plots ax[0].plot(V_filt, dQdV, color=colors_constcurr[k], label=C_rate_string) ax[0].plot(V_peak, dQdV_peak, 'ok') ax[1].semilogy(V_peak, C_rate, 'o', color=colors_constcurr[k]) 
ax[0].legend(title='Constant current', loc='lower left') for k, (key, value) in enumerate(all_data_multistep.items()): # Extract values C_rate = value['C rate'] if C_rate > 2: continue # Get values V_peak = value['V_peak'] dQdV_peak = value['dQdV_peak'] # Peaks V_peaks.append(V_peak) currents.append(C_rate) ax[1].semilogy(V_peak, C_rate, 's', color=colors_multistep[k]) popt, pcov = curve_fit(Butler_Volmer_log, V_peaks, np.log(currents), bounds=([0., 0., 1.], [1, 1., 3.])) ## This is hard coded, I was being a bit lazy. The values are obtained from the bottom of the notebook i0_CIs = [0.00788188, 0.01492495] alpha_CIs = [0.18396905171246833, 0.20642523327694473] E0_CIs = [1.04966447, 1.06957168] i0, alpha, E0 = tuple(popt) print(i0, alpha, E0) label = 'Butler-Volmer fit:\n' \ + r' $i_0=%.2g' % i0 + '$' + ' ' + '$[%.1g, %.2g]$' % tuple(i0_CIs) + ' (C rate)\n' \ + r' $\alpha=%#.2g' % alpha + '$' + ' ' + '$[%.2g, %.2g]$' % tuple(alpha_CIs) + '\n' \ + r' $E_0=%.3g' % E0 + '$' + ' ' + '$[%.3g, %.3g]$ (V)' % tuple(E0_CIs) V_linspace = np.linspace(0.6, 1.4, 100) I_fit1 = Butler_Volmer(V_linspace, *popt) ax[1].semilogy(V_linspace, I_fit1, '-k') ax[1].annotate(label, (0.05, 0.05), xycoords='axes fraction', horizontalalignment='left', verticalalignment='bottom') plt.tight_layout() plt.savefig(figpath / 'C_rate_dQdV.eps', bbox_inches='tight', format='eps') ``` ### Bulter-Volmer fit residuals ``` def make_legend(ax, loc): ax.plot(-1, 1, 'ok', label='Constant current') ax.plot(-1, 1, 'sk', label='Multistep current') ax.legend(loc=loc) fig, ax = plt.subplots(figsize=(fig_width, fig_height), nrows=1, ncols=1) ax.axhline(0, color='tab:gray') ax.set_xlabel('C rate/C rate before 0.5V') ax.set_ylabel('Actual log(I) - predicted log(I) (log(C rate))') make_legend(ax, 'best') ax.set_ylim([-0.18, 0.18]) for k, (key, value) in enumerate(all_data_constcurr.items()): if value['C rate'] == 10: continue I_pred = Butler_Volmer_log(value['V_peak'], *popt) residual = np.log(currents[k]) - I_pred 
ax.semilogx(value['C rate'], residual, 'o', color=colors_constcurr[k]) for k, (key, value) in enumerate(all_data_multistep.items()): if value['C rate'] > 2: continue I_pred = Butler_Volmer_log(value['V_peak'], *popt) residual = np.log(currents[k]) - I_pred ax.semilogx(value['C rate'], residual, 's', color=colors_multistep[k]) plt.tight_layout() plt.savefig(figpath / 'C_rate_residuals.eps', bbox_inches='tight', format='eps') ``` ## Calculating k^0 from i_0 Bard and Faulkner, Eqn 3.4.7, expresses a simplified equation for the relation between $i_0$ and $k^0$: $$ i_0 = FAk^0C_{EC} $$ Solving for $k^0$: $$ k^0 = i_0 / (FAC_{EC}) $$ $ k^0 $ is most commonly expressed as cm/s. For these calculations, I'll try out the [`pint` package](https://pint.readthedocs.io/en/0.11/). ``` import pint ureg = pint.UnitRegistry() ``` Set $i_0$, and convert to mA/g (1C=200 mA/g) ``` i0_units = i0 * 200 * ureg.milliamps / ureg.gram i0_units ``` Define F: ``` F = physical_constants['Faraday constant'][0] * ureg.coulomb / ureg.mol F ``` Since we use specific current, we need to use specific area: ``` A = 62 * ureg.meters ** 2 / ureg.gram A ``` The EC concentration requires a bit of work. We are using 1.0 M LiPF6 in 1:1 EC:DEC by weight %. We know the density of EC:DEC from [this reference](https://chemistry-europe.onlinelibrary.wiley.com/doi/pdf/10.1002/cphc.201700320): ``` rho_EC_DEC = 1.16 * ureg.gram / ureg.milliliter rho_EC_DEC ``` The total solvent mass in 1 L of this electrolyte is: ``` solvent_mass = (1 * ureg.liter * rho_EC_DEC).to_reduced_units() solvent_mass ``` The EC mass is 50% of the solvent mass: ``` EC_mass = 0.5 * solvent_mass EC_mass ``` We can now convert to moles using the molecular weight, and then create find the moles in 1 liter. 
Technically, I'm using 1 L of solvent instead of 1 L of electrolyte here, but this difference is likely negligible (I'm only interested in an order of magnitude number anyway): ``` EC_MW = 88.06 * ureg.gram / ureg.mol EC_moles = EC_mass / EC_MW C_EC = EC_moles / (1 * ureg.liter) C_EC ``` Get $k^0$: ``` k0 = (i0_units / (F * A * C_EC)) k0.to_root_units() ``` `5 x 10^-12` cm/s is *extremely* low; Bard and Faulkner (pg 96) report 10^-9 cm/s as a practical lower limit. Given the complexity of this reaction, however, I'm not too surprised. ## Confidence intervals from F test Here I calculate CIs using the F test. These asymmetric profile confidence intervals are more representative than the asymptotic approximate symmetrical 95% CIs. See these references for the basic procedure: - [Motulsky and Ransnas](10.1096/fasebj.1.5.3315805) (justification for F test) - [Kemmer and Keller](http://doi.org/10.1038/nprot.2009.182): good illustrations of the pitfalls of linearly approximated 95% CIs - [Wikipedia](https://en.wikipedia.org/wiki/F-test#Regression_problems) - [GraphPad profile likelihood confidence intervals](https://www.graphpad.com/guides/prism/8/curve-fitting/reg_how_confidence_intervals_are_c.htm) - [GraphPad F test](https://www.graphpad.com/guides/prism/7/curve-fitting/reg_howtheftestworks.htm) ## 95% CIs: i0 ``` x = V_peaks y = np.log(currents) # Get baseline fit popt, pcov = curve_fit(Butler_Volmer_log, x, y, bounds=([0., 0., 1.], [1, 1., 3.])) print(popt[0]) n = len(x) p = len(popt) # Calculate SSR y_pred = Butler_Volmer_log(x, *popt) SSR = np.sum((y - y_pred) ** 2) # Calculate SSR threshold. 
95% CI SSR_threshold = SSR * (1 + scipy.stats.f.isf(0.05, 1, n - p) / (n - p) ) # Preinitialize n_points = 100 i0_left = np.linspace(5e-3, popt[0], n_points) i0_right = np.linspace(popt[0], 2e-2, n_points) SSR_left = np.zeros((n_points, 1)) SSR_right = np.zeros((n_points, 1)) f_mod = lambda x, alpha, E0: Butler_Volmer_log(x, i0, alpha, E0) # Go through left side of parameter for k, i0 in enumerate(i0_left): popt_left, _ = curve_fit(f_mod, x, y, maxfev=1e5, bounds=([0.01, 1.], [0.99, 1.2])) y_pred = f_mod(x, *popt_left) SSR_left[k] = np.sum((y - y_pred) ** 2) # Go through right side of exponent for k, i0 in enumerate(i0_right): popt_right, _ = curve_fit(f_mod, x, y, maxfev=1e5, bounds=([0.01, 1.], [0.99, 1.2])) y_pred = f_mod(x, *popt_right) SSR_right[k] = np.sum((y - y_pred) ** 2) # Find the intersection points try: exp_lower_CI = i0_left[ np.where(SSR_left < SSR_threshold)[0][0]] exp_upper_CI = i0_right[np.where(SSR_right > SSR_threshold)[0][0]] exp_CIs = np.array([exp_lower_CI, exp_upper_CI]) except IndexError: # CIs are very large; set as undefined exp_CIs = [np.nan, np.nan] # Plot plt.plot(i0_left, SSR_left) plt.plot(i0_right, SSR_right) plt.axhline(SSR, color='k') plt.axhline(SSR_threshold, color='k') plt.ylim([0.8*SSR, 1.1*SSR_threshold]) exp_CIs, 1/exp_CIs ``` ## 95% CIs: Alpha ``` x = V_peaks y = np.log(currents) # Get baseline fit popt, pcov = curve_fit(Butler_Volmer_log, x, y, bounds=([0., 0., 1.], [1, 1., 3.])) print(popt[1]) n = len(x) p = len(popt) # Calculate SSR y_pred = Butler_Volmer_log(x, *popt) SSR = np.sum((y - y_pred) ** 2) # Calculate SSR threshold. 
95% CI SSR_threshold = SSR * (1 + scipy.stats.f.isf(0.05, 1, n - p) / (n - p) ) # Preinitialize n_points = 100 alpha_left = np.linspace(0.18, popt[1], n_points) alpha_right = np.linspace(popt[1], 0.22, n_points) SSR_left = np.zeros((n_points, 1)) SSR_right = np.zeros((n_points, 1)) f_mod = lambda x, i0, E0: Butler_Volmer_log(x, i0, alpha, E0) # Go through left side of parameter for k, alpha in enumerate(alpha_left): popt_left, _ = curve_fit(f_mod, x, y, maxfev=1e5, bounds=([0, 1.], [0.1, 2.])) y_pred = f_mod(x, *popt_left) SSR_left[k] = np.sum((y - y_pred) ** 2) # Go through right side of exponent for k, alpha in enumerate(alpha_right): popt_right, _ = curve_fit(f_mod, x, y, maxfev=1e5, bounds=([0, 1.], [0.1, 2.])) y_pred = f_mod(x, *popt_right) SSR_right[k] = np.sum((y - y_pred) ** 2) # Find the intersection points try: exp_lower_CI = alpha_left[ np.where(SSR_left < SSR_threshold)[0][0]] exp_upper_CI = alpha_right[np.where(SSR_right > SSR_threshold)[0][0]] exp_CIs = [exp_lower_CI, exp_upper_CI] except IndexError: # CIs are very large; set as undefined exp_CIs = [np.nan, np.nan] # Plot plt.plot(alpha_left, SSR_left) plt.plot(alpha_right, SSR_right) plt.axhline(SSR, color='k') plt.axhline(SSR_threshold, color='k') plt.ylim([0.8*SSR, 1.1*SSR_threshold]) exp_CIs ``` ## 95% CIs: E_0 ``` x = V_peaks y = np.log(currents) # Get baseline fit popt, pcov = curve_fit(Butler_Volmer_log, x, y, bounds=([0., 0., 1.], [1, 1., 3.])) print(popt[2]) n = len(x) p = len(popt) # Calculate SSR y_pred = Butler_Volmer_log(x, *popt) SSR = np.sum((y - y_pred) ** 2) # Calculate SSR threshold. 
95% CI SSR_threshold = SSR * (1 + scipy.stats.f.isf(0.05, 1, n - p) / (n - p) ) # Preinitialize n_points = 100 E0_left = np.linspace(1.045, popt[2], n_points) E0_right = np.linspace(popt[2], 1.1, n_points) SSR_left = np.zeros((n_points, 1)) SSR_right = np.zeros((n_points, 1)) f_mod = lambda x, i0, alpha: Butler_Volmer_log(x, i0, alpha, E0) # Go through left side of parameter for k, E0 in enumerate(E0_left): popt_left, _ = curve_fit(f_mod, x, y, maxfev=1e5, bounds=([0, 0.01], [0.1, 0.99])) y_pred = f_mod(x, *popt_left) SSR_left[k] = np.sum((y - y_pred) ** 2) # Go through right side of exponent for k, E0 in enumerate(E0_right): popt_right, _ = curve_fit(f_mod, x, y, maxfev=1e5, bounds=([0, 0.01], [0.1, 0.99])) y_pred = f_mod(x, *popt_right) SSR_right[k] = np.sum((y - y_pred) ** 2) # Find the intersection points try: exp_lower_CI = E0_left[ np.where(SSR_left < SSR_threshold)[0][0]] exp_upper_CI = E0_right[np.where(SSR_right > SSR_threshold)[0][0]] exp_CIs = np.array([exp_lower_CI, exp_upper_CI]) except IndexError: # CIs are very large; set as undefined exp_CIs = [np.nan, np.nan] # Plot plt.plot(E0_left, SSR_left) plt.plot(E0_right, SSR_right) plt.axhline(SSR, color='k') plt.axhline(SSR_threshold, color='k') plt.ylim([0.8*SSR, 1.1*SSR_threshold]) exp_CIs ``` This is a good example of an asymptotic confidence interval
github_jupyter
# Quick Start The easiest way to get up and running is to load in one of our example datasets (or load in some data of your own) and to convert them to either a [HindcastEnsemble](api/climpred.classes.HindcastEnsemble.html#climpred.classes.HindcastEnsemble) or [PerfectModelEnsemble](api/climpred.classes.PerfectModelEnsemble.html#climpred.classes.PerfectModelEnsemble) object. `climpred` provides example datasets from the MPI-ESM-LR decadal prediction ensemble and the CESM decadal prediction ensemble. See our [examples](examples.html) to see some analysis cases. ``` %matplotlib inline import matplotlib.pyplot as plt import xarray as xr from climpred import HindcastEnsemble from climpred.tutorial import load_dataset import climpred xr.set_options(display_style='text') ``` You can view the datasets available to be loaded with the [load_datasets()](api/climpred.tutorial.load_dataset.html#climpred.tutorial.load_dataset) command without passing any arguments: ``` load_dataset() ``` From here, loading a dataset is easy. Note that you need to be connected to the internet for this to work -- the datasets are being pulled from the [climpred-data](https://github.com/pangeo-data/climpred-data) repository. Once loaded, it is cached on your computer so you can reload extremely quickly. These datasets are very small (< 1MB each) so they won't take up much space. ``` hind = climpred.tutorial.load_dataset('CESM-DP-SST') # Add lead attribute units. hind["lead"].attrs["units"] = "years" obs = climpred.tutorial.load_dataset('ERSST') ``` Make sure your prediction ensemble's dimension labeling conforms to `climpred`'s [standards](setting-up-data.html). In other words, you need an `init`, `lead`, and (optional) `member` dimension. Make sure that your `init` and `lead` dimensions align. *E.g.*, a November 1st, 1954 initialization should be labeled as `init=1954` so that the lead=1 forecast is 1955. ``` print(hind.coords) ``` We'll quickly process the data to create anomalies. 
CESM-DPLE's drift-correction occurs over 1964-2014, so we'll remove that from the observations. ``` obs = obs - obs.sel(time=slice(1964,2014)).mean('time') ``` We can create a [HindcastEnsemble](api/climpred.classes.HindcastEnsemble.html#climpred.classes.HindcastEnsemble) object and add our observations. ``` hindcast = HindcastEnsemble(hind) hindcast = hindcast.add_observations(obs) print(hindcast) ``` `PredictionEnsemble.plot()` shows all associated datasets. ``` hindcast.plot() ``` We'll also remove a quadratic trend so that it doesn't artificially boost our predictability. `PredictionEnsemble.map(func)` tries to apply/map `func` to all associated datasets. ``` from esmtools.stats import rm_poly hindcast = hindcast.map(rm_poly, dim='init', order=2).map(rm_poly, dim='time', order=2) hindcast.plot() ``` Now we'll quickly calculate skill against persistence. We require users to define `metric`, `comparison`, `dim`, and `alignment`. This ensures that `climpred` isn't treated like a black box -- there are no "defaults" to the prediction analysis framework. You can choose from a variety of possible [metrics](https://climpred.readthedocs.io/en/latest/metrics.html) by entering their associated strings. [Comparison](https://climpred.readthedocs.io/en/latest/comparisons.html) strategies vary for hindcast and perfect model systems. Here we chose to compare the ensemble mean to observations (`'e2o'`). We reduce this operation over the initialization dimension. Lastly, we choose the `'same_verif'` alignment, which uses the same set of verification dates across all leads (see alignment strategies [here](https://climpred.readthedocs.io/en/latest/alignment.html)). An optional keyword used here is `reference`. Here, we ask to compute the `'acc'` metric with a persistence forecast, so that we can establish skill over some baseline forecast. 
``` result = hindcast.verify(metric='acc', comparison='e2o', dim='init', alignment='same_verif', reference='persistence') skill = result.sel(skill='initialized') persistence = result.sel(skill='persistence') print(skill) plt.style.use('fivethirtyeight') f, ax = plt.subplots(figsize=(8, 3)) skill.SST.plot(marker='o', markersize=10, label='skill') persistence.SST.plot(marker='o', markersize=10, label='persistence', color='#a9a9a9') plt.legend() ax.set(title='Global Mean SST Predictability', ylabel='Anomaly \n Correlation Coefficient', xlabel='Lead Year') plt.show() ``` We can also check accuracy (error) of our forecasts. ``` result = hindcast.verify(metric='rmse', comparison='e2o', dim='init', alignment='same_verif', reference='persistence') skill = result.sel(skill='initialized') persistence = result.sel(skill='persistence') plt.style.use('fivethirtyeight') f, ax = plt.subplots(figsize=(8, 3)) skill.SST.plot(marker='o', markersize=10, label='initialized forecast') persistence.SST.plot(marker='o', markersize=10, label='persistence', color='#a9a9a9') plt.legend() ax.set(title='Global Mean SST Forecast Error', ylabel='RMSE', xlabel='Lead Year') plt.show() ```
github_jupyter
``` # #Python Libraries import numpy as np import scipy as sp import pandas as pd import pandas_profiling %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import os import sys import time import requests import datetime import math import missingno as msno from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import Ridge, LogisticRegression from sklearn.metrics import mean_squared_error, mean_squared_log_error from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.preprocessing import LabelBinarizer from scipy.sparse import csr_matrix, hstack df_train = pd.read_csv(filepath_or_buffer="data/train.tsv", sep="\t", ) df_test = pd.read_csv(filepath_or_buffer="data/test.tsv", sep="\t", ) print("Dataset dimensions - Train: ", df_train.shape) print("Dataset dimensions - Test: ", df_test.shape) df_train.head() df_test.head() # df_input = pd.concat([df_train, df_test], 0) # nrow_train = df_train.shape[0] df_input = df_train df_input['category_name'].fillna(value='other', inplace=True) df_input['brand_name'].fillna(value='other', inplace=True) df_input['item_description'].fillna(value='other', inplace=True) df_test['category_name'].fillna(value='other', inplace=True) df_test['brand_name'].fillna(value='other', inplace=True) df_test['item_description'].fillna(value='other', inplace=True) df_input['category_name'] = df_input['category_name'].astype('category') df_input['brand_name'] = df_input['brand_name'].astype('category') df_input['item_condition_id'] = df_input['item_condition_id'].astype('category') df_test['category_name'] = df_test['category_name'].astype('category') df_test['brand_name'] = df_test['brand_name'].astype('category') df_test['item_condition_id'] = df_test['item_condition_id'].astype('category') msno.matrix(df_input) df_input.dtypes def func_count_vectorizer(df_in, var_col): model_cv = CountVectorizer(min_df=10) return 
model_cv.fit_transform(df_in[var_col])

mat_input_name_cv = func_count_vectorizer(df_input, "name")
mat_input_category_name_cv = func_count_vectorizer(df_input, "category_name")
mat_input_brand_name_cv = func_count_vectorizer(df_input, "brand_name")
mat_input_item_desc_cv = func_count_vectorizer(df_input, "item_description")

print("--- Matrix Dimensions ---")
print("mat_input_name_cv", mat_input_name_cv.shape)
print("mat_input_category_name_cv", mat_input_category_name_cv.shape)
print("mat_input_brand_name_cv", mat_input_brand_name_cv.shape)
print("mat_input_item_desc_cv", mat_input_item_desc_cv.shape)


def func_tfidf_vectorizer(df_in, var_col):
    """Return the TF-IDF matrix (1-3 grams, English stop words removed,
    capped at 55k features) for text column `var_col` of `df_in`."""
    model_tfidf = TfidfVectorizer(max_features = 55000,
                                  ngram_range = (1,3),
                                  stop_words = "english")
    return model_tfidf.fit_transform(df_in[var_col])


# BUG FIX: these four matrices were built with func_count_vectorizer, so the
# "tfidf" features were just duplicates of the count features above and the
# TF-IDF vectorizer defined above was never used. Call the TF-IDF helper.
mat_input_name_tfidf = func_tfidf_vectorizer(df_input, "name")
mat_input_category_name_tfidf = func_tfidf_vectorizer(df_input, "category_name")
mat_input_brand_name_tfidf = func_tfidf_vectorizer(df_input, "brand_name")
mat_input_item_desc_tfidf = func_tfidf_vectorizer(df_input, "item_description")

print("--- Matrix Dimensions ---")
print("mat_input_name_tfidf", mat_input_name_tfidf.shape)
print("mat_input_category_name_tfidf", mat_input_category_name_tfidf.shape)
print("mat_input_brand_name_tfidf", mat_input_brand_name_tfidf.shape)
print("mat_input_item_desc_tfidf", mat_input_item_desc_tfidf.shape)

# Horizontally stack every sparse feature block into one CSR design matrix.
mat_input_stack = sp.sparse.hstack((mat_input_name_cv,
                                    mat_input_category_name_cv,
                                    mat_input_brand_name_cv,
                                    mat_input_item_desc_cv,
                                    mat_input_name_tfidf,
                                    mat_input_category_name_tfidf,
                                    mat_input_brand_name_tfidf,
                                    mat_input_item_desc_tfidf)).tocsr()
mat_input_stack.shape

# #Test-Train Split
X = mat_input_stack
# log1p the target so the regression optimizes a log-price (RMSLE-style) loss.
y = np.log1p(df_input["price"])

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)

print("Train Dataset Shape - X_train: ", X_train.shape)
print("Train Dataset Shape - y_train: ", y_train.shape)
print("--------------------------------")
print("Test Dataset Shape - X_test: ", X_test.shape) print("Test Dataset Shape - y_test: ", y_test.shape) model_2 = Ridge(solver = "lsqr", fit_intercept=False) # model_2 = RandomForestRegressor(max_depth=15, random_state=0, n_jobs=-1) model_2.fit(X_train, y_train) y_predict = model_2.predict(X_test) math.sqrt(mean_squared_log_error(y_predict, y_test)) y_predict df_model_2_submission = pd.DataFrame() df_model_2_submission['test_id'] = df_test['test_id'] df_model_2_submission['price'] = pd.Series(y_predict) df_model_2_submission.to_csv("submissions/model_2_submission_1.csv", index=False) !head submissions/model_2_submission_1.csv ```
github_jupyter
# Funciones Lo más importante para programar, y no solo en Python, es saber organizar el código en piezas más pequeñas que hagan tareas independientes y combinarlas entre sí. Las **funciones** son el primer nivel de organización del código: reciben unas *entradas*, las *procesan* y devuelven unas *salidas*. ![Black box](../images/blackbox.jpg) ## Ejercicio 1: Función que imprime Como primer paso, vamos a crear una función que no recibe ninguna entrada ni produce ninguna salida, pero que imprime una frase por pantalla. Para eso emplearemos la palabra clave `def`, seguida del nombre de la función, y abriremos un bloque nuevo. <div class="alert alert-warning">¡No olvides los dos puntos! Si el sangrado del código no avanza automáticamente, es que te los has dejado.</div> ``` def funcion1(): print("¡Soy la función 1!") ``` Y ahora invocamos la función con la sintaxis que ya conocemos: ``` funcion1() ``` <div class="alert alert-warning">¡Observa que no aparece `Out [2]`! Eso es porque, en realidad, la función no tiene salidas: solo una llamada a `print`. Mira lo que ocurre si intentamos asignar la salida de la función a una variable:</div> ``` salida = funcion1() salida salida is None ``` En el siguiente ejercicio vamos a ver cómo evitar esto. ## Ejercicio 2: Función que devuelve Vamos a crear ahora una función sin entradas pero con una salida. Para ello usamos la palabra clave `return`. ``` def funcion2(): return "Salida de la función 2" funcion2() ``` Y ahora sí podemos asignar esa salida a una variable: ``` salida = funcion2() print(salida) salida[4::2] # Lo primero que se me ha venido a la cabeza ``` Aclarado el concepto (que a veces puede quedar difuso cuando se trabaja en modo interactivo) normalmente querremos devolver valores. De esta forma podemos enlazar funciones como si fueran bloques, uno detrás de otro, y estructurar nuestro programa mucho mejor. 
## Ejercicio 3: Función de una entrada Vamos a crear ahora una función que compruebe si un número es mayor o menor que cinco. La salida ahora no nos importa mucho: lo importante es que al declarar los argumentos de entrada en la definición de la función, podremos usarlos dentro de ella con el nombre que decidamos. ``` def comparar_cinco(num): if num < 5: return "El número es menor que cinco" elif num == 5: return "El número es igual a cinco" else: return "El número es mayor que cinco" print(comparar_cinco(2)) mi_numero = 7 # Aquí la variable se llama `mi_numero` print(comparar_cinco(mi_numero)) # ¡Dentro de la función eso me da igual! ``` <div class="alert alert-info">Apuntes: <ul><li>Podríamos haber puesto un `elif num > 5` en la última parte en vez de un `else`. En este caso es obvio, pero en otros puede no ser tan evidente.</li> <li>Algunos prefieren sacar los `return` fuera del condicional, o incluso que solo haya uno. http://stackoverflow.com/q/9191388/554319 ¡Cuestión de gustos!</li> </ul> </div> _En esta clase hemos visto cómo crear funciones que encapsulen tareas de nuestro programa_ **Referencias** * Libro "Learn Python the Hard Way" http://learnpythonthehardway.org/book/ * Python Tutor, para visualizar código Python paso a paso http://pythontutor.com/ * Libro "How To Think Like a Computer Scientist" http://interactivepython.org/runestone/static/thinkcspy/toc.html * Project Euler: ejercicios para aprender Python https://projecteuler.net/problems * Python Challenge (!) http://www.pythonchallenge.com/ --- <br/> #### <h4 align="right">¡Síguenos en Twitter! 
<br/> ###### <a href="https://twitter.com/AeroPython" class="twitter-follow-button" data-show-count="false">Follow @AeroPython</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> <br/> ###### Este notebook ha sido realizado por: Juan Luis Cano, y Álex Sáez <br/> ##### <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es"><img alt="Licencia Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">Curso AeroPython</span> por <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName">Juan Luis Cano Rodriguez y Alejandro Sáez Mollejo</span> se distribuye bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es">Licencia Creative Commons Atribución 4.0 Internacional</a>.
github_jupyter
# SVM on Mobile Price Range Prediction Dataset [Dataset](https://github.com/heenalsapovadia/ml_practices_2018/blob/master/Labs/Lab1/Heenal/Data/mobilePricePred.csv) [EDA on dataset](https://github.com/heenalsapovadia/ml_practices_2018/blob/master/Labs/Lab1/Heenal/Python\Notebooks/Mobile\Price\Range\Prediction.ipynb) ``` #importing all the necessary libraries import numpy as np import seaborn as sns import pandas as pd import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from matplotlib.patches import Patch from sklearn.preprocessing import LabelBinarizer from sklearn.linear_model import LogisticRegression from sklearn import svm from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.model_selection import learning_curve,validation_curve from sklearn.model_selection import ShuffleSplit from sklearn import metrics from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import (TimeSeriesSplit, KFold, ShuffleSplit, StratifiedKFold, GroupShuffleSplit, GroupKFold, StratifiedShuffleSplit) from yellowbrick.features import ParallelCoordinates from yellowbrick import classifier from yellowbrick.classifier import ClassificationReport, ConfusionMatrix,PrecisionRecallCurve from imblearn.over_sampling import SMOTE mobile = pd.read_csv('../../Lab1/Heenal/Data/mobilePricePred.csv') mobile.shape # Separating the data and the target variable X = mobile[['ram', 'battery_power']].values y = mobile['price_range'].values X.shape, y.shape # Splitting the data into TRAIN and TEST sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) print("Number transactions X_train dataset: ", X_train.shape) print("Number transactions y_train dataset: ", y_train.shape) print("Number transactions X_test dataset: ", X_test.shape) print("Number transactions y_test dataset: ", y_test.shape) ``` ## Visualizing the range of values of the features selected ``` 
visualizer = ParallelCoordinates() visualizer.fit_transform(X, y) visualizer.poof() sc = StandardScaler() sc.fit(X_train) # Scaling the train and test sets. X_train_std = sc.transform(X_train) X_test_std = sc.transform(X_test) visualizer = ParallelCoordinates() visualizer.fit_transform(X_train_std, y) visualizer.poof() # Create SVM classification object model_lin = svm.SVC(kernel='linear', C=1, gamma=10) model_lin.fit(X_train_std, y_train) model_rbf = svm.SVC(kernel='rbf', C=1, gamma=10) model_rbf.fit(X_train_std, y_train) #model_poly = svm.SVC(kernel='poly', C=1000, gamma=10) #model_poly.fit(X_train_std, y_train) def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02): # setup marker generator and color map markers = ('s', 'x', 'o', '^', 'v') colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan') cmap = ListedColormap(colors[:len(np.unique(y))]) # plot the decision surface x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution)) Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T) Z = Z.reshape(xx1.shape) plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) # plot all samples X_test, y_test = X[test_idx, :], y[test_idx] for idx, cl in enumerate(np.unique(y)): plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl) # highlight test samples if test_idx: X_test, y_test = X[test_idx, :], y[test_idx] plt.scatter(X_test[:, 0], X_test[:, 1], c='', alpha=1.0, linewidth=1, marker='o', s=55, label='test set') X_combined_std = np.vstack((X_train_std, X_test_std)) y_combined = np.hstack((y_train, y_test)) plot_decision_regions(X_combined_std, y_combined, classifier=model_lin, test_idx=range(275,392)) plt.xlabel('RAM [standardized]') plt.ylabel('Battery Power [standardized]') plt.legend(loc='upper 
left') plt.show()

# Decision surface of the RBF-kernel SVM over the combined (train + test)
# standardized data; test_idx highlights the held-out test samples.
plot_decision_regions(X_combined_std, y_combined, classifier=model_rbf, test_idx=range(275,392))
plt.xlabel('RAM [standardized]')
plt.ylabel('Battery Power [standardized]')
plt.legend(loc='upper left')
plt.show()

# Same decision surface for the linear-kernel SVM, now with a title.
plot_decision_regions(X_combined_std, y_combined, classifier=model_lin, test_idx=range(275,392))
plt.title('Linear')
plt.xlabel('RAM [standardized]')
plt.ylabel('Battery Power [standardized]')
plt.legend(loc='upper left')
plt.show()

# And again for the RBF-kernel SVM with a title.
plot_decision_regions(X_combined_std, y_combined, classifier=model_rbf, test_idx=range(275,392))
plt.title('RBF')
plt.xlabel('RAM [standardized]')
plt.ylabel('Battery Power [standardized]')
plt.legend(loc='upper left')
plt.show()

# get the separating hyperplane
# For two features, coef_[k] = (w0, w1) of the k-th hyperplane; the boundary
# line is y = -(w0/w1) * x - intercept/w1.
w = model_lin.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-2, 2)
yy = a * xx - (model_lin.intercept_[0]) / w[1]
# Same construction for the hyperplane at index 5.
# NOTE(review): with 4 classes, SVC's default one-vs-one scheme yields 6
# pairwise hyperplanes (indices 0..5) — confirm model_lin's decision shape.
w1 = model_lin.coef_[5]
a1 = -w1[0] / w1[1]
xx1 = np.linspace(-2, 2)
yy1 = a1 * xx1 - (model_lin.intercept_[5]) / w1[1]

model_lin.intercept_
```

# Plotting the learning curve using sklearn

```
# Defining the function to plot the learning curve:
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
    """Plot mean training / cross-validation scores against training-set size.

    Shaded bands show +/- one standard deviation across the CV folds.
    Returns the ``matplotlib.pyplot`` module so the caller can chain
    ``plt.show()`` or customize the figure further.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # learning_curve refits the estimator once per (fold, training size).
    train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # +/- 1 std bands around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score")
    plt.legend(loc="best")
    return plt

# Plotting for SVM
title = "Learning Curves (SVM)"
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = model = svm.SVC(kernel='poly', C=1000, gamma=10)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
```

# Checking out all the metrics

## Plotting the confusion matrix selecting different features

```
This would help us to analyse which features properly identify the distinctions rather than the borders between the 4 classes.
```

```
# Features = battery and RAM
cm = ConfusionMatrix(model_lin, classes=[0,1,2,3])
cm.score(X_test_std, y_test)
cm.poof()

# Features = battery and RAM
cm = ConfusionMatrix(model_rbf, classes=[0,1,2,3])
cm.score(X_test_std, y_test)
cm.poof()

# C=1000
y_pred = model_lin.predict(X_test_std)
metrics.accuracy_score(y_test, y_pred)

# C=1
y_pred = model_lin.predict(X_test_std)
metrics.accuracy_score(y_test, y_pred)

# C =1000
y_pred = model_rbf.predict(X_test_std)
metrics.accuracy_score(y_test, y_pred)

# C=1
y_pred = model_rbf.predict(X_test_std)
metrics.accuracy_score(y_test, y_pred)

# Generating the classification report containing measures of precision, recall and F1-score
visualizer = ClassificationReport(model_lin, support=True)
visualizer.fit(X_train_std, y_train)
visualizer.score(X_test_std, y_test)
visualizer.poof()

# Generating the classification report containing measures of precision, recall and F1-score
visualizer = ClassificationReport(model_rbf, support=True)
visualizer.fit(X_train_std, y_train)
visualizer.score(X_test_std, y_test)
visualizer.poof()
```
github_jupyter
# LAB 4a: Creating a Sampled Dataset. **Learning Objectives** 1. Setup up the environment 1. Sample the natality dataset to create train/eval/test sets 1. Preprocess the data in Pandas dataframe ## Introduction In this notebook, we'll read data from BigQuery into our notebook to preprocess the data within a Pandas dataframe for a small, repeatable sample. We will set up the environment, sample the natality dataset to create train/eval/test splits, and preprocess the data in a Pandas dataframe. ## Set up environment variables and load necessary libraries Check that the Google BigQuery library is installed and if not, install it. ``` %%bash sudo pip freeze | grep google-cloud-bigquery==1.6.1 || \ sudo pip install google-cloud-bigquery==1.6.1 ``` Import necessary libraries. ``` from google.cloud import bigquery import pandas as pd ``` Set environment variables so that we can use them throughout the entire lab. We will be using our project name for our bucket, so you only need to change your project and region. ``` %%bash export PROJECT=$(gcloud config list project --format "value(core.project)") echo "Your current GCP Project Name is: "$PROJECT PROJECT = "cloud-training-demos" # Replace with your PROJECT ``` ## Create ML datasets by sampling using BigQuery We'll begin by sampling the BigQuery data to create smaller datasets. Let's create a BigQuery client that we'll use throughout the lab. ``` bq = bigquery.Client(project = PROJECT) ``` We need to figure out the right way to divide our hash values to get our desired splits. To do that we need to define some values to hash with in the modulo. Feel free to play around with these values to get the perfect combination. 
``` modulo_divisor = 100 train_percent = 80.0 eval_percent = 10.0 train_buckets = int(modulo_divisor * train_percent / 100.0) eval_buckets = int(modulo_divisor * eval_percent / 100.0) ``` We can make a series of queries to check if our bucketing values result in the correct sizes of each of our dataset splits and then adjust accordingly. Therefore, to make our code more compact and reusable, let's define a function to return the head of a dataframe produced from our queries up to a certain number of rows. ``` def display_dataframe_head_from_query(query, count=10): """Displays count rows from dataframe head from query. Args: query: str, query to be run on BigQuery, results stored in dataframe. count: int, number of results from head of dataframe to display. Returns: Dataframe head with count number of results. """ df = bq.query( query + " LIMIT {limit}".format( limit=count)).to_dataframe() return df.head(count) ``` For our first query, we're going to use the original query above to get our label, features, and columns to combine into our hash which we will use to perform our repeatable splitting. There are only a limited number of years, months, days, and states in the dataset. Let's see what the hash values are. We will need to include all of these extra columns to hash on to get a fairly uniform spread of the data. Feel free to try less or more in the hash and see how it changes your results. 
``` # Get label, features, and columns to hash and split into buckets hash_cols_fixed_query = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, year, month, CASE WHEN day IS NULL THEN CASE WHEN wday IS NULL THEN 0 ELSE wday END ELSE day END AS date, IFNULL(state, "Unknown") AS state, IFNULL(mother_birth_state, "Unknown") AS mother_birth_state FROM publicdata.samples.natality WHERE year > 2000 AND weight_pounds > 0 AND mother_age > 0 AND plurality > 0 AND gestation_weeks > 0 """ display_dataframe_head_from_query(hash_cols_fixed_query) ``` Using `COALESCE` would provide the same result as the nested `CASE WHEN`. This is preferable when all we want is the first non-null instance. To be precise the `CASE WHEN` would become `COALESCE(wday, day, 0) AS date`. You can read more about it [here](https://cloud.google.com/bigquery/docs/reference/standard-sql/conditional_expressions). Next query will combine our hash columns and will leave us just with our label, features, and our hash values. ``` data_query = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, FARM_FINGERPRINT( CONCAT( CAST(year AS STRING), CAST(month AS STRING), CAST(date AS STRING), CAST(state AS STRING), CAST(mother_birth_state AS STRING) ) ) AS hash_values FROM ({CTE_hash_cols_fixed}) """.format(CTE_hash_cols_fixed=hash_cols_fixed_query) display_dataframe_head_from_query(data_query) ``` The next query is going to find the counts of each of the unique 657484 `hash_values`. This will be our first step at making actual hash buckets for our split via the `GROUP BY`. ``` # Get the counts of each of the unique hashs of our splitting column first_bucketing_query = """ SELECT hash_values, COUNT(*) AS num_records FROM ({CTE_data}) GROUP BY hash_values """.format(CTE_data=data_query) display_dataframe_head_from_query(first_bucketing_query) ``` The query below performs a second layer of bucketing where now for each of these bucket indices we count the number of records. 
``` # Get the number of records in each of the hash buckets second_bucketing_query = """ SELECT ABS(MOD(hash_values, {modulo_divisor})) AS bucket_index, SUM(num_records) AS num_records FROM ({CTE_first_bucketing}) GROUP BY ABS(MOD(hash_values, {modulo_divisor})) """.format( CTE_first_bucketing=first_bucketing_query, modulo_divisor=modulo_divisor) display_dataframe_head_from_query(second_bucketing_query) ``` The number of records is hard for us to easily understand the split, so we will normalize the count into percentage of the data in each of the hash buckets in the next query. ``` # Calculate the overall percentages percentages_query = """ SELECT bucket_index, num_records, CAST(num_records AS FLOAT64) / ( SELECT SUM(num_records) FROM ({CTE_second_bucketing})) AS percent_records FROM ({CTE_second_bucketing}) """.format(CTE_second_bucketing=second_bucketing_query) display_dataframe_head_from_query(percentages_query) ``` We'll now select the range of buckets to be used in training. ``` # Choose hash buckets for training and pull in their statistics train_query = """ SELECT *, "train" AS dataset_name FROM ({CTE_percentages}) WHERE bucket_index >= 0 AND bucket_index < {train_buckets} """.format( CTE_percentages=percentages_query, train_buckets=train_buckets) display_dataframe_head_from_query(train_query) ``` We'll do the same by selecting the range of buckets to be used evaluation. ``` # Choose hash buckets for validation and pull in their statistics eval_query = """ SELECT *, "eval" AS dataset_name FROM ({CTE_percentages}) WHERE bucket_index >= {train_buckets} AND bucket_index < {cum_eval_buckets} """.format( CTE_percentages=percentages_query, train_buckets=train_buckets, cum_eval_buckets=train_buckets + eval_buckets) display_dataframe_head_from_query(eval_query) ``` Lastly, we'll select the hash buckets to be used for the test split. 
``` # Choose hash buckets for testing and pull in their statistics test_query = """ SELECT *, "test" AS dataset_name FROM ({CTE_percentages}) WHERE bucket_index >= {cum_eval_buckets} AND bucket_index < {modulo_divisor} """.format( CTE_percentages=percentages_query, cum_eval_buckets=train_buckets + eval_buckets, modulo_divisor=modulo_divisor) display_dataframe_head_from_query(test_query) ``` In the below query, we'll `UNION ALL` all of the datasets together so that all three sets of hash buckets will be within one table. We added `dataset_id` so that we can sort on it in the query after. ``` # Union the training, validation, and testing dataset statistics union_query = """ SELECT 0 AS dataset_id, * FROM ({CTE_train}) UNION ALL SELECT 1 AS dataset_id, * FROM ({CTE_eval}) UNION ALL SELECT 2 AS dataset_id, * FROM ({CTE_test}) """.format(CTE_train=train_query, CTE_eval=eval_query, CTE_test=test_query) display_dataframe_head_from_query(union_query) ``` Lastly, we'll show the final split between train, eval, and test sets. We can see both the number of records and percent of the total data. It is really close to the 80/10/10 that we were hoping to get. ``` # Show final splitting and associated statistics split_query = """ SELECT dataset_id, dataset_name, SUM(num_records) AS num_records, SUM(percent_records) AS percent_records FROM ({CTE_union}) GROUP BY dataset_id, dataset_name ORDER BY dataset_id """.format(CTE_union=union_query) display_dataframe_head_from_query(split_query) ``` Now that we know that our splitting values produce a good global splitting on our data, here's a way to get a well-distributed portion of the data in such a way that the train/eval/test sets do not overlap and takes a subsample of our global splits. 
``` # every_n allows us to subsample from each of the hash values # This helps us get approximately the record counts we want every_n = 1000 splitting_string = "ABS(MOD(hash_values, {0} * {1}))".format(every_n, modulo_divisor) def create_data_split_sample_df(query_string, splitting_string, lo, up): """Creates a dataframe with a sample of a data split. Args: query_string: str, query to run to generate splits. splitting_string: str, modulo string to split by. lo: float, lower bound for bucket filtering for split. up: float, upper bound for bucket filtering for split. Returns: Dataframe containing data split sample. """ query = "SELECT * FROM ({0}) WHERE {1} >= {2} and {1} < {3}".format( query_string, splitting_string, int(lo), int(up)) df = bq.query(query).to_dataframe() return df train_df = create_data_split_sample_df( data_query, splitting_string, lo=0, up=train_percent) eval_df = create_data_split_sample_df( data_query, splitting_string, lo=train_percent, up=train_percent + eval_percent) test_df = create_data_split_sample_df( data_query, splitting_string, lo=train_percent + eval_percent, up=modulo_divisor) print("There are {} examples in the train dataset.".format(len(train_df))) print("There are {} examples in the validation dataset.".format(len(eval_df))) print("There are {} examples in the test dataset.".format(len(test_df))) ``` ## Preprocess data using Pandas We'll perform a few preprocessing steps to the data in our dataset. Let's add extra rows to simulate the lack of ultrasound. That is we'll duplicate some rows and make the `is_male` field be `Unknown`. Also, if there is more than child we'll change the `plurality` to `Multiple(2+)`. While we're at it, we'll also change the plurality column to be a string. We'll perform these operations below. Let's start by examining the training dataset as is. 
``` train_df.head() ``` Also, notice that there are some very important numeric fields that are missing in some rows (the count in Pandas doesn't count missing data) ``` train_df.describe() ``` It is always crucial to clean raw data before using in machine learning, so we have a preprocessing step. We'll define a `preprocess` function below. Note that the mother's age is an input to our model so users will have to provide the mother's age; otherwise, our service won't work. The features we use for our model were chosen because they are such good predictors and because they are easy enough to collect. ``` def preprocess(df): """ Preprocess pandas dataframe for augmented babyweight data. Args: df: Dataframe containing raw babyweight data. Returns: Pandas dataframe containing preprocessed raw babyweight data as well as simulated no ultrasound data masking some of the original data. """ # Clean up raw data # Filter out what we don"t want to use for training df = df[df.weight_pounds > 0] df = df[df.mother_age > 0] df = df[df.gestation_weeks > 0] df = df[df.plurality > 0] # Modify plurality field to be a string twins_etc = dict(zip([1,2,3,4,5], ["Single(1)", "Twins(2)", "Triplets(3)", "Quadruplets(4)", "Quintuplets(5)"])) df["plurality"].replace(twins_etc, inplace=True) # Clone data and mask certain columns to simulate lack of ultrasound no_ultrasound = df.copy(deep=True) # Modify is_male no_ultrasound["is_male"] = "Unknown" # Modify plurality condition = no_ultrasound["plurality"] != "Single(1)" no_ultrasound.loc[condition, "plurality"] = "Multiple(2+)" # Concatenate both datasets together and shuffle return pd.concat( [df, no_ultrasound]).sample(frac=1).reset_index(drop=True) ``` Let's process the train/eval/test set and see a small sample of the training data after our preprocessing: ``` train_df = preprocess(train_df) eval_df = preprocess(eval_df) test_df = preprocess(test_df) train_df.head() train_df.tail() ``` Let's look again at a summary of the dataset. 
Note that we only see numeric columns, so `plurality` does not show up. ``` train_df.describe() ``` ## Write to .csv files In the final versions, we want to read from files, not Pandas dataframes. So, we write the Pandas dataframes out as csv files. Using csv files gives us the advantage of shuffling during read. This is important for distributed training because some workers might be slower than others, and shuffling the data helps prevent the same data from being assigned to the slow workers. ``` # Define columns columns = ["weight_pounds", "is_male", "mother_age", "plurality", "gestation_weeks"] # Write out CSV files train_df.to_csv( path_or_buf="train.csv", columns=columns, header=False, index=False) eval_df.to_csv( path_or_buf="eval.csv", columns=columns, header=False, index=False) test_df.to_csv( path_or_buf="test.csv", columns=columns, header=False, index=False) %%bash wc -l *.csv %%bash head *.csv %%bash tail *.csv ``` ## Lab Summary: In this lab, we set up the environment, sampled the natality dataset to create train/eval/test splits, and preprocessed the data in a Pandas dataframe. Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
github_jupyter
``` import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import numpy as np import math from tensorboardX import SummaryWriter from torchvision import datasets, transforms import torchvision.utils as vutils from tqdm import tqdm, trange ``` $$y = x + 0.3 sin(2\pi(x + \epsilon)) + 0.3sin(4\pi(x + \epsilon)) + \epsilon$$ $$ \epsilon \sim N(0, 0.02)$$ ``` N = 1000 x = np.linspace(0, 0.5, N)[:, np.newaxis] x_test = np.linspace(-0.5, 1.0, 2 * N)[:, np.newaxis] eps = np.random.normal(0, 0.02, x.shape) y = x + 0.3 * np.sin(2 * np.pi * (x + eps))\ + 0.3 * np.sin(4 * np.pi * (x + eps)) + eps y_test = x_test + 0.3 * np.sin(2 * np.pi * x_test)\ + 0.3 * np.sin(4 * np.pi * x_test) import matplotlib.pyplot as plt %matplotlib inline plt.scatter(x, y, color='k') plt.plot(x_test, y_test, color='k') x.shape class Gaussian(object): def __init__(self, mu, rho): self.mu = mu self.rho = rho self.normal = torch.distributions.Normal(0, 1) def sample(self): epsilon = self.normal.sample(self.rho.size()) return self.mu + self.sigma * epsilon @property def sigma(self): return torch.log1p(torch.exp(self.rho)) def log_prob(self, input): return (-0.5 * math.log(2 * math.pi) - torch.log(self.sigma)\ - ((input - self.mu) ** 2) / (2 * self.sigma ** 2)).sum() class ScaleMixtureGaussian(object): def __init__(self, pi, sigma1, sigma2): self.pi = pi self.sigma1 = sigma1 self.sigma2 = sigma2 self.gaussian1 = torch.distributions.Normal(0, sigma1) self.gaussian2 = torch.distributions.Normal(0, sigma2) def log_prob(self, input): prob1 = torch.exp(self.gaussian1.log_prob(input)) prob2 = torch.exp(self.gaussian2.log_prob(input)) return (torch.log(self.pi * prob1 + (1 - self.pi) * prob2)).sum() PI = 0.5 SIGMA_1 = torch.FloatTensor([math.exp(-0)]) SIGMA_2 = torch.FloatTensor([math.exp(-6)]) class BayesianLinear(nn.Module): def __init__(self, in_features, out_features): super().__init__() self.in_features = in_features self.out_features = out_features # Weight paramters 
self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-0.2, 0.2)) self.weight_rho = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-5, -4)) self.weight = Gaussian(self.weight_mu, self.weight_rho) # Bias parameters self.bias_mu = nn.Parameter(torch.Tensor(out_features).uniform_(-0.2, 0.2)) self.bias_rho = nn.Parameter(torch.Tensor(out_features).uniform_(-5, -4)) self.bias = Gaussian(self.bias_mu, self.bias_rho) # Prior distributions self.weight_prior = ScaleMixtureGaussian(PI, SIGMA_1, SIGMA_2) self.bias_prior = ScaleMixtureGaussian(PI, SIGMA_1, SIGMA_2) self.log_prior = 0 self.log_variational_posterior = 0 def forward(self, input, sample=False, calculate_log_probs=False): if self.training or sample: weight = self.weight.sample() bias = self.bias.sample() else: weight = self.weight.mu bias = self.bias.mu if self.training or calculate_log_probs: self.log_prior = self.weight_prior.log_prob(weight)\ + self.bias_prior.log_prob(bias) self.log_variational_posterior = self.weight.log_prob(weight)\ + self.bias.log_prob(bias) else: self.log_prior = 0 self.log_variational_posterior = 0 return F.linear(input, weight, bias) SAMPLES = 10 BATCH_SIZE = 32 OUTPUT_DIM = 1 class BayesianNetwork(nn.Module): def __init__(self): super().__init__() self.l1 = BayesianLinear(1, 20) self.l2 = BayesianLinear(20, 20) self.l3 = BayesianLinear(10, 1) def forward(self, x, sample=False): x = F.relu(self.l1(x, sample)) x = F.relu(self.l2(x, sample)) x = self.l3(x, sample) return x def log_prior(self): return self.l1.log_prior\ + self.l2.log_prior\ + self.l3.log_prior def log_variational_posterior(self): return self.l1.log_variational_posterior\ + self.l2.log_variational_posterior\ + self.l3.log_variational_posterior def sample_elbo(self, input, target, batch_size=BATCH_SIZE, output_dim=OUTPUT_DIM, samples=SAMPLES): outputs = torch.zeros(samples, batch_size, output_dim) log_priors = torch.zeros(samples) log_variational_posteriors = torch.zeros(samples) for i 
in range(samples): outputs[i] = self(input, sample=True) log_priors[i] = self.log_prior() log_variational_posteriors[i] = self.log_variational_posterior() log_prior = log_priors.mean() log_variational_posterior = log_variational_posteriors.mean() mse_loss = F.mse_loss(outputs.mean(0), target, size_average=False) loss = (log_variational_posterior - log_prior) / NUM_BATCHES\ + mse_loss return loss, log_prior, log_variational_posterior, mse_loss net = BayesianNetwork() writer = SummaryWriter() def write_weight_histograms(epoch): writer.add_histogram('histogram/w1_mu', net.l1.weight_mu,epoch) writer.add_histogram('histogram/w1_rho', net.l1.weight_rho,epoch) writer.add_histogram('histogram/w2_mu', net.l2.weight_mu,epoch) writer.add_histogram('histogram/w2_rho', net.l2.weight_rho,epoch) writer.add_histogram('histogram/w3_mu', net.l3.weight_mu,epoch) writer.add_histogram('histogram/w3_rho', net.l3.weight_rho,epoch) writer.add_histogram('histogram/b1_mu', net.l1.bias_mu,epoch) writer.add_histogram('histogram/b1_rho', net.l1.bias_rho,epoch) writer.add_histogram('histogram/b2_mu', net.l2.bias_mu,epoch) writer.add_histogram('histogram/b2_rho', net.l2.bias_rho,epoch) writer.add_histogram('histogram/b3_mu', net.l3.bias_mu,epoch) writer.add_histogram('histogram/b3_rho', net.l3.bias_rho,epoch) def write_loss_scalars(epoch, batch_idx, loss, log_prior, log_variational_posterior, negative_log_likelihood): writer.add_scalar('logs/loss', loss, epoch*NUM_BATCHES+batch_idx) writer.add_scalar('logs/complexity_cost', log_variational_posterior-log_prior, epoch*NUM_BATCHES+batch_idx) writer.add_scalar('logs/log_prior', log_prior, epoch*NUM_BATCHES+batch_idx) writer.add_scalar('logs/log_variational_posterior', log_variational_posterior, epoch*NUM_BATCHES+batch_idx) writer.add_scalar('logs/negative_log_likelihood', negative_log_likelihood, epoch*NUM_BATCHES+batch_idx) def train(net, optimizer, epcoh): net.train() for batch_idx, (data, target) in enumerate(tqdm(train_loader)): net.zero_grad() 
loss, log_prior, log_variational_posterior, mse_loss = net.sample_elbo(data, target) loss.backward() optimizer.step() write_loss_scalars(epoch, batch_idx, loss, log_prior, log_variational_posterior, mse_loss) write_weight_histograms(epoch+1) optimizer = optim.Adam(net.parameters()) for epoch in range(TRAIN_EPOCHS): train(net, optimizer, epoch) def test_ensemble(net): net.eval() correct = 0 corrects = np.zeros(TEST_SAMPLES + 1, dtype=int) with torch.no_grad(): for data, target in test_loader: outputs = torch.zeros(TEST_SAMPLES + 1, TEST_BATCH_SIZE, CLASSES) for i in range(TEST_SAMPLES): outputs[i] = net(data, sample=True) outputs[TEST_SAMPLES] = net(data, sample=False) output = outputs.mean(0) preds = outputs.max(2, keepdim=True)[1] pred = output.max(1, keepdim=True)[1] corrects += preds.eq(target.view_as(pred)).sum(dim=1).squeeze().numpy() correct += pred.eq(target.view_as(pred)).sum().item() for index, num in enumerate(corrects): if index < TEST_SAMPLES: print('Component {} Accuracy: {}/{}'.format(index, num, TEST_SIZE)) else: print('Posterior Mean Accuracy: {}/{}'.format(num, TEST_SIZE)) print('Ensemble Accuracy: {}/{}'.format(correct, TEST_SIZE)) test_ensemble(net) x.max(0) help(x.max) help(torch.max) ```
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. # Tutorial #1: Train an image classification model with Azure Machine Learning In this tutorial, you train a machine learning model both locally and on remote compute resources. You'll use the training and deployment workflow for Azure Machine Learning service (preview) in a Python Jupyter notebook. You can then use the notebook as a template to train your own machine learning model with your own data. This tutorial is **part one of a two-part tutorial series**. This tutorial trains a simple logistic regression using the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset and [scikit-learn](http://scikit-learn.org) with Azure Machine Learning. MNIST is a popular dataset consisting of 70,000 grayscale images. Each image is a handwritten digit of 28x28 pixels, representing a number from 0 to 9. The goal is to create a multi-class classifier to identify the digit a given image represents. Learn how to: > * Set up your development environment > * Access and examine the data > * Train a simple logistic regression model locally using the popular scikit-learn machine learning library > * Train multiple models on a remote cluster > * Review training results, find and register the best model You'll learn how to select a model and deploy it in [part two of this tutorial](deploy-models.ipynb) later. ## Prerequisites Use [these instructions](https://aka.ms/aml-how-to-configure-environment) to: * Create a workspace and its configuration file (**config.json**) * Save your **config.json** to the same folder as this notebook ## Set up your development environment All the setup for your development work can be accomplished in a Python notebook. 
Setup includes:

* Importing Python packages
* Connecting to a workspace to enable communication between your local computer and remote resources
* Creating an experiment to track all your runs
* Creating a remote compute target to use for training

### Import packages

Import Python packages you need in this session. Also display the Azure Machine Learning SDK version.

```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

import azureml.core
from azureml.core import Workspace

# check core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
```

### Connect to workspace

Create a workspace object from the existing workspace. `Workspace.from_config()` reads the file **config.json** and loads the details into an object named `ws`.

```
# load workspace configuration from the config.json file in the current folder.
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, ws.location, sep = '\t')
```

### Create experiment

Create an experiment to track the runs in your workspace. A workspace can have multiple experiments.

```
experiment_name = 'sklearn-mnist'

from azureml.core import Experiment
exp = Experiment(workspace=ws, name=experiment_name)
```

### Create or Attach existing AmlCompute

You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.

**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.
``` from azureml.core.compute import AmlCompute from azureml.core.compute import ComputeTarget import os # choose a name for your cluster compute_name = os.environ.get("AML_COMPUTE_CLUSTER_NAME", "cpucluster") compute_min_nodes = os.environ.get("AML_COMPUTE_CLUSTER_MIN_NODES", 0) compute_max_nodes = os.environ.get("AML_COMPUTE_CLUSTER_MAX_NODES", 4) # This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6 vm_size = os.environ.get("AML_COMPUTE_CLUSTER_SKU", "STANDARD_D2_V2") if compute_name in ws.compute_targets: compute_target = ws.compute_targets[compute_name] if compute_target and type(compute_target) is AmlCompute: print('found compute target. just use it. ' + compute_name) else: print('creating a new compute target...') provisioning_config = AmlCompute.provisioning_configuration(vm_size = vm_size, min_nodes = compute_min_nodes, max_nodes = compute_max_nodes) # create the cluster compute_target = ComputeTarget.create(ws, compute_name, provisioning_config) # can poll for a minimum number of nodes and for a specific timeout. # if no min node count is provided it will use the scale settings for the cluster compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20) # For a more detailed view of current AmlCompute status, use get_status() print(compute_target.get_status().serialize()) ``` You now have the necessary packages and compute resources to train a model in the cloud. ## Explore data Before you train a model, you need to understand the data that you are using to train it. You also need to copy the data into the cloud so it can be accessed by your cloud training environment. In this section you learn how to: * Download the MNIST dataset * Display some sample images * Upload data to the cloud ### Download the MNIST dataset Download the MNIST dataset and save the files into a `data` directory locally. Images and labels for both training and testing are downloaded. 
```
import urllib.request

# Fetch the four MNIST archives (train/test images and labels) into ./data.
os.makedirs('./data', exist_ok = True)

urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename='./data/train-images.gz')
urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename='./data/train-labels.gz')
urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename='./data/test-images.gz')
urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename='./data/test-labels.gz')
```

### Display some sample images

Load the compressed files into `numpy` arrays. Then use `matplotlib` to plot 30 random images from the dataset with their labels above them. Note this step requires a `load_data` function that's included in a `utils.py` file. This file is included in the sample folder. Please make sure it is placed in the same folder as this notebook. The `load_data` function simply parses the compressed files into numpy arrays.

```
# make sure utils.py is in the same directory as this code
from utils import load_data

# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the model converge faster.
X_train = load_data('./data/train-images.gz', False) / 255.0
y_train = load_data('./data/train-labels.gz', True).reshape(-1)

X_test = load_data('./data/test-images.gz', False) / 255.0
y_test = load_data('./data/test-labels.gz', True).reshape(-1)

# now let's show some randomly chosen images from the training set.
count = 0
sample_size = 30
plt.figure(figsize = (16, 6))
for i in np.random.permutation(X_train.shape[0])[:sample_size]:
    count = count + 1
    plt.subplot(1, sample_size, count)
    plt.axhline('')
    plt.axvline('')
    plt.text(x=10, y=-10, s=y_train[i], fontsize=18)
    plt.imshow(X_train[i].reshape(28, 28), cmap=plt.cm.Greys)
plt.show()
```

Now you have an idea of what these images look like and the expected prediction outcome.
### Upload data to the cloud Now make the data accessible remotely by uploading that data from your local machine into Azure so it can be accessed for remote training. The datastore is a convenient construct associated with your workspace for you to upload/download data, and interact with it from your remote compute targets. It is backed by Azure blob storage account. The MNIST files are uploaded into a directory named `mnist` at the root of the datastore. ``` ds = ws.get_default_datastore() print(ds.datastore_type, ds.account_name, ds.container_name) ds.upload(src_dir='./data', target_path='mnist', overwrite=True, show_progress=True) ``` You now have everything you need to start training a model. ## Train a local model Train a simple logistic regression model using scikit-learn locally. **Training locally can take a minute or two** depending on your computer configuration. ``` %%time from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(X_train, y_train) ``` Next, make predictions using the test set and calculate the accuracy. ``` y_hat = clf.predict(X_test) print(np.average(y_hat == y_test)) ``` With just a few lines of code, you have a 92% accuracy. ## Train on a remote cluster Now you can expand on this simple model by building a model with a different regularization rate. This time you'll train the model on a remote resource. For this task, submit the job to the remote training cluster you set up earlier. To submit a job you: * Create a directory * Create a training script * Create an estimator object * Submit the job ### Create a directory Create a directory to deliver the necessary code from your computer to the remote resource. ``` script_folder = './sklearn-mnist' os.makedirs(script_folder, exist_ok=True) ``` ### Create a training script To submit the job to the cluster, first create a training script. Run the following code to create the training script called `train.py` in the directory you just created. 
This training adds a regularization rate to the training algorithm, so produces a slightly different model than the local version. ``` %%writefile $script_folder/train.py import argparse import os import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.externals import joblib from azureml.core import Run from utils import load_data # let user feed in 2 parameters, the location of the data files (from datastore), and the regularization rate of the logistic regression model parser = argparse.ArgumentParser() parser.add_argument('--data-folder', type=str, dest='data_folder', help='data folder mounting point') parser.add_argument('--regularization', type=float, dest='reg', default=0.01, help='regularization rate') args = parser.parse_args() data_folder = os.path.join(args.data_folder, 'mnist') print('Data folder:', data_folder) # load train and test set into numpy arrays # note we scale the pixel intensity values to 0-1 (by dividing it with 255.0) so the model can converge faster. 
X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0 X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0 y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1) y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1) print(X_train.shape, y_train.shape, X_test.shape, y_test.shape, sep = '\n') # get hold of the current run run = Run.get_context() print('Train a logistic regression model with regularizaion rate of', args.reg) clf = LogisticRegression(C=1.0/args.reg, random_state=42) clf.fit(X_train, y_train) print('Predict the test set') y_hat = clf.predict(X_test) # calculate accuracy on the prediction acc = np.average(y_hat == y_test) print('Accuracy is', acc) run.log('regularization rate', np.float(args.reg)) run.log('accuracy', np.float(acc)) os.makedirs('outputs', exist_ok=True) # note file saved in the outputs folder is automatically uploaded into experiment record joblib.dump(value=clf, filename='outputs/sklearn_mnist_model.pkl') ``` Notice how the script gets data and saves models: + The training script reads an argument to find the directory containing the data. When you submit the job later, you point to the datastore for this argument: `parser.add_argument('--data-folder', type=str, dest='data_folder', help='data directory mounting point')` + The training script saves your model into a directory named outputs. <br/> `joblib.dump(value=clf, filename='outputs/sklearn_mnist_model.pkl')`<br/> Anything written in this directory is automatically uploaded into your workspace. You'll access your model from this directory later in the tutorial. The file `utils.py` is referenced from the training script to load the dataset correctly. Copy this script into the script folder so that it can be accessed along with the training script on the remote resource. 
``` import shutil shutil.copy('utils.py', script_folder) ``` ### Create an estimator An estimator object is used to submit the run. Create your estimator by running the following code to define: * The name of the estimator object, `est` * The directory that contains your scripts. All the files in this directory are uploaded into the cluster nodes for execution. * The compute target. In this case you will use the AmlCompute you created * The training script name, train.py * Parameters required from the training script * Python packages needed for training In this tutorial, this target is AmlCompute. All files in the script folder are uploaded into the cluster nodes for execution. The data_folder is set to use the datastore (`ds.as_mount()`). ``` from azureml.train.estimator import Estimator script_params = { '--data-folder': ds.as_mount(), '--regularization': 0.8 } est = Estimator(source_directory=script_folder, script_params=script_params, compute_target=compute_target, entry_script='train.py', conda_packages=['scikit-learn']) ``` ### Submit the job to the cluster Run the experiment by submitting the estimator object. ``` run = exp.submit(config=est) run ``` Since the call is asynchronous, it returns a **Preparing** or **Running** state as soon as the job is started. ## Monitor a remote run In total, the first run takes **approximately 10 minutes**. But for subsequent runs, as long as the script dependencies don't change, the same image is reused and hence the container start up time is much faster. Here is what's happening while you wait: - **Image creation**: A Docker image is created matching the Python environment specified by the estimator. The image is uploaded to the workspace. Image creation and uploading takes **about 5 minutes**. This stage happens once for each Python environment since the container is cached for subsequent runs. During image creation, logs are streamed to the run history. You can monitor the image creation progress using these logs. 
- **Scaling**: If the remote cluster requires more nodes to execute the run than currently available, additional nodes are added automatically. Scaling typically takes **about 5 minutes.** - **Running**: In this stage, the necessary scripts and files are sent to the compute target, then data stores are mounted/copied, then the entry_script is run. While the job is running, stdout and the ./logs directory are streamed to the run history. You can monitor the run's progress using these logs. - **Post-Processing**: The ./outputs directory of the run is copied over to the run history in your workspace so you can access these results. You can check the progress of a running job in multiple ways. This tutorial uses a Jupyter widget as well as a `wait_for_completion` method. ### Jupyter widget Watch the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes. ``` from azureml.widgets import RunDetails RunDetails(run).show() ``` If you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run). ### Get log results upon completion Model training and monitoring happen in the background. Wait until the model has completed training before running more code. Use `wait_for_completion` to show when the model training is complete. ``` run.wait_for_completion(show_output=False) # specify True for a verbose log ``` ### Display run results You now have a model trained on a remote cluster. Retrieve the accuracy of the model: ``` print(run.get_metrics()) ``` In the next tutorial you will explore this model in more detail. ## Register model The last step in the training script wrote the file `outputs/sklearn_mnist_model.pkl` in a directory named `outputs` in the VM of the cluster where the job is executed. `outputs` is a special directory in that all content in this directory is automatically uploaded to your workspace. 
This content appears in the run record in the experiment under your workspace. Hence, the model file is now also available in your workspace. You can see files associated with that run. ``` print(run.get_file_names()) ``` Register the model in the workspace so that you (or other collaborators) can later query, examine, and deploy this model. ``` # register model model = run.register_model(model_name='sklearn_mnist', model_path='outputs/sklearn_mnist_model.pkl') print(model.name, model.id, model.version, sep = '\t') ``` ## Next steps In this Azure Machine Learning tutorial, you used Python to: > * Set up your development environment > * Access and examine the data > * Train a simple logistic regression locally using the popular scikit-learn machine learning library > * Train multiple models on a remote cluster > * Review training details and register the best model You are ready to deploy this registered model using the instructions in the next part of the tutorial series: > [Tutorial 2 - Deploy models](img-classification-part2-deploy.ipynb)
github_jupyter
## Text classification using Neural Networks The goal of this notebook is to learn to use Neural Networks for text classification. In this notebook, we will: - Train a shallow model with learned embeddings - Download pre-trained embeddings from Glove - Use these pre-trained embeddings However keep in mind: - Deep Learning can be better on text classification than simpler ML techniques, but only on very large datasets and well designed/tuned models. - We won't be using the most efficient (in terms of computing) techniques, as Keras is good for prototyping but rather inefficient for training small embedding models on text. - The following projects can replicate similar word embedding models much more efficiently: [word2vec](https://github.com/dav/word2vec) and [gensim's word2vec](https://radimrehurek.com/gensim/models/word2vec.html) (self-supervised learning only), [fastText](https://github.com/facebookresearch/fastText) (both supervised and self-supervised learning), [Vowpal Wabbit](https://github.com/JohnLangford/vowpal_wabbit/wiki) (supervised learning). - Plain shallow sparse TF-IDF bigrams features without any embedding and Logistic Regression or Multinomial Naive Bayes is often competitive in small to medium datasets. ``` #load watermark %load_ext watermark %watermark -a 'Gopala KR' -u -d -v -p watermark,numpy,matplotlib,nltk ``` ## The BBC topic classification dataset The BBC provides some benchmark topic classification datasets in English at: http://mlg.ucd.ie/datasets/bbc.html. The raw text (encoded with the latin-1 character encoding) of the news can be downloaded as a ZIP archive: ``` import os import os.path as op import zipfile try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve BBC_DATASET_URL = "http://mlg.ucd.ie/files/datasets/bbc-fulltext.zip" zip_filename = BBC_DATASET_URL.rsplit('/', 1)[1] BBC_DATASET_FOLDER = 'bbc' if not op.exists(zip_filename): print("Downloading %s to %s..." 
% (BBC_DATASET_URL, zip_filename)) urlretrieve(BBC_DATASET_URL, zip_filename) if not op.exists(BBC_DATASET_FOLDER): with zipfile.ZipFile(zip_filename, 'r') as f: print("Extracting contents of %s..." % zip_filename) f.extractall('.') ``` Each of the five folders contains text files from one of the five topics: ``` target_names = sorted(folder for folder in os.listdir(BBC_DATASET_FOLDER) if op.isdir(op.join(BBC_DATASET_FOLDER, folder))) target_names ``` Let's randomly partition the text files in a training and test set while recording the target category of each file as an integer: ``` import numpy as np from sklearn.model_selection import train_test_split target = [] filenames = [] for target_id, target_name in enumerate(target_names): class_path = op.join(BBC_DATASET_FOLDER, target_name) for filename in os.listdir(class_path): filenames.append(op.join(class_path, filename)) target.append(target_id) target = np.asarray(target, dtype=np.int32) target_train, target_test, filenames_train, filenames_test = train_test_split( target, filenames, test_size=200, random_state=0) len(target_train), len(filenames_train) len(target_test), len(filenames_test) ``` Let's check that text of some document have been loaded correctly: ``` idx = 0 with open(filenames_train[idx], 'rb') as f: print("class:", target_names[target_train[idx]]) print() print(f.read().decode('latin-1')[:500] + '...') size_in_bytes = sum([len(open(fn, 'rb').read()) for fn in filenames_train]) print("Training set size: %0.3f MB" % (size_in_bytes / 1e6)) ``` This dataset is small so we can preload it all in memory once and for all to simplify the notebook. ``` texts_train = [open(fn, 'rb').read().decode('latin-1') for fn in filenames_train] texts_test = [open(fn, 'rb').read().decode('latin-1') for fn in filenames_test] ``` ## A first baseline model For simple topic classification problems, one should always try a simple method first. 
In this case a good baseline is extracting TF-IDF normalized bag of bi-grams features and then use a simple linear classifier such as logistic regression. It's a very efficient method and should give us a strong baseline to compare our deep learning method against. ``` from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline text_classifier = make_pipeline( TfidfVectorizer(min_df=3, max_df=0.8, ngram_range=(1, 2)), LogisticRegression(), ) %time _ = text_classifier.fit(texts_train, target_train) text_classifier.score(texts_test, target_test) ``` 4 classification errors on 200 test documents for a model fit in less than 10s. It's quite unlikely that we can significantly beat that baseline with a more complex deep learning based model. However let's try to reach a comparable level of accuracy with Embeddings-based models just for teaching purpose. ### Preprocessing text for the (supervised) CBOW model We will implement a simple classification model in Keras. Raw text requires (sometimes a lot of) preprocessing. The following cells uses Keras to preprocess text: - using a tokenizer. You may use different tokenizers (from scikit-learn, NLTK, custom Python function etc.). This converts the texts into sequences of indices representing the `20000` most frequent words - sequences have different lengths, so we pad them (add 0s at the end until the sequence is of length `1000`) - we convert the output classes as 1-hot encodings ``` from tensorflow.contrib import keras from keras.preprocessing.text import Tokenizer MAX_NB_WORDS = 20000 # vectorize the text samples into a 2D integer tensor tokenizer = Tokenizer(num_words=MAX_NB_WORDS, char_level=False) tokenizer.fit_on_texts(texts_train) sequences = tokenizer.texts_to_sequences(texts_train) sequences_test = tokenizer.texts_to_sequences(texts_test) word_index = tokenizer.word_index print('Found %s unique tokens.' 
% len(word_index)) ``` Tokenized sequences are converted to list of token ids (with an integer code): ``` sequences[0][:10] ``` The tokenizer object stores a mapping (vocabulary) from word strings to token ids that can be inverted to reconstruct the original message (without formatting): ``` type(tokenizer.word_index), len(tokenizer.word_index) index_to_word = dict((i, w) for w, i in tokenizer.word_index.items()) " ".join([index_to_word[i] for i in sequences[0]]) ``` Let's have a closer look at the tokenized sequences: ``` seq_lens = [len(s) for s in sequences] print("average length: %0.1f" % np.mean(seq_lens)) print("max length: %d" % max(seq_lens)) %matplotlib inline import matplotlib.pyplot as plt plt.hist(seq_lens, bins=50); ``` Let's zoom on the distribution of regular sized posts. The vast majority of the posts have less than 1000 symbols: ``` plt.hist([l for l in seq_lens if l < 3000], bins=50); ``` Let's truncate and pad all the sequences to 1000 symbols to build the training set: ``` from keras.preprocessing.sequence import pad_sequences MAX_SEQUENCE_LENGTH = 1000 # pad sequences with 0s x_train = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH) x_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH) print('Shape of data tensor:', x_train.shape) print('Shape of data test tensor:', x_test.shape) from keras.utils.np_utils import to_categorical y_train = to_categorical(target_train) print('Shape of label tensor:', y_train.shape) ``` ### A simple supervised CBOW model in Keras The following computes a very simple model, as described in [fastText](https://github.com/facebookresearch/fastText): <img src="images/fasttext.svg" style="width: 600px;" /> - Build an embedding layer mapping each word to a vector representation - Compute the vector representation of all words in each sequence and average them - Add a dense layer to output 20 classes (+ softmax) ``` from keras.layers import Dense, Input, Flatten from keras.layers import 
GlobalAveragePooling1D, Embedding from keras.models import Model from keras import optimizers EMBEDDING_DIM = 50 N_CLASSES = len(target_names) # input: a sequence of MAX_SEQUENCE_LENGTH integers sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32') embedding_layer = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH, trainable=True) embedded_sequences = embedding_layer(sequence_input) average = GlobalAveragePooling1D()(embedded_sequences) predictions = Dense(N_CLASSES, activation='softmax')(average) model = Model(sequence_input, predictions) model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=0.01), metrics=['acc']) model.fit(x_train, y_train, validation_split=0.1, epochs=10, batch_size=32) ``` **Exercises** - Compute model accuracy on test set ``` # %load solutions/accuracy.py output_test = model.predict(x_test) test_casses = np.argmax(output_test, axis=-1) print("Test accuracy:", np.mean(test_casses == target_test)) ``` ### Building more complex models **Exercise** - From the previous template, build more complex models using: - **1d convolution and 1d maxpooling**. Note that you will still need a GlobalAveragePooling or Flatten after the convolutions as the final `Dense` layer expects a fixed size input; - **Recurrent neural networks through LSTM** (you will need to **reduce sequence length before using the LSTM layer**). <img src="images/unrolled_rnn_one_output_2.svg" style="width: 600px;" /> **Bonus** - You may try different architectures with: - more intermediate layers, combination of dense, conv, recurrent - different recurrent (GRU, RNN) - bidirectional LSTMs **Note**: The goal is to build working models rather than getting better test accuracy as this task is already very well solved by the simple model. Build your models, and verify that they converge to OK results. 
``` from keras.layers import Embedding, Dense, Input, Flatten from keras.layers import Conv1D, LSTM, GRU from keras.layers import MaxPooling1D, GlobalAveragePooling1D from keras.models import Model EMBEDDING_DIM = 50 N_CLASSES = len(target_names) # input: a sequence of MAX_SEQUENCE_LENGTH integers sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32') embedding_layer = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH, trainable=True) embedded_sequences = embedding_layer(sequence_input) # TODO model = Model(sequence_input, predictions) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc']) # %load solutions/conv1d.py # %load solutions/lstm.py model.fit(x_train, y_train, validation_split=0.1, epochs=5, batch_size=32) output_test = model.predict(x_test) test_casses = np.argmax(output_test, axis=-1) print("Test accuracy:", np.mean(test_casses == target_test)) ``` ### Loading pre-trained embeddings The file `glove100K.100d.txt` is an extract of [Glove](http://nlp.stanford.edu/projects/glove/) Vectors, that were trained on english Wikipedia 2014 + Gigaword 5 (6B tokens). We extracted the `100 000` most frequent words. 
They have a dimension of `100` ``` embeddings_index = {} embeddings_vectors = [] with open('glove100K.100d.txt', 'rb') as f: word_idx = 0 for line in f: values = line.decode('utf-8').split() word = values[0] vector = np.asarray(values[1:], dtype='float32') embeddings_index[word] = word_idx embeddings_vectors.append(vector) word_idx = word_idx + 1 inv_index = {v: k for k, v in embeddings_index.items()} print("found %d different words in the file" % word_idx) # Stack all embeddings in a large numpy array glove_embeddings = np.vstack(embeddings_vectors) glove_norms = np.linalg.norm(glove_embeddings, axis=-1, keepdims=True) glove_embeddings_normed = glove_embeddings / glove_norms print(glove_embeddings.shape) def get_emb(word): idx = embeddings_index.get(word) if idx is None: return None else: return glove_embeddings[idx] def get_normed_emb(word): idx = embeddings_index.get(word) if idx is None: return None else: return glove_embeddings_normed[idx] get_emb("computer") ``` ### Finding most similar words **Exercise** Build a function to find most similar words, given a word as query: - lookup the vector for the query word in the Glove index; - compute the cosine similarity between a word embedding and all other words; - display the top 10 most similar words. 
**Bonus** Change your function so that it takes multiple words as input (by averaging them) ``` # %load solutions/most_similar.py most_similar("cpu") most_similar("pitt") most_similar("jolie") ``` Predict the future better than tarot: ``` np.dot(get_normed_emb('aniston'), get_normed_emb('pitt')) np.dot(get_normed_emb('jolie'), get_normed_emb('pitt')) most_similar("1") # bonus: yangtze is a chinese river most_similar(["river", "chinese"]) ``` ### Displaying vectors with t-SNE ``` from sklearn.manifold import TSNE word_emb_tsne = TSNE(perplexity=30).fit_transform(glove_embeddings_normed[:1000]) %matplotlib inline import matplotlib.pyplot as plt plt.figure(figsize=(40, 40)) axis = plt.gca() np.set_printoptions(suppress=True) plt.scatter(word_emb_tsne[:, 0], word_emb_tsne[:, 1], marker=".", s=1) for idx in range(1000): plt.annotate(inv_index[idx], xy=(word_emb_tsne[idx, 0], word_emb_tsne[idx, 1]), xytext=(0, 0), textcoords='offset points') plt.savefig("tsne.png") plt.show() ``` ### Using pre-trained embeddings in our model We want to use these pre-trained embeddings for transfer learning. This process is rather similar than transfer learning in image recognition: the features learnt on words might help us bootstrap the learning process, and increase performance if we don't have enough training data. 
- We initialize the embedding matrix from the model with Glove embeddings: - take all words from our BBC vocabulary (`MAX_NB_WORDS = 20000`), and look up their Glove embedding - place the Glove embedding at the corresponding index in the matrix - if the word is not in the Glove vocabulary, we only place zeros in the matrix - We may fix these embeddings or fine-tune them ``` EMBEDDING_DIM = 100 # prepare embedding matrix nb_words_in_matrix = 0 nb_words = min(MAX_NB_WORDS, len(word_index)) embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM)) for word, i in word_index.items(): if i >= MAX_NB_WORDS: continue embedding_vector = get_emb(word) if embedding_vector is not None: # words not found in embedding index will be all-zeros. embedding_matrix[i] = embedding_vector nb_words_in_matrix = nb_words_in_matrix + 1 print("added %d words in the embedding matrix" % nb_words_in_matrix) ``` Build a layer with pre-trained embeddings: ``` pretrained_embedding_layer = Embedding( MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, ) ``` ### A model with pre-trained Embeddings Average word embeddings pre-trained with Glove / Word2Vec usually works surprisingly well. However, when averaging more than `10-15` words, the resulting vector becomes too noisy and classification performance is degraded. 
``` sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32') embedded_sequences = pretrained_embedding_layer(sequence_input) average = GlobalAveragePooling1D()(embedded_sequences) predictions = Dense(N_CLASSES, activation='softmax')(average) model = Model(sequence_input, predictions) # We don't want to fine-tune embeddings model.layers[1].trainable = False model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=0.01), metrics=['acc']) model.fit(x_train, y_train, validation_split=0.1, epochs=15, batch_size=32) ``` **Remarks:** - On this type of task, using pre-trained embeddings can degrade results as we train far fewer parameters and we average a large number of pre-trained embeddings. - Pre-trained embeddings followed by global averaging prevents overfitting but can also cause some underfitting. - Using convolutions / LSTM should help counter the underfitting effect. - It is also advisable to treat pre-trained embeddings and out-of-vocabulary words separately. Pre-trained embeddings can be very useful when the training set is small and the individual text documents to classify are short: in this case there might be a single very important word in a test document that drives the label. If that word has never been seen in the training set but some synonyms were seen, the semantic similarity captured by the embedding will allow the model to generalize out of the restricted training set vocabulary. We did not observe this effect here because the documents are long enough so that guessing the topic can be done redundantly. Shortening the documents to make the task more difficult could possibly highlight this benefit. ### Reality check On small/medium datasets, simpler classification methods usually perform better, and are much more efficient to compute. 
Here are two resources to go further: - Naive Bayes approach, using scikit-learn http://scikit-learn.org/stable/datasets/twenty_newsgroups.html - Alec Radford (OpenAI) gave a very interesting presentation, showing that you need a VERY large dataset to have real gains from GRU/LSTM in text classification https://www.slideshare.net/odsc/alec-radfordodsc-presentation However, when looking at features, one can see that classification using simple methods isn't very robust, and won't generalize well to slightly different domains (e.g. forum posts => emails) Note: Implementation in Keras for text is very slow due to python overhead and lack of hashing techniques. The fastText implementation https://github.com/facebookresearch/fasttext is much, much faster. ## Going further - Compare pre-trained embeddings vs specifically trained embeddings - Train your own wordvectors in any language using [gensim's word2vec](https://radimrehurek.com/gensim/models/word2vec.html) - Check [Keras Examples](https://github.com/fchollet/keras/tree/master/examples) on `imdb` sentiment analysis - Install fastText (Linux or macOS only, use the Linux VM if under Windows) and give it a try on the classification example in its repository. - Today, the **state-of-the-art text classification** can be achieved by **transfer learning from a language model** instead of using traditional word embeddings. See for instance: FitLaM, Fine-tuned Language Models for Text Classification https://arxiv.org/abs/1801.06146. The second notebook introduces how to train such a language model from unlabeled data.
github_jupyter
# Style Transfer with Deep Neural Networks In this notebook, we’ll *recreate* a style transfer method that is outlined in the paper, [Image Style Transfer Using Convolutional Neural Networks, by Gatys](https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Gatys_Image_Style_Transfer_CVPR_2016_paper.pdf) in PyTorch. In this paper, style transfer uses the features found in the 19-layer VGG Network, which is comprised of a series of convolutional and pooling layers, and a few fully-connected layers. In the image below, the convolutional layers are named by stack and their order in the stack. Conv_1_1 is the first convolutional layer that an image is passed through, in the first stack. Conv_2_1 is the first convolutional layer in the *second* stack. The deepest convolutional layer in the network is conv_5_4. <img src='notebook_ims/vgg19_convlayers.png' width=80% /> ### Separating Style and Content Style transfer relies on separating the content and style of an image. Given one content image and one style image, we aim to create a new, _target_ image which should contain our desired content and style components: * objects and their arrangement are similar to that of the **content image** * style, colors, and textures are similar to that of the **style image** An example is shown below, where the content image is of a cat, and the style image is of [Hokusai's Great Wave](https://en.wikipedia.org/wiki/The_Great_Wave_off_Kanagawa). The generated target image still contains the cat but is stylized with the waves, blue and beige colors, and block print textures of the style image! <img src='notebook_ims/style_tx_cat.png' width=80% /> In this notebook, we'll use a pre-trained VGG19 Net to extract content or style features from a passed in image. We'll then formalize the idea of content and style _losses_ and use those to iteratively update our target image until we get a result that we want. 
You are encouraged to use a style and content image of your own and share your work on Twitter with @udacity; we'd love to see what you come up with! ``` # import resources %matplotlib inline from PIL import Image from io import BytesIO import matplotlib.pyplot as plt import numpy as np import torch import torch.optim as optim import requests from torchvision import transforms, models ``` ## Load in VGG19 (features) VGG19 is split into two portions: * `vgg19.features`, which are all the convolutional and pooling layers * `vgg19.classifier`, which are the three linear, classifier layers at the end We only need the `features` portion, which we're going to load in and "freeze" the weights of, below. ``` # get the "features" portion of VGG19 (we will not need the "classifier" portion) vgg = models.vgg19(pretrained=True).features # freeze all VGG parameters since we're only optimizing the target image for param in vgg.parameters(): param.requires_grad_(False) # move the model to GPU, if available device = torch.device("cuda" if torch.cuda.is_available() else "cpu") vgg.to(device) ``` ### Load in Content and Style Images You can load in any images you want! Below, we've provided a helper function for loading in any type and size of image. The `load_image` function also converts images to normalized Tensors. Additionally, it will be easier to have smaller images and to squish the content and style images so that they are of the same size. 
``` def load_image(img_path, max_size=400, shape=None): ''' Load in and transform an image, making sure the image is <= 400 pixels in the x-y dims.''' if "http" in img_path: response = requests.get(img_path) image = Image.open(BytesIO(response.content)).convert('RGB') else: image = Image.open(img_path).convert('RGB') # large images will slow down processing if max(image.size) > max_size: size = max_size else: size = max(image.size) if shape is not None: size = shape in_transform = transforms.Compose([ transforms.Resize(size), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # discard the transparent, alpha channel (that's the :3) and add the batch dimension image = in_transform(image)[:3,:,:].unsqueeze(0) return image ``` Next, I'm loading in images by file name and forcing the style image to be the same size as the content image. ``` # load in content and style image content = load_image('73291300_2371381546435786_6669656685348388864_o.jpg').to(device) # Resize style to match content, makes code easier style = load_image('45bda587238569.5db1ad02e5ea3.png', shape=content.shape[-2:]).to(device) # helper function for un-normalizing an image # and converting it from a Tensor image to a NumPy image for display def im_convert(tensor): """ Display a tensor as an image. """ image = tensor.to("cpu").clone().detach() image = image.numpy().squeeze() image = image.transpose(1,2,0) image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406)) image = image.clip(0, 1) return image # display the images fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10)) # content and style ims side-by-side ax1.imshow(im_convert(content)) ax2.imshow(im_convert(style)) ``` --- ## VGG19 Layers To get the content and style representations of an image, we have to pass an image forward through the VGG19 network until we get to the desired layer(s) and then get the output from that layer. 
``` # print out VGG19 structure so you can see the names of various layers # print(vgg) ``` ## Content and Style Features #### TODO: complete the mapping of layer names to the names found in the paper for the _content representation_ and the _style representation_. The first layer (0) to `conv1_1` has been done for you, below. ``` def get_features(image, model, layers=None): """ Run an image forward through a model and get the features for a set of layers. Default layers are for VGGNet matching Gatys et al (2016) """ ## TODO: Complete mapping layer names of PyTorch's VGGNet to names from the paper ## Need the layers for the content and style representations of an image if layers is None: layers = {'0': 'conv1_1', '5': 'conv2_1', '10': 'conv3_1', '19': 'conv4_1', '21': 'conv4_2', ## content representation '28': 'conv5_1'} features = {} x = image # model._modules is a dictionary holding each module in the model for name, layer in model._modules.items(): x = layer(x) if name in layers: features[layers[name]] = x return features ``` --- ## Gram Matrix The output of every convolutional layer is a Tensor with dimensions associated with the `batch_size`, a depth, `d` and some height and width (`h`, `w`). The Gram matrix of a convolutional layer can be calculated as follows: * Get the depth, height, and width of a tensor using `batch_size, d, h, w = tensor.size()` * Reshape that tensor so that the spatial dimensions are flattened * Calculate the gram matrix by multiplying the reshaped tensor by its transpose *Note: You can multiply two matrices using `torch.mm(matrix1, matrix2)`.* #### TODO: Complete the `gram_matrix` function. 
def gram_matrix(tensor):
    """Calculate the Gram matrix of a conv-layer activation.

    Gram Matrix: https://en.wikipedia.org/wiki/Gramian_matrix

    The input is expected to have shape (batch, depth, height, width).
    NOTE(review): the batch dimension is discarded, so this assumes a
    batch size of 1 — the view() below would error for larger batches.
    """
    _, depth, height, width = tensor.size()
    # Flatten the spatial dimensions: one row per channel.
    flattened = tensor.view(depth, height * width)
    # G = F F^T, the channel-by-channel correlation matrix.
    return flattened @ flattened.t()
It's recommended that you leave the content_weight = 1 and set the style_weight to achieve the ratio you want. ``` # weights for each style layer # weighting earlier layers more will result in *larger* style artifacts # notice we are excluding `conv4_2` our content representation style_weights = {'conv1_1': 1., 'conv2_1': 0.8, 'conv3_1': 0.5, 'conv4_1': 0.3, 'conv5_1': 0.1} # you may choose to leave these as is content_weight = 1 # alpha style_weight = 1e6 # beta ``` ## Updating the Target & Calculating Losses You'll decide on a number of steps for which to update your image, this is similar to the training loop that you've seen before, only we are changing our _target_ image and nothing else about VGG19 or any other image. Therefore, the number of steps is really up to you to set! **I recommend using at least 2000 steps for good results.** But, you may want to start out with fewer steps if you are just testing out different weight values or experimenting with different images. Inside the iteration loop, you'll calculate the content and style losses and update your target image, accordingly. #### Content Loss The content loss will be the mean squared difference between the target and content features at layer `conv4_2`. This can be calculated as follows: ``` content_loss = torch.mean((target_features['conv4_2'] - content_features['conv4_2'])**2) ``` #### Style Loss The style loss is calculated in a similar way, only you have to iterate through a number of layers, specified by name in our dictionary `style_weights`. > You'll calculate the gram matrix for the target image, `target_gram` and style image `style_gram` at each of these layers and compare those gram matrices, calculating the `layer_style_loss`. > Later, you'll see that this value is normalized by the size of the layer. #### Total Loss Finally, you'll create the total loss by adding up the style and content losses and weighting them with your specified alpha and beta! 
# Style-transfer optimization loop: iteratively updates the `target` image
# (the only thing with requires_grad=True) so its VGG features match the
# content image at conv4_2 and the style image's Gram matrices elsewhere.
# Relies on notebook globals: target, vgg, content_features, style_grams,
# style_weights, content_weight, style_weight, get_features, gram_matrix,
# im_convert.

# for displaying the target image, intermittently
show_every = 400

# iteration hyperparameters
optimizer = optim.Adam([target], lr=0.003)
steps = 2000  # decide how many iterations to update your image (5000)

for ii in range(1, steps+1):

    # get the features from your target image
    target_features = get_features(target, vgg)

    # the content loss: MSE between target and content at conv4_2 only
    content_loss = torch.mean((target_features['conv4_2'] - content_features['conv4_2'])**2)

    # the style loss
    # initialize the style loss to 0
    style_loss = 0
    # then add to it for each layer's gram matrix loss
    for layer in style_weights:
        # get the "target" style representation for the layer
        target_feature = target_features[layer]
        target_gram = gram_matrix(target_feature)
        _, d, h, w = target_feature.shape
        # get the "style" style representation (precomputed once, outside the loop)
        style_gram = style_grams[layer]
        # the style loss for one layer, weighted appropriately
        layer_style_loss = style_weights[layer] * torch.mean((target_gram - style_gram)**2)
        # add to the style loss, normalized by the layer's size so large
        # and small feature maps contribute on a comparable scale
        style_loss += layer_style_loss / (d * h * w)

    # calculate the *total* loss (alpha * content + beta * style)
    total_loss = content_weight * content_loss + style_weight * style_loss

    # update your target image (note: only `target` is optimized; VGG is frozen)
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()

    # display intermediate images and print the loss
    if ii % show_every == 0:
        print('Total loss: ', total_loss.item())
        plt.imshow(im_convert(target))
        plt.show()
github_jupyter
# United States - Crime Rates - 1960 - 2014 ### Introduction: This time you will create a time-series DataFrame from US crime statistics. Special thanks to: https://github.com/justmarkham for sharing the dataset and materials. ### Step 1. Import the necessary libraries ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib.dates as mpd ``` ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv). ### Step 3. Assign it to a variable called crime. ``` crime = pd.read_csv('US_Crime_Rates_1960_2014.csv') crime.head() ``` ### Step 4. What is the type of the columns? ``` crime.dtypes ``` ##### Have you noticed that the type of Year is int64? But pandas has a different type to work with Time Series. Let's see it now. ### Step 5. Convert the type of the column Year to datetime64 ``` crime['Year'] = pd.to_datetime(crime['Year'], format='%Y') ``` ### Step 6. Set the Year column as the index of the dataframe ``` crime.set_index('Year', inplace=True) crime.head(2) ``` ### Step 7. Delete the Total column ``` # Solution 1 crime.drop(columns='Total', inplace=True) # Solution 2 #del crime['Total'] crime.head() ``` ### Step 8. 
Group the year by decades and sum the values #### Pay attention to the Population column number, summing this column is a mistake ``` _={} for col in crime.columns : if col == 'Population': _[col]=['mean', 'max'] else: _[col]='sum' _ # Solution 1 crime_dec = crime.reset_index().groupby(pd.Grouper(key='Year', freq='10Y')).agg(_) crime_dec.columns = ['__'.join(c).strip() for c in crime_dec.columns.values] crime_dec # Solution 2 crime_dec = crime.resample('10AS').agg(_) crime_dec.columns = ['__'.join(c).strip() for c in crime_dec.columns.values] crime_dec import matplotlib.ticker as mtick fig, axs = plt.subplots(1,2) fig.patch.set_facecolor('white') fig.set_size_inches(12,3) fig.subplots_adjust(wspace=0.4) for i in crime_dec.columns[2:]: axs[0].plot(crime_dec.index, crime_dec.loc[:,i]/1E6,\ 'o-', label=i.strip('__sum').replace('_', ' ')) ax0bis = axs[0].twinx() ax0bis.plot(crime_dec.index, crime_dec.iloc[:,2:].agg(sum, axis=1)/1E9, \ '-.',color='k',lw=4, label='total crimes') axs[0].set_xlabel("Decade"), axs[0].set_ylabel("Number (millions)") ax0bis.set_ylabel("Total number (billions)") axs[0].legend(ncol=3, bbox_to_anchor=(1.15,1.35)).get_frame().set_alpha(0.3) ax0bis.legend(ncol=1, bbox_to_anchor=(0.38,1)).get_frame().set_alpha(0.3) ax0bis.set(ylim=(0,0.3)) for i in crime_dec.columns[2:]: axs[1].plot(crime_dec.index, crime_dec.loc[:,i].div(crime_dec.iloc[:,0])*100,\ 'o-', label=i.strip('__sum').replace('_', ' ')) axs[1].plot(crime_dec.index, crime_dec.iloc[:,2:].apply(lambda x: x*100/crime_dec.iloc[:,0]).agg(sum,axis=1), \ '-.',color='k',lw=4, label='total crimes') ax1bis = axs[1].twinx() ax1bis.plot(crime_dec.index, crime_dec.iloc[:,1]/1E6, \ '--',color='grey',lw=3, label='mean population') ax1bis.legend(ncol=1, bbox_to_anchor=(0.5,1.15)).get_frame().set_alpha(0.3) ax1bis.set_ylabel('Mean Population (Millions)') ax1bis.set(ylim=(0,350)) axs[1].set_xlabel("Decade"), axs[1].set_ylabel("Percent of population") axs[1].yaxis.set_major_formatter(mtick.PercentFormatter()) 
``` ### Step 9. What is the most dangerous decade to live in the US? ``` sum_dec = crime_dec.iloc[:,2:].agg(sum, axis=1) print(sum_dec.idxmax(),sum_dec.max()) sum_dec xlab = [d.year for d in sum_dec.index.to_pydatetime()] x = np.arange(len(xlab)) y = sum_dec.values/1E6 ypct = crime_dec.iloc[:,2:].apply(lambda x: x*100/crime_dec.iloc[:,0]).agg(sum,axis=1) fig, ax1 = plt.subplots(1) ax1.bar(x-0.15, y, width=0.3, color='b') ax2 = ax1.twinx() ax2.bar(x+0.15, ypct, width=0.3, color='r') plt.gcf().patch.set_facecolor('white') ax1.set_xlabel('decade') ax1.set_ylabel('sum (millions)') ax2.set_ylabel('sum pct') # ax.set_xticklabels([x.year for x in sum_dec.index.to_pydatetime()]) ax1.xaxis.set_tick_params(rotation=0) for t in ax1.yaxis.get_ticklabels(): t.set_color('b'), t.set_fontweight('bold') for t in ax2.yaxis.get_ticklabels(): t.set_color('r'), t.set_fontweight('bold') ax2.set_xticklabels(['phantom']+xlab) plt.show() ```
github_jupyter
##### Copyright 2020 The TensorFlow IO Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # 音频数据准备和增强 <table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://tensorflow.google.cn/io/tutorials/audio"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png">在 TensorFlow.org 上查看</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/io/tutorials/audio.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">在 Google Colab 中运行</a></td> <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/io/tutorials/audio.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">在 GitHub 上查看源代码</a></td> <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/io/tutorials/audio.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png">下载笔记本</a></td> </table> ## 概述 自动语音识别面临的最大挑战之一是音频数据的准备和增强。音频数据分析可能涉及时域或频域,与图像等其他数据源相比,这提高了复杂性。 作为 TensorFlow 生态系统的一部分,`tensorflow-io` 软件包提供了不少与音频相关的 API。这些 API 非常有用,可简化音频数据的准备和增强。 ## 设置 ### 安装要求的软件包,然后重新启动运行时 ``` !pip install tensorflow-io ``` ## 使用方法 ### 读取音频文件 在 TensorFlow IO 中,利用类 `tfio.audio.AudioIOTensor` 可以将音频文件读取到延迟加载的 `IOTensor` 中: ``` import tensorflow as tf import tensorflow_io as tfio audio = tfio.audio.AudioIOTensor('gs://cloud-samples-tests/speech/brooklyn.flac') print(audio) ``` 在上面的示例中,Flac 文件 `brooklyn.flac` 来自 [Google 
Cloud](https://cloud.google.com/speech-to-text/docs/quickstart-gcloud) 中可公开访问的音频片段。 示例中直接使用 GCS 地址 `gs://cloud-samples-tests/speech/brooklyn.flac`,因为 TensorFlow 支持 GCS 文件系统。除了 `Flac` 格式,凭借自动文件格式检测,`AudioIOTensor` 还支持 `WAV`、`Ogg`、`MP3` 和 `MP4A` 格式。 `AudioIOTensor` 是一个延迟加载张量,因此,刚开始只显示形状、dtype 和采样率。`AudioIOTensor` 的形状用 `[samples, channels]` 表示,这表示您加载的音频片段是单声道音频(`int16` 类型的 `28979` 个样本)。 仅需要时才会读取该音频片段的内容。要读取音频片段的内容,可通过 `to_tensor()` 将 `AudioIOTensor` 转换为 `Tensor`,也可以通过切片读取。如果只需要一个大音频片段的一小部分,切片尤其实用: ``` audio_slice = audio[100:] # remove last dimension audio_tensor = tf.squeeze(audio_slice, axis=[-1]) print(audio_tensor) ``` 音频可通过以下方式播放: ``` from IPython.display import Audio Audio(audio_tensor.numpy(), rate=audio.rate.numpy()) ``` 更方便的方式是,将张量转换为浮点数并在计算图中显示音频片段: ``` import matplotlib.pyplot as plt tensor = tf.cast(audio_tensor, tf.float32) / 32768.0 plt.figure() plt.plot(tensor.numpy()) ``` ### 降噪 为音频降噪有时很有意义,这可以通过 API `tfio.audio.trim` 实现。从该 API 返回的是片段的一对 `[start, stop]` 位置: ``` position = tfio.audio.trim(tensor, axis=0, epsilon=0.1) print(position) start = position[0] stop = position[1] print(start, stop) processed = tensor[start:stop] plt.figure() plt.plot(processed.numpy()) ``` ### 淡入和淡出 一种有用的音频工程技术是淡入淡出,也就是逐渐增强或减弱音频信号。这可以通过 `tfio.audio.fade` 实现。`tfio.audio.fade` 支持不同的淡入淡出形状,如 `linear`、`logarithmic` 或 `exponential`: ``` fade = tfio.audio.fade( processed, fade_in=1000, fade_out=2000, mode="logarithmic") plt.figure() plt.plot(fade.numpy()) ``` ### 声谱图 高级音频处理通常需要根据时间调整音频频率。在 `tensorflow-io` 中,可通过 `tfio.audio.spectrogram` 将波形图转换为声谱图。 ``` # Convert to spectrogram spectrogram = tfio.audio.spectrogram( fade, nfft=512, window=512, stride=256) plt.figure() plt.imshow(tf.math.log(spectrogram).numpy()) ``` 也可以转换为其他不同的比例: ``` # Convert to mel-spectrogram mel_spectrogram = tfio.audio.melscale( spectrogram, rate=16000, mels=128, fmin=0, fmax=8000) plt.figure() plt.imshow(tf.math.log(mel_spectrogram).numpy()) # Convert to db scale mel-spectrogram dbscale_mel_spectrogram = 
tfio.audio.dbscale( mel_spectrogram, top_db=80) plt.figure() plt.imshow(dbscale_mel_spectrogram.numpy()) ``` ### SpecAugment 除上述数据准备和增强 API 外,`tensorflow-io` 软件包还提供了高级声谱图增强,最主要的是在 [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition (Park et al., 2019)](https://arxiv.org/pdf/1904.08779.pdf) 中讨论的频率掩蔽和时间掩蔽。 #### 频率掩蔽 在频率掩蔽中,对频率通道 `[f0, f0 + f)` 进行掩蔽,其中 `f` 选自从 `0` 到频率掩蔽参数 `F` 的均匀分布,而 `f0` 则选自 `(0, ν − f)`,其中 `ν` 是频率通道的数量。 ``` # Freq masking freq_mask = tfio.audio.freq_mask(dbscale_mel_spectrogram, param=10) plt.figure() plt.imshow(freq_mask.numpy()) ``` #### 时间掩蔽 在时间掩蔽中,对 `t` 个连续时间步骤 `[t0, t0 + t)` 进行掩蔽,其中 `t` 选自从 `0` 到时间掩蔽参数 `T` 的均匀分布,而 `t0` 则选自 `[0, τ − t)`,其中 `τ` 是时间步数。 ``` # Time masking time_mask = tfio.audio.time_mask(dbscale_mel_spectrogram, param=10) plt.figure() plt.imshow(time_mask.numpy()) ```
github_jupyter
# Hyperparameters and Model Validation In the previous section, we saw the basic recipe for applying a supervised machine learning model: 1. Choose a class of model 2. Choose model hyperparameters 3. Fit the model to the training data 4. Use the model to predict labels for new data The first two pieces of this—the choice of model and choice of hyperparameters—are perhaps the most important part of using these tools and techniques effectively. In order to make an informed choice, we need a way to *validate* that our model and our hyperparameters are a good fit to the data. While this may sound simple, there are some pitfalls that you must avoid to do this effectively. ## Thinking about Model Validation In principle, model validation is very simple: after choosing a model and its hyperparameters, we can estimate how effective it is by applying it to some of the training data and comparing the prediction to the known value. The following sections first show a naive approach to model validation and why it fails, before exploring the use of holdout sets and cross-validation for more robust model evaluation. ### Model validation the wrong way Let's demonstrate the naive approach to validation using the Iris data, which we saw in the previous section. We will start by loading the data: ``` from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target ``` Next we choose a model and hyperparameters. Here we'll use a *k*-neighbors classifier with ``n_neighbors=1``. 
This is a very simple and intuitive model that says "the label of an unknown point is the same as the label of its closest training point:" ``` from sklearn.neighbors import KNeighborsClassifier model = KNeighborsClassifier(n_neighbors=1) ``` Then we train the model, and use it to predict labels for data we already know: ``` model.fit(X, y) y_model = model.predict(X) ``` Finally, we compute the fraction of correctly labeled points: ``` from sklearn.metrics import accuracy_score accuracy_score(y, y_model) ``` We see an accuracy score of 1.0, which indicates that 100% of points were correctly labeled by our model! But is this truly measuring the expected accuracy? Have we really come upon a model that we expect to be correct 100% of the time? As you may have gathered, the answer is no. In fact, this approach contains a fundamental flaw: *it trains and evaluates the model on the same data*. Furthermore, the nearest neighbor model is an *instance-based* estimator that simply stores the training data, and predicts labels by comparing new data to these stored points: except in contrived cases, it will get 100% accuracy *every time!* ### Model validation the right way: Holdout sets So what can be done? A better sense of a model's performance can be found using what's known as a *holdout set*: that is, we hold back some subset of the data from the training of the model, and then use this holdout set to check the model performance. This splitting can be done using the ``train_test_split`` utility in Scikit-Learn: ``` from sklearn.cross_validation import train_test_split # split the data with 50% in each set X1, X2, y1, y2 = train_test_split(X, y, random_state=0, train_size=0.5) # fit the model on one set of data model.fit(X1, y1) # evaluate the model on the second set of data y2_model = model.predict(X2) accuracy_score(y2, y2_model) ``` We see here a more reasonable result: the nearest-neighbor classifier is about 90% accurate on this hold-out set. 
The hold-out set is similar to unknown data, because the model has not "seen" it before. ### Model validation via cross-validation One disadvantage of using a holdout set for model validation is that we have lost a portion of our data to the model training. In the above case, half the dataset does not contribute to the training of the model! This is not optimal, and can cause problems – especially if the initial set of training data is small. One way to address this is to use *cross-validation*; that is, to do a sequence of fits where each subset of the data is used both as a training set and as a validation set. Visually, it might look something like this: ![](figures/05.03-2-fold-CV.png) [figure source in Appendix](06.00-Figure-Code.ipynb#2-Fold-Cross-Validation) Here we do two validation trials, alternately using each half of the data as a holdout set. Using the split data from before, we could implement it like this: ``` y2_model = model.fit(X1, y1).predict(X2) y1_model = model.fit(X2, y2).predict(X1) accuracy_score(y1, y1_model), accuracy_score(y2, y2_model) ``` What comes out are two accuracy scores, which we could combine (by, say, taking the mean) to get a better measure of the global model performance. This particular form of cross-validation is a *two-fold cross-validation*—that is, one in which we have split the data into two sets and used each in turn as a validation set. We could expand on this idea to use even more trials, and more folds in the data—for example, here is a visual depiction of five-fold cross-validation: ![](figures/05.03-5-fold-CV.png) [figure source in Appendix](06.00-Figure-Code.ipynb#5-Fold-Cross-Validation) Here we split the data into five groups, and use each of them in turn to evaluate the model fit on the other 4/5 of the data. 
This would be rather tedious to do by hand, and so we can use Scikit-Learn's ``cross_val_score`` convenience routine to do it succinctly: ``` from sklearn.cross_validation import cross_val_score cross_val_score(model, X, y, cv=5) ``` Repeating the validation across different subsets of the data gives us an even better idea of the performance of the algorithm. Scikit-Learn implements a number of useful cross-validation schemes that are useful in particular situations; these are implemented via iterators in the ``cross_validation`` module. For example, we might wish to go to the extreme case in which our number of folds is equal to the number of data points: that is, we train on all points but one in each trial. This type of cross-validation is known as *leave-one-out* cross validation, and can be used as follows: ``` from sklearn.cross_validation import LeaveOneOut scores = cross_val_score(model, X, y, cv=LeaveOneOut(len(X))) scores ``` Because we have 150 samples, the leave one out cross-validation yields scores for 150 trials, and the score indicates either successful (1.0) or unsuccessful (0.0) prediction. Taking the mean of these gives an estimate of the error rate: ``` scores.mean() ``` Other cross-validation schemes can be used similarly. For a description of what is available in Scikit-Learn, use IPython to explore the ``sklearn.cross_validation`` submodule, or take a look at Scikit-Learn's online [cross-validation documentation](http://scikit-learn.org/stable/modules/cross_validation.html). ## Selecting the Best Model Now that we've seen the basics of validation and cross-validation, we will go into a little more depth regarding model selection and selection of hyperparameters. These issues are some of the most important aspects of the practice of machine learning, and I find that this information is often glossed over in introductory machine learning tutorials. 
Of core importance is the following question: *if our estimator is underperforming, how should we move forward?* There are several possible answers: - Use a more complicated/more flexible model - Use a less complicated/less flexible model - Gather more training samples - Gather more data to add features to each sample The answer to this question is often counter-intuitive. In particular, sometimes using a more complicated model will give worse results, and adding more training samples may not improve your results! The ability to determine what steps will improve your model is what separates the successful machine learning practitioners from the unsuccessful. ### The Bias-variance trade-off Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between *bias* and *variance*. Consider the following figure, which presents two regression fits to the same dataset: ![](figures/05.03-bias-variance.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Bias-Variance-Tradeoff) It is clear that neither of these models is a particularly good fit to the data, but they fail in different ways. The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to *underfit* the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high *bias*. The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. 
Such a model is said to *overfit* the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high *variance*. To look at this in another light, consider what happens if we use these two models to predict the y-value for some new data. In the following diagrams, the red/lighter points indicate data that is omitted from the training set: ![](figures/05.03-bias-variance-2.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Bias-Variance-Tradeoff-Metrics) The score here is the $R^2$ score, or [coefficient of determination](https://en.wikipedia.org/wiki/Coefficient_of_determination), which measures how well a model performs relative to a simple mean of the target values. $R^2=1$ indicates a perfect match, $R^2=0$ indicates the model does no better than simply taking the mean of the data, and negative values mean even worse models. From the scores associated with these two models, we can make an observation that holds more generally: - For high-bias models, the performance of the model on the validation set is similar to the performance on the training set. - For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set. If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure: ![](figures/05.03-validation-curve.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Validation-Curve) The diagram shown here is often called a *validation curve*, and we see the following essential features: - The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen. 
- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data. - For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data. - For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance. The means of tuning the model complexity varies from model to model; when we discuss individual models in depth in later sections, we will see how each model allows for such tuning. ### Validation curves in Scikit-Learn Let's look at an example of using cross-validation to compute the validation curve for a class of models. Here we will use a *polynomial regression* model: this is a generalized linear model in which the degree of the polynomial is a tunable parameter. For example, a degree-1 polynomial fits a straight line to the data; for model parameters $a$ and $b$: $$ y = ax + b $$ A degree-3 polynomial fits a cubic curve to the data; for model parameters $a, b, c, d$: $$ y = ax^3 + bx^2 + cx + d $$ We can generalize this to any number of polynomial features. In Scikit-Learn, we can implement this with a simple linear regression combined with the polynomial preprocessor. 
We will use a *pipeline* to string these operations together (we will discuss polynomial features and pipelines more fully in [Feature Engineering](05.04-Feature-Engineering.ipynb)): ``` from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import LinearRegression from sklearn.pipeline import make_pipeline def PolynomialRegression(degree=2, **kwargs): return make_pipeline(PolynomialFeatures(degree), LinearRegression(**kwargs)) ``` Now let's create some data to which we will fit our model: ``` import numpy as np def make_data(N, err=1.0, rseed=1): # randomly sample the data rng = np.random.RandomState(rseed) X = rng.rand(N, 1) ** 2 y = 10 - 1. / (X.ravel() + 0.1) if err > 0: y += err * rng.randn(N) return X, y X, y = make_data(40) ``` We can now visualize our data, along with polynomial fits of several degrees: ``` %matplotlib inline import matplotlib.pyplot as plt import seaborn; seaborn.set() # plot formatting X_test = np.linspace(-0.1, 1.1, 500)[:, None] plt.scatter(X.ravel(), y, color='black') axis = plt.axis() for degree in [1, 3, 5]: y_test = PolynomialRegression(degree).fit(X, y).predict(X_test) plt.plot(X_test.ravel(), y_test, label='degree={0}'.format(degree)) plt.xlim(-0.1, 1.0) plt.ylim(-2, 12) plt.legend(loc='best'); ``` The knob controlling model complexity in this case is the degree of the polynomial, which can be any non-negative integer. A useful question to answer is this: what degree of polynomial provides a suitable trade-off between bias (under-fitting) and variance (over-fitting)? We can make progress in this by visualizing the validation curve for this particular data and model; this can be done straightforwardly using the ``validation_curve`` convenience routine provided by Scikit-Learn. 
Given a model, data, parameter name, and a range to explore, this function will automatically compute both the training score and validation score across the range: ``` from sklearn.learning_curve import validation_curve degree = np.arange(0, 21) train_score, val_score = validation_curve(PolynomialRegression(), X, y, 'polynomialfeatures__degree', degree, cv=7) plt.plot(degree, np.median(train_score, 1), color='blue', label='training score') plt.plot(degree, np.median(val_score, 1), color='red', label='validation score') plt.legend(loc='best') plt.ylim(0, 1) plt.xlabel('degree') plt.ylabel('score'); ``` This shows precisely the qualitative behavior we expect: the training score is everywhere higher than the validation score; the training score is monotonically improving with increased model complexity; and the validation score reaches a maximum before dropping off as the model becomes over-fit. From the validation curve, we can read-off that the optimal trade-off between bias and variance is found for a third-order polynomial; we can compute and display this fit over the original data as follows: ``` plt.scatter(X.ravel(), y) lim = plt.axis() y_test = PolynomialRegression(3).fit(X, y).predict(X_test) plt.plot(X_test.ravel(), y_test); plt.axis(lim); ``` Notice that finding this optimal model did not actually require us to compute the training score, but examining the relationship between the training score and validation score can give us useful insight into the performance of the model. ## Learning Curves One important aspect of model complexity is that the optimal model will generally depend on the size of your training data. 
For example, let's generate a new dataset with a factor of five more points: ``` X2, y2 = make_data(200) plt.scatter(X2.ravel(), y2); ``` We will duplicate the preceding code to plot the validation curve for this larger dataset; for reference let's over-plot the previous results as well: ``` degree = np.arange(21) train_score2, val_score2 = validation_curve(PolynomialRegression(), X2, y2, 'polynomialfeatures__degree', degree, cv=7) plt.plot(degree, np.median(train_score2, 1), color='blue', label='training score') plt.plot(degree, np.median(val_score2, 1), color='red', label='validation score') plt.plot(degree, np.median(train_score, 1), color='blue', alpha=0.3, linestyle='dashed') plt.plot(degree, np.median(val_score, 1), color='red', alpha=0.3, linestyle='dashed') plt.legend(loc='lower center') plt.ylim(0, 1) plt.xlabel('degree') plt.ylabel('score'); ``` The solid lines show the new results, while the fainter dashed lines show the results of the previous smaller dataset. It is clear from the validation curve that the larger dataset can support a much more complicated model: the peak here is probably around a degree of 6, but even a degree-20 model is not seriously over-fitting the data—the validation and training scores remain very close. Thus we see that the behavior of the validation curve has not one but two important inputs: the model complexity and the number of training points. It is often useful to explore the behavior of the model as a function of the number of training points, which we can do by using increasingly larger subsets of the data to fit our model. A plot of the training/validation score with respect to the size of the training set is known as a *learning curve.* The general behavior we would expect from a learning curve is this: - A model of a given complexity will *overfit* a small dataset: this means the training score will be relatively high, while the validation score will be relatively low. 
- A model of a given complexity will *underfit* a large dataset: this means that the training score will decrease, but the validation score will increase. - A model will never, except by chance, give a better score to the validation set than the training set: this means the curves should keep getting closer together but never cross. With these features in mind, we would expect a learning curve to look qualitatively like that shown in the following figure: ![](figures/05.03-learning-curve.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Learning-Curve) The notable feature of the learning curve is the convergence to a particular score as the number of training samples grows. In particular, once you have enough points that a particular model has converged, *adding more training data will not help you!* The only way to increase model performance in this case is to use another (often more complex) model. ### Learning curves in Scikit-Learn Scikit-Learn offers a convenient utility for computing such learning curves from your models; here we will compute a learning curve for our original dataset with a second-order polynomial model and a ninth-order polynomial: ``` from sklearn.learning_curve import learning_curve fig, ax = plt.subplots(1, 2, figsize=(16, 6)) fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1) for i, degree in enumerate([2, 9]): N, train_lc, val_lc = learning_curve(PolynomialRegression(degree), X, y, cv=7, train_sizes=np.linspace(0.3, 1, 25)) ax[i].plot(N, np.mean(train_lc, 1), color='blue', label='training score') ax[i].plot(N, np.mean(val_lc, 1), color='red', label='validation score') ax[i].hlines(np.mean([train_lc[-1], val_lc[-1]]), N[0], N[-1], color='gray', linestyle='dashed') ax[i].set_ylim(0, 1) ax[i].set_xlim(N[0], N[-1]) ax[i].set_xlabel('training size') ax[i].set_ylabel('score') ax[i].set_title('degree = {0}'.format(degree), size=14) ax[i].legend(loc='best') ``` This is a valuable diagnostic, because it gives us a visual depiction 
of how our model responds to increasing training data. In particular, when your learning curve has already converged (i.e., when the training and validation curves are already close to each other) *adding more training data will not significantly improve the fit!* This situation is seen in the left panel, with the learning curve for the degree-2 model. The only way to increase the converged score is to use a different (usually more complicated) model. We see this in the right panel: by moving to a much more complicated model, we increase the score of convergence (indicated by the dashed line), but at the expense of higher model variance (indicated by the difference between the training and validation scores). If we were to add even more data points, the learning curve for the more complicated model would eventually converge. Plotting a learning curve for your particular choice of model and dataset can help you to make this type of decision about how to move forward in improving your analysis. ## Validation in Practice: Grid Search The preceding discussion is meant to give you some intuition into the trade-off between bias and variance, and its dependence on model complexity and training set size. In practice, models generally have more than one knob to turn, and thus plots of validation and learning curves change from lines to multi-dimensional surfaces. In these cases, such visualizations are difficult and we would rather simply find the particular model that maximizes the validation score. Scikit-Learn provides automated tools to do this in the grid search module. Here is an example of using grid search to find the optimal polynomial model. We will explore a three-dimensional grid of model features; namely the polynomial degree, the flag telling us whether to fit the intercept, and the flag telling us whether to normalize the problem. 
This can be set up using Scikit-Learn's ``GridSearchCV`` meta-estimator: ``` from sklearn.grid_search import GridSearchCV param_grid = {'polynomialfeatures__degree': np.arange(21), 'linearregression__fit_intercept': [True, False], 'linearregression__normalize': [True, False]} grid = GridSearchCV(PolynomialRegression(), param_grid, cv=7) ``` Notice that like a normal estimator, this has not yet been applied to any data. Calling the ``fit()`` method will fit the model at each grid point, keeping track of the scores along the way: ``` grid.fit(X, y); ``` Now that this is fit, we can ask for the best parameters as follows: ``` grid.best_params_ ``` Finally, if we wish, we can use the best model and show the fit to our data using code from before: ``` model = grid.best_estimator_ plt.scatter(X.ravel(), y) lim = plt.axis() y_test = model.fit(X, y).predict(X_test) plt.plot(X_test.ravel(), y_test, hold=True); plt.axis(lim); ``` The grid search provides many more options, including the ability to specify a custom scoring function, to parallelize the computations, to do randomized searches, and more. For information, see the examples in [In-Depth: Kernel Density Estimation](05.13-Kernel-Density-Estimation.ipynb) and [Feature Engineering: Working with Images](05.14-Image-Features.ipynb), or refer to Scikit-Learn's [grid search documentation](http://Scikit-Learn.org/stable/modules/grid_search.html).
github_jupyter
# Black-Litterman allocation The Black-Litterman method is a very powerful way of converting your views on asset returns, along with your uncertainty in these views, into a portfolio. For a description of the theory, please read the [documentation page](https://pyportfolioopt.readthedocs.io/en/latest/BlackLitterman.html) and the links therein. In this recipe, we will cover: - Downloading data for the Black-Litterman method - Constructing the prior return vector based on market equilibrium - Two ways of constructing the uncertainty matrix - Combining Black-Litterman with mean-variance optimisation ## Downloading data In addition to price data, constructing a market prior requires market-caps. ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import yfinance as yf tickers = ["MSFT", "AMZN", "NAT", "BAC", "DPZ", "DIS", "KO", "MCD", "COST", "SBUX"] ohlc = yf.download(tickers, period="max") prices = ohlc["Adj Close"] prices.tail() market_prices = yf.download("SPY", period="max")["Adj Close"] market_prices.head() mcaps = {} for t in tickers: stock = yf.Ticker(t) mcaps[t] = stock.info["marketCap"] mcaps ``` ## Constructing the prior ``` import pypfopt pypfopt.__version__ from pypfopt import black_litterman, risk_models from pypfopt import BlackLittermanModel, Plotting S = risk_models.CovarianceShrinkage(prices).ledoit_wolf() delta = black_litterman.market_implied_risk_aversion(market_prices) delta Plotting.plot_covariance(S); market_prior = black_litterman.market_implied_prior_returns(mcaps, delta, S) market_prior market_prior.plot.barh(figsize=(10,5)); ``` ## Views In the BL method, views are specified via the matrix P (picking matrix) and the vector Q. Q contains the magnitude of each view, while P maps the views to the assets they belong to. If you are providing **absolute views** (i.e a return estimate for each asset), you don't have to worry about P and Q, you can just pass your views as a dictionary. 
``` # You don't have to provide views on all the assets viewdict = { "AMZN": 0.10, "BAC": 0.30, "COST": 0.05, "DIS": 0.05, "DPZ": 0.20, "KO": -0.05, # I think Coca-Cola will go down 5% "MCD": 0.15, "MSFT": 0.10, "NAT": 0.50, # but low confidence, which will be reflected later "SBUX": 0.10 } bl = BlackLittermanModel(S, pi=market_prior, absolute_views=viewdict) ``` Black-Litterman also allows for relative views, e.g you think asset A will outperform asset B by 10%. If you'd like to incorporate these, you will have to build P and Q yourself. An explanation for this is given in the [docs](https://pyportfolioopt.readthedocs.io/en/latest/BlackLitterman.html#views). ## View confidences In this section, we provide two ways that you may wish to construct the uncertainty matrix. The first is known as Idzorek's method. It allows you to specify a vector/list of percentage confidences. ``` confidences = [ 0.6, 0.4, 0.2, 0.5, 0.7, # confident in dominos 0.7, # confident KO will do poorly 0.7, 0.5, 0.1, 0.4 ] bl = BlackLittermanModel(S, pi=market_prior, absolute_views=viewdict, omega="idzorek", view_confidences=confidences) fig, ax = plt.subplots(figsize=(7,7)) im = ax.imshow(bl.omega) # We want to show all ticks... ax.set_xticks(np.arange(len(bl.tickers))) ax.set_yticks(np.arange(len(bl.tickers))) ax.set_xticklabels(bl.tickers) ax.set_yticklabels(bl.tickers) plt.show() np.diag(bl.omega) ``` Note how NAT, which we gave the lowest confidence, also has the highest uncertainty. Instead of inputting confidences, we can calculate the uncertainty matrix directly by specifying 1 standard deviation confidence intervals, i.e bounds which we think will contain the true return 68% of the time. 
This may be easier than coming up with somewhat arbitrary percentage confidences ``` intervals = [ (0, 0.25), (0.1, 0.4), (-0.1, 0.15), (-0.05, 0.1), (0.15, 0.25), (-0.1, 0), (0.1, 0.2), (0.08, 0.12), (0.1, 0.9), (0, 0.3) ] variances = [] for lb, ub in intervals: sigma = (ub - lb)/2 variances.append(sigma ** 2) print(variances) omega = np.diag(variances) ``` ## Posterior estimates Given the inputs, we can compute a posterior estimate of returns ``` # We are using the shortcut to automatically compute market-implied prior bl = BlackLittermanModel(S, pi="market", market_caps=mcaps, risk_averison="delta", absolute_views=viewdict, omega=omega) # Posterior estimate of returns ret_bl = bl.bl_returns() ret_bl ``` We can visualise how this compares to the prior and our views: ``` rets_df = pd.DataFrame([market_prior, ret_bl, pd.Series(viewdict)], index=["Prior", "Posterior", "Views"]).T rets_df rets_df.plot.bar(figsize=(12,8)); ``` Notice that the posterior is always between the prior and the views. This supports the fact that the BL method is essentially a Bayesian weighted-average of the prior and views, where the weight is determined by the confidence. A similar but less intuitive procedure can be used to produce the posterior covariance estimate: ``` S_bl = bl.bl_cov() Plotting.plot_covariance(S_bl); ``` ## Portfolio allocation Now that we have constructed our Black-Litterman posterior estimate, we can proceed to use any of the optimisers discussed in previous recipes. ``` from pypfopt import EfficientFrontier, objective_functions ef = EfficientFrontier(ret_bl, S_bl) ef.add_objective(objective_functions.L2_reg) ef.max_sharpe() weights = ef.clean_weights() weights pd.Series(weights).plot.pie(figsize=(10,10)); from pypfopt import DiscreteAllocation da = DiscreteAllocation(weights, prices.iloc[-1], total_portfolio_value=20000) alloc, leftover = da.lp_portfolio() print(f"Leftover: ${leftover:.2f}") alloc ```
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Name" data-toc-modified-id="Name-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Name</a></span></li><li><span><a href="#Search" data-toc-modified-id="Search-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Search</a></span><ul class="toc-item"><li><span><a href="#Load-Cached-Results" data-toc-modified-id="Load-Cached-Results-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Load Cached Results</a></span></li><li><span><a href="#Run-From-Scratch" data-toc-modified-id="Run-From-Scratch-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Run From Scratch</a></span></li></ul></li><li><span><a href="#Analysis" data-toc-modified-id="Analysis-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Analysis</a></span><ul class="toc-item"><li><span><a href="#Gender-Breakdown" data-toc-modified-id="Gender-Breakdown-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Gender Breakdown</a></span></li><li><span><a href="#Face-Sizes" data-toc-modified-id="Face-Sizes-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Face Sizes</a></span></li><li><span><a href="#Appearances-on-a-Single-Show" data-toc-modified-id="Appearances-on-a-Single-Show-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Appearances on a Single Show</a></span></li><li><span><a href="#Screen-Time-Across-All-Shows" data-toc-modified-id="Screen-Time-Across-All-Shows-3.4"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>Screen Time Across All Shows</a></span></li></ul></li><li><span><a href="#Persist-to-Cloud" data-toc-modified-id="Persist-to-Cloud-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Persist to Cloud</a></span><ul class="toc-item"><li><span><a href="#Save-Model-to-GCS" data-toc-modified-id="Save-Model-to-GCS-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>Save Model to GCS</a></span><ul class="toc-item"><li><span><a 
href="#Make-sure-the-GCS-file-is-valid" data-toc-modified-id="Make-sure-the-GCS-file-is-valid-4.1.1"><span class="toc-item-num">4.1.1&nbsp;&nbsp;</span>Make sure the GCS file is valid</a></span></li></ul></li><li><span><a href="#Save-Labels-to-DB" data-toc-modified-id="Save-Labels-to-DB-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>Save Labels to DB</a></span><ul class="toc-item"><li><span><a href="#Commit-the-person-and-labeler" data-toc-modified-id="Commit-the-person-and-labeler-4.2.1"><span class="toc-item-num">4.2.1&nbsp;&nbsp;</span>Commit the person and labeler</a></span></li><li><span><a href="#Commit-the-FaceIdentity-labels" data-toc-modified-id="Commit-the-FaceIdentity-labels-4.2.2"><span class="toc-item-num">4.2.2&nbsp;&nbsp;</span>Commit the FaceIdentity labels</a></span></li></ul></li></ul></li></ul></div> ``` from esper.prelude import * from esper.identity import * from esper import embed_google_images ``` # Name ``` name = 'Megyn Kelly' ``` # Search ## Load Cached Results ``` assert name != '' results = FaceIdentityModel.load(name=name) imshow(np.hstack([cv2.resize(x[1][0], (200, 200)) for x in results.model_params['images']])) plt.show() plot_precision_and_cdf(results) ``` ## Run From Scratch Run this section if you do not have a cached model and precision curve estimates. ``` assert name != '' img_dir = embed_google_images.fetch_images(name) face_imgs = load_and_select_faces_from_images(img_dir) face_embs = embed_google_images.embed_images(face_imgs) assert(len(face_embs) == len(face_imgs)) imshow(np.hstack([cv2.resize(x[0], (200, 200)) for x in face_imgs if x])) plt.show() face_ids_by_bucket, face_ids_to_score = face_search_by_embeddings(face_embs) precision_model = PrecisionModel(face_ids_by_bucket) print('Select all MISTAKES. Ordered by DESCENDING score. Expecting {} frames'.format(precision_model.get_lower_count())) lower_widget = precision_model.get_lower_widget() lower_widget print('Select all NON-MISTAKES. 
Ordered by ASCENDING distance. Expecting {} frames'.format(precision_model.get_upper_count())) upper_widget = precision_model.get_upper_widget() upper_widget ``` Run the following cell after labelling. ``` lower_precision = precision_model.compute_precision_for_lower_buckets(lower_widget.selected) upper_precision = precision_model.compute_precision_for_upper_buckets(upper_widget.selected) precision_by_bucket = {**lower_precision, **upper_precision} results = FaceIdentityModel( name=name, face_ids_by_bucket=face_ids_by_bucket, face_ids_to_score=face_ids_to_score, precision_by_bucket=precision_by_bucket, model_params={ 'images': list(zip(face_embs, face_imgs)) } ) plot_precision_and_cdf(results) # Save the model results.save() ``` # Analysis ## Gender Breakdown ``` gender_breakdown = compute_gender_breakdown(results) print('Raw counts:') for k, v in gender_breakdown.items(): print(' ', k, ':', v) print() print('Proportions:') denominator = sum(v for v in gender_breakdown.values()) for k, v in gender_breakdown.items(): print(' ', k, ':', v / denominator) print() print('Showing examples:') show_gender_examples(results) ``` ## Face Sizes ``` plot_histogram_of_face_sizes(results) ``` ## Appearances on a Single Show ``` show_name = 'The Kelly File' screen_time_by_video_id = compute_screen_time_by_video(results, show_name) plot_histogram_of_screen_times_by_video(name, show_name, screen_time_by_video_id) plot_screentime_over_time(name, show_name, screen_time_by_video_id) plot_distribution_of_appearance_times_by_video(results, show_name) ``` ## Screen Time Across All Shows ``` screen_time_by_show = get_screen_time_by_show(results) plot_screen_time_by_show(name, screen_time_by_show) ``` # Persist to Cloud ## Save Model to GCS ``` gcs_model_path = results.save_to_gcs() ``` ### Make sure the GCS file is valid ``` gcs_results = FaceIdentityModel.load_from_gcs(name=name) plot_precision_and_cdf(gcs_results) ``` ## Save Labels to DB ``` from django.core.exceptions import 
ObjectDoesNotExist def standardize_name(name): return name.lower() person_type = ThingType.objects.get(name='person') try: person = Thing.objects.get(name=standardize_name(name), type=person_type) print('Found person:', person.name) except ObjectDoesNotExist: person = Thing(name=standardize_name(name), type=person_type) print('Creating person:', person.name) labeler = Labeler(name='face-identity-{}'.format(person.name), data_path=gcs_model_path) ``` ### Commit the person and labeler ``` person.save() labeler.save() ``` ### Commit the FaceIdentity labels ``` commit_face_identities_to_db(results, person, labeler) print('Committed {} labels to the db'.format(FaceIdentity.objects.filter(labeler=labeler).count())) ```
github_jupyter
``` # hide # dont_test %load_ext nb_black ``` # Welcome to django_fileresponse > Serve files directly from Django. `django_fileresponse` ([link to documentation](https://ephes.github.io/django_fileresponse/)) is a library that allows you to serve files directly from Django. ## Features of django_fileresponse `django_fileresponse` provides the following features for developers: - **Use asyncio to serve files with high concurrency** directly from Django. - Uses [aiofiles](https://github.com/Tinche/aiofiles) to **asynchronously read from filesystem** and [aiobotocore](https://github.com/aio-libs/aiobotocore) to **asynchronously read from s3 compatible object stores** ## Installing `django_fileresponse` is on PyPI so you can just run `pip install django_fileresponse`. ## Replace Default ASGIHandler You have to replace Django's `ASGIHandler`, because it synchronously calls `__next__` in [for part in response](https://github.com/django/django/blob/66af94d56ea08ccf8d906708a6cc002dd3ab24d3/django/core/handlers/asgi.py#L242) which makes it impossible to await reading from a filesystem/object-store. So you have to replace the default `ASGIHandler` in `asgi.py`. 
``` # hide # make django importable import django from django.conf import settings try: settings.configure() except RuntimeError: pass ``` So instead of building your application like this: ``` from django.core.asgi import get_asgi_application application = get_asgi_application() ``` You have to import a modified ASGIHandler from fileresponse: ``` from fileresponse.asgi import get_asgi_application application = get_asgi_application() ``` If you use a different mechanism to launch your application, you could also just import the modified `AsyncFileASGIHandler` directly: ``` from fileresponse.handlers import AsyncFileASGIHandler django.setup(set_prefix=False) application = AsyncFileASGIHandler() ``` ## How to use Async Fileresponses in your Views Add functions below to your `views.py` ### Serving from Filesystem ``` from fileresponse.http import AiofileFileResponse as AiofileFileResponse async def get_file(request, path): file_path = Path(path) return AiofileFileResponse(file_path) ``` ### Serve Files from an S3 Compatible Object Store ``` from fileresponse.http import AiobotocoreFileResponse async def get_file(request, key): bucket = settings.FILERESPONSE_S3_ACCESS_KEY_ID return AiobotocoreFileResponse(bucket, key, chunk_size=4096) ``` ## Settings ### Example Settings for an S3 Compatible Object Store ``` FILERESPONSE_S3_ACCESS_KEY_ID="minioadmin" FILERESPONSE_S3_SECRET_ACCESS_KEY="minioadmin" FILERESPONSE_S3_REGION="us-west-2" FILERESPONSE_S3_STORAGE_BUCKET_NAME="fileresponse" FILERESPONSE_S3_ENDPOINT_URL="http://localhost:9000" ``` ## More Information * [DjangoCon Europe 2021 Talk about file serving with Django](https://wersdoerfer.de/blogs/ephes_blog/djangocon-2021/) * [Podcast episode about DjangoCon Europe 2021](https://python-podcast.de/show/djangoconeu-2021/)
github_jupyter
``` #### To measure all running time # https://github.com/cpcloud/ipython-autotime %load_ext autotime import gzip from collections import defaultdict import scipy import scipy.optimize import numpy import random import pandas as pd import json import numpy as np import time import csv from collections import Counter ``` - Download dataset from https://drive.google.com/drive/folders/1dnCnSqniJMDFGw8VIiKG5S-_hJmGBJqt ``` # colnames=['user_id', 'product_id', 'rating'] # rating_df = pd.read_csv(path, names=colnames, header=None, compression='gzip') def parse(path): for line in gzip.open(path, 'r'): yield json.loads(line) ``` ### For 5-core ( start ) review ``` DATA_DIR = './Dataset/' fn_5core = 'reviews_Clothing_Shoes_and_Jewelry_5_2.json.gz' path = DATA_DIR + fn_5core print(path) BATCH_SIZE = 100000 i = 0 dataset = [] usersPerItem = defaultdict(set) itemsPerUser = defaultdict(set) reviewsPerUser = defaultdict(list) reviewsPerItem = defaultdict(list) for line in parse(path): d = dict() d['user_id'] = line['reviewerID'] d['product_id'] = line['asin'] d['rating'] = int(line['overall']) dataset.append(d) i += 1 if i > BATCH_SIZE: break for d in dataset: user,item = d['user_id'], d['product_id'] reviewsPerUser[user].append(d) reviewsPerItem[item].append(d) usersPerItem[item].add(user) itemsPerUser[user].add(item) dataset[10] N = len(dataset) nUsers = len(reviewsPerUser) nItems = len(reviewsPerItem) #Getting a list of keys users = list(reviewsPerUser.keys()) items = list(reviewsPerItem.keys()) #This is equivalent to our Rating Mean from week 1 alpha = sum([d['rating'] for d in dataset]) / len(dataset) #Create another two defaultdict's, this time being float types because they are prediction based userBiases = defaultdict(float) itemBiases = defaultdict(float) def MSE(predictions, labels): differences = [(x-y)**2 for x,y in zip(predictions,labels)] return sum(differences) / len(differences) def Jaccard(s1, s2): numer = len(s1.intersection(s2)) denom = len(s1.union(s2)) 
return numer / denom def predictRating(user,item): ratings = [] similarities = [] for d in reviewsPerUser[user]: i2 = d['product_id'] if i2 == item: continue ratings.append(d['rating']) similarities.append(Jaccard(usersPerItem[item],usersPerItem[i2])) if (sum(similarities) > 0): weightedRatings = [(x*y) for x,y in zip(ratings,similarities)] return sum(weightedRatings) / sum(similarities) else: # User hasn't rated any similar items return alpha class Logger(): def __init__(self): self.STATUS = 'OFF' self.START_TIME = None self.END_TIME = None self.EXECUTION_TIME = None self.LOGS = [] self.MODEL = None self.SCORE = None self.STAT = None def start(self, model=None, stat=None, score=None): self.START_TIME = time.time() self.STATUS = 'ON' if model: self.MODEL = model self.LOGS.append("Model: {m}".format(m=model)) if stat: self.STAT = stat self.LOGS.append("Statistic: {s}".format(s=stat)) if score: self.SCORE = score self.LOGS.append("Score: {s}".format(s=score)) def end(self, display=True, score=None): if self.STATUS == 'OFF': print("No timer started.") else: self.END_TIME = time.time() self.EXECUTION_TIME = self.END_TIME - self.START_TIME self.LOGS.append("Time: {t}".format(t=self.EXECUTION_TIME)) if score: self.SCORE = score self.LOGS.append("Score: {s}".format(s=score)) if display == True: self.getStats(last=False) else: r = self.LOGS self.tearDown() return r self.tearDown() def tearDown(self): self.STATUS = 'OFF' self.LOGS = [] def getStats(self, show=True, last=True): if show == True: if last == True: print("STATUS: {v}".format(v=self.STATUS)) print("START_TIME: {v}".format(v=self.START_TIME)) print("END_TIME: {v}".format(v=self.END_TIME)) print("EXECUTION_TIME: {v}".format(v=self.EXECUTION_TIME)) print("MODEL: {v}".format(v=self.MODEL)) print("STAT: {v}".format(v=self.STAT)) print("SCORE: {v}".format(v=self.SCORE)) else: for l in self.LOGS: print(l) else: return self.MODEL, self.STAT, self.SCORE, self.EXECUTION_TIME timer = Logger() labels = [d['rating'] for d in 
dataset] # baseline alwaysPredictMean = [alpha for d in dataset] labels = [d['rating'] for d in dataset] MSE(alwaysPredictMean, labels) cfPredictions = [predictRating(d['user_id'], d['product_id']) for d in dataset] print(MSE(alwaysPredictMean, labels)) print(MSE(cfPredictions, labels)) print(MSE(alwaysPredictMean, labels), MSE(cfPredictions, labels)) print() timer.start(model='Baseline', stat='MSE', score=MSE(alwaysPredictMean, labels)) alwaysPredictMean = [alpha for d in dataset] timer.end() print() ``` ### Heuristic - Heuristic analysis is an expert based analysis that determines the susceptibility of a system towards particular threat/risk using various decision rules or weighing methods. MultiCriteria analysis (MCA) is one of the means of weighing. - https://en.wikipedia.org/wiki/Heuristic_analysis ``` score = MSE(cfPredictions, labels) cfPredictions = [predictRating(d['user_id'], d['product_id']) for d in dataset] print("Mode: Weighted Ratings Heuristic") print("Score by MSE: ", score) def mostSimilar(item, n): similarities = [] users = usersPerItem[item] for i2 in usersPerItem: if i2 == item: continue sim = Jaccard(users, usersPerItem[i2]) similarities.append([sim,i2]) similarities.sort(reverse=True) return similarities[:n] def mostSimilarFast(item, n): similarities = [] users = usersPerItem[item] candidateItems = set() for u in users: candidateItems = candidateItems.union(itemsPerUser[u]) for i2 in candidateItems: if i2 == item: continue sim = Jaccard(users, usersPerItem[i2]) similarities.append([sim, i2]) similarities.sort(reverse=True) return similarities[:n] # Test Params n = 10 idx = 101 query = dataset[idx]['product_id'] print("Index: {i}".format(i=idx)) print("ProductID: {q}".format(q=query)) print("Number Matches: {i}".format(i=n)) timer.start(model='Most Similar', stat='Jaccard Similarity') sims1 = mostSimilar(query, n) timer.end(display=True) sims1 timer.start(model='Most Similar Optimized', stat='Jaccard Similarity') sims2 = mostSimilarFast(query, 
n) timer.end(display=True) sims2 df = pd.DataFrame(dataset) X = df[['user_id', 'product_id']] y = df[['rating']] df.head() ``` # Collaberative filtering * Product Similarity recommedation * User Similarity recomendation This model uses historical user/item ratings that are similar to predict ratings. ### Documention for Surprise - http://surpriselib.com/ ``` # !pip install surprise from surprise import KNNWithMeans from surprise import Dataset from surprise import accuracy from surprise import Reader import os from surprise.model_selection import train_test_split #Reading the dataset reader = Reader(rating_scale=(0, 5)) data = Dataset.load_from_df(df,reader) #Splitting the dataset trainset, testset = train_test_split(data, test_size=0.3, random_state=11, shuffle=True) ``` ### Use user_based true/false to switch between user-based or item-based collaborative filtering ``` timer.start(model='Product KNN', stat='MSE') algo = KNNWithMeans(k=5, sim_options={'name': 'cosine', 'user_based': False}) algo.fit(trainset) test_pred = algo.test(testset) acc = accuracy.mse(test_pred, verbose=False) timer.end() print("Score: ", acc) timer.start(model='User KNN', stat='MSE') algo = KNNWithMeans(k=5, sim_options={'name': 'cosine', 'user_based': True}) algo.fit(trainset) test_pred = algo.test(testset) acc = accuracy.mse(test_pred, verbose=False) timer.end() print("Score: ", acc) ```
github_jupyter
# [모듈 1.2] 고객 이탈 데이터 준비, Autogluon 훈련 및 평가 **기존 노트북에서 XGBoost 알고리즘으로 모델 훈련을 하였습니다. 여기서는 AutoML 솔루션중의 하나인 AutoGluon 머신 러닝 라이브러리를 통하여 모델 학습을 합니다.** ### [알림] <font color="red"> conda_python3 커널 </font> 과 함께 사용해야 합니다. * 이 노트북은 `0.1.Install_Package` 반드시 먼저 실행해야 합니다. ### 참고: - 노트북에서 사용 중인 "고객 이탈 데이터 셋" 설명, 데이터 탐색, SageMaker XGBoost 의 훈련 및 배포는 아래 노트북에서 참조가 가능합니다. - 이 노트북은 참조 노트북에서 다루지 않은 내용을 중심으로 합니다. - Customer churn prediction with SageMaker XGBoost - https://github.com/mullue/churn-pred-xgboost - AutoGluon 은 아래 링크를 통해서 기본적인 정보를 확인 바랍니다. - [강력 추천] AutoGluon Quick Start - https://github.com/mullue/autogluon # 0. 환경 셋업 ``` import pandas as pd pd.set_option("display.max_columns", 500) import numpy as np import matplotlib.pyplot as plt import os import time import seaborn as sns import matplotlib.pyplot as plt sns.set(style="whitegrid") import sagemaker from sagemaker import get_execution_role sess = sagemaker.Session() bucket = sess.default_bucket() prefix = "ml_data_prep_workshop/xgboost-churn" role = get_execution_role() %load_ext autoreload %autoreload 2 ``` # 1. 데이타 준비 고객 이탈 데이타를 아래 S3 에서 다운로드 합니다. ``` !aws s3 cp s3://sagemaker-sample-files/datasets/tabular/synthetic/churn.txt ./ churn = pd.read_csv("./churn.txt") churn ``` ### 레이블 컬럼을 숫자형으로 변경 - 레이블 컬럼인 Churn? 의 값을 숫자형으로 1, 0 로 바꾸고, 데이터 프레임의 가장 앞에 위치 시킴. - 이유는 "상관 계수 분석시" 에 레이블 컬럼도 포함시키기 위함 입니다. ``` from src.tabular_utils import change_y churn = change_y(churn, col='Churn?', isChange=True) churn ``` ## 피쳐 제거 - 이전 노트북 '1.1.xgb_churn.ipynb' 과 동일하게 아래 피쳐를 제거 합니다. - ['State','Area Code','Phone'] ``` drop_cols = ['State','Area Code','Phone'] churn_cl = churn.drop(columns=drop_cols) churn_cl ``` ## 카테고리 변수의 이진 변수 (One-Hot Encoding) 변환 - AutoGluon 은 내부적으로 피쳐 변환을 하기에 이 노트북에서는 진행하지 않습니다. # 3. 데이터 세트 분리 - 전체 데이타를 8:1:1 의 비율로 훈련, 검증, 테스트 데이터 셋으로 분리 합니다. - 훈련과 검증 데이터는 CSV 데이터로 로컬에 저장 합니다. 
``` train_data, validation_data, test_data = np.split( churn_cl.sample(frac=1, random_state=1024), [int(0.8 * len(churn_cl)), int(0.9 * len(churn_cl))], ) train_data.to_csv("train.csv", header=True, index=False) validation_data.to_csv("validation.csv", header=True, index=False) ``` # 4. Autogluon 모델링 ``` train_df = train_data.copy() val_df = validation_data.copy() test_df = test_data.copy() train_df.head() ``` ## 오토글루온 데이터 타입으로 변경 오토글루온이 정의한 데이터 세트로 변환 합니다. ``` from autogluon.tabular import TabularPredictor as task from autogluon.tabular import TabularDataset # train_data = TabularDataset(auto_train_file) train_data = TabularDataset(train_df) val_data = TabularDataset(val_df) test_data = TabularDataset(test_df) ``` ## 오토 글루온 모델 설정 ``` import autogluon.core as ag eval_metric = 'roc_auc' # 검증 데이터의 모델 평가 지표 save_path = 'basic_autogluon_models' # 모델 저장 경로 presets = 'medium_quality_faster_train' # 미디엄 성능 및 빠른 훈련 # presets = 'optimize_for_deployment' label = 'Churn?' tabular_predictor = task(label=label, path = save_path, eval_metric = eval_metric, ) # 앙상블 모델에서 제거하고 싶은 모델을 정의 # exclude_model_list = ['KNN','GBM','NN','FASTAI','RF'] exclude_model_list = ['KNN','NN','FASTAI','RF'] ``` ## 모델 훈련 ``` %%time predictor = tabular_predictor.fit( train_data = train_data, tuning_data = val_data, presets=presets, excluded_model_types= exclude_model_list ) ``` ## 모델 추론 위한 테스트 데이터 로딩 ``` y_test = test_data[label] # values to predict test_data_nolab = test_data.drop(labels=[label],axis=1) # delete label column to prove we're not cheating ``` ## 모델 추론 ``` prediction = predictor.predict(test_data_nolab) prediction_prob = predictor.predict_proba(test_data_nolab) # print("Predictions: ", prediction) # perf = predictor.evaluate_predictions(y_true=y_test, y_pred=prediction, auxiliary_metrics=True) perf = predictor.evaluate_predictions(y_true=y_test, y_pred=prediction_prob, auxiliary_metrics=True) ``` ## 추론 확률 0.5 (500 점) 을 기준으로 0, 1로 구분 ``` from src.tabular_utils import get_prediction_set, 
compute_f1 threshold = 500 df_pred = get_prediction_set(prediction, prediction_prob, threshold ) df_pred ``` ## 모델 평가 ``` from sklearn.metrics import classification_report, roc_auc_score from IPython.display import display as dp from sklearn.metrics import confusion_matrix import matplotlib.pyplot as plt #from src import p_utils %matplotlib inline %config InlineBackend.figure_format='retina' compute_f1(y_test, df_pred.pred.values) ``` ## 리더 보드 생성 ``` predictor.leaderboard(test_data, extra_info=False, silent=True) ``` ## Feature Importance ``` import seaborn as sns import matplotlib.pyplot as plt sns.set(style="whitegrid") fea_importance_raw = predictor.feature_importance(test_data) fea_importance = fea_importance_raw['importance'] f, ax = plt.subplots(figsize=(10,5)) plot = sns.barplot(x=fea_importance.index, y = fea_importance.values) ax.set_title('Feature Importance') plot.set_xticklabels(plot.get_xticklabels(),rotation='vertical') plt.show() # fea_importance_raw ```
github_jupyter
``` # Copyright 2021 Google LLC # Use of this source code is governed by an MIT-style # license that can be found in the LICENSE file or at # https://opensource.org/licenses/MIT. # Author(s): Kevin P. Murphy (murphyk@gmail.com) and Mahmoud Soliman (mjs@aucegypt.edu) ``` <a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a> <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/figures/chapter17_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ## Figure 17.1:<a name='17.1'></a> <a name='gaussInterpDemo'></a> Interpolating a function from noise-free data using a smoothness prior with prior precision $\lambda $. Blue crosses are observations. Red line is posterior mean, thin black lines are posterior samples. Shaded gray area is the pointwise 95\% marginal credible interval for $f(x_j)$, i.e., $\mu _j \pm 2 \sqrt \Sigma _ 1|2, jj $. (a) $\lambda =1$. (b) $\lambda =0.1$. Adapted from Figure 7.1 of <a href='#Calvetti07'>[CS07]</a> . Figure(s) generated by [gaussInterpDemoStable.m](https://github.com/probml/pmtk3/blob/master/demos/gaussInterpDemoStable.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/gaussInterpNoisyDemoStable_obsVar0_priorVar1.png") pmlt.show_image("/pyprobml/book1/figures/images/gaussInterpNoisyDemoStable_obsVar0_priorVar10.png") ``` ## Figure 17.2:<a name='17.2'></a> <a name='ARDkernel'></a> Function samples from a GP with an ARD kernel. (a) $\ell _1=\ell _2=1$. 
Both dimensions contribute to the response. (b) $\ell _1=1$, $\ell _2=5$. The second dimension is essentially ignored. Adapted from Figure 5.1 of <a href='#Rasmussen06'>[RW06]</a> . Figure(s) generated by [gprDemoArd.m](https://github.com/probml/pmtk3/blob/master/demos/gprDemoArd.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/GPARD.png") pmlt.show_image("/pyprobml/book1/figures/images/GPARD2.png") ``` ## Figure 17.3:<a name='17.3'></a> <a name='maternKernel'></a> Functions sampled from a GP with a Matern kernel. (a) $\nu =5/2$. (b) $\nu =1/2$. Figure(s) generated by [gpKernelPlot.m](https://github.com/probml/pmtk3/blob/master/demos/gpKernelPlot.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/GPmatern_kernel_5Samples.png") pmlt.show_image("/pyprobml/book1/figures/images/GPmatern_kernel_1Samples.png") ``` ## Figure 17.4:<a name='17.4'></a> <a name='GPsamplesPeriodic'></a> Functions sampled from a GP using various stationary periodic kernels. 
Figure(s) generated by [gpKernelPlot.m](https://github.com/probml/pmtk3/blob/master/demos/gpKernelPlot.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/GPper_kernelSamples.png") pmlt.show_image("/pyprobml/book1/figures/images/GPcos_kernelSamples.png") ``` ## Figure 17.5:<a name='17.5'></a> <a name='duvenaud-2-2'></a> Examples of 1d structures obtained by multiplying elementary kernels. Top row shows $\mathcal K (x,x'=1)$. Bottom row shows some functions sampled from $GP(f|0,\mathcal K )$. From Figure 2.2 of <a href='#duvenaud-thesis-2014'>[Duv14]</a> . Used with kind permission of David Duvenaud. ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/duvenaud-2-2.png") ``` ## Figure 17.6:<a name='17.6'></a> <a name='duvenaud-2-4'></a> Examples of 1d structures obtained by adding elementary kernels. Here $\mathrm SE ^ (\mathrm short ) $ and $\mathrm SE ^ (\mathrm long ) $ are two SE kernels with different length scales. From Figure 2.4 of <a href='#duvenaud-thesis-2014'>[Duv14]</a> . Used with kind permission of David Duvenaud. 
``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/duvenaud-2-4.png") ``` ## Figure 17.7:<a name='17.7'></a> <a name='GPGM'></a> A Gaussian process for 2 training points, $\mathbf x _1$ and $\mathbf x _2$, and 1 testing point, $\mathbf x _ * $, represented as a graphical model representing $p(\mathbf y ,\mathbf f _ X |\mathbf X ) = \mathcal N (\mathbf f _ X |m(\mathbf X ), \mathcal K (\mathbf X )) \DOTSB \prod@ \slimits@ _i p(y_i|f_i)$. The hidden nodes $f_i=f(\mathbf x _i)$ represent the value of the function at each of the data points. These hidden nodes are fully interconnected by undirected edges, forming a Gaussian graphical model; the edge strengths represent the covariance terms $\Sigma _ ij =\mathcal K (\mathbf x _i,\mathbf x _j)$. If the test point $\mathbf x _ * $ is similar to the training points $\mathbf x _1$ and $\mathbf x _2$, then the value of the hidden function $f_ * $ will be similar to $f_1$ and $f_2$, and hence the predicted output $y_*$ will be similar to the training values $y_1$ and $y_2$. 
``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/GPGM.png") ``` ## Figure 17.8:<a name='17.8'></a> <a name='gpr'></a> Left: some functions sampled from a GP prior with squared exponential kernel. Right: some samples from a GP posterior, after conditioning on 5 noise-free observations. The shaded area represents $\mathbb E \left [ f(\mathbf x ) \right ] \pm 2 \mathrm std \left [ f(\mathbf x ) \right ]$. Adapted from Figure 2.2 of <a href='#Rasmussen06'>[RW06]</a> . Figure(s) generated by [gprDemoNoiseFree.m](https://github.com/probml/pmtk3/blob/master/demos/gprDemoNoiseFree.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/gprDemoNoiseFreePrior.png") pmlt.show_image("/pyprobml/book1/figures/images/gprDemoNoiseFreePost.png") ``` ## Figure 17.9:<a name='17.9'></a> <a name='gprParams'></a> Some 1d GPs with SE kernels but different hyper-parameters fit to 20 noisy observations. The kernel has the form in \cref eqn:SE1 . The hyper-parameters $(\ell ,\sigma _f,\sigma _y)$ are as follows: (a) (1,1,0.1) (b) (0.3, 1.08, 0.00005), (c) (3.0, 1.16, 0.89). Adapted from Figure 2.5 of <a href='#Rasmussen06'>[RW06]</a> . 
Figure(s) generated by [gprDemoChangeHparams.m](https://github.com/probml/pmtk3/blob/master/demos/gprDemoChangeHparams.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/gprDemoChangeHparams1.png") pmlt.show_image("/pyprobml/book1/figures/images/gprDemoChangeHparams2.png") pmlt.show_image("/pyprobml/book1/figures/images/gprDemoChangeHparams3.png") ``` ## Figure 17.10:<a name='17.10'></a> <a name='gprLocalMin'></a> Illustration of local minima in the marginal likelihood surface. (a) We plot the log marginal likelihood vs $\sigma _y^2$ and $\ell $, for fixed $\sigma _f^2=1$, using the 7 data points shown in panels b and c. (b) The function corresponding to the lower left local minimum, $(\ell ,\sigma ^2_n) \approx (1,0.2)$. This is quite ``wiggly'' and has low noise. (c) The function corresponding to the top right local minimum, $(\ell ,\sigma ^2_n) \approx (10,0.8)$. This is quite smooth and has high noise. The data was generated using $(\ell ,\sigma ^2_n)=(1,0.1)$. From Figure 5.5 of <a href='#Rasmussen06'>[RW06]</a> . 
Figure(s) generated by [gprDemoMarglik.m](https://github.com/probml/pmtk3/blob/master/demos/gprDemoMarglik.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/rasmussen5-5a.png") pmlt.show_image("/pyprobml/book1/figures/images/rasmussen5-5b.png") pmlt.show_image("/pyprobml/book1/figures/images/rasmussen5-5c.png") ``` ## Figure 17.11:<a name='17.11'></a> <a name='gpClassifyIris2'></a> GP classifier for a binary classification problem on Iris flowers (setosa vs versicolor) using a single input feature (sepal length). The fat vertical line is the credible interval for the decision boundary. (a) SE kernel. (b) SE plus linear kernel. Adapted from Figures 7.11--7.12 of <a href='#Martin2018'>[Mar18]</a> . Figure(s) generated by [gp_classify_iris_1d_pymc3.py](https://github.com/probml/pyprobml/blob/master/scripts/gp_classify_iris_1d_pymc3.py) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_and_run("/pyprobml/scripts/gp_classify_iris_1d_pymc3.py") ``` ## Figure 17.12:<a name='17.12'></a> <a name='gpClassifySpaceFlu'></a> (a) Fictitious ``space flu'' binary classification problem. (b) Fit from a GP with SE kernel. Adapted from Figures 7.13--7.14 of <a href='#Martin2018'>[Mar18]</a> . 
Figure(s) generated by [gp_classify_spaceflu_1d_pymc3.py](https://github.com/probml/pyprobml/blob/master/scripts/gp_classify_spaceflu_1d_pymc3.py) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_and_run("/pyprobml/scripts/gp_classify_spaceflu_1d_pymc3.py") ``` ## Figure 17.13:<a name='17.13'></a> <a name='largeMargin'></a> Illustration of the large margin principle. Left: a separating hyper-plane with large margin. Right: a separating hyper-plane with small margin. ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/largeMarginPrinciple2.png") ``` ## Figure 17.14:<a name='17.14'></a> <a name='margin'></a> (a) Illustration of the geometry of a linear decision boundary in 2d. A point $\mathbf x $ is classified as belonging in decision region $\mathcal R _1$ if $f(\mathbf x )>0$, otherwise it belongs in decision region $\mathcal R _0$; $\mathbf w $ is a vector which is perpendicular to the decision boundary. The term $w_0$ controls the distance of the decision boundary from the origin. $\mathbf x _ \perp $ is the orthogonal projection of $\mathbf x $ onto the boundary. The signed distance of $\mathbf x $ from the dboundary is given by $f(\mathbf x )/||\mathbf w ||$. 
Adapted from Figure 4.1 of <a href='#BishopBook'>[Bis06]</a> . (b) Points with circles around them are support vectors, and have dual variables $\alpha _n >0$. In the soft margin case, we associate a slack variable $\xi _n$ with each example. If $0 < \xi _n < 1$, the point is inside the margin, but on the correct side of the decision boundary. If $\xi _n>1$, the point is on the wrong side of the boundary. Adapted from Figure 7.3 of <a href='#BishopBook'>[Bis06]</a> . ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/geomLinDiscrim.png") pmlt.show_image("/pyprobml/book1/figures/images/softMargin.png") ``` ## Figure 17.15:<a name='17.15'></a> <a name='SVMfeatureScaling'></a> Illustration of the benefits of scaling the input features before computing a max margin classifier. Adapted from Figure 5.2 of <a href='#Geron2019'>[Aur19]</a> . Figure(s) generated by [svm_classifier_feature_scaling.py](https://github.com/probml/pyprobml/blob/master/scripts/svm_classifier_feature_scaling.py) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_and_run("/pyprobml/scripts/svm_classifier_feature_scaling.py") ``` ## Figure 17.16:<a name='17.16'></a> <a name='tipping-logodds'></a> Log-odds vs $x$ for 3 different methods. 
Adapted from Figure 10 of <a href='#Tipping01'>[Tip01]</a> . Used with kind permission of Mike Tipping. ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/tipping-logodds.png") ``` ## Figure 17.17:<a name='17.17'></a> <a name='multiclassDiscrim'></a> (a) The one-versus-rest approach. The green region is predicted to be both class 1 and class 2. (b) The one-versus-one approach. The label of the green region is ambiguous. Adapted from Figure 4.2 of <a href='#BishopBook'>[Bis06]</a> . ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/oneVsRest.png") pmlt.show_image("/pyprobml/book1/figures/images/oneVsOne.png") ``` ## Figure 17.18:<a name='17.18'></a> <a name='rbfMoons'></a> SVM classifier with RBF kernel with precision $\gamma $ and regularizer $C$ applied to two moons data. Adapted from Figure 5.9 of <a href='#Geron2019'>[Aur19]</a> . 
Figure(s) generated by [svm_classifier_2d.py](https://github.com/probml/pyprobml/blob/master/scripts/svm_classifier_2d.py) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_and_run("/pyprobml/scripts/svm_classifier_2d.py") ``` ## Figure 17.19:<a name='17.19'></a> <a name='SVMvsCgamma'></a> (a) A cross validation estimate of the 0-1 error for an SVM classifier with RBF kernel with different precisions $\gamma =1/(2\sigma ^2)$ and different regularizer $\lambda =1/C$, applied to a synthetic data set drawn from a mixture of 2 Gaussians. (b) A slice through this surface for $\gamma =5$ The red dotted line is the Bayes optimal error, computed using Bayes rule applied to the model used to generate the data. Adapted from Figure 12.6 of <a href='#HastieBook'>[HTF09]</a> . Figure(s) generated by [svmCgammaDemo.m](https://github.com/probml/pmtk3/blob/master/demos/svmCgammaDemo.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/svmCvSurf.png") pmlt.show_image("/pyprobml/book1/figures/images/svmCvGamma5.png") ``` ## Figure 17.20:<a name='17.20'></a> <a name='etube'></a> (a) Illustration of $\ell _2$, Huber and $\epsilon $-insensitive loss functions, where $\epsilon =1.5$. 
Figure(s) generated by [huberLossPlot.m](https://github.com/probml/pmtk3/blob/master/demos/huberLossPlot.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/vapnikLoss.png") pmlt.show_image("/pyprobml/book1/figures/images/epsTube.png") ``` ## Figure 17.21:<a name='17.21'></a> <a name='SVR'></a> Illustration of support vector regression. Adapted from Figure 5.11 of <a href='#Geron2019'>[Aur19]</a> . Figure(s) generated by [svm_regression_1d.py](https://github.com/probml/pyprobml/blob/master/scripts/svm_regression_1d.py) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_and_run("/pyprobml/scripts/svm_regression_1d.py") ``` ## Figure 17.22:<a name='17.22'></a> <a name='kernelClassif'></a> Example of non-linear binary classification using an RBF kernel with bandwidth $\sigma =0.3$. (a) L2VM with $\lambda =5$. (b) L1VM with $\lambda =1$. (c) RVM. (d) SVM with $C=1/\lambda $ chosen by cross validation. Black circles denote the support vectors. 178 out of the 200 points are chosen as SVs. 
Figure(s) generated by [kernelBinaryClassifDemo.m](https://github.com/probml/pmtk3/blob/master/demos/kernelBinaryClassifDemo.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/kernelBinaryClassifDemologregL2.png") pmlt.show_image("/pyprobml/book1/figures/images/kernelBinaryClassifDemologregL1.png") pmlt.show_image("/pyprobml/book1/figures/images/kernelBinaryClassifDemoRVM.png") pmlt.show_image("/pyprobml/book1/figures/images/kernelBinaryClassifDemoSVM.png") ``` ## Figure 17.23:<a name='17.23'></a> <a name='kernelRegrDemoData'></a> Example of kernel based regression on the noisy sinc function using an RBF kernel with bandwidth $\sigma =0.3$. (a) L2VM with $\lambda =0.5$. (b) L1VM with $\lambda =0.5$. (c) RVM. (d) SVM regression with $C=1/\lambda $. and $\epsilon =0.1$ (the default for SVMlight). Red circles denote the retained training exemplars. 
Figure(s) generated by [svmRegrDemo.m](https://github.com/probml/pmtk3/blob/master/demos/svmRegrDemo.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/kernelRegrDemoDatalinregL2.png") pmlt.show_image("/pyprobml/book1/figures/images/kernelRegrDemoDatalinregL1.png") pmlt.show_image("/pyprobml/book1/figures/images/kernelRegrDemoDataRVM.png") pmlt.show_image("/pyprobml/book1/figures/images/kernelRegrDemoDataSVMQP.png") ``` ## Figure 17.24:<a name='17.24'></a> <a name='kernelRegrDemoStem'></a> Coefficient vectors of length $N=100$ for the models in \cref fig:kernelRegrDemoData . Figure(s) generated by [svmRegrDemo.m](https://github.com/probml/pmtk3/blob/master/demos/svmRegrDemo.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/kernelRegrDemoStemlinregL2.png") pmlt.show_image("/pyprobml/book1/figures/images/kernelRegrDemoStemlinregL1.png") pmlt.show_image("/pyprobml/book1/figures/images/kernelRegrDemoStemRVM.png") pmlt.show_image("/pyprobml/book1/figures/images/kernelRegrDemoStemSVMQP.png") ``` ## References: <a name='Geron2019'>[Aur19]</a> G. 
Aur'elien "Hands-On Machine Learning with Scikit-Learn and TensorFlow: Concepts, Tools, and Techniques for BuildingIntelligent Systems (2nd edition)". (2019). <a name='BishopBook'>[Bis06]</a> C. Bishop "Pattern recognition and machine learning". (2006). <a name='Calvetti07'>[CS07]</a> D. Calvetti and E. Somersalo. "Introduction to Bayesian Scientific Computing". (2007). <a name='duvenaud-thesis-2014'>[Duv14]</a> D. Duvenaud "Automatic Model Construction with Gaussian Processes". (2014). <a name='HastieBook'>[HTF09]</a> T. Hastie, R. Tibshirani and J. Friedman. "The Elements of Statistical Learning". (2009). <a name='Martin2018'>[Mar18]</a> O. Martin "Bayesian analysis with Python". (2018). <a name='Rasmussen06'>[RW06]</a> C. E. Rasmussen and C. I. Williams. "Gaussian Processes for Machine Learning". (2006). <a name='Tipping01'>[Tip01]</a> M. Tipping "Sparse Bayesian learning and the relevance vector machine". In: jmlr (2001).
github_jupyter
``` import numpy as np import sklearn import pandas as pd import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline from sklearn.datasets import load_boston boston_dataset = load_boston() boston = pd.DataFrame(boston_dataset.data, columns=boston_dataset.feature_names) X = boston.to_numpy() y = boston_dataset.target print(X) print(np.shape(X)) from sklearn.preprocessing import PolynomialFeatures from sklearn.model_selection import train_test_split poly = PolynomialFeatures(2) X = poly.fit_transform(X) print(np.shape(X)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=5) # Lets now train the model (2 Degree) from sklearn.linear_model import LinearRegression lin_model = LinearRegression() lin_model.fit(X_train, y_train) # Model Evaluation for 2 Degree Polynomial # Lets first evaluate on training set from sklearn.metrics import r2_score def rmse(predictions, targets): return np.sqrt(((predictions - targets) ** 2).mean()) y_pred_train = lin_model.predict(X_train) rmse_train = rmse(y_pred_train, y_train) r2_train = r2_score(y_train, y_pred_train) print("Training RMSE = " + str(rmse_train)) print("Training R2 = " + str(r2_train)) # Let us now evaluate on the test set y_pred_test = lin_model.predict(X_test) rmse_test = rmse(y_pred_test, y_test) r2_test = r2_score(y_test, y_pred_test) print("Test RMSE = " + str(rmse_test)) print("Test R2 = " + str(r2_test)) from sklearn.preprocessing import PolynomialFeatures from sklearn.model_selection import train_test_split poly = PolynomialFeatures(3) X = poly.fit_transform(X) print(np.shape(X)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=5) # Lets now train the model (2 Degree) from sklearn.linear_model import LinearRegression lin_model = LinearRegression() lin_model.fit(X_train, y_train) # Model Evaluation for 2 Degree Polynomial # Lets first evaluate on training set from sklearn.metrics import r2_score def rmse(predictions, targets): 
return np.sqrt(((predictions - targets) ** 2).mean()) y_pred_train = lin_model.predict(X_train) rmse_train = rmse(y_pred_train, y_train) r2_train = r2_score(y_train, y_pred_train) print("Training RMSE = " + str(rmse_train)) print("Training R2 = " + str(r2_train)) # Let us now evaluate on the test set y_pred_test = lin_model.predict(X_test) rmse_test = rmse(y_pred_test, y_test) r2_test = r2_score(y_test, y_pred_test) print("Test RMSE = " + str(rmse_test)) print("Test R2 = " + str(r2_test)) ```
github_jupyter
# Mount Drive ``` from google.colab import drive drive.mount('/content/drive') !pip install -U -q PyDrive !pip install httplib2==0.15.0 import os from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from pydrive.files import GoogleDriveFileList from google.colab import auth from oauth2client.client import GoogleCredentials from getpass import getpass import urllib # 1. Authenticate and create the PyDrive client. auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) # Cloning CLIPPER to access modules. if 'CLIPPER' not in os.listdir(): cmd_string = 'git clone https://github.com/PAL-ML/CLIPPER.git' os.system(cmd_string) ``` # Installation ## Install multi label metrics dependencies ``` ! pip install scikit-learn==0.24 ``` ## Install CLIP dependencies ``` import subprocess CUDA_version = [s for s in subprocess.check_output(["nvcc", "--version"]).decode("UTF-8").split(", ") if s.startswith("release")][0].split(" ")[-1] print("CUDA version:", CUDA_version) if CUDA_version == "10.0": torch_version_suffix = "+cu100" elif CUDA_version == "10.1": torch_version_suffix = "+cu101" elif CUDA_version == "10.2": torch_version_suffix = "" else: torch_version_suffix = "+cu110" ! pip install torch==1.7.1{torch_version_suffix} torchvision==0.8.2{torch_version_suffix} -f https://download.pytorch.org/whl/torch_stable.html ftfy regex ! pip install ftfy regex ! 
wget https://openaipublic.azureedge.net/clip/bpe_simple_vocab_16e6.txt.gz -O bpe_simple_vocab_16e6.txt.gz !pip install git+https://github.com/Sri-vatsa/CLIP # using this fork because of visualization capabilities ``` ## Install clustering dependencies ``` !pip -q install umap-learn>=0.3.7 ``` ## Install dataset manager dependencies ``` !pip install wget ``` # Imports ``` # ML Libraries import tensorflow as tf import tensorflow_hub as hub import torch import torch.nn as nn import torchvision.models as models import torchvision.transforms as transforms import keras # Data processing import PIL import base64 import imageio import pandas as pd import numpy as np import json from PIL import Image import cv2 import imgaug.augmenters as iaa # Plotting import seaborn as sns import matplotlib.pyplot as plt from IPython.core.display import display, HTML from matplotlib import cm # Models import clip # Datasets import tensorflow_datasets as tfds # Misc import progressbar import logging from abc import ABC, abstractmethod import time import urllib.request import os import itertools # Modules from CLIPPER.code.ExperimentModules import embedding_models from CLIPPER.code.ExperimentModules import simclr_data_augmentations from CLIPPER.code.ExperimentModules.dataset_manager import DatasetManager from CLIPPER.code.ExperimentModules.weight_imprinting_classifier import WeightImprintingClassifier from CLIPPER.code.ExperimentModules.utils import (save_npy, load_npy, get_folder_id, create_expt_dir, save_to_drive, load_all_from_drive_folder, download_file_by_name, delete_file_by_name) logging.getLogger('googleapicliet.discovery_cache').setLevel(logging.ERROR) ``` # Initialization & Constants **Edited** ``` dataset_name = 'ImagenetSketch' folder_name = "ImagenetSketch-Embeddings-28-02-21" # Change parentid to match that of experiments root folder in gdrive parentid = '1bK72W-Um20EQDEyChNhNJthUNbmoSEjD' # Filepaths # train_labels_filename = "train_labels.npz" val_labels_filename = 
"val_labels.npz" # train_embeddings_filename_suffix = "_embeddings_train.npz" val_embeddings_filename_suffix = "_embeddings_val.npz" # Initialize sepcific experiment folder in drive folderid = create_expt_dir(drive, parentid, folder_name) ``` # Load data ``` def get_ndarray_from_drive(drive, folderid, filename): download_file_by_name(drive, folderid, filename) return np.load(filename)['data'] # train_labels = get_ndarray_from_drive(drive, folderid, train_labels_filename) val_labels = get_ndarray_from_drive(drive, folderid, val_labels_filename) # test_labels = get_ndarray_from_drive(drive, folderid, test_labels_filename) dm = DatasetManager() test_data_generator = dm.load_dataset('imagenet_sketch', split='val') class_names = dm.get_class_names() print(class_names) ``` # Create label dictionary ``` unique_labels = np.unique(val_labels) print(len(unique_labels)) label_dictionary = {la:[] for la in unique_labels} for i in range(len(val_labels)): la = val_labels[i] label_dictionary[la].append(i) ``` # CLIP Linear Probe ## Function definitions ``` def start_progress_bar(bar_len): widgets = [ ' [', progressbar.Timer(format= 'elapsed time: %(elapsed)s'), '] ', progressbar.Bar('*'),' (', progressbar.ETA(), ') ', ] pbar = progressbar.ProgressBar( max_value=bar_len, widgets=widgets ).start() return pbar def prepare_indices( num_ways, num_shot, num_eval, num_episodes, label_dictionary, labels, shuffle=False ): eval_indices = [] train_indices = [] wi_y = [] eval_y = [] label_dictionary = {la:label_dictionary[la] for la in label_dictionary if len(label_dictionary[la]) >= (num_shot+num_eval)} unique_labels = list(label_dictionary.keys()) pbar = start_progress_bar(num_episodes) for s in range(num_episodes): # Setting random seed for replicability np.random.seed(s) _train_indices = [] _eval_indices = [] selected_labels = np.random.choice(unique_labels, size=num_ways, replace=False) for la in selected_labels: la_indices = label_dictionary[la] select = np.random.choice(la_indices, 
size = num_shot+num_eval, replace=False) tr_idx = list(select[:num_shot]) ev_idx = list(select[num_shot:]) _train_indices = _train_indices + tr_idx _eval_indices = _eval_indices + ev_idx if shuffle: np.random.shuffle(_train_indices) np.random.shuffle(_eval_indices) train_indices.append(_train_indices) eval_indices.append(_eval_indices) _wi_y = labels[_train_indices] _eval_y = labels[_eval_indices] wi_y.append(_wi_y) eval_y.append(_eval_y) pbar.update(s+1) return train_indices, eval_indices, wi_y, eval_y def embed_images( embedding_model, train_indices, num_augmentations, trivial=False ): def augment_image(image, num_augmentations, trivial): """ Perform SimCLR augmentations on the image """ if np.max(image) > 1: image = image/255 augmented_images = [image] # augmentations = iaa.Sequential([ # iaa.Affine( # translate_percent={'x':(-0.1, 0.1), 'y':(-0.1, 0.1)}, # rotate=(-15, 15), # shear=(-15, 15), # ), # iaa.Fliplr(0.5) # ]) def _run_filters(image): width = image.shape[1] height = image.shape[0] image_aug = simclr_data_augmentations.random_crop_with_resize( image, height, width ) image_aug = tf.image.random_flip_left_right(image_aug) image_aug = simclr_data_augmentations.random_color_jitter(image_aug) image_aug = simclr_data_augmentations.random_blur( image_aug, height, width ) image_aug = tf.reshape(image_aug, [image.shape[0], image.shape[1], 3]) image_aug = tf.clip_by_value(image_aug, 0., 1.) 
return image_aug.numpy() for _ in range(num_augmentations): # aug_image = augmentations(image=image) if trivial: aug_image = image else: aug_image = _run_filters(image) augmented_images.append(aug_image) augmented_images = np.stack(augmented_images) return augmented_images embedding_model.load_model() unique_indices = np.unique(np.array(train_indices)) ds = dm.load_dataset('imagenet_sketch', split='val') embeddings = [] IMAGE_IDX = 0 pbar = start_progress_bar(unique_indices.size+1) num_done=0 for idx, item in enumerate(ds): if idx in unique_indices: image = item[IMAGE_IDX][0] if num_augmentations > 0: aug_images = augment_image(image, num_augmentations, trivial) else: aug_images = image processed_images = embedding_model.preprocess_data(aug_images) embedding = embedding_model.embed_images(processed_images) embeddings.append(embedding) num_done += 1 pbar.update(num_done+1) if idx == unique_indices[-1]: break embeddings = np.stack(embeddings) return unique_indices, embeddings def train_model_for_episode( indices_and_embeddings, train_indices, wi_y, num_augmentations, train_epochs=None, train_batch_size=5, multi_label=True ): train_embeddings = [] train_labels = [] ind = indices_and_embeddings[0] emb = indices_and_embeddings[1] for idx, tr_idx in enumerate(train_indices): train_embeddings.append(emb[np.argwhere(ind==tr_idx)[0][0]]) train_labels += [wi_y[idx] for _ in range(num_augmentations+1)] train_embeddings = np.concatenate(train_embeddings) train_embeddings = WeightImprintingClassifier.preprocess_input(train_embeddings) wi_weights, label_mapping = WeightImprintingClassifier.get_imprinting_weights( train_embeddings, train_labels, False, multi_label ) wi_parameters = { "num_classes": num_ways, "input_dims": train_embeddings.shape[-1], "scale": False, "kaiming_init": None, "multi_label": multi_label } wi_cls = WeightImprintingClassifier(wi_parameters) if train_epochs: # ep_y = train_labels rev_label_mapping = {label_mapping[val]:val for val in label_mapping} train_y 
= np.zeros((len(train_labels), num_ways)) for idx_y, l in enumerate(train_labels): if multi_label: for _l in l: train_y[idx_y, rev_label_mapping[_l]] = 1 else: train_y[idx_y, rev_label_mapping[l]] = 1 wi_cls.train(train_embeddings, train_y, train_epochs, train_batch_size) return wi_cls, label_mapping # chenni change def evaluate_model_for_episode( model, eval_x, eval_y, label_mapping, metrics=['hamming', 'jaccard', 'subset_accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'], threshold=0.7, multi_label=True ): eval_x = WeightImprintingClassifier.preprocess_input(eval_x) #cc logits = model.predict_scores(eval_x).tolist() if multi_label: pred_y = model.predict_multi_label(eval_x, threshold) pred_y = [[label_mapping[v] for v in l] for l in pred_y] met = model.evaluate_multi_label_metrics( eval_x, eval_y, label_mapping, threshold, metrics ) else: pred_y = model.predict_single_label(eval_x) pred_y = [label_mapping[l] for l in pred_y] met = model.evaluate_single_label_metrics( eval_x, eval_y, label_mapping, metrics ) #cc return pred_y, met, logits # chenni change def run_episode_through_model( indices_and_embeddings, train_indices, eval_indices, wi_y, eval_y, thresholds=None, num_augmentations=0, train_epochs=None, train_batch_size=5, metrics=['accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'], embeddings=None, multi_label=True ): metrics_values = {m:[] for m in metrics} wi_cls, label_mapping = train_model_for_episode( indices_and_embeddings, train_indices, wi_y, num_augmentations, train_epochs, train_batch_size, multi_label=multi_label ) # if embeddings is None: # eval_x = [] # IMAGE_IDX=0 # for ev_idx in eval_indices: # image = get_item_from_dataset(ev_idx)[IMAGE_IDX] # processed_image = embedding_model.preprocess_data(image) # embedding = 
embedding_model.embed_images(processed_image) # eval_x.append(embedding) # eval_x = np.concatenate(eval_x) # else: eval_x = embeddings[eval_indices] #cc ep_logits = [] if multi_label: for t in thresholds: #cc pred_labels, met, logits = evaluate_model_for_episode( wi_cls, eval_x, eval_y, label_mapping, threshold=t, metrics=metrics, multi_label=True ) #cc ep_logits.append(logits) for m in metrics: metrics_values[m].append(met[m]) else: #cc pred_labels, metrics_values, logits = evaluate_model_for_episode( wi_cls, eval_x, eval_y, label_mapping, metrics=metrics, multi_label=False ) #cc ep_logits = logits #cc return metrics_values, ep_logits # chenni change def run_evaluations( indices_and_embeddings, train_indices, eval_indices, wi_y, eval_y, num_episodes, num_ways, thresholds, verbose=True, normalize=True, train_epochs=None, train_batch_size=5, metrics=['hamming', 'jaccard', 'subset_accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'], embeddings=None, num_augmentations=0, multi_label=True ): metrics_values = {m:[] for m in metrics} #cc all_logits = [] if verbose: pbar = start_progress_bar(num_episodes) for idx_ep in range(num_episodes): _train_indices = train_indices[idx_ep] _eval_indices = eval_indices[idx_ep] _wi_y = [label for label in wi_y[idx_ep]] _eval_y = [label for label in eval_y[idx_ep]] #cc met, ep_logits = run_episode_through_model( indices_and_embeddings, _train_indices, _eval_indices, _wi_y, _eval_y, num_augmentations=num_augmentations, train_epochs=train_epochs, train_batch_size=train_batch_size, embeddings=embeddings, thresholds=thresholds, metrics=metrics, multi_label=multi_label ) #cc all_logits.append(ep_logits) for m in metrics: metrics_values[m].append(met[m]) if verbose: pbar.update(idx_ep+1) #cc return metrics_values, all_logits # def run_train_loop( # embeddings, # train_indices, # eval_indices, # wi_y, # eval_y, # num_episodes, # 
num_ways, # verbose=True, # normalize=True, # train_epochs_loop=[5], # train_batch_size=5, # metrics=["hamming", "jaccard", "f1_score"], # threshold=0.72 # ): # metrics_values = [{m:[] for m in metrics} for _ in range(len(train_epochs_loop)+1)] # if verbose: # pbar = start_progress_bar(num_episodes) # for idx_ep in range(num_episodes): # wi_x = embeddings[train_indices[idx_ep]] # eval_x = embeddings[eval_indices[idx_ep]] # if normalize: # wi_x = WeightImprintingClassifier.preprocess_input(wi_x) # eval_x = WeightImprintingClassifier.preprocess_input(eval_x) # wi_weights, label_mapping = WeightImprintingClassifier.get_imprinting_weights( # wi_x, wi_y[idx_ep], False, True # ) # wi_parameters = { # "num_classes": num_ways, # "input_dims": wi_x.shape[-1], # "scale": False, # "dense_layer_weights": wi_weights, # "multi_label": True # } # wi_cls = WeightImprintingClassifier(wi_parameters) # met = wi_cls.evaluate_multi_label_metrics( # eval_x, eval_y[idx_ep], label_mapping, threshold, metrics # ) # for m in met: # metrics_values[0][m].append(met[m]) # for idx_tr_ep in range(len(train_epochs_loop)): # ep_y = wi_y[idx_ep] # rev_label_mapping = {label_mapping[val]:val for val in label_mapping} # train_y = np.zeros((len(ep_y), num_ways)) # for idx_y, _y in enumerate(ep_y): # for l in _y: # train_y[idx_y, rev_label_mapping[l]] = 1 # wi_cls.train(wi_x, train_y, train_epochs_loop[idx_tr_ep], train_batch_size) # met = wi_cls.evaluate_multi_label_metrics( # eval_x, eval_y[idx_ep], label_mapping, threshold, metrics # ) # for m in met: # metrics_values[idx_tr_ep+1][m].append(met[m]) # # _pred_y = wi_cls.predict_multi_label(eval_x, threshold) # # for j in range(len(_eval_y)): # # print([label_mapping[p] for p in _pred_y[j]], " vs ", _eval_y[j]) # # met = wi_cls.evaluate_multi_label_metrics( # # eval_x, eval_y[idx_ep], label_mapping, threshold, metrics # # ) # # for m in met: # # metrics_values[m].append(met[m]) # del wi_x # del eval_x # del wi_cls # if verbose: # pbar.update(idx_ep+1) 
# return metrics_values


def get_max_mean_jaccard_index_by_threshold(metrics_thresholds):
    """Max over thresholds of the episode-mean Jaccard index.

    *metrics_thresholds* is a sequence of dicts (one per threshold), each
    holding per-episode metric lists keyed by metric name.
    """
    return np.max([np.mean(mt['jaccard']) for mt in metrics_thresholds])


def get_max_mean_jaccard_index_with_threshold(metrics_thresholds):
    """Best episode-mean Jaccard index and the *argmax position* of its threshold.

    Here *metrics_thresholds* is a single dict whose 'jaccard' entry is an
    (episodes x thresholds) list of lists.  Note the second return value is
    the index into the threshold axis, not the threshold value itself.
    """
    mean_by_threshold = np.mean(np.array(metrics_thresholds['jaccard']), 0)
    return np.max(mean_by_threshold), np.argmax(mean_by_threshold)


def get_max_mean_f1_score_with_threshold(metrics_thresholds):
    """Best episode-mean F1 score and the argmax position of its threshold."""
    mean_by_threshold = np.mean(np.array(metrics_thresholds['f1_score']), 0)
    return np.max(mean_by_threshold), np.argmax(mean_by_threshold)


def get_mean_max_jaccard_index_by_episode(metrics_thresholds):
    """Mean over episodes of each episode's best-over-thresholds Jaccard index."""
    return np.mean(
        np.max(np.array([mt['jaccard'] for mt in metrics_thresholds]), axis=0)
    )


# (key, color, legend label, pick-minimum?) for every curve the plot supports,
# in the original drawing order.  Hamming is an error metric (lower is better).
# NOTE(review): 'o_recall'/'c_recall' also used argmin in the original code —
# looks like a copy-paste of the hamming branch; behavior kept, confirm intent.
_CURVE_SPECS = (
    ('jaccard', 'blue', 'Jaccard Index', False),
    ('hamming', 'green', 'Hamming Score', True),
    ('map', 'red', 'mAP', False),
    ('o_f1', 'yellow', 'OF1', False),
    ('c_f1', 'orange', 'CF1', False),
    ('o_precision', 'purple', 'OP', False),
    ('c_precision', 'cyan', 'CP', False),
    ('o_recall', 'brown', 'OR', True),
    ('c_recall', 'pink', 'CR', True),
    ('c_accuracy', 'maroon', 'CACC', False),
    ('top1_accuracy', 'magenta', 'TOP1', False),
    ('top5_accuracy', 'slategray', 'TOP5', False),
)


def _plot_metric_curve(metrics_thresholds, thresholds, key, color, minimize):
    """Plot one metric's episode-mean curve plus a dashed line at its optimum.

    Returns the optimal threshold (argmin for error-type metrics, else argmax).
    """
    mean_curve = np.mean(np.array(metrics_thresholds[key]), axis=0)
    best = np.argmin(mean_curve) if minimize else np.argmax(mean_curve)
    opt_threshold = thresholds[best]
    plt.plot(thresholds, mean_curve, c=color)
    plt.axvline(opt_threshold, ls="--", c=color)
    return opt_threshold


def plot_metrics_by_threshold(
    metrics_thresholds,
    thresholds,
    # Tuple default instead of a mutable list (same membership semantics).
    metrics=('hamming', 'jaccard', 'subset_accuracy', 'ap', 'map', 'c_f1', 'o_f1',
             'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy',
             'top5_accuracy', 'classwise_accuracy', 'c_accuracy'),
    title_suffix=""
):
    """Plot episode-mean curves over thresholds for every requested metric.

    Each plotted metric contributes a dashed vertical line at its optimal
    threshold and two legend entries: its display name and that threshold.
    (The savefig/show statements follow immediately in the same cell.)
    """
    legend = []
    plt.figure(figsize=(10, 10))
    for key, color, label, minimize in _CURVE_SPECS:
        if key in metrics:
            opt = _plot_metric_curve(metrics_thresholds, thresholds, key, color, minimize)
            legend.append(label)
            legend.append(opt)
    plt.xlabel('Threshold')
    plt.ylabel('Value')
    plt.legend(legend)
    title = title_suffix + "\nMulti label metrics by threshold"
    plt.title(title)
    plt.grid()
    fname = os.path.join(PLOT_DIR, title_suffix)
plt.savefig(fname) plt.show() ``` ## Setting multiple thresholds ``` thresholds = np.arange(0.43, 0.73, 0.01) # thresholds = np.arange(0.3, 0.72, 0.05) thresholds ``` # 5 way 5 shot ## Picking indices ``` num_ways = 5 num_shot = 5 num_eval = 15 shuffle = False num_episodes = 100 train_indices, eval_indices, wi_y, eval_y = prepare_indices( num_ways, num_shot, num_eval, num_episodes, label_dictionary, val_labels, shuffle ) embedding_model = embedding_models.CLIPEmbeddingWrapper() num_augmentations = 0 trivial=False indices, embeddings = embed_images( embedding_model, train_indices, num_augmentations, trivial=trivial ) ``` ## CLIP ``` clip_embeddings_val_fn = "clip" + val_embeddings_filename_suffix clip_embeddings_val = get_ndarray_from_drive(drive, folderid, clip_embeddings_val_fn) import warnings warnings.filterwarnings('ignore') if trivial: #cc results_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_lp_metrics_with_logits.json" else: #cc results_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_lp_metrics_with_logits.json" auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) download_file_by_name(drive, folderid, results_filename) if results_filename in os.listdir(): with open(results_filename, 'r') as f: #cc json_loaded = json.load(f) #cc clip_metrics_over_train_epochs = json_loaded['metrics'] #cc logits_over_train_epochs = json_loaded["logits"] else: clip_metrics_over_train_epochs = [] #cc logits_over_train_epochs = [] train_epochs_arr = [50] multi_label=False thresholds_val = thresholds # None # metrics_vals = ['hamming', 'jaccard', 'f1_score'] # ['accuracy', 'f1_score'] metrics_val = ['accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 
'c_accuracy'] for idx, train_epochs in enumerate(train_epochs_arr): if idx < len(clip_metrics_over_train_epochs): continue print(train_epochs) #cc clip_metrics_thresholds, all_logits = run_evaluations( (indices, embeddings), train_indices, eval_indices, wi_y, eval_y, num_episodes, num_ways, thresholds, train_epochs=train_epochs, num_augmentations=num_augmentations, embeddings=clip_embeddings_val, multi_label=multi_label, metrics=metrics_val ) clip_metrics_over_train_epochs.append(clip_metrics_thresholds) #cc logits_over_train_epochs.append(all_logits) #cc fin_list = [] #cc the whole for loop for a1 in wi_y: fin_a1_list = [] for a2 in a1: # fin_a2_list = [] # for a3 in a2: # new_val = a3.decode("utf-8") # fin_a2_list.append(new_val) new_val = str(a2) fin_a1_list.append(new_val) fin_list.append(fin_a1_list) with open(results_filename, 'w') as f: #cc results = {'metrics': clip_metrics_over_train_epochs, "logits": logits_over_train_epochs, "true_labels": fin_list} json.dump(results, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, results_filename) save_to_drive(drive, folderid, results_filename) if trivial: PLOT_DIR = "NewMetrics_clip_lp_softmax_ImagenetSketch" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_trivial_plots" else: PLOT_DIR = "NewMetrics_clip_lp_softmax_ImagenetSketch" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_plots" os.mkdir(PLOT_DIR) # chenni change whole block def get_best_metric_and_threshold(mt, metric_name, thresholds, optimal='max'): if optimal=='max': opt_value = np.max(np.mean(np.array(mt[metric_name]), axis=0)) opt_threshold = thresholds[np.argmax(np.mean(np.array(mt[metric_name]), axis=0))] if optimal=='min': opt_value = np.min(np.mean(np.array(mt[metric_name]), axis=0)) opt_threshold = thresholds[np.argmin(np.mean(np.array(mt[metric_name]), axis=0))] return 
opt_value, opt_threshold # chenni change whole block #all_metrics = ['hamming', 'jaccard', 'subset_accuracy', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'c_accuracy'] all_metrics = ['accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'] f1_vals = [] f1_t_vals = [] jaccard_vals = [] jaccard_t_vals = [] final_dict = {} for ind_metric in all_metrics: vals = [] t_vals = [] final_array = [] for mt in clip_metrics_over_train_epochs: if ind_metric == "hamming": ret_val, ret_t_val = get_best_metric_and_threshold(mt,ind_metric,thresholds,"min") else: ret_val, ret_t_val = get_best_metric_and_threshold(mt,ind_metric,thresholds,"max") vals.append(ret_val) t_vals.append(ret_t_val) final_array.append(vals) final_array.append(t_vals) final_dict[ind_metric] = final_array if trivial: graph_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_lp_metrics_graphs.json" else: graph_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_lp_metrics_graphs.json" with open(graph_filename, 'w') as f: json.dump(final_dict, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, graph_filename) save_to_drive(drive, folderid, graph_filename) zip_dirname = PLOT_DIR + ".zip" zip_source = PLOT_DIR ! 
zip -r $zip_dirname $zip_source auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) save_to_drive(drive, folderid, zip_dirname) ``` # 20 way 5 shot ## Picking indices ``` num_ways = 20 num_shot = 5 num_eval = 5 shuffle = False num_episodes = 100 train_indices, eval_indices, wi_y, eval_y = prepare_indices( num_ways, num_shot, num_eval, num_episodes, label_dictionary, val_labels, shuffle ) embedding_model = embedding_models.CLIPEmbeddingWrapper() num_augmentations = 0 trivial=False indices, embeddings = embed_images( embedding_model, train_indices, num_augmentations, trivial=trivial ) ``` ## CLIP ``` clip_embeddings_test_fn = "clip" + val_embeddings_filename_suffix clip_embeddings_test = get_ndarray_from_drive(drive, folderid, clip_embeddings_test_fn) import warnings warnings.filterwarnings('ignore') if trivial: #cc results_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_lp_metrics_with_logits.json" else: #cc results_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_lp_metrics_with_logits.json" auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) download_file_by_name(drive, folderid, results_filename) if results_filename in os.listdir(): with open(results_filename, 'r') as f: #cc json_loaded = json.load(f) #cc clip_metrics_over_train_epochs = json_loaded['metrics'] #cc logits_over_train_epochs = json_loaded["logits"] else: clip_metrics_over_train_epochs = [] #cc logits_over_train_epochs = [] train_epochs_arr = [50] multi_label=False thresholds_val = thresholds # None metrics_val = ['accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'] for idx, 
train_epochs in enumerate(train_epochs_arr): if idx < len(clip_metrics_over_train_epochs): continue print(train_epochs) #cc clip_metrics_thresholds, all_logits = run_evaluations( (indices, embeddings), train_indices, eval_indices, wi_y, eval_y, num_episodes, num_ways, thresholds, train_epochs=train_epochs, num_augmentations=num_augmentations, embeddings=clip_embeddings_val, multi_label=multi_label, metrics=metrics_val ) clip_metrics_over_train_epochs.append(clip_metrics_thresholds) #cc logits_over_train_epochs.append(all_logits) #cc fin_list = [] #cc the whole for loop for a1 in wi_y: fin_a1_list = [] for a2 in a1: # fin_a2_list = [] # for a3 in a2: # new_val = a3.decode("utf-8") # fin_a2_list.append(new_val) new_val = str(a2) fin_a1_list.append(new_val) fin_list.append(fin_a1_list) with open(results_filename, 'w') as f: #cc results = {'metrics': clip_metrics_over_train_epochs, "logits": logits_over_train_epochs, "true_labels": fin_list} json.dump(results, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, results_filename) save_to_drive(drive, folderid, results_filename) if trivial: PLOT_DIR = "NewMetrics_clip_lp_softmax_ImagenetSketch" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_trivial_plots" else: PLOT_DIR = "NewMetrics_clip_lp_softmax_ImagenetSketch" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_plots" os.mkdir(PLOT_DIR) # chenni change whole block def get_best_metric_and_threshold(mt, metric_name, thresholds, optimal='max'): if optimal=='max': opt_value = np.max(np.mean(np.array(mt[metric_name]), axis=0)) opt_threshold = thresholds[np.argmax(np.mean(np.array(mt[metric_name]), axis=0))] if optimal=='min': opt_value = np.min(np.mean(np.array(mt[metric_name]), axis=0)) opt_threshold = thresholds[np.argmin(np.mean(np.array(mt[metric_name]), axis=0))] return opt_value, opt_threshold 
# chenni change whole block f1_vals = [] f1_t_vals = [] jaccard_vals = [] jaccard_t_vals = [] final_dict = {} for ind_metric in all_metrics: vals = [] t_vals = [] final_array = [] for mt in clip_metrics_over_train_epochs: if ind_metric == "hamming": ret_val, ret_t_val = get_best_metric_and_threshold(mt,ind_metric,thresholds,"min") else: ret_val, ret_t_val = get_best_metric_and_threshold(mt,ind_metric,thresholds,"max") vals.append(ret_val) t_vals.append(ret_t_val) final_array.append(vals) final_array.append(t_vals) final_dict[ind_metric] = final_array if trivial: graph_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_lp_metrics_graphs.json" else: graph_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_lp_metrics_graphs.json" with open(graph_filename, 'w') as f: json.dump(final_dict, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, graph_filename) save_to_drive(drive, folderid, graph_filename) zip_dirname = PLOT_DIR + ".zip" zip_source = PLOT_DIR ! 
zip -r $zip_dirname $zip_source auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) save_to_drive(drive, folderid, zip_dirname) ``` # 5 way 1 shot ## Picking indices ``` num_ways = 5 num_shot = 1 num_eval = 19 shuffle = False num_episodes = 100 train_indices, eval_indices, wi_y, eval_y = prepare_indices( num_ways, num_shot, num_eval, num_episodes, label_dictionary, val_labels, shuffle ) embedding_model = embedding_models.CLIPEmbeddingWrapper() num_augmentations = 0 trivial=False indices, embeddings = embed_images( embedding_model, train_indices, num_augmentations, trivial=trivial ) ``` ## CLIP ``` clip_embeddings_test_fn = "clip" + val_embeddings_filename_suffix clip_embeddings_test = get_ndarray_from_drive(drive, folderid, clip_embeddings_test_fn) import warnings warnings.filterwarnings('ignore') if trivial: #cc results_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_lp_metrics_with_logits.json" else: #cc results_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_lp_metrics_with_logits.json" auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) download_file_by_name(drive, folderid, results_filename) if results_filename in os.listdir(): with open(results_filename, 'r') as f: #cc json_loaded = json.load(f) #cc clip_metrics_over_train_epochs = json_loaded['metrics'] #cc logits_over_train_epochs = json_loaded["logits"] else: clip_metrics_over_train_epochs = [] #cc logits_over_train_epochs = [] train_epochs_arr = [50] multi_label=False thresholds_val = thresholds # None # metrics_vals = ['hamming', 'jaccard', 'f1_score'] # ['accuracy', 'f1_score'] metrics_val = ['accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 
'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'] for idx, train_epochs in enumerate(train_epochs_arr): if idx < len(clip_metrics_over_train_epochs): continue print(train_epochs) #cc clip_metrics_thresholds, all_logits = run_evaluations( (indices, embeddings), train_indices, eval_indices, wi_y, eval_y, num_episodes, num_ways, thresholds, train_epochs=train_epochs, num_augmentations=num_augmentations, embeddings=clip_embeddings_val, multi_label=multi_label, metrics=metrics_val ) clip_metrics_over_train_epochs.append(clip_metrics_thresholds) #cc logits_over_train_epochs.append(all_logits) #cc fin_list = [] #cc the whole for loop for a1 in wi_y: fin_a1_list = [] for a2 in a1: # fin_a2_list = [] # for a3 in a2: # new_val = a3.decode("utf-8") # fin_a2_list.append(new_val) new_val = str(a2) fin_a1_list.append(new_val) fin_list.append(fin_a1_list) with open(results_filename, 'w') as f: #cc results = {'metrics': clip_metrics_over_train_epochs, "logits": logits_over_train_epochs, "true_labels": fin_list} json.dump(results, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, results_filename) save_to_drive(drive, folderid, results_filename) if trivial: PLOT_DIR = "NewMetrics_clip_lp_softmax_ImagenetSketch" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_trivial_plots" else: PLOT_DIR = "NewMetrics_clip_lp_softmax_ImagenetSketch" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_plots" os.mkdir(PLOT_DIR) # chenni change whole block def get_best_metric_and_threshold(mt, metric_name, thresholds, optimal='max'): if optimal=='max': opt_value = np.max(np.mean(np.array(mt[metric_name]), axis=0)) opt_threshold = thresholds[np.argmax(np.mean(np.array(mt[metric_name]), axis=0))] if optimal=='min': opt_value = np.min(np.mean(np.array(mt[metric_name]), axis=0)) opt_threshold = 
thresholds[np.argmin(np.mean(np.array(mt[metric_name]), axis=0))] return opt_value, opt_threshold # chenni change whole block f1_vals = [] f1_t_vals = [] jaccard_vals = [] jaccard_t_vals = [] final_dict = {} for ind_metric in all_metrics: vals = [] t_vals = [] final_array = [] for mt in clip_metrics_over_train_epochs: if ind_metric == "hamming": ret_val, ret_t_val = get_best_metric_and_threshold(mt,ind_metric,thresholds,"min") else: ret_val, ret_t_val = get_best_metric_and_threshold(mt,ind_metric,thresholds,"max") vals.append(ret_val) t_vals.append(ret_t_val) final_array.append(vals) final_array.append(t_vals) final_dict[ind_metric] = final_array if trivial: graph_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_lp_metrics_graphs.json" else: graph_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_lp_metrics_graphs.json" with open(graph_filename, 'w') as f: json.dump(final_dict, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, graph_filename) save_to_drive(drive, folderid, graph_filename) zip_dirname = PLOT_DIR + ".zip" zip_source = PLOT_DIR ! 
zip -r $zip_dirname $zip_source auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) save_to_drive(drive, folderid, zip_dirname) ``` # 20 way 1 shot ## Picking indices ``` num_ways = 20 num_shot = 1 num_eval = 10 shuffle = False num_episodes = 100 train_indices, eval_indices, wi_y, eval_y = prepare_indices( num_ways, num_shot, num_eval, num_episodes, label_dictionary, val_labels, shuffle ) embedding_model = embedding_models.CLIPEmbeddingWrapper() num_augmentations = 0 trivial=False indices, embeddings = embed_images( embedding_model, train_indices, num_augmentations, trivial=trivial ) ``` ## CLIP ``` clip_embeddings_test_fn = "clip" + val_embeddings_filename_suffix clip_embeddings_test = get_ndarray_from_drive(drive, folderid, clip_embeddings_test_fn) import warnings warnings.filterwarnings('ignore') if trivial: #cc results_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_lp_metrics_with_logits.json" else: #cc results_filename = "new_metrics"+dataset_name+"_softmax_"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_lp_metrics_with_logits.json" auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) download_file_by_name(drive, folderid, results_filename) if results_filename in os.listdir(): with open(results_filename, 'r') as f: #cc json_loaded = json.load(f) #cc clip_metrics_over_train_epochs = json_loaded['metrics'] #cc logits_over_train_epochs = json_loaded["logits"] else: clip_metrics_over_train_epochs = [] #cc logits_over_train_epochs = [] train_epochs_arr = [50] multi_label=False thresholds_val = thresholds # None # metrics_vals = ['hamming', 'jaccard', 'f1_score'] # ['accuracy', 'f1_score'] metrics_val = ['accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 
# chenni change whole block
def get_best_metric_and_threshold(mt, metric_name, thresholds, optimal='max'):
    """Pick the best threshold for a metric averaged over episodes.

    Parameters
    ----------
    mt : dict
        Maps metric names to a 2-D array-like of shape
        (num_episodes, num_thresholds) holding per-episode metric values.
    metric_name : str
        Key into ``mt`` selecting which metric to optimise.
    thresholds : sequence
        Candidate thresholds, aligned with the second axis of the metric.
    optimal : {'max', 'min'}
        Whether larger or smaller is better (e.g. 'min' for hamming
        loss, 'max' for accuracy / F1).

    Returns
    -------
    (float, threshold)
        The best episode-averaged metric value and the threshold at
        which it is attained.
    """
    # Average over episodes once instead of recomputing it four times.
    mean_curve = np.mean(np.array(mt[metric_name]), axis=0)
    if optimal == 'max':
        best_idx = np.argmax(mean_curve)
    elif optimal == 'min':
        best_idx = np.argmin(mean_curve)
    else:
        # The original fell through to an UnboundLocalError here.
        raise ValueError("optimal must be 'max' or 'min', got %r" % (optimal,))
    return mean_curve[best_idx], thresholds[best_idx]
github_jupyter
``` %load_ext autoreload %autoreload 2 %aimport utils_1_1 import pandas as pd import numpy as np import altair as alt from os.path import join import math from utils_1_1 import ( get_country_color_map_none_pediatric, get_visualization_subtitle, apply_theme, ) from web import for_website ``` # Data Preprocessing ## Lab Variation Data From Figshare (Not supported yet) Use the latest data from xxx ``` # Lab_VariationByCountry.csv # labs = pd.read_csv("https://ndownloader.figshare.com/files/22345587") # labs.head() labs = pd.read_csv('../data/Figure_3_lab_variation_day0_7.csv', header=[0]) # For loading local data labs processed_labs = labs.copy() processed_labs['country'] = processed_labs['country'].apply(lambda x: 'USA' if x == 'USA' else x.capitalize()) processed_labs['scale'] = processed_labs['scale'].apply(lambda x: x.capitalize()) processed_labs = processed_labs.rename(columns={ 'ever': 'Ever', 'never': 'Never', 'diff': 'Difference' }) loinc_map = { 'alanine aminotransferase (ALT)': '1742-6, alanine aminotransferase (ALT) (U/L)', 'albumin': '1751-7, albumin (g/dL)', 'aspartate aminotransferase (AST)': '1920-8, aspartate aminotransferase (AST) (U/L)', 'C-reactive protein (CRP) (Normal Sensitivity)': '1988-5, C-reactive protein (CRP) (Normal Sensitivity) (mg/L)', 'cardiac troponin (High Sensitivity)': '49563-0, cardiac troponin (High Sensitivity) (ng/mL)', 'cardiac troponin (Normal Sensitivity)': '6598-7, cardiac troponin (Normal Sensitivity) (ug/L)', 'creatinine': '2160-0, creatinine (mg/dL)', 'D-dimer': '48065-7, 48066-5, D-dimer (ng/mL)', 'Ferritin': '2276-4, Ferritin (ng/mL)', 'Fibrinogen': '3255-7, Fibrinogen (mg/dL)', 'lactate dehydrogenase (LDH)': '2532-0, lactate dehydrogenase (LDH) (U/L)', 'lymphocyte count': '731-0, lymphocyte count (10*3/uL)', 'neutrophil count': '751-8, neutrophil count (10*3/uL)', 'procalcitonin': '33959-8, procalcitonin (ng/mL)', 'prothrombin time (PT)': '5902-2, prothrombin time (PT) (s)', 'total bilirubin': '1975-2, total bilirubin 
(mg/dL)', 'white blood cell count (Leukocytes)': '6690-2, white blood cell count (Leukocytes) (10*3/uL)' } processed_labs["labname"] = processed_labs["labname"].apply(lambda x: loinc_map[x]) # Use only the original scale data processed_labs = processed_labs[processed_labs['scale'] == 'Original'] processed_labs = pd.melt( processed_labs, id_vars=['labname', 'scale', 'country'], value_vars=['Ever', 'Never', 'Difference'], var_name='severity', value_name='var' ) processed_labs # processed_labs['country'].unique() # for debug COUNTRY_NAMES = ['Within country', 'Between country'] + list(get_country_color_map_none_pediatric().keys()) + ['Within site'] COUNTRY_COLORS = ['#000000', '#000000'] + list(get_country_color_map_none_pediatric().values()) + ['#000000'] COUNTRY_NAMES ``` # Visualization ``` input_dropdown = alt.binding_select(options=processed_labs['labname'].unique()) selection = alt.selection_single(fields=['labname'], bind=input_dropdown, name='Lab ', init={'labname': processed_labs['labname'].unique()[0]}) legend_selection = alt.selection_multi(fields=['country'], bind="legend") color_scale = alt.Scale(domain=COUNTRY_NAMES, range=COUNTRY_COLORS) tick_size = 30 """ For SD """ # base = alt.Chart(processed_labs).mark_bar(size=tick_size).encode( # x=alt.Y('country:N'), # color=alt.Color('country:N', scale=color_scale, title=None), # stroke=alt.Color('country:N', scale=color_scale), # strokeWidth=alt.value(1), # y=alt.Y('y:Q', title='Lab variation'), # y2=alt.Y2('y:Q'), # # tooltip=[ # # alt.Tooltip("Country", title="Category"), # # alt.Tooltip("mean_val", title="Mean", format=".2f"), # # alt.Tooltip("stdev_val", title="Standard deviation", format=".2f"), # # alt.Tooltip("days_since_positive", title="Days since positive") # # ] # ) mean = alt.Chart( processed_labs ).mark_tick( size=tick_size, thickness=4 ).encode( x=alt.X('country:N', title=None, scale=alt.Scale(domain=COUNTRY_NAMES)), opacity=alt.value(1), color=alt.Color('country:N', scale=color_scale, 
title=None), y=alt.Y('var:Q', title='Lab variation'), tooltip=[ alt.Tooltip('labname', title="Labs"), alt.Tooltip("country", title="Country"), alt.Tooltip("var", title="Variation", format=".2f"), ], ) plot = ( mean .properties(height=400, width=400) ).facet( facet=alt.Facet("severity:N", title=None, header=alt.Header(labels=False), sort=['ever', 'never', 'difference']) ) plot = plot.add_selection( selection ).transform_filter( selection ).add_selection( legend_selection ).transform_filter( legend_selection ) plot = apply_theme(plot, legend_orient="right", header_label_font_size=15).properties( title={ "text": "Lab Variation Across Sites on Day 0", "subtitle": get_visualization_subtitle(data_release='2020-08-03', with_num_sites=True, num_sites=45), "subtitleColor": "gray", "anchor": "middle", }, # width=350, height=400 # This generates error (which should be submitted to the repo as an issue: More readable error message) ) for_website(plot, "1.1_Labs", "Lab variation across sites on Day 0") plot ```
github_jupyter
<img src="../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left"> ## _*The Bernstein-Vazirani Algorithm*_ In this tutorial, we introduce the [Bernstein-Vazirani algorithm](http://epubs.siam.org/doi/abs/10.1137/S0097539796300921), which is one of the earliest algorithms demonstrating the power of quantum computing. Despite its simplicity, it is often used and is the inspiration for many other quantum algorithms even today; it is the basis of the power of the short-depth quantum circuits, as in [Bravyi et al.](https://arxiv.org/abs/1704.00690) that uses its non-oracular version, or in [Linke et al.](http://www.pnas.org/content/114/13/3305.full) that uses it to test the performance of the quantum processors (see also the [talk by Ken Brown](https://www.youtube.com/watch?v=eHV9LTiePrQ) at the ThinkQ 2017 conference). Here, we show the implementation of the Bernstein-Vazirani algorithm **without using entanglement** based on [Du et al.](https://arxiv.org/abs/quant-ph/0012114) on QISKit and test it on IBM Q systems. The latest version of this notebook is available on https://github.com/QISKit/qiskit-tutorial. *** ### Contributors Rudy Raymond ## Introduction <a id='introduction'></a> The Bernstein-Vazirani algorithm deals with finding a hidden integer $a \in \{0,1\}^n$ from an oracle $f_a$ that returns a bit $a \cdot x \equiv \sum_i a_i x_i \mod 2$ upon receiving an input $x \in \{0,1\}^n$. A classical oracle returns $f_a(x) = a \cdot x \mod 2$ given an input $x$. Meanwhile, a quantum oracle behaves similarly but can be queried with superposition of input $x$'s. Classically, the hidden integer $a$ can be revealed by querying the oracle with $x = 1, 2, \ldots, 2^i, \ldots, 2^{n-1}$, where each query reveals the $i$-th bit of $a$ (or, $a_i$). For example, with $x=1$ one can obtain the least significant bit of $a$, and so on. 
This turns out to be an optimal strategy; any classical algorithm that finds the hidden integer with high probability must query the oracle $\Omega(n)$ times. However, given a corresponding quantum oracle, the hidden integer can be found with only $1$ query using the Bernstein-Vazirani algorithm. ## The Algorithm The Bernstein-Vazirani algorithm to find the hidden integer is very simple: start from a $|0\rangle$ state, apply Hadamard gates, query the oracle, apply Hadamard gates, and measure. The correctness of the algorithm is best explained by looking at the transformation of a quantum register $|a \rangle$ by $n$ Hadamard gates, each applied to the qubit of the register. It can be shown that $$ |a\rangle \xrightarrow{H^{\otimes n}} \frac{1}{\sqrt{2^n}} \sum_{x\in \{0,1\}^n} (-1)^{a\cdot x}|x\rangle. $$ In particular, when we start with a quantum register $|0\rangle$ and apply $n$ Hadamard gates to it, we have the familiar quantum superposition as below $$ |0\rangle \xrightarrow{H^{\otimes n}} \frac{1}{\sqrt{2^n}} \sum_{x\in \{0,1\}^n} |x\rangle, $$ which is slightly different from the Hadamard transform of the reqister $|a \rangle$ by the phase $(-1)^{a\cdot x}$. Now, the quantum oracle $f_a$ returns $1$ on input $x$ such that $a \cdot x \equiv 1 \mod 2$, and returns $0$ otherwise. This means we have the following transformation: $$ |x \rangle \left(|0\rangle - |1\rangle \right) \xrightarrow{f_a} | x \rangle \left(|0 \oplus f_a(x) \rangle - |1 \oplus f_a(x) \rangle \right) = (-1)^{a\cdot x} |x \rangle \left(|0\rangle - |1\rangle \right). $$ Notice that the second register $|0\rangle - |1\rangle$ in the above does not change and can be omitted for simplicity. In short, the oracle can be used to create $(-1)^{a\cdot x}|x\rangle$ from the input $|x \rangle$. In this tutorial, we follow [Du et al.](https://arxiv.org/abs/quant-ph/0012114) to generate a circuit for a quantum oracle without the need of an ancilla qubit (often used in the standard quantum oracle). 
The algorithm to reveal the hidden integer follows naturally by querying the quantum oracle $f_a$ with the quantum superposition obtained from the Hadamard transformation of $|0\rangle$. Namely, $$ |0\rangle \xrightarrow{H^{\otimes n}} \frac{1}{\sqrt{2^n}} \sum_{x\in \{0,1\}^n} |x\rangle \xrightarrow{f_a} \frac{1}{\sqrt{2^n}} \sum_{x\in \{0,1\}^n} (-1)^{a\cdot x}|x\rangle. $$ Because the inverse of the $n$ Hadamard gates is again the $n$ Hadamard gates, we can obtain $a$ by $$ \frac{1}{\sqrt{2^n}} \sum_{x\in \{0,1\}^n} (-1)^{a\cdot x}|x\rangle \xrightarrow{H^{\otimes n}} |a\rangle. $$ ## The (Inner-Product) Oracle <a id='oracle'></a> Here, we describe how to build the oracle used in the Bernstein-Vazirani algorithm. The oracle is also referred to as the [inner-product oracle](https://arxiv.org/pdf/quant-ph/0108095.pdf) (while the oracle of the Grover search is known as the Equivalence, or EQ, oracle). Notice that it transforms $|x\rangle$ into $(-1)^{a\cdot x} |x\rangle$. Clearly, we can observe that $$ (-1)^{a\cdot x} = (-1)^{a_1 x_1} \ldots (-1)^{a_ix_i} \ldots (-1)^{a_nx_n} = \prod_{i: a_i = 1} (-1)^{x_i}. $$ Therefore, the inner-product oracle can be realized by the following unitary transformation, which is decomposable as single-qubit unitaries: $$ O_{f_a} = O^1 \otimes O^2 \otimes \ldots \otimes O^i \otimes \ldots \otimes O^n, $$ where $O^i = (1 - a_i)I + a_i Z$, where $Z$ is the Pauli $Z$ matrix and $I$ is the identity matrix for $a_i \in \{0,1\}$. Notice that we start from a separable quantum state $|0\rangle$ and apply a series of transformations that are separable (i.e., can be described by unitaries acting on a single qubit): Hadamard gates to each qubit, followed by the call to the *decomposable* quantum oracle as [Du et al.](https://arxiv.org/abs/quant-ph/0012114), and another Hadamard gate. Hence, there is no entanglement created during the computation. 
This is in contrast with the circuit at [Linke et al.](http://www.pnas.org/content/114/13/3305.full) that used CNOT gates to realize the oracle and an ancilla qubit to store the answer of the oracle. ## The Circuit <a id="circuit"></a> We now implement the Bernstein-Vazirani algorithm with QISKit by first preparing the environment. ``` #initialization import sys, getpass import matplotlib.pyplot as plt %matplotlib inline import numpy as np # importing the QISKit # importing the QISKit from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, QISKitError from qiskit import available_backends, execute, register, get_backend try: sys.path.append("../../") # go to parent dir import Qconfig qx_config = { "APItoken": Qconfig.APItoken, "url": Qconfig.config['url']} print('Qconfig loaded from %s.' % Qconfig.__file__) except: APItoken = getpass.getpass('Please input your token and hit enter: ') qx_config = { "APItoken": APItoken, "url":"https://quantumexperience.ng.bluemix.net/api"} print('Qconfig.py not found in qiskit-tutorial directory; Qconfig loaded using user input.') # import basic plot tools from qiskit.tools.visualization import plot_histogram ``` We first set the number of qubits used in the experiment, and the hidden integer $a$ to be found by the Bernstein-Vazirani algorithm. The hidden integer $a$ determines the circuit for the quantum oracle. ``` nQubits = 16 # number of physical qubits a = 101 # the hidden integer whose bitstring is 1100101 # make sure that a can be represented with nQubits a = a % 2**(nQubits) ``` We then use QISKit to program the Bernstein-Vazirani algorithm. 
``` # Creating registers # qubits for querying the oracle and finding the hidden integer qr = QuantumRegister(nQubits) # for recording the measurement on qr cr = ClassicalRegister(nQubits) circuitName = "BernsteinVazirani" bvCircuit = QuantumCircuit(qr, cr) # Apply Hadamard gates before querying the oracle for i in range(nQubits): bvCircuit.h(qr[i]) # Apply barrier so that it is not optimized by the compiler bvCircuit.barrier() # Apply the inner-product oracle for i in range(nQubits): if (a & (1 << i)): bvCircuit.z(qr[i]) else: bvCircuit.iden(qr[i]) # Apply barrier bvCircuit.barrier() #Apply Hadamard gates after querying the oracle for i in range(nQubits): bvCircuit.h(qr[i]) # Measurement for i in range(nQubits): bvCircuit.measure(qr[i], cr[i]) ``` ## Experiment with Simulators We can run the above circuit on the simulator. ``` # use local simulator backend = "local_qasm_simulator" shots = 1000 results = execute(bvCircuit, backend=backend, shots=shots).result() answer = results.get_counts() plot_histogram(answer) ``` We can see that the result of the measurement is the binary representation of the hidden integer $a$. ## Experiment with Real Devices We can run the circuit on the real device as below. 
``` #to enable sleep import time #connect to remote API to be able to use remote simulators and real devices register(qx_config['APItoken'], qx_config['url']) print("Available backends:", available_backends()) # to run on remote simulator backend = "ibmq_qasm_simulator" # uncomment below to run on ibmqx5 # backend = "ibmqx5" shots = 1000 if get_backend(backend).status["operational"] is True: job_exp = execute(bvCircuit, backend=backend, shots=shots) lapse = 0 interval = 10 while not job_exp.done: print('Status @ {} seconds'.format(interval * lapse)) print(job_exp.status) time.sleep(interval) lapse += 1 print(job_exp.status) results = job_exp.result() answer = results.get_counts(bvCircuit) threshold = int(0.03 * shots) #the threshold of plotting significant measurements filteredAnswer = {k: v for k,v in answer.items() if v >= threshold} #filter the answer for better view of plots removedCounts = np.sum([ v for k,v in answer.items() if v < threshold ]) #number of counts removed filteredAnswer['other_bitstring'] = removedCounts #the removed counts is assigned to a new index plot_histogram(filteredAnswer) print(filteredAnswer) ``` We indeed see that the outcome is the binary representation of the hidden integer $a$ with high probability.
github_jupyter
<a href="https://colab.research.google.com/github/NataliaDiaz/colab/blob/master/Intro_to_continual_learning_Nov19%20ENSEIRB.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # A Gentle Introduction to Continual Learning in PyTorch In this brief tutorial we will learn the basics of *Continual Learning* using *PyTorch 0.4.0*. We will use the standard MNIST benchmark so that you can swiftly run this notebook from anywhere! This notebook is part of the **[Continual AI Colab](https://github.com/ContinualAI/colab)** is a repository meant for tutorials and demo running on Google Colaboratory. [Continual AI](https://www.continualai.org/) is an open research community on the topic of Continual Learning and AI! Join us today [on slack](https://continualai.herokuapp.com/)! :-D We will start with learning over the standard *MNIST* benchmark, then we will move in the actual continual learning setting with the *Permuted MNIST* benchmark. Let's have some fun! :-) --- ** Connecting a local runtime** In case resources are not enough for you (no GPU for example), you can always connect another [local runtime](https://research.google.com/colaboratory/local-runtimes.html) or to a [runtime on a Google Compute Engine instance](https://research.google.com/colaboratory/local-runtimes.html). This notebook has been designed to run fast enough on simple CPUs so you shouldn't find any trouble here, using a free *hosted account*. --- **Requisites to run it locally, outside colab (not recommended)** * Python 3.x * Jupyter * PyTorch 0.4.0 * Numpy * Matplolib --- ## Google Colaboratory First of all, take a moment to look around and discover Google Colab if you haven't before! You can run the commands below to understand how much resources you're using and are still available. Then consider also that you can also connect your Google Drive for additional space or for easily loading your own files. 
You can always reset the entire VM with "*Runtime > Reset all runtime*" in case of difficulty. Make also sure you're using the GPU or TPU in the same tab ("*Runtime > Change runtime type*"). ``` !free -m !df -h !nvidia-smi ``` **Questions to explore:** * How to connect your Google Drive with Google Colab? * How to import a new notebook and save it to your GDrive? * How to use files which are contained in your GDrive? Some tips here: https://medium.com/deep-learning-turkey/google-colab-free-gpu-tutorial-e113627b9f5d ## Installing PyTorch 0.4.0 Tensorflow is installed by default in Google Colab (guess why :P). If you want to use another DL toolkit you have to do it by yourself. Run the command below to install it. It should take less than a couple of minutes. ``` # http://pytorch.org/ from os import path from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()) accelerator = 'cu80' #'cu80' if path.exists('/opt/bin/nvidia-smi') else 'cpu' print('Platform:', platform, 'Accelerator:', accelerator) !pip install --upgrade --force-reinstall -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.0-{platform}-linux_x86_64.whl torchvision import torch print('Torch', torch.__version__, 'CUDA', torch.version.cuda) ``` Ok, torch is installed and imported! Let' see if it can see the GPU: ``` import torch torch.cuda.is_available() ``` That's great, let us import then a few libraries, which we'll be using during this tutorial! ``` import torch import torch.nn as nn import torchvision.datasets as datasets import torchvision.transforms as transforms import torch.optim as optim import torch.nn.functional as F import numpy as np import matplotlib.pyplot as plt ``` **Questions to explore:** * What's new in Pythorch 0.4? Some tips here: https://pytorch.org/blog/pytorch-0_4_0-migration-guide/ ## MNIST: Digits recognition with PyTorch All right, let's start then making sure we all know the basics! 
class Net(nn.Module):
    """Small CNN for 28x28 grayscale digits: two conv blocks + a 2-layer head."""

    def __init__(self):
        super().__init__()
        # Feature extractor: two 5x5 convolutions, the second with
        # spatial dropout, followed by a fully connected classifier.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # Conv block 1: conv -> 2x2 max-pool -> ReLU
        features = F.relu(F.max_pool2d(self.conv1(x), 2))
        # Conv block 2: conv -> spatial dropout -> 2x2 max-pool -> ReLU
        features = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(features)), 2))
        # Flatten to (batch, 320) for the fully connected head.
        flat = features.view(-1, 320)
        hidden = F.dropout(F.relu(self.fc1(flat)), training=self.training)
        return self.fc2(hidden)  # raw logits, one per digit class
def train(model, device, x_train, t_train, optimizer, epoch):
    """Run one training epoch over (x_train, t_train) in batches of 256.

    Prints the loss of the last mini-batch at the end of the epoch.
    """
    model.train()
    batch_size = 256
    for start in range(0, len(t_train) - 1, batch_size):
        end = start + batch_size
        x = torch.from_numpy(x_train[start:end]).to(device)
        y = torch.from_numpy(t_train[start:end]).long().to(device)
        optimizer.zero_grad()
        output = model(x)
        loss = F.cross_entropy(output, y)
        loss.backward()
        optimizer.step()
    print('Train Epoch: {} \tLoss: {:.6f}'.format(epoch, loss.item()))


def test(model, device, x_test, t_test):
    """Evaluate the model on (x_test, t_test) and return accuracy in percent."""
    model.eval()
    test_loss = 0
    correct = 0
    batch_size = 256
    for start in range(0, len(t_test) - 1, batch_size):
        end = start + batch_size
        with torch.no_grad():
            x = torch.from_numpy(x_test[start:end]).to(device)
            y = torch.from_numpy(t_test[start:end]).long().to(device)
            output = model(x)
            test_loss += F.cross_entropy(output, y).item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max logit
            correct += pred.eq(y.view_as(pred)).sum().item()
    # BUG FIX: the reported average loss was divided by len(t_train) — a
    # notebook-level global — instead of the size of the evaluated set.
    test_loss /= len(t_test)
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(t_test), 100. * correct / len(t_test)))
    return 100. * correct / len(t_test)
def permute_mnist(mnist, seed):
    """Apply one fixed random pixel permutation to every image set.

    Parameters
    ----------
    mnist : sequence of np.ndarray
        Image sets of shape (num_img, 1, 28, 28); every set is permuted
        with the same pixel ordering so train/test stay consistent.
    seed : int
        Seed for the permutation, making each "task" reproducible.

    Returns
    -------
    list of np.ndarray
        Permuted copies of the input sets, with the input shapes preserved.
    """
    np.random.seed(seed)
    print("starting permutation...")
    h = w = 28
    perm_inds = list(range(h * w))
    np.random.shuffle(perm_inds)
    perm_mnist = []
    for images in mnist:  # renamed from `set`, which shadowed the builtin
        num_img = images.shape[0]
        flat_set = images.reshape(num_img, w * h)
        perm_mnist.append(flat_set[:, perm_inds].reshape(num_img, 1, w, h))
    print("done.")
    return perm_mnist
**Questions to explore:** * When the permuted MNIST benchmark has been firstly introduced? * Can simple Dropout and Regularization techniques reduce forgetting? * In the permuted MNIT task, do convolutions still help increasing the accuracy? Some tips here: https://papers.nips.cc/paper/5059-compete-to-compute ## CL Strategies Let us now focus on some strategies for reducing catastrofic forgetting, one of the principal problems when learning continuously. in this section we will take a look at three different strategies: 1. Naive 2. Rehearsal 3. Elastic Weight Consolidation (EWC) and run it on a 3-tasks Permuted MNIST. Finally we will plot our results for comparison. For a more comprehensive overview on recent CL strategies for deep learning take a look at the recent paper "[Continuous Learning in Single-Incremental-Task Scenarios](https://arxiv.org/abs/1806.08568)". Let's start by defining our 3 tasks with the function we have already introduced before: ``` # task 1 task_1 = [(x_train, t_train), (x_test, t_test)] # task 2 x_train2, x_test2 = permute_mnist([x_train, x_test], 1) task_2 = [(x_train2, t_train), (x_test2, t_test)] # task 3 x_train3, x_test3 = permute_mnist([x_train, x_test], 2) task_3 = [(x_train3, t_train), (x_test3, t_test)] # task list tasks = [task_1, task_2, task_3] ``` ### Naive Strategy The *Naive* strategy, is the simple idea of continuing the back-prop process on the new batches/tasks. This is very simple, but at the same time very prone to forgetting as we have witnessed before. 
def shuffle_in_unison(dataset, seed, in_place=False):
    """Shuffle several arrays with one shared random permutation.

    The RNG state is restored before shuffling each array, so every
    array in ``dataset`` is reordered identically (data stays aligned
    with its labels).

    :param dataset: list of np.ndarray sharing the same first dimension.
    :param seed: seed controlling the (shared) permutation.
    :param in_place: if True, shuffle the arrays themselves and return
        None; otherwise return shuffled copies.
    """
    np.random.seed(seed)
    saved_state = np.random.get_state()
    shuffled = []
    for array in dataset:
        if in_place:
            np.random.shuffle(array)
        else:
            shuffled.append(np.random.permutation(array))
        # Rewind the RNG so the next array gets the same permutation.
        np.random.set_state(saved_state)
    return None if in_place else shuffled
``` fisher_dict = {} optpar_dict = {} ewc_lambda = 0.4 model = Net().to(device) optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9) ``` Now we need to define an additional function to compute the fisher information for each weight at the end of each task: ``` def on_task_update(task_id, x_mem, t_mem): model.train() optimizer.zero_grad() # accumulating gradients for start in range(0, len(t_mem)-1, 256): end = start + 256 x, y = torch.from_numpy(x_mem[start:end]), torch.from_numpy(t_mem[start:end]).long() x, y = x.to(device), y.to(device) output = model(x) loss = F.cross_entropy(output, y) loss.backward() fisher_dict[task_id] = {} optpar_dict[task_id] = {} # gradients accumulated can be used to calculate fisher for name, param in model.named_parameters(): optpar_dict[task_id][name] = param.data.clone() fisher_dict[task_id][name] = param.grad.data.clone().pow(2) ``` We need also to modify our *train* function to add the new regularization loss: ``` def train_ewc(model, device, task_id, x_train, t_train, optimizer, epoch): model.train() for start in range(0, len(t_train)-1, 256): end = start + 256 x, y = torch.from_numpy(x_train[start:end]), torch.from_numpy(t_train[start:end]).long() x, y = x.to(device), y.to(device) optimizer.zero_grad() output = model(x) loss = F.cross_entropy(output, y) ### magic here! 
:-) for task in range(task_id): for name, param in model.named_parameters(): fisher = fisher_dict[task][name] optpar = optpar_dict[task][name] loss += (fisher * (optpar - param).pow(2)).sum() * ewc_lambda loss.backward() optimizer.step() #print(loss.item()) print('Train Epoch: {} \tLoss: {:.6f}'.format(epoch, loss.item())) ``` Finally we can run the train over the three tasks sequence of th *Permuted MNIST*: ``` ewc_accs = [] for id, task in enumerate(tasks): avg_acc = 0 print("Training on task: ", id) (x_train, t_train), _ = task for epoch in range(1, 3): train_ewc(model, device, id, x_train, t_train, optimizer, epoch) on_task_update(id, x_train, t_train) for id_test, task in enumerate(tasks): print("Testing on task: ", id_test) _, (x_test, t_test) = task acc = test(model, device, x_test, t_test) avg_acc = avg_acc + acc print("Avg acc: ", avg_acc / 3) ewc_accs.append(avg_acc / 3) ``` **Questions to explore:** * How much the `ewc_lambda` parameter effect the final results? * Can you find a better parametrization to improve stability? * Can you find the memory overhead introduced by EWC with respect to the Naive approach? Some tips here: https://arxiv.org/pdf/1805.06370.pdf ### Plot Results To conclude, let's summerize our results in a nice plot! :-) ``` plt.plot([1, 2, 3], naive_accs, '-o', label="Naive") plt.plot([1, 2, 3], rehe_accs, '-o', label="Rehearsal") plt.plot([1, 2, 3], ewc_accs, '-o', label="EWC") plt.xlabel('Tasks Encountered', fontsize=14) plt.ylabel('Average Accuracy', fontsize=14) plt.title('CL Strategies Comparison on MNSIT', fontsize=14); plt.xticks([1, 2, 3]) plt.legend(prop={'size': 16}); ``` **Questions to explore:** * What's the difference in terms of memory utilization among the three methods? * Can you plot a similar graph highlighting the memory increase over time? Some tips here: https://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python/30316760 **Copyright (c) 2018. Continual AI. All rights reserved. 
** See the accompanying LICENSE file in the GitHub repository for terms. *Date: 29-09-2018 Author: Vincenzo Lomonaco E-mail: contact@continualai.org Website: continualai.org*
github_jupyter
## Chapter4.3 パーセプトロン ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt ``` 簡単なデータセットを作ります ``` X_dataset = [[0.4, 0.4], [-0.2, 0.3], [0.3, -0.1], [-0.4, -0.4], [-0.2, -0.3], [-0.5, 0.2]] y_dataset = [1, 1, 1, -1, -1, -1] dataset = pd.DataFrame(X_dataset, columns=['x0', 'x1']) dataset['y'] = y_dataset dataset ``` プロット用の関数を定義します ``` def plot_dataset(dataset): fig, ax = plt.subplots() for key, g in dataset.groupby('y'): color = 'k' if key == 1 else 'w' g.plot(ax=ax, kind='scatter', x='x0', y='x1', label=key, color=color, edgecolor='black', linewidth='1', xlim=(-1, 1), ylim=(-1, 1)) return fig, ax def plot_boundary(m, n, c, bias): x = np.array(range(-2, 2)) y = eval('%f*x + %f' % ((-m / (n + 1e-6)), (-c * bias / (n + 1e-6)))) plt.plot(x, y) ``` パーセプトロンの学習を実行します ``` def train(dataset, epochs=1): learning_rate = 1.0 bias = 0.0 c = 0.0 m = 0.0 n = 1.0 for epoch in range(epochs): for i, (x0, x1, y) in dataset.iterrows(): if y * (m * x0 + n * x1 + bias * c) > 0: pass # 合っているので何もしない else: print(c, m, n, bias, x0, x1) fig, ax = plot_dataset(dataset) plot_boundary(m, n, c, bias) # 間違っている点をプロット circle1 = plt.Circle((x0, x1), 0.1, fill=False, linestyle='dashed', linewidth=2) ax.add_patch(circle1) c += learning_rate * y * bias m += learning_rate * y * x0 n += learning_rate * y * x1 print(c, m, n, bias) return m, n m, n = train(dataset=dataset, epochs=5) plot_dataset(dataset) plot_boundary(m, n, c=0.0, bias=0.0) ``` 計算を`np.dot`を使った方法に書き換えます ``` def train(dataset, epochs=1): learning_rate = 1.0 w = np.array([0.0, 0.0, 1.0]) # 初期値 for epoch in range(epochs): for i, (x0, x1, y) in dataset.iterrows(): x = np.array([1.0, x0, x1]) if y * np.dot(w, x) > 0: pass # 合っているので何もしない else: w += learning_rate * y * x return w ``` 識別境界が原点を通らないデータセットを作成 ``` from sklearn.datasets import make_blobs X_dataset, y_dataset = make_blobs(centers=[[-0.25, 0.5], [0.15, -0.2]], cluster_std=0.2, n_samples=20, center_box=(-1.0, 1.0), random_state=42) dataset = pd.DataFrame(X_dataset, 
columns=['x0', 'x1']) dataset['y'] = y_dataset dataset['y'] = dataset.y.apply(lambda x: 1 if x == 1 else -1) ``` 学習して識別境界をプロットします ``` w = train(dataset=dataset, epochs=20) print(w) plot_dataset(dataset) plot_boundary(w[1], w[2], w[0], 1.0) ```
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"></ul></div> ``` import torch from torch import nn import numpy as np from matplotlib.patches import Rectangle from matplotlib import patches, gridspec from pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show from matplotlib.colors import ListedColormap, LinearSegmentedColormap import seaborn as sns import matplotlib.font_manager import matplotlib.pyplot as plt matplotlib.rcParams['mathtext.fontset'] = 'custom' matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans' matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic' matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold' #Direct input plt.rcParams['text.latex.preamble']=[r"\usepackage{lmodern}", r'\usepackage{amssymb}', r'\usepackage{amsmath}', r'\usepackage{wasysym}'] #Options params = {'text.usetex' : True, 'font.size' : 16, 'font.family' : 'sans-serif', 'font.serif' : 'Computer Modern Sans serif', 'text.latex.unicode': True, } plt.rcParams.update(params) sns.set_style("darkgrid") plot_colors = [ "red", # Color of repeating class "limegreen", # Color Left "royalblue", # "#39ED48", # Color Right ] c_names = [ "Red", "Blue", # -.- inverted from above "Green", ] # Define loss function the_loss = nn.BCEWithLogitsLoss(reduction="none") red_center = np.array([.25, 1]).reshape(1, 2) red_center /= np.linalg.norm(red_center, axis=1, keepdims=True) green_center = np.array([.85, .25]).reshape(1, 2) green_center /= np.linalg.norm(green_center, axis=1, keepdims=True) blue_center = np.array([-.25, 1]).reshape(1, 2) blue_center /= np.linalg.norm(blue_center, axis=1, keepdims=True) angles = torch.tensor([[np.cos(np.pi*x), np.sin(np.pi*x)] for x in np.linspace(0, 1, 100)]) data2 = .05* np.random.randn(10, 2) + np.array([.25, 1]).reshape(1, 2) data3 = data2 * np.array([[-1, 1]]) data1 = .05* np.random.randn(10, 2) + green_center _data2 = torch.from_numpy(data2) _data3 = torch.from_numpy(data3) 
_data1 = torch.from_numpy(data1) outputs_blue = {} outputs_green = {} BS = [1, 2, 50]#.astype(int) for B in BS: output1 = angles @ (_data1/_data1.norm(p=2, dim=1, keepdim=True)).T output1 = (output1.abs()**B) * output1.sign() * _data1.norm(p=2, dim=1, keepdim=True).T output2 = angles @ (_data2/_data2.norm(p=2, dim=1, keepdim=True)).T output2 = (output2.abs()**B) * output2.sign() * _data2.norm(p=2, dim=1, keepdim=True).T output3 = angles @ (_data3/_data3.norm(p=2, dim=1, keepdim=True)).T output3 = (output3.abs()**B) * output3.sign() * _data3.norm(p=2, dim=1, keepdim=True).T output = torch.cat([output2, output1], dim=1) output /= 20 * (_data1.norm(p=2, dim=1).sum() + _data2.norm(p=2, dim=1).sum()) outputs_blue[B] = output output = torch.cat([output2, output3], dim=1) output /= 20 * (_data1.norm(p=2, dim=1).sum() + _data3.norm(p=2, dim=1).sum()) outputs_green[B] = output sns.set_style("darkgrid") fig = plt.figure(figsize=(16 * .9, 12 * .9), constrained_layout=True) offset = .5025 gs = gridspec.GridSpec(nrows=3, ncols=3, width_ratios=[1.5, 4, 1.5], height_ratios=[1, 1, 1], wspace=0.05, hspace=0.175) axes = np.array([fig.add_subplot(gs[i, 1]) for i in range(3)]) arrows = [] arrows2 = [] minima_green = [] minima_blue = [] bce_axes = axes # Filling in the losses for i, B in enumerate(BS): ax = axes[i] # print(ax.get_aspect()) ax.set_aspect("auto") # ["Red vs.~Orange", "Red vs.~{}".format(c_names[2])] loss_green = the_loss(outputs_green[B], torch.cat([torch.ones_like(output2), torch.zeros_like(output1)], dim=1)).mean(1) loss_blue = the_loss(outputs_blue[B], torch.cat([torch.ones_like(output2), torch.zeros_like(output1)], dim=1)).mean(1) x_green = np.linspace(0, 180, 100)[loss_green.argmin()] min_green = loss_green.min() minima_green.append((min_green, x_green)) x_blue = np.linspace(0, 180, 100)[loss_blue.argmin()] min_blue = loss_blue.min() minima_blue.append((min_blue, x_blue)) arrows2.append((0, 0, *(0.5 * np.array([np.cos(x_green*np.pi/180), 
np.sin(x_green*np.pi/180)])))) arrows.append((0, 0, *(0.5 * np.array([np.cos(x_blue*np.pi/180), np.sin(x_blue*np.pi/180)])))) p_length = 2 plot_blue = ax.plot(np.linspace(0, 180, 100)[:], loss_blue, color=plot_colors[2], lw=3, alpha=1, label="Red vs.~{}".format(c_names[2])) plot_green = ax.plot(np.linspace(0, 180, 100)[:], loss_green, color=plot_colors[1], lw=3, alpha=1, label="Red vs.~{}".format(c_names[1])) ax.plot(np.linspace(0, 180, 100)[:], loss_blue, color=plot_colors[1], lw=5, alpha=.75, label="Red vs.~{}".format(c_names[2])) ax.plot(np.linspace(0, 180, 100)[:], loss_green, color=plot_colors[2], lw=5, alpha=.75, label="Red vs.~{}".format(c_names[1])) ax.scatter(x_green, min_green, marker="s", s=250, edgecolor=plot_colors[2], facecolor="none", lw=4) ax.scatter(x_blue, min_blue, marker="s", s=250, edgecolor=plot_colors[1], facecolor="none", lw=4) ax.tick_params( axis='y', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off left=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelleft=False) # plt.grid() lim = ax.get_ylim() lim = lim[0] - (lim[1] - lim[0]) * .05, lim[1] + (lim[1] - lim[0]) * .05, ax.margins(y=1.2) ax.set_ylim(*lim) ax.tick_params( axis='y', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off left=False, # ticks along the bottom edge are off right=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelright=False) if i == 0: ax.set_title("BCE Loss for different angles of $\mathbf w$", fontsize=24, fontweight="bold", pad=32) # if i < 2: ax.tick_params( axis='x', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off left=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off 
labelbottom=False) if i == 0 or True: # ax.set_title("Direction of weight vector $\mathbf w$", fontsize=24, y=1.4) ax.set_xticks([0, 45, 90, 135, 180]) ax.set_xticklabels([0, 45, 90, 135, 180]) xlim = ax.get_xlim() ylim = ax.get_ylim() xscale = xlim[1] - xlim[0] yscale = xlim[1] - xlim[0] off = -.03 fig.patches.append(patches.ConnectionPatch((0.05, -.075), (.95, (-.1)), coordsA=ax.transAxes, coordsB=ax.transAxes, edgecolor=(1, 1, 1, .75), linewidth=36, )) for angle in [0., 22.5, 45, 45 + 22.5, 90., 90 + 22.5, 135., 135 + 22.5, 180.]: rad = 2 * angle / 360 * np.pi ydelta = 15 / 180 * (lim[1] - lim[0]) * np.sin(rad) + (lim[1] - lim[0]) * .075 xdelta = 2 * 10 * np.cos(rad) arrow = patches.ConnectionPatch( (angle-xdelta/2, lim[0] - ydelta), (angle-xdelta/2 + xdelta, lim[0] + 2 * 15 / 180 * (lim[1] - lim[0]) * np.sin(rad) - ydelta), coordsA=ax.transData, coordsB=ax.transData, # Default shrink parameter is 0 so can be omitted edgecolor="black", facecolor="black", arrowstyle="-|>", # "normal" arrow mutation_scale=10, # controls arrow head size linewidth=3.5, # arrowidth=2 # width=4 ) fig.patches.append(arrow) ax.set_xticks([0, 22.5, 45, 67.5, 90, 112.5, 135, 157.5, 180]) ax.set_xlim(-5, 185) if i == 0 and False: ax.legend([mean_line_green, mean_line_blue, mean_line_red], ["", "", "Mean class angles"], ncol=3, loc="upper left", fontsize=20, bbox_to_anchor=[.0, 1.025], handlelength=1.55, columnspacing=0, handletextpad=.5, borderaxespad=.05, framealpha=1, edgecolor="black", facecolor="white" ).get_frame().set_linewidth(1.2) if i == 0: pa1 = plt.plot(*(np.nan, np.nan), marker="", # edgecolor="black", color=plot_colors[2], lw=3, linestyle=(0, (2, 1))) pa2 = plt.plot(*(np.nan, np.nan), marker="", # edgecolor="black", color=plot_colors[1], lw=3, linestyle=(0, (2, 1))) pa1 = plt.plot((np.nan, np.nan), color="black") pa11 = patches.Rectangle((np.nan, np.nan), 6, .05, facecolor='red', lw=.5, edgecolor=plot_colors[2]) pa2 = patches.Rectangle((np.nan, np.nan), 6, .05, 
facecolor='#39ED48', lw=.5, edgecolor=plot_colors[1]) pa3 = patches.Rectangle((np.nan, np.nan), 6, .05, facecolor='#0C66F7', lw=.5, edgecolor=plot_colors[2]) pa4 = patches.Rectangle((np.nan, np.nan), 6, .05, facecolor='white', lw=.5, alpha=0) ax.legend([ # g1, plot_blue[0], # , g2, # pa4, pa4, # b1, plot_green[0], # b2 ], # ["", "", "{} vs.~{}".format(c_names[0], c_names[2]), ["Problem 1", "Problem 2"], # "", "", "{} vs.~{}".format(c_names[0], c_names[1])], ncol=8, loc="upper left", fontsize=20, bbox_to_anchor=[.16, 1+.1], handlelength=1.25, # handleheight=.05, columnspacing=1, handletextpad=.75, borderaxespad=.05, framealpha=1, edgecolor="black", facecolor="white" ).get_frame().set_linewidth(1.2) axes = np.array([fig.add_subplot(gs[i, 0]) for i in range(3)]) first_axes = axes y_labels = ["B$=$$1$\n (linear)", "$B_2$$>$B$_1$", "B$\gg$$1$"] colors = ["green", "white", plot_colors[0]] cmap = LinearSegmentedColormap.from_list("mycmap", colors) # Filling in the side plots to the left for i, B in enumerate(BS): ax = axes[i] ax.set_aspect("equal") if i == 0: ax.set_title("Problem 1".format(c_names[0], c_names[2]), fontsize=24, pad=30) def z_func(x, y): grid = np.concatenate([x[None], y[None]], axis=0) grid_norm = np.linalg.norm(grid, 2, axis=0) cos = ((np.array((arrows2[i][-2:]) / np.sqrt(2))[:, None, None] * grid / grid_norm).sum(0)) return (np.abs(cos) ** B * np.sign(cos) * grid_norm) x = np.arange(-1.0 / 2, 1.1 / 2, .005) y = np.arange(-1.0 / 2, 1.1 / 2, .005) X, Y = meshgrid(x, y) # grid of point Z = z_func(X, Y) # evaluation of the function on the grid vrange = np.max(np.abs(Z).flatten()) if i < 3: im = ax.imshow(Z.clip(0), cmap=cmap, vmin=-vrange, vmax=vrange, extent=(-1.25, 1.25, 1.25, -1.25)) # drawing the function else: Z = Z.clip(None, 0) + saved.clip(0, None) im = ax.imshow((Z), cmap=cmap, vmin=-vrange, vmax=vrange, extent=(-1.25, 1.25, 1.25, -1.25)) # drawing the function if i == 2: saved = Z ax.set_xlabel("Optimal weights\n per B (arrows)", 
fontweight="bold", fontsize=26, labelpad=20) if i == 1 and False: ax.text(len(X) + 60, len(Y) / 2, "Increasing B", fontweight="bold", fontsize=24, rotation=270, horizontalalignment='center', verticalalignment='center') # ax2.set_yticks([]) # if i == 0: # ax.set_title("Linear (B=1)", fontsize=18, fontweight="bold") # elif i < 3: # ax.set_title("B={} {}".format(B, "" if B>1 else "(linear)"), fontsize=18, fontweight="bold") # else: # ax.set_title("B={} (2 units)".format(B), fontsize=18, fontweight="bold") ax.scatter(data3[:, 0], data3[:, 1], marker="o", color="gainsboro", edgecolor="black", linewidth=2, s=150) ax.scatter(data2[:, 0], data2[:, 1], marker="o", color=plot_colors[0], edgecolor="black", linewidth=2, s=150) # if i == 3: # ax.arrow(*5 * arrows2[i - 1], head_width=2, head_length=1, fc=plot_colors[1], ec='black', width=.5) # ax.arrow(*5 * arrows2[i][:2], *(-5 * np.array(arrows2[i][-2:])) # , head_width=16, head_length=1, fc='#39ED48', ec='black', width=.5) # else: ax.arrow(*arrows2[i], head_width=.15, head_length=.1, fc='black', ec='black', width=.05) if i != 1: ax.text(-.5, .5, y_labels[i], fontsize=28, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) # ax.invert_yaxis() ax.set_xlim(-1.25, 1.25) ax.set_ylim(-1.25, 1.25) ax.grid(False) ax.set_xticks([]) ax.set_yticks([]) ax.patch.set_edgecolor(plot_colors[2]) ax.patch.set_linewidth(10) axes = np.array([fig.add_subplot(gs[i, 2]) for i in range(3)]) plt.figtext(0.08, .44, "Increasing B", ha="center", rotation=90, fontsize=28,) # Filling in the side plots to the right second_axes = axes for i, B in enumerate(BS): ax = axes[i] if i == 0: ax.set_title("Problem 2".format(c_names[0], c_names[1]), fontsize=24, pad=30) def z_func(x, y): grid = np.concatenate([x[None], y[None]], axis=0) grid_norm = np.linalg.norm(grid, 2, axis=0) cos = ((np.array((arrows[i][-2:]) / np.sqrt(2))[:, None, None] * grid / grid_norm).sum(0)) return (np.abs(cos) ** B * np.sign(cos) * grid_norm) x = 
np.arange(-1.0 / 2, 1.1 / 2, .005) y = np.arange(-1.0 / 2, 1.1 / 2, .005) X, Y = meshgrid(x, y) Z = z_func(X, Y) vrange = np.max(np.abs(Z).flatten()) if i < 3: im = ax.imshow(Z.clip(0), cmap=cmap, vmin=-vrange, vmax=vrange, extent=(-1.25, 1.25, 1.25, -1.25)) # drawing the function else: Z = Z.clip(None, 0) + saved.clip(0, None) im = ax.imshow((Z), cmap=cmap, vmin=-vrange, vmax=vrange, extent=(-1.25, 1.25, 1.25, -1.25)) # drawing the function ax.set_xlim(-1.25, 1.25) ax.set_ylim(-1.25, 1.25) if i == 2: saved = Z ax.set_xlabel("Optimal weights\n per B (arrows)", fontweight="bold", fontsize=26, labelpad=20) ax.scatter(data1[:, 0], data1[:, 1], marker="o", color="gainsboro", edgecolor="black", linewidth=2, s=150) ax.scatter(data2[:, 0], data2[:, 1], marker="o", color=plot_colors[0], edgecolor="black", linewidth=2, s=150) ax.arrow(*arrows[i], head_width=.15, head_length=.1, fc='black', ec='black', width=.05) # ax.invert_yaxis() ax.grid(False) ax.set_xticks([]) ax.set_yticks([]) ax.patch.set_edgecolor(plot_colors[1]) ax.patch.set_linewidth(10) cmap = matplotlib.cm.get_cmap('hot') # Frame around rows for i in range(3): # outer outergs = gridspec.GridSpec(1, 1, wspace=0, hspace=0, top=0) pos = axes[i].get_position(False) outergs.update(bottom=pos.y0 - 0.0125, left=.115, top=pos.y1 + 0.0125, right=.91) outerax = fig.add_subplot(outergs[0]) outerax.tick_params(axis='both', which='both', bottom=0, left=0, labelbottom=0, labelleft=0) outerax.set_facecolor("white") outerax.patch.set_edgecolor("black") outerax.patch.set_linewidth(2.25) outerax.patch.set_linestyle("dashed") outerax.patch.set_alpha(0.3) outerax.grid(False) outerax.set_zorder(-10) # Connection between sides and center for minima, sign, axes in [(minima_green, 1, first_axes), (minima_blue, -1, second_axes)]: for i, minimum in enumerate(minima): delta = bce_axes[i].get_ylim() delta = 0.05*(delta[1]-delta[0]) arrow = patches.ConnectionPatch( (minimum[1]- sign*4, minimum[0]+delta), (sign*1.31, 1.3), 
coordsA=bce_axes[i].transData, coordsB=axes[i].transData, color=plot_colors[2 + min(0, sign)], linestyle="--", # "normal" arrow linewidth=2, ) fig.patches.append(arrow) arrow = patches.ConnectionPatch( (minimum[1]-sign*5, minimum[0]-delta), (sign*1.32, -1.29), coordsA=bce_axes[i].transData, coordsB=axes[i].transData, color=plot_colors[2 + min(0, sign)], linestyle="--", # "normal" arrow linewidth=2, ) fig.patches.append(arrow) arrow = patches.ConnectionPatch( (-1.7, 1.25), (-1.7, -1.25), coordsA=first_axes[0].transData, coordsB=first_axes[-1].transData, color="black", arrowstyle="-|>", mutation_scale=30, linewidth=3, ) fig.patches.append(arrow) bce_axes[2].set_xlabel("Arrows denote the direction of $\mathbf w$ (x-axis)", fontsize=22, labelpad=45) ```
github_jupyter
``` %load_ext autoreload %autoreload 2 import os import sys import numpy as np import pandas as pd import matplotlib.pyplot as plt import cv2 sys.path.insert(0, '../src') import classifier import detector from image import Image from image import build_histogram_equalizer TRAIN_DATA_DIR = os.path.abspath("../trainset") COLORS = ['COLOR_STOP_SIGN_RED', 'COLOR_OTHER_RED', 'COLOR_BROWN' , 'COLOR_ORANGE' , 'COLOR_BLUE' , 'COLOR_OTHER' ] data = {c: [] for c in COLORS} files = os.listdir(TRAIN_DATA_DIR) for fname in files: name, ext = os.path.splitext(fname) if ext == ".npz": if name + '.jpg' in files: img = Image.load(os.path.join(TRAIN_DATA_DIR, name) + '.jpg') elif name + '.png' in files: img = Image.load(os.path.join(TRAIN_DATA_DIR, name) + '.png') npzfname = os.path.join(TRAIN_DATA_DIR, fname) npzdata = np.load(npzfname) for c in COLORS: if npzdata[c].size > 0: mat = npzdata[c] mat = mat.reshape(-1, 3).astype(np.uint8) data[c].append(mat) for c in COLORS: data[c] = np.vstack(data[c]) print('---- done ------') N_DATA_PER_CLASS = 200000 APPEND_YCRCB = False APPEND_BIAS = False ONLY_YCRCB = False labelmp = { 'COLOR_STOP_SIGN_RED': 0, 'COLOR_OTHER_RED': 1, 'COLOR_ORANGE': 1, 'COLOR_BROWN': 1, 'COLOR_BLUE': 1, 'COLOR_OTHER': 1 } X, y = [], [] for ci, c in enumerate(COLORS): print(c, data[c].shape) rndidx = np.random.choice(data[c].shape[0], N_DATA_PER_CLASS, replace=False) x = data[c][rndidx, :] if ONLY_YCRCB: xycc = cv2.cvtColor(x.reshape(-1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2YCrCb) xycc = xycc.reshape(-1, 3) x = xycc elif APPEND_YCRCB: xycc = cv2.cvtColor(x.reshape(-1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2YCrCb) xycc = xycc.reshape(-1, 3) x = np.hstack([x, xycc]) if APPEND_BIAS: x = np.hstack([x, np.ones((N_DATA_PER_CLASS, 1))]) X.append(x) y.append(np.ones((N_DATA_PER_CLASS, 1)) * labelmp[c]) X = np.vstack(X).astype(np.float64) y = np.vstack(y).astype(np.int32).reshape(-1) print('-----------done------------') def ssred_accuracy(clf, X, y): pred = clf.predict(X) 
pred = pred == 0 y = y == 0 return np.sum(pred == y) / y.shape[0] def ssred_precision(clf, X, y): pred = clf.predict(X) pred = pred == 0 y = y == 0 return np.sum(pred[pred == y]) / np.sum(pred) def ssred_recall(clf, X, y): pred = clf.predict(X) pred = pred == 0 y = y == 0 return np.sum(pred[pred == y]) / np.sum(y) scoring = { 'accuracy': ssred_accuracy, 'precision': ssred_precision, 'recall': ssred_recall } def print_scores(scores): for key, val in scores.items(): print(f'\t{key}: %0.2f (+/- %0.2f)' % (val.mean(), val.std() * 2)) %reload_ext autoreload from sklearn.model_selection import cross_validate from sklearn.utils import shuffle from classifier import LogisticRegression X, y = shuffle(X, y) XX = np.hstack([X, np.ones((X.shape[0], 1))]) clf = LogisticRegression(max_iter=500, learning_rate=0.005, batchsize=3000) lr_score = cross_validate(clf, XX, y, cv=5, n_jobs=-1, scoring=scoring, error_score='raise') print('Logistic Regression') print_scores(lr_score) %reload_ext autoreload from sklearn.model_selection import cross_validate from sklearn.utils import shuffle from classifier import OneVsAllLogisticRegression X, y = shuffle(X, y) XX = np.hstack([X, np.ones((X.shape[0], 1))]) clf = OneVsAllLogisticRegression(max_iter=500, learning_rate=0.005, batchsize=3000) ovalr_score = cross_validate(clf, XX, y, cv=5, n_jobs=-1, scoring=scoring, error_score='raise') print('1vall Logistic Regression') print_scores(ovalr_score) %reload_ext autoreload from sklearn.model_selection import cross_validate from sklearn.utils import shuffle from classifier import KaryLogisticRegression X, y = shuffle(X, y) XX = np.hstack([X, np.ones((X.shape[0], 1))]) clf = KaryLogisticRegression(max_iter=500, learning_rate=0.005, batchsize=3000) klr_score = cross_validate(clf, XX, y, cv=5, n_jobs=-1, scoring=scoring, error_score='raise') print('Kary Logistic Regression') print_scores(klr_score) %reload_ext autoreload from sklearn.model_selection import cross_validate from sklearn.linear_model import 
LogisticRegression from sklearn.svm import SVC from sklearn.utils import shuffle from classifier import GaussianNaiveBayes X, y = shuffle(X, y) XX = X clf = classifier.GaussianNaiveBayes() gnb_score = cross_validate(clf, XX, y, cv=5, n_jobs=-1, scoring=scoring, error_score='raise') print('Gaussian Naive Bayes') print_scores(gnb_score) # X, y = shuffle(X, y, random_state=1) # clf = classifier.GaussianNaiveBayes() # clf.fit(X, y) # clf.save('../model/gnb_300000_histeq.pic') ```
github_jupyter
<img src="images/dask_horizontal.svg" align="right" width="30%"> # Lazy execution Here we discuss some of the concepts behind dask, and lazy execution of code. You do not need to go through this material if you are eager to get on with the tutorial, but it may help understand the concepts underlying dask, how these things fit in with techniques you might already be using, and how to understand things that can go wrong. ## Prelude As Python programmers, you probably already perform certain *tricks* to enable computation of larger-than-memory datasets, parallel execution or delayed/background execution. Perhaps with this phrasing, it is not clear what we mean, but a few examples should make things clearer. The point of Dask is to make simple things easy and complex things possible! Aside from the [detailed introduction](http://dask.pydata.org/en/latest/), we can summarize the basics of Dask as follows: - process data that doesn't fit into memory by breaking it into blocks and specifying task chains - parallelize execution of tasks across cores and even nodes of a cluster - move computation to the data rather than the other way around, to minimize communication overheads All of this allows you to get the most out of your computation resources, but program in a way that is very familiar: for-loops to build basic tasks, Python iterators, and the Numpy (array) and Pandas (dataframe) functions for multi-dimensional or tabular data, respectively. The remainder of this notebook will take you through the first of these programming paradigms. This is more detail than some users will want, who can skip ahead to the iterator, array and dataframe sections; but there will be some data processing tasks that don't easily fit into those abstractions and need to fall back to the methods here. 
We include a few examples at the end of the notebooks showing that the ideas behind how Dask is built are not actually that novel, and experienced programmers will have met parts of the design in other situations before. Those examples are left for the interested. ## Dask is a graph execution engine Dask allows you to construct a prescription for the calculation you want to carry out. That may sound strange, but a simple example will demonstrate that you can achieve this while programming with perfectly ordinary Python functions and for-loops. We saw this in Chapter 02. ``` from dask import delayed @delayed def inc(x): return x + 1 @delayed def add(x, y): return x + y ``` Here we have used the delayed annotation to show that we want these functions to operate lazily - to save the set of inputs and execute only on demand. `dask.delayed` is also a function which can do this, without the annotation, leaving the original function unchanged, e.g., ```python delayed_inc = delayed(inc) ``` ``` # this looks like ordinary code x = inc(15) y = inc(30) total = add(x, y) # incx, incy and total are all delayed objects. # They contain a prescription of how to execute ``` Calling a delayed function created a delayed object (`incx, incy, total`) - examine these interactively. Making these objects is somewhat equivalent to constructs like the `lambda` or function wrappers (see below). Each holds a simple dictionary describing the task graph, a full specification of how to carry out the computation. We can visualize the chain of calculations that the object `total` corresponds to as follows; the circles are functions, rectangles are data/results. ``` total.visualize() ``` But so far, no functions have actually been executed. This demonstrated the division between the graph-creation part of Dask (`delayed()`, in this example) and the graph execution part of Dask. 
To run the "graph" in the visualization, and actually get a result, do: ``` # execute all tasks total.compute() ``` **Why should you care about this?** By building a specification of the calculation we want to carry out before executing anything, we can pass the specification to an *execution engine* for evaluation. In the case of Dask, this execution engine could be running on many nodes of a cluster, so you have access to the full number of CPU cores and memory across all the machines. Dask will intelligently execute your calculation with care for minimizing the amount of data held in memory, while parallelizing over the tasks that make up a graph. Notice that in the animated diagram below, where four workers are processing the (simple) graph, execution progresses vertically up the branches first, so that intermediate results can be expunged before moving onto a new branch. With `delayed` and normal pythonic looped code, very complex graphs can be built up and passed on to Dask for execution. See a nice example of [simulated complex ETL](http://matthewrocklin.com/blog/work/2017/01/24/dask-custom) work flow. <img src="images/grid_search_schedule.gif"> ### Exercise We will apply `delayed` to a real data processing task, albeit a simple one. Consider reading three CSV files with `pd.read_csv` and then measuring their total length. We will consider how you would do this with ordinary Python code, then build a graph for this process using delayed, and finally execute this graph using Dask, for a handy speed-up factor of more than two (there are only three inputs to parallelize over). 
``` import pandas as pd import os filenames = [os.path.join('data', 'accounts.%d.csv' % i) for i in [0, 1, 2]] filenames %%time # normal, sequential code a = pd.read_csv(filenames[0]) b = pd.read_csv(filenames[1]) c = pd.read_csv(filenames[2]) na = len(a) nb = len(b) nc = len(c) total = sum([na, nb, nc]) print(total) ``` Your task is to recreate this graph again using the delayed function on the original Python code. The three functions you want to delay are `pd.read_csv`, `len` and `sum`.. ```python delayed_read_csv = delayed(pd.read_csv) a = delayed_read_csv(filenames[0]) ... total = ... # execute %time total.compute() ``` ``` # your verbose code here ``` Next, repeat this using loops, rather than writing out all the variables. ``` # your concise code here %load solutions/Foundations-03.py ``` **Notes** Delayed objects support various operations: ```python x2 = x + 1 ``` if `x` was a delayed result (like `total`, above), then so is `x2`. Supported operations include arithmetic operators, item or slice selection, attribute access and method calls - essentially anything that could be phrased as a `lambda` expression. Operations which are *not* supported include mutation, setter methods, iteration (for) and bool (predicate). ## Appendix: Further detail and examples The following examples show that the kinds of things Dask does are not so far removed from normal Python programming when dealing with big data. These examples are **only meant for experts**, typical users can continue with the next notebook in the tutorial. ### Example 1: simple word count This directory contains a file called `README.md`. How would you count the number of words in that file? The simplest approach would be to load all the data into memory, split on whitespace and count the number of results. Here we use a regular expression to split words. 
but may be slower for files that do not fill a large fraction of memory.
Is there any way to debug the execution to find the root of the problem? (You may need to inspect the traceback.)
`delayed` is a handy mechanism for creating the Dask graph, but the adventurous may wish to play with the full flexibility afforded by building the graph dictionaries directly.
github_jupyter
then we install transformers, datasets, librosa, jiwer and a few other libraries.
you can put your data in OVHcloud Object Storage, then during the job creation, sync this data.
github_jupyter
``` from collections import defaultdict from pathlib import Path import re import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from tensorboard.backend.event_processing.event_accumulator import EventAccumulator import toml import tqdm def logdir2df(logdir): """convert tf.events files in a logs directory into a pandas DataFrame tf.events files are created by SummaryWriter from PyTorch or Tensorflow Parameters ---------- logdir : str, Path path to directory containing tfevents file(s) saved by a SummaryWriter Returns ------- df : pandas.Dataframe with columns 'step', 'wall_time', and all Scalars from the tfevents file """ if issubclass(type(logdir), Path): logdir = str(logdir) ea = EventAccumulator(path=logdir) ea.Reload() # load all data written so far scalar_tags = ea.Tags()['scalars'] # list of tags for values written to scalar dfs = {} for scalar_tag in scalar_tags: dfs[scalar_tag] = pd.DataFrame(ea.Scalars(scalar_tag), columns=["wall_time", "step", scalar_tag.replace('val/', '')]) dfs[scalar_tag] = dfs[scalar_tag].set_index("step") dfs[scalar_tag].drop("wall_time", axis=1, inplace=True) return pd.concat([v for k, v in dfs.items()], axis=1) def logdir2csv(logdir): """convert tf.events files in a logs directory into a pandas DataFrame Parameters ---------- logdir Returns ------- """ df = logdir2df(logdir) name = list(logdir.glob('*tfevents*'))[0].name csv_fname = name + '.csv' df.to_csv(logdir.joinpath(csv_fname)) re_int = re.compile(r'[0-9]+') def int_from_dir_path(dir_path): name = dir_path.name return int(re_int.search(name)[0]) BR_RESULTS_ROOT = Path('~/Documents/repos/coding/birdsong/tweetynet/results/BirdsongRecognition').expanduser().resolve() BIRD_RESULTS_ROOT = BR_RESULTS_ROOT.joinpath('Bird8') sorted(BIRD_RESULTS_ROOT.iterdir()) RESULTS_ROOT = BIRD_RESULTS_ROOT.joinpath('results_200505_092509') train_dur_dirs = sorted(RESULTS_ROOT.glob('train_dur_*'), key=int_from_dir_path) train_dur_dirs train_history_dfs = {} for train_dur_dir in 
train_dur_dirs: train_dur = int_from_dir_path(train_dur_dir) print(f'getting tf.events files for training duration: {train_dur}') train_history_dfs[train_dur] = {} replicate_dirs = sorted(train_dur_dir.glob('replicate_*'), key=int_from_dir_path) for replicate_dir in replicate_dirs: replicate_num = int_from_dir_path(replicate_dir) print(f'\treplicate: {replicate_num}') events_file = sorted(replicate_dir.glob('**/events*')) assert len(events_file) == 1 events_file = events_file[0] logdir = events_file.parent log_df = logdir2df(logdir) train_history_dfs[train_dur][replicate_num] = log_df for train_dur, replicate_df_dict in train_history_dfs.items(): for replicate, df in replicate_df_dict.items(): df['avg_error/val'] = 1 - df['avg_acc/val'] n_train_durs = len(train_history_dfs) a_train_dur = list(train_history_dfs)[0] n_replicates = len(train_history_dfs[a_train_dur]) fig, ax = plt.subplots(n_train_durs, 5, figsize=(25, 20)) train_durs = sorted(train_history_dfs.keys()) for row_ind, train_dur in enumerate(train_durs): replicate_df_dict = train_history_dfs[train_dur] replicate_nums = sorted(replicate_df_dict.keys()) for replicate_num, df in replicate_df_dict.items(): sns.lineplot(x=df.index, y='loss/train', data=df, ax=ax[row_ind, 0], alpha=0.5) sns.lineplot(x=df.index, y='avg_loss/val', data=df, ax=ax[row_ind, 1], alpha=0.5) ax[row_ind, 1].set_ylim([0.0, 0.4]) sns.lineplot(x=df.index, y='avg_error/val', data=df, ax=ax[row_ind, 2], alpha=0.5) ax[row_ind, 2].set_ylim([0.0, 0.2]) sns.lineplot(x=df.index, y='avg_levenshtein/val', data=df, ax=ax[row_ind, 3], alpha=0.5) ax[row_ind, 3].set_ylim([100, 250]) sns.lineplot(x=df.index, y='avg_segment_error_rate/val', data=df, ax=ax[row_ind, 4], alpha=0.5) ax[row_ind, 4].set_ylim([0.0, 0.1]) learncurve_df = pd.read_csv(RESULTS_ROOT.joinpath('learning_curve.csv')) learncurve_df['avg_error'] = 1- learncurve_df['avg_acc'] fig, ax = plt.subplots(1, 4, figsize=(20, 4)) ax = ax.ravel() sns.stripplot(x='train_set_dur', y='avg_loss', 
data=learncurve_df, ax=ax[0]) sns.boxplot(x='train_set_dur', y='avg_loss', data=learncurve_df, ax=ax[0]) sns.pointplot(x='train_set_dur', y='avg_loss', data=learncurve_df, ax=ax[0]); sns.stripplot(x='train_set_dur', y='avg_error', data=learncurve_df, ax=ax[1]) sns.boxplot(x='train_set_dur', y='avg_error', data=learncurve_df, ax=ax[1]) sns.pointplot(x='train_set_dur', y='avg_error', data=learncurve_df, ax=ax[1]); sns.stripplot(x='train_set_dur', y='avg_levenshtein', data=learncurve_df, ax=ax[2]) sns.boxplot(x='train_set_dur', y='avg_levenshtein', data=learncurve_df, ax=ax[2]) sns.pointplot(x='train_set_dur', y='avg_levenshtein', data=learncurve_df, ax=ax[2]); sns.stripplot(x='train_set_dur', y='avg_segment_error_rate', data=learncurve_df, ax=ax[3]) sns.boxplot(x='train_set_dur', y='avg_segment_error_rate', data=learncurve_df, ax=ax[3]) sns.pointplot(x='train_set_dur', y='avg_segment_error_rate', data=learncurve_df, ax=ax[3]); ```
github_jupyter
``` import pandas as pd import numpy as np from datetime import datetime, timedelta import math import os import plotly.graph_objects as go from plotly.subplots import make_subplots import dash import dash_table import dash_core_components as dcc import dash_html_components as html import dash_bootstrap_components as dbc from dash.dependencies import Input, Output def df_for_lineplot_diff(dfs, CaseType): '''This is the function for construct df for line plot''' assert type(CaseType) is str, "CaseType must be one of the following three strings Confirmed/Recovered/Deaths" # Construct confirmed cases dataframe for line plot DateList = [] ChinaList =[] OtherList = [] for key, df in dfs.items(): dfTpm = df.groupby(['Country/Region'])[CaseType].agg(np.sum) dfTpm = pd.DataFrame({'Region':dfTpm.index, CaseType:dfTpm.values}) #dfTpm = dfTpm.sort_values(by=CaseType, ascending=False).reset_index(drop=True) DateList.append(df['Date_last_updated_AEDT'][0]) ChinaList.append(dfTpm.loc[dfTpm['Region'] == 'China', CaseType].iloc[0]) OtherList.append(dfTpm.loc[dfTpm['Region'] != 'China', CaseType].sum()) df = pd.DataFrame({'Date':DateList, 'Mainland China':ChinaList, 'Other locations':OtherList}) df['Total']=df['Mainland China']+df['Other locations'] # Calculate differenec in a 24-hour window for index, _ in df.iterrows(): # Calculate the time differnece in hour diff=(df['Date'][0] - df['Date'][index]).total_seconds()/3600 # find out the latest time after 24-hour if diff >= 24: break plusNum = df['Total'][0] - df['Total'][index] plusPercentNum = (df['Total'][0] - df['Total'][index])/df['Total'][index] # Select the latest data from a given date df['date_day']=[d.date() for d in df['Date']] df=df.groupby(by=df['date_day'], sort=False).transform(max).drop_duplicates(['Date']) df=df.reset_index(drop=True) return df, plusNum, plusPercentNum %%time ################################################################################ #### Data processing 
################################################################################ # Method #1 # Import csv file and store each csv in to a df list filename = os.listdir('../raw_data/') sheet_name = [i.replace('.csv', '') for i in filename if 'data' not in i and i.endswith('.csv')] sheet_name.sort(reverse=True) dfs = {sheet_name: pd.read_csv('../raw_data/{}.csv'.format(sheet_name)) for sheet_name in sheet_name} # Method #2 # Import xls file and store each sheet in to a df list #xl_file = pd.ExcelFile('./data.xls') #dfs = {sheet_name: xl_file.parse(sheet_name) # for sheet_name in xl_file.sheet_names} %%time # Method #2 # Import xls file and store each sheet in to a df list xl_file = pd.ExcelFile('./data.xls') dfs2 = {sheet_name: xl_file.parse(sheet_name) for sheet_name in xl_file.sheet_names} %%time ################################################################################ #### Data processing ################################################################################ # Method #1 # Import csv file and store each csv in to a df list filename = os.listdir('./raw_data/') sheet_name = [i.replace('.csv', '') for i in filename if 'data' not in i and i.endswith('.csv')] sheet_name.sort(reverse=True) dfs = {sheet_name: pd.read_csv('./raw_data/{}.csv'.format(sheet_name)) for sheet_name in sheet_name} # Method #2 # Import xls file and store each sheet in to a df list #xl_file = pd.ExcelFile('./data.xls') #dfs = {sheet_name: xl_file.parse(sheet_name) # for sheet_name in xl_file.sheet_names} # Data from each sheet can be accessed via key keyList = list(dfs.keys()) # Data cleansing for key, df in dfs.items(): dfs[key].loc[:,'Confirmed'].fillna(value=0, inplace=True) dfs[key].loc[:,'Deaths'].fillna(value=0, inplace=True) dfs[key].loc[:,'Recovered'].fillna(value=0, inplace=True) dfs[key]=dfs[key].astype({'Confirmed':'int64', 'Deaths':'int64', 'Recovered':'int64'}) # Change as China for coordinate search dfs[key]=dfs[key].replace({'Country/Region':'Mainland China'}, 
'China') # Add a zero to the date so can be convert by datetime.strptime as 0-padded date dfs[key]['Last Update'] = '0' + dfs[key]['Last Update'] # Convert time as Australian eastern daylight time dfs[key]['Date_last_updated_AEDT'] = [datetime.strptime(d, '%m/%d/%Y %H:%M') for d in dfs[key]['Last Update']] dfs[key]['Date_last_updated_AEDT'] = dfs[key]['Date_last_updated_AEDT'] + timedelta(hours=16) # Add coordinates for each area in the list for the latest table sheet # To save time, coordinates calling was done seperately # Import the data with coordinates dfs[keyList[0]]=pd.read_csv('{}_data.csv'.format(keyList[0])) dfs[keyList[0]]=dfs[keyList[0]].astype({'Date_last_updated_AEDT':'datetime64'}) dfs[keyList[0]] def make_country_table(countryName): '''This is the function for building df for Province/State of a given country''' countryTable = dfs[keyList[0]].loc[dfs[keyList[0]]['Country/Region'] == countryName] # Suppress SettingWithCopyWarning pd.options.mode.chained_assignment = None countryTable['Remaining'] = countryTable['Confirmed'] - countryTable['Recovered'] - countryTable['Deaths'] countryTable = countryTable[['Province/State','Remaining','Confirmed','Recovered','Deaths','lat','lon']] countryTable = countryTable.sort_values(by=['Remaining', 'Confirmed'], ascending=False).reset_index(drop=True) # Set row ids pass to selected_row_ids countryTable['id'] = countryTable['Province/State'] countryTable.set_index('id', inplace=True, drop=False) # Turn on SettingWithCopyWarning pd.options.mode.chained_assignment = 'warn' return countryTable %%time CNTable = make_country_table('China') AUSTable = make_country_table('Australia') USTable = make_country_table('US') CANTable = make_country_table('Canada') CANTable %%time # Save numbers into variables to use in the app confirmedCases=dfs[keyList[0]]['Confirmed'].sum() deathsCases=dfs[keyList[0]]['Deaths'].sum() recoveredCases=dfs[keyList[0]]['Recovered'].sum() # Construct confirmed cases dataframe for line plot and 
24-hour window case difference df_confirmed, plusConfirmedNum, plusPercentNum1 = df_for_lineplot_diff(dfs, 'Confirmed') # Construct recovered cases dataframe for line plot and 24-hour window case difference df_recovered, plusRecoveredNum, plusPercentNum2 = df_for_lineplot_diff(dfs, 'Recovered') # Construct death case dataframe for line plot and 24-hour window case difference df_deaths, plusDeathNum, plusPercentNum3 = df_for_lineplot_diff(dfs, 'Deaths') # Create data table to show in app # Generate sum values for Country/Region level dfCase = dfs[keyList[0]].groupby(by='Country/Region', sort=False).sum().reset_index() dfCase = dfCase.sort_values(by=['Confirmed'], ascending=False).reset_index(drop=True) # As lat and lon also underwent sum(), which is not desired, remove from this table. dfCase = dfCase.drop(columns=['lat','lon']) # Grep lat and lon by the first instance to represent its Country/Region dfGPS = dfs[keyList[0]].groupby(by=['Country/Region'], sort=False).first().reset_index() dfGPS = dfGPS[['Country/Region','lat','lon']] # Merge two dataframes dfSum = pd.merge(dfCase, dfGPS, how='inner', on='Country/Region') dfSum = dfSum.replace({'Country/Region':'China'}, 'Mainland China') dfSum['Remaining'] = dfSum['Confirmed'] - dfSum['Recovered'] - dfSum['Deaths'] # Rearrange columns to correspond to the number plate order dfSum = dfSum[['Country/Region','Remaining','Confirmed','Recovered','Deaths','lat','lon']] # Sort value based on Remaining cases and then Confirmed cases dfSum = dfSum.sort_values(by=['Remaining', 'Confirmed'], ascending=False).reset_index(drop=True) # Set row ids pass to selected_row_ids dfSum['id'] = dfSum['Country/Region'] dfSum.set_index('id', inplace=True, drop=False) # Save numbers into variables to use in the app latestDate=datetime.strftime(df_confirmed['Date'][0], '%b %d, %Y %H:%M AEDT') secondLastDate=datetime.strftime(df_confirmed['Date'][1], '%b %d') daysOutbreak=(df_confirmed['Date'][0] - datetime.strptime('12/31/2019', 
'%m/%d/%Y')).days ############################################################################################# #### Start to make plots ############################################################################################# # Line plot for confirmed cases # Set up tick scale based on confirmed case number tickList = list(np.arange(0, df_confirmed['Mainland China'].max()+1000, 10000)) # Create empty figure canvas fig_confirmed = go.Figure() # Add trace to the figure fig_confirmed.add_trace(go.Scatter(x=df_confirmed['Date'], y=df_confirmed['Mainland China'], mode='lines+markers', line_shape='spline', name='Mainland China', line=dict(color='#921113', width=4), marker=dict(size=4, color='#f4f4f2', line=dict(width=1,color='#921113')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_confirmed['Date']], hovertext=['Mainland China confirmed<br>{:,d} cases<br>'.format(i) for i in df_confirmed['Mainland China']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>')) fig_confirmed.add_trace(go.Scatter(x=df_confirmed['Date'], y=df_confirmed['Other locations'], mode='lines+markers', line_shape='spline', name='Other Region', line=dict(color='#eb5254', width=4), marker=dict(size=4, color='#f4f4f2', line=dict(width=1,color='#eb5254')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_confirmed['Date']], hovertext=['Other region confirmed<br>{:,d} cases<br>'.format(i) for i in df_confirmed['Other locations']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>')) # Customise layout fig_confirmed.update_layout( # title=dict( # text="<b>Confirmed Cases Timeline<b>", # y=0.96, x=0.5, xanchor='center', yanchor='top', # font=dict(size=20, color="#292929", family="Playfair Display") # ), margin=go.layout.Margin( l=10, r=10, b=10, t=5, pad=0 ), yaxis=dict( showline=False, linecolor='#272e3e', zeroline=False, #showgrid=False, gridcolor='rgba(203, 210, 211,.3)', gridwidth = .1, tickmode='array', # Set tick range based on 
the maximum number tickvals=tickList, # Set tick label accordingly ticktext=["{:.0f}k".format(i/1000) for i in tickList] ), # yaxis_title="Total Confirmed Case Number", xaxis=dict( showline=False, linecolor='#272e3e', showgrid=False, gridcolor='rgba(203, 210, 211,.3)', gridwidth = .1, zeroline=False ), xaxis_tickformat='%b %d', hovermode = 'x', legend_orientation="h", # legend=dict(x=.35, y=-.05), plot_bgcolor='#f4f4f2', paper_bgcolor='#cbd2d3', font=dict(color='#292929') ) # Line plot for combine cases # Set up tick scale based on confirmed case number tickList = list(np.arange(0, df_recovered['Mainland China'].max()+1000, 5000)) # Create empty figure canvas fig_combine = go.Figure() # Add trace to the figure fig_combine.add_trace(go.Scatter(x=df_recovered['Date'], y=df_recovered['Total'], mode='lines+markers', line_shape='spline', name='Total Recovered Cases', line=dict(color='#168038', width=4), marker=dict(size=4, color='#f4f4f2', line=dict(width=1,color='#168038')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_recovered['Date']], hovertext=['Total recovered<br>{:,d} cases<br>'.format(i) for i in df_recovered['Total']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>')) fig_combine.add_trace(go.Scatter(x=df_deaths['Date'], y=df_deaths['Total'], mode='lines+markers', line_shape='spline', name='Total Death Cases', line=dict(color='#626262', width=4), marker=dict(size=4, color='#f4f4f2', line=dict(width=1,color='#626262')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_deaths['Date']], hovertext=['Total death<br>{:,d} cases<br>'.format(i) for i in df_deaths['Total']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>')) # Customise layout fig_combine.update_layout( # title=dict( # text="<b>Confirmed Cases Timeline<b>", # y=0.96, x=0.5, xanchor='center', yanchor='top', # font=dict(size=20, color="#292929", family="Playfair Display") # ), margin=go.layout.Margin( l=10, r=10, b=10, t=5, pad=0 ), 
yaxis=dict( showline=False, linecolor='#272e3e', zeroline=False, #showgrid=False, gridcolor='rgba(203, 210, 211,.3)', gridwidth = .1, tickmode='array', # Set tick range based on the maximum number tickvals=tickList, # Set tick label accordingly ticktext=["{:.0f}k".format(i/1000) for i in tickList] ), # yaxis_title="Total Confirmed Case Number", xaxis=dict( showline=False, linecolor='#272e3e', showgrid=False, gridcolor='rgba(203, 210, 211,.3)', gridwidth = .1, zeroline=False ), xaxis_tickformat='%b %d', hovermode = 'x', legend_orientation="h", # legend=dict(x=.35, y=-.05), plot_bgcolor='#f4f4f2', paper_bgcolor='#cbd2d3', font=dict(color='#292929') ) # Line plot for death rate cases # Set up tick scale based on confirmed case number tickList = list(np.arange(0, (df_deaths['Mainland China']/df_confirmed['Mainland China']*100).max(), 0.5)) # Create empty figure canvas fig_rate = go.Figure() # Add trace to the figure fig_rate.add_trace(go.Scatter(x=df_deaths['Date'], y=df_deaths['Mainland China']/df_confirmed['Mainland China']*100, mode='lines+markers', line_shape='spline', name='Mainland China', line=dict(color='#626262', width=4), marker=dict(size=4, color='#f4f4f2', line=dict(width=1,color='#626262')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_deaths['Date']], hovertext=['Mainland China death rate<br>{:.2f}%'.format(i) for i in df_deaths['Mainland China']/df_confirmed['Mainland China']*100], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>')) fig_rate.add_trace(go.Scatter(x=df_deaths['Date'], y=df_deaths['Other locations']/df_confirmed['Other locations']*100, mode='lines+markers', line_shape='spline', name='Other Region', line=dict(color='#a7a7a7', width=4), marker=dict(size=4, color='#f4f4f2', line=dict(width=1,color='#a7a7a7')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_deaths['Date']], hovertext=['Other region death rate<br>{:.2f}%'.format(i) for i in df_deaths['Other locations']/df_confirmed['Other 
locations']*100], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>')) # Customise layout fig_rate.update_layout( margin=go.layout.Margin( l=10, r=10, b=10, t=5, pad=0 ), yaxis=dict( showline=False, linecolor='#272e3e', zeroline=False, #showgrid=False, gridcolor='rgba(203, 210, 211,.3)', gridwidth = .1, tickmode='array', # Set tick range based on the maximum number tickvals=tickList, # Set tick label accordingly ticktext=['{:.1f}'.format(i) for i in tickList] ), # yaxis_title="Total Confirmed Case Number", xaxis=dict( showline=False, linecolor='#272e3e', showgrid=False, gridcolor='rgba(203, 210, 211,.3)', gridwidth = .1, zeroline=False ), xaxis_tickformat='%b %d', hovermode = 'x', legend_orientation="h", # legend=dict(x=.35, y=-.05), plot_bgcolor='#f4f4f2', paper_bgcolor='#cbd2d3', font=dict(color='#292929') ) %%time # FUnction for generating cumulative line plot for each Country/Region Region = 'China' CaseType = ['Confirmed', 'Recovered', 'Deaths'] # Read cumulative data of a given region from ./cumulative_data folder df_region = pd.read_csv('./cumulative_data/{}.csv'.format(Region)) df_region=df_region.astype({'Date_last_updated_AEDT':'datetime64', 'date_day':'datetime64'}) # Line plot for confirmed cases # Set up tick scale based on confirmed case number #tickList = list(np.arange(0, df_confirmed['Mainland China'].max()+1000, 10000)) # Create empty figure canvas fig = make_subplots(specs=[[{"secondary_y": True}]]) # Add trace to the figure fig.add_trace(go.Scatter(x=df_region['date_day'], y=df_region['Confirmed'], mode='lines+markers', #line_shape='spline', name='Confirmed case', line=dict(color='#921113', width=2), #marker=dict(size=4, color='#f4f4f2', # line=dict(width=1,color='#921113')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_region['date_day']], hovertext=['{} confirmed<br>{:,d} cases<br>'.format(Region, i) for i in df_region['Confirmed']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>'), 
secondary_y=False,) fig.add_trace(go.Scatter(x=df_region['date_day'], y=df_region['Recovered'], mode='lines+markers', #line_shape='spline', name='Recovered case', line=dict(color='#168038', width=2), #marker=dict(size=4, color='#f4f4f2', # line=dict(width=1,color='#168038')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_region['date_day']], hovertext=['{} Recovered<br>{:,d} cases<br>'.format(Region, i) for i in df_region['Recovered']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>'), secondary_y=False,) fig.add_trace(go.Scatter(x=df_region['date_day'], y=df_region['Deaths'], mode='lines+markers', #line_shape='spline', name='Death case', line=dict(color='#626262', width=2), #marker=dict(size=4, color='#f4f4f2', # line=dict(width=1,color='#626262')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_region['date_day']], hovertext=['{} Deaths<br>{:,d} cases<br>'.format(Region, i) for i in df_region['Deaths']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>'), secondary_y=False,) fig.add_trace(go.Bar(x=df_region['date_day'], y=df_region['New'], #mode='lines+markers', #line_shape='spline', name='Daily New Cases', text=df_region['New'], marker_color='#626262', opacity = .3, #marker=dict(size=4, color='#f4f4f2', # line=dict(width=1,color='#626262')), #text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_region['date_day']], hovertext=['{} New<br>{} cases<br>'.format(Region, i) for i in df_region['New']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>' ), secondary_y=True,) # Customise layout fig.update_layout( #title=dict( # text="<b>Confirmed Cases Timeline<b>", # y=0.96, x=0.5, xanchor='center', yanchor='top', # font=dict(size=20, color="#292929", family="Playfair Display") #), margin=go.layout.Margin( l=10, r=0, b=10, t=50, pad=0 ), yaxis=dict( showline=False, linecolor='#272e3e', zeroline=False, #showgrid=False, gridcolor='rgba(203, 210, 211,.3)', gridwidth = .1, 
tickmode='array', # Set tick range based on the maximum number #tickvals=tickList, # Set tick label accordingly #ticktext=["{:.0f}k".format(i/1000) for i in tickList] ), yaxis2=dict( showline=False, linecolor='#272e3e', #zeroline=False, showgrid=False, #gridcolor='rgba(203, 210, 211,.3)', #gridwidth = .1, tickmode='array', #overlaying="y", side="right", ), # yaxis_title="Total Confirmed Case Number", xaxis=dict( showline=False, linecolor='#272e3e', showgrid=False, gridcolor='rgba(203, 210, 211,.3)', gridwidth = .1, zeroline=False ), xaxis_tickformat='%b %d', hovermode = 'x', legend_orientation="h", # legend=dict(x=.35, y=-.05), plot_bgcolor='#f4f4f2', paper_bgcolor='#cbd2d3', font=dict(color='#292929') ) ################################################################################################## #### Start dash app ################################################################################################## app = dash.Dash(__name__, assets_folder='./assets/', meta_tags=[ {"name": "author", "content": "Jun Ye"}, {"name": "description", "content": "The coronavirus COVID-19 monitor provides up-to-date data for the global spread of coronavirus."}, {"property": "og:title", "content": "Coronavirus COVID-19 Outbreak Global Cases Monitor"}, {"property": "og:type", "content": "website"}, {"property":"og:image", "content": "https://junye0798.com/post/build-a-dashboard-to-track-the-spread-of-coronavirus-using-dash/featured_hu676943c67ca727a9a973d1fe66ac6f83_849996_1200x0_resize_lanczos_2.png"}, {"property": "og:url", "content": "https://dash-coronavirus-2020.herokuapp.com/"}, {"property":"og:description", "content": "The coronavirus COVID-19 monitor provides up-to-date data for the global spread of coronavirus."}, {"name": "twitter:card", "content": "summary_large_image"}, {"name": "twitter:site", "content": "@perishleaf"}, {"name": "twitter:title", "content": "Coronavirus COVID-19 Outbreak Global Cases Monitor"}, {"name": "twitter:description", "content": "The 
coronavirus COVID-19 monitor provides up-to-date data for the global spread of coronavirus."}, {"name": "twitter:image", "content": "https://junye0798.com/post/build-a-dashboard-to-track-the-spread-of-coronavirus-using-dash/featured_hu676943c67ca727a9a973d1fe66ac6f83_849996_1200x0_resize_lanczos_2.png"}, {"name": "viewport", "content": "width=device-width, height=device-height, initial-scale=1.0"} ] ) app.title = 'Coronavirus COVID-19 Global Monitor' # Section for Google annlytic and donation # app.index_string = """<!DOCTYPE html> <html> <head> <script data-name="BMC-Widget" src="https://cdnjs.buymeacoffee.com/1.0.0/widget.prod.min.js" data-id="qPsBJAV" data-description="Support the app server for running!" data-message="Please support the app server for running!" data-color="#FF813F" data-position="right" data-x_margin="18" data-y_margin="18"></script> <!-- Global site tag (gtag.js) - Google Analytics --> <script async src="https://www.googletagmanager.com/gtag/js?id=UA-154901818-2"></script> <script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-154901818-2'); </script> {%metas%} <title>{%title%}</title> {%favicon%} {%css%} </head> <body> {%app_entry%} <footer> {%config%} {%scripts%} {%renderer%} </footer> </body> </html>""" server = app.server app.layout = html.Div(style={'backgroundColor':'#f4f4f2'}, children=[ html.Div( id="header", children=[ html.H4(children="Coronavirus (COVID-19) Outbreak Global Cases Monitor"), html.P( id="description", children="On Dec 31, 2019, the World Health Organization (WHO) was informed of \ an outbreak of “pneumonia of unknown cause” detected in Wuhan City, Hubei Province, China – the \ seventh-largest city in China with 11 million residents. 
As of {}, there are over {:,d} cases \ of COVID-19 confirmed globally.\ This dash board is developed to visualise and track the recent reported \ cases on a hourly timescale.".format(latestDate, confirmedCases), ), # html.P( # id="note", # children=['⚠️ Source from ', # html.A('The National Health Commission of China', href='http://www.nhc.gov.cn/yjb/s7860/202002/553ff43ca29d4fe88f3837d49d6b6ef1.shtml'), # ': in its February 14 official report, deducted \ # 108 previously reported deaths and 1,043 previously reported cases from the total in Hubei Province due to "repeated counting." \ # Data have been corrected for these changes.'] # ), # html.P( # id="note", # children=['⚠️ Source from ', # html.A('读卖新闻', href='https://www.yomiuri.co.jp/national/20200216-OYT1T50089/'), # ': Diamond Princess cruise confirmed 70 new infections, bringing the total infected cases to 355.'] # ), # html.P( # id="note", # children=['⚠️ Source from ', # html.A('anews', href='http://www.anews.com.tr/world/2020/02/21/iran-says-two-more-deaths-among-13-new-coronavirus-cases'), # ': Iran\'s health ministry Friday reported two more deaths among 13 new cases of coronavirus in the Islamic republic, bringing the total number of deaths to four and infections to 18.'] # ), # html.P( # id="note", # children=['⚠️ Source from ', # html.A('The New York Times', href='https://www.nytimes.com/2020/03/01/world/coronavirus-news.html'), # ': New York State Reports First Case.'] # ), html.P( id='time-stamp', #style={'fontWeight':'bold'}, children="🔴 Last updated on {}. 
(Sorry, the app server may experince short period of interruption while updating data)".format(latestDate)) ] ), html.Div( id="number-plate", style={'marginLeft':'1.5%','marginRight':'1.5%','marginBottom':'.5%'}, children=[ html.Div( style={'width':'24.4%','backgroundColor':'#cbd2d3','display':'inline-block', 'marginRight':'.8%','verticalAlign':'top'}, children=[ html.H3(style={'textAlign':'center', 'fontWeight':'bold','color':'#2674f6'}, children=[ html.P(style={'color':'#cbd2d3','padding':'.5rem'}, children='xxxx xx xxx xxxx xxx xxxxx'), '{}'.format(daysOutbreak), ]), html.H5(style={'textAlign':'center','color':'#2674f6','padding':'.1rem'}, children="Days Since Outbreak") ]), html.Div( style={'width':'24.4%','backgroundColor':'#cbd2d3','display':'inline-block', 'marginRight':'.8%','verticalAlign':'top'}, children=[ html.H3(style={'textAlign':'center', 'fontWeight':'bold','color':'#d7191c'}, children=[ html.P(style={'padding':'.5rem'}, children='+ {:,d} in the past 24h ({:.1%})'.format(plusConfirmedNum, plusPercentNum1)), '{:,d}'.format(confirmedCases) ]), html.H5(style={'textAlign':'center','color':'#d7191c','padding':'.1rem'}, children="Confirmed Cases") ]), html.Div( style={'width':'24.4%','backgroundColor':'#cbd2d3','display':'inline-block', 'marginRight':'.8%','verticalAlign':'top'}, children=[ html.H3(style={'textAlign':'center', 'fontWeight':'bold','color':'#1a9622'}, children=[ html.P(style={'padding':'.5rem'}, children='+ {:,d} in the past 24h ({:.1%})'.format(plusRecoveredNum, plusPercentNum2)), '{:,d}'.format(recoveredCases), ]), html.H5(style={'textAlign':'center','color':'#1a9622','padding':'.1rem'}, children="Recovered Cases") ]), html.Div( style={'width':'24.4%','backgroundColor':'#cbd2d3','display':'inline-block', 'verticalAlign':'top'}, children=[ html.H3(style={'textAlign':'center', 'fontWeight':'bold','color':'#6c6c6c'}, children=[ html.P(style={'padding':'.5rem'}, children='+ {:,d} in the past 24h ({:.1%})'.format(plusDeathNum, 
plusPercentNum3)), '{:,d}'.format(deathsCases) ]), html.H5(style={'textAlign':'center','color':'#6c6c6c','padding':'.1rem'}, children="Death Cases") ]) ]), html.Div( id='dcc-plot', style={'marginLeft':'1.5%','marginRight':'1.5%','marginBottom':'.35%','marginTop':'.5%'}, children=[ html.Div( style={'width':'32.79%','display':'inline-block','marginRight':'.8%','verticalAlign':'top'}, children=[ html.H5(style={'textAlign':'center','backgroundColor':'#cbd2d3', 'color':'#292929','padding':'1rem','marginBottom':'0'}, children='Confirmed Case Timeline'), dcc.Graph(style={'height':'300px'},figure=fig_confirmed)]), html.Div( style={'width':'32.79%','display':'inline-block','marginRight':'.8%','verticalAlign':'top'}, children=[ html.H5(style={'textAlign':'center','backgroundColor':'#cbd2d3', 'color':'#292929','padding':'1rem','marginBottom':'0'}, children='Recovered/Death Case Timeline'), dcc.Graph(style={'height':'300px'},figure=fig_combine)]), html.Div( style={'width':'32.79%','display':'inline-block','verticalAlign':'top'}, children=[ html.H5(style={'textAlign':'center','backgroundColor':'#cbd2d3', 'color':'#292929','padding':'1rem','marginBottom':'0'}, children='Death Rate (%) Timeline'), dcc.Graph(style={'height':'300px'},figure=fig_rate)])]), html.Div( id='dcc-map', style={'marginLeft':'1.5%','marginRight':'1.5%','marginBottom':'.5%'}, children=[ html.Div(style={'width':'66.41%','marginRight':'.8%','display':'inline-block','verticalAlign':'top'}, children=[ html.H5(style={'textAlign':'center','backgroundColor':'#cbd2d3', 'color':'#292929','padding':'1rem','marginBottom':'0'}, children='Latest Coronavirus Outbreak Map'), dcc.Graph( id='datatable-interact-map', style={'height':'500px'},), dcc.Graph( id='datatable-interact-lineplot', style={'height':'300px'},), ]), html.Div(style={'width':'32.79%','display':'inline-block','verticalAlign':'top'}, children=[ html.H5(style={'textAlign':'center','backgroundColor':'#cbd2d3', 
'color':'#292929','padding':'1rem','marginBottom':'0'}, children='Cases by Country/Region'), dcc.Tabs( value='tab-1', parent_className='custom-tabs', className='custom-tabs-container', children=[ dcc.Tab(label='The World', value='tab-1', className='custom-tab', selected_className='custom-tab--selected', children=[ dash_table.DataTable( id='datatable-interact-location', # Don't show coordinates columns=[{"name": i, "id": i} for i in dfSum.columns[0:5]], # But still store coordinates in the table for interactivity data=dfSum.to_dict("rows"), row_selectable="single", sort_action="native", style_as_list_view=True, style_cell={'font_family':'Arial', 'font_size':'1.2rem', 'padding':'.1rem', 'backgroundColor':'#f4f4f2',}, fixed_rows={'headers':True,'data':0}, style_table={'minHeight': '750px', 'height': '750px', 'maxHeight': '750px' #'Height':'300px', #'overflowY':'scroll', #'overflowX':'scroll', }, style_header={'backgroundColor':'#f4f4f2', 'fontWeight':'bold'}, style_cell_conditional=[{'if': {'column_id':'Country/Regions'},'width':'28%'}, {'if': {'column_id':'Remaining'},'width':'18%'}, {'if': {'column_id':'Confirmed'},'width':'18%'}, {'if': {'column_id':'Recovered'},'width':'18%'}, {'if': {'column_id':'Deaths'},'width':'18%'}, {'if': {'column_id':'Confirmed'},'color':'#d7191c'}, {'if': {'column_id':'Recovered'},'color':'#1a9622'}, {'if': {'column_id':'Deaths'},'color':'#6c6c6c'}, {'textAlign': 'center'}], ) ]), dcc.Tab(label='Australia', className='custom-tab', selected_className='custom-tab--selected', children=[ dash_table.DataTable( # Don't show coordinates columns=[{"name": i, "id": i} for i in AUSTable.columns[0:5]], # But still store coordinates in the table for interactivity data=AUSTable.to_dict("rows"), #row_selectable="single", sort_action="native", style_as_list_view=True, style_cell={'font_family':'Arial', 'font_size':'1.2rem', 'padding':'.1rem', 'backgroundColor':'#f4f4f2',}, fixed_rows={'headers':True,'data':0}, style_table={'minHeight': '750px', 
'height': '750px', 'maxHeight': '750px' #'Height':'300px', #'overflowY':'scroll', #'overflowX':'scroll', }, style_header={'backgroundColor':'#f4f4f2', 'fontWeight':'bold'}, style_cell_conditional=[{'if': {'column_id':'Province/State'},'width':'28%'}, {'if': {'column_id':'Remaining'},'width':'18%'}, {'if': {'column_id':'Confirmed'},'width':'18%'}, {'if': {'column_id':'Recovered'},'width':'18%'}, {'if': {'column_id':'Deaths'},'width':'18%'}, {'if': {'column_id':'Confirmed'},'color':'#d7191c'}, {'if': {'column_id':'Recovered'},'color':'#1a9622'}, {'if': {'column_id':'Deaths'},'color':'#6c6c6c'}, {'textAlign': 'center'}], ) ]), dcc.Tab(label='Canada', className='custom-tab', selected_className='custom-tab--selected', children=[ dash_table.DataTable( # Don't show coordinates columns=[{"name": i, "id": i} for i in CANTable.columns[0:5]], # But still store coordinates in the table for interactivity data=CANTable.to_dict("rows"), #row_selectable="single", sort_action="native", style_as_list_view=True, style_cell={'font_family':'Arial', 'font_size':'1.2rem', 'padding':'.1rem', 'backgroundColor':'#f4f4f2',}, fixed_rows={'headers':True,'data':0}, style_table={'minHeight': '750px', 'height': '750px', 'maxHeight': '750px' #'Height':'300px', #'overflowY':'scroll', #'overflowX':'scroll', }, style_header={'backgroundColor':'#f4f4f2', 'fontWeight':'bold'}, style_cell_conditional=[{'if': {'column_id':'Province/State'},'width':'28%'}, {'if': {'column_id':'Remaining'},'width':'18%'}, {'if': {'column_id':'Confirmed'},'width':'18%'}, {'if': {'column_id':'Recovered'},'width':'18%'}, {'if': {'column_id':'Deaths'},'width':'18%'}, {'if': {'column_id':'Confirmed'},'color':'#d7191c'}, {'if': {'column_id':'Recovered'},'color':'#1a9622'}, {'if': {'column_id':'Deaths'},'color':'#6c6c6c'}, {'textAlign': 'center'}], ) ]), dcc.Tab(label='Mainland China', className='custom-tab', selected_className='custom-tab--selected', children=[ dash_table.DataTable( # Don't show coordinates columns=[{"name": i, 
"id": i} for i in CNTable.columns[0:5]], # But still store coordinates in the table for interactivity data=CNTable.to_dict("rows"), #row_selectable="single", sort_action="native", style_as_list_view=True, style_cell={'font_family':'Arial', 'font_size':'1.2rem', 'padding':'.1rem', 'backgroundColor':'#f4f4f2',}, fixed_rows={'headers':True,'data':0}, style_table={'minHeight': '750px', 'height': '750px', 'maxHeight': '750px' #'Height':'300px', #'overflowY':'scroll', #'overflowX':'scroll', }, style_header={'backgroundColor':'#f4f4f2', 'fontWeight':'bold'}, style_cell_conditional=[{'if': {'column_id':'Province/State'},'width':'28%'}, {'if': {'column_id':'Remaining'},'width':'18%'}, {'if': {'column_id':'Confirmed'},'width':'18%'}, {'if': {'column_id':'Recovered'},'width':'18%'}, {'if': {'column_id':'Deaths'},'width':'18%'}, {'if': {'column_id':'Confirmed'},'color':'#d7191c'}, {'if': {'column_id':'Recovered'},'color':'#1a9622'}, {'if': {'column_id':'Deaths'},'color':'#6c6c6c'}, {'textAlign': 'center'}], ) ]), dcc.Tab(label='United States', className='custom-tab', selected_className='custom-tab--selected', children=[ dash_table.DataTable( # Don't show coordinates columns=[{"name": i, "id": i} for i in USTable.columns[0:5]], # But still store coordinates in the table for interactivity data=USTable.to_dict("rows"), #row_selectable="single", sort_action="native", style_as_list_view=True, style_cell={'font_family':'Arial', 'font_size':'1.2rem', 'padding':'.1rem', 'backgroundColor':'#f4f4f2',}, fixed_rows={'headers':True,'data':0}, style_table={'minHeight': '760px', 'height': '760px', 'maxHeight': '760px' #'Height':'300px', #'overflowY':'scroll', #'overflowX':'scroll', }, style_header={'backgroundColor':'#f4f4f2', 'fontWeight':'bold'}, style_cell_conditional=[{'if': {'column_id':'Province/State'},'width':'28%'}, {'if': {'column_id':'Remaining'},'width':'18%'}, {'if': {'column_id':'Confirmed'},'width':'18%'}, {'if': {'column_id':'Recovered'},'width':'18%'}, {'if': 
{'column_id':'Deaths'},'width':'18%'}, {'if': {'column_id':'Confirmed'},'color':'#d7191c'}, {'if': {'column_id':'Recovered'},'color':'#1a9622'}, {'if': {'column_id':'Deaths'},'color':'#6c6c6c'}, {'textAlign': 'center'}], ) ]),] ) ]) ]), html.Div( id='my-footer', style={'marginLeft':'1.5%','marginRight':'1.5%'}, children=[ html.P(style={'textAlign':'center','margin':'auto'}, children=[" 🙏 God Bless the World 🙏 |", " Developed by ",html.A('Jun', href='https://junye0798.com/')," with ❤️ in Sydney"])]), ]) @app.callback( Output('datatable-interact-map', 'figure'), [Input('datatable-interact-location', 'derived_virtual_selected_rows'), Input('datatable-interact-location', 'selected_row_ids')] ) def update_figures(derived_virtual_selected_rows, selected_row_ids): # When the table is first rendered, `derived_virtual_data` and # `derived_virtual_selected_rows` will be `None`. This is due to an # idiosyncracy in Dash (unsupplied properties are always None and Dash # calls the dependent callbacks when the component is first rendered). # So, if `rows` is `None`, then the component was just rendered # and its value will be the same as the component's dataframe. # Instead of setting `None` in here, you could also set # `derived_virtual_data=df.to_rows('dict')` when you initialize # the component. 
if derived_virtual_selected_rows is None: derived_virtual_selected_rows = [] dff = dfSum mapbox_access_token = "pk.eyJ1IjoicGxvdGx5bWFwYm94IiwiYSI6ImNqdnBvNDMyaTAxYzkzeW5ubWdpZ2VjbmMifQ.TXcBE-xg9BFdV2ocecc_7g" # Generate a list for hover text display textList=[] for area, region in zip(dfs[keyList[0]]['Province/State'], dfs[keyList[0]]['Country/Region']): if type(area) is str: if region == "Hong Kong" or region == "Macau" or region == "Taiwan": textList.append(area) else: textList.append(area+', '+region) else: textList.append(region) # Generate a list for color gradient display colorList=[] for comfirmed, recovered, deaths in zip(dfs[keyList[0]]['Confirmed'],dfs[keyList[0]]['Recovered'],dfs[keyList[0]]['Deaths']): remaining = recovered / (comfirmed - deaths) colorList.append(remaining) fig2 = go.Figure(go.Scattermapbox( lat=dfs[keyList[0]]['lat'], lon=dfs[keyList[0]]['lon'], mode='markers', marker=go.scattermapbox.Marker( color=['#d7191c' if i < 1 else '#1a9622' for i in colorList], size=[i**(1/3) for i in dfs[keyList[0]]['Confirmed']], sizemin=1, sizemode='area', sizeref=2.*max([math.sqrt(i) for i in dfs[keyList[0]]['Confirmed']])/(100.**2), ), text=textList, hovertext=['Comfirmed: {}<br>Recovered: {}<br>Death: {}'.format(i, j, k) for i, j, k in zip(dfs[keyList[0]]['Confirmed'], dfs[keyList[0]]['Recovered'], dfs[keyList[0]]['Deaths'])], hovertemplate = "<b>%{text}</b><br><br>" + "%{hovertext}<br>" + "<extra></extra>") ) fig2.update_layout( plot_bgcolor='#151920', paper_bgcolor='#cbd2d3', margin=go.layout.Margin(l=10,r=10,b=10,t=0,pad=40), hovermode='closest', transition = {'duration':50}, annotations=[ dict( x=.5, y=-.01, align='center', showarrow=False, text="Points are placed based on data geolocation levels.<br><b>Province/State level<b> - China, Australia, United States, and Canada; <b>Country level<b> - other countries.", xref="paper", yref="paper", font=dict(size=10, color='#292929'), )], mapbox=go.layout.Mapbox( accesstoken=mapbox_access_token, 
style="light", # The direction you're facing, measured clockwise as an angle from true north on a compass bearing=0, center=go.layout.mapbox.Center( lat=14.056159 if len(derived_virtual_selected_rows)==0 else dff.loc[selected_row_ids[0]].lat, lon=22.920039 if len(derived_virtual_selected_rows)==0 else dff.loc[selected_row_ids[0]].lon ), pitch=0, zoom=1.03 if len(derived_virtual_selected_rows)==0 else 4 ) ) return fig2 @app.callback( Output('datatable-interact-lineplot', 'figure'), [Input('datatable-interact-location', 'derived_virtual_selected_rows'), Input('datatable-interact-location', 'selected_row_ids')] ) def update_lineplot(derived_virtual_selected_rows, selected_row_ids): if derived_virtual_selected_rows is None: derived_virtual_selected_rows = [] dff = dfSum if selected_row_ids: if dff.loc[selected_row_ids[0]]['Country/Region'] == 'Mainland China': Region = 'China' else: Region = dff.loc[selected_row_ids[0]]['Country/Region'] else: Region = 'Australia' # Read cumulative data of a given region from ./cumulative_data folder df_region = pd.read_csv('./cumulative_data/{}.csv'.format(Region)) df_region=df_region.astype({'Date_last_updated_AEDT':'datetime64', 'date_day':'datetime64'}) # Line plot for confirmed cases # Set up tick scale based on confirmed case number #tickList = list(np.arange(0, df_confirmed['Mainland China'].max()+1000, 10000)) # Create empty figure canvas fig3 = make_subplots(specs=[[{"secondary_y": True}]]) # Add trace to the figure fig3.add_trace(go.Scatter(x=df_region['date_day'], y=df_region['Confirmed'], mode='lines+markers', line_shape='spline', name='Confirmed case', line=dict(color='#921113', width=2), #marker=dict(size=4, color='#f4f4f2', # line=dict(width=1,color='#921113')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_region['date_day']], hovertext=['{} confirmed<br>{:,d} cases<br>'.format(Region, i) for i in df_region['Confirmed']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>'), 
secondary_y=False,) fig3.add_trace(go.Scatter(x=df_region['date_day'], y=df_region['Recovered'], mode='lines+markers', line_shape='spline', name='Recovered case', line=dict(color='#168038', width=2), #marker=dict(size=4, color='#f4f4f2', # line=dict(width=1,color='#168038')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_region['date_day']], hovertext=['{} Recovered<br>{:,d} cases<br>'.format(Region, i) for i in df_region['Recovered']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>'), secondary_y=False,) fig3.add_trace(go.Scatter(x=df_region['date_day'], y=df_region['Deaths'], mode='lines+markers', line_shape='spline', name='Death case', line=dict(color='#626262', width=2), #marker=dict(size=4, color='#f4f4f2', # line=dict(width=1,color='#626262')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_region['date_day']], hovertext=['{} Deaths<br>{:,d} cases<br>'.format(Region, i) for i in df_region['Deaths']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>'), secondary_y=False,) fig3.add_trace(go.Bar(x=df_region['date_day'], y=df_region['New'], #mode='lines+markers', #line_shape='spline', name='Daily New Cases', marker_color='#626262', opacity = .3, #marker=dict(size=4, color='#f4f4f2', # line=dict(width=1,color='#626262')), text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_region['date_day']], hovertext=['{} New<br>{} cases<br>'.format(Region, i) for i in df_region['New']], hovertemplate='<b>%{text}</b><br></br>'+ '%{hovertext}'+ '<extra></extra>'), secondary_y=True,) # Customise layout fig3.update_layout( #title=dict( # text="<b>Confirmed Cases Timeline<b>", # y=0.96, x=0.5, xanchor='center', yanchor='top', # font=dict(size=20, color="#292929", family="Playfair Display") #), margin=go.layout.Margin( l=10, r=10, b=10, t=5, pad=0 ), annotations=[ dict( x=.5, y=.4, xref="paper", yref="paper", text=Region, opacity=0.5, font=dict(family='Arial, sans-serif', size=60, color="grey"), ) ], 
yaxis=dict( showline=False, linecolor='#272e3e', zeroline=False, #showgrid=False, gridcolor='rgba(203, 210, 211,.3)', gridwidth = .1, tickmode='array', # Set tick range based on the maximum number #tickvals=tickList, # Set tick label accordingly #ticktext=["{:.0f}k".format(i/1000) for i in tickList] ), xaxis_title="Cumulative Cases (Select Country/Region From Table)", xaxis=dict( showline=False, linecolor='#272e3e', showgrid=False, gridcolor='rgba(203, 210, 211,.3)', gridwidth = .1, zeroline=False ), xaxis_tickformat='%b %d', #transition = {'duration':500}, hovermode = 'x', legend_orientation="h", legend=dict(x=.02, y=.95, bgcolor="rgba(0,0,0,0)",), plot_bgcolor='#f4f4f2', paper_bgcolor='#cbd2d3', font=dict(color='#292929') ) return fig3 if __name__ == '__main__': app.run_server(port=8882) ```
github_jupyter
``` # libraries import pandas as pd import matplotlib.pyplot as plt import numpy as np import seaborn as sns # reading dataset df = pd.read_csv('/home/pedro/Desktop/covid/influd_5may/INFLU20-04052020.csv',sep=';',encoding = "ISO-8859-1") # Selecting people from South Center with COVID-19 df = df[df['PCR_SARS2']==1][(df['SG_UF_NOT']=='SP') | (df['SG_UF_NOT']=='RJ')| (df['SG_UF_NOT']=='RS')\ | (df['SG_UF_NOT']=='SC')| (df['SG_UF_NOT']=='DF')| (df['SG_UF_NOT']=='ES')| (df['SG_UF_NOT']=='PR')\ | (df['SG_UF_NOT']=='MG')| (df['SG_UF_NOT']=='MS')| (df['SG_UF_NOT']=='GO')| (df['SG_UF_NOT']=='MT')]\ [df['CS_RACA']!=9.0][df['CS_RACA'].notna()][df['HOSPITAL']==1] # Selecting people from North Northeast with COVID-19 #df = df[df['PCR_SARS2']==1][(df['SG_UF_NOT']=='RO') | (df['SG_UF_NOT']=='TO')| (df['SG_UF_NOT']=='RN')\ #| (df['SG_UF_NOT']=='PE')| (df['SG_UF_NOT']=='AM')| (df['SG_UF_NOT']=='SE')| (df['SG_UF_NOT']=='CE')\ #| (df['SG_UF_NOT']=='PB')| (df['SG_UF_NOT']=='BA')| (df['SG_UF_NOT']=='RR')| (df['SG_UF_NOT']=='AC')\ #| (df['SG_UF_NOT']=='PA')| (df['SG_UF_NOT']=='AL')| (df['SG_UF_NOT']=='PI')| (df['SG_UF_NOT']=='MA') #| (df['SG_UF_NOT']=='AP')][df['CS_RACA']!=9.0][df['CS_RACA'].notna()][df['HOSPITAL']==1] #print('confirmed data in North Northeast:',df.shape[0]) print('confirmed data in South Center:',df.shape[0]) # comorbidities df = df[['CS_RACA','EVOLUCAO','CARDIOPATI','HEMATOLOGI','HEPATICA','ASMA','DIABETES','NEUROLOGIC','PNEUMOPATI','IMUNODEPRE',\ 'RENAL','OBESIDADE']] # replacing values df = df.fillna(0) df.iloc[:,2:] = df.iloc[:,2:].replace([9,2], 0) # adding comorbidities df['sum_comorbidities'] = df.iloc[:,2:].sum(axis=1) # Normalization white = df['sum_comorbidities'][((df['CS_RACA'] == 1)& ((df['EVOLUCAO']==1) |(df['EVOLUCAO']==2)))].value_counts().sort_index(ascending=True) black = df['sum_comorbidities'][((df['CS_RACA'] == 2)& ((df['EVOLUCAO']==1) |(df['EVOLUCAO']==2)))].value_counts().sort_index(ascending=True) brown = df['sum_comorbidities'][((df['CS_RACA'] 
== 4)& ((df['EVOLUCAO']==1) |(df['EVOLUCAO']==2)))].value_counts().sort_index(ascending=True) yellow = df['sum_comorbidities'][((df['CS_RACA'] == 3)& ((df['EVOLUCAO']==1) |(df['EVOLUCAO']==2)))].value_counts().sort_index(ascending=True) # Death and cured people white_dea = df['sum_comorbidities'][((df['CS_RACA'] == 1)&(df['EVOLUCAO']==2))].value_counts().sort_index(ascending=True) black_dea = df['sum_comorbidities'][((df['CS_RACA'] == 2)&(df['EVOLUCAO']==2))].value_counts().sort_index(ascending=True) yellow_dea = df['sum_comorbidities'][((df['CS_RACA'] == 3)&(df['EVOLUCAO']==2))].value_counts().sort_index(ascending=True) brown_dea = df['sum_comorbidities'][((df['CS_RACA'] == 4)&(df['EVOLUCAO']==2))].value_counts().sort_index(ascending=True) white_cur = df['sum_comorbidities'][((df['CS_RACA'] == 1)&(df['EVOLUCAO']==1))].value_counts().sort_index(ascending=True) black_cur = df['sum_comorbidities'][((df['CS_RACA'] == 2)&(df['EVOLUCAO']==1))].value_counts().sort_index(ascending=True) yellow_cur = df['sum_comorbidities'][((df['CS_RACA'] == 3)&(df['EVOLUCAO']==1))].value_counts().sort_index(ascending=True) brown_cur = df['sum_comorbidities'][((df['CS_RACA'] == 4)&(df['EVOLUCAO']==1))].value_counts().sort_index(ascending=True) # 2 = death 1 = cure # Plotting the graphic plt.figure(figsize=(10,5)) barWidth = 0.15 plt.bar(white_cur.index-0.25, white_cur/white.sum(), color='pink', alpha =0.5, width=barWidth, edgecolor='black') plt.bar(brown_cur.index- 0.1, brown_cur/brown.sum(), color='brown', alpha =0.5, width=barWidth, edgecolor='black') plt.bar(black_cur.index+ 0.05, black_cur/black.sum() , color='black', alpha =0.5, width=barWidth, edgecolor='black') plt.bar(yellow_cur.index + 0.20, yellow_cur/yellow.sum() ,color='yellow', alpha =0.5, width=barWidth, edgecolor='black') plt.bar(white_dea.index-0.25, -white_dea/white.sum(), color='pink', alpha =0.5, width=barWidth, edgecolor='black', label='Branca') plt.bar(brown_dea.index - 0.1, -brown_dea/brown.sum(), color='brown', 
alpha =0.5, width=barWidth, edgecolor='black', label='Parda') plt.bar(black_dea.index+ 0.05, -black_dea/black.sum() , color='black', alpha =0.5, width=barWidth, edgecolor='black', label='Preta') plt.bar(yellow_dea.index + 0.20, -yellow_dea/yellow.sum() ,color='yellow', alpha =0.5, width=barWidth, edgecolor='black', label='Amarela') plt.ylabel('Prevalence', fontsize=20) plt.xlabel('Number of Comorbidities', fontsize=20) plt.xticks(fontsize=15) plt.yticks(fontsize=15) plt.title('Central-South', fontsize=20) #plt.title('North', fontsize=20) plt.axhline(y=0,linewidth=1,color='k') plt.legend(fontsize=12) plt.text(4, 0.2, 'Survivor', fontsize=20) plt.text(4, -0.15, 'Non-Survivor', fontsize=20) plt.tight_layout() plt.savefig('south_comorbidities.pdf') #plt.savefig('north_comorbidities.pdf') plt.show() plt.show() ```
github_jupyter
## *DISCLAIMER* <p style="font-size:16px; color:#117d30;"> By accessing this code, you acknowledge the code is made available for presentation and demonstration purposes only and that the code: (1) is not subject to SOC 1 and SOC 2 compliance audits; (2) is not designed or intended to be a substitute for the professional advice, diagnosis, treatment, or judgment of a certified financial services professional; (3) is not designed, intended or made available as a medical device; and (4) is not designed or intended to be a substitute for professional medical advice, diagnosis, treatment or judgement. Do not use this code to replace, substitute, or provide professional financial advice or judgment, or to replace, substitute or provide medical advice, diagnosis, treatment or judgement. You are solely responsible for ensuring the regulatory, legal, and/or contractual compliance of any use of the code, including obtaining any authorizations or consents, and any solution you choose to build that incorporates this code in whole or in part. </p> # Production Performance - Near Real-Time Analysis <h2><span style="color: #117d30;"> Using Azure Cosmos DB - Azure Synapse Link </span></h2> # Overview Consider a typical industry scenario in which a conveyor belt scans or checks the quality of manufactured products and categorizes them into three categories viz. $Good$ OR $Snag$ OR $Reject$. The product quality whole process gets saved to a local database and gets pushed to the Synapse SQL server instance every night. Normally, we create an ETL pipeline that runs every night on the above data and creates meaningful insights out of it. So in such a case, to see the overall performance of machines, we need to wait for 24 hours. But, what if we can get this analysis in near real-time, say every 30 minutes or so, then this will make a huge difference, and can also derive many possibilities for changes. 
Here, in this notebook, we will showcase the capability of Azure Cosmos DB with Azure Synapse Link and show how we can achieve near real-time analytics on data. See the workflow below.
You can find # the same in Synapse Studio -> Data -> Linked (Tab) -> Cosmos DB -> Your linked service name. # We also specify Cosmos DB Container, which is mfg-mes-quality in this case. df_ProductionPerformance = spark.read\ .format("cosmos.olap")\ .option("spark.synapse.linkedService", "#COSMOS_LINKED_SERVICE#")\ .option("spark.cosmos.container", "mfg-mes-quality1")\ .load() # To proceed further for performing analysis in the Notebook, we need to import few of Python's data processing # libraries. Additionally, we will use matplotlib library to plot the required charts. import pandas as pd import numpy as np import matplotlib.pyplot as plt; plt.rcdefaults() from datetime import datetime ``` ## Data Transformation ``` # Let's load data into Python DataFrame. Here, we convert Spark DataFrame into Pandas DataFrame, which # in combination with Matplotlib provides an extensive set of tools for creating visualizations. # Once we have fetched Telemetry data we convert raw Timestamp values in String to datetime object. # This allows us perform aggregation as needed over the Time Series. # converting Spark DataFrame to Pandas for data exploration and visualization df_ProductionQuality = df_ProductionPerformance.toPandas() # converting a column datetime object column df_ProductionQuality['ProductionMonth'] = pd.to_datetime(df_ProductionQuality['ProductionMonth']) ``` ## Data Aggregation - We will **aggregate data for each day** grouped by machines. ``` # We receive Quality data from MES System at regular interval. To better understand this data, we perform grouping on basis of: # 1. Date # 2. Machine Instance # Post grouping, let's see how data looks by displaying entire Aggregated DataFrame. 
# setting datetime index on Pandas DataFrame df_ProductionQuality.set_index(df_ProductionQuality["ProductionMonth"],inplace=True) # grouping data by machine and aggregating using daily mean df_AggProductionQuality = df_ProductionQuality.groupby(["MachineInstance"], sort=False).resample('D').mean().reset_index() display(df_AggProductionQuality.head()) ``` ## Tabulating the daywise averages for machines - The above dataFrame (df_ProdPerfBatch) has aggregated data per day for each machine. - We will showcase this using a Table. - Lets keep a **threshold of 95%**. whenever a machine's daily average falls below 95% it will be marked as critical. ``` # It's time to visualize our DataFrame and identify what fits our requirements from a business perspective. # As a business rule, threshold of 95% is minimum acceptable Quality. # Lower than 95% threshold indicates a problem which may need further inspection. # For visualizing data, we use matplotlib to draw a table # with yellow background indicating a problem. 
# plotting a table to see percentage of daily quality data for each machine cols = df_AggProductionQuality['ProductionMonth'].unique() index = df_AggProductionQuality['MachineInstance'].unique() index.sort() df_AvgMachineProduction = pd.DataFrame(index=index,columns=cols) df_TableColors = pd.DataFrame(index=index,columns=cols) rgb_normal = '#01B8AA' rgb_alert = '#E2B803' for i,row in df_AggProductionQuality.iterrows(): r = row['MachineInstance'] c = row['ProductionMonth'] avg_str = str(round(row['Avg'],2))[:5] if len(avg_str) < 5: avg_str+='0' df_AvgMachineProduction.loc[r,c] = avg_str if(row['Avg']<95): df_TableColors.loc[r,c]=rgb_alert else: df_TableColors.loc[r,c]=rgb_normal fig, ax = plt.subplots(figsize = (4,4)) ax.axis('off') ax.axis('tight') colNames = [dt.strftime("%d %b") for dt in pd.to_datetime(cols)] the_table = ax.table(cellText=np.array(df_AvgMachineProduction),cellColours=np.array(df_TableColors), cellLoc ='center', colWidths=[0.15]*len(cols) ,rowLabels=index,colLabels=colNames,loc='center') the_table.auto_set_font_size(False) the_table.set_fontsize(12) the_table.scale(3,2) fig.tight_layout() plt.show() ``` ## Horizontal Stacked Bar Chart Visualization - Quality data contains number of items processed into three categories : - Good - Snag - Reject - We will plot it as a Stack chart. ``` # Once we have plotted Quality averages, we need to see distribution between elements of Quality, viz, Good, Snag and Reject. # To plot elements of Quality, we use Horizontally Stacked Bar Charts, with customized starting point to see all values. 
# Plotting a horizontal stacked bar chart to show daily 'Good', 'Snag', and 'Reject' outputs for each machine # Note: These numbers represent the production calculated by averaging each batch in a day and summed across all machines df_stackPlot = df_ProductionQuality.resample('D').sum().reset_index() y = df_stackPlot['ProductionMonth'] df_Good = df_stackPlot['Good'] df_Snag = df_stackPlot['Snag'] df_Reject = df_stackPlot['Reject'] #Horizontal Stack Bar Plot fig, axs = plt.subplots(figsize=(13,7)) y_index = np.arange(len(y)) color_good = ["#02B480"] color_snag= ["#E2B803"] color_reject = ["#FD6C53"] colNames = [dt.strftime("%d-%b") for dt in pd.to_datetime(y)] bar1 = axs.barh(y_index, df_Good, alpha=0.7, color = color_good, linewidth = 2, edgecolor = color_good, tick_label=colNames) bar2 = axs.barh(y_index, df_Snag,left=df_Good, alpha=0.7, color = color_snag,linewidth= 2,edgecolor=color_snag,tick_label=colNames) bar3 = axs.barh(y_index, df_Reject, left =(df_Snag+df_Good), alpha=0.7, color = color_reject,linewidth= 2,edgecolor=color_reject,tick_label=colNames) # setting x limit min_val = min(df_Good) max_val = max(df_Good+df_Snag+df_Reject) diff = max_val - min_val x_min = 2000 # min_val-diff/2 x_max = 11000 #max_val+diff/2 axs.set_xlim(x_min, x_max) # writing text inside bars for i, patch in enumerate(bar1.get_children()): bl = patch.get_xy() x_cord = x_min/2 + 0.5*patch.get_width() + bl[0] y_cord = 0.5*patch.get_height() + bl[1] axs.text(x_cord,y_cord, str(int(round(df_Good[i]))), ha='center',va='center') axs.set_xlabel('Production') axs.set_ylabel('Date') fig.suptitle('Overall Production') plt.legend((bar1[0], bar2[0], bar3[0]), ('Good', 'Snag', 'Reject')) plt.show() ``` ## Horizontal Bar Chart Visualization - To get an insight of which machine is behaving abnormaly or showing poor performance, lets compare all of them using a bar chart. - We are calculating mean of each machine's production AVG for all seven days. 
``` # Up until now, we have been visualizing daily metrics. To interprete exact problem, # we also need to see how each machine Quality output looks like. By combining data from both # sides, we can pinpoint a specific machine that appears to be under performing. # plotting bar graph chart to show total quality of production for each machine #Bar Chart df_MachineTotalAvg = df_ProductionQuality.groupby(["MachineInstance"], sort=True).mean() # creating a list of avg production of each machine performance = df_MachineTotalAvg['Avg'] names= df_MachineTotalAvg.index.tolist() # plotting bar graph plt.figure(figsize=(13,7)) y_pos = np.arange(len(names)) edge_colour = ["#FFA132"] color_map = ['#FFA132', '#FFAA46', '#FFB35A', '#FFBC6D', '#FFC581', '#FFCE94', '#FFD7A8'] plt.barh(y_pos, performance, align='center', alpha=1, color = color_map, edgecolor=color_map) plt.yticks(y_pos, names) perf_min = min(performance) perf_max = max(performance) perf_diff = (perf_max - perf_min)/2 plt.xlim(perf_min-perf_diff, perf_max+perf_diff) plt.xlabel('Percentage') plt.ylabel('Machines') plt.title('Production Performance of Machines') plt.show() # Let's visualize correlation between Quality of production of all machines per day against number of Good pieces produced per day. # For doing the same, we are going to use Date on X-Axis. Stackbars will represent Average quality output of all machines. # Line overlapped over Stackbars will represent number of Good pieces produced across all machines for that day. 
# plotting graph to show the correlation between daily overall quality and average quality of all batches in a day df_barPlot = df_AggProductionQuality.set_index(df_AggProductionQuality["ProductionMonth"]).resample('D').mean().reset_index() df_linePlot = df_AggProductionQuality.set_index(df_AggProductionQuality["ProductionMonth"]).resample('D').sum().reset_index() x = df_barPlot['ProductionMonth'].tolist() x_Names = [dt.strftime("%d-%b") for dt in pd.to_datetime(x)] x_pos = np.arange(len(x)) y = df_barPlot['Avg'].tolist() y_min = min(y) y_max = max(y) y_diff = (y_max - y_min) y2 = df_linePlot['Good'].tolist() # plotting graph fig, ax1 = plt.subplots(figsize=(13,7)) color_map = ['#FFA132', '#FFAA46', '#FFB35A', '#FFBC6D', '#FFC581', '#FFCE94', '#FFD7A8'] # bar plot p1 = ax1.bar(x_pos, y, align='center', alpha=1, color = color_map,tick_label=x_Names) ax1.set_xticks(x_pos,x_Names) ax1.set_ylim(y_min-y_diff, y_max+y_diff/2) # line plot line_color = '#02C480' ax2 = ax1.twinx() p2 = ax2.plot(x_pos,y2,color = line_color) ax2.set_ylabel('Quality') ax1.set_ylabel('Average Percentage of Availability') ax1.set_xlabel('Date') fig.suptitle("Overall Quality") plt.legend((p1[0], p2[0]), ('Quality', 'Good')) plt.show() ``` ## Visualizing Average Availability for all Machines till 30th June 7:00 AM ``` # Let's see what is happening currently. Through Azure Synapse Link feature, we can # perform near-real time analytics on data. # Let's plot a graph showing average Quality output of all machines for the last few hours. 
df_AvailabilityChartData = df_ProductionPerformance.toPandas() df_AvailabilityChartData.sort_values('ProductionMonth', inplace=True) df_RecentRecords = df_AvailabilityChartData.tail(60) df_RecentRecords = df_RecentRecords.groupby(["ProductionMonth"], sort=True).mean() performance = df_RecentRecords['Avg'].tolist() names = df_RecentRecords.index.tolist() perf_min = min(performance) perf_max = max(performance) perf_diff = (perf_max - perf_min)/2 # performance = [x for x in performance] # plotting bar graph plt.figure(figsize=(13,7)) y_pos = np.arange(len(names)) colNames = [dt.strftime("%H:%M") for dt in pd.to_datetime(names)] color_map=['#02B480','#02B480','#02B480','#E2B803','#02B480','#02B480','#02B480','#F4B71C','#F6A629','#F79537','#F98444','#FB7351'] plt.bar(y_pos, performance, align='center', alpha=1, color = color_map, edgecolor=color_map) plt.xticks(y_pos, colNames) plt.ylim(perf_min-perf_diff, perf_max+perf_diff) plt.ylabel('Average Percentage of Quality') plt.xlabel('Time of the day ({})'.format(pd.to_datetime(names)[0].strftime("%d-%b"))) plt.title("Machines Average Quality (Till 30th June 7:00 AM)") plt.show() ```
github_jupyter
``` print('================= Welcome Message =================') print('The program would enable you to import required data from FTP directory into MySQL database.') print() print() print('Program begins.') print('You would have to enter required date(YYYY/MM/dd) within the following prompt to continue the process.') print('***Reminder: YYYY contains 4 numbers, while MM and dd contains 2.***') year = str(input("Please enter YYYY: ")) month = str(input("Please enter mm: ")) day = str(input("Please enter dd: ")) print() print() print('Importing packages needed...') import os import ftplib from ftplib import FTP import re import pandas as pd import sys from os import listdir from os.path import isfile, join import numpy as np while True: try: import pymysql except BaseException: print("Have you installed PyMySQL Package within your local system? If not, install it.") print("================= The program has been terminated =================.") print("Reminder: Try again after fulfilling the requirements.") break else: break print('Packages imported successfully.') # Change working directory tempData directory print() print() while True: try: os.chdir('./TempData') except BaseException: pathnow = os.getcwd() print('Working directory now:', pathnow) print("Have you created 'TempData' directory within python working directory? 
If not, create it.") print("================= The program has been terminated =================.") print("Reminder: Try again after fulfilling the requirements.") break else: pathnow = os.getcwd() print("Python working directory has now been changed to", pathnow) print('Program would then continue.') break # Connect to FTP and print out the directory print() print() while True: try: print('Connecting to ftp server....') ftp = ftplib.FTP('XXX', 'XX', 'XX') except BaseException: print("Unable to connect to FTP site.") print("================= The program has been terminated =================.") print("Reminder: Try again after fulfilling the requirements.") break else: pathnow = os.getcwd() print("Connected successfully") print('Program would then continue.') break print() print() while True: try: ftp.cwd(year) ftp.cwd(month) ftp.cwd(day) print('Connecting to directory', year,'/', month, '/', day) print('Printing out file list within directory...') print("File List: ") files = ftp.dir() print(files) except BaseException: print('Connecting to directory', year,'/', month, '/', day) print("Unable find required directory, check if the values are given correctly.") print("================= The program has been terminated =================") print("Reminder: Try again after fulfilling the requirements.") break else: pathnow = os.getcwd() print("Sucessfully connected") print('Program would then continue.') break # Append file list within python list dirlist = [] ftp.dir(dirlist.append) # Create new list includes only filename newdirlist = [] for i in range(len(dirlist)): newdirlist.append(dirlist[i][56:]) print('File List:') for i,j in enumerate(newdirlist): print(newdirlist[i]) TFT_ID = [] CF_ID = [] EQP_ID = [] # Look up required document 'XXXXRealTime.csv$', and for i in range(len(newdirlist)): string = newdirlist[i] regex = re.compile('RealTime.csv$') match = regex.search(string) if str(match) != 'None': TFT_ID.append(newdirlist[i][0:13]) 
# Download required data (to python working directory)
def getFile(ftp, filename):
    """Download `filename` from the current FTP directory into the working directory.

    Uses a binary transfer (RETR). The destination file is opened with a
    context manager so the handle is closed even if the transfer fails
    (the original called open(...).write and never closed the file).
    """
    try:
        with open(filename, 'wb') as local_file:
            ftp.retrbinary("RETR " + filename, local_file.write)
    except Exception:
        # Python 3 print call -- the original used the Python 2 statement
        # form (`print "Error"`), which is a SyntaxError under Python 3.
        print("Error")
github_jupyter
# belief_network_lib # Example. Using Markov blanket to reduce 20 variables to 4 #1 Introduction We are interested in understanding the relationship between a variable X and a response variable Y. This is often done in the context of many (sometimes hundreds, thousands, or more) other variables. In general it is difficult to know if a change in Y was due to X2, and not another variable. Isolating the relationship between two variables might in general be accomplished by randomizing for the presence of the other variables. While in some circumstances this is feasible, when there are many (tens, hundreds, or thousands) of other variables, it is not feasible to acquire sufficient data to support this. If there is a way to reduce the complete set of variables, to a smaller set of variables that are most significant to Y, this would provide a smaller, more feasible set to randomize for. This smaller set of variables is precisely the Markov blanket for Y. In this example: - We show how belief_network_lib identifies this smaller set of variables comprising the Markov blanket. - We demonstrate that this significantly smaller set of variables contains the most important information to Y, by comparing a classifier of Y based upon all (20) variables, to one based on the four in the Markov blanket, and showing its (slightly) better performance. ## 1.1 Example description In this example, there are twenty variables: Variables: X1, X2, X20, X21, X22, X23, X24, X3, X30, X31, X32, X33, X34, X4, X40, X41, X42, X43, X44 and one response variable, Y Each of the variables is a binary variable, taking on the value 0 or 1. 
``` %pylab inline import network import network_learner import pandas from pandas import DataFrame, Series import sklearn import numpy as np ``` #2 Example ``` df = pandas.read_csv("./data_samples/bn_data_example_3.csv") df.head() ``` ##2.1 Find Markov blanket around Y ``` input_file = open("./data_samples/bn_data_example_3.csv") aNL = network_learner.NetworkLearner(input_file) aNL.find_markov_blanket_for("Y", significance=0.001) ``` ##2.2 Build classifier based on all 20 variables ###2.2.1 De-bias the data by making an even split between Y=0 and Y=1 ``` indices =np.where(df.Y==0)[0] to_keep = [random.choice(indices) for x in range(3508)] neg_rows = [df[["X1", "X2", "X20", "X21", "X22", "X23", "X24", "X3", "X30", "X31", "X32", "X33", "X34", "X4", "X40", "X41", "X42", "X43", "X44"]].iloc[index] for index in to_keep] df_neg_rows = DataFrame(neg_rows) neg_feature_rows = df_neg_rows.as_matrix() pos_indices = np.where(df.Y==1)[0] pos_rows = [df[["X1", "X2", "X20", "X21", "X22", "X23", "X24", "X3", "X30", "X31", "X32", "X33", "X34", "X4", "X40", "X41", "X42", "X43", "X44"]].iloc[index] for index in pos_indices] df_pos_rows = DataFrame(pos_rows) pos_feature_rows = df_pos_rows.as_matrix() data_set_df = df_neg_rows.append(df_pos_rows) Y = [0 for x in range(3508)] Y.extend([1 for x in range(3508)]) ``` ### Select training set ``` training_set_df = None #Randomly select two-thirds of positive pos_indices_train = numpy.random.choice(pos_indices, int(math.floor( (float(2)/3) *len(pos_indices))) , replace=False) pos_rows_train = [df[["X1", "X2", "X20", "X21", "X22", "X23", "X24", "X3", "X30", "X31", "X32", "X33", "X34", "X4", "X40", "X41", "X42", "X43", "X44"]].iloc[index] for index in pos_indices_train] df_pos_rows_train = DataFrame(pos_rows_train) neg_indices = to_keep #Randomly select two-thirds of negative neg_indices_train = numpy.random.choice(neg_indices, int(math.floor( (float(2)/3) *len(neg_indices))), replace=False ) neg_rows_train = [df[["X1", "X2", "X20", "X21", 
"X22", "X23", "X24", "X3", "X30", "X31", "X32", "X33", "X34", "X4", "X40", "X41", "X42", "X43", "X44"]].iloc[index] for index in neg_indices_train] df_neg_rows_train = DataFrame(neg_rows_train) # Complete training set training_set_df = df_neg_rows_train.append(df_pos_rows_train) Y_train = [0 for x in range( len(neg_rows_train) )] Y_train.extend([1 for x in range( len(pos_rows_train) )]) ``` ### Select test set ``` pos_indices_test = [index for index in pos_indices if index not in pos_indices_train] pos_rows_test = [df[["X1", "X2", "X20", "X21", "X22", "X23", "X24", "X3", "X30", "X31", "X32", "X33", "X34", "X4", "X40", "X41", "X42", "X43", "X44"]].iloc[index] for index in pos_indices_test] df_pos_rows_test = DataFrame(pos_rows_test) neg_indices_test = [index for index in neg_indices if index not in neg_indices_train] neg_rows_test = [df[["X1", "X2", "X20", "X21", "X22", "X23", "X24", "X3", "X30", "X31", "X32", "X33", "X34", "X4", "X40", "X41", "X42", "X43", "X44"]].iloc[index] for index in neg_indices_test] df_neg_rows_test = DataFrame(neg_rows_test) # Complete test set test_set_df = df_neg_rows_test.append(df_pos_rows_test) Y_test = [0 for x in range( len(neg_rows_test))] Y_test.extend([1 for x in range( len(pos_rows_test))]) ``` ###2.2.2 Build classifier ``` from sklearn.linear_model import LogisticRegression est = LogisticRegression() est.fit(training_set_df.as_matrix(), Y_train) ``` #### Measure test set accuracy ``` est.score(test_set_df.as_matrix(), Y_test) ``` ##2.3 Classifier based on 4 Markov blanket variables ``` train_vals_mb = training_set_df[["X1", "X2", "X3", "X4"]].as_matrix() train_vals_mb est2 = LogisticRegression() est2.fit(train_vals_mb, Y_train) ``` ### Measure test set accuracy ``` est2.score(test_set_df[["X1", "X2", "X3", "X4"]].as_matrix(), Y_test) ``` ## 2.4 Attempting dimensionality reduction via PCA instead from sklearn.decomposition import PCA ``` pca = PCA() pca.fit(training_set_df.as_matrix()) variances = pca.explained_variance_ratio_ 
``` ## Plot variance as a function of principal components ``` Series(variances).plot() ``` As can be seen in the figure above, the variance captured by the principal components does not exhibit a sharp drop off, as would be the case if the variables exhibited linear relationships with each other. The number of principal components that would have to be included in any basis transformation is nearly as many as the original 20 dimensions.
github_jupyter
# Introduction: Partitioning Data Problem: we have a large dataset that we want to partition into smaller sections in order to run a feature engineering pipeline over these partitions in parallel. Approach: divide the data into partitions by hashing the customer id to an integer and then modulo (integer) dividing by the number of partitions. After determining the partition for each customer id, we can iterate over the data and write the partitions to the correct directory. At the end of the process, we'll have `N_PARTITIONS` of customer data, each containing all the data for a subset of customers. ``` import pandas as pd import hashlib import os from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" N_PARTITIONS = 1000 ``` ## Hashing Hashing is the process of mapping a string of characters to a fixed length value or key. For this problem, we'll take a string, the customer id (`msno` in the data) and map it to an integer (128 bits). To convert to a partition number, we then integer (modulo) divide this integer by the number of partitions. ### Consistency of Hashing To make sure that the integer for a given string is always the same, we use the `md5` algorithm from the `hashlib` library. There is no way to go backwards from an integer to the string, but we can always go from the string to the same exact hash. Since we don't need to partition our customers in any particular groupings, randomly assigning customers to partitions is an acceptable approach. 
def id_to_hash(customer_id):
    """Map a customer id string to a deterministic non-negative integer.

    The md5 digest is 128 bits (the original docstring incorrectly said
    16 bits); callers reduce it to a partition number with `% N_PARTITIONS`.
    md5 is used purely for its stable, uniform spread across partitions,
    not for any security property.
    """
    return int(hashlib.md5(customer_id.encode('utf-8')).hexdigest(), 16)
def create_blank_partitions():
    """Create the five per-partition csv files and write each file's header.

    Every partition file is opened in 'w' mode, so existing contents are
    discarded and each file is left holding only its header row. Returns
    the path of the last partition directory (p{N_PARTITIONS-1}), which
    callers use for spot checks.
    """
    # Map each file name to the dataframe whose columns supply its header.
    # NOTE(fix): the original wrote `train`'s columns for test.csv; using
    # `test` itself keeps the header correct even if the schemas diverge.
    header_sources = {
        'transactions.csv': transactions,
        'train.csv': train,
        'test.csv': test,
        'members.csv': members,
        'logs.csv': logs,
    }
    # For each partition create the files with headers
    for i in range(N_PARTITIONS):
        directory = base_dir + f'p{i}/'
        for file, source_df in header_sources.items():
            # Write file header as first line
            with open(directory + file, 'w') as f:
                f.write(','.join(list(source_df.columns)))
    return directory
``` # Iterate through the dataframe one row at a time for i, row in members.iterrows(): # Find the partition by hashing the id partition = id_to_hash(row['msno']) % N_PARTITIONS # Open the file for appending with open(base_dir + f'p{partition}/members.csv', 'a') as f: # Write a newline and then the information f.write('\n') f.write(','.join([str(x) for x in row.values])) if i > 1: break pd.read_csv(base_dir + f'p{partition}/members.csv') ``` Everything looks like it went well with the first attempt. However, we might want to ask if iterating over the dataset one row at a time using `iterrows` is the quickest options. # Performance of Different Methods There are a number of different options to handle writing the data to the correct partition. To find out which method is the best, we'll try 4 approaches: 1. `df.iterrows()`: iterate through the dataframe one row at a time with rows represented as series 2. `df.itertuples()`: iterate through the dataframe one row at a time with rows represented as tuples 3. `df.apply()`: iterate through the data one row at a time using `apply` 4. `groupby(partition)` and save each group with `to_csv()`: iterate through the dataframe one partition at a time The four approaches have different applicability and performance characteristics (see this [Stack Overflow](https://stackoverflow.com/questions/24870953/does-iterrows-have-performance-issues/24871316#24871316) answer). The way to find out which one is the quickest is to try them all. This isn't meant to represent all use cases, so your particular results may vary. ``` from timeit import default_timer as timer ``` ### Member Information We'll start off with the members. ``` members = pd.read_csv(f'{cwd}/data/members_v3.csv') members.shape ``` ## Iterrows The first implementation to try is `iterrows`. This is fairly slow because Pandas packages the row as a Pandas series before iteration. However, it does allow us to access each value using conventional locating. 
``` start = timer() for i, row in members.iterrows(): # Find the partition number by hashing the id partition = id_to_hash(row['msno']) % N_PARTITIONS # Open file for appending with open(base_dir + f'p{partition}/members.csv', 'a') as f: # Write a new line and then data f.write('\n') f.write(','.join([str(x) for x in row.values])) if i % 10000 == 0: print(f'{100 * round(i / members.shape[0], 2)}% complete. {round(timer() - start)} seconds elapsed.', end = '\r') end = timer() print(f'Processing {i} lines took {round(end - start)} seconds using iterrows.') pd.read_csv(base_dir + f'p{partition}/members.csv').head() ``` This approach works but is fairly slow. ## Itertuples Itertuples should be faster than iterrows because Pandas packages the row as a tuple instead of as a series. The tradeoff is we need to be careful when accessing the elements of the series since we can't refer to them by name. For example, to make sure we are hashing the customer id (`msno`) we need to grab the second element of the tuple. ``` _ = create_blank_partitions() start = timer() for i, tup in enumerate(members.itertuples()): # Find the partition number by hashing the id partition = id_to_hash(tup[1]) % N_PARTITIONS # Open file for appending with open(base_dir + f'p{partition}/members.csv', 'a') as f: # Write a new line and then data f.write('\n') f.write(','.join([str(x) for x in tup[1:]])) if i % 10000 == 0: print(f'{100 * round(i / members.shape[0], 2)}% complete. {round(timer() - start)} seconds elapsed.', end = '\r') end = timer() print(f'Processing {i} lines took {round(end - start)} seconds using itertuples.') pd.read_csv(base_dir + f'p{partition}/members.csv').head() ``` This approach was much faster because Pandas does not have to convert each row into a Series which has more overhead than a tuple. ``` _ = create_blank_partitions() ``` ## Apply Another option would be to use the apply over the rows. 
def save_row(row, name):
    """Append a single dataframe row to the csv of the partition its id hashes to."""
    # Hash the customer id and reduce it to a partition number.
    bucket = id_to_hash(row['msno']) % N_PARTITIONS
    target = base_dir + f'p{bucket}/{name}.csv'
    # Leading newline keeps the record on its own line after the header.
    with open(target, 'a') as out:
        out.write('\n' + ','.join(str(value) for value in row.values))
``` members['partition'] = members['msno'].apply(id_to_hash) % 1000 members['partition'].value_counts().head() members['partition'].value_counts().describe() ``` It looks like the number of members in each partition is fairly constant. We can check another dataset to make sure. ``` transactions = pd.read_csv(f'{cwd}/data/transactions.csv') transactions['partition'] = transactions['msno'].apply(id_to_hash) % 1000 transactions['partition'].value_counts().describe() ``` The following cell runs the group by approach to partitioning the data. The biggest thing to watch out for is making sure we are appending to the file each time (open with `a`). When we write to a csv with `to_csv`, we can pass in an already open file. We also don't write a header since we already created the headers in every file, and do not write the index. ``` start = timer() members['partition'] = members['msno'].apply(id_to_hash) % N_PARTITIONS # Iteration through grouped partitions for partition, grouped in members.groupby('partition'): grouped = grouped.drop(columns = 'partition') # Open file for appending with open(base_dir + f'p{partition}/members.csv', 'a') as f: f.write('\n') grouped.to_csv(f, header = False, index = False) end = timer() print(f'Processing {members.shape[0]} rows took {round(end - start)} seconds using groupby.') pd.read_csv(base_dir + f'p{partition}/members.csv').head() ``` The group by approach to partitioning the data is by far the fastest method. We'll put this into a function to use with all the datasets. ## Reusable Hashing Dataframe Function To make the process reusable, we'll write a function that does this for us. It will take in a dataframe, a name for the file to save the data to, and an optional progress argument. The function will map the customer id (`msno`) to a partition number using the hash modulo the number of partitions, group the dataframe by the partition, and write the grouped dataframe to the appropriate directory. 
def partition_by_hashing(df, name, progress = None):
    """Partition a dataframe into N_PARTITIONS csv files by hashing the id.

    Params
    --------
    df (pandas dataframe): dataframe to partition. Must have an 'msno' column.
    name (str): base file name ('members', 'train', ...) inside each
        partition directory.
    progress (int, optional): display a status line every `progress`
        partitions processed. Defaults to None (no output).

    Returns:
    --------
    Nothing returned. Each group of rows is appended in a single to_csv
    call to the csv file of its partition directory.
    """
    start = timer()
    # Compute partition numbers as a standalone Series instead of adding a
    # 'partition' column: groupby accepts the Series directly, so the
    # caller's dataframe is no longer mutated as a side effect and no
    # drop(columns=...) is needed per group.
    partitions = df['msno'].apply(id_to_hash) % N_PARTITIONS
    for partition, grouped in df.groupby(partitions):
        # Open for appending so repeated calls accumulate rows after the header.
        with open(base_dir + f'p{partition}/{name}.csv', 'a') as f:
            # Write a new line and then data
            f.write('\n')
            grouped.to_csv(f, header = False, index = False)
        # Record progress every `progress` steps
        if progress is not None:
            if partition % progress == 0:
                print(f'{100 * round(partition / N_PARTITIONS, 2)}% complete. {round(timer() - start)} seconds elapsed.', end = '\r')
    end = timer()
    if progress is not None:
        print(f'\n{df.shape[0]} rows processed in {round(end - start)} seconds.')
``` train = pd.read_csv(f'{cwd}/data/train.csv') partition_by_hashing(train, name = 'train', progress = 10) pd.read_csv(base_dir + f'p{partition}/train.csv').head() ``` ## Testing Data The nice thing about a function is we can keep applying it, changing only the arguments! ``` test = pd.read_csv(f'{cwd}/data/sample_submission_v2.csv') partition_by_hashing(test, name = 'test', progress = 10) pd.read_csv(base_dir + f'p{partition}/test.csv').head() ``` ## Transactional Data The second to last dataset is the customer transactions. ``` transactions = pd.read_csv(f'{cwd}/data/transactions.csv') partition_by_hashing(transactions, name = 'transactions', progress = 10) pd.read_csv(base_dir + f'p{partition}/transactions.csv').head() ``` ## User Log Data The final dataset cannot be pass directly into the function because of the size which won't even let us read the entire file into memory. Instead, we can read it in a chunk at a time using Pandas and apply the function to each chunk. There are actually two log files of significantly different sizes, but we'll use the same chunking approach for each. ``` print(os.stat(f'{cwd}/data/user_logs.csv').st_size / 1e9) print(os.stat(f'{cwd}/data/user_logs_v2.csv').st_size / 1e9) ``` The second user logs can be processed in the previous manner because it can be read completely into memory, but we'll go ahead and apply the chunking method. The chunksize refers to the number of rows read in at a time. Using Pandas `read_csv` and specifying the `chunksize`, we can then iterate over the file one chunk at a time. ``` chunksize = 1e6 start = timer() for chunk in pd.read_csv(f'{cwd}/data/user_logs_v2.csv', chunksize = chunksize): partition_by_hashing(chunk, name = 'logs', progress = None) if (i + 1) % 10 == 0: print(f'{i * chunksize} rows processed.', end = '\r') end = timer() print(f'\nOverall time: {round(end - start)} seconds.') pd.read_csv(base_dir + f'p{partition}/logs.csv').head() ``` The final dataset has about 400 million rows. 
We can read it in 10 million rows at a time. ``` chunksize = 1e7 start = timer() for i, chunk in enumerate(pd.read_csv(f'{cwd}/data/user_logs.csv', chunksize = chunksize)): partition_by_hashing(chunk, name = 'logs', progress = None) if (i + 1) % 10 == 0: print(f'{i * chunksize} rows processed.', end = '\r') end = timer() print(f'\nOverall time: {round(end - start)} seconds.') ``` With the grouping approach, we were able to get 30 GB of data partitioned in less than 2 hours! Dealing with large datasets can be tough, but breaking down the problem makes it much more manageable. ``` pd.read_csv(base_dir + f'p{partition}/logs.csv').tail() ``` # Conclusions In this notebook, we implemented a partitioning of a dataset that would normally be too large to fit in memory. After trying several options, we eventually decided on the fastest process which was: 1. Map customer ids to a partition using a hashing function. 2. Compute the integer hash and then divide by the number of partitions 3. Group the dataframe by the partition and write each partition to appropriate directory and file 4. For large files that cannot fit all in memory, read in via chunking and send each chunk through the partitioning function Now we can work on an individual partition to develop an automated feature engineering pipeline. After the pipeline has been developed, we can use a framework such as Spark or Dask to run the partitions through the pipeline in parallel. This will speed up the overall feature engineering process and allow us to scale to larger datasets. ## Next Steps To implement a machine learning solution, we need to take several steps outlined in the following process: 1. Prediction Engineering: define a business need and translate into a machine learning problem. Create a set of labeled historical examples (called label times) that can be used to build features for each label. 2. Use the label times to build features for each label by filtering the data to times before the cutoff time. 
This procedure can be rapidly completed using automated feature engineering. 3. Train a machine learning algorithm to predict the labels from the features. Once the model has been optimized, use it to make predictions on new data. With the data in partitions, the first two steps can be done rapidly in parallel. The first step is implemented in the `Prediction Engineering` notebook.
github_jupyter
# Block Aligner Data Analysis and Visualizations This notebook contains code for collecting, cleaning, and analyzing data produced by block aligner's experiments. To run this, you will need to install all the libraries imported below, along with [altair-saver](https://github.com/altair-viz/altair_saver), which has some extra dependencies for PDF saving. Run each cell one by one to reproduce the experiments. This may take a while. For accurate benchmarking, it is recommended to run the entire notebook in the command line with `nbconvert`. ``` import altair as alt from altair_saver import save from altair import datum import pandas as pd from io import StringIO def csv_to_pandas(csv, d = "\\s*,\\s*", t = None): s = StringIO("\n".join(csv)) data = pd.read_csv(s, sep = d, thousands = t, comment = "#", engine = "python") return data ``` ## Block Aligner Image ``` !cd .. && cargo run --example block_img --release --features simd_avx2 --quiet -- vis/block_img1.png vis/block_img2.png ``` <img src = "block_img1.png" width = "300px" /> <img src = "block_img2.png" width = "300px" /> ## Random Data Accuracy ``` output = !cd .. && cargo run --example accuracy --release --features simd_avx2 --quiet output data = csv_to_pandas(output) data data["% wrong"] = data["wrong"] / data["iter"] data["k %"] = data["k"] / data["len"] ``` Error Rate on Random DNA Sequences with 10% Insert ``` c = alt.Chart(data).mark_point(opacity = 1, filled = True).encode( x = alt.X("% wrong", axis = alt.Axis(format = "%")), y = alt.Y("k %:N", axis = alt.Axis(format = "~%", grid = True)), color = "max size:N", shape = "max size:N", row = alt.Row("len:N", header = alt.Header(title = "length")) ).transform_filter( datum.insert == True ).properties( width = 100, height = 50 ) save(c, "random_dna_accuracy.pdf") c ``` ## Prefix Scan Benchmark ``` output = !cd .. 
&& cargo bench --features simd_avx2 --quiet -- prefix_scan | grep 'bench:' | awk '{print $2"\t"$5}' output.insert(0, "algorithm\ttime") output data = csv_to_pandas(output, d = "\t", t = ",") data data["algorithm"] = data["algorithm"].map({ "bench_naive_prefix_scan": "naive", "bench_opt_prefix_scan": "ours" }) data ``` Prefix Scan Benchmark (AVX2) ``` c = alt.Chart(data).mark_bar().encode( x = alt.X("time", axis = alt.Axis(title = "time (ns)")), y = "algorithm", color = alt.Color("algorithm", legend = None) ).properties( width = 150 ) save(c, "prefix_scan_bench.pdf") c ``` ## Random Data Benchmark ``` output = !cd .. && cargo bench --features simd_avx2 --quiet -- bench_ | grep 'bench:' | grep -v 'prefix_scan' | awk '{print $2"\t"$5}' output cleaned = ["algorithm\talphabet\tk\tlength\tproperty\ttime"] names = ["parasailors_aa", "rustbio_aa", "scan_aa", "scan_nuc", "triple_accel"] new_names = ["parasailors\tprotein", "rust bio\tprotein", "ours\tprotein", "ours\tnucleotide", "triple accel\tnucleotide"] for o in output: o = o[len("bench_"):] for n, nn in zip(names, new_names): if o.startswith(n): suffix = o[len(n):].replace("_", "\t") o = nn + suffix break if len(o.split("\t")) < len(cleaned[0].split("\t")): insert_idx = o.rindex("\t") o = o[:insert_idx] + "\tdefault" + o[insert_idx:] cleaned.append(o) cleaned data = csv_to_pandas(cleaned, d = "\t", t = ",") data data["algorithm property"] = data["algorithm"] + " " + data["property"] data["time"] /= 1000 ``` Random Protein Sequences Benchmark (AVX2) ``` c = alt.Chart(data).mark_point(opacity = 1, filled = True).encode( x = alt.X("time", axis = alt.Axis(title = "time (us)"), scale = alt.Scale(type = "log", domain = [1, 50000])), y = alt.Y("algorithm property", axis = alt.Axis(title = "algorithm", grid = True), sort = alt.EncodingSortField(field = "time")), color = "length:N", shape = "length:N" ).transform_filter( datum.alphabet == "protein" ).properties( width = 200, height = 150 ) save(c, "random_protein_bench.pdf") c 
``` Random DNA Sequences Benchmark (AVX2) ``` c = alt.Chart(data).mark_point(opacity = 1, filled = True).encode( x = alt.X("time", axis = alt.Axis(title = "time (us)"), scale = alt.Scale(type = "log", domain = [1, 50000])), y = alt.Y("algorithm property", axis = alt.Axis(title = "algorithm", grid = True), sort = alt.EncodingSortField(field = "time")), color = alt.Color("length:N", scale = alt.Scale(domain = [100, 1000, 10000])), shape = alt.Color("length:N", scale = alt.Scale(domain = [100, 1000, 10000])) ).transform_filter( datum.alphabet == "nucleotide" ).properties( width = 200, height = 50 ) save(c, "random_dna_bench.pdf") c ``` ## Uniclust 30 Data Accuracy ``` output = !cd .. && cargo run --example uc_accuracy --release --features simd_avx2 --quiet output data = csv_to_pandas(output) data data["% wrong"] = data["wrong"] / data["count"] data["seq identity"] = data["seq identity"].map({ 0.0: "0-10%", 0.1: "10-20%", 0.2: "20-30%", 0.3: "30-40%", 0.4: "40-50%", 0.5: "50-60%", 0.6: "60-70%", 0.7: "70-80%", 0.8: "80-90%", 0.9: "90-100%" }) data ``` Uniclust30 Error Rate ``` c = alt.Chart(data).mark_bar().encode( x = "seq identity", y = alt.Y("% wrong", axis = alt.Axis(format = "%")), column = alt.Column("size", sort = ["32-32", "32-256", "256-256"]), row = "dataset", color = alt.Color("size", legend = None, sort = ["32-32", "32-256", "256-256"]) ).properties( width = 100, height = 100 ) save(c, "uniclust30_accuracy.pdf") c ``` Uniclust30 % Error ``` c = alt.Chart(data).mark_bar().encode( x = "seq identity", y = alt.Y("wrong % error", axis = alt.Axis(format = "%")), column = alt.Column("size", sort = ["32-32", "32-256", "256-256"]), row = "dataset", color = alt.Color("size", legend = None, sort = ["32-32", "32-256", "256-256"]) ).properties( width = 100, height = 100 ) save(c, "uniclust30_percent_error.pdf") c agg_data = data.copy() agg_data = agg_data.groupby(["dataset", "size"]).agg({"count": "sum", "wrong": "sum"}).reset_index() agg_data["% wrong"] = 
agg_data["wrong"] / agg_data["count"] agg_data ``` Overall Uniclust30 Error Rate ``` c = alt.Chart(agg_data).mark_bar().encode( x = alt.X("size", axis = None, sort = ["32-32", "32-256", "256-256"]), y = alt.Y("% wrong", axis = alt.Axis(format = "%")), column = alt.Column("dataset", header = alt.Header(orient = "bottom")), color = alt.Color("size", sort = ["32-32", "32-256", "256-256"]) ).properties( width = 50, height = 100 ) save(c, "uniclust30_overall_accuracy.pdf") c ``` ## Uniclust 30 Data Benchmark ``` output = !cd .. && cargo run --example uc_bench --release --features simd_avx2 --quiet output data = csv_to_pandas(output) data ``` Uniclust30 Benchmark (AVX2) ``` c = alt.Chart(data).mark_bar().encode( x = alt.X("algorithm", axis = None), y = alt.Y("time", axis = alt.Axis(title = "time (s)"), scale = alt.Scale(domain = [0.0, 1.0])), column = alt.Column("dataset", header = alt.Header(orient = "bottom")), color = "algorithm" ).transform_filter( (datum.size == "32-256") | (datum.algorithm == "parasail") ).properties( width = 50, height = 100 ).configure_range( category = {"scheme": "dark2"} ) save(c, "uniclust30_bench.pdf") c ``` Uniclust30 Block Size Benchmark (AVX2) ``` c = alt.Chart(data).mark_bar().encode( x = alt.X("size", axis = None, sort = ["32-32", "32-256", "256-256"]), y = alt.Y("time", axis = alt.Axis(title = "time (s)"), scale = alt.Scale(domain = [0.0, 1.0])), column = alt.Column("dataset", header = alt.Header(orient = "bottom")), color = alt.Color("size", sort = ["32-32", "32-256", "256-256"]) ).transform_filter( datum.algorithm == "ours (no trace)" ).properties( width = 50, height = 100 ) save(c, "uniclust30_size_bench.pdf") c ``` ## DNA Reads Global Alignment ``` output = !cd .. 
&& cargo run --example nanopore_accuracy --release --features simd_avx2 --quiet output data = csv_to_pandas(output) data data["error rate"] = data["wrong"] / data["total"] data = data[["dataset", "size", "total", "wrong", "error rate", "wrong % error"]] data = data.fillna(0) data = data.rename(columns = {"total": "reads"}) data["error rate"] = data["error rate"].map("{:.1%}".format) data["wrong % error"] = data["wrong % error"].map("{:.1%}".format) print(data) ``` ## Nanopore Data Compare and Benchmark Setup To run the comparisons and benchmarks below, you need to clone the following repos, place them in the same directory where this repo (block aligner) is located, and follow their setup instructions: * [diff-bench-paper](https://github.com/Daniel-Liu-c0deb0t/diff-bench-paper) * [adaptivebandbench](https://github.com/Daniel-Liu-c0deb0t/adaptivebandbench) ## Nanopore Data Compare ``` output = !cd ../../diff-bench-paper/supplementary_data/benchmark_codes && ./custom_scores.sh 2>&1 | grep '\.tsv' output lengths = [] for f in output: l = f[len("scores_"):f.index(".")] lengths.append(l[1:] if l[0] == "l" else l) lengths path_prefix = "../diff-bench-paper/" outputs = [] for f in output: o = !cd .. 
&& cargo run --example compare --release --features simd_avx2 --quiet -- {path_prefix + f} 50 outputs.append(o) outputs data = [] for o in outputs: d = csv_to_pandas(o) data.append(d) data = pd.concat(data, keys = lengths) data = data.reset_index() data = data.drop(columns = ["level_1"]) data = data.rename(columns = {"level_0": "length"}) data["band width"] = 32 data output = !cd ../../adaptivebandbench && ./custom_scores.sh 2>&1 | grep '\.tsv' output lengths = [] band_widths = [] for f in output: l = f[len("scores_"):f.index(".")] if l[0] == "l": lengths.append(l[1:l.index("_")]) l = l[l.index("_") + 1:] else: lengths.append("default") if l[0] == "b": band_widths.append(l[1:]) print(lengths) print(band_widths) path_prefix = "../adaptivebandbench/" outputs = [] for f in output: o = !cd .. && cargo run --example compare --release --features simd_avx2 --quiet -- {path_prefix + f} 100000 outputs.append(o) outputs data2 = [] for o in outputs: d = csv_to_pandas(o) data2.append(d) index = list(zip(lengths, band_widths)) data2 = pd.concat(data2, keys = index) data2 = data2.reset_index() data2 = data2.drop(columns = ["level_2"]) data2 = data2.rename(columns = {"level_0": "length", "level_1": "band width"}) data2 data["other better %"] = data["other better"] / data["total"] data["us better %"] = data["us better"] / data["total"] data2["other better %"] = data2["other better"] / data2["total"] data2["us better %"] = data2["us better"] / data2["total"] data["equal %"] = 1.0 - data["other better %"] - data["us better %"] data2["equal %"] = 1.0 - data2["other better %"] - data2["us better %"] cleaned = data.copy() cleaned = cleaned.melt(id_vars = ["length", "band width", "max size"], value_vars = ["us better %", "other better %", "equal %"]) cleaned["variable"] = cleaned["variable"].map({"us better %": "ours better %", "other better %": "adaptive banding better %", "equal %": "equal %"}) cleaned2 = data2.copy() cleaned2 = cleaned2.melt(id_vars = ["length", "band width", "max 
size"], value_vars = ["us better %", "other better %", "equal %"]) cleaned2["variable"] = cleaned2["variable"].map({"us better %": "ours better %", "other better %": "static banding better %", "equal %": "equal %"}) order = {"ours better %": 0, "equal %": 1, "adaptive banding better %": 2} cleaned["order"] = cleaned.apply(lambda r: order[r["variable"]], axis = 1) order = {"ours better %": 0, "equal %": 1, "static banding better %": 2} cleaned2["order"] = cleaned2.apply(lambda r: order[r["variable"]], axis = 1) ``` Comparison with Adaptive Banding ``` c = alt.Chart(cleaned).mark_bar().encode( x = "length", y = alt.Y("sum(value)", axis = alt.Axis(title = "", format = "%")), color = alt.Color("variable", title = "", sort = alt.EncodingSortField(field = "order")), row = "max size:N", order = "order" ).properties( width = 100, height = 100 ) save(c, "compare_adaptive_banding.pdf") c ``` Comparison with Static Banding ``` c = alt.Chart(cleaned2).mark_bar().encode( x = "length", y = alt.Y("sum(value)", axis = alt.Axis(title = "", format = "%")), color = alt.Color("variable", title = "", sort = alt.EncodingSortField(field = "order")), row = alt.Row("max size:N", title = "max block size"), column = alt.Column("band width:N", title = "static band width", sort = ["256", "2048"]), order = "order" ).properties( width = 100, height = 100 ) save(c, "compare_diagonal.pdf") c ``` ## Nanopore Data Benchmark ``` output = !cd .. 
&& cargo run --example nanopore_bench --release --features simd_avx2 --quiet output data = csv_to_pandas(output) data output2 = !cd ../../diff-bench-paper/supplementary_data/benchmark_codes && ./custom_bench.sh for i, o in enumerate(output2): if o.startswith("cells("): break output2 = output2[i + 1:] output2.insert(0, "algorithm\tfill time\ttrace time\tconvert time\ttotal time\tscore\tfail") output2 data2 = csv_to_pandas(output2, d = "\t") data2 cleaned2 = data2.drop(columns = ["trace time", "convert time", "total time", "score", "fail"]) cleaned2 = cleaned2.rename(columns = {"fill time": "time"}) cleaned2["time"] /= 1e9 cleaned2 cleaned = data.drop(index = [1, 3, 5]) cleaned = cleaned.drop(columns = ["dataset"]) cleaned = cleaned.append(cleaned2, ignore_index = True) cleaned ``` 25kbp Nanopore Reads Benchmark (AVX2) ``` chart1 = alt.Chart(cleaned).mark_point(opacity = 1, filled = True).encode( x = alt.X("time", axis = alt.Axis(title = "time (s)", grid = True), scale = alt.Scale(type = "log")), y = alt.Y("algorithm", axis = alt.Axis(grid = True), sort = alt.EncodingSortField(field = "time")) ).transform_filter((datum.algorithm != "ours (trace 32-32)") & (datum.algorithm != "ours (no trace 32-32)") & (datum.algorithm != "ours (trace 32-64)")) chart2 = alt.Chart(cleaned).mark_point(color = "red", filled = True).encode( x = alt.X("time", axis = alt.Axis(title = "time (s)", grid = True), scale = alt.Scale(type = "log")), y = alt.Y("algorithm", axis = alt.Axis(grid = True), sort = alt.EncodingSortField(field = "time")) ).transform_filter((datum.algorithm == "ours (trace 32-32)") | (datum.algorithm == "ours (no trace 32-32)") | (datum.algorithm == "ours (trace 32-64)")) c = (chart1 + chart2).properties( width = 150, height = 150 ) save(c, "nanopore_bench.pdf") c ``` ## WASM SIMD [Wasmtime](https://wasmtime.dev/) is needed to run the webassembly code. 
``` output = !CARGO_TARGET_WASM32_WASI_RUNNER="wasmtime --wasm-features simd --" cargo bench --target=wasm32-wasi --features simd_wasm --quiet -- --nocapture | grep 'bench:' | awk '{print $2"\t"$5}' output cleaned = ["algorithm\talphabet\tk\tlength\tproperty\ttime"] names = ["rustbio_aa", "scan_aa", "scan_nuc"] new_names = ["rust bio\tprotein", "ours\tprotein", "ours\tnucleotide"] for o in output: o = o[len("bench_"):] for n, nn in zip(names, new_names): if o.startswith(n): suffix = o[len(n):].replace("_", "\t") o = nn + suffix break if len(o.split("\t")) < len(cleaned[0].split("\t")): insert_idx = o.rindex("\t") o = o[:insert_idx] + "\tdefault" + o[insert_idx:] cleaned.append(o) cleaned data = csv_to_pandas(cleaned, d = "\t", t = ",") data data["algorithm property"] = data["algorithm"] + " " + data["property"] data["time"] /= 1000 ``` Random Protein Sequences Benchmark (WASM SIMD) ``` c = alt.Chart(data).mark_point(opacity = 1, filled = True).encode( x = alt.X("time", axis = alt.Axis(title = "time (us)"), scale = alt.Scale(type = "log")), y = alt.Y("algorithm property", axis = alt.Axis(title = "algorithm", grid = True), sort = alt.EncodingSortField(field = "time")), color = "length:N", shape = "length:N" ).transform_filter( datum.alphabet == "protein" ).properties( width = 200, height = 150 ) save(c, "random_protein_bench_wasm.pdf") c ```
github_jupyter
## Regression: Breakout ## **ML Course (Bogotá, Colombia, J. Bloom, 2019)** <img src="https://upload.wikimedia.org/wikipedia/commons/2/20/Columbia-Aquidneck-2011.jpg" width=80%> A yacht named "Columbia" (not Colombia 😏) was used to win the [America's Cup in 1958](https://en.wikipedia.org/wiki/1958_America%27s_Cup). Technology has improved a lot since then. In this breakout, you're going making a predictive model to determine the resistance of a boat given it's geometry and speed. This is usually only measureable with advanced simulations but we can get <a href="http://archive.ics.uci.edu/ml/datasets/Yacht+Hydrodynamics">7-dimensional data to build a model</a> and then determine this value for arbitrary new boat design: <ul> <li> *Prediction of residuary resistance of sailing yachts at the initial design stage is of a great value for evaluating the ship's performance and for estimating the required propulsive power. Essential inputs include the basic hull dimensions and the boat velocity. The Delft data set comprises 308 full-scale experiments, which were performed at the Delft Ship Hydromechanics Laboratory for that purpose. These experiments include 22 different hull forms...* </ul> Variations concern hull geometry coefficients and the Froude number: 1. Longitudinal position of the center of buoyancy. 2. Prismatic coefficient. 3. Length-displacement ratio. 4. Beam-draught ratio. 5. Length-beam ratio. 6. Froude number. The measured variable is the residuary resistance per unit weight of displacement: 7. Residuary resistance per unit weight of displacement, adimensional. ``` import requests from io import StringIO dat_file = requests.get("http://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data") data = StringIO(dat_file.text) %matplotlib inline import matplotlib.pyplot as plt import numpy as np d = np.loadtxt(data) np.random.shuffle(d) # shuffle the instances since it appears to be ordered ``` a. 
Chunk the data into the variable you are going to predict (and call that vector Y). Split into a training set with 90% of the complete data and 10% testing set. Be sure to scale the feature data. ``` import math Y = d[:,6] X = d[:,:6] cut = # FIXME: use 90% of the data for training from sklearn import preprocessing X_scaled = preprocessing.scale(X) # many methods work better on scaled X train_X = X_scaled[:cut] train_Y = Y[:cut] test_X = X_scaled[cut:] test_Y = Y[cut:] ``` b. Learn/fit a kNNR model with k=5 ``` from sklearn import neighbors # FIXME ``` c. Determine the mean_squared error of the result ``` from sklearn.metrics import mean_squared_error # FIXME ``` d. What MSE would we expect from (naively) guessing the mean of the training data? ``` mean_squared_error(test_Y,Y.mean()*np.ones(test_Y.shape)) ``` e. If time, try to be a `RandomForestRegressor` model. We'll learn about RFs later. How did you do? ``` from sklearn.ensemble import RandomForestRegressor clf = RandomForestRegressor() # FIXME ```
github_jupyter
# Solve Cournot oligopoly model via collocation **Randall Romero Aguilar, PhD** This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by Mario Miranda and Paul Fackler. Original (Matlab) CompEcon file: **demapp07.m** Running this file requires the Python version of CompEcon. This can be installed with pip by running !pip install compecon --upgrade <i>Last updated: 2021-Oct-01</i> <hr> ## About To illustrate implementation of the collocation method for implicit function problems, consider the example of Cournot oligopoly. In the standard microeconomic model of the firm, the firm maximizes profit by equating marginal revenue to marginal cost (MC). An oligopolistic firm, recognizing that its actions affect price, takes the marginal revenue to be $p + q\frac{dp}{dq}$, where $p$ is price, $q$ is quantity produced, and $\frac{dp}{dq}$ is the marginal impact of output on market price. The Cournot assumption is that the firm acts as if any change in its output will be unmatched by its competitors. This implies that \begin{equation} \frac{dp}{dq} = \frac{1}{D'(p)} \end{equation} where $D(p)$ is the market demand curve. Suppose we wish to derive the effective supply function for the firm, which specifies the quantity $q = S(p)$ it will supply at any price. The firm's effective supply function is characterized by the functional equation \begin{equation} p + \frac{S(p)}{D'(p)} - MC(S(p)) = 0 \end{equation} for all positive prices $p$. In simple cases, this function can be found explicitly. However, in more complicated cases, no explicit solution exists. 
## Initial tasks ``` import numpy as np import matplotlib.pyplot as plt from compecon import BasisChebyshev, NLP, demo ``` ## The model ### Parameters Here, the demand elasticity and the marginal cost function parameter are ``` alpha, eta = 1.0, 3.5 D = lambda p: p**(-eta) ``` ### Approximation structure A degree-25 Chebychev basis on the interval [0.5, 3.0] is selected; also, the associated collocation nodes `p` are computed. ``` n, a, b = 25, 0.5, 2.0 S = BasisChebyshev(n, a, b, labels=['price'], y=np.ones(n)) p = S.nodes S2 = BasisChebyshev(n, a, b, labels=['price'], l=['supply']) S2.y = np.ones_like(p) ``` ### Residual function Suppose, for example, that \begin{equation} D(p) = p^{-\eta} \quad\text{and}\quad MC(q) = \alpha\sqrt{q} + q^2 \end{equation} Then the functional equation to be solved for S(p), \begin{equation} \left[p - \frac{S(p)p^{\eta+1}}{\eta}\right] -\left[\alpha\sqrt{S(p)} + S(p)^2\right] = 0 \end{equation} has no known closed-form solution. ``` def resid(c): S.c = c # update interpolation coefficients q = S(p) # compute quantity supplied at price nodes marginal_income = p - q * (p ** (eta+1) / eta) marginal_cost = alpha * np.sqrt(q) + q ** 2 return marginal_income - marginal_cost ``` Notice that `resid` only takes one argument. The other parameters (`Q`, `p`, `eta`, `alpha`) should be declared as such in the main script, were Python's scoping rules will find them. ### Solve for effective supply function Class `NLP` defines nonlinear problems. It can be used to solve `resid` by Broyden's method. 
``` cournot = NLP(resid) S.c = cournot.broyden(S.c, tol=1e-12) ``` ### Plot demand and effective supply for m=5 firms ``` prices = np.linspace(a, b, 501) fig1, ax = plt.subplots() ax.plot(5 * S(prices), prices, label='Supply') ax.plot(D(prices), prices, label='Demand') ax.set(title='Cournot Effective Firm Supply Function', xlabel='Quantity', ylabel='Price', xlim=[0, 4], ylim=[0.5, 2]) ax.legend(); ``` ### Plot residual Notice that `resid` does not take explicit parameters, so to evaluate it when prices are `prices` we need to assign `p = prices`. In order to assess the quality of the approximation, one plots the residual function over the approximation domain. Here, the residual function is plotted by computing the residual at a refined grid of 501 equally spaced points. ``` p = prices fig2, ax = plt.subplots() ax.hlines(0, a, b, 'k', '--', lw=2) ax.plot(prices, resid(S.c)) ax.set(title='Residual Function for Cournot Problem', xlabel='Quantity', ylabel='Residual'); ``` ### Plot demand and effective supply for m=1, 3, 5, 10, 15, 20 firms ``` fig3, ax = plt.subplots(figsize=[9,4]) ax.set(title='Industry Supply and Demand Functions', xlabel='Quantity', ylabel='Price', xlim=[0, 12]) lcolor = [z['color'] for z in plt.rcParams['axes.prop_cycle']] for i, m in enumerate([1, 3, 5, 10, 15, 20]): ax.plot(m*S(prices), prices) # supply ax.annotate(f'm={m:d}', [m*S(1.2)+.025, 1.4-i/12], color=lcolor[i], fontsize=12) ax.plot(D(prices), prices, linewidth=4, color='grey') # demand ax.annotate('demand', [10, 0.6], color='grey', fontsize=12); ``` ### Plot equilibrium price as a function of number of firms m ``` m = np.arange(1,26) x0 = np.full_like(m, 0.7, dtype=float) #initial guess eqprices = NLP(lambda p: m*S(p) - D(p)).broyden(x0) fig4, ax = plt.subplots() ax.set(title='Cournot Equilibrium Price as Function of Industry Size', xlabel='Number of Firms', ylabel='Price') ax.plot(m, eqprices); ``` ### Save all figures to disc ``` #demo.savefig([fig1,fig2,fig3,fig4], name='demapp07') 
```
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib.ticker import StrMethodFormatter import scipy.stats as t plt.style.use("seaborn-deep") df = pd.read_csv("./time_evolution_10_levels.csv") def std_std(sample): n = len(sample) mu = t.moment(sample, moment=4) return (((mu - sample.std() ** 4) / n) ** 0.5) / (2 * sample.std()) bid_price_series = df['bid_price_0'] ask_price_series = df['ask_price_0'] mid_price_series = df["mid_price"] f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(8, 14)) ax1.scatter([t for t in range(len(bid_price_series))], bid_price_series, marker=".", linewidth=0, color=plt.rcParams['axes.prop_cycle'].by_key()['color'][0]) plt.xlabel("volume_bar_label") ax1.set_ylabel("$b(t)$", rotation=0) bid_volatility = np.log(np.array(bid_price_series[1:])/np.array(bid_price_series[:-1])) bid_volatility_error = std_std(bid_volatility) ax1.set_title(f"$V^b = ${bid_volatility.std():.1e} $\pm$ {bid_volatility_error:.0e}") ax2.scatter([t for t in range(len(ask_price_series))], ask_price_series, marker=".", linewidth=0, color=plt.rcParams['axes.prop_cycle'].by_key()['color'][1]) ax2.set_ylabel("$a(t)$", rotation=0) ask_volatility = np.log(np.array(ask_price_series[1:])/np.array(ask_price_series[:-1])) ask_volatility_error = std_std(ask_volatility) ax2.set_title(f"$V^a = ${ask_volatility.std():.2e} $\pm$ {ask_volatility_error:.0e}") ax3.scatter([t for t in range(len(mid_price_series))], mid_price_series, marker=".", linewidth=0, color=plt.rcParams['axes.prop_cycle'].by_key()['color'][2]) ax3.set_ylabel("$m(t)$", rotation=0) mid_volatility = np.log(np.array(mid_price_series[1:])/np.array(mid_price_series[:-1])) mid_volatility_error = std_std(mid_volatility) ax3.set_title(f"$V^m = ${mid_volatility.std():.2e} $\pm$ {mid_volatility_error:.0e}") plt.show() bid_volatility = [] ask_volatility = [] bid_volatility_error = [] ask_volatility_error = [] for level in range(10): bid_price_series = 
df.groupby("volume_bar_label")['bid_price_{}'.format(level)].min() ask_price_series = df.groupby("volume_bar_label")['ask_price_{}'.format(level)].max() bid_volatility.append(np.log(np.array(bid_price_series[1:])/np.array(bid_price_series[:-1]))) bid_volatility_error.append(std_std(bid_volatility[level])) bid_volatility[level] = np.std(bid_volatility[level]) ask_volatility.append(np.log(np.array(ask_price_series[1:])/np.array(ask_price_series[:-1]))) ask_volatility_error.append(std_std(ask_volatility[level])) ask_volatility[level] = np.std(ask_volatility[level]) f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(5, 10)) ax1.yaxis.set_major_formatter(StrMethodFormatter('{x:,.2e}')) ax1.errorbar(range(10), bid_volatility, bid_volatility_error, label="$V^b$", marker=".", capsize=3.0, fmt="--", color=plt.rcParams['axes.prop_cycle'].by_key()['color'][0]) ax1.set_title("$V^b$ for each level") plt.xlabel("Level") ax1.set_ylabel("Volatility") ax1.set_xticks(range(10)) ax1.legend() ax2.errorbar(range(10), ask_volatility, ask_volatility_error, label="$V^a$", marker=".", capsize=3.0, fmt="--", color=plt.rcParams['axes.prop_cycle'].by_key()['color'][1]) ax2.legend() ax2.set_title("$V^a$ for each level") ax2.yaxis.set_major_formatter(StrMethodFormatter('{x:,.2e}')) ax2.set_ylabel("Volatility") ax2.set_xticks(range(10)) plt.show() ```
github_jupyter
# Introduction This demo shows: - how to train machine learning model in k8s - how to serve machine learning model in k8s - how to provision machine learning environment in k8s #### Useful links: 1. ACK = Alibaba Container Service for Kubernetes : https://www.alibabacloud.com/product/kubernetes 2. arena repo: https://github.com/kubeflow/arena 3. arena example: https://github.com/jianhuashao/AliCloud_Demo_Container_Kubernetes/tree/master/arena # Session 1: create a managed k8s GPU cluster in ACK 1. assume a k8s GPU cluster has been created in ACK 2. more details on how to create a k8s cluster in ack: https://www.alibabacloud.com/help/doc-detail/85903.htm ``` %%time %%bash echo -e echo -e "#### lists nodes in k8s cluster" kubectl get nodes echo -e echo -e "#### lists nodes utilisation" kubectl top nodes %%time %%bash echo "# get node description" #kubectl get nodes | tail -1 | sed 's/ .*//' | kubectl describe node/cn-shanghai.192.168.0.174 %%time %%bash kubectl create namespace ml %%time %%bash kubectl get all --namespace ml %%time %%bash ## only run this to delete all resoruce created with this demo # kubectl delete --all pods --namespace ml # kubectl delete --all services --namespace ml # kubectl delete --all serviceaccount --namespace ml # kubectl delete --all rs --namespace ml %%time %%bash ## check if arena is installed and get version if it is installed arena version %%time %%bash ## install arena if it not installed ## pre-require golang echo -e "#### gopath: " $(go env GOPATH) rm -r $(go env GOPATH)/src/github.com/kubeflow/arena || true mkdir -p $(go env GOPATH)/src/github.com/kubeflow cd $(go env GOPATH)/src/github.com/kubeflow git clone https://github.com/kubeflow/arena.git cd $(go env GOPATH)/src/github.com/kubeflow/arena make cd $(go env GOPATH)/src/github.com/kubeflow ls -lha ./arena/bin cp $(go env GOPATH)/src/github.com/kubeflow/arena/bin/arena /usr/local/bin/ ls -lha /usr/local/bin/arena ## check which arena has been installed echo -e "\n#### check 
which arena " which arena ``` # Session 2: configure k8s environemnt - instakk kubectl - install arena_cli - install crd: tf - install operator: ps, mpi ``` %%time %%bash ## you do not need to pass namesapce in, as they have been hard-coding into the yaml file bellow. echo -e "\n#### install TFJob controller" kubectl apply -f ./arena/kubernetes-artifacts/jobmon/jobmon-role.yaml kubectl apply -f ./arena/kubernetes-artifacts/tf-operator/tf-crd.yaml kubectl apply -f ./arena/kubernetes-artifacts/tf-operator/tf-operator.yaml echo -e "\n#### install arena Dashboard" kubectl apply -f ./arena/kubernetes-artifacts/dashboard/dashboard.yaml echo -e "\n#### install MPIJob controller" kubectl apply -f arena/kubernetes-artifacts/mpi-operator/mpi-operator.yaml ``` # Session 3: model training ## to train distributed TensorFlow model in PS mode ``` Submit TFJob as training job. Usage: arena submit tfjob flags Aliases: tfjob, tf Flags: -a, --annotation stringArray the annotations --chief enable chief, which is required for estimator. --chief-cpu string the cpu resource to use for the Chief, like 1 for 1 core. --chief-memory string the memory resource to use for the Chief, like 1Gi. --chief-port int the port of the chief. --chief-selector stringArray assigning jobs with "Chief" role to some k8s particular nodes(this option would cover --selector), usage: "--chief-selector=key=value" --clean-task-policy string How to clean tasks after Training is done, only support Running, None. (default "Running") -d, --data stringArray specify the datasource to mount to the job, like <name_of_datasource>:<mount_point_on_job> --data-dir stringArray the data dir. If you specify /data, it means mounting hostpath /data into container path /data -e, --env stringArray the environment variables --evaluator enable evaluator, which is optional for estimator. --evaluator-cpu string the cpu resource to use for the evaluator, like 1 for 1 core. 
--evaluator-memory string the memory resource to use for the evaluator, like 1Gi. --evaluator-selector stringArray assigning jobs with "Evaluator" role to some k8s particular nodes(this option would cover --selector), usage: "--evaluator-selector=key=value" --gpus int the GPU count of each worker to run the training. -h, --help help for tfjob --image string the docker image name of training job --logdir string the training logs dir, default is /training_logs (default "/training_logs") --name string override name -p, --priority string priority class name --ps int the number of the parameter servers. --ps-cpu string the cpu resource to use for the parameter servers, like 1 for 1 core. --ps-image string the docker image for tensorflow workers --ps-memory string the memory resource to use for the parameter servers, like 1Gi. --ps-port int the port of the parameter server. --ps-selector stringArray assigning jobs with "PS" role to some k8s particular nodes(this option would cover --selector), usage: "--ps-selector=key=value" --rdma enable RDMA --retry int retry times. --selector stringArray assigning jobs to some k8s particular nodes, usage: "--selector=key=value" or "--selector key=value" --sync-image string the docker image of syncImage --sync-mode string syncMode: support rsync, hdfs, git --sync-source string sync-source: for rsync, it's like 10.88.29.56::backup/data/logoRecoTrain.zip; for git, it's like https://github.com/kubeflow/tf-operator.git --tensorboard enable tensorboard --tensorboard-image string the docker image for tensorboard (default "registry.cn-zhangjiakou.aliyuncs.com/tensorflow-samples/tensorflow:1.12.0-devel") --toleration stringArray tolerate some k8s nodes with taints,usage: "--toleration taint-key" or "--toleration all" --worker-cpu string the cpu resource to use for the worker, like 1 for 1 core. --worker-image string the docker image for tensorflow workers --worker-memory string the memory resource to use for the worker, like 1Gi. 
--worker-port int the port of the worker. --worker-selector stringArray assigning jobs with "Worker" role to some k8s particular nodes(this option would cover --selector), usage: "--worker-selector=key=value" --workers int the worker number to run the distributed training. (default 1) --working-dir string working directory to extract the code. If using syncMode, the $workingDir/code contains the code (default "/root") ``` ``` %%time %%bash # The following command is an example. In this example, # it defines 2 workers and 1 PS, and each worker has 1 GPU. # The source code of worker and PS are located in git, and the tensorboard are enabled. arena submit tf \ --name=tf-dist-git \ --gpus=1 \ --workers=2 \ --worker-image=tensorflow/tensorflow:1.5.0-devel-gpu \ --sync-mode=git \ --sync-source=https://github.com/cheyang/tensorflow-sample-code.git \ --ps=1 \ --ps-image=tensorflow/tensorflow:1.5.0-devel \ --tensorboard \ "python code/tensorflow-sample-code/tfjob/docker/v1alpha2/distributed-mnist/main.py --log_dir=/training_logs --data_dir=code/tensorflow-sample-code/data" \ --namespace ml ``` ## train distributed TensorFlow model in MPI mode ``` Submit MPIjob as training job. Usage: arena submit mpijob [flags] Aliases: mpijob, mpi, mj Flags: -a, --annotation stringArray the annotations --cpu string the cpu resource to use for the training, like 1 for 1 core. -d, --data stringArray specify the datasource to mount to the job, like <name_of_datasource>:<mount_point_on_job> --data-dir stringArray the data dir. If you specify /data, it means mounting hostpath /data into container path /data -e, --env stringArray the environment variables --gpus int the GPU count of each worker to run the training. -h, --help help for mpijob --image string the docker image name of training job --logdir string the training logs dir, default is /training_logs (default "/training_logs") --memory string the memory resource to use for the training, like 1Gi. 
--name string override name -p, --priority string priority class name --rdma enable RDMA --retry int retry times. --selector stringArray assigning jobs to some k8s particular nodes, usage: "--selector=key=value" or "--selector key=value" --sync-image string the docker image of syncImage --sync-mode string syncMode: support rsync, hdfs, git --sync-source string sync-source: for rsync, it's like 10.88.29.56::backup/data/logoRecoTrain.zip; for git, it's like https://github.com/kubeflow/tf-operator.git --tensorboard enable tensorboard --tensorboard-image string the docker image for tensorboard (default "registry.cn-zhangjiakou.aliyuncs.com/tensorflow-samples/tensorflow:1.12.0-devel") --toleration stringArray tolerate some k8s nodes with taints,usage: "--toleration taint-key" or "--toleration all" --workers int the worker number to run the distributed training. (default 1) --working-dir string working directory to extract the code. If using syncMode, the $workingDir/code contains the code (default "/root") Global Flags: --arena-namespace string The namespace of arena system service, like tf-operator (default "arena-system") --config string Path to a kube config. Only required if out-of-cluster --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") -n, --namespace string the namespace of the job (default "default") --pprof enable cpu profile --trace enable trace ``` ``` %%time %%bash # The following command is an example. In this example, # it defines 2 workers and 1 PS, and each worker has 1 GPU. # The source code of worker and PS are located in git, and the tensorboard are enabled. 
arena submit mpi --name=mpi-dist \ --gpus=1 \ --workers=2 \ --image=uber/horovod:0.13.11-tf1.10.0-torch0.4.0-py3.5 \ --env=GIT_SYNC_BRANCH=cnn_tf_v1.9_compatible \ --sync-mode=git \ --sync-source=https://github.com/tensorflow/benchmarks.git \ --tensorboard \ "mpirun python code/benchmarks/scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py --model resnet101 --batch_size 64 --variable_update horovod --train_dir=/training_logs --summary_verbosity=3 --save_summaries_steps=10" \ --namespace ml %%time %%bash arena list --namespace ml %%time %%bash arena top job --namespace ml %%time %%bash arena top node --namespace ml %%time %%bash arena get tf-dist-git --namespace ml %%time %%bash arena get mpi-dist --namespace ml %%time %%bash arena logs tf-dist-git --namespace ml %%time %%bash arena logs mpi-dist --namespace ml ``` # Session 4: model serving ``` Submit tensorflow serving job to deploy and serve machine learning models. Usage: arena serve tensorflow [flags] Aliases: tensorflow, tf Flags: --command string the command will inject to container's command. --cpu string the request cpu of each replica to run the serve. -d, --data stringArray specify the trained models datasource to mount for serving, like <name_of_datasource>:<mount_point_on_job> --enable-istio enable Istio for serving or not (disable Istio by default) -e, --envs stringArray the environment variables --expose-service expose service using Istio gateway for external access or not (not expose by default) --gpumemory int the limit GPU memory of each replica to run the serve. --gpus int the limit GPU count of each replica to run the serve. -h, --help help for tensorflow --image string the docker image name of serve job, and the default image is tensorflow/serving:latest (default "tensorflow/serving:latest") --image-pull-policy string the policy to pull the image, and the default policy is IfNotPresent (default "IfNotPresent") --memory string the request memory of each replica to run the serve. 
--model-name string the model name for serving --model-path string the model path for serving in the container --modelConfigFile string Corresponding with --model_config_file in tensorflow serving --name string the serving name --port int the port of tensorflow gRPC listening port (default 8500) --replicas int the replicas number of the serve job. (default 1) --restfulPort int the port of tensorflow RESTful listening port (default 8501) --version string the serving version --versionPolicy string support latest, latest:N, specific:N, all Global Flags: --arena-namespace string The namespace of arena system service, like tf-operator (default "arena-system") --config string Path to a kube config. Only required if out-of-cluster --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") -n, --namespace string the namespace of the job (default "default") --pprof enable cpu profile --trace enable trace ``` ``` %%time %%bash arena serve tf \ --name=mymnist \ --model-name=mnist \ --image=tensorflow/serving:latest \ --data=tfmodel:/tfmodel \ --model-path=/tfmodel/mnist \ --versionPolicy=specific:1 \ --namespace ml %%time %%bash arena serve list --namespace ml # use sshuttle for quick test # sshuttle -r root@47.103.139.226 172.0.0.0/8 # sshuttle -r root@47.103.139.226 0.0.0.0/0 d7 = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3294117748737335, 0.7254902124404907, 0.6235294342041016, 0.5921568870544434, 0.2352941334247589, 0.1411764770746231, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8705883026123047, 0.9960784912109375, 0.9960784912109375, 0.9960784912109375, 0.9960784912109375, 0.9450981020927429, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.6666666865348816, 0.2039215862751007, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.26274511218070984, 0.44705885648727417, 0.2823529541492462, 0.44705885648727417, 0.6392157077789307, 0.8901961445808411, 0.9960784912109375, 0.8823530077934265, 0.9960784912109375, 0.9960784912109375, 0.9960784912109375, 0.9803922176361084, 0.8980392813682556, 0.9960784912109375, 0.9960784912109375, 0.5490196347236633, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06666667014360428, 0.25882354378700256, 0.05490196496248245, 0.26274511218070984, 0.26274511218070984, 0.26274511218070984, 0.23137256503105164, 0.08235294371843338, 0.9254902601242065, 0.9960784912109375, 0.41568630933761597, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.32549020648002625, 0.9921569228172302, 0.8196079134941101, 0.07058823853731155, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.08627451211214066, 0.9137255549430847, 1.0, 0.32549020648002625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5058823823928833, 0.9960784912109375, 0.9333333969116211, 0.1725490242242813, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.23137256503105164, 0.9764706492424011, 0.9960784912109375, 0.24313727021217346, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5215686559677124, 0.9960784912109375, 0.7333333492279053, 0.019607843831181526, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03529411926865578, 0.803921639919281, 0.9725490808486938, 0.22745099663734436, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4941176772117615, 0.9960784912109375, 0.7137255072593689, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.29411765933036804, 0.9843137860298157, 0.9411765336990356, 0.22352942824363708, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.07450980693101883, 0.8666667342185974, 0.9960784912109375, 0.6509804129600525, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.011764707043766975, 0.7960785031318665, 0.9960784912109375, 0.8588235974311829, 0.13725490868091583, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.14901961386203766, 0.9960784912109375, 0.9960784912109375, 0.3019607961177826, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.12156863510608673, 0.8784314393997192, 0.9960784912109375, 0.45098042488098145, 0.003921568859368563, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5215686559677124, 0.9960784912109375, 0.9960784912109375, 0.2039215862751007, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2392157018184662, 0.9490196704864502, 0.9960784912109375, 0.9960784912109375, 0.2039215862751007, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4745098352432251, 0.9960784912109375, 0.9960784912109375, 0.8588235974311829, 0.1568627506494522, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4745098352432251, 0.9960784912109375, 0.8117647767066956, 0.07058823853731155, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] d = d7 import os import json import ast import numpy as numpy import PIL.Image as pil testImage = (numpy.array(d, dtype='float')).reshape(28,28) img = pil.fromarray(numpy.uint8(testImage * 255) , 'L') print("#### preview the image for testing") img %%time %%bash # pass an image of 7 curl \ -X POST http://172.21.11.198:8501/v1/models/mnist:predict \ -d '{"signature_name": "predict_images", "instances": [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3294117748737335, 0.7254902124404907, 0.6235294342041016, 0.5921568870544434, 0.2352941334247589, 0.1411764770746231, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8705883026123047, 0.9960784912109375, 0.9960784912109375, 0.9960784912109375, 0.9960784912109375, 0.9450981020927429, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.7764706611633301, 0.6666666865348816, 0.2039215862751007, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.26274511218070984, 0.44705885648727417, 0.2823529541492462, 0.44705885648727417, 0.6392157077789307, 0.8901961445808411, 0.9960784912109375, 0.8823530077934265, 0.9960784912109375, 0.9960784912109375, 0.9960784912109375, 0.9803922176361084, 0.8980392813682556, 0.9960784912109375, 0.9960784912109375, 0.5490196347236633, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06666667014360428, 0.25882354378700256, 0.05490196496248245, 0.26274511218070984, 0.26274511218070984, 0.26274511218070984, 0.23137256503105164, 0.08235294371843338, 0.9254902601242065, 0.9960784912109375, 0.41568630933761597, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.32549020648002625, 0.9921569228172302, 0.8196079134941101, 0.07058823853731155, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.08627451211214066, 0.9137255549430847, 1.0, 0.32549020648002625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5058823823928833, 0.9960784912109375, 0.9333333969116211, 0.1725490242242813, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.23137256503105164, 0.9764706492424011, 0.9960784912109375, 0.24313727021217346, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5215686559677124, 0.9960784912109375, 0.7333333492279053, 0.019607843831181526, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03529411926865578, 0.803921639919281, 0.9725490808486938, 0.22745099663734436, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4941176772117615, 0.9960784912109375, 0.7137255072593689, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.29411765933036804, 0.9843137860298157, 0.9411765336990356, 0.22352942824363708, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.07450980693101883, 0.8666667342185974, 0.9960784912109375, 0.6509804129600525, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.011764707043766975, 0.7960785031318665, 0.9960784912109375, 0.8588235974311829, 0.13725490868091583, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.14901961386203766, 0.9960784912109375, 0.9960784912109375, 0.3019607961177826, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.12156863510608673, 
0.8784314393997192, 0.9960784912109375, 0.45098042488098145, 0.003921568859368563, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5215686559677124, 0.9960784912109375, 0.9960784912109375, 0.2039215862751007, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2392157018184662, 0.9490196704864502, 0.9960784912109375, 0.9960784912109375, 0.2039215862751007, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4745098352432251, 0.9960784912109375, 0.9960784912109375, 0.8588235974311829, 0.1568627506494522, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4745098352432251, 0.9960784912109375, 0.8117647767066956, 0.07058823853731155, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]}' %%time %%bash # arena serve delete mymnist --namespace ml ``` # Session 5: create a jupyter notebook ``` %%time %%bash # foo.bar.com can be replace to your own domain name domain="foo.bar.com" openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=$domain/O=$domain" ls -lha . 
%%time %%bash kubectl delete secret notebook-secret --namespace ml # delete it if exists kubectl create secret tls notebook-secret --key tls.key --cert tls.crt --namespace ml %%time %%bash curl -s https://raw.githubusercontent.com/AliyunContainerService/ai-starter/master/scripts/install_notebook.sh | \ bash -s -- \ --notebook-name susan \ --ingress --ingress-domain foo.bar.com --ingress-secret notebook-secret \ --pvc-name tfmodel \ --namespace ml %%time %%bash kubectl get pods --namespace ml %%time %%bash curl -s https://raw.githubusercontent.com/AliyunContainerService/ai-starter/master/scripts/print_notebook.sh | \ bash -s -- --notebook-name susan --namespace ml %%bash open http://172.20.0.13:8888 ``` # Session 6: model saving and transfer to continue ...
github_jupyter
## Sample 4.3 Random drawing samples from a normal distribution ``` %matplotlib inline import numpy as np import scipy.stats as stats import matplotlib.pyplot as plt import matplotlib matplotlib.rc('xtick', labelsize=12) matplotlib.rc('ytick', labelsize=12) #generate random number following a normal # density with location of 1.0 and scale of 3.0 mu = 1.0 sigma2 = 9.0 sigma = np.sqrt(sigma2) #method 1: use numpy random to generate normal random samples x1 = np.random.normal(loc=mu,scale=sigma,size=100000) #method 2: use scipy stats to generate normal random samples pnorm = stats.norm(loc=mu, scale=sigma) x2 = pnorm.rvs(size=100000) #analyze the random samples with a histogram xgrid = np.arange(-10,12,0.5) xcenter = (xgrid[1:]+xgrid[0:len(xgrid)-1])/2. hx1,xedge = np.histogram(x1,xgrid) hx2,xedge = np.histogram(x2,xgrid) #draw the histogram fig = plt.figure(figsize=[10,5]) ax = fig.add_subplot(111) e1, = ax.plot(xcenter,hx1,'ko-') e2, = ax.plot(xcenter,hx2,'ro-') #you can add legends for symbols in the figure like this plt.legend([e1,e2],['numpy.random.normal','scipy.stats.norm.rvs'],\ fontsize=12) ax.set_xlabel('x',fontsize=12) # fig.show() fig.savefig('normrand_hist.png',bbox_inches='tight') print('Normal distribution') print('mu=%(mu).3f\tsigma^2=%(sig).3f' %{'mu':mu,'sig':sigma2}) print('From numpy: E(x)=%(m).3f\tD(x)=%(d).3f' % {'m':np.mean(x1), 'd':np.var(x1)}) print('From scipy: E(x)=%(m).3f\tD(x)=%(d).3f' % {'m':np.mean(x2), 'd':np.var(x2)}) ''' average of two Normal samples obtain a new random normal sample with narrower distribution ''' y = (x1+x2)/2 #xgrid is same as above hy,xedge = np.histogram(y,xgrid) #draw the histogram fig = plt.figure(figsize=[10,5]) ax = fig.add_subplot(111) e1, = ax.plot(xcenter,hx2,'ko-') e2, = ax.plot(xcenter,hy,'ro-') plt.legend([e1,e2],[r'$N(0,9)$',r'$[N(0,9)+N(0,9)]/2$'],fontsize=12) ax.set_xlabel('x',fontsize=12) # fig.show() ''' sum of squared follows chi-2 distribution ''' z = (x1**2+x2**2) xgrid = np.arange(0,160,4) 
xcenter = (xgrid[1:]+xgrid[0:len(xgrid)-1])/2. hz,xedge = np.histogram(z,xgrid) #draw the histogram fig = plt.figure(figsize=[10,5]) ax = fig.add_subplot(111) e1, = ax.plot(xcenter,hz,'ko-') ax.set_xlabel('x',fontsize=12) # fig.show() ''' random draw a sample following a 2D Gaussian distribution ''' mu = np.array([1.,1.]) s1 = 1. #sigma_1^2 s2 = 0.2 #sigma_2^2 rho = 0.8 #rho sig = np.array([[s1, rho*np.sqrt(s1*s2)],[rho*np.sqrt(s1*s2),s2]]) #covariance matrix xx = np.random.multivariate_normal(mu,sig,1000000) #generate random numbers from 2D normal distribution xgrid = np.arange(-2.,4.,0.2) ygrid = np.arange(-2.,4.,0.2) xcenter = (xgrid[0:-1]+xgrid[1:])/2. ycenter = (ygrid[0:-1]+ygrid[1:])/2. #make 2d histogram hxx,xedge,yedge = np.histogram2d(xx[:,0],xx[:,1],bins=[xgrid,ygrid]) fig = plt.figure(figsize=[6,6]) ax = fig.add_subplot(111) ax.plot(xx[0:1000,0],xx[0:1000,1],'k.',markersize=1)#random number ax.contour(xcenter,ycenter,hxx.T)#contour ax.set_xlabel(r'$x_1$',fontsize=20) ax.set_ylabel(r'$x_2$',fontsize=20) fig.show() ```
github_jupyter
## Visualizing data in python In this exercise we will use the [nilearn](https://nilearn.github.io) package to visualize neuroimaging data. ``` import os import collections import xml.etree.ElementTree as ET import nibabel from nilearn import plotting import matplotlib.pyplot as plt %matplotlib inline ``` For this exercise we will be using some of the data files from the FSL distribution as well as some other files downloaded from neurovault. ``` FSLDIR=os.environ['FSLDIR'] print(FSLDIR) T1file=os.path.join(FSLDIR,'data/standard/MNI152_T1_2mm_brain.nii.gz') statmap='../data/neurovault/nv304.nii.gz' tractfile=os.path.join(FSLDIR,'data/atlases/JHU/JHU-ICBM-tracts-prob-2mm.nii.gz') ``` First, plot the anatomical image using the ```plot_anat``` function from nilearn.plotting. ``` plotting.plot_anat(T1file) ``` **Problem 1**: Plot the statistical map from our example (which we loaded above into the variable ```statmap```) overlaid on the T1-weighted anatomical image, using the appropriate plotting command from nilearn.plotting. Threshold the statistical image at Z=3. The result should look like this: ![Solution](Problem1_solution.png "Intended solution for Problem 1") ``` # INSERT SOLUTION HERE ``` # Loading FSL atlases Now let's load the white matter atlas from FSL and display it using the ```prob_atlas``` function from nilearn.plotting. ``` p=plotting.plot_prob_atlas(tractfile) ``` Plotting all 20 of the tracts at once is not very helpful, so let's focus in on a specific tract. First we need to load the metadata file so that we can find out which image is associated with which tract. 
``` def get_atlas_metadata(atlasfile): """ function to load metadata from FSL atlas """ tree=ET.parse(atlasfile) md=collections.OrderedDict() for i in tree.iter(): if 'index' in i.attrib: md[int(i.attrib['index'])]=i.text return md # load the tract image and data/metadata tractimg=nibabel.load(tractfile) tractdata=tractimg.get_data() jhu_metadata_file=os.path.join(FSLDIR,'data/atlases/JHU-tracts.xml') jhu_md=get_atlas_metadata(jhu_metadata_file) ``` **Problem 2**: Loop through each of the regions in the metadata variable ```jhu_md``` and print the tract number followed by the name of the structure. The output should look like this: ``` 0 Anterior thalamic radiation L 1 Anterior thalamic radiation R 2 Corticospinal tract L 3 Corticospinal tract R ... ``` and so on for all of the tracts ``` # INSERT SOLUTION HERE ``` **Problem 3**: Now let's loop through and display each of these tracts using a "glass brain" projection. The code snippet below does some of the work - you need to fill in the blanks as noted in the comments. Here is an example of what the output should look like for the first tract: ![Solution](Problem3_solution.png "Intended solution for Problem 3") ``` # loop through and display each tract as a glass brain for i in jhu_md: # create temporary 3d Nifti image for each tract td=tractdata[:,:,:,i] ti=nibabel.Nifti1Image(td,affine=tractimg.affine) # plot the image as a "glass brain" using nilearn.plotting with the title as shown in the example above # INSERT PLOTTING FUNCTION HERE ```
github_jupyter
# PLEASE NOTE: Please run this notebook OUTSIDE a Spark notebook as it should run in a plain Default Python 3.6 Free Environment This is the last assignment for the Coursera course "Advanced Machine Learning and Signal Processing" Just execute all cells one after the other and you are done - just note that in the last one you should update your email address (the one you've used for coursera) and obtain a submission token, you get this from the programming assignment directly on coursera. Please fill in the sections labelled with "###YOUR_CODE_GOES_HERE###" The purpose of this assignment is to learn how feature engineering boosts model performance. You will apply Discrete Fourier Transformation on the accelerometer sensor time series and therefore transforming the dataset from the time to the frequency domain. After that, you’ll use a classification algorithm of your choice to create a model and submit the new predictions to the grader. Done. ``` from IPython.display import Markdown, display def printmd(string): display(Markdown('# <span style="color:red">'+string+'</span>')) if ('sc' in locals() or 'sc' in globals()): printmd('<<<<<!!!!! It seems that you are running in a IBM Watson Studio Apache Spark Notebook. Please run it in an IBM Watson Studio Default Runtime (without Apache Spark) !!!!!>>>>>') !pip install pyspark==2.4.5 !pip install https://github.com/IBM/coursera/blob/master/systemml-1.3.0-SNAPSHOT-python.tar.gz?raw=true from pyspark import SparkContext, SparkConf from pyspark.sql import SQLContext, SparkSession from pyspark.sql.types import StructType, StructField, DoubleType, IntegerType, StringType sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]")) from pyspark.sql import SparkSession spark = SparkSession \ .builder \ .getOrCreate() ``` So the first thing we need to ensure is that we are on the latest version of SystemML, which is 1.3.0 (as of 20th March'19) Please use the code block below to check if you are already on 1.3.0 or higher. 
1.3 contains a necessary fix, that's we are running against the SNAPSHOT ``` !mkdir -p /home/dsxuser/work/systemml from systemml import MLContext, dml ml = MLContext(spark) ml.setConfigProperty("sysml.localtmpdir", "mkdir /home/dsxuser/work/systemml") print(ml.version()) if not ml.version() == '1.3.0-SNAPSHOT': raise ValueError('please upgrade to SystemML 1.3.0, or restart your Kernel (Kernel->Restart & Clear Output)') !wget https://github.com/IBM/coursera/blob/master/coursera_ml/shake.parquet?raw=true !mv shake.parquet?raw=true shake.parquet ``` Now it’s time to read the sensor data and create a temporary query table. ``` df=spark.read.parquet('shake.parquet') df.show() df.printSchema() !pip install pixiedust import pixiedust display(df) df.createOrReplaceTempView("df") ``` We’ll use Apache SystemML to implement Discrete Fourier Transformation. This way all computation continues to happen on the Apache Spark cluster for advanced scalability and performance. As you’ve learned from the lecture, implementing Discrete Fourier Transformation in a linear algebra programming language is simple. Apache SystemML DML is such a language and as you can see the implementation is straightforward and doesn’t differ too much from the mathematical definition (Just note that the sum operator has been swapped with a vector dot product using the %*% syntax borrowed from R ): <img style="float: left;" src="https://wikimedia.org/api/rest_v1/media/math/render/svg/1af0a78dc50bbf118ab6bd4c4dcc3c4ff8502223"> ``` dml_script = ''' PI = 3.141592654 N = nrow(signal) n = seq(0, N-1, 1) k = seq(0, N-1, 1) M = (n %*% t(k))*(2*PI/N) Xa = cos(M) %*% signal Xb = sin(M) %*% signal DFT = cbind(Xa, Xb) ''' ``` Now it’s time to create a function which takes a single row Apache Spark data frame as argument (the one containing the accelerometer measurement time series for one axis) and returns the Fourier transformation of it. 
In addition, we are adding an index column for later joining all axis together and renaming the columns to appropriate names. The result of this function is an Apache Spark DataFrame containing the Fourier Transformation of its input in two columns. ``` from pyspark.sql.functions import monotonically_increasing_id def dft_systemml(signal,name): prog = dml(dml_script).input('signal', signal).output('DFT') return ( #execute the script inside the SystemML engine running on top of Apache Spark ml.execute(prog) #read result from SystemML execution back as SystemML Matrix .get('DFT') #convert SystemML Matrix to ApacheSpark DataFrame .toDF() #rename default column names .selectExpr('C1 as %sa' % (name), 'C2 as %sb' % (name)) #add unique ID per row for later joining .withColumn("id", monotonically_increasing_id()) ) ``` Now it’s time to create individual DataFrames containing only a subset of the data. We filter simultaneously for accelerometer each sensor axis and one for each class. This means you’ll get 6 DataFrames. Please implement this using the relational API of DataFrames or SparkSQL. Please use class 1 and 2 and not 0 and 1. <h1><span style="color:red">Please make sure that each DataFrame has only ONE colum (only the measurement, eg. not CLASS column)</span></h1> ``` from pyspark.sql.functions import countDistinct df.select(countDistinct('CLASS')).show() x0 = df.filter(df.CLASS == 0).select('X') y0 = df.filter(df.CLASS == 0).select('Y') z0 = df.filter(df.CLASS == 0).select('Z') x1 = df.filter(df.CLASS == 1).select('X') y1 = df.filter(df.CLASS == 1).select('Y') z1 = df.filter(df.CLASS == 1).select('Z') ``` Since we’ve created this cool DFT function before, we can just call it for each of the 6 DataFrames now. And since the result of this function call is a DataFrame again we can use the pyspark best practice in simply calling methods on it sequentially. So what we are doing is the following: - Calling DFT for each class and accelerometer sensor axis. 
- Joining them together on the ID column. - Re-adding a column containing the class index. - Stacking both Dataframes for each classes together ``` from pyspark.sql.functions import lit df_class_0 = dft_systemml(x0,'x') \ .join(dft_systemml(y0,'y'), on=['id'], how='inner') \ .join(dft_systemml(z0,'z'), on=['id'], how='inner') \ .withColumn('class', lit(0)) df_class_1 = dft_systemml(x1,'x') \ .join(dft_systemml(y1,'y'), on=['id'], how='inner') \ .join(dft_systemml(z1,'z'), on=['id'], how='inner') \ .withColumn('class', lit(1)) df_dft = df_class_0.union(df_class_1) df_dft.show() ``` Please create a VectorAssembler which consumes the newly created DFT columns and produces a column “features” ``` from pyspark.ml.feature import VectorAssembler vectorAssembler = VectorAssembler(inputCols=['xa', 'xb', 'ya', 'yb', 'za', 'zb'], outputCol='features') ``` Please insatiate a classifier from the SparkML package and assign it to the classifier variable. Make sure to set the “class” column as target. ``` from pyspark.ml.classification import GBTClassifier classifier = GBTClassifier(featuresCol='features', labelCol='class', maxIter=10) ``` Let’s train and evaluate… ``` from pyspark.ml import Pipeline pipeline = Pipeline(stages=[vectorAssembler, classifier]) model = pipeline.fit(df_dft) prediction = model.transform(df_dft) prediction.show() from pyspark.ml.evaluation import MulticlassClassificationEvaluator binEval = MulticlassClassificationEvaluator().setMetricName("accuracy") .setPredictionCol("prediction").setLabelCol("class") binEval.evaluate(prediction) ``` If you are happy with the result (I’m happy with > 0.8) please submit your solution to the grader by executing the following cells, please don’t forget to obtain an assignment submission token (secret) from the Courera’s graders web page and paste it to the “secret” variable below, including your email address you’ve used for Coursera. 
``` !rm -Rf a2_m4.json prediction = prediction.repartition(1) prediction.write.json('a2_m4.json') !rm -f rklib.py !wget wget https://raw.githubusercontent.com/IBM/coursera/master/rklib.py from rklib import zipit zipit('a2_m4.json.zip','a2_m4.json') !base64 a2_m4.json.zip > a2_m4.json.zip.base64 from rklib import submit key = "-fBiYHYDEeiR4QqiFhAvkA" part = "IjtJk" email = ###YOUR_CODE_GOES_HERE### submission_token = ###YOUR_CODE_GOES_HERE### # (have a look here if you need more information on how to obtain the token https://youtu.be/GcDo0Rwe06U?t=276) with open('a2_m4.json.zip.base64', 'r') as myfile: data=myfile.read() submit(email, submission_token, key, part, [part], data) ```
github_jupyter
<h1 style="text-align:center">"Zeros" in a Forced Response</h1> <h3 style="text-align:center">MCHE 485: Mechanical Vibrations</h3> <p style="text-align:center">Dr. Joshua Vaughan <br> <a href="mailto:joshua.vaughan@louisiana.edu">joshua.vaughan@louisiana.edu</a><br> http://www.ucs.louisiana.edu/~jev9637/ </p> <p style="text-align:center"> <img src="http://shared.crawlab.org/FourMass_5Spring_Undamped.png" alt="A Four-Mass-Spring System" width=100%/><br> <strong>Figure 1: A Four-Mass-Spring System with Excitation Force on the First Mass</strong> </p> This notebook demonstrates the eigenvalue/eigenvector problem using a four-mass-spring-damper system shown in Figure 1. We'll just look at one example set of parameters. The same techniques apply for other parameters and for larger matrices. The equations of motion for the system are: $ \quad m_1 \ddot{x}_1 + (k_1+k_2)x_1 - k_2 x_2 = f $ $ \quad m_2 \ddot{x}_2 -k_2 x_1 + (k_2 + k_3)x_2 - k_3 x_3 = 0 $ $ \quad m_3 \ddot{x}_3 -k_3 x_2 + (k_3 + k_4)x_3 - k_4 x_4 = 0 $ $ \quad m_4 \ddot{x}_4 -k_4 x_3 + (k_4 + k_5)x_4 = 0 $ We could also write these equations in matrix form: $ \quad \begin{bmatrix} m_1 & 0 & 0 & 0\\ 0 & m_2 & 0 & 0\\ 0 & 0 & m_3 & 0\\ 0 & 0 & 0 & m_4\\ \end{bmatrix}\begin{bmatrix}\ddot{x}_1 \\ \ddot{x}_2 \\ \ddot{x}_3\\ \ddot{x}_4\end{bmatrix} + % \begin{bmatrix} k_1 + k_2 & -k_2 & 0 & 0 \\ -k_2 & k_2 + k_3 & -k_3 & 0 \\ 0 & -k_3 & k_3 + k_4 & -k_4 \\ 0 & 0 & -k_4 & k_4+k_5\end{bmatrix}\begin{bmatrix}x_1 \\ x_2\\ x_3\\ x_4\end{bmatrix} = \begin{bmatrix}f \\ 0 \\ 0 \\ 0 \end{bmatrix}$ Define $ \quad M = \begin{bmatrix} m_1 & 0 & 0 & 0\\ 0 & m_2 & 0 & 0\\ 0 & 0 & m_3 & 0\\ 0 & 0 & 0 & m_4\\ \end{bmatrix} $ and $ \quad K = \begin{bmatrix} k_1 + k_2 & -k_2 & 0 & 0 \\ -k_2 & k_2 + k_3 & -k_3 & 0 \\ 0 & -k_3 & k_3 + k_4 & -k_4 \\ 0 & 0 & -k_4 & k_4+k_5\end{bmatrix} $ Using $M$ and $K$, we want to solve: $ \quad \left[K - \omega^2 M\right]\bar{X} = 0 $ for $\bar{X}$. This is an eigenvalue problem. 
For information on how to obtain these equations, you can see the lectures at the [class website](http://www.ucs.louisiana.edu/~jev9637/MCHE485.html). We'll use the [Scipy version of the linear algebra module](http://docs.scipy.org/doc/scipy-0.13.0/reference/generated/scipy.linalg.eigh.html). It allows us to solve the "general" eignevalue problem. ``` import numpy as np # We want our plots to be displayed inline, not in a separate window %matplotlib inline # Import the plotting functions # Note: Using the 'from module import *' notation is usually a bad idea. import matplotlib.pyplot as plt # Let's also improve the printing of NumPy arrays. np.set_printoptions(precision=3, suppress=True) ``` To see how to solve this eigenvalue problem, we will use some example parameters, set up below. All the spring constants are equal and the masses are equal. ``` # Define the matrices m1 = 1.0 m2 = 1.0 m3 = 1.0 m4 = 1.0 k1 = 4.0 k2 = 4.0 k3 = 4.0 k4 = 4.0 k5 = 4.0 M = np.array([[m1, 0, 0, 0], [0, m2, 0, 0], [0, 0, m3, 0], [0, 0, 0, m4]]) K = np.array([[k1 + k2, -k2, 0, 0], [-k2, k2 + k3, -k3, 0], [0, -k3, k3 + k4, -k4], [0, 0, -k4, k4+k5]]) # We'll use the scipy version of the linear algebra from scipy import linalg eigenvals, eigenvects = linalg.eigh(K,M) ``` The linalg.eigh function returns two arrays, one of the eigenvalues and one of the eigenvectors. The eigenvalues are the square of the two natural frequencies. The eigenvectors are returned in normalized form, with each ''row'' of the array representing an eigenvector. 
``` print('\n') print('The resulting eigenalues are {:.2f}, {:.2f}, {:.2f}, and {:.2f}.'.format(eigenvals[0], eigenvals[1], eigenvals[2], eigenvals[3])) print('\n') print('So the natrual frequencies are {:.2f}rad/s, {:.2f}rad/s, {:.2f}rad/s, and {:.2f}rad/s.'.format(np.sqrt(eigenvals[0]), np.sqrt(eigenvals[1]), np.sqrt(eigenvals[2]), np.sqrt(eigenvals[3]))) print('\n') print('\n') print('The first eigenvector is ' + str(eigenvects[:,0]) + '.') print('\n') print('The second eigenvector is ' + str(eigenvects[:,1]) + '.') print('\n') print('The third eigenvector is ' + str(eigenvects[:,2]) + '.') print('\n') print('The fourth eigenvector is ' + str(eigenvects[:,3]) + '.') print('\n') ``` # Responses Now, let's look at the response and see how it reflects the four modes of the system. ``` # Define the equations of motion # Define the system as a series of 1st order ODEs (beginnings of state-space form) def eq_of_motion(w, t, p): """ Defines the differential equations for the coupled spring-mass system. Arguments: w : vector of the state variables: w = [x1, x1_dot, x2, x2_dot, x3, x3_dot, x4, x4_dot] t : time p : vector of the parameters: p = [m1, m2, m3, m4, k1, k2, k3, k4, k5] """ x1, x1_dot, x2, x2_dot, x3, x3_dot, x4, x4_dot = w m1, m2, m3, m4, k1, k2, k3, k4, k5 = p # Create sysODE = (x', x_dot'): - Here, we're assuming f(t) = 0 sysODE = [x1_dot, (-(k1+k2)*x1 + k2*x2) / m1, x2_dot, (k2*x1 - (k2+k3)*x2 + k3*x3) / m2, x3_dot, (k3*x2 - (k3+k4)*x3 + k4*x4) / m3, x4_dot, (k4*x3 - (k4+k5)*x4) / m4] return sysODE # Import the ODE solver from scipy.integrate import odeint # Set up simulation parameters # ODE solver parameters abserr = 1.0e-9 relerr = 1.0e-9 max_step = 0.01 stoptime = 10.0 numpoints = 10001 # Create the time samples for the output of the ODE solver t = np.linspace(0.0,stoptime,numpoints) ``` ## Example Free Vibration Let's start by looking at some free vibration. For this set of parameters. 
In the code below, we choose initial conditions: $ \quad x_1(0) = x_2(0) = x_0$ $ \quad x_3(0) = x_4(0) = 0$ and $ \quad \dot{x}_1(0) = \dot{x}_2(0) = \dot{x}_3(0) = \dot{x}_4(0) = 0$ ``` # Initial conditions x1_init = 0.5 # initial x1 position x1_dot_init = 0.0 # initial x1 velocity x2_init = 0.5 # initial x2 position x2_dot_init = 0.0 # initial x2 velocity x3_init = 0.0 x3_dot_init = 0.0 x4_init = 0.0 x4_dot_init = 0.0 # Pack the parameters and initial conditions into arrays p = [m1, m2, m3, m4, k1, k2, k3, k4, k5] x0 = [x1_init, x1_dot_init, x2_init, x2_dot_init, x3_init, x3_dot_init, x4_init, x4_dot_init] # Call the ODE solver. resp = odeint(eq_of_motion, x0, t, args=(p,), atol=abserr, rtol=relerr, hmax=max_step) # Set the plot size - 3x2 aspect ratio is best fig = plt.figure(figsize=(6,4)) ax = plt.gca() plt.subplots_adjust(bottom=0.17,left=0.17,top=0.96,right=0.96) # Change the axis units to serif plt.setp(ax.get_ymajorticklabels(),family='serif',fontsize=18) plt.setp(ax.get_xmajorticklabels(),family='serif',fontsize=18) ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') # Turn on the plot grid and set appropriate linestyle and color ax.grid(True,linestyle=':',color='0.75') ax.set_axisbelow(True) # Define the X and Y axis labels plt.xlabel('Time (s)',family='serif',fontsize=22,weight='bold',labelpad=5) plt.ylabel('Position (m)',family='serif',fontsize=22,weight='bold',labelpad=10) plt.plot(t,resp[:,0],linewidth=2,label=r'$x_1$') plt.plot(t,resp[:,2],linewidth=2,linestyle="--",label=r'$x_2$') plt.plot(t,resp[:,4],linewidth=2,linestyle="-.",label=r'$x_3$') plt.plot(t,resp[:,6],linewidth=2,linestyle=":",label=r'$x_4$') # uncomment below and set limits if needed # plt.xlim(0,5) plt.ylim(-1,1.35) plt.yticks([-0.5,0,0.5,1.0],['$-x_0$','$0$','$x_0$','$2x_0$']) # Create the legend, then fix the fontsize leg = plt.legend(loc='upper right', ncol = 2, fancybox=True) ltext = 
leg.get_texts() plt.setp(ltext,family='serif',fontsize=18) # Adjust the page layout filling the page using the new tight_layout command plt.tight_layout(pad=0.5) # save the figure as a high-res pdf in the current folder # It's saved at the original 6x4 size # plt.savefig('FreeVibration_mode_1.pdf') fig.set_size_inches(9,6) # Resize the figure for better display in the notebook ``` ## Frequency Response – Force on $m_1$ Now, let's look at the frequency response of this system. It will tell us how many frequencies there can be zero amplitude response for each mass. ``` F1 = 1.0 F2 = 0.0 F3 = 0.0 F4 = 0.0 F = [F1, F2, F3, F4] w = np.linspace(0,6,1800) X = np.zeros((len(w),4)) # This is (K-w^2 M)^-1 * F for ii, freq in enumerate(w): X[ii,:] = np.dot(linalg.inv(K - freq**2 * M), F) # Let's mask the discontinuity, so it isn't plotted pos = np.where(np.abs(X[:,0]) >= 15) X[pos,:] = np.nan w[pos] = np.nan # Set the plot size - 3x2 aspect ratio is best fig = plt.figure(figsize=(12,8)) plt.subplots_adjust(bottom=0.17,left=0.17,top=0.96,right=0.96) # Change the axis units to CMU Serif plt.setp(ax.get_ymajorticklabels(),family='serif',fontsize=18) plt.setp(ax.get_xmajorticklabels(),family='serif',fontsize=18) plt.subplot(2,2,1) plt.plot(w,X[:,0],linewidth=2,label=r'$\bar{x}_1$') # Define the X and Y axis labels plt.xlabel('Frequency (rad/s)',family='serif',fontsize=22,weight='bold',labelpad=5) plt.ylabel(r'$\bar{x}_1$',family='serif',fontsize=22,weight='bold',labelpad=10) plt.ylim(-4,4) ax = plt.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') plt.subplot(2,2,2) plt.plot(w,X[:,1],linewidth=2,linestyle="-",label=r'$\bar{x}_2$') # Define the X and Y axis labels plt.xlabel('Frequency (rad/s)',family='serif',fontsize=22,weight='bold',labelpad=5) plt.ylabel(r'$\bar{x}_2$',family='serif',fontsize=22,weight='bold',labelpad=10) plt.ylim(-4,4) ax = plt.gca() 
ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') plt.subplot(2,2,3) plt.plot(w,X[:,2],linewidth=2,linestyle="-",label=r'$\bar{x}_3$') # Define the X and Y axis labels plt.xlabel('Frequency (rad/s)',family='serif',fontsize=22,weight='bold',labelpad=5) plt.ylabel(r'$\bar{x}_3$',family='serif',fontsize=22,weight='bold',labelpad=10) plt.ylim(-4,4) ax = plt.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') plt.subplot(2,2,4) plt.plot(w,X[:,3],linewidth=2,linestyle="-",label=r'$\bar{x}_4$') # Define the X and Y axis labels plt.xlabel('Frequency (rad/s)',family='serif',fontsize=22,weight='bold',labelpad=5) plt.ylabel(r'$\bar{x}_4$',family='serif',fontsize=22,weight='bold',labelpad=10) plt.ylim(-4,4) ax = plt.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') # # Create the legend, then fix the fontsize # leg = plt.legend(loc='upper right', fancybox=True) # ltext = leg.get_texts() # plt.setp(ltext,family='serif',fontsize=16) # Adjust the page layout filling the page using the new tight_layout command plt.tight_layout(pad=0.5, w_pad=3.0, h_pad=2.0) # save the figure as a high-res pdf in the current folder # plt.savefig('Spring_Pendulum_Example_Amp.pdf') # fig.set_size_inches(9,6) # Resize the figure for better display in the notebook ``` ## Frequency Response – Force on $m_2$ All we need to change to examine the case in Figure 2, which has the force input on the second mass, is the $F$ matrix we defined above. Then, a replot of the frequency repsonses will show at what (and how many) frequencies each mass has a zero amplitude response. 
<p style="text-align:center"> <img src="http://shared.crawlab.org/FourMass_5Spring_Undamped_2massForced.png" alt="A Four-Mass-Spring System" width=100%/><br> <strong>Figure 2: A Four-Mass-Spring System with Excitation Force on the Second Mass</strong> </p> ``` F1 = 0.0 F2 = 1.0 F3 = 0.0 F4 = 0.0 F = [F1, F2, F3, F4] w = np.linspace(0,6,1200) X = np.zeros((len(w),4)) # This is (K-w^2 M)^-1 * F for ii, freq in enumerate(w): X[ii,:] = np.dot(linalg.inv(K - freq**2 * M), F) # Let's mask the discontinuity, so it isn't plotted pos = np.where(np.abs(X[:,0]) >= 15) X[pos,:] = np.nan w[pos] = np.nan # Set the plot size - 3x2 aspect ratio is best fig = plt.figure(figsize=(12,8)) plt.subplots_adjust(bottom=0.17,left=0.17,top=0.96,right=0.96) # Change the axis units to CMU Serif plt.setp(ax.get_ymajorticklabels(),family='serif',fontsize=18) plt.setp(ax.get_xmajorticklabels(),family='serif',fontsize=18) plt.subplot(2,2,1) plt.plot(w,X[:,0],linewidth=2,label=r'$\bar{x}_1$') # Define the X and Y axis labels plt.xlabel('Frequency (rad/s)',family='serif',fontsize=22,weight='bold',labelpad=5) plt.ylabel(r'$\bar{x}_1$',family='serif',fontsize=22,weight='bold',labelpad=10) plt.ylim(-2,2) ax = plt.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') plt.subplot(2,2,2) plt.plot(w,X[:,1],linewidth=2,linestyle="-",label=r'$\bar{x}_2$') # Define the X and Y axis labels plt.xlabel('Frequency (rad/s)',family='serif',fontsize=22,weight='bold',labelpad=5) plt.ylabel(r'$\bar{x}_2$',family='serif',fontsize=22,weight='bold',labelpad=10) plt.ylim(-2,2) ax = plt.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') plt.subplot(2,2,3) plt.plot(w,X[:,2],linewidth=2,linestyle="-",label=r'$\bar{x}_3$') # Define the X and Y axis labels plt.xlabel('Frequency 
(rad/s)',family='serif',fontsize=22,weight='bold',labelpad=5) plt.ylabel(r'$\bar{x}_3$',family='serif',fontsize=22,weight='bold',labelpad=10) plt.ylim(-2,2) ax = plt.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') plt.subplot(2,2,4) plt.plot(w,X[:,3],linewidth=2,linestyle="-",label=r'$\bar{x}_4$') # Define the X and Y axis labels plt.xlabel('Frequency (rad/s)',family='serif',fontsize=22,weight='bold',labelpad=5) plt.ylabel(r'$\bar{x}_4$',family='serif',fontsize=22,weight='bold',labelpad=10) plt.ylim(-2,2) ax = plt.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') # # Create the legend, then fix the fontsize # leg = plt.legend(loc='upper right', fancybox=True) # ltext = leg.get_texts() # plt.setp(ltext,family='serif',fontsize=16) # Adjust the page layout filling the page using the new tight_layout command plt.tight_layout(pad=0.5, w_pad=3.0, h_pad=2.0) # save the figure as a high-res pdf in the current folder # plt.savefig('Spring_Pendulum_Example_Amp.pdf') # fig.set_size_inches(9,6) # Resize the figure for better display in the notebook ``` <hr style="border: 0px; height: 1px; text-align: center; background: #333; background-image: -webkit-linear-gradient(left, #ccc, #333, #ccc); background-image: -moz-linear-gradient(left, #ccc, #333, #ccc); background-image: -ms-linear-gradient(left, #ccc, #333, #ccc); background-image: -o-linear-gradient(left, #ccc, #333, #ccc);"> #### Licenses Code is licensed under a 3-clause BSD style license. See the licenses/LICENSE.md file. Other content is provided under a [Creative Commons Attribution-NonCommercial 4.0 International License](http://creativecommons.org/licenses/by-nc/4.0/), CC-BY-NC 4.0. 
```
# This cell will just improve the styling of the notebook
# You can ignore it, if you are okay with the default styling
from IPython.core.display import HTML
import urllib.request
# Fetch an external CSS/HTML snippet and render it to restyle the notebook.
response = urllib.request.urlopen("https://cl.ly/1B1y452Z1d35")
HTML(response.read().decode("utf-8"))
```
github_jupyter
## Only for Colab users To get this to run on colab. Click on the badge. {{ badge }} The script is also set up to get the needed files from the repo and performs the installation of the script's [dependencies](https://github.com/mnm-rnd/competitions/blob/master/zindi/airqo-ugandan-air-quality-forecast-challenge/requirements.txt) ``` !git clone https://github.com/mnm-rnd/competitions.git !mv ./competitions/zindi/airqo-ugandan-air-quality-forecast-challenge/mlod ./mlod !mv ./competitions/zindi/airqo-ugandan-air-quality-forecast-challenge/requirements.txt ./requirements.txt !rm -rf ./competitions ``` # AirQo Ugandan Air Quality Forecast Challenge This notebook contains the reformat of the code, for proper set up during implementation. Most of the code abstractions are written inside our `mlod` package, which should be included with this notebook. ``` # Installing the requirements !pip install -r ./requirements.txt ``` ## Init Steps This section involves setting up the data from `Zindi` to use for the competition ### Setting up the data Please upload the train and test files to the `./data` path inside the workspace folder. Run the cell below, repeateadly till when there are no errors. 
Make sure the uploaded data is the `Train.csv` and `Test.csv` used in the competition ``` from pathlib import Path train_file_csv = Path('./data/Train.csv') test_file_csv = Path('./data/Test.csv') # check if Train file doesn't exist assert train_file_csv.exists(), 'Make sure the Test csv file exists the path "%s"' % train_file_csv # check if Test file doesn't exist assert test_file_csv.exists(), 'Make sure the Test csv file exists the path "%s"' % test_file_csv ``` ## Actual sequence of processes ### Initiating different processes Necessary steps before training ``` import random import numpy as np # using our chosen seed number for reproducibility from mlod import SEED_NUMBER as MLOD_SEED_NUMBER # Setting the seed random.seed(MLOD_SEED_NUMBER) np.random.seed(MLOD_SEED_NUMBER) ``` ### Load and preprocess data ``` import pandas as pd ## Fetching the data train_df = pd.read_csv(train_file_csv) test_df = pd.read_csv(test_file_csv) TEST_IDS = test_df['ID'] ``` ### Preprocessing the data [Low-level Preprocessing]<br /> By using the `mlod.preprocessors.*` involves preprocessing the data in the following ways - Modifying the data such that each row has its atomic values, thus making the data **grow** in size - Performing **special** feature engineering some of which include: - Acquiring Cyclic Representation of selected (idx) features - Using wind speed (`wind_spd`) and direction (`wind_dir`) to obtain catersian components (`u` and `v`) of the wind variable. - Add lag features ### Preprocess + Model Training Owing to our ensemble, the data is preprocessed differently before training either model. The code below therefore contains the `Model` paired with its appropriate `PreProcessor`. Predictions from the first model are fed back into the data before training the second model. 
#### 1: LightGBM + Version 1 Pre Processing This first approach includes using our `MlodPreProcessor` and a `LightGBM` ``` from mlod.preprocessors import MlodPreProcessor mlod_preprocessor = MlodPreProcessor() mlod_pp_opts = dict(cols_to_retain=['ID']) x_train, y_train = mlod_preprocessor.process(train_df, **mlod_pp_opts) # Training the LightGBM # ------------------------------ import pandas as pd from mlod.models import LGBModel from sklearn.model_selection import GroupKFold lgb_model = LGBModel('airqo') x_train_ids = x_train.pop('ID') fold_group = x_train['day_idx'].astype(str) + '_' + x_train['24hr_idx'].astype(str) assert 'ID' not in x_train.columns, 'Make sure ID is NOT in the columns' # perfoming evalution using cross validation. lgb_eval_out = lgb_model.train(x_train, y_train, cv=True, kfold=GroupKFold, group=fold_group, n_splits=3) # Save predictions then to feed to the next df_to_feed = pd.DataFrame.from_dict({ 'ID': x_train_ids, 'oof': lgb_eval_out['oof'] }) save_path = './lgb_eval.csv' df_to_feed.to_csv(save_path) print('Saving the OOF values to path: {}'.format(save_path)) import lightgbm as lgb # training the model on the full set lgb_model.train(x_train, y_train, cv=False) # save the model lgb_model.model.save_model('./lgb-airqo') ``` #### 2: CatBoost + Version 2 Pre Processing This approach includes using our `AirQoPreProcessor` and a `CatBoostModel` ``` ## Training the CatBoost Model # ------------------------------ from mlod.preprocessors import AirQoPreProcessor airqo_preprocessor = AirQoPreProcessor() airqo_pp_opts = dict(cols_to_retain=['ID']) x_train, y_train = airqo_preprocessor.process(train_df, **airqo_pp_opts) # Averaging predictions from LightGBM to obtain the mean value of the target train_feed = df_to_feed.groupby('ID').mean() # Add the averaged values to the data before training the new model x_train = x_train.join(train_feed, on='ID') # drop ID column after joining del x_train['ID'] from mlod.models import CatBoostModel from 
sklearn.model_selection import KFold cb_model = CatBoostModel('airqo') # performing cross validation training cb_eval_out = cb_model.train(x_train, y_train, cv=True, store_cv_models=True, kfold=KFold, n_splits=50) ``` ### Ensemble Prediction Since we are dealing with an ensemble model, the prediction will most likely also have to be different. We would need to take the output of `lgb_model` and use it as an input to the `cb_model`. Below is a function to facilitate this process. ``` import pandas as pd import numpy as np from tqdm import tqdm from mlod.models import Model from mlod.preprocessors import PreProcessor from typing import Tuple import logging logger = logging.getLogger('mlod') class EnsemblePredictor: def __init__(self, trained_lgb_model: Model, cv_trained_cb_model: Model, lgb_pp_opts: Tuple[PreProcessor, dict], cb_pp_opts: Tuple[PreProcessor, dict]): # Checks if the models are trained assert trained_lgb_model.model is not None, "the lgb model is not trained" assert cv_trained_cb_model.is_cv_trained, "the cb model needs to be trained by cross validation" self.lgb = trained_lgb_model self.cb = cv_trained_cb_model # load up the preprocessor and config used in LGB model lgb_pp, lgp_opts = lgb_pp_opts self.lgb_pp = lgb_pp self.lgp_opts = lgp_opts # load up the preprocessor and config used in CatBoost model cb_pp, cb_opts = cb_pp_opts self.cb_pp = cb_pp self.cb_opts = cb_opts def predict(self, x: pd.DataFrame) -> np.ndarray: # pre-process like lgb x_out_lgb = self.lgb_pp.process(x.copy(), test=True, **self.lgp_opts) x_ids = x_out_lgb.pop('ID') # pre-process like cb x_out_cb = self.cb_pp.process(x.copy(), test=True, **self.cb_opts) logger.info('Making prediction using base model') # output for the lgb + merge with x_out_cb to_merge = pd.DataFrame.from_dict({ 'ID': x_ids, 'oof': self.lgb.predict(x_out_lgb) }) # mean merge the values to_merge = to_merge.groupby('ID').mean() x_out_cb = x_out_cb.join(to_merge, on='ID') # remove ID col + empty unneeded data del 
x_out_cb['ID'] del to_merge # store the list of predictions ls_preds = [] logger.info('Making prediction using each %d cv models' % len(self.cb.cv_models)) # get the models used in the cross validations for cv_model in tqdm(self.cb.get_cv_models()): # make prediction using combined values with the cb model pred = cv_model.predict(x_out_cb) ls_preds.append(pred) # compute the mean of the predictions of # the cross validation models return np.mean(ls_preds, 0) ``` Using this `EnsemblePredictor` and saving predictions ``` import numpy as np from mlod.file_utils import PredictionStorage # Building the ensemble predictor predictor = EnsemblePredictor( lgb_model, cb_model, (mlod_preprocessor, mlod_pp_opts), (airqo_preprocessor, airqo_pp_opts) ) y_test = predictor.predict(test_df) # Store the results for submission mean_rmse = np.mean([cb_eval_out['rmse'], lgb_eval_out['rmse']]) out_df = pd.DataFrame.from_dict(dict(ID=TEST_IDS.values, target=y_test)).set_index('ID') out_df.to_csv(f'./airqo_sub{mean_rmse}.csv') ``` The file to upload should be name `airqo_subXXX.csv`. Where XXX is the RMSE of the OOF predictions.
github_jupyter
``` import tensorflow as tf import numpy as np # if this import fails, add the folder Space-RNN-GRU to sys.path # import sys # sys.path.append("/path/to/folder/Space-RNN-GRU/") from mdrnn import MultiDimensionalRNN tf.__version__ ``` ## Dummy example to test MDRNN flow #### <center>Recurrent function to compute the hidden state at position i,j</center> $$\vec{h}_{ij}=f(\vec{h}_{i-1,j},\vec{h}_{i,j-1}, \vec{h}_{i-1,j-1}, \vec{s}_{ij})$$ ``` # normal python plus numpy implementation of this recursion with a simple computation (sum all the previous states plus the entry) def cell(left_state, top_state, diagonal_state, entry): return left_state + top_state + diagonal_state + entry def recursive_mdrnn(input_matrix, hidden, i,j): if i<0 or j<0: return 0 if hidden[i,j] !=- 1: # MEMOIZATION to speed up the recursion (only computes once) return hidden[i,j] # set hidden[i,j] = cell(recursive_mdrnn(input_matrix, hidden, i, j-1), recursive_mdrnn(input_matrix, hidden, i-1, j), recursive_mdrnn(input_matrix, hidden, i-1, j-1), input_matrix[i,j]) return hidden[i,j] # keras tensorflow model with same computation SHAPE = (5,5,1) class SumCell(tf.keras.layers.Layer): def call(self, x, states): left_state = states[0] top_state = states[1] diagonal_state = states[2] return left_state + top_state + diagonal_state + x def create_model(shape, show_summary = True): model = tf.keras.Sequential() model.add(MultiDimensionalRNN(SumCell(), inital_state=0.0, input_shape=shape)) if show_summary: model.summary() return model model = create_model(SHAPE) # define matrix matrix = np.zeros(SHAPE) matrix[1,2] = 1 matrix[2,3] = 1 print(matrix[:,:,0]) ``` ## Compare results with two solutions ``` hidden_states_matrix = np.zeros(SHAPE) - 1 r0 = recursive_mdrnn(matrix, hidden_states_matrix, SHAPE[0]-1, SHAPE[1]-1) print("Recursive solution:", r0) print(hidden_states_matrix[:,:,0]) r1 = model.predict(np.array([matrix])) print("Tensorflow solution:", r1) # some random matrix texts N = 100 for i in range(N): 
m = np.random.randint(0,10, SHAPE) hidden_states_matrix = np.zeros(SHAPE) - 1 r0 = recursive_mdrnn(m, hidden_states_matrix, SHAPE[0]-1, SHAPE[1]-1) r1 = model.predict(np.array([m])) assert r0[0]==r1[0][0] print("No errors") from tensorflow.keras import backend as K # some random matrix texts with random shapes N = 100 for i in range(N): shape_rows = np.random.randint(2,10, ()) shape_cols = np.random.randint(2,10, ()) shape = (shape_rows, shape_cols, 1) m = 0.02*np.random.normal(size=shape) -0.01 hidden_states_matrix = np.zeros(shape) - 1 r0 = recursive_mdrnn(m, hidden_states_matrix, shape[0]-1, shape[1]-1) # need to reacreate the model due to shape incompatibility K.clear_session() model = create_model(shape, show_summary=False) r1 = model.predict(np.array([m])) assert np.round(r0[0])==np.round(r1[0][0]) print("No errors") ```
github_jupyter
``` #@title Copyright 2020 The Earth Engine Community Authors { display-mode: "form" } # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Time Series Visualization with Altair Author: jdbcode This tutorial provides methods for generating time series data in Earth Engine and visualizing it with the [Altair](https://altair-viz.github.io/) library using drought and vegetation response as an example. Topics include: - Time series region reduction in Earth Engine - Formatting a table in Earth Engine - Transferring an Earth Engine table to a Colab Python kernel - Converting an Earth Engine table to a [pandas](https://pandas.pydata.org/) DataFrame - Data representation with various Altair chart types **Note** that this tutorial uses the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) in a [Colab notebook](https://developers.google.com/earth-engine/python_install-colab.html). ## Context At the heart of this tutorial is the notion of data reduction and the need to transform data into insights to help inform our understanding of Earth processes and human's role in them. It combines a series of technologies, each best suited to a particular task in the data reduction process. **Earth Engine** is used to access, clean, and reduce large amounts of spatiotemporal data, **pandas** is used to analyze and organize the results, and **Altair** is used to visualize the results. 
**Note**: This notebook demonstrates an analysis template and interactive workflow that is appropriate for a certain size of dataset, but there are limitations to interactive computation time and server-to-client data transfer size imposed by Colab and Earth Engine. To analyze even larger datasets, you may need to modify the workflow to [export](https://developers.google.com/earth-engine/python_install#exporting-data) `FeatureCollection` results from Earth Engine as static assets and then use the static assets to perform the subsequent steps involving Earth Engine table formatting, conversion to pandas DataFrame, and charting with Altair. ## Materials ### Datasets Climate - Drought severity ([PDSI](https://developers.google.com/earth-engine/datasets/catalog/GRIDMET_DROUGHT)) - Historical climate ([PRISM](https://developers.google.com/earth-engine/datasets/catalog/OREGONSTATE_PRISM_AN81m)) - Projected climate ([NEX-DCP30](https://developers.google.com/earth-engine/datasets/catalog/NASA_NEX-DCP30)) Vegetation proxies - NDVI ([MODIS](https://developers.google.com/earth-engine/datasets/catalog/MODIS_006_MOD13A2)) - NBR ([Landsat](https://developers.google.com/earth-engine/datasets/catalog/landsat/)) ### Region of interest The region of interest for these examples is the Sierra Nevada ecoregion of California. The vegetation grades from mostly ponderosa pine and Douglas-fir at low elevations on the western side, to pines and Sierra juniper on the eastern side, and to fir and other conifers at higher elevations. ## General workflow Preparation of every dataset for visualization follows the same basic steps: 1. Filter the dataset (server-side Earth Engine) 2. Reduce the data region by a statistic (server-side Earth Engine) 3. Format the region reduction into a table (server-side Earth Engine) 4. Convert the Earth Engine table to a DataFrame (server-side Earth Engine > client-side Python kernel) 5. Alter the DataFrame (client-side pandas) 6. 
Plot the DataFrame (client-side Altair) The first dataset will walk through each step in detail. Following examples will provide less description, unless there is variation that merits note. ## Python setup ### Earth Engine API 1. Import the Earth Engine library. 2. Authenticate access (registration verification and Google account access). 3. Initialize the API. ``` import ee ee.Authenticate() ee.Initialize() ``` ### Other libraries Import other libraries used in this notebook. - [**pandas**](https://pandas.pydata.org/): data analysis (including the [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) data structure) - [**altair**](https://altair-viz.github.io/): declarative visualization library (used for charting) - [**numpy**](https://numpy.org/): array-processing package (used for linear regression) - [**folium**](https://python-visualization.github.io/folium/): interactive web map ``` import pandas as pd import altair as alt import numpy as np import folium ``` ## Region reduction function Reduction of pixels intersecting the region of interest to a statistic will be performed multiple times. Define a reusable function that can perform the task for each dataset. The function accepts arguments such as scale and reduction method to parameterize the operation for each particular analysis. **Note**: most of the reduction operations in this tutorial use a large pixel scale so that operations complete quickly. In your own application, set the scale and other parameter arguments as you wish. ``` def create_reduce_region_function(geometry, reducer=ee.Reducer.mean(), scale=1000, crs='EPSG:4326', bestEffort=True, maxPixels=1e13, tileScale=4): """Creates a region reduction function. Creates a region reduction function intended to be used as the input function to ee.ImageCollection.map() for reducing pixels intersecting a provided region to a statistic for each image in a collection. 
See ee.Image.reduceRegion() documentation for more details. Args: geometry: An ee.Geometry that defines the region over which to reduce data. reducer: Optional; An ee.Reducer that defines the reduction method. scale: Optional; A number that defines the nominal scale in meters of the projection to work in. crs: Optional; An ee.Projection or EPSG string ('EPSG:5070') that defines the projection to work in. bestEffort: Optional; A Boolean indicator for whether to use a larger scale if the geometry contains too many pixels at the given scale for the operation to succeed. maxPixels: Optional; A number specifying the maximum number of pixels to reduce. tileScale: Optional; A number representing the scaling factor used to reduce aggregation tile size; using a larger tileScale (e.g. 2 or 4) may enable computations that run out of memory with the default. Returns: A function that accepts an ee.Image and reduces it by region, according to the provided arguments. """ def reduce_region_function(img): """Applies the ee.Image.reduceRegion() method. Args: img: An ee.Image to reduce to a statistic by region. Returns: An ee.Feature that contains properties representing the image region reduction results per band and the image timestamp formatted as milliseconds from Unix epoch (included to enable time series plotting). """ stat = img.reduceRegion( reducer=reducer, geometry=geometry, scale=scale, crs=crs, bestEffort=bestEffort, maxPixels=maxPixels, tileScale=tileScale) return ee.Feature(geometry, stat).set({'millis': img.date().millis()}) return reduce_region_function ``` ### Formatting The result of the region reduction function above applied to an `ee.ImageCollection` produces an `ee.FeatureCollection`. This data needs to be transferred to the Python kernel, but serialized feature collections are large and awkward to deal with. 
This step defines a function to convert the feature collection to an `ee.Dictionary` where the keys are feature property names and values are corresponding lists of property values, which `pandas` can deal with handily. 1. Extract the property values from the `ee.FeatureCollection` as a list of lists stored in an `ee.Dictionary` using `reduceColumns()`. 2. Extract the list of lists from the dictionary. 3. Add names to each list by converting to an `ee.Dictionary` where keys are property names and values are the corresponding value lists. The returned `ee.Dictionary` is essentially a table, where keys define columns and list elements define rows. ``` # Define a function to transfer feature properties to a dictionary. def fc_to_dict(fc): """Convert an ee.FeatureCollection to an ee.Dictionary of {property name: list of values}.""" prop_names = fc.first().propertyNames() prop_lists = fc.reduceColumns( reducer=ee.Reducer.toList().repeat(prop_names.size()), selectors=prop_names).get('list') return ee.Dictionary.fromLists(prop_names, prop_lists) ``` ## Drought severity In this section we'll look at a time series of drought severity as a calendar heat map and a bar chart. ### Import data 1. Load the gridded Palmer Drought Severity Index (PDSI) data as an `ee.ImageCollection`. 2. Load the EPA Level-3 ecoregion boundaries as an `ee.FeatureCollection` and filter it to include only the Sierra Nevada region, which defines the area of interest (AOI). ``` pdsi = ee.ImageCollection('GRIDMET/DROUGHT').select('pdsi') aoi = ee.FeatureCollection('EPA/Ecoregions/2013/L3').filter( ee.Filter.eq('na_l3name', 'Sierra Nevada')).geometry() ``` **Note**: the `aoi` defined above will be used throughout this tutorial. In your own application, redefine it for your own area of interest. ### Reduce data 1. Create a region reduction function. 2. Map the function over the `pdsi` image collection to reduce each image. 3. Filter out any resulting features that have null computed values (occurs when all pixels in an AOI are masked). 
``` reduce_pdsi = create_reduce_region_function( geometry=aoi, reducer=ee.Reducer.mean(), scale=5000, crs='EPSG:3310') pdsi_stat_fc = ee.FeatureCollection(pdsi.map(reduce_pdsi)).filter( ee.Filter.notNull(pdsi.first().bandNames())) ``` --- **STOP**: ### _Optional export_ _If your process is long-running_, you'll want to export the `pdsi_stat_fc` variable as an asset using a batch task. Wait until the task finishes, import the asset, and continue on. Please see the Developer Guide section on [exporting with the Python API](https://developers.google.com/earth-engine/python_install#exporting-data). Export to asset: ``` task = ee.batch.Export.table.toAsset( collection=pdsi_stat_fc, description='pdsi_stat_fc export', assetId='users/YOUR_USER_NAME/pdsi_stat_fc_ts_vis_with_altair') # task.start() ``` Import the asset after the export completes: ``` # pdsi_stat_fc = ee.FeatureCollection('users/YOUR_USER_NAME/pdsi_stat_fc_ts_vis_with_altair') ``` _\* Remove comments (#) to run the above cells._ --- **CONTINUE**: ### Server to client transfer The `ee.FeatureCollection` needs to be converted to a dictionary and transferred to the Python kernel. 1. Apply the `fc_to_dict` function to convert from `ee.FeatureCollection` to `ee.Dictionary`. 2. Call `getInfo()` on the `ee.Dictionary` to transfer the data client-side. ``` pdsi_dict = fc_to_dict(pdsi_stat_fc).getInfo() ``` The result is a Python dictionary. Print a small part to see how it is formatted. ``` print(type(pdsi_dict), '\n') for prop in pdsi_dict.keys(): print(prop + ':', pdsi_dict[prop][0:3] + ['...']) ``` Convert the Python dictionary to a pandas DataFrame. ``` pdsi_df = pd.DataFrame(pdsi_dict) ``` Preview the DataFrame and check the column data types. ``` display(pdsi_df) print(pdsi_df.dtypes) ``` ### Add date columns Add date columns derived from the milliseconds from Unix epoch column. The pandas library provides functions and objects for timestamps and the DataFrame object allows for easy mutation. 
Define a function to add date variables to the DataFrame: year, month, day, and day of year (DOY). ``` # Function to add date variables to DataFrame. def add_date_info(df): """Add Timestamp, Year, Month, Day, and DOY columns derived from the 'millis' (ms since Unix epoch) column; mutates and returns df.""" df['Timestamp'] = pd.to_datetime(df['millis'], unit='ms') df['Year'] = pd.DatetimeIndex(df['Timestamp']).year df['Month'] = pd.DatetimeIndex(df['Timestamp']).month df['Day'] = pd.DatetimeIndex(df['Timestamp']).day df['DOY'] = pd.DatetimeIndex(df['Timestamp']).dayofyear return df ``` **Note**: the above function for adding date information to a DataFrame will be used throughout this tutorial. Apply the `add_date_info` function to the PDSI DataFrame to add date attribute columns, preview the results. ``` pdsi_df = add_date_info(pdsi_df) pdsi_df.head(5) ``` ### Rename and drop columns Often it is desirable to rename columns and/or remove unnecessary columns. Do both here and preview the DataFrame. ``` pdsi_df = pdsi_df.rename(columns={ 'pdsi': 'PDSI' }).drop(columns=['millis', 'system:index']) pdsi_df.head(5) ``` Check the data type of each column. ``` pdsi_df.dtypes ``` At this point the DataFrame is in good shape for charting with Altair. ### Calendar heatmap Chart PDSI data as a calendar heatmap. Set observation year as the x-axis variable, month as y-axis, and PDSI value as color. Note that Altair features a convenient [method for aggregating values within groups](https://altair-viz.github.io/user_guide/transform/aggregate.html) while encoding the chart (i.e., no need to create a new DataFrame). The mean aggregate transform is applied here because each month has three PDSI observations (year and month are the grouping factors). Also note that a tooltip has been added to the chart; hovering over cells reveals the values of the selected variables. 
``` alt.Chart(pdsi_df).mark_rect().encode( x='Year:O', y='Month:O', color=alt.Color( 'mean(PDSI):Q', scale=alt.Scale(scheme='redblue', domain=(-5, 5))), tooltip=[ alt.Tooltip('Year:O', title='Year'), alt.Tooltip('Month:O', title='Month'), alt.Tooltip('mean(PDSI):Q', title='PDSI') ]).properties(width=600, height=300) ``` The calendar heat map is good for interpretation of relative intra- and inter-annual differences in PDSI. However, since the PDSI variable is represented by color, estimating absolute values and magnitude of difference is difficult. ### Bar chart Chart PDSI time series as a bar chart to more easily interpret absolute values and compare them over time. Here, the observation timestamp is represented on the x-axis and PDSI is represented by both the y-axis and color. Since each PDSI observation has a unique timestamp that can be plotted to the x-axis, there is no need to aggregate PDSI values as in the above chart. A tooltip is added to the chart; hover over the bars to reveal the values for each variable. ``` alt.Chart(pdsi_df).mark_bar(size=1).encode( x='Timestamp:T', y='PDSI:Q', color=alt.Color( 'PDSI:Q', scale=alt.Scale(scheme='redblue', domain=(-5, 5))), tooltip=[ alt.Tooltip('Timestamp:T', title='Date'), alt.Tooltip('PDSI:Q', title='PDSI') ]).properties(width=600, height=300) ``` This temporal bar chart makes it easier to interpret and compare absolute values of PDSI over time, but relative intra- and inter-annual variability are arguably harder to interpret because the division of year and month is not as distinct as in the calendar heatmap above. Take note of the extended and severe period of drought from 2012 through 2016. In the next section, we'll look for a vegetation response to this event. ## Vegetation productivity NDVI is a proxy measure of photosynthetic capacity and is used in this tutorial to investigate vegetation response to the 2012-2016 drought identified in the PDSI bar chart above. 
MODIS provides an analysis-ready 16-day NDVI composite that is well suited for regional investigation of temporal vegetation dynamics. The following steps reduce and prepare this data for charting in the same manner as the PDSI data above; please refer to previous sections to review details. ### Import and reduce 1. Load the MODIS NDVI data as an `ee.ImageCollection`. 1. Create a region reduction function. 3. Apply the function to all images in the time series. 4. Filter out features with null computed values. ``` ndvi = ee.ImageCollection('MODIS/006/MOD13A2').select('NDVI') reduce_ndvi = create_reduce_region_function( geometry=aoi, reducer=ee.Reducer.mean(), scale=1000, crs='EPSG:3310') ndvi_stat_fc = ee.FeatureCollection(ndvi.map(reduce_ndvi)).filter( ee.Filter.notNull(ndvi.first().bandNames())) ``` --- **STOP**: _If your process is long-running_, you'll want to export the `ndvi_stat_fc` variable as an asset using a batch task. Wait until the task finishes, import the asset, and continue on. Please see the above **_Optional export_** section for more details. **CONTINUE**: --- ### Prepare DataFrame 1. Transfer data from the server to the client. 2. Convert the Python dictionary to a pandas DataFrame. 3. Preview the DataFrame and check data types. ``` ndvi_dict = fc_to_dict(ndvi_stat_fc).getInfo() ndvi_df = pd.DataFrame(ndvi_dict) display(ndvi_df) print(ndvi_df.dtypes) ``` 4. Remove the NDVI scaling. 5. Add date attribute columns. 6. Preview the DataFrame. ``` ndvi_df['NDVI'] = ndvi_df['NDVI'] / 10000 ndvi_df = add_date_info(ndvi_df) ndvi_df.head(5) ``` These NDVI time series data are now ready for plotting. ### DOY line chart Make a day of year (DOY) line chart where each line represents a year of observations. This chart makes it possible to compare the same observation date among years. Use it to compare NDVI values for years during the drought and not. Day of year is represented on the x-axis and NDVI on the y-axis. 
Each line represents a year and is distinguished by color. Note that this plot includes a tooltip and has been made interactive so that the axes can be zoomed and panned. ``` highlight = alt.selection( type='single', on='mouseover', fields=['Year'], nearest=True) base = alt.Chart(ndvi_df).encode( x=alt.X('DOY:Q', scale=alt.Scale(domain=[0, 353], clamp=True)), y=alt.Y('NDVI:Q', scale=alt.Scale(domain=[0.1, 0.6])), color=alt.Color('Year:O', scale=alt.Scale(scheme='magma'))) points = base.mark_circle().encode( opacity=alt.value(0), tooltip=[ alt.Tooltip('Year:O', title='Year'), alt.Tooltip('DOY:Q', title='DOY'), alt.Tooltip('NDVI:Q', title='NDVI') ]).add_selection(highlight) lines = base.mark_line().encode( size=alt.condition(~highlight, alt.value(1), alt.value(3))) (points + lines).properties(width=600, height=350).interactive() ``` The first thing to note is that winter dates (when there is snow in the Sierra Nevada ecoregion) exhibit highly variable inter-annual NDVI, but spring, summer, and fall dates are more consistent. With regard to drought effects on vegetation, summer and fall dates are the most sensitive time. Zooming into observations for the summer/fall days (224-272), you'll notice that many years have a u-shaped pattern where NDVI values decrease and then rise. Another way to view these data is to plot the distribution of NDVI by DOY represented as an interquartile range envelope and median line. Here, these two charts are defined and then combined in the following snippet. 1. Define a base chart. 2. Define a line chart for median NDVI (note the use of aggregate median transform grouping by DOY). 3. Define a band chart using `'iqr'` (interquartile range) to represent NDVI distribution grouping on DOY. 4. Combine the line and band charts. 
``` base = alt.Chart(ndvi_df).encode( x=alt.X('DOY:Q', scale=alt.Scale(domain=(150, 340)))) line = base.mark_line().encode( y=alt.Y('median(NDVI):Q', scale=alt.Scale(domain=(0.47, 0.53)))) band = base.mark_errorband(extent='iqr').encode( y='NDVI:Q') (line + band).properties(width=600, height=300).interactive() ``` The summary statistics for the summer/fall days (224-272) certainly show an NDVI reduction, but there is also variability; some years exhibit greater NDVI reduction than others as suggested by the wide interquartile range during the middle of the summer. Assuming that NDVI reduction is due to water and heat limiting photosynthesis, we can hypothesize that during years of drought, photosynthesis (NDVI) will be lower than non-drought years. We can investigate the relationship between photosynthesis (NDVI) and drought (PDSI) using a scatter plot and linear regression. ## Drought and productivity relationship A scatterplot is a good way to visualize the relationship between two variables. Here, PDSI (drought indicator) will be plotted on the x-axis and NDVI (vegetation productivity) on the y-axis. To achieve this, both variables must exist in the same DataFrame. Each row will be an observation in time and columns will correspond to PDSI and NDVI values. Currently, PDSI and NDVI are in two different DataFrames and need to be merged. ### Prepare DataFrames Before they can be merged, each variable must be reduced to a common temporal observation unit to define correspondence. There are a number of ways to do this and each will define the relationship between PDSI and NDVI differently. Here, our temporal unit will be an annual observation set where NDVI is reduced to the intra-annual minimum from DOY 224 to 272 and PDSI will be the mean from DOY 1 to 272. We are proposing that average drought severity for the first three quarters of a year is related to minimum summer NDVI for a given year. 1. 
Filter the NDVI DataFrame to observations that occur between DOY 224 and 272. 2. Reduce the DOY-filtered subset to intra-annual minimum NDVI. ``` ndvi_doy_range = [224, 272] ndvi_df_sub = ndvi_df[(ndvi_df['DOY'] >= ndvi_doy_range[0]) & (ndvi_df['DOY'] <= ndvi_doy_range[1])] ndvi_df_sub = ndvi_df_sub.groupby('Year').agg('min') ``` **Note**: in your own application you may find that a different DOY range is more suitable, change the `ndvi_doy_range` as needed. 3. Filter the PDSI DataFrame to observations that occur between DOY 1 and 272. 4. Reduce the values within a given year to the mean of the observations. ``` pdsi_doy_range = [1, 272] pdsi_df_sub = pdsi_df[(pdsi_df['DOY'] >= pdsi_doy_range[0]) & (pdsi_df['DOY'] <= pdsi_doy_range[1])] pdsi_df_sub = pdsi_df_sub.groupby('Year').agg('mean') ``` **Note**: in your own application you may find that a different DOY range is more suitable, change the `pdsi_doy_range` as needed. 5. Perform a join on 'Year' to combine the two reduced DataFrames. 6. Select only the columns of interest: 'Year', 'NDVI', 'PDSI'. 7. Preview the DataFrame. ``` ndvi_pdsi_df = pd.merge( ndvi_df_sub, pdsi_df_sub, how='left', on='Year').reset_index() ndvi_pdsi_df = ndvi_pdsi_df[['Year', 'NDVI', 'PDSI']] ndvi_pdsi_df.head(5) ``` NDVI and PDSI are now included in the same DataFrame linked by Year. This format is suitable for determining a linear relationship and drawing a line of best fit through the data. Including a line of best fit can be a helpful visual aid. Here, a 1D polynomial is fit through the xy point cloud defined by corresponding NDVI and PDSI observations. The resulting fit is added to the DataFrame as a new column 'Fit'. 8. Add a line of best fit between PDSI and NDVI by determining the linear relationship and predicting NDVI based on PDSI for each year. 
``` ndvi_pdsi_df['Fit'] = np.poly1d( np.polyfit(ndvi_pdsi_df['PDSI'], ndvi_pdsi_df['NDVI'], 1))( ndvi_pdsi_df['PDSI']) ndvi_pdsi_df.head(5) ``` ### Scatter plot The DataFrame is ready for plotting. Since this chart is to include points and a line of best fit, two charts need to be created, one for the points and one for the line. The results are combined into the final plot. ``` base = alt.Chart(ndvi_pdsi_df).encode( x=alt.X('PDSI:Q', scale=alt.Scale(domain=(-5, 5)))) points = base.mark_circle(size=60).encode( y=alt.Y('NDVI:Q', scale=alt.Scale(domain=(0.4, 0.6))), color=alt.Color('Year:O', scale=alt.Scale(scheme='magma')), tooltip=[ alt.Tooltip('Year:O', title='Year'), alt.Tooltip('PDSI:Q', title='PDSI'), alt.Tooltip('NDVI:Q', title='NDVI') ]) fit = base.mark_line().encode( y=alt.Y('Fit:Q'), color=alt.value('#808080')) (points + fit).properties(width=600, height=300).interactive() ``` As you can see, there seems to be some degree of positive correlation between PDSI and NDVI (i.e., as wetness increases, vegetation productivity increases; as wetness decreases, vegetation productivity decreases). Note that some of the greatest outliers are 2016, 2017, 2018 - the three years following recovery from the long drought. It is also important to note that there are many other factors that may influence the NDVI signal that are not being considered here. ## Patch-level vegetation mortality At a regional scale there appears to be a relationship between drought and vegetation productivity. This section will look more closely at effects of drought on vegetation at a patch level, with a specific focus on mortality. Here, a Landsat time series collection is created for the period 1984-present to provide greater temporal context for change at a relatively precise spatial resolution. 
### Find a point of interest Use [aerial imagery](https://developers.google.com/earth-engine/datasets/catalog/USDA_NAIP_DOQQ) from the National Agriculture Imagery Program (NAIP) in an interactive [Folium](https://python-visualization.github.io/folium/) map to identify a location in the Sierra Nevada ecoregion that appears to have patches of dead trees. 1. Run the following code block to render an interactive Folium map for a selected NAIP image. 2. Zoom and pan around the image to identify a region of recently dead trees (standing silver snags with no fine branches or brown/grey snags with fine branches). 3. Click the map to list the latitude and longitude for a patch of interest. Record these values for use in the following section (the example location used in the following section is presented as a yellow point). ``` # Define a method for displaying Earth Engine image tiles to folium map. def add_ee_layer(self, ee_image_object, vis_params, name): map_id_dict = ee.Image(ee_image_object).getMapId(vis_params) folium.raster_layers.TileLayer( tiles=map_id_dict['tile_fetcher'].url_format, attr='Map Data &copy; <a href="https://earthengine.google.com/">Google Earth Engine, USDA National Agriculture Imagery Program</a>', name=name, overlay=True, control=True).add_to(self) # Add an Earth Engine layer drawing method to folium. folium.Map.add_ee_layer = add_ee_layer # Import a NAIP image for the area and date of interest. naip_img = ee.ImageCollection('USDA/NAIP/DOQQ').filterDate( '2016-01-01', '2017-01-01').filterBounds(ee.Geometry.Point([-118.6407, 35.9665])).first() # Display the NAIP image to the folium map. m = folium.Map(location=[35.9665, -118.6407], tiles='Stamen Terrain', zoom_start=16) m.add_ee_layer(naip_img, None, 'NAIP image, 2016') # Add the point of interest to the map. folium.Circle( radius=15, location=[35.9665, -118.6407], color='yellow', fill=False, ).add_to(m) # Add the AOI to the map. 
folium.GeoJson( aoi.getInfo(), name='geojson', style_function=lambda x: {'fillColor': '#00000000', 'color': '#000000'}, ).add_to(m) # Add a lat lon popup. folium.LatLngPopup().add_to(m) # Display the map. display(m) ``` ### Prepare Landsat collection Landsat surface reflectance data need to be prepared before being reduced. The steps below will organize data from multiple sensors into congruent collections where band names are consistent, cloud and cloud shadows have been masked out, and the normalized burn ratio (NBR) transformation is calculated and returned as the image representative (NBR is a good indicator of forest disturbance). Finally, all sensor collections will be merged into a single collection and annual composites calculated based on mean annual NBR using a join. 1. Define Landsat observation date window inputs based on NDVI curve plotted previously and set latitude and longitude variables from the map above. ``` start_day = 224 end_day = 272 latitude = 35.9665 longitude = -118.6407 ``` **Note**: in your own application it may be necessary to change these values. 2. Prepare a Landsat surface reflectance collection 1984-present. Those unfamiliar with Landsat might find the following acronym definitions and links helpful. 
- [OLI](https://www.usgs.gov/land-resources/nli/landsat/landsat-8?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Operational Land Imager sensor) - [ETM+](https://www.usgs.gov/land-resources/nli/landsat/landsat-7?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Enhanced Thematic Mapper Plus sensor) - [TM](https://www.usgs.gov/land-resources/nli/landsat/landsat-5?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Thematic Mapper sensor) - [CFMask](https://www.usgs.gov/land-resources/nli/landsat/cfmask-algorithm) (Landsat USGS surface reflectance mask based on the CFMask algorithm) - [NBR](https://www.usgs.gov/land-resources/nli/landsat/landsat-normalized-burn-ratio#:~:text=NBR%20is%20used%20to%20identify,SWIR%20values%20in%20traditional%20fashion.&text=In%20Landsat%204%2D7%2C%20NBR,Band%205%20%2B%20Band%207). (Normalized Burn Ratio: a spectral vegetation index) - Understanding [Earth Engine joins](https://developers.google.com/earth-engine/joins_intro) ``` # Make lat. and long. vars an `ee.Geometry.Point`. point = ee.Geometry.Point([longitude, latitude]) # Define a function to get and rename bands of interest from OLI. def rename_oli(img): return (img.select( ee.List(['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'pixel_qa']), ee.List(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']))) # Define a function to get and rename bands of interest from ETM+. def rename_etm(img): return (img.select( ee.List(['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa']), ee.List(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']))) # Define a function to mask out clouds and cloud shadows. def cfmask(img): cloud_shadow_bi_mask = 1 << 3 cloud_bit_mask = 1 << 5 qa = img.select('pixel_qa') mask = qa.bitwiseAnd(cloud_shadow_bi_mask).eq(0).And( qa.bitwiseAnd(cloud_bit_mask).eq(0)) return img.updateMask(mask) # Define a function to add year as an image property. 
def set_year(img): year = ee.Image(img).date().get('year') return img.set('Year', year) # Define a function to calculate NBR. def calc_nbr(img): return img.normalizedDifference(ee.List(['NIR', 'SWIR2'])).rename('NBR') # Define a function to prepare OLI images. def prep_oli(img): orig = img img = rename_oli(img) img = cfmask(img) img = calc_nbr(img) img = img.copyProperties(orig, orig.propertyNames()) return set_year(img) # Define a function to prepare TM/ETM+ images. def prep_etm(img): orig = img img = rename_etm(img) img = cfmask(img) img = calc_nbr(img) img = img.copyProperties(orig, orig.propertyNames()) return set_year(img) # Import image collections for each Landsat sensor (surface reflectance). tm_col = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR') etm_col = ee.ImageCollection('LANDSAT/LE07/C01/T1_SR') oli_col = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR') # Filter collections and prepare them for merging. oli_col = oli_col.filterBounds(point).filter( ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_oli) etm_col = etm_col.filterBounds(point).filter( ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_etm) tm_col = tm_col.filterBounds(point).filter( ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_etm) # Merge the collections. landsat_col = oli_col.merge(etm_col).merge(tm_col) # Get a distinct year collection. distinct_year_col = landsat_col.distinct('Year') # Define a filter that identifies which images from the complete collection # match the year from the distinct year collection. join_filter = ee.Filter.equals(leftField='Year', rightField='Year') # Define a join. join = ee.Join.saveAll('year_matches') # Apply the join and convert the resulting FeatureCollection to an # ImageCollection. join_col = ee.ImageCollection( join.apply(distinct_year_col, landsat_col, join_filter)) # Define a function to apply mean reduction among matching year collections. 
def reduce_by_join(img): year_col = ee.ImageCollection.fromImages(ee.Image(img).get('year_matches')) return year_col.reduce(ee.Reducer.mean()).rename('NBR').set( 'system:time_start', ee.Image(img).date().update(month=8, day=1).millis()) # Apply the `reduce_by_join` function to the list of annual images in the # properties of the join collection. landsat_col = join_col.map(reduce_by_join) ``` The result of the above code block is an image collection with as many images as there are years present in the merged Landsat collection. Each image represents the annual mean NBR constrained to observations within the given date window. ### Prepare DataFrame 1. Create a region reduction function; use `ee.Reducer.first()` as the reducer since no spatial aggregation is needed (we are interested in the single pixel that intersects the point). Set the region as the geometry defined by the lat. and long. coordinates identified in the above map. 2. Apply the function to all images in the time series. 3. Filter out features with null computed values. ``` reduce_landsat = create_reduce_region_function( geometry=point, reducer=ee.Reducer.first(), scale=30, crs='EPSG:3310') nbr_stat_fc = ee.FeatureCollection(landsat_col.map(reduce_landsat)).filter( ee.Filter.notNull(landsat_col.first().bandNames())) ``` 4. Transfer data from the server to the client.<br> _Note: if the process times out, you'll need to export/import the `nbr_stat_fc` feature collection as described in the **Optional export** section_. 5. Convert the Python dictionary to a pandas DataFrame. 6. Preview the DataFrame and check data types. ``` nbr_dict = fc_to_dict(nbr_stat_fc).getInfo() nbr_df = pd.DataFrame(nbr_dict) display(nbr_df) print(nbr_df.dtypes) ``` 7. Add date attribute columns. 8. Preview the DataFrame. ``` nbr_df = add_date_info(nbr_df) nbr_df.head(5) ``` ### Line chart Display the Landsat NBR time series for the point of interest as a line plot. 
``` alt.Chart(nbr_df).mark_line().encode( x=alt.X('Timestamp:T', title='Date'), y='NBR:Q', tooltip=[ alt.Tooltip('Timestamp:T', title='Date'), alt.Tooltip('NBR:Q') ]).properties(width=600, height=300).interactive() ``` As you can see from the above time series of NBR observations, a dramatic decrease in NBR began in 2015, shortly after the severe and extended drought began. The decline continued through 2017, when a minor recovery began. Within the context of the entire time series, it is apparent that the decline is outside of normal inter-annual variability and that the reduction in NBR for this site is quite severe. The lack of major recovery response in NBR in 2017-19 (time of writing) indicates that the event was not ephemeral; the loss of vegetation will have a lasting impact on this site. The corresponding onset of drought and reduction in NBR provides further evidence that there is a relationship between drought and vegetation response in the Sierra Nevada ecoregion. ## Past and future climate The previous data visualizations suggest there is a relationship between drought and vegetation stress and mortality in the Sierra Nevada ecoregion. This section will look at how climate is projected to change in the future, which can give us a sense for what to expect with regard to drought conditions and speculate about its impact on vegetation. We'll look at historical and projected temperature and precipitation. Projected data are represented by NEX-DCP30, and historical observations by PRISM. ### Future climate NEX-DCP30 data contain 33 climate models projected to the year 2100 using several scenarios of greenhouse gas concentration pathways (RCP). Here, we'll use the median of all models for RCP 8.5 (the worst case scenario) to look at potential future temperature and precipitation. #### Import and prepare collection 1. Filter the collection by date and scenario. 2. Calculate 'mean' temperature from median min and max among 33 models. 
``` dcp_col = (ee.ImageCollection('NASA/NEX-DCP30_ENSEMBLE_STATS') .select(['tasmax_median', 'tasmin_median', 'pr_median']) .filter( ee.Filter.And(ee.Filter.eq('scenario', 'rcp85'), ee.Filter.date('2019-01-01', '2070-01-01')))) def calc_mean_temp(img): return (img.select('tasmax_median') .add(img.select('tasmin_median')) .divide(ee.Image.constant(2.0)) .addBands(img.select('pr_median')) .rename(['Temp-mean', 'Precip-rate']) .copyProperties(img, img.propertyNames())) dcp_col = dcp_col.map(calc_mean_temp) ``` #### Prepare DataFrame 1. Create a region reduction function. 2. Apply the function to all images in the time series. 3. Filter out features with null computed values. ``` reduce_dcp30 = create_reduce_region_function( geometry=point, reducer=ee.Reducer.first(), scale=5000, crs='EPSG:3310') dcp_stat_fc = ee.FeatureCollection(dcp_col.map(reduce_dcp30)).filter( ee.Filter.notNull(dcp_col.first().bandNames())) ``` 4. Transfer data from the server to the client. _Note: if the process times out, you'll need to export/import the `dcp_stat_fc` feature collection as described in the **Optional export** section_. 5. Convert the Python dictionary to a pandas DataFrame. 6. Preview the DataFrame and check the data types. ``` dcp_dict = fc_to_dict(dcp_stat_fc).getInfo() dcp_df = pd.DataFrame(dcp_dict) display(dcp_df) print(dcp_df.dtypes) ``` 7. Add date attribute columns. 8. Preview the DataFrame. ``` dcp_df = add_date_info(dcp_df) dcp_df.head(5) ``` 9. Convert precipitation rate to mm. 10. Convert Kelvin to celsius. 11. Add the model name as a column. 12. Remove the 'Precip-rate' column. ``` dcp_df['Precip-mm'] = dcp_df['Precip-rate'] * 86400 * 30 dcp_df['Temp-mean'] = dcp_df['Temp-mean'] - 273.15 dcp_df['Model'] = 'NEX-DCP30' dcp_df = dcp_df.drop('Precip-rate', 1) dcp_df.head(5) ``` ### Past climate PRISM data are climate datasets for the conterminous United States. Grid cells are interpolated based on station data assimilated from many networks across the country. 
The datasets used here are monthly averages for precipitation and temperature. They provide a record of historical climate. #### Reduce collection and prepare DataFrame 1. Import the collection and filter by date. 2. Reduce the collection images by region and filter null computed values. 3. Convert the feature collection to a dictionary and transfer it client-side.<br> _Note: if the process times out, you'll need to export/import the `prism_stat_fc` feature collection as described in the **Optional export** section_. 4. Convert the dictionary to a DataFrame. 5. Preview the DataFrame. ``` prism_col = (ee.ImageCollection('OREGONSTATE/PRISM/AN81m') .select(['ppt', 'tmean']) .filter(ee.Filter.date('1979-01-01', '2019-12-31'))) reduce_prism = create_reduce_region_function( geometry=point, reducer=ee.Reducer.first(), scale=5000, crs='EPSG:3310') prism_stat_fc = (ee.FeatureCollection(prism_col.map(reduce_prism)) .filter(ee.Filter.notNull(prism_col.first().bandNames()))) prism_dict = fc_to_dict(prism_stat_fc).getInfo() prism_df = pd.DataFrame(prism_dict) display(prism_df) print(prism_df.dtypes) ``` 6. Add date attribute columns. 7. Add model name. 8. Rename columns to be consistent with the NEX-DCP30 DataFrame. 9. Preview the DataFrame. ``` prism_df = add_date_info(prism_df) prism_df['Model'] = 'PRISM' prism_df = prism_df.rename(columns={'ppt': 'Precip-mm', 'tmean': 'Temp-mean'}) prism_df.head(5) ``` ### Combine DataFrames At this point the PRISM and NEX-DCP30 DataFrames have the same columns, the same units, and are distinguished by unique entries in the 'Model' column. Use the `concat` function to concatenate these DataFrames into a single DataFrame for plotting together in the same chart. ``` climate_df = pd.concat([prism_df, dcp_df], sort=True) climate_df ``` ### Charts Chart the past and future precipitation and temperature together to get a sense for where climate has been and where it is projected to go under RCP 8.5. 
#### Precipitation ``` base = alt.Chart(climate_df).encode( x='Year:O', color='Model') line = base.mark_line().encode( y=alt.Y('median(Precip-mm):Q', title='Precipitation (mm/month)')) band = base.mark_errorband(extent='iqr').encode( y=alt.Y('Precip-mm:Q', title='Precipitation (mm/month)')) (band + line).properties(width=600, height=300) ``` #### Temperature ``` line = alt.Chart(climate_df).mark_line().encode( x='Year:O', y='median(Temp-mean):Q', color='Model') band = alt.Chart(climate_df).mark_errorband(extent='iqr').encode( x='Year:O', y=alt.Y('Temp-mean:Q', title='Temperature (°C)'), color='Model') (band + line).properties(width=600, height=300) ``` Future climate projections suggest that precipitation will decrease and temperature will increase for the selected point of interest. We can hypothesize, given the RCP 8.5 trajectory, that future conditions will more regularly resemble the 2012-2016 drought, which could lead to the same vegetation reduction response documented here and that more frequent drought events could lead to development of plant communities that are better adapted to low precipitation, high temperature conditions.
github_jupyter
# Upperair Obs with Contours

By: Kevin Goebbert

An example using the declarative syntax to plot upperair observations and overlay contours.

```
from datetime import datetime, timedelta

import cartopy.crs as ccrs
from metpy.io import add_station_lat_lon
from metpy.plots.declarative import *
from metpy.units import units
import numpy as np
from siphon.simplewebservice.iastate import IAStateUpperAir
import xarray as xr
```

## Get Upperair Observations

Using the functionality from Siphon to get all of the upperair data from the Iowa State archive. This example uses the current time and determines the date for yesterday to make sure data is available. This data doesn't have lat/lon information, so we add it using the functionality from MetPy to add that information to a DataFrame.

```
# Set the date for data and plot
yesterday = datetime.utcnow() - timedelta(days=1)
date = datetime(yesterday.year, yesterday.month, yesterday.day, 12)

# Request data using Siphon request for data from Iowa State Archive
data = IAStateUpperAir.request_all_data(date)

# Add lat/lon information to dataframe
data = add_station_lat_lon(data, data.station.name)
```

## Get GFS data

For plotting contours we need some gridded output. Using the UCAR THREDDS data server to obtain the appropriate model initial conditions to contour on top of our upperair observations.

```
# Get GFS data for contouring
ds = xr.open_dataset('https://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/'
                     f'Global_onedeg_ana/GFS_Global_onedeg_ana_{date:%Y%m%d}_0000.grib2').metpy.parse_cf()
```

## Make Plot

Using the declarative syntax from MetPy, we plot observations and contours on the same map panel. There are also squares added to each observation location by using the axes that are generated by the declarative syntax to plot them using classic Matplotlib methods.
```
# Add point observations: 500 hPa station plot of temperature (NW),
# dewpoint (SW), and height (ENE) around each station, plus wind barbs.
obs = PlotObs()
obs.data = data
obs.level = 500 * units.hPa
obs.time = date
obs.fields = ['temperature', 'dewpoint', 'height']
obs.locations = ['NW', 'SW', 'ENE']
obs.vector_field = ['u_wind', 'v_wind']
obs.vector_field_length = 8

# Add contours of geopotential height from the GFS analysis, labeled
# every 60 m, over a CONUS-and-surroundings subset of the global grid.
# NOTE(review): lon slice uses 0-360 convention (360-140 .. 360-40 == 220-320E).
cntr = ContourPlot()
cntr.data = ds.sel(lat=slice(80, 10), lon=slice(360-140, 360-40))
cntr.level = obs.level
cntr.field = 'Geopotential_height_isobaric'
cntr.clabels = True
cntr.contours = list(range(0, 10000, 60))

# set map panel features
panel = MapPanel()
panel.projection = 'lcc'
panel.area = [-125, -65, 22, 55]
panel.layers = ['states', 'borders', 'coastline']
panel.plots = [obs, cntr]

# Add map panel to figure
pc = PanelContainer()
pc.size = (20, 20)
pc.panels = [panel]

# Add a square marker at sounding site
# (panel.ax is only available after the panel is attached to a container,
# so this uses plain Matplotlib on the declarative-built axes)
panel.ax.scatter(data.longitude, data.latitude,
                 s=50, marker='s', color='black',
                 transform=ccrs.PlateCarree())

pc.show()
```
github_jupyter
``` from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint from tensorflow.keras import models, layers, metrics from tensorflow.keras.utils import to_categorical from tensorflow.keras.optimizers import SGD from tensorflow.keras.datasets import mnist import matplotlib.pyplot as plt import numpy as np import warnings plt.style.use('classic') warnings.filterwarnings("ignore") (train_images, train_labels), (test_images, test_labels) = mnist.load_data() train_images = train_images.reshape((60000, 28, 28, 1)) train_images = train_images.astype('float32') / 255 test_images = test_images.reshape((10000, 28, 28, 1)) test_images = test_images.astype('float32') / 25 # One-Hot Encoding train_labels = to_categorical(train_labels) test_labels = to_categorical(test_labels) print("Shape of X [N, H, W, C]: ", test_images.shape) print("Shape of y: ", test_labels.shape) def LeNet_5(): model = models.Sequential() # 卷积与池化层 model.add(layers.Conv2D(6, (5, 5), activation='tanh', input_shape=(28, 28, 1))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(16, (5, 5), activation="tanh")) model.add(layers.MaxPooling2D((2, 2))) # 添加分类器 model.add(layers.Flatten()) model.add(layers.Dense(120, activation="tanh")) model.add(layers.Dense(84, activation="tanh")) model.add(layers.Dense(10, activation="softmax")) return model LeNet_5 = LeNet_5() LeNet_5.summary() # 显示模型结构 LeNet_5.compile(optimizer=SGD(lr=0.001, momentum=0.7, nesterov=True), # 优化器 loss='categorical_crossentropy', # 损失函数 metrics=["categorical_accuracy"]) # 评价指标 mcp_save = ModelCheckpoint('LeNet-5.h5', save_best_only=True, monitor='val_loss', mode='min') LeNet_history = LeNet_5.fit(train_images, train_labels, epochs=200, batch_size=128, shuffle=True, validation_split=0.35, callbacks=[mcp_save]) LeNet_train_loss, LeNet_train_acc = LeNet_5.evaluate(train_images, train_labels) LeNet_test_loss, LeNet_test_acc = 
LeNet_5.evaluate(test_images, test_labels) LeNet_pred = LeNet_5.predict(test_images) def CNN(): model = models.Sequential() # 添加卷积与池化层 model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(128, (3, 3), activation='relu')) # 添加分类器 model.add(layers.Flatten()) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(10, activation='softmax')) return model CNN = CNN() CNN.summary() # 显示模型结构 CNN.compile(optimizer=SGD(lr=0.001, momentum=0.7, nesterov=True), # 优化器 loss='categorical_crossentropy', # 损失函数 metrics=["categorical_accuracy"]) # 评价指标 mcp_save = ModelCheckpoint('CNN.h5', save_best_only=True, monitor='val_loss', mode='min') CNN_history = CNN.fit(train_images, train_labels, epochs=200, batch_size=128, shuffle=True, validation_split=0.35, callbacks=[mcp_save]) CNN_train_loss, CNN_train_acc = CNN.evaluate(train_images, train_labels) CNN_test_loss, CNN_test_acc = CNN.evaluate(test_images, test_labels) CNN_pred = CNN.predict(test_images) def plot_metric(model="LeNet-5"): if model=="LeNet-5": history = LeNet_history else: history = CNN_history train_acc, train_mse = history.history['categorical_accuracy'], history.history['loss'] val_acc, val_mse = history.history['val_categorical_accuracy'], history.history['val_loss'] epochs = range(1, len(train_acc) + 1) fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(15, 5)) # 准确率图 axes[0].plot(epochs, train_acc, label='train_acc') axes[0].plot(epochs, val_acc, label='val_acc') axes[0].set_title('Training and validation accuracy of ' + model) axes[0].set_xlabel("Epochs") axes[0].set_ylabel("Accuracy") axes[0].legend(loc='lower right') axes[0].grid(axis="y") axes[0].grid(axis="x") # 损失函数图 axes[1].plot(epochs, train_mse, label='train_loss') axes[1].plot(epochs, val_mse, label='val_loss') axes[1].set_title('Training and validation loss 
of ' + model) axes[1].set_xlabel("Epochs") axes[1].set_ylabel("Loss") axes[1].legend(loc='upper right') axes[1].grid(axis="y") axes[1].grid(axis="x") plt.show() plot_metric('LeNet-5') plot_metric('CNN') print('============================================================\n============================================================') print(f"LeNet-5 train_loss: {LeNet_train_loss:>8f}, LeNet-5 train_accuracy: {LeNet_train_acc:>0.3f}% \n") print(f"LeNet-5 test_loss: {LeNet_test_loss:>8f}, LeNet-5 test_accuracy: {LeNet_test_acc:>0.3f}% \n") print('============================================================\n============================================================') print(f"CNN train_loss: {CNN_train_loss:>8f}, CNN train_accuracy: {CNN_train_acc:>0.3f}% \n") print(f"CNN test_loss: {CNN_test_loss:>8f}, CNN test_accuracy: {CNN_test_acc:>0.3f}% \n") LeNet_pred_, CNN_pred_ = np.rint(LeNet_pred), np.rint(CNN_pred) print("=====================LeNet-5 Classification Report=====================") print(classification_report(test_labels, LeNet_pred_)) print("=======================CNN Classification Report=======================") print(classification_report(test_labels, CNN_pred_)) labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] # 生成混淆矩阵 LeNet_cm = confusion_matrix(test_labels.argmax(1), LeNet_pred_.argmax(1)) CNN_cm = confusion_matrix(test_labels.argmax(1), CNN_pred_.argmax(1)) # 绘制混淆矩阵 fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(15, 5)) axes[0].set_title('Confusion matrix of LeNet-5') axes[1].set_title('Confusion matrix of CNN') ConfusionMatrixDisplay(LeNet_cm, display_labels=labels).plot(cmap=plt.cm.Blues, ax=axes[0]) ConfusionMatrixDisplay(CNN_cm, display_labels=labels).plot(cmap=plt.cm.Blues, ax=axes[1]) ```
github_jupyter
``` import pandas as pd, numpy as np from scipy import stats import random stations=pd.read_csv('data/stations.csv').set_index('ID') ``` Setup plot params ``` import matplotlib.pyplot as plt import seaborn as sns from matplotlib.collections import PolyCollection %matplotlib inline import matplotlib as mpl import matplotlib.font_manager as font_manager path = 'KulimPark-Regular.ttf' path2 = 'Symbola.ttf' prop = font_manager.FontProperties(fname=path) prop2 = font_manager.FontProperties(fname=path2) color_ax='#E7CFBC' color_bg='#FFF4EC' color_obs_right0='#F2B880' color_obs_left0=color_ax color_pred_right0='#C98686' color_pred_left0='#966B9D' color_pred_talalt0='#59c687' color_pred_nem_talalt0='#c95498' font_size=12 s=40 obs_talalt_glyph0='★' obs_nem_talalt_glyph0='☆' pred_talalt_glyph0='✔️' pred_nem_talalt_glyph0='✖️' title_icon_right={'Temp':'☼','Wind':'🌀','Hail':'⭕️','Snow':'☃️','Snow Depth':'⛄️','Rain':'☔️','Visib':'☀️'} title_icon_left={'Temp':'✨️','Wind':'☘','Hail':'⚪️','Snow':'⚪️','Snow Depth':'⚪️','Rain':'🌂','Visib':'⛈️'} title_icon={'Temp':'♨️','Rain':'☂️','Hail':'✴️','Snow':'❄️','Snow Depth':'⛷️','Wind':'⛵️','Cloud':'☁️','Visib':'⛅️'} def get_data(data,th): a1=pd.DataFrame(data[data<=th]) a1['g']='left' a2=pd.DataFrame(data[data>th]) a2['g']='right' a3=pd.concat([a1,a2]) a3['x']='x' return a1,a2,a3 def violin_plot(data,th,ax,color_left,color_right): a=0.3 a1,a2,a3=get_data(data,th) a1_augment=True a2_augment=True if len(a1)==0: a1=a3.loc[[a3.index[0]]] a1['g']='left' a1[a1.columns[0]]=5 if len(a2)==0: a2=a3.loc[[a3.index[0]]] a2['g']='right' a2[a2.columns[0]]=5 if len(a1)>1: a1_augment=False if not a1_augment: if a1.nunique()[a1.columns[0]]==1: a1_augment=True if a1_augment: a11=a1.copy().loc[[a1.index[0]]] a11[a11.columns[0]]+=random.random()*0.1*th a11['x']='x' a12=a1.copy().loc[[a1.index[0]]] a12[a12.columns[0]]-=random.random()*0.1*th a12['x']='x' a3=pd.concat([a3,a11,a12]) if len(a2)>1: a2_augment=False if not a2_augment: if 
a2.nunique()[a2.columns[0]]==1: a2_augment=True if a2_augment: a21=a2.copy().loc[[a2.index[0]]] a21[a21.columns[0]]+=random.random()*0.1*th a21['x']='x' a22=a2.copy().loc[[a2.index[0]]] a22[a22.columns[0]]-=random.random()*0.1*th a22['x']='x' a3=pd.concat([a3,a21,a22]) a3=a3.sort_values('g') ax.axvline(0,color=color_ax) if a3.nunique()['g']>1: sns.violinplot(y=a1.columns[0], x='x',hue='g', data=a3, split=True, ax=ax, inner=None,linewidth=1, scale="count", saturation=1) ax.get_children()[0].set_color(mpl.colors.colorConverter.to_rgba(color_left, alpha=a)) ax.get_children()[0].set_edgecolor(color_left) ax.get_children()[1].set_color(mpl.colors.colorConverter.to_rgba(color_right, alpha=a)) ax.get_children()[1].set_edgecolor(color_right) ax.legend().remove() else: if len(a1)>0: w=a1 c=color_left else: w=a2 c=color_right sns.violinplot(y=w.columns[0], data=w, ax=ax, inner=None,linewidth=1, scale="count", saturation=1) ax.set_xlim([-1,0]) ax.get_children()[0].set_color(mpl.colors.colorConverter.to_rgba(c, alpha=a)) ax.get_children()[0].set_edgecolor(c) def setup_axes(): fig,axes=plt.subplots(1,3,figsize=(8,5),gridspec_kw={'width_ratios': [1, 3, 1]}) axi_top= axes[2].inset_axes([0.1, 0.65, 1, 0.3]) axi_top.axis('off') axi_bottom= axes[2].inset_axes([0.1, 0, 1, 0.5]) axi_bottom.axis('off') axes[0].axis('off') axes[1].axis('off') axes[2].axis('off') axes[0]=axes[0].inset_axes([0, 0.15, 1, 0.85]) axes[1]=axes[1].inset_axes([0, 0.15, 1, 0.85]) axes[0].axis('off') axes[1].axis('off') return fig, axes, axi_top, axi_bottom def stem_plot(data,ax,color,s=s): data=pd.DataFrame(data) x=data.index y=data[data.columns[0]].values for i,e in enumerate(y): ax.plot([0,e],[x[i],x[i]],color=color) ax.scatter(y,x,s,color=color,zorder=10) def stem2_plot(data,th,ax,color_left,color_right,s=s,axv_color=None): if axv_color==None:axv_color=color_right a1,a2,a3=get_data(data,th) stem_plot(a1,ax,color_left,s) stem_plot(a2,ax,color_right,s) ax.axvline(0,color=color_ax) #if th!=0: if True: 
ax.axvline(th,color=axv_color,ls='--',zorder=5) def icons_plot(axes,kondicio,mennyiseg,observation_th,prediction_th): ylim=axes[0].get_ylim() xlim=axes[1].get_xlim() y_max_coord=ylim[0]+(ylim[1]-ylim[0])*1.05 y_max_coord2=ylim[0]+(ylim[1]-ylim[0])*1.05 #1.04 x_icon_coord_shift=(xlim[1]-xlim[0])*0.1 axes[0].text(observation_th, y_max_coord, title_icon[kondicio], horizontalalignment='center', color=color_obs_right0, fontproperties=prop2, fontsize=font_size*1.5) axes[1].text(prediction_th, y_max_coord, title_icon[mennyiseg], horizontalalignment='center', color=color_ax, fontproperties=prop2, fontsize=font_size*1.5) axes[1].text(prediction_th+x_icon_coord_shift, y_max_coord2, title_icon_right[mennyiseg], horizontalalignment='center', color=color_pred_right, fontproperties=prop2, fontsize=font_size*1.5) axes[1].text(prediction_th-x_icon_coord_shift, y_max_coord2, title_icon_left[mennyiseg], horizontalalignment='center', color=color_pred_left, fontproperties=prop2, fontsize=font_size*1.5) def talalat_plot_line(axes,n_prediction_ts_good,n_prediction_ts_bad, n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt, observation_th,prediction_th): ylim=axes[0].get_ylim() xlim=axes[0].get_xlim() y_max_coord=ylim[0]+(ylim[1]-ylim[0])*(-0.07) x_icon_coord_shift=(xlim[1]-xlim[0])*0.1 x_icon_coord_shift2=(xlim[1]-xlim[0])*0.27 axes[0].text(observation_th+x_icon_coord_shift, y_max_coord, obs_talalt_glyph, horizontalalignment='center', color=color_obs_right, fontproperties=prop2) axes[0].text(observation_th-x_icon_coord_shift, y_max_coord, obs_nem_talalt_glyph, horizontalalignment='center', color=color_obs_left, fontproperties=prop2) axes[0].text(observation_th+x_icon_coord_shift2, y_max_coord, n_prediction_ts_good, horizontalalignment='center', color=color_obs_right, fontproperties=prop) axes[0].text(observation_th-x_icon_coord_shift2, y_max_coord, n_prediction_ts_bad, horizontalalignment='center', color=color_obs_left, fontproperties=prop) axes[0].text(observation_th, 
y_max_coord, '|', horizontalalignment='center', color=color_obs_right0, fontproperties=prop,fontsize=19) xlim=axes[1].get_xlim() x_icon_coord_shift=(xlim[1]-xlim[0])*0.04 x_icon_coord_shift2=(xlim[1]-xlim[0])*0.1 axes[1].text(prediction_th+x_icon_coord_shift, y_max_coord, pred_talalt_glyph, horizontalalignment='center', color=color_pred_talalt, fontproperties=prop2) axes[1].text(prediction_th-x_icon_coord_shift, y_max_coord, pred_nem_talalt_glyph, horizontalalignment='center', color=color_pred_nem_talalt, fontproperties=prop2) axes[1].text(prediction_th+x_icon_coord_shift2, y_max_coord, n_prediction_ts_good_talalt, horizontalalignment='center', color=color_pred_talalt, fontproperties=prop) axes[1].text(prediction_th-x_icon_coord_shift2, y_max_coord, n_prediction_ts_good_nem_talalt, horizontalalignment='center', color=color_pred_nem_talalt, fontproperties=prop) axes[1].text(prediction_th, y_max_coord, '|', horizontalalignment='center', color=color_pred_right, fontproperties=prop,fontsize=19) y_max_coord=ylim[0]+(ylim[1]-ylim[0])*(-0.14) axes[0].text(observation_th, y_max_coord, 'feltétel', horizontalalignment='center', color=color_obs_right0, fontproperties=prop) axes[1].text(prediction_th, y_max_coord, 'jóslat', horizontalalignment='center', color=color_pred_right, fontproperties=prop) y_max_coord=ylim[0]+(ylim[1]-ylim[0])*(-0.13) x_coord_shift=prediction_th+(prediction_th-xlim[0])*(-0.4) axes[1].annotate('', xy=(x_coord_shift, y_max_coord),xycoords='data',annotation_clip=False, xytext=(xlim[0], y_max_coord),arrowprops=dict(arrowstyle= '->',color=color_obs_right0)) def talalat_plot_violin(axes,n_prediction_ts_good,n_prediction_ts_bad,n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt): y_icon_obs=0.65 y_icon_pred=0.5 if color_obs_right==color_obs_right0: x=0.72 else: x=0.47 axes[2].text(0.72, y_icon_obs, obs_talalt_glyph, horizontalalignment='center', color=color_obs_right, fontproperties=prop2) axes[2].text(0.9, y_icon_obs,n_prediction_ts_good, 
horizontalalignment='center', color=color_obs_right, fontproperties=prop) axes[2].text(0.47, y_icon_obs, obs_nem_talalt_glyph, horizontalalignment='center', color=color_obs_left, fontproperties=prop2) axes[2].text(0.29, y_icon_obs, n_prediction_ts_bad, horizontalalignment='center', color=color_obs_left, fontproperties=prop) axes[2].text(0.72, y_icon_pred, pred_talalt_glyph, horizontalalignment='center', color=color_pred_talalt, fontproperties=prop2) axes[2].text(0.9, y_icon_pred, n_prediction_ts_good_talalt, horizontalalignment='center', color=color_pred_talalt, fontproperties=prop) axes[2].text(0.47, y_icon_pred, pred_nem_talalt_glyph, horizontalalignment='center', color=color_pred_nem_talalt, fontproperties=prop2) axes[2].text(0.29, y_icon_pred, n_prediction_ts_good_nem_talalt, horizontalalignment='center', color=color_pred_nem_talalt, fontproperties=prop) axes[2].annotate('', xy=(0.59, y_icon_pred*1.04),xycoords='data', xytext=(x, y_icon_obs*0.98),arrowprops=dict(arrowstyle= '->',color=color_obs_right0)) def talalat_plot(axes,ns,observation_th,prediction_th): n_prediction_ts_good,n_prediction_ts_bad,n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt=ns talalat_plot_line(axes,n_prediction_ts_good,n_prediction_ts_bad, n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt, observation_th,prediction_th) talalat_plot_violin(axes,n_prediction_ts_good,n_prediction_ts_bad, n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt) def year_plot(data,ax,k): y=data.values x=data.index ex=max(y)-min(y) text_off=abs(ex*k) text_align='left' if y[0]<0: text_off=-text_off text_align='right' ax.text(y[0]+text_off, x[0], str(int(x[0])), horizontalalignment=text_align, verticalalignment='center', color=color_ax, fontproperties=prop) text_off=abs(text_off) text_align='left' if y[-1]<0: text_off=-text_off text_align='right' ax.text(y[-1]+text_off, x[-1], str(int(x[-1])), horizontalalignment=text_align, verticalalignment='center', color=color_ax, 
fontproperties=prop) def spine_plot(datum,title,mondas,jelentes,kondicio,mennyiseg, observation_ts,observation_th,prediction_ts,prediction_th,c): #data prediction_ts_good=prediction_ts.loc[observation_ts[observation_ts>observation_th].index] prediction_ts_bad=prediction_ts.loc[observation_ts[observation_ts<=observation_th].index] n_prediction_ts_good=len(prediction_ts_good) n_prediction_ts_bad=len(prediction_ts_bad) if color_obs_right0!=color_obs_right: prediction_ts_good,prediction_ts_bad=prediction_ts_bad,prediction_ts_good prediction_ts_good_nem_talalt,prediction_ts_good_talalt,\ prediction_ts_good_joined=get_data(prediction_ts_good,prediction_th) n_prediction_ts_good_talalt=len(prediction_ts_good_talalt) n_prediction_ts_good_nem_talalt=len(prediction_ts_good_nem_talalt) ns=[n_prediction_ts_good,n_prediction_ts_bad,n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt] #plots #fig, axes, axi_top, axi_bottom=setup_axes() #stem2_plot(observation_ts,observation_th,axes[0],color_obs_left,color_obs_right,s/2,color_obs_right0) #stem2_plot(prediction_ts_good,prediction_th,axes[1],color_pred_left,color_pred_right) #stem_plot(prediction_ts_bad,axes[1],color_ax) #violin_plot(observation_ts,observation_th,axi_top,color_obs_left,color_obs_right) #violin_plot(prediction_ts_good,prediction_th,axi_bottom,color_pred_left,color_pred_right) #icons #icons_plot(axes,kondicio,mennyiseg,observation_th,prediction_th) #talalat #talalat_plot(axes,ns,observation_th,prediction_th) #years obs_year_index=observation_data[[obs_key,'pyear']].dropna().groupby('pyear').mean().index pred_year_index=prediction_data[[pred_key,'pyear']].dropna().groupby('pyear').mean().index pred_year_index_filt=prediction_ts.loc[pred_year_index].dropna().index obs_year_index_filt=observation_ts.loc[obs_year_index].dropna().index pred_year_index2=max(min(pred_year_index_filt),min(obs_year_index_filt)) pred_year_index=range(pred_year_index2,max(pred_year_index_filt)+1) 
#year_plot(observation_ts.loc[obs_year_index].dropna(),axes[0],0.09) #year_plot(prediction_ts.loc[pred_year_index].dropna(),axes[1],0.03) #titles len_ratio=0.15*(-1+(len(jelentes.split(',')[0])/len(jelentes.split(',')[1]))) #fig.text(0.5+len_ratio,0.04,jelentes.split(',')[0]+',',color=color_obs_right0, # fontproperties=prop,fontsize=font_size*0.7,horizontalalignment='right') if color_pred_talalt==color_pred_talalt0: color_pred_side=color_pred_right else: color_pred_side=color_pred_left #fig.text(0.5+len_ratio,0.04,jelentes.split(',')[1],color=color_pred_side, # fontproperties=prop,fontsize=font_size*0.7,horizontalalignment='left') if n_prediction_ts_good_nem_talalt>=n_prediction_ts_good_talalt: color_title=color_pred_nem_talalt else: color_title=color_pred_talalt verdict=int(100*n_prediction_ts_good_talalt/(n_prediction_ts_good_talalt+n_prediction_ts_good_nem_talalt)) if color_pred_talalt!=color_pred_talalt0: verdict=100-verdict return_verdict=int(verdict) verdict=str(verdict)+'%' #plt.suptitle(title,y=0.11,color=color_title,fontproperties=prop,fontsize=font_size) #fig.text(0.97,0.04,verdict, fontproperties=prop, # horizontalalignment='right', color=color_title, fontsize=font_size*2, ) #fig.text(0.03,0.04, datum, fontproperties=prop, # horizontalalignment='left', color=color_obs_right0, fontsize=font_size*2, ) #plt.savefig(c+'/map/'+str(mondas)+'.png',dpi=300, facecolor=color_bg) #plt.show() return return_verdict def filter_data(dz,observation_range,prediction_range): dgs=[] dhs=[] for year in range(int(dz.min()['year']),int(dz.max()['year'])): k=0 from_date=pd.to_datetime(str(year)+'-'+str(observation_range[k].month)+'-'+str(observation_range[k].day)) from_pred=pd.to_datetime(str(year)+'-'+str(prediction_range[k].month)+'-'+str(prediction_range[k].day)) k=1 to_date=pd.to_datetime(str(year)+'-'+str(observation_range[k].month)+'-'+str(observation_range[k].day)) to_pred=pd.to_datetime(str(year)+'-'+str(prediction_range[k].month)+'-'+str(prediction_range[k].day)) if 
to_pred<to_date: to_pred+=pd.to_timedelta('1Y') dg=dz.loc[from_date:] dg=dg[:to_date] dg['pyear']=year dgs.append(dg) dh=dz.loc[from_pred:] dh=dh[:to_pred] dh['pyear']=year dhs.append(dh) return pd.concat(dgs),pd.concat(dhs) def set_direction(kondicio, mennyiseg): if kondicio: color_obs_right=color_obs_right0 color_obs_left=color_obs_left0 obs_talalt_glyph='★' obs_nem_talalt_glyph='☆' else: color_obs_right=color_obs_left0 color_obs_left=color_obs_right0 obs_talalt_glyph='☆' obs_nem_talalt_glyph='★' if mennyiseg: color_pred_talalt=color_pred_talalt0 color_pred_nem_talalt=color_pred_nem_talalt0 pred_talalt_glyph='✔️' pred_nem_talalt_glyph='✖️' else: color_pred_talalt=color_pred_nem_talalt0 color_pred_nem_talalt=color_pred_talalt0 pred_talalt_glyph='✖️' pred_nem_talalt_glyph='✔️' return color_obs_right,color_obs_left,obs_talalt_glyph,obs_nem_talalt_glyph,\ color_pred_talalt,color_pred_nem_talalt,pred_talalt_glyph,pred_nem_talalt_glyph def get_sign(sign,key): positive=True if (('-' in sign) or ('+' in sign)): if sign=='-': positive=False elif sign=='+': positive=True elif (('<' in sign) or ('>' in sign)): if '<' in sign: positive=False elif '>' in sign: positive=True return positive universal_normalize=['XTEMP','XVSB','XSPD'] def get_ts_data(data,key,sign): ts=data.groupby('year').mean()[key] if (('-' in sign) or ('+' in sign)): th=ts.mean() else: th=float(sign[1:]) if key in universal_normalize: th-=ts.mean() ts-=ts.mean() return ts,th def get_comp_data(observation_data,obs_key,obs_sign,prediction_data,pred_key,pred_sign): ertek_sign=True irany_sign=True observation_ts=observation_data.groupby('year').mean()[obs_key] prediction_ts=prediction_data.groupby('year').mean()[pred_key] prediction_th=observation_ts.mean() observation_ts-=observation_ts.mean() observation_th=observation_ts.min()*1.01 prediction_th-=prediction_ts.mean() prediction_ts-=prediction_ts.mean() if obs_sign=='A': if pred_sign=='A': observation_th=0 prediction_th=0 else: irany_sign=False return 
observation_ts,observation_th,prediction_ts,prediction_th,ertek_sign,irany_sign mennyiseg_key={'Temp':'XTEMP','Snow Depth':'XSD','Wind':'XSPD','Rain':'YPCP','Visib':'XVSB', 'Snow':'YSNW','Hail':'YHAL'} stations_to_include={'ro':[150040,151700,151450,152600,152470,150800,152300,150100,151200,152000], 'hu':[128820,128120,127720,128600,128390,128920,128430,128250,128220,128050, 129150,129420,129600,129700,129820,129920,129350,129100]} stations_to_include['huro']=stations_to_include['hu']+stations_to_include['ro'] def get_country(c,h='ds',plot=False): if c=='huro': hu=pd.read_csv('data/'+'hu'+'_'+h+'.csv') #daily data ro=pd.read_csv('data/'+'ro'+'_'+h+'.csv') #daily data df=pd.concat([hu,ro]) else: df=pd.read_csv('data/'+c+'_'+h+'.csv') #daily data # df=pd.read_csv('data/'+c+'_hs.csv') #high_res data df=df[df['ID'].isin(stations_to_include[c])] df['time']=pd.to_datetime(df['time']) df['year']=df['time'].dt.year df['month']=df['time'].dt.month df['day']=df['time'].dt.day df['hour']=df['time'].dt.hour df=df.set_index('time') df=df.sort_index() if plot: df.groupby('year').nunique()['ID'].plot() nepi=pd.read_excel(c+'/idojaras_'+c+'.xlsx') return df,nepi c='huro' df,nepi=get_country(c) color_pred_left=color_pred_left0 color_pred_right=color_pred_right0 mondasok=nepi['ID'].values mondasok=[1,6] shares=[] for st in df['ID'].unique(): dz=df[df['ID']==st].groupby(['time']).mean() print(st) for mondas in mondasok: try: nep=nepi.loc[mondas] if str(nep['Mennyiség'])!='nan': obs_key=mennyiseg_key[nep['Kondíció']] pred_key=mennyiseg_key[nep['Mennyiség']] observation_range=[nep['Dátum:mettől']+pd.to_timedelta('-1D'),nep['Dátum:meddig']+pd.to_timedelta('+2D')] prediction_range=[nep['Periódus:mettől'],nep['Periódus:meddig']+pd.to_timedelta('+1D')] observation_data,prediction_data=filter_data(dz,observation_range,prediction_range) #comparison if str(nep['Érték']) in ['A','B']: #print('comp',mondas) observation_ts,observation_th,prediction_ts,prediction_th,ertek_sign,irany_sign=\ 
get_comp_data(observation_data,obs_key,nep['Érték'],\ prediction_data,pred_key,nep['Irány']) #time series else: #print('ts',mondas) ertek_sign=get_sign(nep['Érték'],obs_key) irany_sign=get_sign(nep['Irány'],pred_key) observation_ts,observation_th=get_ts_data(observation_data,obs_key,nep['Érték']) prediction_ts,prediction_th=get_ts_data(prediction_data,pred_key,nep['Irány']) color_obs_right,color_obs_left,obs_talalt_glyph,obs_nem_talalt_glyph,\ color_pred_talalt,color_pred_nem_talalt,pred_talalt_glyph,pred_nem_talalt_glyph=\ set_direction(ertek_sign, irany_sign) #datum=str(nep['Dátums'])[:3]+'. '+str(nep['Dátum:mettől'].day) datum=nep['DS'] share=spine_plot(datum,nep['Mondás'].strip(),mondas,nep['Jelentés'].strip(),nep['Kondíció'],nep['Mennyiség'], observation_ts,observation_th,prediction_ts,prediction_th,c) shares.append({'share':share,'station':st,'mondas':mondas}) except: print ('ERROR '+st) dw=pd.DataFrame(shares).set_index('station').join(stations).set_index('mondas').join(nepi.set_index('ID')) import json namer=pd.DataFrame(json.loads(open('data/namer.json','r').read()),index=['name']).T dw=dw.set_index('LOC').join(namer) # !conda install geopandas # !pip install descartes import geopandas def add_basemap(ax, zoom, url='https://maps.wikimedia.org/osm-intl/{z}/{x}/{y}.png?lang=hu'): xmin, xmax, ymin, ymax = ax.axis() basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, url=url) ax.imshow(basemap, extent=extent, interpolation='bilinear') # restore original x/y limits ax.axis((xmin, xmax, ymin, ymax)) df = geopandas.read_file(geopandas.datasets.get_path('nybb')) ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k') df = df.to_crs(epsg=3857) # !conda install contextily import contextily as ctx ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k') add_basemap(ax, zoom=11) lat=dw['LAT'].values lon=dw['LON'].values population = dw['share'].values dist=dw['name'].values # scatter city data, with c reflecting population ax.scatter(lon,lat, 
latlon=True, c=population,s=700, cmap='YlGnBu_r', alpha=0.5) #create colorbar plt.colorbar(label=r'Population') plt.clim(300000, 4000000) ```
github_jupyter
# Guessing Game Challenge - Solution Let's use `while` loops to create a guessing game. The Challenge: Write a program that picks a random integer from 1 to 100, and has players guess the number. The rules are: 1. If a player's guess is less than 1 or greater than 100, say "OUT OF BOUNDS" 2. On a player's first turn, if their guess is * within 10 of the number, return "WARM!" * further than 10 away from the number, return "COLD!" 3. On all subsequent turns, if a guess is * closer to the number than the previous guess return "WARMER!" * farther from the number than the previous guess, return "COLDER!" 4. When the player's guess equals the number, tell them they've guessed correctly *and* how many guesses it took! #### First, pick a random integer from 1 to 100 using the random module and assign it to a variable Note: `random.randint(a,b)` returns a random integer in range `[a, b]`, including both end points. ``` import random num = random.randint(1,100) num ``` #### Next, print an introduction to the game and explain the rules ``` print("WELCOME TO GUESS ME!") print("I'm thinking of a number between 1 and 100") print("If your guess is more than 10 away from my number, I'll tell you you're COLD") print("If your guess is within 10 of my number, I'll tell you you're WARM") print("If your guess is farther than your most recent guess, I'll say you're getting COLDER") print("If your guess is closer than your most recent guess, I'll say you're getting WARMER") print("LET'S PLAY!") ``` #### Create a list to store guesses Hint: zero is a good placeholder value. It's useful because it evaluates to "False" ``` guesses = [0] ``` #### Write a `while` loop that asks for a valid guess. Test it a few times to make sure it works. ``` while True: guess = int(input("I'm thinking of a number between 1 and 100.\n What is your guess? ")) if guess < 1 or guess > 100: print('OUT OF BOUNDS! 
Please try again: ') continue break ``` #### Write a `while` loop that compares the player's guess to our number. If the player guesses correctly, break from the loop. Otherwise, tell the player if they're warmer or colder, and continue asking for guesses. Some hints: * it may help to sketch out all possible combinations on paper first! * you can use the `abs()` function to find the positive difference between two numbers * if you append all new guesses to the list, then the previous guess is given as `guesses[-2]` ``` while True: # we can copy the code from above to take an input guess = int(input("I'm thinking of a number between 1 and 100.\n What is your guess? ")) if guess < 1 or guess > 100: print('OUT OF BOUNDS! Please try again: ') continue # here we compare the player's guess to our number if guess == num: print(f'CONGRATULATIONS, YOU GUESSED IT IN ONLY {len(guesses)} GUESSES!!') break # if guess is incorrect, add guess to the list guesses.append(guess) # when testing the first guess, guesses[-2]==0, which evaluates to False # and brings us down to the second section if guesses[-2]: if abs(num-guess) < abs(num-guesses[-2]): print('WARMER!') else: print('COLDER!') else: if abs(num-guess) <= 10: print('WARM!') else: print('COLD!') ``` That's it! You've just programmed your first game! In the next section we'll learn how to turn some of these repetitive actions into *functions* that can be called whenever we need them. ### Good Job!
github_jupyter
# Fast Sign Adversary Generation Example This notebook demos finds adversary examples using MXNet Gluon and taking advantage of the gradient information [1] Goodfellow, Ian J., Jonathon Shlens, and Christian Szegedy. "Explaining and harnessing adversarial examples." arXiv preprint arXiv:1412.6572 (2014). https://arxiv.org/abs/1412.6572 ``` %matplotlib inline import mxnet as mx import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm from mxnet import gluon ``` Build simple CNN network for solving the MNIST dataset digit recognition task ``` ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu() batch_size = 128 ``` ## Data Loading ``` transform = lambda x,y: (x.transpose((2,0,1)).astype('float32')/255., y) train_dataset = gluon.data.vision.MNIST(train=True).transform(transform) test_dataset = gluon.data.vision.MNIST(train=False).transform(transform) train_data = gluon.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=5) test_data = gluon.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) ``` ## Create the network ``` net = gluon.nn.HybridSequential() with net.name_scope(): net.add( gluon.nn.Conv2D(kernel_size=5, channels=20, activation='tanh'), gluon.nn.MaxPool2D(pool_size=2, strides=2), gluon.nn.Conv2D(kernel_size=5, channels=50, activation='tanh'), gluon.nn.MaxPool2D(pool_size=2, strides=2), gluon.nn.Flatten(), gluon.nn.Dense(500, activation='tanh'), gluon.nn.Dense(10) ) ``` ## Initialize training ``` net.initialize(mx.initializer.Uniform(), ctx=ctx) net.hybridize() loss = gluon.loss.SoftmaxCELoss() trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1, 'momentum':0.95}) ``` ## Training loop ``` epoch = 3 for e in range(epoch): train_loss = 0. 
acc = mx.metric.Accuracy() for i, (data, label) in enumerate(train_data): data = data.as_in_context(ctx) label = label.as_in_context(ctx) with mx.autograd.record(): output = net(data) l = loss(output, label) l.backward() trainer.update(data.shape[0]) train_loss += l.mean().asscalar() acc.update(label, output) print("Train Accuracy: %.2f\t Train Loss: %.5f" % (acc.get()[1], train_loss/(i+1))) ``` ## Perturbation We first run a validation batch and measure the resulting accuracy. We then perturbate this batch by modifying the input in the opposite direction of the gradient. ``` # Get a batch from the testing set for data, label in test_data: data = data.as_in_context(ctx) label = label.as_in_context(ctx) break # Attach gradient to it to get the gradient of the loss with respect to the input data.attach_grad() with mx.autograd.record(): output = net(data) l = loss(output, label) l.backward() acc = mx.metric.Accuracy() acc.update(label, output) print("Validation batch accuracy {}".format(acc.get()[1])) ``` Now we perturb the input ``` data_perturbated = data + 0.15 * mx.nd.sign(data.grad) output = net(data_perturbated) acc = mx.metric.Accuracy() acc.update(label, output) print("Validation batch accuracy after perturbation {}".format(acc.get()[1])) ``` ## Visualization Let's visualize an example after pertubation. We can see that the prediction is often incorrect. ``` from random import randint idx = randint(0, batch_size-1) plt.imshow(data_perturbated[idx, :].asnumpy().reshape(28,28), cmap=cm.Greys_r) print("true label: %d" % label.asnumpy()[idx]) print("predicted: %d" % np.argmax(output.asnumpy(), axis=1)[idx]) ```
github_jupyter
``` import os import cobra from cobra.core import Reaction from cobra.flux_analysis.parsimonious import add_pfba import pandas as pd # currently working with local medusa installation -- anytime changes are made to medusa, we need to run installation # again within the virtualenv by running setup.py #import medusa from medusa.reconstruct.expand.expand import iterative_gapfill_from_binary_phenotypes import medusa from medusa.flux_analysis import flux_balance def load_universal_modelseed(): seed_rxn_table = pd.read_csv('../data/reactions_seed_20180809.tsv',sep='\t') seed_rxn_table['id'] = seed_rxn_table['id'] + '_c' universal = cobra.io.load_json_model('../data/universal_mundy.json') # remove any reactions from the universal that don't have "OK" status # in modelSEED (guards against mass and charge-imbalanced reactions) ok_ids = list(seed_rxn_table.loc[(seed_rxn_table['status'] == 'OK') | (seed_rxn_table['status'] == 'HB')]['id']) remove_rxns = [] for reaction in universal.reactions: if reaction.id not in ok_ids: remove_rxns.append(reaction) universal.remove_reactions(remove_rxns) # remove metabolites from the universal that are no longer present in any # reactions. 
mets_in_reactions = [] for reaction in universal.reactions: mets = [met.id for met in reaction.metabolites] mets_in_reactions.extend(mets) mets_in_reactions = set(mets_in_reactions) mets_missing_reactions = [] for metabolite in universal.metabolites: if metabolite.id not in mets_in_reactions: mets_missing_reactions.append(metabolite) universal.remove_metabolites(mets_missing_reactions) universal.repair() return universal master_universal = load_universal_modelseed() # Load the biolog composition to be used for gapfilling biolog_base_composition = pd.read_csv('../data/biolog_base_composition.csv',sep=',') biolog_base_dict = dict(zip(biolog_base_composition['ID'],\ [1000 for i in range(0,len(biolog_base_composition['ID']))])) # The biolog growth file has already been filtered by species that meet # the minimum carbon source requirement, so we can use the entire dataframe biolog_thresholded = pd.read_csv('../data/plata_thresholded.csv',sep='\t',index_col=0) # get the list of ensembles already generated. already_generated = os.listdir('../results/ensembles/') # remove the .json extension to just get the name for each species already_generated = [s.split('.')[0] for s in already_generated] # Exclude species for which there is no feasible solution # using this reaction bag (identified during previous iterations # of this analysis) exclude_species = ["Brachybacterium faecium","Gordonia bronchialis"] # Iterate over each species and generate and ensemble for each for species_file in os.listdir('../data/modelseed_models/'): # Load the species model. only continue if the species is in the filtered # biolog dataframe (i.e. 
it met our filtering criteria) species_name = species_file.split('.')[0] if (species_name in biolog_thresholded.index) and ( species_name not in already_generated) and ( species_name not in exclude_species): print("Building ensemble for " + species_name) model = cobra.io.load_json_model('../data/modelseed_models/' + species_file) # extract the biolog conditions for the model of interest mod_pheno = biolog_thresholded.loc[species_name] mod_pheno = list(mod_pheno[mod_pheno == True].index) # generate a fresh universal for each species universal = master_universal.copy() # check for biolog base components in the model. Add exchange reactions # if none exist and add the metabolite to the model if it does not # already exist add_mets = [] add_exchanges = [] for met in list(biolog_base_dict.keys()): try: model.metabolites.get_by_id(met) except: print('no '+met) add_met = universal.metabolites.get_by_id(met).copy() add_mets.append(add_met) model.add_metabolites(add_mets) for met in list(biolog_base_dict.keys()): # Search for exchange reactions try: model.reactions.get_by_id('EX_'+met) except: add_met = model.metabolites.get_by_id(met) ex_rxn = Reaction('EX_' + met) ex_rxn.name = "Exchange reaction for " + met ex_rxn.lower_bound = -1000 ex_rxn.upper_bound = 1000 ex_rxn.add_metabolites({add_met:-1}) add_exchanges.append(ex_rxn) model.add_reactions(add_exchanges) # Find metabolites from the biolog data that are missing in the model # and add them from the universal missing_mets = [] missing_exchanges = [] media_dicts = {} for met_id in mod_pheno: try: model.metabolites.get_by_id(met_id) except: print(met_id + " was not in model, adding met and exchange reaction") met = universal.metabolites.get_by_id(met_id).copy() missing_mets.append(met) ex_rxn = Reaction('EX_' + met_id) ex_rxn.name = "Exchange reaction for " + met_id ex_rxn.lower_bound = -1000 ex_rxn.upper_bound = 1000 ex_rxn.add_metabolites({met:-1}) missing_exchanges.append(ex_rxn) media_dicts[met_id] = 
biolog_base_dict.copy() media_dicts[met_id] = {'EX_'+k:v for k,v in media_dicts[met_id].items()} media_dicts[met_id]['EX_'+met_id] = 1000 model.add_metabolites(missing_mets) model.add_reactions(missing_exchanges) # identify transporters for each biolog component in the universal model # and pick one that will enable transport in the gapfilling problem. transporters_in_universal = [rxn for rxn in universal.reactions if len(rxn.compartments)>1] for met in media_dicts.keys(): metabolite = model.metabolites.get_by_id(met) base_met_id = met.split('_')[0] rxns_with_metabolite = metabolite.reactions transport = False for rxn in rxns_with_metabolite: metabolites = [met_in_rxn.id for met_in_rxn in rxn.metabolites] if (base_met_id+'_e' in metabolites and base_met_id+'_c' in metabolites): transport = True pick_transporter = {} if not transport: print("missing transporter for " + metabolite.name) for rxn in transporters_in_universal: metabolites = [met_in_rxn.id for met_in_rxn in rxn.metabolites] if (base_met_id+'_e' in metabolites and base_met_id+'_c' in metabolites): pick_transporter[met] = rxn.id # Add the transporters to the model transporters_to_add = list(pick_transporter.values()) transporter_list = [] for rxn in transporters_to_add: transporter_list.append(universal.reactions.get_by_id(rxn).copy()) model.add_reactions(transporter_list) # remove the added transporters from the universal model universal.remove_reactions([universal.reactions.get_by_id(rxn) for rxn in transporters_to_add]) # generate the ensemble for this species num_cycles = 100 lower_bound = 0.05 ensemble = iterative_gapfill_from_binary_phenotypes(\ model,\ universal,\ media_dicts,\ num_cycles,\ lower_bound=lower_bound,\ inclusion_threshold=1E-11,\ exchange_reactions=False,\ demand_reactions=False,\ exchange_prefix='EX') # save the ensemble by pickling it ensemble.to_pickle('../results/ensembles/'+species_name+'.pickle') ex_rxns = [rxn for rxn in ensemble.base_model.reactions \ if 
rxn.id.startswith('EX_')] for source in media_dicts.keys(): # close all exchange reactions for rxn in ex_rxns: rxn.lower_bound = 0 #ensemble.base_model.medium = media_dicts[source] for ex_rxn in media_dicts[source].keys(): ensemble.base_model.reactions.get_by_id(ex_rxn).lower_bound = \ -1.0*media_dicts[source][ex_rxn] ensemble.base_model.reactions.get_by_id(ex_rxn).upper_bound = \ 1.0*media_dicts[source][ex_rxn] for member in ensemble.members: ensemble.set_state(member) # member should produce the minimum amount of required biomass # flux or more if ensemble.base_model.optimize().f > 0.001: print(member.id,source) else: print("no growth for ",member.id,source) ```
github_jupyter
# This Jupyter Notebook is written to convert Raw Data and Scores files from NIH Toolbox IPAD exports into NDA data structures using linking information from a 'Crosswalk' and extra NDA-required subject identifier data (GUID, etc) from a csv. Some Notes: using a specialty Python 3 virtual environment (named PycharmToolbox) as kernel for this notebook. Installed by running the following commands in my terminal and then switching the kernel with the dropdown menu above: > source /home/petra/.virtualenvs/PycharmToolbox/bin/activate > pip install ipykernel > ipython kernel install --user --name=PycharmToolbox > jupyter-notebook requirements file generated from within the activated virtual environment by: > pip freeze > requirements.txt ``` import os, datetime import pandas as pd import numpy as np import subprocess snapshotdate = datetime.datetime.today().strftime('%m_%d_%Y') ``` Specify the input and output data and paths for NIH toolbox. To run the cells of this notebook, you will need four files. Two are in the .csv format of the IPAD Toolbox applcation export. E.g. a raw Data file containing scores for item level responses, and a Scores file, containing the summary statistics for the collection of item level data. We don't need the registration file, but it might be handy for filtering out batteries that were inappropriately administered. These two files are linked by PIN and Inst variables, and must be cleaned a priori to remove subjects that are in one but not the other file. I.e. the list of unique PINs (ex. HCP0211999_V1) in one file should be exactly the same as the list of unique PINs in the other. For HCP data, we concatenate the exports of all subjects' Score data in to a single file, and the exports of all subjects Raw data into a second file. Because all other sources of HCP data use 'subject' and 'visit' rather than a PIN which is a concatenation of both, we create these variables (subject and visit) from PIN prior to running this program as well. 
The third necessary file is a csv containing the fields that NDA requires in all of their structures e.g. subjectkey (GUID or pseudo-GUID), src_subject_id (e.g. HCP0211999), interview_age (in months), and gender (misnomer for sex assigned at birth). In HCP data, we link the two sources of information via 'subject' and 'visit.' Lastly, read in the crosswalk file - which will map your vars to NDA after transpose is complete. I have placed the crosswalk from HCP data. Any instruments in this crosswalk that are the same as yours (look at 'Inst' column) will work for you provided you haven't renamed your columns. You will have to add any instruments not present, after obtaining variable maps and templates from the NDA for your particular set of NIH Toolbox Data. Note that subject and visit are variables we created locally to merge with the data coming from a different local source (REDCap). They are not variables that are output from the NIH Toolbox app on the Ipads, but are necessary for the merge with the NDA required fields stored elsewhere. ``` ######################### #POINT TO YOUR DATA - a scores file and a raw file. These will be the output from the IPAD, concatenated. # E.g. take all the scores and concatenate vertically in to a single file, and take all the raw item data and # concatenate into a single file. Files are linked by PIN and Instrument. # In general there will be multiple row in the Raw Data file for every Instrument/PIN row in the Scores file, # All exceptions to this generality are accounted for below ########################## #HCP has two different Lifespan studies going to NDA - HCP - Aging, and HCP Developement. #We have a few internal aliases for these studies that need to be labelled here; #hcp_studystr gets passed into the functions that name the output structures (e.g. a string for the filename). 
#ndar_studystr gets passed into the argument that opens the csv that contains ALL of the HCP #subjects (they are related, so they have to be considered together) and subsets based on study ########################## #hcp_studystr='yourstudy' #ndar_studystr='placeholderforastringyouprobablywontneed' #scoresD='pathtoyourscores/yourscores.csv' #rawE='pathtoyourrawdata/yourraw.csv' ########################## #hcp_studystr='HCPD' #ndar_studystr='HCD' #scoresD='/home/petra/UbWinSharedSpace1/boxtemp/HCD_Toolbox_Scored_Combined_09_29_2020.csv' #rawD='/home/petra/UbWinSharedSpace1/boxtemp/HCD_Toolbox_Raw_Combined_09_29_2020.csv' ########################## hcp_studystr='HCPA' ndar_studystr='HCA' scoresD='/home/petra/UbWinSharedSpace1/boxtemp/HCA_Toolbox_Scored_Combined_09_29_2020.csv' rawD='/home/petra/UbWinSharedSpace1/boxtemp/HCA_Toolbox_Raw_Combined_09_29_2020.csv' #path where you want to save your formatted structures preformatout="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/Ipad2NDA_withCrosswalk/NIHToolbox2NDA/tlbxIpad_format/" pathout="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/Ipad2NDA_withCrosswalk/NIHToolbox2NDA/prepped_structures/" #read into dataframe and take a peak (we alphabetized our columns during concat process) scordata=pd.read_csv(scoresD,header=0,low_memory=False) #scordata.head() scordata.subject=scordata.PIN.str[0:10] len(scordata.PIN.unique()) rawdata=pd.read_csv(rawD,header=0,low_memory=False,error_bad_lines=True) #rawdata.head() rawdata.shape rawdata.subject=rawdata.PIN.str[0:10] len(rawdata.PIN.unique()) #just taking a look here #unnamed columns result of index not removed from pd.to_csv in other location rawdata.columns rawdata.PIN.unique()[0:10] #rawdata.groupby(['Inst','ItemID']).count() samplePINS=list(rawdata.PIN.unique()[0:10]) replacePINS=['HCP0000001_V1', 'HCP0000002_V1', 'HCP0000003_V1', 'HCP0000004_V1', 'HCP0000005_V1', 'HCP0000006_V1', 'HCP0000007_V1', 'HCP0000008_V1', 'HCP0000009_V1', 
'HCP0000010_V1'] pinSUBs=dict(zip(samplePINS,replacePINS)) pinSUBs sampleRAW=rawdata.loc[rawdata.PIN.isin(samplePINS)] sampleSCORES=scordata.loc[scordata.PIN.isin(samplePINS)] sampleRAW=sampleRAW.replace(pinSUBs).drop(columns=['subject','visit'])#rc_subject_id','gender','subjectkey','interview_age','interview_date']) sampleSCORES=sampleSCORES.replace(pinSUBs).drop(columns=['subject','visit'])#,'src_subject_id','gender','subjectkey','interview_age','interview_date']) sampleRAW.DateCreated='dummydate' sampleRAW.InstStarted='dummydate' sampleRAW.InstEnded='dummydate' sampleSCORES.DateFinished='dummydate' sampleRAW.to_csv("SampleRaw.csv",index=False) sampleSCORES.to_csv("SampleScores.csv",index=False) #pinSUBs #sampleSCORES.PIN #note that the data still have visits OTHER than v1, which is the only subset we'll be sending rawdata.groupby('visit').count() #subset rawdata and scordata to v1 rawdata=rawdata.loc[rawdata.PIN.str.upper().str.contains('V1')].copy() scordata=scordata.loc[scordata.PIN.str.upper().str.contains('V1')].copy() print(len(rawdata.PIN.unique())) print(len(scordata.PIN.unique())) #BEWARE: NIH TOOLBOX IPAD FORMAT FORCES YOU TO LINK DATA BY PIN, WHICH IS PRONE TO CONTAINING TYPOS (no dataset id) #EVEN AFTER YOU'VE CLEANED THE DATA YOU SHOULD CHECK FOR PROBLEMS... AGAIN...since they can get reintroduced whenever curated data are 'live.' print("PINS that might cause problems: " + rawdata.loc[rawdata.PIN.str.contains('v1'),'PIN'].unique()) print("PINS that might cause problems: " + scordata.loc[scordata.PIN.str.contains('v1'),'PIN'].unique()) print("Duplicate Alert: "+ rawdata.loc[rawdata.PIN.str.upper().str.contains('HCD2155746_V1'),'PIN'].unique()) #Would be a DUPLICATE if you're counting by unique PINS!!! 
rawdata.loc[rawdata.PIN.str.upper().str.contains('HCD2600539_V1'),'PIN'].unique() #not a duplicate, whew rawdata.loc[rawdata.PIN.str.upper().str.contains('HCD2537457_V1'),'PIN'].unique() #not a duplicate, whew #remove any duplicates rawdata=rawdata.loc[~(rawdata.PIN.str.contains('HCD2155746_v1'))] scordata=scordata.loc[~(scordata.PIN.str.contains('HCD2155746_v1'))] print(len(rawdata.PIN.unique())) print(len(scordata.PIN.unique())) scordata.groupby('visit').count() print("HCA7581081_V1" in rawdata.PIN.unique()) print("HCA7581081_V1" in scordata.PIN.unique()) print("HCA6375275_V1" in rawdata.PIN.unique()) print("HCA6375275_V1" in scordata.PIN.unique()) print("HCA7581081_V1" in rawdata.PIN.unique()) print("HCA7581081_V1" in scordata.PIN.unique()) print("HCA8361577_V1" in rawdata.PIN.unique()) print("HCA8361577_V1" in scordata.PIN.unique()) print("HCA7247574_V1" in rawdata.PIN.unique()) print("HCA7247574_V1" in scordata.PIN.unique()) #HCP data had some alternatives to '1 or 2' for assessment name that needed to be sent back to RAs for clarification #after all was said and done - set to missing here after capturing flags because #fneproc has limitations on character length, and need be consistent rawdata['Assessment Name']=rawdata['Assessment Name'].str.replace('Assessment ','') scordata['Assessment Name']=scordata['Assessment Name'].str.replace('Assessment ','') raw_assessment_strings=rawdata.loc[~(rawdata['Assessment Name'].isin(['1','2','3']))][['PIN','Assessment Name']].drop_duplicates(keep='first') score_assessment_strings=scordata.loc[~(scordata['Assessment Name'].isin(['1','2','3']))][['PIN','Assessment Name']].drop_duplicates(keep='first') #score_assessment_strings #raw_assessment_strings assessment_strings_forFU=pd.merge(raw_assessment_strings,score_assessment_strings,on=['PIN','Assessment Name'],how='outer',indicator=True) assessment_strings_forFU.loc[~(assessment_strings_forFU._merge =='both')] assessment_strings_forFU.drop(columns=['_merge']).to_csv("TLBX 
Records with Unexpected Assessment Strings.csv",index=False) #set strings to missing rawdata.loc[~(rawdata['Assessment Name'].isin(['1','2','3'])),'Assessment Name']='' scordata.loc[~(scordata['Assessment Name'].isin(['1','2','3'])),'Assessment Name']='' print(len(rawdata.PIN.unique())) print(len(scordata.PIN.unique())) #prep the fields that NDA requires in all of their structures - we did this in another program, #and call it our ROSETTA STONE since output is required elsewhere and has subject aliases in double winner cases. #Here, just subsetting ROSETTA STONE to particular study (ndar_studystr='HCA' or 'HCD'), # renaming a few vars, and changing the date format #ALSO DROPPING two per Emily A. email 5/28/20 finaltwo=['HCA8465488','HCA7884605'] #should not have 8_17 batteries: another=['HCA9914193']#protocol deviation has <18yo batteries odorerr=['HCA6623874']#has empty odor battery in curated...not part of hca protocol - send back to trello griperr=['HCA7130957']#has Grip error wierdness - nih_tlbx_nondomsc = 1.21.2720 row shift? 
subjectlist='/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/UnrelatedHCAHCD_w_STG_Image_and_pseudo_GUID01_15_2021.csv' subjects=pd.read_csv(subjectlist) #doublewinners - dont exclude #Emily exclusions subjects=subjects.loc[~(subjects.subjectped.isin(finaltwo))] #other wierdness subjects=subjects.loc[~(subjects.subjectped.isin(another))] subjects=subjects.loc[~(subjects.subjectped.isin(odorerr))] subjects=subjects.loc[~(subjects.subjectped.isin(griperr))] subjects.shape subjects=subjects[['subjectped','nda_gender', 'nda_guid', 'nda_interview_age', 'nda_interview_date']].copy() ndar=subjects.loc[subjects.subjectped.str.contains(ndar_studystr)].rename( columns={'nda_guid':'subjectkey','subjectped':'src_subject_id','nda_interview_age':'interview_age', 'nda_interview_date':'interview_date','nda_gender':'gender'}).copy() ndar['interview_date'] = pd.to_datetime(ndar['interview_date']).dt.strftime('%m/%d/%Y') ndarlist=['subjectkey','src_subject_id','interview_age','interview_date','gender'] ndar.columns ndar.head() #this is the list of variables in the scored and raw data files that you might need... 
#creating list in case your scored data is merged with other files for other reasons (ours was) #scorlist=['Age-Corrected Standard Score', 'Age-Corrected Standard Scores Dominant', # 'Age-Corrected Standard Scores Non-Dominant', 'AgeCorrCrystal', 'AgeCorrDCCS', 'AgeCorrEarly', # 'AgeCorrEngRead', 'AgeCorrEngVocab', 'AgeCorrFlanker', 'AgeCorrFluid', 'AgeCorrListSort', # 'AgeCorrPSM', 'AgeCorrPatternComp', 'AgeCorrTotal', 'Assessment Name', 'Computed Score', # 'ComputedDCCS', 'ComputedEngRead', 'ComputedEngVocab', 'ComputedFlanker', 'ComputedPSM', # 'ComputedPatternComp', 'DCCSaccuracy', 'DCCSreactiontime', 'Dominant Score', 'FlankerAccuracy', # 'FlankerReactionTime', 'FullTCrystal', 'FullTDCCS', 'FullTEarly', 'FullTEngRead', 'FullTEngVocab', # 'FullTFlanker', 'FullTFluid', 'FullTListSort', 'FullTPSM', 'FullTPatternComp', 'FullTTotal', # 'Fully-Corrected T-score', 'Fully-Corrected T-scores Dominant', 'Fully-Corrected T-scores Non-Dominant', # 'FullyCorrectedTscore', 'Group', 'Inst', 'InstrumentBreakoff', 'InstrumentRCReason', 'InstrumentRCReasonOther', # 'InstrumentStatus2', 'ItmCnt', 'Language', 'Male', 'National Percentile (age adjusted)', # 'National Percentile (age adjusted) Dominant', 'National Percentile (age adjusted) Non-Dominant', # 'Non-Dominant Score', 'PIN', 'Raw Score Left Ear', 'Raw Score Right Ear', 'RawDCCS', # 'RawFlanker', 'RawListSort', 'RawPSM', 'RawPatternComp', 'RawScore', 'SE', 'Static Visual Acuity Snellen', # 'Static Visual Acuity logMAR', 'TScore', 'Theta', 'ThetaEngRead', 'ThetaEngVocab', 'ThetaPSM', 'Threshold Left Ear', # 'Threshold Right Ear', 'UncorrCrystal', 'UncorrDCCS', 'UncorrEarly', 'UncorrEngRead', 'UncorrEngVocab', # 'UncorrFlanker', 'UncorrFluid', 'UncorrListSort', 'UncorrPSM', 'UncorrPatternComp', 'UncorrTotal', # 'Uncorrected Standard Score', 'Uncorrected Standard Scores Dominant', 'Uncorrected Standard Scores Non-Dominant', # 'UncorrectedStandardScore'] scorlist=['PIN', 'DeviceID', 'Assessment Name', 'Inst', 'RawScore', 
'Theta', 'TScore', 'SE', 'ItmCnt', 'DateFinished', 'Column1', 'Column2', 'Column3', 'Column4', 'Column5', 'Language', 'Computed Score', 'Uncorrected Standard Score', 'Age-Corrected Standard Score', 'National Percentile (age adjusted)', 'Fully-Corrected T-score', 'Uncorrected Standard Scores Dominant', 'Age-Corrected Standard Scores Dominant', 'National Percentile (age adjusted) Dominant', 'Fully-Corrected T-scores Dominant', 'Uncorrected Standard Scores Non-Dominant', 'Age-Corrected Standard Scores Non-Dominant', 'National Percentile (age adjusted) Non-Dominant', 'Fully-Corrected T-scores Non-Dominant', 'Dominant Score', 'Non-Dominant Score', 'Raw Score Right Ear', 'Threshold Right Ear', 'Raw Score Left Ear', 'Threshold Left Ear', 'Static Visual Acuity logMAR', 'Static Visual Acuity Snellen', 'InstrumentBreakoff', 'InstrumentStatus2', 'InstrumentRCReason', 'InstrumentRCReasonOther', 'App Version', 'iPad Version', 'Firmware Version','Age-Corrected Standard Scores Quinine Whole', 'Age-Corrected Standard Scores Salt Whole','Fully-Corrected T-scores Quinine Whole', 'Fully-Corrected T-scores Salt Whole','Uncorrected Standard Scores Quinine Whole', 'Uncorrected Standard Scores Salt Whole','Whole Mouth Quinine','Whole Mouth Salt'] #rawlist=['App Version', 'Assessment Name', 'DataType','DateCreated', 'DeviceID', 'Firmware Version', # 'Inst', 'InstEnded','InstEndedDatetime', 'InstOrdr', 'InstSctn', 'InstStarted','InstStartedDatetime', # 'ItemID', 'ItmOrdr', 'Locale','PIN', 'Position', 'Response', 'ResponseTime', 'SE', 'Score', 'TScore', # 'Theta','iPad Version'] rawlist=['App Version', 'Assessment Name', 'DataType','DateCreated', 'DeviceID', 'Firmware Version', 'Inst', 'InstEnded','InstOrdr', 'InstSctn', 'InstStarted', 'ItemID', 'ItmOrdr', 'Locale','PIN', 'Position', 'Response', 'ResponseTime', 'SE', 'Score', 'TScore', 'Theta','iPad Version'] #scordata[scorlist] scordata.columns #rawdata.columns #merge the score and raw data with the required fields for the NDA #Note that 
subject and visit are HCP specific variables that we use to subset the records being sent to the NDA #Depending on how you organized your data, you may need to create dummy vars if you dont have them... #scordata['subject']=scordata.PIN #or some other variable in scordata that can be used to merge with ndarfields data #scordata['visit']='V1' #we keep this around because eventually we'll be releaseing V2,V3, and FU data #rawdata['subject']=rawdata.PIN #rawdata['visit']='V1' scordata=pd.merge(scordata[scorlist+['subject','visit']],ndar,how='inner',left_on='subject', right_on='src_subject_id') rawdata=pd.merge(rawdata[rawlist+['subject','visit']],ndar,how='inner',left_on='subject', right_on='src_subject_id') print(len(rawdata.PIN.unique())) print(len(scordata.PIN.unique())) #scordata=pd.merge(scordata,ndar,how='inner',left_on='subject', right_on='src_subject_id') #rawdata=pd.merge(rawdata,ndar,how='inner',left_on='subject', right_on='src_subject_id') #check for one-to-one match of PINs for i in scordata.PIN.unique(): if i not in rawdata.PIN.unique(): print("not in Raw " + i) for i in rawdata.PIN.unique(): if i not in scordata.PIN.unique(): print("not in scores " + i) scordata.groupby('visit').count() #any PINs still have Xs or As and Bs? 
if so, drop - rep 2nd parent info...will handle in next release scordata.loc[scordata.PIN.str.contains('X')] scordata=scordata.loc[~(scordata.PIN.str.len()>13)] rawdata=rawdata.loc[~(rawdata.PIN.str.len()>13)] #THIS IS THE NUMBER THAT WILL BE ASSOCIATED WITH UPLOAD print(len(rawdata.PIN.unique())) print(len(scordata.PIN.unique())) scordata.loc[scordata.PIN.str.contains("HCA6007044")] #Export Ipad formatted version of data to be submitted to NDA (not including erroneous odor and pegboard instruments for HCA) if hcp_studystr=='HCPA': print(rawdata.shape) print(scordata.shape) rawdata=rawdata.loc[~(rawdata.Inst.isin(['NIH Toolbox Odor Identification Test Age 10+ v2.0','NIH Toolbox 9-Hole Pegboard Dexterity Test Age 3+ v2.0']))] scordata=scordata.loc[~(scordata.Inst.isin(['NIH Toolbox Odor Identification Test Age 10+ v2.0','NIH Toolbox 9-Hole Pegboard Dexterity Test Age 3+ v2.0']))] print(rawdata.shape) print(scordata.shape) rawdata.to_csv(preformatout+ndar_studystr+'_'+'RAW_TLBX_'+snapshotdate+'.csv',index=False) scordata.to_csv(preformatout+ndar_studystr+'_'+'SCORES_TLBX_'+snapshotdate+'.csv',index=False) ``` Do a little QC and data exploration wrt instruments included ``` #NOW that you have ALL of your data, take a look at how the intruments are organized within them. #for example, the bulk of instruments have representation in the raw files AND the scored files, and can be #handled by the 'normal' code block below. We will need to code for exceptions, though. 
#All code blocks below will skip Practice instruments and Instructions print('*****Instruments in Raw data but not Scores:') for i in rawdata.Inst.unique(): if i not in scordata.Inst.unique(): print(i) print('******Instruments in Scored data but not Raw:') for i in scordata.Inst.unique(): if i not in rawdata.Inst.unique(): print(i) #Occasionally one of the instruments, (NIH Toolbox List Sorting Working Memory Test Ages 3-6 v2.1 in our case) will showing up for protocol #deviation reasons (someone opened and closed a battery, for example). The validated column of the crosswalk has been set to NO # for this instrument, but you may have the item level info to extend and validate htis in your data. #nan rows in Raw data can correspond to cases where Registration Data was uploaded as 'raw' data and concatenated behind the scenes somewhere. #you will need to QC (we just allow program to drop them here - send flag back to your data curation team for next release) #check that lengths are the same...indicating one to one PIN match between scores and raw print(len(rawdata.PIN.unique())) print(len(scordata.PIN.unique())) #check that shape is same before and after removing duplicates (should not be any) rawdata.shape scordata.shape print(rawdata.shape) print(scordata.shape) testraw=rawdata.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first') testscore=scordata.drop_duplicates(subset={'PIN','Inst'}) print(testraw.shape) print(testscore.shape) rawdata.columns #scordata.columns #define the function that will turn a prepared (e.g already transformed, renamed, revalued, etc. 
and otherwise # ready to go) dataframe into a csv structure def data2struct(patho,dout,crosssub,study): """ Convert dout, a prepared pandas dataframe, into a csv structure that NDA can import parameters: patho - full path to place you want to store structures (there will be many) dout - name of data frame that contains all the variables to be exported crosssub - a dataframe which is the subset of the crosswalk for the instrument to be exported as structure study - a string to put in the name of the csv file along with the structure name and the short name of the instrument note that snapshotdate is globally defined external to this function near import statments... """ #get the name and number of the structure from the crosswalk subset strucroot=crosssub['nda_structure'].str.strip().str[:-2][0] strucnum=crosssub['nda_structure'].str.strip().str[-2:][0] #prepare the name of the output file and path instshort=crosssub['inst_short'].str.strip()[0] inst=crosssub['Inst'].str.strip()[0].replace(' ','_').replace('+','plus').replace('-','_') filePath=os.path.join(pathout,study+'_'+instshort+'_'+strucroot+strucnum+'_'+snapshotdate+'.csv') if os.path.exists(filePath): os.remove(filePath) else: pass #print("Can not delete the file as it doesn't exists") with open(filePath,'a') as f: f.write(strucroot+","+str(int(strucnum))+"\n") dout.to_csv(f,index=False) #This function sends a transformed dataframe of the right 'shape' (i.e. after items have been pivoted into row with scores) # through the crosswalk for renaming, revaluing and structure destination mapping #function takes a dataframe (in which NIH Toolbox Items are still the names) and formats the column names #such that all the special characters are removed because the export has characters that python and the NDA dont like #This function will alert you to any instruments that were successfully transformed but tha might #warrent a closer look. 
def sendthroughcrosswalk(pathout,instreshapedfull,inst_i,crosswalk,studystr,verbose,debug):
    """
    Send instreshapedfull, a dataframe that has pivoted the item level data into the scored data by instrument,
    through the crosswalk to have its variables renamed and reformatted according to the harmonization
    requests of the NDA

    parameters:
    pathout - full path to place you want to store structures (there will be many) - argument gets passed to data2struct function
    instreshapedfull - one instrument's prepared dataframe (items already pivoted into columns alongside scores)
    inst_i - string name of instrument as it appears in the NIH Toolbox output, exactly (case sensitive with version)
    crosswalk - pandas dataframe of crosswalk (read from csv)
    studystr - 'HCPA' or other string specified at the beginning of this notebook - will be passed to data2struct function to tag the file name with the study source of the data
    verbose - YES or NO, will flag all the variable to element mappings available in the crosswalk that weren't called upon in this transformation because they weren't found in your data
    debug - YES or NO, will print out the last row to be executed from the 'requested_python' column, in case you get an Error, and need to figure out where the loop got stuck
    """
    # replace special characters in column names (spaces/dashes/parens are not valid in NDA element names)
    instreshapedfull.columns = instreshapedfull.columns.str.replace(' ', '_').str.replace('-', '_').str.replace('(','_').str.replace(')', '_')
    crosswalk_subset = crosswalk.loc[crosswalk['Inst'] == inst_i]
    crosswalk_subset.reset_index(inplace=True)
    #
    if crosswalk_subset.reset_index().validated[0]=='NO':
        print("Skipping "+inst_i+ " because crosswalk not yet validated for this instrument ")
    else:
        # some studies will have some but not all of the variables in hcp_variable (result of skip logic, perhaps)
        # need to make sure they know about this(in case not due to skip logic) but also that code is only execute for
        # vars in existence.
        #also need to keep track of dummy vars that dont exist in IPAD output, but are necessary for NDA and need to be created
        #on the fly
        #how many vars are in the instrument according to what is stored in hcp_variable?
        cwlistbef = list(crosswalk_subset['hcp_variable'])
        before = len(cwlistbef)
        #how many are in the intersection of hcp_variable and the prepared data (e.g. what shows up with the particular instrument in instreshapedfull)
        cwlist = list(set(cwlistbef) & set(
            instreshapedfull.columns))  # drop the handful of vars in larger instruments that got mapped but that we dont have
        after = len(cwlist)
        if before != after:
            print("WARNING!!! " + inst_i + ": Crosswalk expects " + str(before) + " elements, but only found " + str(after)+ " in the prepared data")
            notfound=list(np.setdiff1d(cwlistbef,cwlist))
            if verbose=='YES':
                print("Not Found:"+ str(notfound))
        #get the dummies (placeholder elements required by NDA but absent from the iPad output)
        dummys=[]
        for i in cwlistbef:
            if "dummy" in i:
                dummys=dummys+[i]
        #studydata should have all the ndar variables and the list of vars in the intersection of what exists in ipad output and
        #what can be received per the crosswalk.
        # if you dont force the intersection then code will try to execute on things that dont exist
        #The new dummy variables will be created on the fly.
        studydata = instreshapedfull[ndarlist + cwlist].copy()
        # execute any python one liners (see how they all refer to studydata?) for all rows in the crosswalk corresponding to cwlist this instrument vars except the notfounds.
        itersubset=crosswalk_subset.loc[crosswalk_subset.hcp_variable.isin(cwlist + dummys)]
        for index, row in itersubset.iterrows():#crosswalk_subset.iterrows():
            if pd.isna(row['requested_python']):
                pass
            else:
                if debug=='YES':
                    print(row['requested_python'])
                # NOTE(review): exec of code stored in the crosswalk csv - only use crosswalk
                # files from the trusted humanconnectome repository
                exec(row['requested_python'])
        uploadlist = list(crosswalk_subset['hcp_variable_upload'])
        uploadlist = list(set(uploadlist) & set(studydata.columns))
        # write the finished structure: required ndar fields first, then the mapped upload elements
        data2struct(patho=pathout, dout=studydata[ndarlist + uploadlist], crosssub=crosswalk_subset, study=studystr)

#specify your crosswalk- take a peak - use the latest crosswalk from the https://github.com/humanconnectome/NIHToolbox2NDA/
#e.g. Crosswalk_NIH_Toolbox_2_NDA.csv
crosswalkpath="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/Ipad2NDA_withCrosswalk/NIHToolbox2NDA/"
cfile="Crosswalk_NIH_Toolbox_2_NDA.csv"
crosswalk=pd.read_csv(crosswalkpath+cfile,header=0,low_memory=False, encoding = "ISO-8859-1")
crosswalk.head()
scordata.columns
##testing area for problem instruments
#studydata['Raw_Score_Right_Ear'].round().fillna(-9999).astype(int).astype(str).str.replace('-9999','')
#scordata.loc[scordata.Inst=='Social Satisfaction Summary (18+)','TScore']#.round().fillna(-9999).astype(int).astype(str).str.replace('-9999','')
#test=pd.DataFrame(test,columns=['Raw Score Right Ear'])
#test.columns
##test.describe()
#test2=test.loc[~(test['Raw Score Right Ear'].isnull()==True)]
#test2.dtypes
##test['Raw Score Right Ear'].astype(float).round().fillna(-9999).astype(int).astype(str).str.replace('-9999','')
##test2['Raw Score Right Ear'].round()#.fillna(-9999).astype(int).astype(str).str.replace('-9999','')
# list the instruments that appear in the scored data only - these are the special-case candidates below
for i in scordata.Inst.unique():
    if i not in rawdata.Inst.unique():
        inst_i=i
        print(inst_i)
```

Do special cases last

```
#For cases where instrument is in the scored data but not the raw data, e.g.
# because this is a summary across instruments, is instructions/practice or because someone
# opened and closes a battery before generating any item level data
for i in scordata.Inst.unique():
    if i not in rawdata.Inst.unique():
        inst_i=i
        if "Cognition" in inst_i:
            pass #special case--see specialty code block below
        elif "Practice" in inst_i:
            print("Note: Omitting practice instrument, "+inst_i)
        elif "Instructions" in inst_i:
            print("Note: Omitting Instructions instrument, "+inst_i)
        else:
            try: #this will fail if there are duplicates or if no-one has the data of interest (e.g. idlist too small), or if only V2 instrument
                #print('Processing '+inst_i+'...')
                instreshapedfull=scordata.loc[scordata.Inst==inst_i][scorlist+ndarlist]
                #instreshapedfull=scordata.loc[scordata.Inst==inst_i].copy()
                instreshapedfull['version_monster']=instreshapedfull['Inst']+','+instreshapedfull['Assessment Name']
                #verbose will tell you which items were not found, debug will print out the python lines before they are executed
                if 'Parent' in inst_i:
                    instreshapedfull['respondent']='caregiver about child subject'
                else:
                    instreshapedfull['respondent']='subject about self'
                sendthroughcrosswalk(pathout,instreshapedfull, inst_i, crosswalk,studystr=hcp_studystr,verbose='YES',debug='NO')
            # NOTE(review): bare except swallows the real error; rerun the instrument with debug='YES'
            # in sendthroughcrosswalk to locate the failing crosswalk row
            except:
                print('Couldnt process '+inst_i+'...')
#Lots of dummy variables were needed to fit the IPAD data into the NDA structures without needing a special code block for every
#single structure (see cogcomp01 specialty code below). The reason for this is that many of these summary scores were coming
#from different instruments (NIH Toolbox term) were originally merged into the same structure.
#basically you're looking for anything but 'dummy' variables in your warnings below to make sure nothing is accidentally omitted
#in the transformation
# For some instruments we only want to send scores. See discussion about Visual Acuity Instruments below, for example.
# For other instruments we only CAN send scores,
# one reason is that only score level data is available for this instrument (see next code cell)
# Other reason is because only score level data is ready to go (item level data not yet mapped at the NDA)
# for example, the Picture Sequence instruments...NIH Data dictionary wasn't sufficient to
# map the item level data to the NDA, where items are incorrect/correct but NIH Toolbox outputting value range of
# 0::14. Item level detail for this particular instrument will require facilitation between NIH Toolbox and NDA to define
# new variables for these particular items (not done yet). Similar story for Words-In-Noise, where data dictionary at both ends
# (NIH Toolbox and NDA expectation) doesn't match the observed output. Yay.
# When it is done, remove the elif statement from the non-special
# cases cell block below
# Note also you'll get a lot of warnings for these because you're only sending scores when items are also (almost) available
scoresonly=scordata.loc[(scordata.Inst.str.contains('Visual Acuity')==True) |
                        (scordata.Inst.str.contains('Picture Sequence Memory Test')==True)|
                        (scordata.Inst.str.contains('Words-In-Noise')==True)]
for i in scoresonly.Inst.unique():
    inst_i=i
    if "Practice" in inst_i:
        print("Note: Omitting practice instrument, "+inst_i)
    else:
        try: #this will fail if there are duplicates or if no-one has the data of interest (e.g. idlist too small), or if only V2 instrument
            #print('Processing '+inst_i+'...')
            instreshapedfull=scordata.loc[scordata.Inst==inst_i][scorlist+ndarlist]
            #instreshapedfull=scordata.loc[scordata.Inst==inst_i].copy()
            instreshapedfull['version_monster']=instreshapedfull['Inst']+','+instreshapedfull['Assessment Name']
            #verbose will tell you which items were not found, debug will print out the python lines before they are executed
            if 'Parent' in inst_i:
                instreshapedfull['respondent']='caregiver about child subject'
            else:
                instreshapedfull['respondent']='subject about self'
            sendthroughcrosswalk(pathout,instreshapedfull, inst_i, crosswalk,studystr=hcp_studystr,verbose='NO',debug='NO')
        except:
            print('Couldnt process '+inst_i+'...')
#for non-special instruments in both scores AND raw data types (skip the scoresonly ones from above)
#add a check to make sure that everything found in the data has a row in the crosswalk
#add indicator for whether this is a variable from the scores file or a variable from the raw data file.
for i in scordata.Inst.unique():
    if i in rawdata.Inst.unique():
        inst_i=i
        if "Visual Acuity" in inst_i:
            pass #special case--see below
        elif "Practice" in inst_i:
            print("Note: Omitting practice instrument, "+inst_i)
        #new elif statements here needed because of unresolved discrepancey between NIH Data Dictionary and observed output
        elif "Picture Sequence Memory Test" in inst_i:
            print("Note: Sent Scores only for "+inst_i)
        elif "Words-In-Noise" in inst_i:
            print("Note: Sent Scores only for "+inst_i)
        else:
            try: #this will fail if there are duplicates or if no-one has the data of interest (e.g. idlist too small), or if only V2 instrument
                #print('Processing '+inst_i+'...')
                # pivot the item-level rows (one row per item response) into one wide row per PIN,
                # then merge with that PIN's scored-data row for the same instrument
                items=rawdata.loc[rawdata.Inst==inst_i][['PIN','subject','Inst','visit','ItemID','Position',
                                                         'subjectkey','src_subject_id','interview_age','interview_date','gender',
                                                         'Score','Response','ResponseTime']]# not these..., 'SE', 'Response', 'TScore','Theta']]
                items.ItemID = items.ItemID.str.lower().str.replace('-','_').str.replace('(','_').str.replace(')','_').str.replace(' ','_')
                inst=items.pivot(index='PIN',columns='ItemID',values='Score').reset_index()
                meta=items.drop_duplicates(subset=['PIN','visit'])
                instreshaped = pd.merge(meta, inst, on='PIN', how='inner').drop(columns={'subject', 'visit','Inst'})
                items2=scordata.loc[scordata.Inst==inst_i][scorlist]
                instreshapedfull=pd.merge(instreshaped,items2,on='PIN',how='inner')
                instreshapedfull['version_monster']=instreshapedfull['Inst']+','+instreshapedfull['Assessment Name']
                #verbose will tell you which items were not found, debug will print out the python lines before they are executed
                if 'Parent' in inst_i:
                    instreshapedfull['respondent']='caregiver about child subject'
                else:
                    instreshapedfull['respondent']='subject about self'
                sendthroughcrosswalk(pathout,instreshapedfull, inst_i, crosswalk,studystr=hcp_studystr,verbose='YES',debug='NO')
            except:
                print('ERROR: Couldnt process '+inst_i+'...')
# NOTE: its okay if there are items in the crosswalk that doent exist in your data...lots of reasons, but please compare
# with Ipad formatted version to make sure nothing is wierd (e.g.
no problems with crosswalk which will lowercase the item names from the NIH Toolbox Ipad 'item' column # for example if a crosswalk can transform elements that are found in an instrument for 3-6 yos, but you dont have any 3-6 yos, then youll be alerted # not okay if there are scores in the crosswalk that doent exsit in the data...need to investigate # also will be flags for dummy vars because they exist in crosswalk but not data (they were created for the NDA) # turn verbose off to ignore these warnings. Errors will still be reported. # for instruments not validated--check crosswalk. v2.1 of some isntruments dont exist in the NIH Toolbox Data # Dictionary yet, so its pointless to try to pretend that we know what they map to in the NDA. #special coding required for instruments in the crosswalk that have jupyter in their specialty code columns #Within the rawdata structure (for HCP), all but the NIH Toolbox Pain Intensity FF Age 18+ v2.0 Instrument are practices #So only the Pain Intensity instrument needed special coding attention #check your data and adjust if needed - note that subject and visit are variables we created locally #to merge with the data coming from a different local source (REDCap) #create the NDA structure for this special case #this structure doesnt get 'sent through crosswalk' so any code that is in python column wont get executed inst_i='NIH Toolbox Pain Intensity FF Age 18+ v2.0' #most of the rows contain duplicated information...only need to know the PIN once, for example, not once for each item response # so values in the response column need to be pivoted and then merged with the rest of the data, paindata=rawdata.loc[rawdata.Inst==inst_i][['PIN','subject','Inst','visit','ItemID','Position', 'subjectkey','src_subject_id','interview_age','interview_date','gender', 'Response','ResponseTime', 'SE', 'Score', 'TScore','Theta','Assessment Name']] paindata.ItemID = 
paindata.ItemID.str.lower().str.replace('-','_').str.replace('(','_').str.replace(')','_') inst = paindata.pivot(index='PIN', columns='ItemID', values='Score').reset_index() meta = paindata.drop_duplicates(subset=['PIN', 'visit']) #meta['Inst']=inst_i painreshaped = pd.merge(meta, inst, on='PIN', how='inner').drop(columns={'subject','visit','PIN'}) crosswalk_subset=crosswalk.loc[crosswalk['Inst']==inst_i] crosswalk_subset.reset_index(inplace=True) cwlist=list(crosswalk_subset['hcp_variable_upload']) #these should all correspond with the nda_element names in this structure #several dummy vars for required vars - normally these would have placeholders in the meta (scores) files but #since this particular instrument only exists in the raw data, we have to explicitly create place holders painreshaped['pssr8_12_10']=painreshaped.pssr8_12_10.round().fillna(-9999).astype(int).astype(str).str.replace('-9999','') painreshaped['nih_tlbx_agegencsc']=999 painreshaped['nih_tlbx_rawscore']=999 painreshaped['nih_tlbx_tscore']=999 painreshaped['nih_tlbx_se']=999 painreshaped['nih_tlbx_theta']=999 painreshaped['respondent']='self' painreshaped['version_form']=painreshaped['Inst'] painreshaped['fneproc']=painreshaped['Assessment Name'].str.replace('Assessment ','') painreshaped['comqother']=painreshaped['respondent'] #painreshaped['version_form']=painreshaped.Inst #+','+painreshaped['Assessment Name'] reshapedslim=painreshaped[ndarlist+cwlist] #the data2struct function only uses the crosswalk to get the structure name and number for the header of dout #dout is otherwise ready to go and data2structure just writes it to a file in the specified location data2struct(patho=pathout,dout=reshapedslim,crosssub=crosswalk_subset,study=hcp_studystr) # Another special case is for Cognition Composite scores all v1.1 - going to cogcomp01 structure at the NDA- # Cog comp is special for several reason...it doesnt have corresponding entries in the raw data because # it represents a summary across 
instruments in the Cognitive domain. Even so, 4 cog comp 'instruments' are going to # a single NDA structure. Each of these insturments has a version number and an assessmen # This was mapped before Leo agreed to accept data by NIH Toolbox Instrument name (pivot by Inst) # keeping this special case coding in for posterity and to shed light on one type of merge he must do on his end # and the fact that this special situation is not yet being addressed (unless they can take multiple rows per person) # Note that this structure illustrates the versioning problem when merging several NIH Toolbox Instruments together # onto the same row # Instruments are being mapped to the same # buckets # One of the main issues that will hopefully be resolved by teleconference 3/23 is how the NDA is keeping track of # several instruments (with different versions) getting mapped to the same rows in a structure # when it comes to NIH toolbox data cogcompdata=scordata.loc[scordata.Inst.str.contains('Cognition')==True][['PIN','Language', 'Assessment Name','Inst', 'Uncorrected Standard Score', 'Age-Corrected Standard Score', 'National Percentile (age adjusted)', 'Fully-Corrected T-score']+ndarlist] #initialize prefix cogcompdata['varprefix']='test' cogcompdata.loc[cogcompdata.Inst=='Cognition Crystallized Composite v1.1','varprefix']='nih_crystalcogcomp_' cogcompdata.loc[cogcompdata.Inst=='Cognition Early Childhood Composite v1.1','varprefix']='nih_eccogcomp_' cogcompdata.loc[cogcompdata.Inst=='Cognition Fluid Composite v1.1','varprefix']='nih_fluidcogcomp_' cogcompdata.loc[cogcompdata.Inst=='Cognition Total Composite Score v1.1','varprefix']='nih_totalcogcomp_' #pivot the vars of interest by varprefix and rename uncorr=cogcompdata.pivot(index='PIN',columns='varprefix',values='Uncorrected Standard Score') for col in uncorr.columns.values: uncorr=uncorr.rename(columns={col:col+"unadjusted"}) ageadj=cogcompdata.pivot(index='PIN',columns='varprefix',values='Age-Corrected Standard Score') for 
col in ageadj.columns.values: ageadj=ageadj.rename(columns={col:col+"ageadj"}) npage=cogcompdata.pivot(index='PIN',columns='varprefix',values='National Percentile (age adjusted)') for col in npage.columns.values: npage=npage.rename(columns={col:col+"np_ageadj"}) #put them together cogcompreshape=pd.concat([uncorr,ageadj,npage],axis=1) #hijacking what is the same for all four instruments meta=cogcompdata[['PIN','Language']+ndarlist].drop_duplicates(subset={'PIN'}) #all the data in place cogcompreshape=pd.merge(meta,cogcompreshape,on='PIN',how='inner') # Now grabbing version and assessment info for version_form # initial attempt to capture the version failed ...they got mapped to raw scores # and failed validation # per email, all of this information will go to the 'version_form' variable. meta2=cogcompdata[['PIN','Inst','Assessment Name']].drop_duplicates(subset={'PIN','Inst'}) meta2['Inst,Assessment Name']=meta2['Inst']+','+meta2['Assessment Name'] meta3=meta2.pivot(index='PIN',columns='Inst',values='Inst,Assessment Name') #this will only work until there are more than one versions of the composites in the data #need to make it more flexible so that there are 4 possible instruments (whatever version they may be) meta3['version_monster']=meta3['Cognition Crystallized Composite v1.1']+';'+meta3['Cognition Early Childhood Composite v1.1']+';'+meta3['Cognition Fluid Composite v1.1']+';'+meta3['Cognition Total Composite Score v1.1'] meta3['nih_crystalcogcomp']=meta3['Cognition Crystallized Composite v1.1'] meta3['nih_eccogcomp']=meta3['Cognition Early Childhood Composite v1.1'] meta3['nih_fluidcogcomp']=meta3['Cognition Fluid Composite v1.1'] meta3['nih_totalcogcomp']=meta3['Cognition Total Composite Score v1.1'] ##for i in meta3.columns.to_list(); #meta3=meta3['version_form'].reset_index() cogcompreshape=pd.merge(cogcompreshape,meta3,on='PIN',how='inner') #cogcompreshape.columns inst_i='Cognition Composite Scores' #one instrument here...is merging of four instruments 
there #crosswalk_subset=crosswalk.loc[crosswalk.Inst==inst_i] #cwlist=list(crosswalk_subset['hcp_variable_upload']) #these should all correspond with the nda_element names in this structure #reshapedslim=cogcompreshape[ndarlist+cwlist] #cogcompreshape[cwlist] #the data2struct function only uses the crosswalk to get the structure name and number for the header of dout #dout is otherwise ready to go and data2structure just writes it to a file in the specified location #data2struct(patho=pathout,dout=reshapedslim,crosssub=crosswalk_subset,study=hcp_studystr) sendthroughcrosswalk(pathout,cogcompreshape,inst_i,crosswalk,studystr=hcp_studystr,verbose='No',debug='Yes') filePath="./prepped_structures" for f in os.listdir(filePath): with open(filePath+'/'+f) as g: first_line = g.readline() print(f) print(first_line, end ="") df=pd.read_csv(filePath+'/'+f,header=1) print("NumRows: "+str(df.shape[0])) print() print() ##test area for testing individual instruments inst_i='NIH Toolbox Dimensional Change Card Sort Test Ages 3-7 v2.1' items=rawdata.loc[rawdata.Inst==inst_i][['PIN','subject','Inst','visit','ItemID','Position', 'subjectkey','src_subject_id','interview_age','interview_date','gender', 'Score','ResponseTime']]# not these..., 'SE', 'Score', 'TScore','Theta']] items.ItemID = items.ItemID.str.lower().str.replace('-','_').str.replace('(','_').str.replace(')','_').str.replace(' ','_') inst=items.pivot(index='PIN',columns='ItemID',values='Score').reset_index() meta=items.drop_duplicates(subset=['PIN','visit']) instreshaped = pd.merge(meta, inst, on='PIN', how='inner').drop(columns={'subject', 'visit','Inst'}) items2=scordata.loc[scordata.Inst==inst_i][scorlist] instreshapedfull=pd.merge(instreshaped,items2,on='PIN',how='inner') instreshapedfull['version_monster']=instreshapedfull['Inst']+','+instreshapedfull['Assessment Name'] if 'Parent' in inst_i: instreshapedfull['respondent']='caregiver about child subject' else: instreshapedfull['respondent']='subject about self' #for 
i in instreshapedfull.columns: # print(i) #meta.columns ##items2.columns #Visual Acuity instruments, for example, have an unknown and variable number of #repeated items at different 'positions' which would require a double transpose into a single instrument 'row' per person. #NDA mapped all the different positions we saw in our data (see placeholders in crosswalk), but will assuredly #not have all of your positions. For visual acuity, the scores matter more than the individual items, however, #so we opted to only send scores for this particular instrument. Feel free to extend the specialty code below to #accommodate item level instrument for visual acuity. #Visual Acuity items not yet mapped - come back to it if possible before the release otherwise omit item levels this release #scores are mapped in the first special case above. #Last special Case is for Visual Acuity, which needs double pivot because of repeat items at different positions #This special case not yet mapped by NDA - so don't run, but will look something like this #special case for instruments with "Visual Acuity" in their titles, which have dup inst/itemid at diff positions #for i in scordata.Inst.unique(): # if i in rawdata.Inst.unique(): # inst_i=i # if "Visual Acuity" in inst_i: # print('Processing ' + inst_i + '...') # items=rawdata.loc[rawdata.Inst.str.contains('Visual Acuity')][['PIN','subject','Inst', # 'gender','visit','ItemID','Position','Response','Score']] # items.ItemID = items.ItemID.str.lower() # items['dup_number']=items.groupby(['PIN','ItemID']).cumcount()+1 # items['ItemID_Dup']=items.ItemID.str.replace('|', '_') + '_P'+items.dup_number.astype(str) # inst=items.pivot(index='PIN',columns='ItemID_Dup',values='Score') # meta = items.drop_duplicates(subset=['PIN', 'visit'])[['Inst', 'PIN', # 'subject', 'visit']] # instreshaped = pd.merge(meta, inst, on='PIN', how='inner') # items2 = scordata.loc[scordata.Inst == inst_i] # instreshapedfull = pd.merge(instreshaped, items2, on='PIN', 
how='inner') #now lets see what we have. whatdata=pd.DataFrame(rawdata.PIN.unique(),columns={'PIN'}) print(whatdata.shape) ##import list from Cindy for Plasma folks #plasma=pd.read_csv("20200723_list for plasma HCP2.0.csv", header=None) #plasma.columns=['subject'] #plasma['Plasma']='YES' whatdata['subject']=whatdata.PIN.str[0:10] whatdata['V1_TLBX']='YES' #whatdata=pd.merge(whatdata, plasma, on='subject',how='outer')#.drop(columns='PIN') whatdata.loc[whatdata.V1_TLBX.isnull()==True,'V1_TLBX']='NO' ##whatdata.loc[whatdata.Plasma.isnull()==True,'Plasma']='NO' ##whatdata.head() ##whatdata.shape ##whatdatdata.loc[whatdata.Plasma.isnull()==True,'Plasma']='NO' ##a.to_csv(preformatout+ndar_studystr+'_'+'Whatdata_TLBX_and_Plasma_NDA_HCA2.0'+snapshotdate+'.csv',index=False) whatdata.head() #for all the structures in the prepped diretories, create an indicator for subject included pathout="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/Ipad2NDA_withCrosswalk/NIHToolbox2NDA/prepped_structures/" pathout2="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/Ipad2NDA_withCrosswalk/NIHToolbox2NDA/" structs=pd.DataFrame(os.listdir(pathout),columns=['fname']) #structs.to_csv(pathout+'/listoffiles.csv') print(whatdata.shape) def getsubjects(filestruct=structs.fname[0], path=pathout, what=whatdata): din=pd.read_csv(path+filestruct,header=1) print(filestruct) dout=pd.DataFrame(din.src_subject_id.unique(),columns={'src_subject_id'}) #print(dout.columns) try: doutinst=pd.DataFrame(din.version_form.unique(),columns={'version_form'}) dout[doutinst.version_form[0]]='1' except: print('exception: '+ filestruct) if 'cogcomp' in filestruct: dout['Cognition Composite Instruments']='1' #what=whatdata.copy() #print(dout.columns) what=pd.merge(what,dout,left_on='subject',right_on='src_subject_id',how='outer')#.drop(columns=['src_subject_id']) what=what.drop(columns=['src_subject_id']) #print(what.columns) return what for i in structs.fname: 
    #whatdata=getsubjects(filestruct=structs.fname[0], path=pathout, what=whatdata)
    whatdata=getsubjects(filestruct=i, path=pathout, what=whatdata)
    print(whatdata.shape)

structs.shape
whatdata.head()
#whatdata['src_subject_id']=whatdata.PIN
#whatdata.to_csv(preformatout+ndar_studystr+'_'+'Whatdata_TLBXINST_and_Plasma_NDA_HCA2.0'+snapshotdate+'.csv',index=False)
whatdata.to_csv(pathout2+ndar_studystr+'_'+'Whatdata_TLBXINST_'+snapshotdate+'.csv',index=False)
print(pathout2+ndar_studystr+'_'+'Whatdata_TLBXINST_'+snapshotdate+'.csv')
```
Now validate all of these files by calling the OS from within this notebook (assuming you are using Linux) to run the NDA validator on your command line. Alternatively, you could just navigate to your terminal and execute the following for loop:

for var in pathout/*.csv; do vtcmd $var; done

Either option requires that you have downloaded and installed the https://github.com/NDAR/nda-tools python package per instructions. I installed vtcmd in my home directory, which set a couple of defaults in place, such as the location of validation results. To have the output of the validation sent to a more meaningful location than the default, I opened the /home/petra/.NDATools/settings.cfg file, and changed the line under [Files] that says 'validation_results = NDAValidationResults' to a better place (perhaps 'pathout'). For example, mine now says validation_results = /home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/Ipad2NDA_withCrosswalk/NIHToolbox2NDA/NDAValidationResults so that the prepped structures directory and the NDAValidationResults directory are right next to one another. If you had an error in the validation, your likely course of action is to debug the python code in the crosswalk.
Here are some unix commands to help filter through common issues in the Validation results: Find all the not integer warnings: grep notInteger /home/petra/NDAValidationResults/* > Notintegerwarnings Find all the invalid range warnings grep "invalid" NDAValidationResults/* | cut -d ',' -f 1,6 Cat all of the validation results together so you can see them all at once cat NDAValidationResults/validation*.csv > NDAValidationResults/Allvalidations.csv ``` structs=pd.DataFrame(os.listdir(pathout),columns=['fname']) structs.fname.str.split('_') #structs.to_csv(pathout+'/listoffiles.csv') ```
github_jupyter
# RadarCOVID-Report ## Data Extraction ``` import datetime import json import logging import os import shutil import tempfile import textwrap import uuid import matplotlib.pyplot as plt import matplotlib.ticker import numpy as np import pandas as pd import pycountry import retry import seaborn as sns %matplotlib inline current_working_directory = os.environ.get("PWD") if current_working_directory: os.chdir(current_working_directory) sns.set() matplotlib.rcParams["figure.figsize"] = (15, 6) extraction_datetime = datetime.datetime.utcnow() extraction_date = extraction_datetime.strftime("%Y-%m-%d") extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1) extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d") extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H") current_hour = datetime.datetime.utcnow().hour are_today_results_partial = current_hour != 23 ``` ### Constants ``` from Modules.ExposureNotification import exposure_notification_io spain_region_country_code = "ES" germany_region_country_code = "DE" default_backend_identifier = spain_region_country_code backend_generation_days = 7 * 2 daily_summary_days = 7 * 4 * 3 daily_plot_days = 7 * 4 tek_dumps_load_limit = daily_summary_days + 1 ``` ### Parameters ``` environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER") if environment_backend_identifier: report_backend_identifier = environment_backend_identifier else: report_backend_identifier = default_backend_identifier report_backend_identifier environment_enable_multi_backend_download = \ os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD") if environment_enable_multi_backend_download: report_backend_identifiers = None else: report_backend_identifiers = [report_backend_identifier] report_backend_identifiers environment_invalid_shared_diagnoses_dates = \ os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES") if 
def convert_iso_alpha_3_to_alpha_2(x):
    """Map an ISO 3166-1 alpha-3 country code to its alpha-2 form.

    Returns None (after logging) when the code cannot be resolved.
    """
    try:
        country = pycountry.countries.get(alpha_3=x)
        return country.alpha_2
    except Exception as e:
        logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}")
        return None
def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None):
    """Build a daily COVID-19 case series for the source-region set of each day.

    Reads the module-level ``confirmed_days_df`` (one row per calendar day) and
    ``confirmed_df`` (per-country daily new cases), groups days by their
    source-region set (as returned by *source_regions_for_date_function*), and
    sums new cases across those regions. Also computes a 7-day rolling mean
    ("covid_cases"). Returns a tuple of (result dataframe, per-day source-region
    summary dataframe). When *columns_suffix* is given, the value columns are
    renamed with that suffix. Side effect: plots the resulting series.
    """
    source_regions_at_date_df = confirmed_days_df.copy()
    # Resolve which source regions applied on each sample date.
    source_regions_at_date_df["source_regions_at_date"] = \
        source_regions_at_date_df.sample_date.apply(
            lambda x: source_regions_for_date_function(date=x))
    source_regions_at_date_df.sort_values("sample_date", inplace=True)
    # Comma-joined, display-ordered region string used as the grouping key.
    source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \
        source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x)))
    source_regions_at_date_df.tail()
    #%%
    source_regions_for_summary_df_ = \
        source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy()
    source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True)
    source_regions_for_summary_df_.tail()
    #%%
    confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"]
    confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns)
    # One pass per distinct source-region set; each produces the case series
    # restricted to the dates on which that set was active.
    for source_regions_group, source_regions_group_series in \
            source_regions_at_date_df.groupby("_source_regions_group"):
        source_regions_set = set(source_regions_group.split(","))
        confirmed_source_regions_set_df = \
            confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy()
        # Sum daily new cases across all countries in the set.
        confirmed_source_regions_group_df = \
            confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \
                .reset_index().sort_values("sample_date")
        # Right-merge against the full day range so missing days appear as NaN.
        confirmed_source_regions_group_df = \
            confirmed_source_regions_group_df.merge(
                confirmed_days_df[["sample_date_string"]].rename(
                    columns={"sample_date_string": "sample_date"}),
                how="right")
        confirmed_source_regions_group_df["new_cases"] = \
            confirmed_source_regions_group_df["new_cases"].clip(lower=0)
        # 7-day rolling mean smooths weekday reporting artifacts.
        confirmed_source_regions_group_df["covid_cases"] = \
            confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round()
        confirmed_source_regions_group_df = \
            confirmed_source_regions_group_df[confirmed_output_columns]
        # Treat zeros as gaps and forward-fill them from the previous day.
        confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan)
        confirmed_source_regions_group_df.fillna(method="ffill", inplace=True)
        # Keep only dates on which this region set was the active one.
        confirmed_source_regions_group_df = \
            confirmed_source_regions_group_df[
                confirmed_source_regions_group_df.sample_date.isin(
                    source_regions_group_series.sample_date_string)]
        # NOTE(review): DataFrame.append was removed in pandas 2.0 — this line
        # requires pandas < 2.0 (pd.concat is the modern equivalent); confirm
        # the pinned pandas version.
        confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df)
    result_df = confirmed_output_df.copy()
    result_df.tail()
    #%%
    result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True)
    result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left")
    result_df.sort_values("sample_date_string", inplace=True)
    result_df.fillna(method="ffill", inplace=True)
    result_df.tail()
    #%%
    result_df[["new_cases", "covid_cases"]].plot()
    if columns_suffix:
        result_df.rename(
            columns={
                "new_cases": "new_cases_" + columns_suffix,
                "covid_cases": "covid_cases_" + columns_suffix},
            inplace=True)
    return result_df, source_regions_for_summary_df_
def compute_keys_cross_sharing(x):
    """Compute TEK overlap between two backends for one (region_x, region_y) pair.

    Parameters
    ----------
    x : pandas.DataFrame
        Single-row group holding ``key_data_x`` and ``key_data_y``, each a
        sequence of TEKs for one backend.

    Returns
    -------
    pandas.Series
        ``common_teks`` (the shared TEK set) and ``common_teks_fraction``
        (share of backend A's TEKs also present in backend B).
    """
    teks_x = x.key_data_x.item()
    common_teks = set(teks_x).intersection(x.key_data_y.item())
    # Guard the denominator: a backend with no keys previously raised
    # ZeroDivisionError; report a 0.0 fraction instead.
    common_teks_fraction = len(common_teks) / len(teks_x) if len(teks_x) else 0.0
    return pd.Series(dict(
        common_teks=common_teks,
        common_teks_fraction=common_teks_fraction,
    ))
multi_backend_exposure_keys_by_region_df.merge( multi_backend_exposure_keys_by_region_df, on="_merge") multi_backend_exposure_keys_by_region_combination_df.drop( columns=["_merge"], inplace=True) if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1: multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_combination_df[ multi_backend_exposure_keys_by_region_combination_df.region_x != multi_backend_exposure_keys_by_region_combination_df.region_y] multi_backend_exposure_keys_cross_sharing_df = \ multi_backend_exposure_keys_by_region_combination_df \ .groupby(["region_x", "region_y"]) \ .apply(compute_keys_cross_sharing) \ .reset_index() multi_backend_cross_sharing_summary_df = \ multi_backend_exposure_keys_cross_sharing_df.pivot_table( values=["common_teks_fraction"], columns="region_x", index="region_y", aggfunc=lambda x: x.item()) multi_backend_cross_sharing_summary_df multi_backend_without_active_region_exposure_keys_df = \ multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier] multi_backend_without_active_region = \ multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() multi_backend_without_active_region exposure_keys_summary_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.region == report_backend_identifier] exposure_keys_summary_df.drop(columns=["region"], inplace=True) exposure_keys_summary_df = \ exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame() exposure_keys_summary_df = \ exposure_keys_summary_df.reset_index().set_index("sample_date_string") exposure_keys_summary_df.sort_index(ascending=False, inplace=True) exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) exposure_keys_summary_df.head() ``` ### Dump API TEKs ``` tek_list_df = multi_backend_exposure_keys_df[ 
["sample_date_string", "region", "key_data"]].copy() tek_list_df["key_data"] = tek_list_df["key_data"].apply(str) tek_list_df.rename(columns={ "sample_date_string": "sample_date", "key_data": "tek_list"}, inplace=True) tek_list_df = tek_list_df.groupby( ["sample_date", "region"]).tek_list.unique().reset_index() tek_list_df["extraction_date"] = extraction_date tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour tek_list_path_prefix = "Data/TEKs/" tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json" tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json" tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json" for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]: os.makedirs(os.path.dirname(path), exist_ok=True) tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier] tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json( tek_list_current_path, lines=True, orient="records") tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json( tek_list_daily_path, lines=True, orient="records") tek_list_base_df.to_json( tek_list_hourly_path, lines=True, orient="records") tek_list_base_df.head() ``` ### Load TEK Dumps ``` import glob def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame: extracted_teks_df = pd.DataFrame(columns=["region"]) file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json")))) if limit: file_paths = file_paths[:limit] for file_path in file_paths: logging.info(f"Loading TEKs from '{file_path}'...") iteration_extracted_teks_df = pd.read_json(file_path, lines=True) extracted_teks_df = extracted_teks_df.append( iteration_extracted_teks_df, sort=False) extracted_teks_df["region"] = \ extracted_teks_df.region.fillna(spain_region_country_code).copy() if region: extracted_teks_df = \ 
extracted_teks_df[extracted_teks_df.region == region] return extracted_teks_df daily_extracted_teks_df = load_extracted_teks( mode="Daily", region=report_backend_identifier, limit=tek_dumps_load_limit) daily_extracted_teks_df.head() exposure_keys_summary_df_ = daily_extracted_teks_df \ .sort_values("extraction_date", ascending=False) \ .groupby("sample_date").tek_list.first() \ .to_frame() exposure_keys_summary_df_.index.name = "sample_date_string" exposure_keys_summary_df_["tek_list"] = \ exposure_keys_summary_df_.tek_list.apply(len) exposure_keys_summary_df_ = exposure_keys_summary_df_ \ .rename(columns={"tek_list": "shared_teks_by_generation_date"}) \ .sort_index(ascending=False) exposure_keys_summary_df = exposure_keys_summary_df_ exposure_keys_summary_df.head() ``` ### Daily New TEKs ``` tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply( lambda x: set(sum(x, []))).reset_index() tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True) tek_list_df.head() def compute_teks_by_generation_and_upload_date(date): day_new_teks_set_df = tek_list_df.copy().diff() try: day_new_teks_set = day_new_teks_set_df[ day_new_teks_set_df.index == date].tek_list.item() except ValueError: day_new_teks_set = None if pd.isna(day_new_teks_set): day_new_teks_set = set() day_new_teks_df = daily_extracted_teks_df[ daily_extracted_teks_df.extraction_date == date].copy() day_new_teks_df["shared_teks"] = \ day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set)) day_new_teks_df["shared_teks"] = \ day_new_teks_df.shared_teks.apply(len) day_new_teks_df["upload_date"] = date day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True) day_new_teks_df = day_new_teks_df[ ["upload_date", "generation_date", "shared_teks"]] day_new_teks_df["generation_to_upload_days"] = \ (pd.to_datetime(day_new_teks_df.upload_date) - pd.to_datetime(day_new_teks_df.generation_date)).dt.days day_new_teks_df = 
day_new_teks_df[day_new_teks_df.shared_teks > 0] return day_new_teks_df shared_teks_generation_to_upload_df = pd.DataFrame() for upload_date in daily_extracted_teks_df.extraction_date.unique(): shared_teks_generation_to_upload_df = \ shared_teks_generation_to_upload_df.append( compute_teks_by_generation_and_upload_date(date=upload_date)) shared_teks_generation_to_upload_df \ .sort_values(["upload_date", "generation_date"], ascending=False, inplace=True) shared_teks_generation_to_upload_df.tail() today_new_teks_df = \ shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.upload_date == extraction_date].copy() today_new_teks_df.tail() if not today_new_teks_df.empty: today_new_teks_df.set_index("generation_to_upload_days") \ .sort_index().shared_teks.plot.bar() generation_to_upload_period_pivot_df = \ shared_teks_generation_to_upload_df[ ["upload_date", "generation_to_upload_days", "shared_teks"]] \ .pivot(index="upload_date", columns="generation_to_upload_days") \ .sort_index(ascending=False).fillna(0).astype(int) \ .droplevel(level=0, axis=1) generation_to_upload_period_pivot_df.head() new_tek_df = tek_list_df.diff().tek_list.apply( lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index() new_tek_df.rename(columns={ "tek_list": "shared_teks_by_upload_date", "extraction_date": "sample_date_string",}, inplace=True) new_tek_df.tail() shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \ [["upload_date", "shared_teks"]].rename( columns={ "upload_date": "sample_date_string", "shared_teks": "shared_teks_uploaded_on_generation_date", }) shared_teks_uploaded_on_generation_date_df.head() estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \ .groupby(["upload_date"]).shared_teks.max().reset_index() \ .sort_values(["upload_date"], ascending=False) \ .rename(columns={ "upload_date": "sample_date_string", "shared_teks": 
"shared_diagnoses", }) invalid_shared_diagnoses_dates_mask = \ estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates) estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0 estimated_shared_diagnoses_df.head() ``` ### Hourly New TEKs ``` hourly_extracted_teks_df = load_extracted_teks( mode="Hourly", region=report_backend_identifier, limit=25) hourly_extracted_teks_df.head() hourly_new_tek_count_df = hourly_extracted_teks_df \ .groupby("extraction_date_with_hour").tek_list. \ apply(lambda x: set(sum(x, []))).reset_index().copy() hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \ .sort_index(ascending=True) hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff() hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply( lambda x: len(x) if not pd.isna(x) else 0) hourly_new_tek_count_df.rename(columns={ "new_tek_count": "shared_teks_by_upload_date"}, inplace=True) hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[ "extraction_date_with_hour", "shared_teks_by_upload_date"]] hourly_new_tek_count_df.head() hourly_summary_df = hourly_new_tek_count_df.copy() hourly_summary_df.set_index("extraction_date_with_hour", inplace=True) hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index() hourly_summary_df["datetime_utc"] = pd.to_datetime( hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H") hourly_summary_df.set_index("datetime_utc", inplace=True) hourly_summary_df = hourly_summary_df.tail(-1) hourly_summary_df.head() ``` ### Official Statistics ``` import requests import pandas.io.json official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics") official_stats_response.raise_for_status() official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json()) official_stats_df = official_stats_df_.copy() official_stats_df["date"] = 
pd.to_datetime(official_stats_df["date"], dayfirst=True) official_stats_df.head() official_stats_column_map = { "date": "sample_date", "applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated", "communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated", } accumulated_suffix = "_accumulated" accumulated_values_columns = \ list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values())) interpolated_values_columns = \ list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns)) official_stats_df = \ official_stats_df[official_stats_column_map.keys()] \ .rename(columns=official_stats_column_map) official_stats_df["extraction_date"] = extraction_date official_stats_df.head() official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json" previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True) previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True) official_stats_df = official_stats_df.append(previous_official_stats_df) official_stats_df.head() official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)] official_stats_df.sort_values("extraction_date", ascending=False, inplace=True) official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True) official_stats_df.head() official_stats_stored_df = official_stats_df.copy() official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d") official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True) official_stats_df.drop(columns=["extraction_date"], inplace=True) official_stats_df = confirmed_days_df.merge(official_stats_df, how="left") official_stats_df.sort_values("sample_date", ascending=False, inplace=True) official_stats_df.head() official_stats_df[accumulated_values_columns] = \ 
official_stats_df[accumulated_values_columns] \ .astype(float).interpolate(limit_area="inside") official_stats_df[interpolated_values_columns] = \ official_stats_df[accumulated_values_columns].diff(periods=-1) official_stats_df.drop(columns="sample_date", inplace=True) official_stats_df.head() ``` ### Data Merge ``` result_summary_df = exposure_keys_summary_df.merge( new_tek_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( official_stats_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df = confirmed_es_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string) result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left") result_summary_df.set_index(["sample_date", "source_regions"], inplace=True) result_summary_df.drop(columns=["sample_date_string"], inplace=True) result_summary_df.sort_index(ascending=False, inplace=True) result_summary_df.head() with pd.option_context("mode.use_inf_as_na", True): result_summary_df = result_summary_df.fillna(0).astype(int) result_summary_df["teks_per_shared_diagnosis"] = \ (result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0) result_summary_df["shared_diagnoses_per_covid_case"] = \ (result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0) 
result_summary_df["shared_diagnoses_per_covid_case_es"] = \ (result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0) result_summary_df.head(daily_plot_days) def compute_aggregated_results_summary(days) -> pd.DataFrame: aggregated_result_summary_df = result_summary_df.copy() aggregated_result_summary_df["covid_cases_for_ratio"] = \ aggregated_result_summary_df.covid_cases.mask( aggregated_result_summary_df.shared_diagnoses == 0, 0) aggregated_result_summary_df["covid_cases_for_ratio_es"] = \ aggregated_result_summary_df.covid_cases_es.mask( aggregated_result_summary_df.shared_diagnoses_es == 0, 0) aggregated_result_summary_df = aggregated_result_summary_df \ .sort_index(ascending=True).fillna(0).rolling(days).agg({ "covid_cases": "sum", "covid_cases_es": "sum", "covid_cases_for_ratio": "sum", "covid_cases_for_ratio_es": "sum", "shared_teks_by_generation_date": "sum", "shared_teks_by_upload_date": "sum", "shared_diagnoses": "sum", "shared_diagnoses_es": "sum", }).sort_index(ascending=False) with pd.option_context("mode.use_inf_as_na", True): aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int) aggregated_result_summary_df["teks_per_shared_diagnosis"] = \ (aggregated_result_summary_df.shared_teks_by_upload_date / aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \ (aggregated_result_summary_df.shared_diagnoses / aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \ (aggregated_result_summary_df.shared_diagnoses_es / aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0) return aggregated_result_summary_df aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7) aggregated_result_with_7_days_window_summary_df.head() last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1] 
last_7_days_summary aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13) last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1] last_14_days_summary ``` ## Report Results ``` display_column_name_mapping = { "sample_date": "Sample\u00A0Date\u00A0(UTC)", "source_regions": "Source Countries", "datetime_utc": "Timestamp (UTC)", "upload_date": "Upload Date (UTC)", "generation_to_upload_days": "Generation to Upload Period in Days", "region": "Backend", "region_x": "Backend\u00A0(A)", "region_y": "Backend\u00A0(B)", "common_teks": "Common TEKs Shared Between Backends", "common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)", "covid_cases": "COVID-19 Cases (Source Countries)", "shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)", "shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)", "shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)", "shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)", "teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)", "shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)", "covid_cases_es": "COVID-19 Cases (Spain)", "app_downloads_es": "App Downloads (Spain – Official)", "shared_diagnoses_es": "Shared Diagnoses (Spain – Official)", "shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)", } summary_columns = [ "covid_cases", "shared_teks_by_generation_date", "shared_teks_by_upload_date", "shared_teks_uploaded_on_generation_date", "shared_diagnoses", "teks_per_shared_diagnosis", "shared_diagnoses_per_covid_case", "covid_cases_es", "app_downloads_es", "shared_diagnoses_es", "shared_diagnoses_per_covid_case_es", ] summary_percentage_columns= [ "shared_diagnoses_per_covid_case_es", "shared_diagnoses_per_covid_case", ] ``` ### Daily Summary Table ``` result_summary_df_ 
= result_summary_df.copy() result_summary_df = result_summary_df[summary_columns] result_summary_with_display_names_df = result_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) result_summary_with_display_names_df ``` ### Daily Summary Plots ``` result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \ .droplevel(level=["source_regions"]) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar( title=f"Daily Summary", rot=45, subplots=True, figsize=(15, 30), legend=False) ax_ = summary_ax_list[0] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.95) _ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist())) for percentage_column in summary_percentage_columns: percentage_column_index = summary_columns.index(percentage_column) summary_ax_list[percentage_column_index].yaxis \ .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0)) ``` ### Daily Generation to Upload Period Table ``` display_generation_to_upload_period_pivot_df = \ generation_to_upload_period_pivot_df \ .head(backend_generation_days) display_generation_to_upload_period_pivot_df \ .head(backend_generation_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) fig, generation_to_upload_period_pivot_table_ax = plt.subplots( figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df))) generation_to_upload_period_pivot_table_ax.set_title( "Shared TEKs Generation to Upload Period Table") sns.heatmap( data=display_generation_to_upload_period_pivot_df .rename_axis(columns=display_column_name_mapping) .rename_axis(index=display_column_name_mapping), fmt=".0f", annot=True, ax=generation_to_upload_period_pivot_table_ax) generation_to_upload_period_pivot_table_ax.get_figure().tight_layout() 
``` ### Hourly Summary Plots ``` hourly_summary_ax_list = hourly_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .plot.bar( title=f"Last 24h Summary", rot=45, subplots=True, legend=False) ax_ = hourly_summary_ax_list[-1] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.9) _ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist())) ``` ### Publish Results ``` github_repository = os.environ.get("GITHUB_REPOSITORY") if github_repository is None: github_repository = "pvieito/Radar-STATS" github_project_base_url = "https://github.com/" + github_repository display_formatters = { display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "", } general_columns = \ list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values())) general_formatter = lambda x: f"{x}" if x != 0 else "" display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns))) daily_summary_table_html = result_summary_with_display_names_df \ .head(daily_plot_days) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .to_html(formatters=display_formatters) multi_backend_summary_table_html = multi_backend_summary_df \ .head(daily_plot_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html(formatters=display_formatters) def format_multi_backend_cross_sharing_fraction(x): if pd.isna(x): return "-" elif round(x * 100, 1) == 0: return "" else: return f"{x:.1%}" multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \ 
.rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html( classes="table-center", formatters=display_formatters, float_format=format_multi_backend_cross_sharing_fraction) multi_backend_cross_sharing_summary_table_html = \ multi_backend_cross_sharing_summary_table_html \ .replace("<tr>","<tr style=\"text-align: center;\">") extraction_date_result_summary_df = \ result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date] extraction_date_result_hourly_summary_df = \ hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour] covid_cases = \ extraction_date_result_summary_df.covid_cases.item() shared_teks_by_generation_date = \ extraction_date_result_summary_df.shared_teks_by_generation_date.item() shared_teks_by_upload_date = \ extraction_date_result_summary_df.shared_teks_by_upload_date.item() shared_diagnoses = \ extraction_date_result_summary_df.shared_diagnoses.item() teks_per_shared_diagnosis = \ extraction_date_result_summary_df.teks_per_shared_diagnosis.item() shared_diagnoses_per_covid_case = \ extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item() shared_teks_by_upload_date_last_hour = \ extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int) display_source_regions = ", ".join(report_source_regions) if len(report_source_regions) == 1: display_brief_source_regions = report_source_regions[0] else: display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺" def get_temporary_image_path() -> str: return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png") def save_temporary_plot_image(ax): if isinstance(ax, np.ndarray): ax = ax[0] media_path = get_temporary_image_path() ax.get_figure().savefig(media_path) return media_path def save_temporary_dataframe_image(df): import dataframe_image as dfi df = df.copy() df_styler = 
df.style.format(display_formatters) media_path = get_temporary_image_path() dfi.export(df_styler, media_path) return media_path summary_plots_image_path = save_temporary_plot_image( ax=summary_ax_list) summary_table_image_path = save_temporary_dataframe_image( df=result_summary_with_display_names_df) hourly_summary_plots_image_path = save_temporary_plot_image( ax=hourly_summary_ax_list) multi_backend_summary_table_image_path = save_temporary_dataframe_image( df=multi_backend_summary_df) generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image( ax=generation_to_upload_period_pivot_table_ax) ``` ### Save Results ``` report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-" result_summary_df.to_csv( report_resources_path_prefix + "Summary-Table.csv") result_summary_df.to_html( report_resources_path_prefix + "Summary-Table.html") hourly_summary_df.to_csv( report_resources_path_prefix + "Hourly-Summary-Table.csv") multi_backend_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Summary-Table.csv") multi_backend_cross_sharing_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv") generation_to_upload_period_pivot_df.to_csv( report_resources_path_prefix + "Generation-Upload-Period-Table.csv") _ = shutil.copyfile( summary_plots_image_path, report_resources_path_prefix + "Summary-Plots.png") _ = shutil.copyfile( summary_table_image_path, report_resources_path_prefix + "Summary-Table.png") _ = shutil.copyfile( hourly_summary_plots_image_path, report_resources_path_prefix + "Hourly-Summary-Plots.png") _ = shutil.copyfile( multi_backend_summary_table_image_path, report_resources_path_prefix + "Multi-Backend-Summary-Table.png") _ = shutil.copyfile( generation_to_upload_period_pivot_table_image_path, report_resources_path_prefix + "Generation-Upload-Period-Table.png") ``` ### Publish Results as JSON ``` def generate_summary_api_results(df: pd.DataFrame) -> list: api_df = 
df.reset_index().copy() api_df["sample_date_string"] = \ api_df["sample_date"].dt.strftime("%Y-%m-%d") api_df["source_regions"] = \ api_df["source_regions"].apply(lambda x: x.split(",")) return api_df.to_dict(orient="records") summary_api_results = \ generate_summary_api_results(df=result_summary_df) today_summary_api_results = \ generate_summary_api_results(df=extraction_date_result_summary_df)[0] summary_results = dict( backend_identifier=report_backend_identifier, source_regions=report_source_regions, extraction_datetime=extraction_datetime, extraction_date=extraction_date, extraction_date_with_hour=extraction_date_with_hour, last_hour=dict( shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour, shared_diagnoses=0, ), today=today_summary_api_results, last_7_days=last_7_days_summary, last_14_days=last_14_days_summary, daily_results=summary_api_results) summary_results = \ json.loads(pd.Series([summary_results]).to_json(orient="records"))[0] with open(report_resources_path_prefix + "Summary-Results.json", "w") as f: json.dump(summary_results, f, indent=4) ``` ### Publish on README ``` with open("Data/Templates/README.md", "r") as f: readme_contents = f.read() readme_contents = readme_contents.format( extraction_date_with_hour=extraction_date_with_hour, github_project_base_url=github_project_base_url, daily_summary_table_html=daily_summary_table_html, multi_backend_summary_table_html=multi_backend_summary_table_html, multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html, display_source_regions=display_source_regions) with open("README.md", "w") as f: f.write(readme_contents) ``` ### Publish on Twitter ``` enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER") github_event_name = os.environ.get("GITHUB_EVENT_NAME") if enable_share_to_twitter and github_event_name == "schedule" and \ (shared_teks_by_upload_date_last_hour or not are_today_results_partial): import tweepy 
twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"] twitter_api_auth_keys = twitter_api_auth_keys.split(":") auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1]) auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3]) api = tweepy.API(auth) summary_plots_media = api.media_upload(summary_plots_image_path) summary_table_media = api.media_upload(summary_table_image_path) generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path) media_ids = [ summary_plots_media.media_id, summary_table_media.media_id, generation_to_upload_period_pivot_table_image_media.media_id, ] if are_today_results_partial: today_addendum = " (Partial)" else: today_addendum = "" def format_shared_diagnoses_per_covid_case(value) -> str: if value == 0: return "–" return f"≤{value:.2%}" display_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case) display_last_14_days_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"]) display_last_14_days_shared_diagnoses_per_covid_case_es = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"]) status = textwrap.dedent(f""" #RadarCOVID – {extraction_date_with_hour} Today{today_addendum}: - Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour) - Shared Diagnoses: ≤{shared_diagnoses:.0f} - Usage Ratio: {display_shared_diagnoses_per_covid_case} Last 14 Days: - Usage Ratio (Estimation): {display_last_14_days_shared_diagnoses_per_covid_case} - Usage Ratio (Official): {display_last_14_days_shared_diagnoses_per_covid_case_es} Info: {github_project_base_url}#documentation """) status = status.encode(encoding="utf-8") api.update_status(status=status, media_ids=media_ids) ```
github_jupyter
# DEMO2 Multitask learning - In this demo, we would like to allow a ML model to learn both "PEDOT-PSS" and "Wikipedia" databases - in the manuscript, totally 14 databases were used for training. - some of them cannot be uploaded due to the copyright issue. - therefore, wikipedia database is used for multitask training in this DEMO. - you can add your favorite databases to improve prediction accuracy, etc. - Basic steps are the same as DEMO1 ## This script will format graph databases ``` import sys sys.path.append("../MIGraph/GraphConv/") from ValueTransformer import ValueTransformer from ConvGraphScript import drawGraph,checkGraphList from AutoParameterScaling import AutoParameterScaling from ConvGraphmlToGraph import loadGraphCSV from PrepGraphScript import PrepGraphScript import glob import os import joblib from tqdm import tqdm import numpy as np import random os.chdir("praparingGraphs") #load PEDOT-PSS files folderList=glob.glob("input/PEDOTPSS/*") CSVPathList=[] graphPathList=[] for folder in folderList: CSVPath=folder+"/"+os.path.basename(folder)+".csv" graphPath=folder+"/graph/" CSVPathList.append(CSVPath) graphPathList.append(graphPath) ``` # convert graph-type PEDOT-PSS file - In DEMO1, graph-shaped PEDOT-PSS database was made automatically by NLP. - However, we made graph databases manually here, because the connection styles in graphs ("formats") were different from our "standard format". (compare those graphs, if you want) - Graphs are recorded in a graphml format. ``` VT=ValueTransformer() for CSVPath,graphPath in zip(CSVPathList,graphPathList): print(CSVPath) gList=loadGraphCSV(CSVPath,graphPath) #convert unit etc gList=VT.convertGraphList(gList) checkGraphList(gList) filename=os.path.basename(CSVPath) outname="temporary/"+filename+".graphbin" print("saving...", outname) joblib.dump(gList,outname,compress=3) ``` # convert wikipedia database - the database is recorded as a table. 
- it will be converted to graphs ``` #convert wikipedia file #you can add other compound csv files in additional_simple_comps csvList=glob.glob("input/additional_simple_comps/*.csv") print(len(csvList)) sorted(csvList) def conv(filename): pgs=PrepGraphScript(filename) pgs.doFragment=False pgs.prapareGraphList(numOfMaxFragments=2000) for num,filename in tqdm(enumerate(csvList)): print(num, "file: ",filename) conv(filename) ``` # combine compound databases ``` import pandas as pd #in the case of this PEDOT-PSS_txt project, only one compound file is available, but normally many) allCompundsPath="output/allcompounds.csv.gz" csvList=glob.glob("../convCSVtoGraph/temp/output/*.csv") csvList2=glob.glob("input/*.csv") csvgzList=glob.glob("input/*.csv.gz") compPathList=sorted(list(set(csvList)|set(csvgzList)|set(csvList2))) print(compPathList) CompColumns=["ID","SMILES"] for num,filePath in enumerate(compPathList): print(filePath) if num==0: df=pd.read_csv(filePath)[CompColumns] else: df2=pd.read_csv(filePath)[CompColumns] df=pd.concat([df,df2],axis=0) df=df.drop_duplicates("ID") df=df[CompColumns].reset_index() df.to_csv(allCompundsPath,index=False) df ``` # delete broken compounds and their graphs ``` from rdkit import Chem from rdkit.Chem import AllChem compIDtoSMILES=dict(zip(df["ID"],df["SMILES"])) graphbinList1=glob.glob("temporary/*.graphbin") graphbinList2=glob.glob("../convCSVtoGraph/temp/output/*.graphbin") graphbinList=sorted(list(set(graphbinList1)|set(graphbinList2))) for graphbin in tqdm(graphbinList): gList=joblib.load(graphbin) ngList=[] for g in (gList): #extract comps compIDList=[g.nodes[node]["label"] for node in g.nodes if str(g.nodes[node]["label"])[:2]=="C_"] if np.nan in compIDList: compIDList=["none"] print("nan") if "C_nan" in compIDList: compIDList=["none"] #check if mol objects can be made from smiles try: SMILESList = [compIDtoSMILES[i[2:]] for i in compIDList] molList =[Chem.MolFromSmiles(smiles) for smiles in SMILESList] for mol in molList: 
morgan_fps =AllChem.GetMorganFingerprintAsBitVect(mol, 2, 20) bit=morgan_fps.ToBitString() ngList.append(g) except: print("error",SMILESList) joblib.dump(ngList,graphbin) #standardizing values (this is not necessary for PEDOT-PSS project) and finalize graphs #** standardizing was done at step 1, because graphs made from automatic text parsing have slightly different forms #, and standardizing cannot be done by this code. (i.e., developed for "normal graphs" ) graphbinList1=glob.glob("temporary/*.graphbin") graphbinList2=glob.glob("../convCSVtoGraph/temp/output/*.graphbin") graphbinList=sorted(list(set(graphbinList1)|set(graphbinList2))) print(graphbinList) AutoSC=AutoParameterScaling() AutoSC.initialize(graphbinList) joblib.dump(AutoSC,"output/AutoSC.scaler",compress=3) AutoSC.autoTransform(graphbinList) ``` # check graphs ``` graphbinList=glob.glob("output/*.graphbin") gList=[] for file in tqdm(graphbinList): print(file) temp=joblib.load(file) gList.extend(temp) print(len(gList), " plots") number=0 #draw drawGraph(gList[number]) g=gList[number] nodeVals=[g.nodes[node]["label"] for node in g.nodes] nodeVals ```
github_jupyter
``` %matplotlib inline import argparse import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision import os import matplotlib.pyplot as plt from torchvision import datasets, transforms from torch.autograd import Variable from __future__ import print_function from PIL import Image from utils import * import sys; sys.argv=['']; del sys ``` ** Sepcify parameters ** ``` # Training settings parser = argparse.ArgumentParser(description='PyTorch MNIST Example') parser.add_argument('--batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)') parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', help='input batch size for testing (default: 1000)') parser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of epochs to train (default: 10)') parser.add_argument('--lr', type=float, default=0.005, metavar='LR', help='learning rate (default: 0.01)') parser.add_argument('--decay', type=float, default=0.00001, metavar='LR', help='learning rate (default: 0.00001)') parser.add_argument('--momentum', type=float, default=0.5, metavar='M', help='SGD momentum (default: 0.5)') parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)') parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') args = parser.parse_args() args.cuda = not args.no_cuda and torch.cuda.is_available() torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {} ``` ** Define Data Loaders ** 1. 
Data transformations - Horizontal flipping ``` # Data augmentation and normalization for training # Just normalization for validation data_transforms = { 'train': transforms.Compose([ #transforms.RandomSizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'val': transforms.Compose([ #transforms.Scale(256), #transforms.CenterCrop(224), transforms.ToTensor(), #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } data_dir = '../../Data/Test_1' dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} dset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=args.batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']} dset_sizes = {x: len(dsets[x]) for x in ['train', 'val']} dset_classes = dsets['train'].classes ``` ** Visualize Patches ** ``` ## Uncomment below to visualize dataset ###### def imshow(inp, title=None): """Imshow for Tensor.""" inp = inp.numpy().transpose((1, 2, 0)) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated # Get a batch of training data inputs, classes = next(iter(dset_loaders['train'])) # Make a grid from batch out = torchvision.utils.make_grid(inputs) imshow(out, title=[dset_classes[x] for x in classes]) ``` ** Define Model and Optimizer ** ``` # from resnet import ResNet152 from vgg import * model = VGG('VGG19') arch = 'VGG19' # model = VGG('VGG16') # arch = 'VGG16' # model = ResNet152() # arch = 'RESNET152' if args.cuda: model.cuda() ``` ** Define optimizer ** ``` optimizer = optim.Adadelta(model.parameters()) ``` ** Define Train and Test Functions ** ``` def train(epoch): model.train() train_loss = 0 correct = 0 for batch_idx, (data, target) in enumerate(dset_loaders['train']): if args.cuda: data, target = data.cuda(), target.cuda() data, target = Variable(data), Variable(target) 
optimizer.zero_grad() output = model(data) loss = F.cross_entropy(output, target) loss.backward() optimizer.step() if batch_idx % args.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(dset_loaders['train'].dataset), 100. * batch_idx / len(dset_loaders['train']), loss.data[0]), end='\r') train_loss += loss.data[0] pred = output.data.max(1)[1] # get the index of the max log-probability correct += pred.eq(target.data).cpu().sum() train_loss /= len(dset_loaders['train']) print('\nTrain set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( train_loss, correct, len(dset_loaders['train'].dataset), 100. * correct / len(dset_loaders['train'].dataset))) return train_loss, 100. * correct / len(dset_loaders['train'].dataset) def test(epoch): model.eval() test_loss = 0 correct = 0 for data, target in dset_loaders['val']: if args.cuda: data, target = data.cuda(), target.cuda() data, target = Variable(data, volatile=True), Variable(target) output = model(data) test_loss += F.cross_entropy(output, target).data[0] pred = output.data.max(1)[1] # get the index of the max log-probability correct += pred.eq(target.data).cpu().sum() test_loss = test_loss test_loss /= len(dset_loaders['val']) # loss function already averages over batch size print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, len(dset_loaders['val'].dataset), 100. * correct / len(dset_loaders['val'].dataset))) return test_loss, 100. 
* correct / len(dset_loaders['val'].dataset) ``` ** Train ** ``` performance = {} history = {} best_test_accuracy = 0.0 path = 'models/'+arch+'_1/' for epoch in range(1, args.epochs+1): adjust_learning_rate(optimizer,args.lr,args.decay,epoch) train_loss,train_accuracy = train(epoch) test_loss, test_accuracy = test(epoch) performance['train_loss'] = train_loss performance['test_loss'] = test_loss performance['train_acc'] = train_accuracy performance['test_acc'] = test_accuracy history = evaluate_model_and_append('./Train_Stats/'+arch+'_1/',performance,history) plot_performance('./Train_Stats/'+arch+'_1/',history) # print(optimizer.param_groups[0]['lr']) is_best = test_accuracy > best_test_accuracy print('test accuracy {} best {} is_best {}'.format(test_accuracy,best_test_accuracy,is_best)) prev_best = best_test_accuracy best_test_accuracy = max(best_test_accuracy, test_accuracy) print('test accuracy {} best {} is_best {}'.format(test_accuracy,best_test_accuracy,is_best)) save_checkpoint({ 'epoch': epoch + 1, 'arch': arch, 'state_dict': model.state_dict(), 'best_prec1': best_test_accuracy, 'optimizer' : optimizer.state_dict(), }, prev_best, is_best, path) ``` ** Load pre-trained model ** ``` model_path = 'models/VGG19_1/model_best_95.64.pkl' print("=> loading checkpoint '{}'".format(model_path)) checkpoint = torch.load(model_path) args.start_epoch = checkpoint['epoch'] best_prec1 = checkpoint['best_prec1'] model.load_state_dict(checkpoint['state_dict']) model.eval() optimizer.load_state_dict(checkpoint['optimizer']) print("=> loaded checkpoint '{}' (epoch {}) (accuracy {})" .format(model_path, checkpoint['epoch'], checkpoint['best_prec1'])) ``` ** Evaluate IOU Performance of CNN ** ``` iou = evaluate_test_iou('../../Data/instrument_1_8_training/instrument_dataset_1/left_frames/','../../Data/instrument_1_8_training/instrument_dataset_1/foreground/',model) print(np.mean(iou),np.std(iou)) ``` ** Evaluate on Example image ** ``` box_side = 50 image = 
np.asarray(Image.open("../../Data/instrument_1_8_testing/instrument_dataset_1/left_frames/frame256.png")) # gt_mask = np.asarray(Image.open("../../Data/instrument_1_8_testing/instrument_dataset_1/foreground/frame239.png")) # gt_mask = (gt_mask>0) seg_mask,seg_img = get_predict(model,image,box_side) plt.figure(figsize=(14, 12)) plt.subplot(131) plt.imshow(image) plt.subplot(132) # plt.imshow(gt_mask,cmap='gray') plt.subplot(133) plt.imshow(seg_mask,cmap='gray') plt.tight_layout() plt.show() ``` ** Implement CRF post processing ** ``` import sys import numpy as np import cv2 import pydensecrf.densecrf as dcrf import matplotlib.pylab as plt from skimage.segmentation import relabel_sequential from pydensecrf.utils import softmax_to_unary from pydensecrf.utils import compute_unary, create_pairwise_bilateral, \ create_pairwise_gaussian def crf_postprocess(image,seg_mask,use_2d): (x,y) = np.nonzero(np.sum(image!=15,axis=2)) top_left_x, top_left_y = x[0],y[0] bottom_right_x, bottom_right_y = x[-1],y[-1] img = image[top_left_x:bottom_right_x,top_left_y:bottom_right_y,:] mask = seg_mask[top_left_x:bottom_right_x,top_left_y:bottom_right_y] # img = image[0].transpose((1,2,0)) labels = relabel_sequential(mask)[0].flatten().astype('uint8') + 1 M = 21 # 21 Classes to match the C++ example # Example using the DenseCRF class and the util functions d = dcrf.DenseCRF(img.shape[0] * img.shape[1], M) # get unary potentials (neg log probability) U = compute_unary(labels, M, GT_PROB=0.5) d.setUnaryEnergy(U) # This creates the color-independent features and then add them to the CRF feats = create_pairwise_gaussian(sdims=(23, 23), shape=img.shape[:2]) d.addPairwiseEnergy(feats, compat=3, kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC) # This creates the color-dependent features and then add them to the CRF feats = create_pairwise_bilateral(sdims=(100, 100), schan=(5, 5, 5), img=img, chdim=2) d.addPairwiseEnergy(feats, compat=10, kernel=dcrf.DIAG_KERNEL, 
normalization=dcrf.NORMALIZE_SYMMETRIC) Q = d.inference(20) map = np.argmax(Q, axis=0).reshape(img.shape[:2]) res = (map.astype('float32') * 255 / map.max() > 0).astype('float32') crf_mask = np.zeros((image.shape[:2])) crf_mask[top_left_x:bottom_right_x,top_left_y:bottom_right_y] = res crf_seg_img = np.multiply(image,np.transpose(np.tile(crf_mask,(3,1,1)),(1,2,0))).astype('uint8') return (crf_mask,crf_seg_img) from timeit import default_timer as timer start = timer() crf_mask,crf_seg_img = crf_postprocess(image,seg_mask,False) end = timer() print('Time taken {}'.format(end - start)) crf_and_cnn_seg_mask = np.multiply(crf_mask,seg_mask) plt.figure(figsize=(14, 12)) plt.subplot(131) plt.imshow(image) plt.subplot(132) plt.imshow(seg_img) plt.subplot(133) plt.imshow(crf_seg_img) plt.tight_layout() plt.show() print('CNN output IOU {}'.format(IOU(gt_mask,seg_mask))) print('CRF output IOU {}'.format(IOU(gt_mask,crf_mask))) print('CNN and CRF output IOU {}'.format(IOU(gt_mask,np.multiply(crf_mask,seg_mask)))) plt.figure(figsize=(14, 12)) plt.subplot(141) plt.imshow(image) plt.subplot(142) # plt.imshow(gt_mask,cmap='gray') plt.subplot(143) plt.imshow(seg_mask,cmap='gray') plt.subplot(144) plt.imshow(crf_mask,cmap='gray') seg_mask.reshape(-1,seg_mask.shape[2]).transpose((1,0)).shape ```
github_jupyter
``` # R code for all interval level models of Analysis II: Driving Style Parameters # Import some packages library(reshape2) library(ggplot2) library(cowplot) library(ez) library(nlme) library(multcomp) library(pastecs) theme_set(theme_cowplot(font_size=8)) library(usdm) library(r2glmm) library(car) library(MASS) library(MCMCglmm) library(lmerTest) library(expss) library(moments) library(MuMIn) library(margins) library(dplyr) # Read signals, extract directions and distance thresholds signals = read.csv("../metrics/all_intervals_all_data.csv") colnames(signals) # Asign direction columns for acceleration and jerk (positive for >= 0 and negative for < 0) signals$pos_latA <- signals$MaxLatAcc >= 0 signals$pos_longA <- signals$MaxLongAcc >= 0 signals$pos_latJ <- signals$MaxLatJerk >= 0 signals$pos_longJ <- signals$MaxLongJerk >= 0 # Thresholds for the presence of a lead vehicle signals$present_stop <- signals$LongDist < 190 signals$present_pass <- signals$LatDist < 190 # Max-valued regressions # column indexes for the response/dependent variables of interest (NumPks, MaxPkAmp, MaxHR -- # MaxEntropy is handled separately due to missing data in signals) response_ids = c(18, 19, 26) responses = colnames(signals)[response_ids] #----------------------------- Models for all main effects -----------------------------# main_max = lapply(responses, function(x) { lme( eval( substitute( resp ~ Time + present_stop + MaxLongAcc*pos_longA + MaxLatAcc*pos_latA + MaxLongJerk*pos_longJ + MaxLatJerk*pos_latJ + present_pass + age + knowledge + trust, # control variables list(resp = as.name(x)) ) ), random = ~1|pid/Trial, correlation = corAR1(0, form=~1|pid/Trial), data = signals, method = "ML" ) }) entropy_main_max = lme( MaxEntropy ~ Time + present_stop + MaxLongAcc*pos_longA + MaxLatAcc*pos_latA + MaxLongJerk*pos_longJ + MaxLatJerk*pos_latJ + present_pass + age + knowledge + trust, random = ~1|pid/Trial, correlation = corAR1(0, form=~1|pid/Trial), data = na.omit(signals), method = "ML" 
) #-------------------------- Models for all interaction effects ---------------------------# inter_max = lapply(responses, function(x) { lme( eval( substitute( resp ~ Time + present_stop + MaxLongAcc*pos_longA + MaxLatAcc*pos_latA + MaxLongJerk*pos_longJ + MaxLatJerk*pos_latJ + present_stop*MaxLongAcc*pos_longA + present_stop*MaxLongJerk*pos_longJ + present_pass + age + knowledge + trust, list(resp = as.name(x)) ) ), random = ~1|pid/Trial, correlation = corAR1(0, form=~1|pid/Trial), data = signals, method = "ML" ) }) entropy_inter_max = lme( MaxEntropy ~ Time + present_stop + MaxLongAcc*pos_longA + MaxLatAcc*pos_latA + MaxLongJerk*pos_longJ + MaxLatJerk*pos_latJ + present_stop*MaxLongAcc*pos_longA + present_stop*MaxLongJerk*pos_longJ + present_pass + age + knowledge + trust, random = ~1|pid/Trial, correlation = corAR1(0, form=~1|pid/Trial), data = na.omit(signals), method = "ML" ) # Mean-valued regressions # column indexes for the response/dependent variables of interest (Mean SCL and Mean HR # -- MeanEntropy is handled separately due to missing data in signals response_ids = c(20, 25) responses = colnames(signals)[response_ids] #---------------------------- Models for all main effects -----------------------------# main_mean = lapply(responses, function(x) { lme( eval( substitute( resp ~ Time + present_stop + MeanLongAcc + MeanLatAcc + MeanLongJerk + MeanLatJerk + present_pass + age + knowledge + trust, list(resp = as.name(x)) ) ), random = ~1|pid/Trial, correlation = corAR1(0, form=~1|pid/Trial), data = signals, method = "ML" ) }) entropy_main_mean = lme( MeanEntropy ~ Time + present_stop + MeanLongAcc + MeanLatAcc + MeanLongJerk + MeanLatJerk + present_pass + age + knowledge + trust, random = ~1|pid/Trial, correlation = corAR1(0, form=~1|pid/Trial), data = na.omit(signals), method = "ML" ) #---------------------- Models for all interaction effects --------------------------# inter_mean = lapply(responses, function(x) { lme( eval( substitute( resp ~ Time + 
present_stop + MeanLongAcc + MeanLatAcc + MeanLongJerk + MeanLatJerk + present_stop:MeanLongAcc + present_stop:MeanLongJerk + present_pass + age + knowledge + trust, list(resp = as.name(x)) ) ), random = ~1|pid/Trial, correlation = corAR1(0, form=~1|pid/Trial), data = signals, method = "ML" ) }) entropy_inter_mean = lme( MeanEntropy ~ Time + present_stop + MeanLongAcc + MeanLatAcc + MeanLongJerk + MeanLatJerk + present_stop:MeanLongAcc + present_stop:MeanLongJerk + present_pass + age + knowledge + trust, random = ~1|pid/Trial, correlation = corAR1(0, form=~1|pid/Trial), data = na.omit(signals), method = "ML" ) # Combine all main effect models and interaction effect models (excluding entropy # which we process separately) main_eff_models = c(main_max, main_mean) inter_eff_models = c(inter_max, inter_mean) response_names = c( "NUMBER OF PEAKS", "MAX PEAK AMPLITUDE", "MAX HR", "MEAN SCL", "MEAN HR" ) model_idx = c(1:5) cat("ALL MAIN EFFECTS\n") cat("-------------------------------------------------------------------------------\n") model_print = function(x, y, z){ cat(y[x]) cat("\n") print(summary(z[[x]])) cat("\n\n\n") cat("============================================================================== \n\n\n") } lapply(model_idx, model_print, y=response_names, z=main_eff_models) cat("MAXIMUM ENTROPY\n") print(summary(entropy_main_max)) cat("\n================================================================================ \n\n\n") cat("MEAN ENTROPY\n") print(summary(entropy_main_mean)) cat("=================================================================================\n") cat("ALL INTERACTION EFFECTS\n") cat("---------------------------------------------------------------------------------\n") model_print = function(x, y, z){ cat(y[x]) cat("\n") print(summary(z[[x]])) cat("\n\n\n") cat("================================================================================ \n\n\n") } lapply(model_idx, model_print, y=response_names, z=inter_eff_models) cat("MAXIMUM 
ENTROPY\n") print(summary(entropy_inter_max)) cat("\n================================================================================== \n\n\n") cat("MEAN ENTROPY\n") print(summary(entropy_inter_mean)) cat("==================================================================================== \n") ```
github_jupyter
## CIFAR 10
```
%matplotlib inline
%reload_ext autoreload
%autoreload 2
from resources.conv_learner import *
PATH = Path("datasets/cifar10/")
os.makedirs(PATH,exist_ok=True)
torch.cuda.set_device(0)

# CIFAR-10 class names and per-channel mean/std statistics used for normalisation
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
stats = (np.array([ 0.4914 , 0.48216, 0.44653]), np.array([ 0.24703, 0.24349, 0.26159]))

num_workers = num_cpus()//2
bs=256  # batch size
sz=32   # image side length (CIFAR-10 native resolution)

# augmentation: random horizontal flip plus padding of sz//8 pixels
tfms = tfms_from_stats(stats, sz, aug_tfms=[RandomFlip()], pad=sz//8)
data = ImageClassifierData.prepare_from_path(PATH, val_name='test', tfms=tfms, bs=bs)

def get_data(path: str, sz, bs):
    # Build an ImageClassifierData for `path` using the 'val' split.
    # NOTE(review): prepare_from_path is unpacked into three values here but used
    # as a single return value above -- confirm which project API version applies.
    create, lbl2index, lbl2index_test = ImageClassifierData.prepare_from_path(path, val_name='val', bs=bs)
    # main_stats_X = {lbl2index[key][0]: val for key, val in stats.items()}
    tfms = tfms_from_stats(stats, sz, aug_tfms=[RandomFlip()], pad=sz//8)
    print('\n class to index mapping:\n',lbl2index)
    return create(tfms)

data = get_data(PATH, sz, bs)
x, y = next(iter(data.trn_dl))
x.shape

def conv_layer(ni, nf, ks=3, stride=1):
    """Conv2d (no bias) -> BatchNorm -> LeakyReLU(0.1); padding ks//2 preserves size at stride 1."""
    return nn.Sequential(
        nn.Conv2d(ni, nf, kernel_size=ks, bias=False, stride=stride, padding=ks//2),
        nn.BatchNorm2d(nf, momentum=0.01),
        nn.LeakyReLU(negative_slope=0.1, inplace=True))

class ResLayer(nn.Module):
    """Darknet-style residual block: 1x1 bottleneck to ni//2, 3x3 back to ni, plus identity skip."""
    def __init__(self, ni):
        super().__init__()
        self.conv1=conv_layer(ni, ni//2, ks=1)
        self.conv2=conv_layer(ni//2, ni, ks=3)

    def forward(self, x):
        return x.add(self.conv2(self.conv1(x)))

class Darknet(nn.Module):
    """Darknet backbone: stem conv, then groups of (channel-doubling conv + residual blocks),
    finished with global average pooling and a linear classifier."""

    def make_group_layer(self, ch_in, num_blocks, stride=1):
        # one channel-doubling conv followed by num_blocks residual layers
        return [conv_layer(ch_in, ch_in*2,stride=stride)
               ] + [(ResLayer(ch_in*2)) for i in range(num_blocks)]

    def __init__(self, num_blocks, num_classes, nf=32):
        super().__init__()
        layers = [conv_layer(3, nf, ks=3, stride=1)]
        for i,nb in enumerate(num_blocks):
            # stride is 1 for the second group (i==1) only, 2 (downsampling) otherwise
            layers += self.make_group_layer(nf, nb, stride=2-(i==1))
            nf *= 2
        layers += [nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(nf, num_classes)]
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)

m = Darknet([1, 2, 4, 6, 3], num_classes=10, nf=32)
# m = nn.DataParallel(m, [1,2,3])

# lr=1.3 is deliberately large: all fits below use use_clr_beta (one-cycle-style) schedules
lr = 1.3

learn = ConvLearner.from_model_data(m, data)
learn.crit = nn.CrossEntropyLoss()
learn.metrics = [accuracy]
wd=1e-4

%time learn.fit(lr, 1, wds=wd, cycle_len=30, use_clr_beta=(20, 20, 0.95, 0.85))
%time learn.fit(lr, 1, wds=wd, cycle_len=30, use_clr_beta=(20, 20, 0.95, 0.85))
# DP: m = WideResNet(depth=22, num_classes=10, widen_factor=6, dropRate=0.)
learn.fit(lr/10, 1, wds=wd, cycle_len=1, use_clr_beta=(100, 1, 0.9, 0.8))
%time learn.fit(lr, 1, wds=wd, cycle_len=30, use_clr_beta=(20, 20, 0.95, 0.85))
learn.fit(lr/10, 1, wds=wd, cycle_len=1, use_clr_beta=(100, 1, 0.9, 0.8))
%time learn.fit(lr, 1, wds=wd, cycle_len=40, use_clr_beta=(10, 15, 0.95, 0.85))
learn.fit(lr/10, 1, wds=wd, cycle_len=1, use_clr_beta=(100, 1, 0.9, 0.8))
%time learn.fit(1., 1, wds=wd, cycle_len=30, use_clr_beta=(10, 25, 0.95, 0.85))
%time learn.fit(lr, 1, wds=wd, cycle_len=40, use_clr_beta=(100, 15, 0.95, 0.85))
# darknet 2222 lr 1.3 65 cl
%time learn.fit(lr, 1, wds=wd, cycle_len=65, use_clr_beta=(30, 20, 0.95, 0.85))
```
github_jupyter
# Hyperparameter Exploration This notebook in concerned with exploring the hyperparameters associated with the Random Forest regressor. It is _extremely_ computationally intensive so you should only get stuck into this if you have: a) time, and b) an interest in whether I've selected the optimal parameters. ``` # Needed on a Mac import matplotlib as mpl mpl.use('TkAgg') %matplotlib inline import matplotlib.pyplot as plt # For reproducibility import random import numpy as np r_state = 42 random.seed(r_state) np.random.seed(r_state) import os import re import pandas as pd import seaborn as sns import sklearn print('Your scikit-learn version is {}.'.format(sklearn.__version__)) print('Please check it is at least 0.18.0.') from sklearn.preprocessing import scale from sklearn import linear_model from sklearn import tree from sklearn import preprocessing from sklearn import feature_selection from sklearn import model_selection from sklearn import metrics from sklearn import ensemble from sklearn.externals.six import StringIO #from sklearn.model_selection import GridSearchCV #from sklearn.feature_selection import SelectKBest #from sklearn.feature_selection import f_regression from timeit import default_timer as timer import datetime analytical = os.path.join('data','analytical') def load_status_scores(dtype): status = pd.read_csv(os.path.join(analytical,dtype+'-Scores.csv.gz'), index_col=0) # SES scores # Scores status.drop(['RANK_01','RANK_11'], axis=1, inplace=True) status.rename(columns={ 'SES_01':'SES 2001', 'SES_11':'SES 2011', 'SES_ASC':'SES Ascent 2001-2011', 'SES_PR_01':'SES 2001 Percentile', # 99 = High-status 'SES_PR_11':'SES 2011 Percentile', # 99 = High-status 'SES_PR_ASC':'SES Percentile Ascent 2001-2011' }, inplace=True) return status def classifier_report(clf, y_true, y_hat): txt = '' # If the task is regression evaluate using regression metrics, # otherwise evaluate using classification metrics txt += "R2: {0:8.5f}".format(metrics.r2_score(y_true, y_hat)) 
+ "\n" # R2 - Coefficient of determination txt += "MSE: {0:8.5f}".format(metrics.mean_squared_error(y_true, y_hat)) + "\n" # Mean squared error regression loss txt += "MAE: {0:8.5f}".format(metrics.mean_absolute_error(y_true, y_hat)) + "\n" # Mean absolute error regression loss txt += "Expl. Var: {0:8.5f}".format(metrics.explained_variance_score(y_true, y_hat)) + "\n" # Explained variance regression score function txt += "\n" return txt ``` ## Exploring Hyperparameters The code below is concerned with exploring the imapct that different hyperparameter settings can have on performance of the overall prediction. ``` # Take a paramter grid and explore a hyperparameter space # using Cross-Fold Validation... def explore_extr_hyper(params, x_train, y_train): clf = ensemble.ExtraTreesRegressor(n_jobs=-1, random_state=r_state) cv = model_selection.GridSearchCV(estimator=clf, param_grid=params, cv=4, n_jobs=2, return_train_score=True, verbose=1, scoring='neg_mean_absolute_error') cv.fit(x_train, y_train) print("Best score: " + str(cv.best_score_)) print("Best parameters: " + str(cv.best_params_)) best_clf = cv.best_estimator_ # Extract the best estimator from the GridSearch best_clf.fit(x_train, y_train) y_pred = best_clf.predict(X_test) print(classifier_report(best_clf, y_test, y_pred)) return cv # Output the results of a Cross-Validation process # to a data frame. Currently focussed on training and # testing scores. def cv_to_df(cvr): # Extract the parameters from the Cross-Validation object that # we want to track in our results params = cvr.cv_results_['params'] trn_scr = cvr.cv_results_['mean_train_score'] tst_scr = cvr.cv_results_['mean_test_score'] trn_std = cvr.cv_results_['std_train_score'] tst_std = cvr.cv_results_['std_test_score'] rank = cvr.cv_results_['rank_test_score'] # Create a data frame from the numbers df = pd.DataFrame.from_dict({'Training Score':trn_scr, 'Test Score':tst_scr, 'Std. of Training Scores':trn_std, 'Std. 
of Test Scores':tst_std}) # Add the rank of the result rs = pd.Series(rank, index=df.index) df['rank'] = rs # And now work out how many parameters there # were and create the appropriate columns to # add to the df. Start with named parameters... n_params = cvr.cv_results_['params'][0].keys() # Convert these to arrays that can be assigned # as a new data series to the df. for p in list(n_params): vals = [] for v in cvr.cv_results_['params']: vals.append(v[p]) # Create and assign a new series using # the index from the data frame to avoid # setting-with-copy warnings ps = pd.Series(vals, index=df.index) df[p] = ps return df # Can override to_use here if have already generated data above to_use = 'Untransformed' SES = load_status_scores(to_use) # SES scores in 2011 # Read the transformed data d01_trs2 = pd.read_csv(os.path.join(analytical,to_use+'-2001-Data-Transformed_and_Scaled.csv.gz'), index_col=0) d11_trs2 = pd.read_csv(os.path.join(analytical,to_use+'-2011-Data-Transformed_and_Scaled.csv.gz'), index_col=0) # Data about variables used later in process vardb = pd.read_csv(os.path.join('data','variables.csv'), index_col=False) vardb.drop('Description', axis=1, inplace=True) ``` To evaluate the models most reliably a portion of the dataset must be kept as holdout to evaluate the classifier on independently. The code below splits the data into training and test sets using a test size of 20%. ``` X_train, X_test, y_train, y_test = model_selection.train_test_split( d01_trs2, SES['SES Ascent 2001-2011'], test_size=0.2, random_state=r_state) ``` ### n_estimators This one is a beast since computations pile up as you increase the number of trees. For 400 fits on a MacBook Air I get a total running time of 5:10:40. 
``` param_grid = { "n_estimators" : [int(x) for x in np.arange(start=20, stop=2001, step=20)] } start = timer() cv1 = explore_extr_hyper(param_grid, X_train, y_train) duration = timer() - start print("Execution complete in: {0:15.1f}s".format(duration) + " (" + str(datetime.timedelta(seconds=duration)) + ")") cv_to_df(cv1).to_csv(os.path.join(analytical,to_use+'-Scores-n_estimators.csv'), index=False) ``` ### max_depth This appears to take approximately 36 seconds on a MacBook Air. ``` param_grid = { "max_depth" : [int(x) for x in np.arange(start=10, stop=161, step=10)], } start = timer() cv2 = explore_extr_hyper(param_grid, X_train, y_train) duration = timer() - start print("Execution complete in: {0:15.1f}s".format(duration) + " (" + str(datetime.timedelta(seconds=duration)) + ")") cv_to_df(cv2).to_csv(os.path.join(analytical,to_use+'-Scores-max_depth.csv'), index=False) ``` ### min_samples_leaf This is relatively quick since increasing the minimum size of terminal leaves reduces the depth of the trees substantially. It should take approximately 21 seconds on a MacBook Air. ``` param_grid = { "min_samples_leaf" : [int(x) for x in np.arange(start=1, stop=26, step=1)], } start = timer() cv3 = explore_extr_hyper(param_grid, X_train, y_train) duration = timer() - start print("Execution complete in: {0:15.1f}s".format(duration) + " (" + str(datetime.timedelta(seconds=duration)) + ")") # Save results to CSV file cv_to_df(cv3).to_csv(os.path.join(analytical,to_use+'-Scores-min_samples_leaf.csv'), index=False) ``` ### max_features & bootstrap The `max_features` applies limits to how many features each tree can employ as a share of the total number of features (1.0). Bootstrapping should not be necessary with a `k`-folds approach but in some cases can chagne the results. Running this apepars to take about 40 seconds on a MacBook Air. 
``` param_grid = { "max_features" : [float(x) for x in np.arange(start=0.1, stop=1.01, step=0.1)], # For regression normally n_features (worth trying after shorter runs) "bootstrap" : [True, False] # Not normally needed for ExtraTrees, but seems to improve performance? } param_grid['max_features'].append('auto') param_grid['max_features'].append('sqrt') start = timer() cv4 = explore_extr_hyper(param_grid, X_train, y_train) duration = timer() - start print("Execution complete in: {0:15.1f}s".format(duration) + " (" + str(datetime.timedelta(seconds=duration)) + ")") # Save results to CSV file cv_to_df(cv4).to_csv(os.path.join(analytical,to_use+'-Scores-max_features_and_bootstrap.csv'), index=False) ``` ## Important Caveat Although this exploration provides a useful overview of how the tuning of different hyperparameters can impact overall performance of the regressor, they _do not act independently of one another_. In other words: this is just exploration to get a 'feel' for the algorithm, and we will actually need to undertake a much, much, much more computationally challenging 'grid search' in [Notebook 8](08-Neighbourhood Prediction.ipynb) (or, I would suggest, [Script 8](08-Neighbourhood Prediction.py)).
github_jupyter
``` # Tensorflow to ONNX conversion is supported through the tf2onnx converter. import onnxmltools import tensorflow as tf import tf2onnx ``` To convert a TensorFlow model, tf2onnx prefers a frozen TensorFlow graph with specified inputs and outputs for the graph, alongside a source checkpoint file. More details on usage can be found in the [tf2onnx](https://github.com/onnx/tensorflow-onnx#usage) repository. ``` python3 -m tf2onnx.convert --input SOURCE_GRAPHDEF_PB --graphdef SOURCE_GRAPHDEF_PB --checkpoint SOURCE_CHECKPOINT --saved-model SOURCE_SAVED_MODEL [--inputs GRAPH_INPUTS] [--outputs GRAPH_OUTPUS] [--inputs-as-nchw inputs_provided_as_nchw] [--target TARGET] [--output TARGET_ONNX_GRAPH] [--target TARGET] [--continue_on_error] [--verbose] [--custom-ops list-of-custom-ops] [--opset OPSET] [--fold_const] ``` ``` # savedmodel is the name of the directory from your tf.saved_model output # model.onnx is the name of your desired output model !python3 -m tf2onnx.convert \ --saved_model savedmodel \ --output model.onnx \ --target rs6 \ --fold_const \ --verbose # model.pb is the name of your checkpoint # graph_inputs are the names of your input nodes # graph_outputs are the names of your output nodes # model.onnx is the name of your desired output model # If you know your graph input / output node names, then you can use the freeze_graph method instead # input and output node names can be discovered through Netron, tensorboard, or the TF summarize_graph tool !python3 -m tf2onnx.convert \ --input model.pb \ --inputs GRAPH_INPUTS \ --outputs GRAPH_OUTPUTS \ --output model.onnx \ --fold_const \ --verbose ``` Alternatively, you could use a model from an active Tensorflow session in Python, freeze the graph, and then convert it, as demonstrated below. 
``` # Replace this with your desired input TF model name input_model_name = "tf_model" # Replace this with your desired output ONNX model name output_onnx_model = "model.onnx" with tf.Session() as sess: # Note: this is a simple example Tensorflow model x = tf.placeholder(tf.float32, [2, 3], name="input") x_ = tf.add(x, x) _ = tf.identity(x_, name="output") onnx_graph = tf2onnx.tfonnx.process_tf_graph(sess.graph, input_names=["input:0"], output_names=["output:0"]) onnx_model = onnx_graph.make_model(input_model_name) # Save as protobuf onnxmltools.utils.save_model(onnx_model, output_onnx_model) ```
github_jupyter
## The three card puzzle Suppose we have three cards in a hat: * <span style="color:red">**R**</span><span style="color:blue">**B**</span> - One card is painted <span style="color:blue">blue</span> on one side and <span style="color:red">red</span> on the other. * <span style="color:blue">**BB**</span> - One card is painted <span style="color:blue">blue</span> on both sides. * <span style="color:red">**RR**</span> - One card is painted <span style="color:red">red</span> on both sides. ## The setup * I pick one of the three cards at random, flip it to a random side, and place it on the table. * $U$ be the color of the side of the card facing up. (<span style="color:blue">**B**</span> or <span style="color:red">**R**</span>) ## Do you want to bet? * If the other side of the card has a different I pay you \$1, * If the other side has the same color you pay me \$1. ## Why is this a fair bet ? * Suppose $U$ is <span style="color:red">**R**</span>. * Then the card is either <span style="color:red">**RR**</span> or <span style="color:red">**R**</span><span style="color:blue">**B**</span>. * Therefor the other side can be either <span style="color:red">**R**</span> or <span style="color:blue">**B**</span> * Therefor in this case the odds are equal. * A similar argument holds for the case where $U$ is <span style="color:blue">**B**</span> ## Lets use a monte-carlo simulation The code below selects one of the three cards at random and selects a random side to be "up". It then prints the card and indicates if the two sides have the same or different colors. ``` red_bck="\x1b[41m%s\x1b[0m" blue_bck="\x1b[44m%s\x1b[0m" red=red_bck%'R' black=blue_bck%'B' Cards=[(red,black),(red,red),(black,black)] counts={'same':0,'different':0} from random import random for j in range(50): i=int(random()*3.) # Select a random card side=int(random()*2.) 
C=Cards[i] if(side==1): # select which side to be "up" C=(C[1],C[0]) same= 'same' if C[0]==C[1] else 'different' # count the number of times the two sides are the same or different. counts[same]+=1 print(''.join(C)+' %-9s'%same, end='') if (j+1)%5==0: print() print() print(counts) ``` ## The simulation does not agree with the argument * In Simulation: the two sides have the same color about **twice** the number of times that they have different color. * you are twice as likely to lose as you are to win. * On average you lose 33 cents per iteration: $\$1\times(2/3)-\$1\times(1/3)$ ## Alternative argument If we pick a card at random 2/3 of the time we pick a card where the two sides have the same color, and only 1/3 where the color is different. ## How can we be sure? * The original argument also sounds convincing, but is wrong. * To be sure that our argument is correct, we need to define some concepts, including **outcome** and **event**. Which we will do next week.
github_jupyter
### Uncertainty Robot motion and sensors have some uncertainty associated with them. For example, imagine a car driving up hill and down hill; the speedometer reading will likely overestimate the speed of the car going up hill and underestimate the speed of the car going down hill because it cannot perfectly account for gravity. Similarly, we cannot perfectly predict the *motion* of a robot. A robot is likely to slightly overshoot or undershoot a target location. --- Before we start analyzing robot motion, let's load in our resources and define the `robot` class. You can see that this class initializes the robot's position and adds measures of uncertainty for motion. ``` # import some resources import numpy as np import matplotlib.pyplot as plt import random %matplotlib inline class robot: # -------- # init: # creates a robot with the specified parameters and initializes # the location (self.x, self.y) to the center of the world # def __init__(self, world_size = 100.0, measurement_range = 30.0, motion_noise = 1.0, measurement_noise = 1.0): self.measurement_noise = 0.0 self.world_size = world_size self.measurement_range = measurement_range self.x = world_size / 2.0 self.y = world_size / 2.0 self.motion_noise = motion_noise self.measurement_noise = measurement_noise self.landmarks = [] self.num_landmarks = 0 # returns a positive, random float def rand(self): return random.random() * 2.0 - 1.0 # -------- # move: attempts to move robot by dx, dy. If outside world # boundary, then the move does nothing and instead returns failure # def move(self, dx, dy): x = self.x + dx + self.rand() * self.motion_noise y = self.y + dy + self.rand() * self.motion_noise if x < 0.0 or x > self.world_size or y < 0.0 or y > self.world_size: return False else: self.x = x self.y = y return True # -------- # sense: returns x- and y- distances to landmarks within visibility range # because not all landmarks may be in this range, the list of measurements # is of variable length. 
Set measurement_range to -1 if you want all # landmarks to be visible at all times # def sense(self): ''' This function does not take in any parameters, instead it references internal variables (such as self.landmarks) to measure the distance between the robot and any landmarks that the robot can see (that are within its measurement range). This function returns a list of landmark indices, and the measured distances (dx, dy) between the robot's position and said landmarks. This function should account for measurement_noise and measurement_range. One item in the returned list should be in the form: [landmark_index, dx, dy]. ''' measurements = [] ## TODO: For each landmark ## 1. compute dx and dy, the distances between the robot and the landmark ## 2. account for measurement noise by adding a noise component to dx and dy ## TODO: return the final, complete list of measurements for i, landmark in enumerate(self.landmarks): dx = landmark[0] - self.x + self.rand() * self.measurement_noise dy = landmark[1] - self.y + self.rand() * self.measurement_noise # checking if measurement in range if abs(dx) <= self.measurement_range and abs(dy) <= self.measurement_range: measurements.append([i,dx,dy]) return measurements # -------- # make_landmarks: # make random landmarks located in the world # def make_landmarks(self, num_landmarks): self.landmarks = [] for i in range(num_landmarks): self.landmarks.append([round(random.random() * self.world_size), round(random.random() * self.world_size)]) self.num_landmarks = num_landmarks # called when print(robot) is called; prints the robot's location def __repr__(self): return 'Robot: [x=%.5f y=%.5f]' % (self.x, self.y) ``` ## Defining a world and a robot Instantiate a robot object. As you can see in `__init__` above, the robot class takes in a number of parameters including a world size and some values that indicate the sensing and movement capabilities of the robot. 
In the next example, we define a small 10x10 square world, a measurement range that is half that of the world and small values for motion and measurement noise. These values will typically be about 10 times larger, but we just want to demonstrate this behavior on a small scale. ``` #Fiddle around with these values world_size = 10.0 # size of world (square) measurement_range = 5.0 # range at which we can sense landmarks motion_noise = 0.2 # noise in robot motion measurement_noise = 0.2 # noise in the measurements # instantiate a robot, r r = robot(world_size, measurement_range, motion_noise, measurement_noise) # print out the location of r print(r) ``` ## Visualizing the World In the given example, we can see/print out that the robot is in the middle of the 10x10 world at (x, y) = (5.0, 5.0). In the next cell a helper visualization function is provided: `display_world`, that will display a grid world in a plot and draw a red `o` at the location of our robot, `r`. The details of how this function wors can be found in the `helpers.py` file in the home directory. ``` # import helper function from helpers import display_world # define figure size plt.rcParams["figure.figsize"] = (5,5) # call display_world and display the robot in it's grid world print(r) display_world(int(world_size), [r.x, r.y]) ``` ## Movement Let's call the robot's `move` function. We'll ask it to move some distance `(dx, dy)` and we'll see that this motion is not perfect by the placement of our robot `o` and by the printed out position of `r`. Try changing the values of `dx` and `dy` and/or running this cell multiple times; see how the robot moves and how the uncertainty in robot motion accumulates over multiple movements. 
``` # choose values of dx and dy (negative works, too) dx = 1 dy = 2 r.move(dx, dy) # print out the exact location print(r) # display the world after movement, not that this is the same call as before # the robot tracks its own movement display_world(int(world_size), [r.x, r.y]) ``` ## Landmarks Next, let's create landmarks, which are measurable features in the map. You can think of landmarks as things like notable buildings, or something smaller such as a tree, rock, or other feature. The robot class has a function `make_landmarks` which randomly generates locations for the number of specified landmarks. Try changing `num_landmarks` or running this cell multiple times to see where these landmarks appear. We have to pass these locations as a third argument to the `display_world` function and the list of landmark locations is accessed similar to how we find the robot position `r.landmarks`. Each landmark is displayed as a purple `x` in the grid world, and we also print out the exact `[x, y]` locations of these landmarks at the end of this cell. ``` # create any number of landmarks num_landmarks = 3 r.make_landmarks(num_landmarks) # print out our robot's exact location print(r) # display the world including these landmarks display_world(int(world_size), [r.x, r.y], r.landmarks) # print the locations of the landmarks print('Landmark locations [x,y]: ', r.landmarks) ``` ## Sense Once we have some landmarks to sense, we need to be able to tell our robot to *try* to sense how far they are away from it. The `sense` function uses only internal class parameters and returns a list of the the measured/sensed x and y distances to the landmarks it senses within the specified `measurement_range`. 
``` # try to sense any surrounding landmarks measurements = r.sense() # this will print out an empty list if `sense` has not been implemented print(measurements) ``` --- ## Data #### Putting it all together To perform SLAM, we'll collect a series of robot sensor measurements and motions, in that order, over a defined period of time. Then we'll use only this data to re-construct the map of the world with the robot and landmar locations. In the next notebook, you'll see this list of movements and measurements listed in a structure called `data`. This is an array that holds sensor measurements and movements in a specific order, which will be useful to call upon when you have to extract this data and form constraint matrices and vectors. `data` is constructed over a series of time steps as follows: ``` data = [] # after a robot first senses, then moves (one time step) # that data is appended like so: data.append([measurements, [dx, dy]]) # for our example movement and measurement print(data) # in this example, we have only created one time step (0) time_step = 0 # so you can access robot measurements: print('Measurements: ', data[time_step][0]) # and its motion for a given time step: print('Motion: ', data[time_step][1]) ```
github_jupyter
``` ######################################################################## # File : CZI-ZARR Save Dask Array.ipynb # Version : 0.1 # Author : czsrh # Date : 12.11.2019 # Insitution : Carl Zeiss Microscopy GmbH # # Disclaimer: Just for testing - Use at your own risk. # Feedback or Improvements are welcome. ######################################################################## ``` This notebook was mainly inspired by the following blogposts: [Load Large Image Data with Dask Array](https://blog.dask.org/2019/06/20/load-image-data) [Introducing napari: a fast n-dimensional image viewer in Python](https://ilovesymposia.com/2019/10/24/introducing-napari-a-fast-n-dimensional-image-viewer-in-python) ``` # this can be used to switch on/off warnings import warnings warnings.filterwarnings('ignore') warnings.simplefilter('ignore') # import the libraries mentioned above from apeer_ometiff_library import io, processing, omexmlClass import czifile as zis import xmltodict import os import time import numpy as np import ipywidgets as widgets import napari import imgfileutils as imf import xml.etree.ElementTree as ET import zarr import dask import dask.array as da import glob # the directory contains 96 scenes of a wellplate as individual CZI files # which where created by SplitScenesWriteFiles # get list of all filenames #filenames = glob.glob(r'c:\Users\m1srh\Documents\Testdata_Zeiss\Castor\EMBL\96well\testwell96_Single_CZI\*.czi') filenames = glob.glob(r'/datadisk1/tuxedo/testpictures/Testdata_Zeiss/wellplate/single_czi/*.czi') # show number of files len(filenames) def get_czi_array(filename): # get the array and the metadata array, metadata = imf.get_array_czi(filename) return array metadata, add_metadata = imf.get_metadata_czi(filenames[0]) # get the required shape of the resulting array - assumption here is that all scenes have the same shape array_shape = metadata['Shape'][:-1] # get the required pixel type for such an array array_dtype = metadata['NumPy.dtype'] 
print(array_shape) print(array_dtype) # find the indes for the Scenes dimensions from the dimstring dims_dict, dimindex_list, numvalid_dims = imf.get_dimorder(metadata['Axes']) dims_dict['S'] # lazy reading lazy_arrays = [dask.delayed(get_czi_array)(fn) for fn in filenames] lazy_arrays = [da.from_delayed(x, shape=array_shape, dtype=array_dtype) for x in lazy_arrays] # look at a singe array lazy_arrays[0] # concatenate first n array - in this case along the scenes dimension full_array = da.concatenate(lazy_arrays[:], axis=dims_dict['S']) # show full dask array full_array use_compression = False # construct new filename for dask array zarr_arrayname = os.path.join( os.path.dirname(filenames[0]), 'testwell96.zarr') print('Try to save to : ', zarr_arrayname) # save to ZARR array if not already existing if os.path.exists(zarr_arrayname): print('Dask Array already exits. Do not overwrite.') if not os.path.exists(zarr_arrayname): print('Saving ZARR Array to : ', zarr_arrayname) # write data to disk using dask array if use_compression: from numcodecs import Blosc # save with compression full_array.to_zarr(zarr_arrayname, compressor=Blosc(cname='zstd', clevel=3, shuffle=Blosc.BITSHUFFLE)) if not use_compression: # just use the "simple save" method full_array.to_zarr(zarr_arrayname) # read image back from ZARR array zarr_image = da.from_zarr(zarr_arrayname) print('Array Type : ', type(zarr_image)) print('Array Shape : ', zarr_image.shape) # switch to qt5 backend for napari viewer and wait a few seconds %gui qt5 time.sleep(5) # initialize Napari Viewer and add the two channels as layes viewer = napari.Viewer() viewer.add_image(zarr_image[:, :, 0, :, :], name='A568', colormap='red', blending='additive') viewer.add_image(zarr_image[:, :, 1, :, :], name='A488', colormap='green', blending='additive') ``` jupyter nbconvert CZI-ZARR Save Dask Array.ipynb --to slides --post serve
github_jupyter
``` import seaborn as sns import pandas as pd import numpy as np import os loc = '/checkpoint/koustuvs/compositionality/' step = 'step_2' dir_loc = os.path.join(loc, step) s1_train = open(os.path.join(dir_loc, 's1.train')).readlines() s2_train = open(os.path.join(dir_loc, 's2.train')).readlines() s1_dev = open(os.path.join(dir_loc, 's1.dev')).readlines() s2_dev = open(os.path.join(dir_loc, 's2.dev')).readlines() s1_test = open(os.path.join(dir_loc, 's1.test')).readlines() s2_test = open(os.path.join(dir_loc, 's2.test')).readlines() train_words = [] train_sent = s1_train + s2_train for sent in train_sent: ws = sent.rstrip().split(' ') noun_verb = [w for w in ws if ('N' in w) or ('V' in w)] train_words.extend(noun_verb) train_words = list(set(train_words)) dev_words = [] dev_sent = s1_dev + s2_dev for sent in dev_sent: ws = sent.rstrip().split(' ') noun_verb = [w for w in ws if ('N' in w) or ('V' in w)] dev_words.extend(noun_verb) dev_words = list(set(dev_words)) set(train_words) - set(dev_words) test_words = [] test_sent = s1_test + s2_test for sent in test_sent: ws = sent.rstrip().split(' ') noun_verb = [w for w in ws if ('N' in w) or ('V' in w)] test_words.extend(noun_verb) test_words = list(set(test_words)) len(test_words) len(train_words) with open('/checkpoint/koustuvs/compositionality/train_words.txt','w') as fp: for word in train_words: fp.write(word + '\n') len(test_words) with open('/checkpoint/koustuvs/compositionality/test_words.txt','w') as fp: for word in test_words: fp.write(word + '\n') import torch len(train_words) len(test_words) len(step_0_w) from sklearn.manifold import TSNE import matplotlib.pyplot as plt %matplotlib inline from sklearn.decomposition import PCA def reduce_dim(vocab_dict, train_words, test_words, mode='all',alg='tsne'): labels = [] tokens = [] for word in vocab_dict: if mode == 'train': words = train_words elif mode == 'test': words = test_words else: words = train_words + test_words #if word in words: 
# --- Word-embedding geometry analysis (flattened notebook cells). ---
# NOTE(review): this chunk begins inside the body of reduce_dim(); the `def`
# line lies before this excerpt, so the fragment below is reproduced as-is.
# reduce_dim appears to project word vectors to 2-D (t-SNE or PCA) and tag
# each word as train ('_t') or novel ('_s') — confirm against the full file.
        tokens.append(vocab_dict[word])
        if word in train_words:
            labels.append(word + '_t')
        else:
            labels.append(word + '_s')
    if alg == 'tsne':
        model = TSNE(perplexity=30, n_components=2, init='pca', n_iter=3500, random_state=23)
        new_values = model.fit_transform(tokens)
    else:
        model = PCA(n_components=2, whiten=True)
        new_values = model.fit(tokens).transform(tokens)
    x = []
    y = []
    for value in new_values:
        x.append(value[0])
        y.append(value[1])
    return x,y, labels

# Load step-0 word vectors from a checkpoint and scatter-plot the 2-D
# projection, colouring train vs novel words.
step = 'step_0'
wloc = '/checkpoint/koustuvs/compositionality/outputs/exp_seed_99/'
step_0_w = torch.load(wloc + step + '/InnerAttentionMILAEncoder_model.pkl.wordvec')
step_0_w['N0.0'].shape
x_0,y_0,labels_0 = reduce_dim(step_0_w, random.sample(train_words, len(test_words)), test_words)
df = pd.DataFrame({'one': x_0, 'two': y_0, 'Labels': ['Novel' if t.split('_')[-1] == 's' else 'Train' for t in labels_0]})
plt.figure(figsize=(16,10))
ax = plt.subplot()
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
    label.set_fontname('Arial')
    label.set_fontsize(16)
    label.set_fontweight('semibold')
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,10))
ax.xaxis.get_offset_text().set_fontsize(16)
axis_font = {'fontname':'Arial', 'size':'5'}
sns.scatterplot(
    x="one", y="two",
    hue="Labels",
    palette=sns.color_palette("colorblind", 2),
    data=df,
    legend="full",
    alpha=1,
    style="Labels",
    s=80,
    markers=['o','P']
)
ax.legend(loc="upper right", prop={'size' : 18})

# Same plot for the step-2 checkpoint.
step = 'step_2'
wloc = '/checkpoint/koustuvs/compositionality/outputs/exp_seed_99/'
step_2_w = torch.load(wloc + step + '/InnerAttentionMILAEncoder_model.pkl.wordvec')
x_2,y_2,labels_2 = reduce_dim(step_2_w, random.sample(train_words, len(test_words)), test_words)
df = pd.DataFrame({'one': x_2, 'two': y_2, 'Labels': ['Novel' if t.split('_')[-1] == 's' else 'Train' for t in labels_2]})
plt.figure(figsize=(16,10))
ax = plt.subplot()
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
    label.set_fontname('Arial')
    label.set_fontsize(16)
    label.set_fontweight('semibold')
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,10))
ax.xaxis.get_offset_text().set_fontsize(16)
axis_font = {'fontname':'Arial', 'size':'5'}
sns.scatterplot(
    x="one", y="two",
    hue="Labels",
    palette=sns.color_palette("colorblind", 2),
    data=df,
    legend="full",
    alpha=1,
    style="Labels",
    markers=['o','P'],
    s=80
)
ax.legend(loc="upper right", prop={'size' : 18})
sns.set_context('paper')

# Mean vectors over the train/test vocabularies.
# NOTE(review): axis=1 averages *within* each vector rather than across words —
# verify the intended axis; and `mean` on the next cell is undefined at this
# point (probably meant train_mean or test_mean).
train_mean = np.mean([w for k,w in step_0_w.items() if k in train_words],axis=1)
test_mean = np.mean([w for k,w in step_0_w.items() if k in test_words],axis=1)
mean.shape
from sklearn.metrics.pairwise import cosine_similarity
np.linalg.norm(step_0_w['N97.1'])
import random
from tqdm.notebook import tqdm

def unit_vector(vector):
    """ Returns the unit vector of the vector. """
    return vector / np.linalg.norm(vector)

def angle_between(v1, v2):
    """ Returns the angle in radians between vectors 'v1' and 'v2'::
    >>> angle_between((1, 0, 0), (0, 1, 0))
    1.5707963267948966
    >>> angle_between((1, 0, 0), (1, 0, 0))
    0.0
    >>> angle_between((1, 0, 0), (-1, 0, 0))
    3.141592653589793
    """
    v1_u = unit_vector(v1)
    v2_u = unit_vector(v2)
    # clip guards against floating-point drift just outside [-1, 1]
    return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))

# Reference axis: first standard basis vector (assumes 300-dim embeddings).
xax = np.zeros(300)
xax[0]=1

def get_angle(v):
    # Cosine of the angle between v and the reference axis (not the angle itself).
    return np.dot(v,xax) / (np.linalg.norm(v) * np.linalg.norm(xax))

np.linalg.norm(xax)

# Permutation test: draw `k` train words `tms` times; record the mean norm and
# mean angle-to-axis of each draw as the null distribution.
tms = 10000
step_dict = step_2_w
# sample the trained words
dst = []
da = []
pb = tqdm(total=tms)
step_train_words = list(set(train_words).intersection(set(step_dict.keys())))
step_test_words = list(set(test_words).intersection(set(step_dict.keys())))
print("Train words : {}".format(len(step_train_words)))
print("Test words : {}".format(len(step_test_words)))
k = len(step_test_words)
for l in range(tms):
    sample_train = [random.choice(step_train_words) for i in range(k)]
    norms = [np.linalg.norm(step_dict[r]) for r in sample_train]
    angles = [angle_between(step_dict[r], xax) for r in sample_train]
    dst.append(np.mean(norms))
    da.append(np.mean(angles))
    pb.update(1)
pb.close()

# Observed statistics for the novel (test) words.
novel_dst = np.mean([np.linalg.norm(step_dict[r]) for r in step_test_words])
novel_da = np.mean([angle_between(step_dict[r], xax) for r in step_test_words])
# calculate mean(r_i) > mean(test)
r_len = len([1 for p in dst if p > novel_dst])
r_ang = len([1 for p in da if p > novel_da])
# Empirical p-values (+1 smoothing).
p_len = (r_len + 1) / tms
p_ang = (r_ang + 1) / tms
# Interactive inspection cells (expression-only, values shown in the notebook).
len(da)
p_len
p_ang
p_len
p_ang
da[0]
novel_da
p_ang
p_len
novel_da
p_len
r_len
len(test_words)
# p-values recomputed without the +1 smoothing.
p_len = (r_len) / tms
p_ang = (r_ang ) / tms
# NOTE(review): `dst` is collapsed from a list to a scalar here, so later
# cells like len(dst) / ttest(dst, ...) operate on a 0-d value.
dst = np.array(dst)
dst = np.mean(dst, axis=0)
len(dst)
len(train_words)
len(test_words)
# novel_dst = [np.linalg.norm(step_0_w[r]) for r in test_words]
novel_da = [angle_between(step_0_w[r], xax) for r in test_words]
train_da = [angle_between(step_0_w[r], xax) for r in train_words]
from scipy import stats

def ttest(dista, distb):
    # Two-sample t statistic with a pooled std over the concatenated samples;
    # relies on the global `k` for the per-group size. Returns (t, one-sided p).
    s = np.std(dista + distb)
    t = (np.mean(dista) - np.mean(distb)) / (s * np.sqrt(1/k))
    df = 2*k - 2
    p = 1 - stats.t.cdf(t,df=df)
    return t,p

ttest(da, [angle_between(step_0_w[r], xax) for r in test_words])
ttest(dst, novel_dst)
ttest(da, novel_da)
len(dst)
# NOTE(review): `t` below is undefined at module scope (ttest's `t` is local);
# leftover REPL cell, as are the df/p recomputation lines that follow.
t
from scipy import stats
df = 2*k - 2
p = 1 - stats.t.cdf(t,df=df)
p
get_angle(step_0_w['N0.0'])
angle_between(step_0_w['N0.0'], xax)
p_len
### compute similarity scores between pairs of vectors from train and test
from scipy.spatial.distance import cosine, euclidean
# NOTE(review): scipy's `cosine` is the cosine *distance* (1 - similarity),
# despite the `cos_sim` name.
cos_sim = []
euc_dist = []
step_dict = step_2_w
# sample the trained words
step_train_words = list(set(train_words).intersection(set(step_dict.keys())))
step_test_words = list(set(test_words).intersection(set(step_dict.keys())))
pb = tqdm(total=len(step_train_words))
for train_word in step_train_words:
    for test_word in step_test_words:
        cos_sim.append(cosine(step_dict[train_word],step_dict[test_word]))
        euc_dist.append(euclidean(step_dict[train_word],step_dict[test_word]))
    pb.update(1)
len(step_0_w['all'])
np.mean(cos_sim)
np.std(cos_sim)
np.mean(euc_dist)
np.std(euc_dist)
```
github_jupyter
## Perform standard imports ``` import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader from torchvision import datasets, transforms from torchvision.utils import make_grid import numpy as np import pandas as pd from sklearn.metrics import confusion_matrix import matplotlib.pyplot as plt %matplotlib inline transform = transforms.ToTensor() train_data = datasets.MNIST(root='../Data', train=True, download=True, transform=transform) test_data = datasets.MNIST(root='../Data', train=False, download=True, transform=transform) train_data test_data ``` ### Create loaders ``` train_loader = DataLoader(train_data, batch_size=10, shuffle=True) test_loader = DataLoader(test_data, batch_size=10, shuffle=False) ``` ## Define a convolutional model ``` # Define layers conv1 = nn.Conv2d(1, 6, 3, 1) conv2 = nn.Conv2d(6, 16, 3, 1) # Grab the first MNIST record for i, (X_train, y_train) in enumerate(train_data): break # Create a rank-4 tensor to be passed into the model # (train_loader will have done this already) x = X_train.view(1,1,28,28) print(x.shape) # Perform the first convolution/activation x = F.relu(conv1(x)) print(x.shape) # Run the first pooling layer x = F.max_pool2d(x, 2, 2) print(x.shape) # Perform the second convolution/activation x = F.relu(conv2(x)) print(x.shape) # Run the second pooling layer x = F.max_pool2d(x, 2, 2) print(x.shape) # Flatten the data x = x.view(-1, 5*5*16) print(x.shape) class ConvolutionalNetwork(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 6, 3, 1) self.conv2 = nn.Conv2d(6, 16, 3, 1) self.fc1 = nn.Linear(5*5*16, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84,10) def forward(self, X): X = F.relu(self.conv1(X)) X = F.max_pool2d(X, 2, 2) X = F.relu(self.conv2(X)) X = F.max_pool2d(X, 2, 2) X = X.view(-1, 5*5*16) X = F.relu(self.fc1(X)) X = F.relu(self.fc2(X)) X = self.fc3(X) return F.log_softmax(X, dim=1) torch.manual_seed(42) model = 
ConvolutionalNetwork() model def count_parameters(model): params = [p.numel() for p in model.parameters() if p.requires_grad] for item in params: print(f'{item:>6}') print(f'______\n{sum(params):>6}') count_parameters(model) ``` ## Define loss function & optimizer ``` criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=0.001) ``` ## Train the model ``` import time start_time = time.time() epochs = 5 train_losses = [] test_losses = [] train_correct = [] test_correct = [] for i in range(epochs): trn_corr = 0 tst_corr = 0 # Run the training batches for b, (X_train, y_train) in enumerate(train_loader): b+=1 # Apply the model y_pred = model(X_train) # we don't flatten X-train here loss = criterion(y_pred, y_train) # Tally the number of correct predictions predicted = torch.max(y_pred.data, 1)[1] batch_corr = (predicted == y_train).sum() trn_corr += batch_corr # Update parameters optimizer.zero_grad() loss.backward() optimizer.step() # Print interim results if b%600 == 0: print(f'epoch: {i:2} batch: {b:4} [{10*b:6}/60000] loss: {loss.item():10.8f} \ accuracy: {trn_corr.item()*100/(10*b):7.3f}%') train_losses.append(loss) train_correct.append(trn_corr) # Run the testing batches with torch.no_grad(): for b, (X_test, y_test) in enumerate(test_loader): # Apply the model y_val = model(X_test) # Tally the number of correct predictions predicted = torch.max(y_val.data, 1)[1] tst_corr += (predicted == y_test).sum() loss = criterion(y_val, y_test) test_losses.append(loss) test_correct.append(tst_corr) print(f'\nDuration: {time.time() - start_time:.0f} seconds') # print the time elapsed ``` ## Plot the loss and accuracy comparisons ``` plt.plot(train_losses, label='training loss') plt.plot(test_losses, label='validation loss') plt.title('Loss at the end of each epoch') plt.legend(); test_losses plt.plot([t/600 for t in train_correct], label='training accuracy') plt.plot([t/100 for t in test_correct], label='validation accuracy') 
plt.title('Accuracy at the end of each epoch') plt.legend(); ``` ## Evaluate Test Data ``` # Extract the data all at once, not in batches test_load_all = DataLoader(test_data, batch_size=10000, shuffle=False) with torch.no_grad(): correct = 0 for X_test, y_test in test_load_all: y_val = model(X_test) # we don't flatten the data this time predicted = torch.max(y_val,1)[1] correct += (predicted == y_test).sum() print(f'Test accuracy: {correct.item()}/{len(test_data)} = {correct.item()*100/(len(test_data)):7.3f}%') ``` Recall that our [784,120,84,10] ANN returned an accuracy of 97.25% after 10 epochs. And it used 105,214 parameters to our current 60,074. ## Display the confusion matrix ``` # print a row of values for reference np.set_printoptions(formatter=dict(int=lambda x: f'{x:4}')) print(np.arange(10).reshape(1,10)) print() # print the confusion matrix print(confusion_matrix(predicted.view(-1), y_test.view(-1))) ``` ## Examine the misses ``` misses = np.array([]) for i in range(len(predicted.view(-1))): if predicted[i] != y_test[i]: misses = np.append(misses,i).astype('int64') # Display the number of misses len(misses) # Display the first 10 index positions misses[:10] # Set up an iterator to feed batched rows r = 12 # row size row = iter(np.array_split(misses,len(misses)//r+1)) nextrow = next(row) print("Index:", nextrow) print("Label:", y_test.index_select(0,torch.tensor(nextrow)).numpy()) print("Guess:", predicted.index_select(0,torch.tensor(nextrow)).numpy()) images = X_test.index_select(0,torch.tensor(nextrow)) im = make_grid(images, nrow=r) plt.figure(figsize=(10,4)) plt.imshow(np.transpose(im.numpy(), (1, 2, 0))); ``` ## Run a new image through the model ``` x = 2019 plt.figure(figsize=(1,1)) plt.imshow(test_data[x][0].reshape((28,28)), cmap="gist_yarg"); model.eval() with torch.no_grad(): new_pred = model(test_data[x][0].view(1,1,28,28)).argmax() print("Predicted value:",new_pred.item()) ```
github_jupyter
``` %matplotlib inline import matplotlib import seaborn as sns import matplotlib.pyplot as plt import pandas as pd import re %load_ext autoreload %autoreload 2 import sys sys.path.append("../..") sys.path.append("../") sys.path.append("./") from src.data_pipeline.DataLoader import DataLoader from src.utility.sys_utils import get_spark # import result checking tools from src.utility.Summary import Summary spark = get_spark(cores=4) # change cores up to 6 if needed dataloader = DataLoader(dataset_name="user_10_item_1_exp", config_name="default_config.json") ``` ### Visualization of Evaluation Metric to Recommendation Size ``` summary = Summary(dataloader.get_config().db_path) summary hyper = summary.get_optimal_params("user_10_item_1_exp", "surprise_SVD", "ndcg@1") result = summary.get_result_for_params("user_10_item_1_exp", "surprise_SVD", hyper, "ndcg@1") result ``` ### Read ndcg top k info from parsed csv ``` parsed = pd.read_csv("../parse_results/results/topk_results.csv", sep = ',') parsed.head() parsed['model'].unique() parsed[parsed['model']=='BPR'].head() modelname = parsed['model'].unique() modelname ## plot ndcg@k for all models def plot_topk_ndcg(dataframe): """ Input: a tidy dataframe with topk for all models Output: a ndcg@k plot """ ## add more if needed color = ['black','m','goldenrod', 'wheat', 'c', 'brown', 'slateblue', 'skyblue', 'yellowgreen', 'tomato', 'darkorange', 'lavender'] marker = ['P', '^' ,'o','H', 'v', 'D', 'X', 'p', 'x','*'] ## get model names (number of line) dfs = dict(tuple(dataframe.groupby('model'))) modelname = dataframe['model'].unique() ## size of template size = len(modelname) colorsize = len(color) markersize = len(marker) tempsize = min(colorsize, markersize) ## plotting fig, ax = plt.subplots(1,1, figsize = (10,6)) t = 0 ## default template index for i in range(size): t = i if t > tempsize: ## reset t = 0 ax.plot(dfs[modelname[i]]['k'], dfs[modelname[i]]['value'], marker=marker[t], color=color[t], linewidth=2, label = 
modelname[i]) t =+ 1 ax.legend() ax.set_xlabel("k", fontsize = 18) ax.set_ylabel('ndcg@k', fontsize = 18) ax.xaxis.set_tick_params(labelsize = 15) ax.yaxis.set_tick_params(labelsize = 15) ax.legend(bbox_to_anchor=(0.5,1.15), loc="upper center", ncol=5) plt.tight_layout() plt.savefig('figs/ndcg@k.eps', format='eps') return ax plot_topk_ndcg(parsed) ```
github_jupyter
``` #Import necessary libraries/packages import numpy as np import pandas as pd from scipy.io import arff #import glob import matplotlib.pyplot as plt %matplotlib inline #from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder, normalize #from sklearn.linear_model import LogisticRegression from sklearn.ensemble import IsolationForest from sklearn.svm import OneClassSVM from sklearn.neighbors import LocalOutlierFactor #from sklearn.metrics import mean_absolute_error from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score #from sklearn.metrics import average_precision_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import roc_auc_score #from sklearn.metrics import roc_auc from sklearn.metrics import auc ### Suppresses Warning import warnings warnings.filterwarnings('ignore') #Metrics for Isolation Forest with just one file # load the train data through arff under scipy data_train, meta_train = arff.loadarff('HeartbeatDimension1_TRAIN.arff') # Put the array in dataframe format with help of panda df_train=pd.DataFrame(data_train) print(df_train) # Extract attributes/features and output X_train = df_train.iloc[:,: -1].values # Extract output and encode it- 1 for normal and 0 for abnormal y_train = LabelEncoder().fit_transform(df_train.iloc[:,-1]) print(y_train) y_train = np.where(y_train==0,-1, y_train) print(y_train) # Rows other than outlier rows normal=y_train ==1 print(normal) # load the test dataset # Normal Train data after removing outliers X_train, y_train=X_train[normal, :], y_train[normal] data_test, meta_test = arff.loadarff('HeartbeatDimension1_TEST.arff') df_test=pd.DataFrame(data_test) X_test = df_test.iloc[:,: -1].values y_test = LabelEncoder().fit_transform(df_test.iloc[:,-1]) y_test = np.where(y_test==0,-1, y_test) # Isolation Forest # fit the model isf=IsolationForest(contamination=0.1) isf.fit(X_train) #predict 
values y_pred_test_isf=isf.fit_predict(X_test) y_status_test_isf=isf.predict(X_test) print(isf.decision_function(X_test)) # calculate metrics and store print(accuracy_score(y_test, y_pred_test_isf)) print(precision_score(y_test, y_pred_test_isf)) print(recall_score(y_test, y_pred_test_isf)) print(roc_auc_score(y_test, y_pred_test_isf)) print(confusion_matrix (y_test, y_pred_test_isf)) # Comparative performance on True and Predicted labels- Plottting fig, ax = plt.subplots(figsize=(8.5, 6), dpi=130) ax.plot(y_test) ax.scatter(range(len(y_test)), y_test, s=13, label = 'True Label') ax.plot(y_pred_test_isf) ax.scatter(range(len(y_pred_test_isf)), y_pred_test_isf, s=13, label = 'Predicted Label') ax.set_title('True vs Predicted(Isolation Forest) Label') ax.set_xlabel('Data Points') ax.set_ylabel('Label') ax.legend() plt.show() # Metrics for Local Outlier Factor with just one file # load the train data through arff under scipy data_train, meta_train = arff.loadarff('HeartbeatDimension1_TRAIN.arff') # Put the array in dataframe format with help of panda df_train=pd.DataFrame(data_train) #print(df_train) # Extract attributes/features and output X_train = df_train.iloc[:,: -1].values # Extract output and encode it- 1 for normal and 0 for abnormal y_train = LabelEncoder().fit_transform(df_train.iloc[:,-1]) #print(y_train) y_train = np.where(y_train==0,-1, y_train) print(y_train) # Rows other than outlier rows normal=y_train ==1 print(normal) # load the test dataset # Normal Train data after removing outliers X_train, y_train=X_train[normal, :], y_train[normal] #print(X_train.shape) print(y_train) data_test, meta_test = arff.loadarff('HeartbeatDimension1_TEST.arff') df_test=pd.DataFrame(data_test) X_test = df_test.iloc[:,: -1].values y_test = LabelEncoder().fit_transform(df_test.iloc[:,-1]) #print(y_test) y_test = np.where(y_test==0,-1, y_test) print(y_test) # Local Outlier Factor # fit the model lof=LocalOutlierFactor(novelty=False) isf.fit(X_train) #predict values 
y_pred_test_lof=lof.fit_predict(X_test) print(y_test, y_pred_test_lof) # calculate metrics and store print(accuracy_score(y_test, y_pred_test_lof)) print(precision_score(y_test, y_pred_test_lof)) print(recall_score(y_test, y_pred_test_lof)) print(roc_auc_score(y_test, y_pred_test_lof)) print(confusion_matrix (y_test, y_pred_test_lof)) # Comparative performance on True and Predicted labels- Plottting fig, ax = plt.subplots(figsize=(8.5, 6), dpi=130) ax.plot(y_test) ax.scatter(range(len(y_test)), y_test, s=13, label = 'True Label') ax.plot(y_pred_test_lof) ax.scatter(range(len(y_pred_test_lof)), y_pred_test_lof, s=13, label = 'Predicted Label') ax.set_title('True vs Predicted(Local Outlier factor) Label') ax.set_xlabel('Data Points') ax.set_ylabel('Label') ax.legend() plt.show() #Metrics for OneClassSVM with just one file # load the train data through arff under scipy data_train, meta_train = arff.loadarff('HeartbeatDimension1_TRAIN.arff') # Put the array in dataframe format with help of panda df_train=pd.DataFrame(data_train) #print(df_train) # Extract attributes/features and output X_train = df_train.iloc[:,: -1].values # Extract output and encode it- 1 for normal and 0 for abnormal y_train = LabelEncoder().fit_transform(df_train.iloc[:,-1]) #print(y_train) y_train = np.where(y_train==0,-1, y_train) print(y_train) # Rows other than outlier rows normal=y_train ==1 print(normal) # load the test dataset # Normal Train data after removing outliers X_train, y_train=X_train[normal, :], y_train[normal] #print(X_train.shape) print(y_train) data_test, meta_test = arff.loadarff('HeartbeatDimension1_TEST.arff') df_test=pd.DataFrame(data_test) X_test = df_test.iloc[:,: -1].values y_test = LabelEncoder().fit_transform(df_test.iloc[:,-1]) #print(y_test) y_test = np.where(y_test==0,-1, y_test) print(y_test) #OneClassSVM Model # fit the model svm=OneClassSVM(kernel='rbf') svm.fit(X_train) #predict values y_pred_test_svm=svm.fit_predict(X_test) # calculate metrics and store 
print(accuracy_score(y_test, y_pred_test_svm)) print(precision_score(y_test, y_pred_test_svm)) print(recall_score(y_test, y_pred_test_svm)) print(roc_auc_score(y_test, y_pred_test_svm)) print(confusion_matrix (y_test, y_pred_test_svm)) # Comparative performance on True and Predicted labels- Plottting fig, ax = plt.subplots(figsize=(8.5, 6), dpi=130) ax.plot(y_test) ax.scatter(range(len(y_test)), y_test, s=13, label = 'True Label') ax.plot(y_pred_test_svm) ax.scatter(range(len(y_pred_test_svm)), y_pred_test_svm, s=13, label = 'Predicted Label') ax.set_title('True vs Predicted (OneClassSVM) Label') ax.set_xlabel('Data Points') ax.set_ylabel('Label') ax.legend() plt.show() ''' #Calculating the Metrics for three Outlier Models, namely ISOLATION FOREST,LOCAL OUTLIER FACTOR and OneClassSVM and with all the 61 data files- one by one via loop ''' #Initialise the lists for storing various metrics for Isolation Forest over all the data files acc_isf=[] precision_isf=[] recall_isf=[] roc_isf=[] #Initialise the lists for storing various metrics for Local Outlier Factor over all the data files acc_lof=[] precision_lof=[] recall_lof=[] roc_lof=[] #Initialise the lists for storing various metrics for OneClassSVM over all the data files acc_svm=[] precision_svm=[] recall_svm=[] roc_svm=[] # loop over all data files in format .arff and calculating metrics for the three models( 61 in number) for i in range(1, 61): # load the training through arff under scipy data_train, meta_train = arff.loadarff('HeartbeatDimension'+str(i)+'_TRAIN.arff') # Put the array in dataframe format with help of panda df_train=pd.DataFrame(data_train) # Extract attributes/features X and output/labels y from training data X_train = df_train.iloc[:,: -1].values # Extract output/labels and encode it- 1 for normal and -1 for abnormal y_train = LabelEncoder().fit_transform(df_train.iloc[:,-1]) # changing the code 0 wherever it occurs to -1 y_train = np.where(y_train==0,-1, y_train) # Rows in the data with label 
normal normal=y_train ==1 # Training data after removing outliers/abnormal ( i.e. normal training data) X_train, y_train=X_train[normal, :], y_train[normal] # Now load the test dataset- it is noted that downloaded data is already split between training and test data data_test, meta_test = arff.loadarff('HeartbeatDimension'+str(i)+'_TEST.arff') df_test=pd.DataFrame(data_test) # Extract attributes/features X and output/labels y from test data X_test = df_test.iloc[:,: -1].values y_test = LabelEncoder().fit_transform(df_test.iloc[:,-1]) # changing the code 0 wherever it occurs to -1 y_test = np.where(y_test==0,-1, y_test) ''' Isolation Forest ''' # Set up the model isf=IsolationForest(contamination=0.1) #fit the model isf.fit(X_train) #predict labels for the test data y_pred_test_isf=isf.fit_predict(X_test) # calculate metrics and store them acc_isf.append(accuracy_score(y_test,y_pred_test_isf)) precision_isf.append(precision_score(y_test,y_pred_test_isf)) recall_isf.append(recall_score(y_test,y_pred_test_isf)) roc_isf.append(roc_auc_score(y_test,y_pred_test_isf)) ''' Local Outlier Factor ''' # Set up the model lof=LocalOutlierFactor(novelty=False) # Fit the model isf.fit(X_train) #Predict labels for the test data y_pred_test_lof=lof.fit_predict(X_test) #print(y_test, y_pred_test) # calculate and store metrics acc_lof.append(accuracy_score(y_test,y_pred_test_lof)) precision_lof.append(precision_score(y_test,y_pred_test_lof)) recall_lof.append(recall_score(y_test,y_pred_test_lof)) roc_lof.append(roc_auc_score(y_test,y_pred_test_lof)) ''' One Class SVM ''' # Set up the model svm=OneClassSVM(kernel='rbf') # Fit the model svm.fit(X_train) #Predict labels for the test data y_pred_test_svm=svm.fit_predict(X_test) # calculate and Store metrics acc_svm.append(accuracy_score(y_test,y_pred_test_svm)) precision_svm.append(precision_score(y_test,y_pred_test_svm)) recall_svm.append(recall_score(y_test,y_pred_test_svm)) roc_svm.append(roc_auc_score(y_test,y_pred_test_svm)) # print 
average of metrics print("printing average of various metrics for Isolation Forest") print(np.average(acc_isf)) print(np.average(precision_isf)) print(np.average(recall_isf)) print(np.average(roc_isf)) print("printing average of various metrics for Local Outlier Factor") print(np.average(acc_lof)) print(np.average(precision_lof)) print(np.average(recall_lof)) print(np.average(roc_lof)) print("printing average of various metrics for OneClassSVM") print(np.average(acc_svm)) print(np.average(precision_svm)) print(np.average(recall_svm)) print(np.average(roc_svm)) # Comparative performance on Accuracy- Plottting fig, ax = plt.subplots(figsize=(8.5, 6), dpi=130) ax.plot(acc_isf) ax.scatter(range(len(acc_isf)), acc_isf, s=13, label = 'Accuracy score with with ISF') ax.plot(acc_lof) ax.scatter(range(len(acc_lof)), acc_lof, s=13, label = 'Accuracy score with with LOF') ax.plot(acc_svm) ax.scatter(range(len(acc_svm)), acc_svm, s=13, label = 'Accuracy score with OneClassSVM') ax.set_ylim(bottom=0, top=1) ax.grid(True) ax.set_title('Accuracy') ax.set_xlabel('Dimensions/Data Files') ax.set_ylabel('Accuracy Score') ax.legend() plt.show() # Comparative performance on Precision- Plotting fig, ax = plt.subplots(figsize=(8.5, 6), dpi=130) ax.plot(precision_isf) ax.scatter(range(len(precision_isf)), precision_isf, s=13, label = 'Precision score with ISF') ax.plot(precision_lof) ax.scatter(range(len(precision_lof)), precision_lof, s=13, label = 'Precision score with LOF') ax.plot(precision_svm) ax.scatter(range(len(precision_svm)), precision_svm, s=13, label = 'Precision score with OneClassSVM') ax.set_ylim(bottom=0, top=1) ax.grid(True) ax.set_title('Precision') ax.set_xlabel('Dimensions/Data Files') ax.set_ylabel('Precision Score') ax.legend() plt.show() # Comparative performance on Recall- Plotting fig, ax = plt.subplots(figsize=(8.5, 6), dpi=130) ax.plot(recall_isf) ax.scatter(range(len(recall_isf)), recall_isf, s=13, label = 'Recall score with ISF') ax.plot(recall_lof) 
ax.scatter(range(len(recall_lof)), recall_lof, s=13, label = 'Recall score with LOF') ax.plot(recall_svm) ax.scatter(range(len(recall_svm)), recall_svm, s=13, label = 'Recall score with OneClassSVM') ax.set_ylim(bottom=0, top=1) ax.grid(True) ax.set_title('Recall') ax.set_xlabel('Dimensions/Data Files') ax.set_ylabel('Recall Score') ax.legend() plt.show() # Comparative performance of ROC AUC- Plotting fig, ax = plt.subplots(figsize=(8.5, 6), dpi=130) ax.plot(roc_isf) ax.scatter(range(len(roc_isf)), roc_isf, s=13, label = 'ROC AUC score with ISF') ax.plot(roc_lof) ax.scatter(range(len(roc_lof)), roc_lof, s=13, label = 'ROC AUC score with LOF') ax.plot(roc_svm) ax.scatter(range(len(roc_svm)), roc_svm, s=13, label = 'ROC AUC score with OneClassSVM') ax.set_ylim(bottom=0, top=1) ax.grid(True) ax.set_title('ROC AUC ') ax.set_xlabel('Dimensions/Data Files') ax.set_ylabel('ROC AUC Score') ax.legend() plt.show() ```
github_jupyter
``` #hide #skip ! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab #export from fastai.torch_basics import * from fastai.callback.hook import * #hide from nbdev.showdoc import * # default_exp vision.models.unet ``` # Dynamic UNet > Unet model using PixelShuffle ICNR upsampling that can be built on top of any pretrained architecture ``` #export def _get_sz_change_idxs(sizes): "Get the indexes of the layers where the size of the activation changes." feature_szs = [size[-1] for size in sizes] sz_chg_idxs = list(np.where(np.array(feature_szs[:-1]) != np.array(feature_szs[1:]))[0]) return sz_chg_idxs #hide test_eq(_get_sz_change_idxs([[3,64,64], [16,64,64], [32,32,32], [16,32,32], [32,32,32], [16,16]]), [1,4]) test_eq(_get_sz_change_idxs([[3,64,64], [16,32,32], [32,32,32], [16,32,32], [32,16,16], [16,16]]), [0,3]) test_eq(_get_sz_change_idxs([[3,64,64]]), []) test_eq(_get_sz_change_idxs([[3,64,64], [16,32,32]]), [0]) #export class UnetBlock(Module): "A quasi-UNet block, using `PixelShuffle_ICNR upsampling`." 
@delegates(ConvLayer.__init__) def __init__(self, up_in_c, x_in_c, hook, final_div=True, blur=False, act_cls=defaults.activation, self_attention=False, init=nn.init.kaiming_normal_, norm_type=None, **kwargs): self.hook = hook self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, act_cls=act_cls, norm_type=norm_type) self.bn = BatchNorm(x_in_c) ni = up_in_c//2 + x_in_c nf = ni if final_div else ni//2 self.conv1 = ConvLayer(ni, nf, act_cls=act_cls, norm_type=norm_type, **kwargs) self.conv2 = ConvLayer(nf, nf, act_cls=act_cls, norm_type=norm_type, xtra=SelfAttention(nf) if self_attention else None, **kwargs) self.relu = act_cls() apply_init(nn.Sequential(self.conv1, self.conv2), init) def forward(self, up_in): s = self.hook.stored up_out = self.shuf(up_in) ssh = s.shape[-2:] if ssh != up_out.shape[-2:]: up_out = F.interpolate(up_out, s.shape[-2:], mode='nearest') cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1)) return self.conv2(self.conv1(cat_x)) # export class ResizeToOrig(Module): "Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`." def __init__(self, mode='nearest'): self.mode = mode def forward(self, x): if x.orig.shape[-2:] != x.shape[-2:]: x = F.interpolate(x, x.orig.shape[-2:], mode=self.mode) return x #export class DynamicUnet(SequentialEx): "Create a U-Net from a given architecture." 
def __init__(self, encoder, n_classes, img_size, blur=False, blur_final=True, self_attention=False, y_range=None, last_cross=True, bottle=False, act_cls=defaults.activation, init=nn.init.kaiming_normal_, norm_type=None, **kwargs): imsize = img_size sizes = model_sizes(encoder, size=imsize) sz_chg_idxs = list(reversed(_get_sz_change_idxs(sizes))) self.sfs = hook_outputs([encoder[i] for i in sz_chg_idxs], detach=False) x = dummy_eval(encoder, imsize).detach() ni = sizes[-1][1] middle_conv = nn.Sequential(ConvLayer(ni, ni*2, act_cls=act_cls, norm_type=norm_type, **kwargs), ConvLayer(ni*2, ni, act_cls=act_cls, norm_type=norm_type, **kwargs)).eval() x = middle_conv(x) layers = [encoder, BatchNorm(ni), nn.ReLU(), middle_conv] for i,idx in enumerate(sz_chg_idxs): not_final = i!=len(sz_chg_idxs)-1 up_in_c, x_in_c = int(x.shape[1]), int(sizes[idx][1]) do_blur = blur and (not_final or blur_final) sa = self_attention and (i==len(sz_chg_idxs)-3) unet_block = UnetBlock(up_in_c, x_in_c, self.sfs[i], final_div=not_final, blur=do_blur, self_attention=sa, act_cls=act_cls, init=init, norm_type=norm_type, **kwargs).eval() layers.append(unet_block) x = unet_block(x) ni = x.shape[1] if imsize != sizes[0][-2:]: layers.append(PixelShuffle_ICNR(ni, act_cls=act_cls, norm_type=norm_type)) layers.append(ResizeToOrig()) if last_cross: layers.append(MergeLayer(dense=True)) ni += in_channels(encoder) layers.append(ResBlock(1, ni, ni//2 if bottle else ni, act_cls=act_cls, norm_type=norm_type, **kwargs)) layers += [ConvLayer(ni, n_classes, ks=1, act_cls=None, norm_type=norm_type, **kwargs)] apply_init(nn.Sequential(layers[3], layers[-2]), init) #apply_init(nn.Sequential(layers[2]), init) if y_range is not None: layers.append(SigmoidRange(*y_range)) super().__init__(*layers) def __del__(self): if hasattr(self, "sfs"): self.sfs.remove() from fastai.vision.models import resnet34 m = resnet34() m = nn.Sequential(*list(m.children())[:-2]) tst = DynamicUnet(m, 5, (128,128), norm_type=None) x = 
torch.randn(2, 3, 128, 128) y = tst(x) test_eq(y.shape, [2, 5, 128, 128]) tst = DynamicUnet(m, 5, (128,128), norm_type=None) x = torch.randn(2, 3, 127, 128) y = tst(x) ``` ## Export - ``` #hide from nbdev.export import * notebook2script() ```
github_jupyter