code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="lXnDTL32PCMi"
import torch
from torchvision import datasets, transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

# + id="0HHk6OKhbcnm"
# DataLoaders over MNIST, normalised with the dataset's canonical mean/std.
train_dataset = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,)),
                   ])),
    batch_size=200, shuffle=True)

test_dataset = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,)),
                   ])),
    batch_size=200, shuffle=True)


# + id="L2YIodKk0Kli"
class Net(nn.Module):
    """Three-layer MLP classifier over flattened 28x28 MNIST digits.

    The forward pass returns class probabilities (softmax) rather than
    logits, because the training loop below regresses them against
    one-hot targets with ``nn.MSELoss``.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 200)
        self.fc2 = nn.Linear(200, 200)
        self.fc3 = nn.Linear(200, 10)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        # FIX: F.softmax without an explicit `dim` is deprecated and
        # ambiguous; normalise over the class axis (last dimension, so
        # both batched (N, 10) and unbatched (10,) inputs work).
        return F.softmax(x, dim=-1)


net = Net()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
criterion = nn.MSELoss()

# + id="Zs_fX3mOQE-G"
epochs = 15
log_interval = 10
accuracy_hist = []
loss_hist = []
for epoch in range(epochs):
    for batch_idx, (data, target) in enumerate(train_dataset):
        # torch.autograd.Variable is deprecated: plain tensors already
        # participate in autograd, so no wrapping is needed.
        v_target = F.one_hot(target, num_classes=10).float()
        v_data = data.view(-1, 28 * 28)
        optimizer.zero_grad()
        net_out = net(v_data)
        # FIX: divide by the actual batch length, not the configured
        # DataLoader batch_size, so a short final batch does not skew
        # the reported accuracy.
        accuracy = (torch.argmax(net_out, 1) == target).sum().item() / len(target)
        loss = criterion(net_out, v_target)
        loss.backward()
        optimizer.step()
        accuracy_hist.append(accuracy)
        # .item() detaches a Python float; `.data` access is deprecated.
        loss_hist.append(loss.item())
        if batch_idx % log_interval == 0:
            print('epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} \tAccuracy: {:.6f} '.format(
                epoch, batch_idx * len(data), len(train_dataset.dataset),
                100. * batch_idx / len(train_dataset), loss.item(), accuracy))

# + id="_Aj1stHevVyy"
# Training curves: accuracy (top) and loss (bottom) per batch.
fig = plt.figure(figsize=(20, 10))
plt.subplot(2, 1, 1)
plt.plot(accuracy_hist)
plt.subplot(2, 1, 2)
plt.plot(loss_hist)

# + id="b9gpcIOxz4X2"
# Evaluate on the held-out test set (no gradients needed).
test_loss = 0
correct = 0
with torch.no_grad():
    for data, target in test_dataset:
        data = data.view(-1, 28 * 28)
        net_out = net(data)
        test_loss += criterion(net_out, F.one_hot(target, num_classes=10).float()).item()
        pred = net_out.max(1)[1]  # index of the max probability = predicted class
        correct += pred.eq(target).sum().item()
# NOTE(review): this divides the sum of per-batch *mean* losses by the
# number of samples, matching the original notebook's reporting scale.
test_loss /= len(test_dataset.dataset)
print('Test set: Loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
    test_loss, correct, len(test_dataset.dataset),
    100. * correct / len(test_dataset.dataset)))

# + id="f-I8j2xgbeyt"
# Export the trained network to ONNX.  The dummy input is kept 1-D
# (784,) as in the original export, matching the flat per-sample
# vectors saved in the next cell.
net.eval()
dummy_input = torch.randn(28 * 28, requires_grad=True)
torch.onnx.export(net, dummy_input, "network.onnx",
                  export_params=True,
                  opset_version=10,
                  do_constant_folding=True,
                  input_names=['modelInput'],
                  output_names=['modelOutput'])

# + id="z_xJxJPYU9op"
# Dump the first `test_size` test samples (flattened images and one-hot
# labels) for external use.  Renamed locals so the builtin `input` is
# not shadowed; the on-disk file names are unchanged.
test_size = 1000
test_inputs = np.zeros([test_size, 28 * 28])
test_outputs = np.zeros([test_size, 10])
for i in range(test_size):
    test_inputs[i] = np.array(test_dataset.dataset[i][0][0].view(-1, 28 * 28))
    test_outputs[i][test_dataset.dataset[i][1]] = 1.0
np.save('input', test_inputs)
np.save('output', test_outputs)
network_MNIST/create_network_MNIST.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <div> # <h1 align="center"> GBM8378 - Principes d'imagerie biomédicale</h1> # <h2 align="center"> Laboratoire 2 - Ultrasons</h2> # <br> # # <b> # Prénom1 Nom1 - Matricule1<br> # Prénom2 Nom2 - Matricule2 # </b> # </div> # <div align="center" class="alert alert-block alert-danger"> # <b>Assurez vous d'avoir indiqué vos noms et matricules dans la cellule précédente.</b> # </div> # <div class="alert alert-block alert-danger"> # <b> # Avant de commencer à répondre au différentes questions, veuillez vérifier que le Jupyter Notebook fonctionne correctement: # <br> # <br> # 1. Redémarrez le noyau (dans la barre de menu : "Kernel" $\rightarrow$ "Restart") # <br> # 2. Lancez toutes les cellules (dans la barre de menu : "Cell" $\rightarrow$ "Run All"). # </b> # </div> # <div class="alert alert-block alert-info"> # # <h1 align="center"> Travail à effectuer </h1> # # <b><u>Le PDF contenant la partie théorique du TP est disponible sur Moodle.</u> # <br><br> # # Avant de remettre votre travail, assurez-vous d'avoir rempli toutes les sections `"Commencez votre code ici"` et `"Double-cliquez pour entrez votre réponse"`.</b> # </div> # <h1> <font color='teal'> Section 1 - Questions préliminaires (11pts) </font></h1> # <div class="alert alert-block alert-warning"><b> # 1.1. Quel est l’impact de la fréquence de réponse du transducteur sur: (1pt)<br> # <ul> # a. la profondeur de pénétration ?<br> # b. la résolution spatiale dans les différentes directions ?<br> # c. l’absorption ?</ul> # </b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>1.2. À quel temps le pic de l’écho devrait-il apparaître sur l’écran ? 
(1pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>1.3. Pour des signaux expérimentaux, est-il mieux d'enregistrer une fenêtre temporelle plus petite ou plus grande afin de déterminer la réponse en fréquence ? Justifier votre réponse. (1pt)<br></b> # # <img src="attachment:image.png" alt="Temporal Windows" style="display: block;margin-left: auto;margin-right: auto;width: 90%;border-radius: 30px;border: 2px solid black;"/> # # <center> <i>Question 1.3. Exemple de fenêtres temporelles différentes. Enregistrements de 10 s (gauche) et de 25 s (droite). # </i></center></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>1.4. Donnez trois types d’interactions des ondes acoustiques avec la matière ? Avec quel autre domaine de la physique pouvez-vous faire l’analogie. (1pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>1.5. Supposons que l’on essaie d’imager deux structures différentes. L’une ayant une impédance acoustique de 1.70, ce qui se rapproche de celle du milieu dans lequel se propage l’onde (1.62) et l’autre possédant une impédance beaucoup plus élevée (5.75). Vous pouvez supposer que les deux structures ont une absorption négligeable. # <ul> # a. Quelle structure sera le plus facilement identifiable ? (0.5pt)<br> # b. Considérant une incidence normale, quel est le pourcentage de l’intensité de l’onde acoustique qui sera recaptée par le transducteur suite à la réflexion sur la première structure dense (considérer l’atténuation de l’onde par le milieu = coefficient d’absorption de l’eau, considérez une onde accoustique de 15Mhz et utilisez α=a.f ) ? (1.5pt)<br> # c. 
Quel est le pourcentage de l’intensité de l’onde acoustique qui sera recaptée suite à la réflexion sur la seconde structure ? (Ne pas tenir compte des réflexions multiples) (1.5pts)<br> # d. Si la première structure était plus profonde que la seconde, qu’arriverait-il (Ne pas tenir compte des réflexions multiples)? (1pt)<br> # e. Quelle est la difficulté d’utiliser l’échographie pour imager des tumeurs cérébrales (comme proposé par <NAME> et <NAME>). (0.5pt) <br> # </ul> # # <img src="attachment:image.png" alt="first_graph"/></b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>1.6. Expliquer pourquoi il n’y a pas de problème à faire transiter le signal d’aller et de retour par le même canal du transducteur au pulseur. (1pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>1.7. Prouver que pour un signal périodique, de la forme $f(t)= sin (\omega_0t)$, la transformée de Fourier fait ressortir le contenu fréquentiel. (1pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <h1> <font color='teal'> Section 2 - Signal ultrasonore (4pts) </font></h1> # + # Dans cette cellule, nous importons les librairies/fonctions qui nous serviront dans ce notebook. # Librairies import numpy as np import matplotlib.pyplot as plt # Fonctions from numpy.fft import fft, fft2, fftshift, ifft, ifftshift from numpy.matlib import repmat from IPython.display import HTML # %matplotlib inline # - # <div class="alert alert-block alert-warning"><b>2.1. Le signal <I>scanA.npy</I> représente la réponse ultrasonore du scan d'une tige. Interprétez la décroissance du signal. (0.5pt)</b></div> # + # On charge le fichier'scanA.npy' et on l'affiche. 
scanA = np.load('scanA.npy') plt.figure(figsize=[8, 8]) plt.plot(scanA) plt.title("Signal ScanA") plt.xlabel("Nombre d'échantillon") plt.ylabel("Amplitude de l'onde sonore"); # - # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"> # <b>2.2. Proposez une méthode pour la corriger et appliquez cette méthode. Affichez l’écho corrigé de ce biais. (1pt)</b> # </div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # + plt.figure(figsize=[8, 8]) # Commencez votre code ici (7 lignes max) plt.title("Echo corrigé") plt.xlabel("Nombre d'échantillon") plt.ylabel("Amplitude de l'onde sonore"); # - # <div class="alert alert-block alert-warning"><b>2.3. Sachant que la fréquence d’échantillonnage est de 100 MHz et que la vitesse du son dans l'eau est de 1500 m/s, à quelle distance est placée la tige? (0.5pt) </b></div> # # # + # Commencez votre code ici (7 lignes max) print(f"La distance de la tige est de {d} m.") # - # <div class="alert alert-block alert-warning"><b>2.4. Effectuez la transformée de Fourier numérique des données recueillies afin de retrouver la réponse en fréquence du transducteur. Affichez le résultat obtenu ainsi que les données fournies par le constructeur (<I>H2.npy</I>) sur un même graphique, identifiez les axes et la fréquence centrale. (1pt)</b></div> # + plt.figure(figsize=[8, 8]) # Commencez votre code ici (3 lignes max) # Comparer avec les données constructeur H2 = np.load('H2.npy') # Interpolate between 0 and 50 Mhz nbpts = 2048 # Nombre de données acquises par 1 transducteur H2 = np.interp(np.linspace(0, 50, nbpts // 2 + 1), np.linspace(0, 30, len(H2)), H2, left=0, right=0) H2 = np.concatenate((H2[-1:0:-1], H2[:-1])) plt.plot(np.linspace(-Fs/2,Fs/2,2048), H2, color='red'); # - # <div class="alert alert-block alert-warning"><b>2.5. Que venez-vous de mesurer ? 
(0.5pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>2.6. Comparez avec la valeur fournie par le fabriquant du transducteur (voir section suivante). (0.5pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <h1> <font color='teal'> Section 3 - Reconstruction de l'image (7pts) </font></h1> # <div class="alert alert-block alert-info"> # <b>Données : # Les données dans le fichier <I>rawdata.npy</I> représentent une image obtenue en effectuant un scan en mode B avec la sonde étudiée précédemment sur un fantôme contenant des billes. Chaque colonne correspond à la réception d’un transducteur. # Pour un scan donné, l’onde acoustique se propage selon l’axe des z, lorsque l’onde rencontre un changement d’indice, un signal est réfléchi et pourra être mesuré par le transducteur. Il est à noter que le signal mesuré est un signal temporel et que le signal recherché est une mesure spatiale.<br><br> # # Déconvolution : # Les données recueillies par chaque transducteur peuvent être représentées mathématiquement approximativement<sup>1</sup> comme : # # $g(t)=\int_{-\infty}^{\infty} h(t-\tau)\cdot f(\tau) d\tau = h(\tau)\star f(\tau)$ # (1)<br> # où :<ul> # $g(t)=$données recueillies<br> # $f(t)=$données réelles<br> # $h(t)=$réponse temporelle du transducteur<br> # </ul> # </b> # <sup>1</sup>Le signal provenant de réflexions sur des éléments de d’autres transducteur n’est pas considéré. 
# <div class="alert alert-block alert-info" style="display: flex;flex-wrap: wrap; align-content: center;padding: 1%;"> # <img src="attachment:image.png" style="margin-left:5%; width: 50%; border-radius: 30px; border: 2px solid black;"/> # <table style="margin-right: 5%; background-color: white;border: 2px solid black; width:30%;margin: 0"> # <tr> # <th>Frequency (MHz)</th> # <th>Amplitude (u.a.)</th> # </tr> # <tr><td>0</td><td>0.00</td></tr> # <tr><td>3</td><td>0.05</td></tr> # <tr><td>6</td><td>0.19</td></tr> # <tr><td>9</td><td>0.38</td></tr> # <tr><td>12</td><td>0.80</td></tr> # <tr><td>14</td><td>1.00</td></tr> # <tr><td>15</td><td>0.92</td></tr> # <tr><td>18</td><td>0.52</td></tr> # <tr><td>21</td><td>0.15</td></tr> # <tr><td>24</td><td>0.05</td></tr> # <tr><td>27</td><td>0.02</td></tr> # <tr><td>30</td><td>0.00</td></tr> # </table> # </div> # <center> <i>Données fournies par le fabriquant du transducteur. </i></center> # </div> # <div class="alert alert-block alert-warning"> # <b>3.1. Comment interprétez-vous cette convolution? Pourquoi le signal recueilli diffère-t-il du signal réel ? <br> # La réponse du transducteur est fournie par le fournisseur dans le fichier H2.mat (1pt)</b> # </div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b> # 3.2. Résoudre l’équation (1) pour obtenir le signal réel f(t) sachant la réponse du transducteur g(t). (1pt)<br><br> # En pratique un terme de régularisation µ est ajouté et l’équation utilisée est : <br> # $F(\omega)=\frac{G(\omega)\cdot H(\omega)}{H(\omega)^2+\mu}$</b> # </div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>3.3. Quelle est l’utilité de ce terme de régularisation ? (1pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. 
</b> </div> # <div class="alert alert-block alert-warning"><b>3.4. Effectuez la déconvolution de l’image de la partie précédente et comparer. Vous pouvez utiliser µ=5.10-8. (2pts)</b></div> # + # Charger H2 H2 = np.load('H2.npy') # Interpoler entre 0 et 50 Mhz nbpts = 2048 # Nombre de données acquises par 1 transducteur H2 = np.interp(np.linspace(0, 50, nbpts // 2 + 1), np.linspace(0, 30, len(H2)), H2, left=0, right=0) H2 = np.concatenate((H2[-1:0:-1], H2[:-1])) # Création d'un subplot 1 par 2 plt.figure(figsize=[10, 5]) plt.subplot(1,2,1) # Chargement et plot de rawdata rawdata = np.load('rawdata.npy') plt.imshow(rawdata, aspect='auto', vmin=-1, vmax=1) plt.colorbar(); # Commencez votre code ici # - # <div class="alert alert-block alert-warning"><b>3.5. Que se passe-t-il au niveau des bords supérieurs et inférieurs ? (1pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>3.6. À quoi correspondent les barres horizontales visibles sur une partie de l’image ? (1pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b></div> # <h1> <font color='teal'> Section 4 - Simulation d'un transducteur (8pts)</font></h1> # <div class="alert alert-block alert-info"> # <b>Pour cette section, Vous allez utiliser un simulateur d'onde ultrasonores dévoloppé au sein de Polytechnique par le Provost Ultrasound Lab (un grand merci à <NAME> et <NAME> pour leur super travail).<br><br> # Afin d'effectuer ces simulations, assurez-vous que le fichier <i>labUSfunctions.py</i> est bien dans le même dossier que ce Notebook.</b></div> # <div class="alert alert-block alert-warning"><b>4.1. Utilisez le script ci-dessous pour afficher le champ de pression émis par un transducteur composé d'un unique élément au cours du temps. Expliquez les résultats obtenus. 
(0.5pt)</b><br> # </div> # + ## Title: Ultrasound Forward Simulator ## Filename: __main__.py ## Authors: <NAME>, <NAME> --- Provost Ultrasound Lab ## Inspired from work in: Section 2.3 of https://doi.org/10.1088/1361-6560/aae3c3 import numpy as np from labUSfunctions import ussimforward, prepare_animation ################## ### PARAMETERS ### ################## ## PROBE prm = {} prm['fc'] = 14e6 # Central frequency of the transducer (Hz) prm['pitch'] = 0.5e-3 # Spacing between the center of 2 elements (m) prm['width'] = 0.8117*1540/(14e6) # Width of element (m) prm['height'] = 1.5e-3 # Height of element (m) prm['nele'] = 1 # Number of elements prm['BW'] = 0.5 # 50% Pulse Bandwidth prm['eleBW'] = 1 # 100% Bandwidth prm['hardbaffle'] = True # Use hard baffle =True or soft baffle =False ## MEDIUM prm['c'] = 1540 # Propagation velocity (m/s) prm['lbd'] = prm['c']/prm['fc'] # Wavelenght (m) ## IMAGING SEQUENCE prm['fs'] = 4 * prm['fc'] # Sampling frequency of the system (Hz) prm['elevfocus'] = 64 * prm['lbd'] prm['fspulse'] = 10*prm['fs'] # Sampling frequency for the pulse (Hz) prm['pulse'] = np.sin(2*np.pi*prm['fc']*np.arange(0, 2/prm['BW']/prm['fc']+1/prm['fspulse'], 1/prm['fspulse'])) prm['pulse'] = prm['pulse'] * np.hanning(prm['pulse'].size) # Shape of the pulse prm['planewaveangle'] = 0 # Plane wave emission angle (degrees) ## SIMULATION prm['useprobeheight'] = False # Take probe height into account =True or not =False prm['gridsize'] = 256 # Number of samples per side of the grid prm['fovx'] = prm['lbd'] * 64 # Lateral field of view (m) prm['fovz'] = prm['lbd'] * 64 # Axial field of view (m) prm['floatprecision'] = np.float32 # Float arrays precision prm['complexprecision'] = np.complex64 # Complex arrays precision ############# ### SETUP ### ############# ## Grid Definition zgrid, xgrid = np.meshgrid(np.linspace(0, 1, prm['gridsize'])*prm['fovz'], np.linspace(-0.5, 0.5, prm['gridsize'])*prm['fovx']) ygrid = np.zeros_like(xgrid) ## Delays Definition delays = 
np.array([0.]) ## Apodization Definition apod = np.ones_like(delays) ################## ### SIMULATION ### ################## RF = ussimforward(xgrid, ygrid, zgrid, delays, apod, prm) # Call to the simulation function ###################### ### OUTPUT US WAVE ### ###################### anim = prepare_animation(RF, prm['fovx'], prm['fovz']) display(HTML(anim.to_html5_video())) # - # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>4.2. Quel type de propagation est une onde sonore ? Écrivez la forme mathématique générale de ce type d’onde. (1pt) </b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>4.3. À quoi ressemble le champ de pression à un instant t fixe (t ≈ 3µs)? (0.5pt) </b><br> # Affichez l'image contenue dans la matrice RF qui correspond à l'état du front d'onde au moment t. Utilisez les paramètres de la simulation pour trouver l'indice de cette image. # </div> # + # Commencez votre code ici # - # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>4.4. En adaptant la simulation ci-dessous, affichez de nouveau le champ de pression au cours du temps pour un transducteur composé de 10 éléments. Qu'observez-vous ? 
(0.5pt)</b> # </div> # + ################## ### PARAMETERS ### ################## ## PROBE prm = {} prm['fc'] = 14e6 # Central frequency of the transducer (Hz) prm['pitch'] = 0.5e-3 # Spacing between the center of 2 elements (m) prm['width'] = 0.8117*1540/(14e6) # Width of element (m) prm['height'] = 1.5e-3 # Height of element (m) prm['nele'] = 1 # Number of elements prm['BW'] = 0.5 # 50% Pulse Bandwidth prm['eleBW'] = 1 # 100% Bandwidth prm['hardbaffle'] = True # Use hard baffle =True or soft baffle =False ## MEDIUM prm['c'] = 1540 # Propagation velocity (m/s) prm['lbd'] = prm['c']/prm['fc'] # Wavelenght (m) ## IMAGING SEQUENCE prm['fs'] = 4 * prm['fc'] # Sampling frequency of the system (Hz) prm['elevfocus'] = 64 * prm['lbd'] prm['fspulse'] = 10*prm['fs'] # Sampling frequency for the pulse (Hz) prm['pulse'] = np.sin(2*np.pi*prm['fc']*np.arange(0, 2/prm['BW']/prm['fc']+1/prm['fspulse'], 1/prm['fspulse'])) prm['pulse'] = prm['pulse'] * np.hanning(prm['pulse'].size) # Shape of the pulse prm['planewaveangle'] = 0 # Plane wave emission angle (degrees) ## SIMULATION prm['useprobeheight'] = False # Take probe height into account =True or not =False prm['gridsize'] = 256 # Number of samples per side of the grid prm['fovx'] = prm['lbd'] * 64 # Lateral field of view (m) prm['fovz'] = prm['lbd'] * 64 # Axial field of view (m) prm['floatprecision'] = np.float32 # Float arrays precision prm['complexprecision'] = np.complex64 # Complex arrays precision ############# ### SETUP ### ############# ## Grid Definition zgrid, xgrid = np.meshgrid(np.linspace(0, 1, prm['gridsize'])*prm['fovz'], np.linspace(-0.5, 0.5, prm['gridsize'])*prm['fovx']) ygrid = np.zeros_like(xgrid) ## Delays Definition delays = np.array([0.]) ## Apodization Definition apod = np.ones_like(delays) ################## ### SIMULATION ### ################## RF = ussimforward(xgrid, ygrid, zgrid, delays, apod, prm) # Call to the simulation function ###################### ### OUTPUT US WAVE ### ###################### 
anim = prepare_animation(RF, prm['fovx'], prm['fovz']) display(HTML(anim.to_html5_video())) # - # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>4.5. Toujours avec 10 éléments, faire une fonction qui viendra modifier le vecteur 'delays' afin d’obtenir une onde plane à un angle de 10°. Affichez le front d’onde après 3µs. (1.5pts) </b><br> # Delays prend des valeurs en s. </div> # + ################## ### PARAMETERS ### ################## ## PROBE prm = {} prm['fc'] = 14e6 # Central frequency of the transducer (Hz) prm['pitch'] = 0.5e-3 # Spacing between the center of 2 elements (m) prm['width'] = 0.8117*1540/(14e6) # Width of element (m) prm['height'] = 1.5e-3 # Height of element (m) prm['nele'] = 1 # Number of elements prm['BW'] = 0.5 # 50% Pulse Bandwidth prm['eleBW'] = 1 # 100% Bandwidth prm['hardbaffle'] = True # Use hard baffle =True or soft baffle =False ## MEDIUM prm['c'] = 1540 # Propagation velocity (m/s) prm['lbd'] = prm['c']/prm['fc'] # Wavelenght (m) ## IMAGING SEQUENCE prm['fs'] = 4 * prm['fc'] # Sampling frequency of the system (Hz) prm['elevfocus'] = 64 * prm['lbd'] prm['fspulse'] = 10*prm['fs'] # Sampling frequency for the pulse (Hz) prm['pulse'] = np.sin(2*np.pi*prm['fc']*np.arange(0, 2/prm['BW']/prm['fc']+1/prm['fspulse'], 1/prm['fspulse'])) prm['pulse'] = prm['pulse'] * np.hanning(prm['pulse'].size) # Shape of the pulse prm['planewaveangle'] = 0 # Plane wave emission angle (degrees) ## SIMULATION prm['useprobeheight'] = False # Take probe height into account =True or not =False prm['gridsize'] = 256 # Number of samples per side of the grid prm['fovx'] = prm['lbd'] * 64 # Lateral field of view (m) prm['fovz'] = prm['lbd'] * 64 # Axial field of view (m) prm['floatprecision'] = np.float32 # Float arrays precision prm['complexprecision'] = np.complex64 # Complex arrays precision ############# ### SETUP ### ############# ## Grid Definition zgrid, 
xgrid = np.meshgrid(np.linspace(0, 1, prm['gridsize'])*prm['fovz'], np.linspace(-0.5, 0.5, prm['gridsize'])*prm['fovx']) ygrid = np.zeros_like(xgrid) ## Delays Definition delays = np.array([0.]) ## Apodization Definition apod = np.ones_like(delays) ################## ### SIMULATION ### ################## RF = ussimforward(xgrid, ygrid, zgrid, delays, apod, prm) # Call to the simulation function ###################### ### OUTPUT US WAVE ### ###################### anim = prepare_animation(RF, prm['fovx'], prm['fovz']) display(HTML(anim.to_html5_video())) # - # <div class="alert alert-block alert-warning"><b>4.6. Quel est l’intérêt de pouvoir focaliser en émission? Y a-t-il un désavantage à utiliser un grand nombre de transducteurs répartis sur une large distance? (0.5pt)</b> # </div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>4.7. Que peut-on faire concernant la réception en terme de focalisation? (0.5pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>4.8. Adaptez le paramètre 'delays' afin d'effectuer un focus en x=3mm et z=5mm. Expliquez vos résultats. 
(1.5pts)</b> # </div> # + ################## ### PARAMETERS ### ################## ## PROBE prm = {} prm['fc'] = 14e6 # Central frequency of the transducer (Hz) prm['pitch'] = 0.5e-3 # Spacing between the center of 2 elements (m) prm['width'] = 0.8117*1540/(14e6) # Width of element (m) prm['height'] = 1.5e-3 # Height of element (m) prm['nele'] = 1 # Number of elements prm['BW'] = 0.5 # 50% Pulse Bandwidth prm['eleBW'] = 1 # 100% Bandwidth prm['hardbaffle'] = True # Use hard baffle =True or soft baffle =False ## MEDIUM prm['c'] = 1540 # Propagation velocity (m/s) prm['lbd'] = prm['c']/prm['fc'] # Wavelenght (m) ## IMAGING SEQUENCE prm['fs'] = 4 * prm['fc'] # Sampling frequency of the system (Hz) prm['elevfocus'] = 64 * prm['lbd'] prm['fspulse'] = 10*prm['fs'] # Sampling frequency for the pulse (Hz) prm['pulse'] = np.sin(2*np.pi*prm['fc']*np.arange(0, 2/prm['BW']/prm['fc']+1/prm['fspulse'], 1/prm['fspulse'])) prm['pulse'] = prm['pulse'] * np.hanning(prm['pulse'].size) # Shape of the pulse prm['planewaveangle'] = 0 # Plane wave emission angle (degrees) ## SIMULATION prm['useprobeheight'] = False # Take probe height into account =True or not =False prm['gridsize'] = 256 # Number of samples per side of the grid prm['fovx'] = prm['lbd'] * 64 # Lateral field of view (m) prm['fovz'] = prm['lbd'] * 64 # Axial field of view (m) prm['floatprecision'] = np.float32 # Float arrays precision prm['complexprecision'] = np.complex64 # Complex arrays precision ############# ### SETUP ### ############# ## Grid Definition zgrid, xgrid = np.meshgrid(np.linspace(0, 1, prm['gridsize'])*prm['fovz'], np.linspace(-0.5, 0.5, prm['gridsize'])*prm['fovx']) ygrid = np.zeros_like(xgrid) ## Delays Definition delays = np.array([0.]) ## Apodization Definition apod = np.ones_like(delays) ################## ### SIMULATION ### ################## RF = ussimforward(xgrid, ygrid, zgrid, delays, apod, prm) # Call to the simulation function ###################### ### OUTPUT US WAVE ### 
###################### anim = prepare_animation(RF, prm['fovx'], prm['fovz']) display(HTML(anim.to_html5_video())) # - # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>4.9. Sur la figure de droite, en dehors du point de focalisation, nous observons des zones présentant de fortes intensités acoustiques (e.g. [x=2mm, z=6mm]). A quoi cela est-il-dû ? (0.5pt) # </b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>4.10. Quel est l’intérêt de pouvoir diriger l’onde? (0.5pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div> # <div class="alert alert-block alert-warning"><b>4.11. En faisant suite à la question 4.9, on observe que les zones non focalisées présentent une intensité acoustique du même ordre de grandeur que l'intensité en [x=3mm, z=5mm]. Comment expliquez-vous que, malgré cela, on puisse obtenir une image de qualité satisfaisante en mode B. (0.5pt)</b></div> # <div class="alert alert-block alert-success"> <b> Double-cliquez pour entrez votre réponse. </b> </div>
lab2-us/GBM8378_Lab2_US_2021.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Approximate q-learning # # In this notebook you will teach a lasagne neural network to do Q-learning. # __Frameworks__ - we'll accept this homework in any deep learning framework. For example, it translates to TensorFlow almost line-to-line. However, we recommend you to stick to theano/lasagne unless you're certain about your skills in the framework of your choice. # %env THEANO_FLAGS='floatX=float32' import os if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY"))==0: # !bash ../xvfb start # %env DISPLAY=:1 import gym import numpy as np, pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # + env = gym.make("CartPole-v0") env.reset() n_actions = env.action_space.n state_dim = env.observation_space.shape plt.imshow(env.render("rgb_array")) # - # # Approximate (deep) Q-learning: building the network # # In this section we will build and train naive Q-learning with theano/lasagne # First step is initializing input variables # + import theano import theano.tensor as T #create input variables. We'll support multiple states at once current_states = T.matrix("states[batch,units]") actions = T.ivector("action_ids[batch]") rewards = T.vector("rewards[batch]") next_states = T.matrix("next states[batch,units]") is_end = T.ivector("vector[batch] where 1 means that session just ended") # + import lasagne from lasagne.layers import * #input layer l_states = InputLayer((None,)+state_dim) <Your architecture. 
Please start with a single-layer network> #output layer l_qvalues = DenseLayer(<previous_layer>,num_units=n_actions,nonlinearity=None) # - # #### Predicting Q-values for `current_states` #get q-values for ALL actions in current_states predicted_qvalues = get_output(l_qvalues,{l_states:current_states}) #compiling agent's "GetQValues" function get_qvalues = <compile a function that takes current_states and returns predicted_qvalues> #select q-values for chosen actions predicted_qvalues_for_actions = predicted_qvalues[T.arange(actions.shape[0]),actions] # #### Loss function and `update` # Here we write a function similar to `agent.update`. # + #predict q-values for next states predicted_next_qvalues = get_output(l_qvalues,{l_states:<theano input with for states>}) #Computing target q-values under gamma = 0.99 target_qvalues_for_actions = <target Q-values using rewards and predicted_next_qvalues> #zero-out q-values at the end target_qvalues_for_actions = (1-is_end)*target_qvalues_for_actions #don't compute gradient over target q-values (consider constant) target_qvalues_for_actions = theano.gradient.disconnected_grad(target_qvalues_for_actions) # + #mean squared error loss function loss = <mean squared between target_qvalues_for_actions and predicted_qvalues_for_actions> # + #all network weights all_weights = get_all_params(l_qvalues,trainable=True) #network updates. 
Note the small learning rate (for stability) updates = lasagne.updates.sgd(loss,all_weights,learning_rate=1e-4) # - #Training function that resembles agent.update(state,action,reward,next_state) #with 1 more argument meaning is_end train_step = theano.function([current_states,actions,rewards,next_states,is_end], updates=updates) # ### Playing the game # + epsilon = 0.25 #initial epsilon def generate_session(t_max=1000): """play env with approximate q-learning agent and train it at the same time""" total_reward = 0 s = env.reset() for t in range(t_max): #get action q-values from the network q_values = get_qvalues([s])[0] a = <sample action with epsilon-greedy strategy> new_s,r,done,info = env.step(a) #train agent one step. Note that we use one-element arrays instead of scalars #because that's what function accepts. train_step([s],[a],[r],[new_s],[done]) total_reward+=r s = new_s if done: break return total_reward # - for i in range(100): rewards = [generate_session() for _ in range(100)] #generate new sessions epsilon*=0.95 print ("mean reward:%.3f\tepsilon:%.5f"%(np.mean(rewards),epsilon)) if np.mean(rewards) > 300: print ("You Win!") break assert epsilon!=0, "Please explore environment" # ### Video epsilon=0 #Don't forget to reset epsilon back to initial value if you want to go on training # + #record sessions import gym.wrappers env = gym.wrappers.Monitor(env,directory="videos",force=True) sessions = [generate_session() for _ in range(100)] env.close() #unwrap env = env.env.env #upload to gym #gym.upload("./videos/",api_key="<your_api_key>") #you'll need me later #Warning! 
If you keep seeing error that reads something like"DoubleWrapError", #run env=gym.make("CartPole-v0");env.reset(); # + #show video from IPython.display import HTML import os video_names = list(filter(lambda s:s.endswith(".mp4"),os.listdir("./videos/"))) HTML(""" <video width="640" height="480" controls> <source src="{}" type="video/mp4"> </video> """.format("./videos/"+video_names[-1])) #this may or may not be _last_ video. Try other indices # -
week4/Seminar4.0_recap_approx_qlearning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Importing libraries

import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns

# %config Completer.use_jedi = False  # this speeds up autocomplete

# ## Creating dataframe: df

# Mortality records exported from DATASUS (2010-2019); the export is Latin-1 encoded.
df = pd.read_csv('total_datasus_2010_2019.csv', encoding='latin-1')

df.head()

# Initial info
df.info()

# ### Selecting only the suicide cases

# +
# Four-character ICD-10 cause codes X600..X849 (intentional self-harm subcodes).
suicide_codes = [f"X{code}" for code in range(600, 850)]

# Keep a row when either the underlying or the original cause column matches.
keep = df['CAUSABAS'].isin(suicide_codes) | df['CAUSABAS_O'].isin(suicide_codes)
df = df[keep]

df.info()
# From 1GB to 10MB of memory usage after filtering. Now we can continue.
# -

# #### Creating a new .csv to be uploaded in GitHub

df.to_csv('suicides_in_brazil_2010_2019.csv')

# ### Cleaning dataframe
.ipynb_checkpoints/analysing_suicide-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Validation of Value-at-Risk (VVaR): sample usage
#
# Runs every backtest exposed by the project-local `vvar` module against a
# synthetic PnL/VaR sample.  Each test call below returns a dict read with the
# keys 'lr' (likelihood ratio) and 'p-value'.
# NOTE(review): that return schema is inferred from the key accesses below —
# confirm against the vvar module itself.

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# ## Generate sample of observations (PnL and VaR)

np.random.seed(123)                  # reproducible sample
n = 1000                             # Length of the observation's time window
p = 0.05                             # Value-at-Risk level
y = pd.DataFrame(np.random.rand(n))  # Draw random sample of PnL
# NOTE(review): np.random.rand is uniform on [0, 1), so every "PnL" value is
# positive; a breach below simply means y < 0.05, not an actual loss — confirm
# this is the intended toy scenario.
y_p = pd.DataFrame(np.repeat(p, n))  # Assume that VaR is properly estimated

# ## Plot the generated scenario

# +
fig = plt.figure(figsize=(9, 5))
index = np.arange(n)
ax = plt.subplot(111)
ax.plot(index, y, label='PnL', color='darkslategray', linewidth=1)
ax.plot(index, y_p, label='VaR', color='lightcoral', linewidth=1)
# breaches: observations falling below the VaR line (markers only, linewidth=0)
ax.plot(y_p[y < y_p], label='VaR breach', color='red', linewidth=0, marker='.')
ax.set_title('# of obs.: {}, # of VaR breaches: {}'.format(n, sum((y < y_p).values)[0]))
ax.set_xlabel('Observation index')
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='lower center', ncol=3)
plt.subplots_adjust(bottom=0.18)
plt.show()
# -

# ### Kupiec *Proportion of Failures* test
# Reference: *Kupiec, P.H., 1995. Techniques for verifying the accuracy of risk measurement models. The J. of Derivatives, 3(2).*

from vvar import kupiec_pof

results = kupiec_pof(y=y, y_p=y_p, p=p)
print('Likelihood ratio: ' + str(results['lr']))
print('P-value : ' + str(results['p-value']))

# ### *Binomial* test
# Reference: *Jorion, P., 2001. Value at risk: the new benchmark for managing financial risk. NY: McGraw-Hill Professional.*

from vvar import binomial_pof

results = binomial_pof(y=y, y_p=y_p, p=p)
print('Likelihood ratio: ' + str(results['lr']))
print('P-value : ' + str(results['p-value']))

# ### Christoffersen *Independence* test
# Reference: *Christoffersen, P.F., 1998. Evaluating interval forecasts. International Economic Review 39:841–62.*

from vvar import christoffersen_icov

results = christoffersen_icov(y=y, y_p=y_p, p=p)
print('Likelihood ratio: ' + str(results['lr']))
print('P-value : ' + str(results['p-value']))

# ### Christoffersen *Conditional Coverage* test
# Reference: *Christoffersen, P.F., 1998. Evaluating interval forecasts. International Economic Review 39:841–62.*

from vvar import christoffersen_ccov

results = christoffersen_ccov(y=y, y_p=y_p, p=p)
print('Likelihood ratio: ' + str(results['lr']))
print('P-value : ' + str(results['p-value']))

# ### Kupiec *Time Until First Failure* test
# Reference: *Kupiec, P.H., 1995. Techniques for verifying the accuracy of risk measurement models. The J. of Derivatives, 3(2).*

from vvar import kupiec_tuff

results = kupiec_tuff(y=y, y_p=y_p, p=p)
print('Likelihood ratio: ' + str(results['lr']))
print('P-value : ' + str(results['p-value']))

# ### Haas *Time Between Failures* test
# Reference: *Haas, M., 2001. New methods in backtesting. Financial Engineering Research Center, Bonn.*

from vvar import haas_tbf

results = haas_tbf(y=y, y_p=y_p, p=p)
print('Likelihood ratio: ' + str(results['lr']))
print('P-value : ' + str(results['p-value']))

# ### Christoffersen & Pelletier *Continuous Weibull* test
# Reference: *Christoffersen, P. and Pelletier, D., 2004. Backtesting value-at-risk: A duration-based approach.*

from vvar import christoffersen_cweibull

results = christoffersen_cweibull(y=y, y_p=y_p, p=p)
print('Likelihood ratio: ' + str(results['lr']))
print('P-value : ' + str(results['p-value']))

# ### Haas *Discrete Weibull* test
# Reference: *Haas, M., 2005. Improved duration-based backtesting of value-at-risk. The Journal of Risk, 8(2), p.17.*

from vvar import haas_dweibull

results = haas_dweibull(y=y, y_p=y_p, p=p)
print('Likelihood ratio: ' + str(results['lr']))
print('P-value : ' + str(results['p-value']))

# ### Engle & Manganelli *Dynamical Quantile* test
# Reference: *Engle, R.F. and Manganelli, S., 2004. CAViaR: Conditional autoregressive value at risk by regression quantiles.*

from vvar import engle_dq

results = engle_dq(y=y, y_p=y_p, p=p)
print('Likelihood ratio: ' + str(results['lr']))
print('P-value : ' + str(results['p-value']))

# ### Berkowitz *Box-Ljung statistic* test
# Reference: *Berkowitz, J., Christoffersen, P. and Pelletier, D., 2011. Evaluating value-at-risk models with desk-level data.*

from vvar import berkowitz_bl

results = berkowitz_bl(y=y, y_p=y_p, p=p)
print('Likelihood ratio: ' + str(results['lr']))
print('P-value : ' + str(results['p-value']))

# ### Krämer & Wied *Gini coefficient* test
# Reference: *Krämer, W. and Wied, D., 2015. A simple and focused backtest of value at risk. Economics Letters, 137, pp.29-31.*

from vvar import kramer_gini

results = kramer_gini(y=y, y_p=y_p, p=p)
print('Likelihood ratio: ' + str(results['lr']))
print('P-value : ' + str(results['p-value']))
SAMPLE.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# NOTE(review): this notebook targets a Python 2 kernel (print statements,
# builtin `reduce`, `str.decode` later on); it will not run unmodified on
# Python 3.

# +
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from time import time
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
# %matplotlib inline
# -

# load the dataset
# the optional parameter min_faces_per_person
# will only retain pictures of people that have at least min_faces_per_person different pictures.
# the optional parameter resize is the ratio used to resize each face picture.
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)

# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
n_samples, h, w

# for machine learning we use the data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
y = lfw_people.target
n_features = X.shape[1]
X.shape

# plot one of the faces
plt.imshow(X[0].reshape((h, w)), cmap=plt.cm.gray)
lfw_people.target_names[y[0]]

# +
# plot one of the faces after applying a scaling module
from sklearn.preprocessing import StandardScaler

plt.imshow(StandardScaler().fit_transform(X)[0].reshape((h, w)), cmap=plt.cm.gray)
lfw_people.target_names[y[0]]

# +
# the label to predict is the id of the person
target_names = lfw_people.target_names
n_classes = target_names.shape[0]

print "Total dataset size:"
print "n_samples: %d" % n_samples
print "n_features: %d" % n_features
print "n_classes: %d" % n_classes
# -

# let's split our dataset into training and testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)

# +
"""
Compute a PCA (eigenfaces) on the face dataset

from sklearn docs:
The optional parameter whiten=True makes it possible to
project the data onto the singular space while scaling each component
to unit variance. This is often useful if the models down-stream make
strong assumptions on the isotropy of the signal: this is for example
the case for Support Vector Machines with the RBF kernel and the
K-Means clustering algorithm.
"""

# instantiate the PCA module
pca = PCA(n_components=200, whiten=True)

# create a pipeline called preprocessing that will scale data and then apply PCA
preprocessing = Pipeline([('scale', StandardScaler()), ('pca', pca)])

print "Extracting the top %d eigenfaces from %d faces" % (200, X_train.shape[0])

# fit the pipeline to the training set
preprocessing.fit(X_train)

# grab the PCA from the pipeline
extracted_pca = preprocessing.steps[1][1]

# +
# Scree Plot
plt.plot(np.cumsum(extracted_pca.explained_variance_ratio_))
# starting at 100 components captures over 90% of the variance compared to the 1,850 original features
# -

comp = extracted_pca.components_
image_shape = (h, w)


def plot_gallery(title, images, n_col, n_row):
    """Render `images` (flattened face-sized arrays) as an n_row x n_col gallery."""
    plt.figure(figsize=(2. * n_col, 2.26 * n_row))
    plt.suptitle(title, size=16)
    for i, comp in enumerate(images):
        plt.subplot(n_row, n_col, i + 1)
        # symmetric color limits so positive/negative weights render comparably
        vmax = max(comp.max(), -comp.min())
        plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
                   vmin=-vmax, vmax=vmax)
        plt.xticks(())
        plt.yticks(())
    plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
    plt.show()


# NOTE(review): 'componenets' in the title string is a typo; kept as-is because
# it is runtime output, not a comment.
plot_gallery('PCA componenets', comp[:16], 4, 4)

import itertools


def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints a more readable confusion matrix with heat labels
    and options for normalization

    Normalization can be applied by setting `normalize=True`.
    """
    # NOTE(review): the `normalize` flag is accepted but never used in the body.
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # threshold deciding white vs. black cell text for readability
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')


# +
# fit without using PCA to see what the difference will be
# NOTE(review): `logreg` is not defined until a later cell in this notebook;
# as written this cell only works if cells are executed out of order — confirm.
t0 = time()
param_grid = {'C': [1e-2, 1e-1, 1e0, 1e1, 1e2]}
clf = GridSearchCV(logreg, param_grid)
clf = clf.fit(X_train, y_train)
best_clf = clf.best_estimator_

# Predicting people's names on the test set
y_pred = best_clf.predict(X_test)

print accuracy_score(y_pred, y_test), "Accuracy score for best estimator"
print(classification_report(y_test, y_pred, target_names=target_names))
print plot_confusion_matrix(confusion_matrix(y_test, y_pred, labels=range(n_classes)), target_names)
print round((time() - t0), 1), "seconds to grid search and predict the test set"

# +
# fit with using PCA to see what the difference will be
t0 = time()
face_pipeline = Pipeline(steps=[('PCA', PCA(n_components=200)), ('logistic', logreg)])
pipe_param_grid = {'logistic__C': [1e-2, 1e-1, 1e0, 1e1, 1e2]}
clf = GridSearchCV(face_pipeline, pipe_param_grid)
clf = clf.fit(X_train, y_train)
best_clf = clf.best_estimator_

# Predicting people's names on the test set
y_pred = best_clf.predict(X_test)

print accuracy_score(y_pred, y_test), "Accuracy score for best estimator"
print(classification_report(y_test, y_pred, target_names=target_names))
print plot_confusion_matrix(confusion_matrix(y_test, y_pred, labels=range(n_classes)), target_names)
print round((time() - t0), 1), "seconds to grid search and predict the test set"

# +
# get a list of predicted names and true names to plot with faces in test set
# NOTE(review): `title` is not defined anywhere in this notebook — it looks
# like the helper from the scikit-learn LFW eigenfaces example that was not
# copied over; this cell raises NameError as written.
prediction_titles = [title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0])]

# plot a sample of the test set with predicted and true names
# NOTE(review): this call uses the sklearn example's signature
# plot_gallery(images, titles, h, w), which does not match the local
# definition plot_gallery(title, images, n_col, n_row) above — confirm intent.
plot_gallery(X_test, prediction_titles, h, w)
# -


def get_best_model_and_accuracy(model, params, X, y):
    """Grid-search `model` over `params` on (X, y) and print accuracy/timing stats."""
    grid = GridSearchCV(model,  # the model to grid search
                        params,  # the parameter set to try
                        error_score=0.)  # if a parameter set raises an error, continue and set the performance as a big, fat 0
    grid.fit(X, y)  # fit the model and parameters
    # our classical metric for performance
    print "Best Accuracy: {}".format(grid.best_score_)
    # the best parameters that caused the best accuracy
    print "Best Parameters: {}".format(grid.best_params_)
    # the average time it took a model to fit to the data (in seconds)
    print "Average Time to Fit (s): {}".format(round(grid.cv_results_['mean_fit_time'].mean(), 3))
    # the average time it took a model to predict out of sample data (in seconds)
    # this metric gives us insight into how this model will perform in real-time analysis
    print "Average Time to Score (s): {}".format(round(grid.cv_results_['mean_score_time'].mean(), 3))


# +
# Create a larger pipeline to gridsearch
face_params = {'logistic__C': [1e-2, 1e-1, 1e0, 1e1, 1e2],
               'preprocessing__pca__n_components': [100, 150, 200, 250, 300],
               'preprocessing__pca__whiten': [True, False],
               'preprocessing__lda__n_components': range(1, 7)  # [1, 2, 3, 4, 5, 6] recall the max allowed is n_classes-1
               }

pca = PCA()
lda = LinearDiscriminantAnalysis()
preprocessing = Pipeline([('scale', StandardScaler()), ('pca', pca), ('lda', lda)])
logreg = LogisticRegression()
face_pipeline = Pipeline(steps=[('preprocessing', preprocessing), ('logistic', logreg)])

get_best_model_and_accuracy(face_pipeline, face_params, X, y)

# +
# much better than original data and very fast to predict and train!
# talk about how these transformations are dope BUT they are predefined so we could learn new features
# based on training data
# these predefined transformations might not work for a particular dataset
# PCA is PCA no matter what dataset you choose to work with
# -

from sklearn.neural_network import BernoulliRBM
from sklearn.preprocessing import MinMaxScaler

# NOTE(review): `extracted_rbm` is not assigned until the next cell; this line
# raises NameError on a fresh kernel unless cells are run out of order.
extracted_rbm.components_.shape

# +
# instantiate the PCA module
# NOTE(review): comment kept from the original, but this is an RBM, not PCA.
rbm = BernoulliRBM(n_components=200, learning_rate=0.01, batch_size=10, n_iter=200, verbose=True)

# create a pipeline called preprocessing that will scale data and then apply PCA
preprocessing = Pipeline([('minmax', MinMaxScaler()), ('rbm', rbm)])

print "Extracting the top %d RBM-faces from %d faces" % (200, X_train.shape[0])

# fit the pipeline to the training set
preprocessing.fit(X_train)

# grab the PCA from the pipeline (actually the fitted RBM step)
extracted_rbm = preprocessing.steps[1][1]
# -

comp = extracted_rbm.components_
image_shape = (h, w)


# NOTE(review): redefinition of plot_gallery from earlier in the notebook
# (identical body).
def plot_gallery(title, images, n_col, n_row):
    plt.figure(figsize=(2. * n_col, 2.26 * n_row))
    plt.suptitle(title, size=16)
    for i, comp in enumerate(images):
        plt.subplot(n_row, n_col, i + 1)
        vmax = max(comp.max(), -comp.min())
        plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
                   vmin=-vmax, vmax=vmax)
        plt.xticks(())
        plt.yticks(())
    plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
    plt.show()


plot_gallery('RBM componenets', comp[:16], 4, 4)

# +
# latent semantic analysis is a name given to the process of doing an SVD on sparse text document-term matricies
# It is done to find latent structure in text for the purposes of classification, clustering, etc
# -

from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans
import pandas as pd
# import the sentence tokenizer from nltk
from nltk.tokenize import sent_tokenize
from sklearn.decomposition import TruncatedSVD

hotel_reviews = pd.read_csv('../data/7282_1.csv')

hotel_reviews.shape

hotel_reviews.head()

# +
# Let's only include reviews from the US to try to only include english reviews
# plot the lats and longs of reviews
hotel_reviews.plot.scatter(x='longitude', y='latitude')

# +
# Filter to only include datapoints within the US
hotel_reviews = hotel_reviews[((hotel_reviews['latitude'] <= 50.0) & (hotel_reviews['latitude'] >= 24.0)) & ((hotel_reviews['longitude'] <= -65.0) & (hotel_reviews['longitude'] >= -122.0))]

# Plot the lats and longs again
hotel_reviews.plot.scatter(x='longitude', y='latitude')
# Only looking at reviews that are coming from the US
# -

hotel_reviews.shape

texts = hotel_reviews['reviews.text']

sent_tokenize("hello! I am Sinan. How are you??? I am fine")

# Python 2: `reduce` is a builtin and `.decode` turns byte strings into unicode.
sentences = reduce(lambda x, y: x + y, texts.apply(lambda x: sent_tokenize(str(x).decode('utf-8'))))

# the number of sentences
len(sentences)

# +
from sklearn.feature_extraction.text import TfidfVectorizer

tfidf = TfidfVectorizer(ngram_range=(1, 2), stop_words='english')
tfidf_transformed = tfidf.fit_transform(sentences)
tfidf_transformed

# +
# try to fit PCA (expected to fail: PCA needs a dense matrix, see below)
PCA(n_components=1000).fit(tfidf_transformed)

# +
# import the Iris dataset from scikit-learn
from sklearn.datasets import load_iris
# import our plotting module
import matplotlib.pyplot as plt
# %matplotlib inline

# load the Iris dataset
iris = load_iris()

# seperate the features and response variable
iris_X, iris_y = iris.data, iris.target

X_centered = StandardScaler(with_std=False).fit_transform(iris_X)
X_scaled = StandardScaler().fit_transform(iris_X)

# +
# can't work because it has to calculate a covariance matrix and to do that, the matrix needs to be dense
# we use another method in sklearn called Truncated SVD
# Truncated SVD uses a matrix trick to obtain the same components as PCA (when the data are scaled)
# and can work with sparse matrices
# components are a not exactly equal but they are up to a very precise decimal
from sklearn.decomposition import TruncatedSVD

svd = TruncatedSVD(n_components=2)
pca = PCA(n_components=2)

# check if components of PCA and TruncatedSVD are same for a dataset
# by substracting the two matricies and seeing if, on average, the elements are very close to 0
print (pca.fit(iris_X).components_ - svd.fit(iris_X).components_).mean()
# not close to 0
# matrices are NOT the same

# check if components of PCA and TruncatedSVD are same for a centered dataset
print (pca.fit(X_centered).components_ - svd.fit(X_centered).components_).mean()
# close to 0
# matrices ARE the same

# check if components of PCA and TruncatedSVD are same for a scaled dataset
print (pca.fit(X_scaled).components_ - svd.fit(X_scaled).components_).mean()
# close to 0
# matrices ARE the same
# -

svd = TruncatedSVD(n_components=1000)
svd.fit(tfidf_transformed)

# +
# Scree Plot
plt.plot(np.cumsum(svd.explained_variance_ratio_))
# 1,000 components captures about 30% of the variance
# -

# +
tfidf = TfidfVectorizer(ngram_range=(1, 2), stop_words='english')
svd = TruncatedSVD(n_components=10)  # will extract 10 "topics"
normalizer = Normalizer()  # will give each document a unit norm

lsa = Pipeline(steps=[('tfidf', tfidf), ('svd', svd), ('normalizer', normalizer)])

# +
lsa_sentences = lsa.fit_transform(sentences)

lsa_sentences.shape

# +
cluster = KMeans(n_clusters=10)

cluster.fit(lsa_sentences)
# -

# %%timeit
# time it takes to cluster on the original document-term matrix of shape (118151, 280901)
cluster.fit(tfidf_transformed)

# %%timeit
# also time the prediction phase of the Kmeans clustering
cluster.predict(tfidf_transformed)

# %%timeit
# time the time to cluster after latent semantic analysis of shape (118151, 10)
cluster.fit(lsa_sentences)
# over 80 times faster than fitting on the original tfidf dataset

# %%timeit
# also time the prediction phase of the Kmeans clustering after LSA was performed
cluster.predict(lsa_sentences)
# over 4 times faster than predicting on the original tfidf dataset

# transform texts to a cluster distance space
# each row represents an obsercation
cluster.transform(lsa_sentences).shape

predicted_cluster = cluster.predict(lsa_sentences)
predicted_cluster

# +
# Distribution of "topics"
pd.Series(predicted_cluster).value_counts(normalize=True)

# create DataFrame of texts and predicted topics
texts_df = pd.DataFrame({'text': sentences, 'topic': predicted_cluster})

texts_df.head()

print "Top terms per cluster:"
original_space_centroids = svd.inverse_transform(cluster.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
terms = lsa.steps[0][1].get_feature_names()
for i in range(10):
    print "Cluster %d:" % i
    print ', '.join([terms[ind] for ind in order_centroids[i, :5]])
    print

# NOTE(review): line structure reconstructed from a collapsed source — the
# trailing bare `print` above and this statement may originally have been a
# single `print lsa.steps[0][1]`; confirm against the original notebook.
lsa.steps[0][1]

# +
# topic prediction
print cluster.predict(lsa.transform(['I definitely recommend this hotel']))
print cluster.predict(lsa.transform(['super friendly staff. Love it!']))
# -
Chapter08/Ch_8.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="ZlKvq8VlfReq" # ![SPARK Banner](https://i.imgur.com/3vCmTns.png) # # # SPARK | Day 4 | July 15th, 2021 # # The agenda for today: # # 1. Machine Learning # 2. Supervised Machine Learning (regression) # + [markdown] id="i9Iup557fRet" # # Maching Learning # # ## What is machine learning? # # Machine learning isn't really a well defined term. There are, however, two defintions that are well accepted # # # ### <NAME> (1959) # Field of study that gives computers the ability to learn without being explicitly programmed # # ### <NAME> (1999) # A computer program is said to learn from experience E with respect to some class of tasks T and performance measure P, if its performance at tasks in T, as measured by P, improves with experience E # # Watch this quick [video](https://www.youtube.com/watch?v=f_uwKZIAeM0) about machine learning. # # ## Types of Machine Learning # There are two major branches or machine learning that you different types of learning alorithms. # # ### 1. Supervised learning # We teach a computer how to do something, then let is use its new found knowledge to do it. # # - supervised learning requires **labelled** data # - e.g., regression, classification — you want to predict housing prices # # ### 2. Unsupervised learning # We let the computer learn how to learn to do somethings, and use its knowledge to getermine structure and patterns in the data. # # - unsupervised learning requested **unlabelled** data # - e.g., clustering — you want to know if you can group the data into three groups # # # For SPARK, we will be learning about supervised machine learning. # # + [markdown] id="U-TxVnfyfRew" # ## Importing Machine Learning Libraries # # There are some specialized libraries in python that we can use for machine learning. 
# # ### sklearn # # [sklearn](https://scikit-learn.org/stable/) is a module of simple and efficient tools for machine learning problems like classification, regression and clustering. # # To import sklearn we use an import statement # # ``` # import sklearn # ``` # # In particular, we want the `train_test_split` function, so we will import it this way # # ``` # from sklearn.model_selection import train_test_split # ``` # # ### statsmodels # # [statsmodels](https://www.statsmodels.org/stable/index.html) is a python module that provides class functions for many statistical models. Specifically, we are going to buse the [statsmodels.formula.api](https://www.statsmodels.org/stable/api.html?highlight=formula%20api#statsmodels-formula-api). This to long to type out, so we usually import and use the short for `smf` # # To import statsmodels.formula.api we use and import statement # # ``` # import statsmodels.formula.api as smf # ``` # # # # # + id="TTSr7ZUHfRex" colab={"base_uri": "https://localhost:8080/"} outputId="4e8e370a-5b06-4f64-a2ea-2530ec4bd87c" # Lets import sklearn, along with our other data science packages import numpy as np # For math- and matrix-based functions import pandas as pd # For DataFrame functionality import seaborn as sns # For plotting/ data visualization from sklearn.model_selection import train_test_split # For the train_test_split_function import statsmodels.formula.api as smf # Stats models for # + [markdown] id="LEHWD_9GfRex" # # Regression Analysis # # Regression analysis attempts to explain the relationship between variables. We want to see if we can predict or explain the **dependent variable** using one or more **explanatory variables**. In linear regression, we assume that the dependent variable can be somewhat explained by a combination of expanatory variables. # # A very simple example is one where the depedent variable is height and the explantatory variable is age. 
The age of a person does a good job of explaining their height for kids under 16. # # We can also think of **explanatory variables** as **features** in our linear model, and the **dependent variable** as the **target**. # # The pipeline is as follows: # # ![image](https://i.imgur.com/tu0nVzO.png) # + [markdown] id="Wq9a_qolfRey" # ## Load Car Data # # Lets load our data for car models # + id="awG26ZRkfRey" colab={"base_uri": "https://localhost:8080/", "height": 417} outputId="32806c17-91c7-43a8-f375-88e47ad6c1b5" # Import car data cars = pd.read_csv("https://raw.githubusercontent.com/nguyenjenny/spark_shared_repo/main/datasets/Cars.csv") # Drop missing data cars = cars.dropna() # Show DataFrame cars # + [markdown] id="3X6J_sgUfRez" # ## Predicting Fuel Efficiency of Cars # # Let's see if can predict the fuel efficiency (in miles per gallon), using information about the car's number of horsepower, weight, and year. # # **Dependent/Target Variable** = `"Miles_per_Gallon"` # # **Exploratory Variables/Features** = `["Horsepower", "Weight_in_lbs", "Year"]` # + [markdown] id="fXqLrwEbfRe0" # ### Lets plot the relationship between our target and each of our features using `sns.regplot()` # # `sns.regplot()` plots the data as well as a linear regression fit. The syntax of this function is the same as all the other seaborn plots, but you can also take a look at the [documentation](https://seaborn.pydata.org/generated/seaborn.regplot.html) # + [markdown] id="i5PsaqVhfRe0" # #### Horsepower vs. Miles_per_Gallon: negatively correlated # + id="Baxrns8RfRe1" outputId="1175138d-c657-4d2c-f9bf-c00ebe9bf9bd" sns.regplot( data = cars, x = "Horsepower", y = "Miles_per_Gallon" ) # + [markdown] id="fH6J3xAXfRe2" # #### Weight_in_lbs vs. Miles_per_Gallon: negatively correlated # + id="IHy0iBOsfRe2" outputId="fd5837ff-f5eb-4ae6-c522-add5f8d88024" sns.regplot( data = cars, x = "Weight_in_lbs", y = "Miles_per_Gallon" ) # + [markdown] id="MtKJUx7bfRe2" # #### Exercise 1: Plot Year vs. 
Miles_per_Gallon: Are they positvely or negatively correlated? Is year as good of a predictor as Horsepower and/or Weight? # + id="DYvybWbKfRe2" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="0767877d-4ceb-431d-be8e-01951df6fb6f" # TO-DO: Plot a regplot of Year vs Miles_per_Gallon sns.regplot( data = cars, x = "Year", y = "Miles_per_Gallon" ) # + [markdown] id="HgFAKNAdUDWE" # positively correlated # yes # + [markdown] id="9RAzZxDtfRe3" # ## Splitting our data using `train_test_split()` # # Whenever we do machine learning, we always want to reserve data to **train** the model as well as data to **test** the model. By default, we usually use 75% (.75) of our data for training and 25% (.25) of our data for testing. Testing our data allow us to see how good of a job our linear regression model does at predicting the target variable. # # # We use `train_test_split()` to shuffle and randomly split our data into the train dataset and the test dataset. # # Before we can use it, be sure to properly import it # # ``` # from sklearn.model_selection import train_test_split # ``` # # The `train_test_split()` is formatted as follows: # # ``` # df_train, df_test = train_test_split(df[target], df[features], test_size=.25, train_size=.75, random_state=1) # ``` # # - Where: # - `df_train`: the split dataset for training # - `df_test`: the split dataset for testing # - `test_size`: the proportion of the data set to include in the testing set # - `train_size`: the proportion of the data set to include in the training set # - `random_state`: controls the shuffling of the data, using an int allows for reproducible output # + id="b3fimpiHfRe3" # Split the the car data into testing and training sets cars_train, cars_test = train_test_split(cars, test_size=.25, train_size=.75, random_state=1) # + id="NPr4hkoIfRe4" colab={"base_uri": "https://localhost:8080/", "height": 417} outputId="0859786e-e3a7-414b-9874-fde430d3265c" # The train dataset has 304 rows (notice how 
the indexes are randomly suffled) cars_train # + id="Q0w09v4mfRe4" colab={"base_uri": "https://localhost:8080/", "height": 417} outputId="68bb7f49-c960-4060-cac7-5eed69c7f0a2" # The test dataset has 102 rows (notice how the indexes are randomly suffled) cars_test # + [markdown] id="5MXCQfSFfRe4" # ## Creating and Training our Linear Regression Machine Learning Model using `smf.ols()` # # `smf.ols()` is the function we use to create a linear regression model. OLS stands for ordinary least square models. # # # It requires the following syntax # # ``` # model = smf.ols(formula="target ~ feature_1 + feature_2 + feature_3 + ... + feature_n", data=df).fit() # ``` # # - where: # - `formula`: the formula of the model as string in the format of `"target ~ feature_1 + feature_2 + feature_3 + ... + feature_n"` # - The target and features are separated by `~` # - The features are seprated by `+` # - `data`: the name of your DataFrame # - `.fit()`: is a function that trains the model to fit the data # # # This is the syntax of `.predict()`: # # - If we pass nothing into the model, it will return the prediction based on what is was trained on: # ``` # model.predict() # ``` # - If we our pass our testing_data or other new data, it will return the prediction for the testing/new data| # ``` # model.predict(df_test) # ``` # # + id="17w4Ul3dfRe5" colab={"base_uri": "https://localhost:8080/", "height": 874} outputId="eff516dc-b901-4ecf-9340-d9d9f2581df4" # Lets create and train our model cars_model = smf.ols(formula="Miles_per_Gallon ~ Horsepower + Weight_in_lbs + Year", data=cars_train).fit() # Get prediciton from the linear model and make it a new colunn on the training DataFrame cars_train['Miles_per_Gallon_Prediction'] = cars_model.predict() # Get the difference between the predicted and observed/actual value cars_train['Miles_per_Gallon_Difference'] = cars_train['Miles_per_Gallon_Prediction']-cars_train['Miles_per_Gallon'] # Show DataFrame cars_train # + [markdown] 
id="KKjmitomfRe5" # ## Evaluating the Fit of the Model: Root Mean Squared Error (RMSE) # # Root Mean Squared Error (RMSE) is used to assess how well a regression model first the dataset. It tells the average distance between the predicted values from the model and the actual/observed values. # # The formula for RMSE is as follows: # # $$ # RMSE = \sqrt{\Sigma \frac{(P_{i} - O_{i})^2}{n}} # $$ # # Where: # - $RMSE$ is root mean squared error # - $P$ is predicted value # - $O$ is observed/actual value # - $i$ is a single instance # - $n$ is the sample size # # # A lower RMSE indicates a better fitting model. # ↓ RMSE = ↑ Fit # + id="m8wT6dZ5fRe5" outputId="1b6d0aab-f9e5-4f81-9cb8-838712ca0e4c" # Get sample size (n) n = len(cars_train) # Calculate RMSE rmse_train = np.sqrt((cars_train['Miles_per_Gallon_Difference'].pow(2).sum())/n) # Print the RMSE print(f'The RMSE of the training data is {rmse_train}') # + [markdown] id="x5e6D5oBfRe6" # ## Trying the model on the testing set # # We need to test how our model performs on data it has never seen before. 
To do this we must again use `.predict()` # # ### Recall: # # This is the syntax of `.predict()`: # # - If we pass nothing into the model, it will return the prediction based on what is was trained on: # ``` # model.predict() # ``` # - If we our pass our testing_data or other new data, it will return the prediction for the testing/new data| # ``` # model.predict(df_test) # ``` # + id="VE7PjpPmfRe7" outputId="e7927fe9-59c7-424b-919e-a144de3c3582" # Get prediciton from the linear model and make it a new colunn on the training DataFrame cars_test['Miles_per_Gallon_Prediction'] = cars_model.predict(cars_test) # notice how we are passing the test data now, NOT train # Get the difference between the predicted and observed/actual value cars_test['Miles_per_Gallon_Difference'] = cars_test['Miles_per_Gallon_Prediction']- cars_test['Miles_per_Gallon'] # Show DataFrame cars_test # + id="a-c_HroUfRe7" outputId="d3e8bb5e-44f6-40c7-95b3-eced435721e4" # Get sample size (n) n = len(cars_test) # Calculate RMSE rmse_test = np.sqrt((cars_test['Miles_per_Gallon_Difference'].pow(2).sum())/n) # Print the RMSE print(f'The RMSE of the testing data is {rmse_test}') # + [markdown] id="oXCSH75TfRe7" # ### Exercise 2: The RMSE for the testing data is higher than the RMSE for the training data. Why do you think that is? # # [double click to write your answer] # + [markdown] id="1v32y7ajfRe8" # ## Using the model to predict new scenerios. # # The RMSE values of the car model are actually pretty low. This gives us confidence in our model. We can now use our model to predict what the fuel efficiency of an entirely new car might be like. # # # + [markdown] id="N3rtGEO5fRe8" # ### Create a new data frame of our predicted data # # First we must create a DataFrame that has the the exact same column names (case-senstive) as our feature columns. We will be creating a DataFrame using the dictionary method. 
Please read the [documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas). # DataFrame.from_dict.html) for more details. # # Lets say we want estimate the fuel efficiency (`"Miles_per_Gallon"`) of two different cars that we are thinking of buying. # # **Car 1** # - Horsepower of 67 hp # - Weight of 2700 lb # - Made in 1970 # # **Car 2** # - Horsepower of 90 hp # - Weight of 3500 lb # - Made in 2002 # # # # # + id="CVfSdsqffRe8" colab={"base_uri": "https://localhost:8080/", "height": 110} outputId="da4a4055-83c4-4570-dc3c-8a1c93462860" # Create new_prediction DataFrame new_prediction = pd.DataFrame({ "Name": ["Car 1", "Car 2"], "Horsepower": [67, 90], "Weight_in_lbs": [2700, 3500], "Year": [1970, 2002], }) # Show DataFrame new_prediction # + id="bZspKUk6fRe9" colab={"base_uri": "https://localhost:8080/", "height": 110} outputId="2497eb74-b4bc-44a6-ff7c-33e7cb27eee6" # Lets create and train our model new_prediction["Fuel Efficiency"] = cars_model.predict(new_prediction) # Show DataFrame new_prediction # + [markdown] id="0E_ph2NmfRe9" # We used the model # + [markdown] id="dGi_x2zgfRe9" # ### Exercise 3: Predict how much the fuel efficiency of these three cars? 
# # **Car 1**
# - Horsepower of 90 hp
# - Weight of 1200 lb
# - Made in 2006
#
# **Car 2**
# - Horsepower of 90 hp
# - Weight of 3500 lb
# - Made in 2002
#
# **Car 3**
# - Horsepower of 80 hp
# - Weight of 2500 lb
# - Made in 2040

# + id="Y6RGJKZIfRe-" colab={"base_uri": "https://localhost:8080/", "height": 142} outputId="816b04e7-60dc-4b8b-e86a-ab414672f7c3"
# TO-DO: Predict the fuel efficiency of the three cars
# Create new_prediction2 DataFrame.
# FIX: Car 1's horsepower is 90 per the exercise statement above (the previous answer used 67).
new_prediction2 = pd.DataFrame({
    "Name": ["Car 1", "Car 2", "Car 3"],
    "Horsepower": [90, 90, 80],
    "Weight_in_lbs": [1200, 3500, 2500],
    "Year": [2006, 2002, 2040],
})

# Show DataFrame
new_prediction2

# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="WNf38aDkcsxW" outputId="33b1843f-f270-4f3d-c157-b05c110d3a7f"
# Predict with the already-trained cars model (no re-training happens here)
new_prediction2["Fuel Efficiency"] = cars_model.predict(new_prediction2)

# Show DataFrame
new_prediction2

# + [markdown] id="QHOXGPcreYqJ"
# ### Exercise 4: Create, train and run a linear regression model on the dataset you selected

# + id="7y5gaMI6ehTb" colab={"base_uri": "https://localhost:8080/"} outputId="c0127fb6-5fd7-4246-8c19-c2be0c1973c9"
# Lets import sklearn, along with our other data science packages
import numpy as np  # For math- and matrix-based functions
import pandas as pd  # For DataFrame functionality
import seaborn as sns  # For plotting / data visualization
from sklearn.model_selection import train_test_split  # For the train_test_split function
import statsmodels.formula.api as smf  # Stats models for ordinary least squares (OLS) regression

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="z9x04ckRqSV_" outputId="4a080705-f740-46b6-c910-01e48246a740"
# Create a list of missing values and save as a variable
missing_values = ['n/a', 'na', 'NaN', 'NA', '--']

# Import dataset (Star Wars characters), treating the strings above as NaN
characters = pd.read_csv("https://raw.githubusercontent.com/nguyenjenny/spark_shared_repo/main/datasets/characters.csv", na_values=missing_values)

# Drop rows with any missing value, then remove row 34 (outlier)
characters = characters.dropna(axis="rows", how="any")
characters = characters.drop(34, axis=0)
characters

# + colab={"base_uri": "https://localhost:8080/", "height": 881} id="jxi4_zlgqsUi" outputId="b5db9435-fd8d-4600-caa3-4e844cdcd7cc"
# Split the dataset: 90% train / 10% test (fixed random_state for reproducibility)
characters_train, characters_test = train_test_split(characters, test_size=.1, train_size=.9, random_state=1)
characters_train

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="nk-yRXyCrOLF" outputId="384e5222-8c34-4a91-8832-655dcbaf7d4c"
# Create and train our model: OLS regression of Mass on Gender and Height
characters_model = smf.ols(formula="Mass ~ Gender + Height", data=characters_train).fit()

# Get prediction from the linear model and make it a new column on the training DataFrame
characters_train['Mass_Prediction'] = characters_model.predict()

# Get the difference (residual) between the predicted and observed/actual value
characters_train['Mass_Difference'] = characters_train['Mass_Prediction'] - characters_train['Mass']
characters_train

# + colab={"base_uri": "https://localhost:8080/"} id="wzbK00qsuTFH" outputId="21cf36a2-220f-46eb-cdb4-8042bd0ab277"
# Get sample size (n)
n = len(characters_train)

# Calculate RMSE on the training set
rmse_train = np.sqrt((characters_train['Mass_Difference'].pow(2).sum())/n)

# Print the RMSE
print(f'The RMSE of the training data is {rmse_train}')

# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="-J1Ufnocv95O" outputId="a053ae89-1ca9-4439-afbc-4577cfce021e"
# Get prediction from the linear model and make it a new column on the testing DataFrame
characters_test['Mass_Prediction'] = characters_model.predict(characters_test)  # notice how we are passing the test data now, NOT train

# Get the difference between the predicted and observed/actual value
characters_test['Mass_Difference'] = characters_test['Mass_Prediction'] - characters_test['Mass']

# Show DataFrame
characters_test

# + colab={"base_uri": "https://localhost:8080/"} id="NWMjKiqCwd3S" outputId="eb631eec-ea5a-4afc-e10d-34fb95aed6da"
# Get sample size (n)
n = len(characters_test)

# Calculate RMSE on the held-out testing set
rmse_test = np.sqrt((characters_test['Mass_Difference'].pow(2).sum())/n)

# Print the RMSE
print(f'The RMSE of the testing data is {rmse_test}')

# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="iIaGDfaNujHo" outputId="bfd4fc4a-fa0e-440d-896b-f20e8a32f24f"
# Create new_prediction DataFrame of hypothetical characters to predict Mass for
new_prediction = pd.DataFrame({
    "Name": ["Char 1", "Char 2", "Char 3", "Char 4", "Char 5", "Char 6", "Char 7"],
    "Gender": ["Male", "Female", "Male", "Female", "Male", "Female", "Male"],
    "Height": [200, 200, 150, 30, 100, 120, 130]
})

# Show DataFrame
new_prediction

# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="ibUJfaV9xrZq" outputId="d7e0b828-e8f2-4c27-c1a9-a05dcdda3770"
# Predict Mass for the hypothetical characters with the already-trained model
new_prediction["Mass"] = characters_model.predict(new_prediction)

# Show DataFrame
new_prediction

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="zalpjpCYgsXP" outputId="d603b656-8e5c-45b6-b08c-d9185a6c9e6c"
# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0 — use pd.concat instead.
pd.concat([characters_train, new_prediction])

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="i4WeUEv8idOt" outputId="e4d05e78-0eb1-4e9c-cc22-4fcaef9f7159"
# Graph the training data with a fitted regression line
graph = sns.regplot(
    data=characters_train,
    x="Mass",
    y="Height",
)
graph.set_title("Character Height (cm) vs Mass (kg)")

# + colab={"base_uri": "https://localhost:8080/", "height": 506} id="xGV0XGcTm2qK" outputId="ab7116b6-76ca-4c1d-a41c-29140519b5c1"
characters_model.summary()
group_01/SPARK_Day4_Beto.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os
import torch
import torch.utils.data
import pickle
import numpy as np
import random
import itertools
from tqdm import tqdm


class CustomDataPreprocessorForCNN():
    """Pre-processes raw pedestrian trajectory datasets into fixed-length
    input/output tensor pairs for a CNN, with optional rotation / flip /
    permutation data augmentation, and pickles train/dev/test splits.

    NOTE(review): raw rows appear to be [frame_id, ped_id, x, y] — columns 2/3
    are treated as x/y throughout; confirm against the dataset format.
    """

    def __init__(self, input_seq_length=5, pred_seq_length=5, datasets=[i for i in range(37)], dev_ratio=0.1, test_ratio=0.1, forcePreProcess=False, augmentation=False):
        # NOTE(review): the `datasets` parameter is never used (all paths below are
        # always processed); also a list default argument is an anti-pattern.
        self.data_paths = ['./data/train/raw/biwi/biwi_hotel.txt', './data/train/raw/crowds/arxiepiskopi1.txt', './data/train/raw/crowds/crowds_zara02.txt', './data/train/raw/crowds/crowds_zara03.txt', './data/train/raw/crowds/students001.txt', './data/train/raw/crowds/students003.txt', './data/train/raw/stanford/bookstore_0.txt', './data/train/raw/stanford/bookstore_1.txt', './data/train/raw/stanford/bookstore_2.txt', './data/train/raw/stanford/bookstore_3.txt', './data/train/raw/stanford/coupa_3.txt', './data/train/raw/stanford/deathCircle_0.txt', './data/train/raw/stanford/deathCircle_1.txt', './data/train/raw/stanford/deathCircle_2.txt', './data/train/raw/stanford/deathCircle_3.txt', './data/train/raw/stanford/deathCircle_4.txt', './data/train/raw/stanford/gates_0.txt', './data/train/raw/stanford/gates_1.txt', './data/train/raw/stanford/gates_3.txt', './data/train/raw/stanford/gates_4.txt', './data/train/raw/stanford/gates_5.txt', './data/train/raw/stanford/gates_6.txt', './data/train/raw/stanford/gates_7.txt', './data/train/raw/stanford/gates_8.txt', './data/train/raw/stanford/hyang_4.txt', './data/train/raw/stanford/hyang_5.txt', './data/train/raw/stanford/hyang_6.txt', './data/train/raw/stanford/hyang_7.txt', './data/train/raw/stanford/hyang_9.txt', './data/train/raw/stanford/nexus_0.txt', './data/train/raw/stanford/nexus_1.txt', './data/train/raw/stanford/nexus_2.txt', './data/train/raw/stanford/nexus_3.txt', './data/train/raw/stanford/nexus_4.txt', './data/train/raw/stanford/nexus_7.txt', './data/train/raw/stanford/nexus_8.txt', './data/train/raw/stanford/nexus_9.txt']
        # Number of datasets
        self.numDatasets = len(self.data_paths)
        # Data directory where the pre-processed pickle file resides
        self.data_dir = './data/train/processed'
        # Store the arguments
        self.input_seq_length = input_seq_length
        self.pred_seq_length = pred_seq_length
        # Dev Ratio
        self.dev_ratio = dev_ratio
        # Test Ratio
        self.test_ratio = test_ratio
        # Buffer for storing raw data.
        self.raw_data = []
        # Buffer for storing processed data.
        self.processed_input_output_pairs = []
        # Scale Factor for x and y (computed in self.normalize())
        self.scale_factor_x = None
        self.scale_factor_y = None
        # Data augmentation flag
        self.augmentation = augmentation
        # Rotation increment (deg) for data augmentation (only valid if augmentation is True)
        self.rot_deg_increment = 120
        # How many pedestrian permutations to consider (only valid if augmentation is True)
        self.permutations = 4
        # Define the path in which the processed data would be stored
        self.processed_train_data_file = os.path.join(self.data_dir, "trajectories_cnn_train.cpkl")
        self.processed_dev_data_file = os.path.join(self.data_dir, "trajectories_cnn_dev.cpkl")
        self.processed_test_data_file = os.path.join(self.data_dir, "trajectories_cnn_test.cpkl")
        # If any pickle file doesn't exist yet, or forcePreProcess is true, run the full pipeline
        if not(os.path.exists(self.processed_train_data_file)) or not(os.path.exists(self.processed_dev_data_file)) or not(os.path.exists(self.processed_test_data_file)) or forcePreProcess:
            print("============ Normalizing raw data (after rotation data augmentation) ============")
            self.normalize()
            print("============ Creating pre-processed training, dev and test data for CNN ============")
            self.preprocess()

    def normalize(self):
        """Load every raw dataset (optionally adding rotated copies), then
        rescale all x/y coordinates jointly to the range [-1, 1].

        Side effects: fills self.raw_data and sets self.scale_factor_x/_y.
        """
        if self.augmentation:
            print('--> Data Augmentation: Rotation (by ' + str(self.rot_deg_increment) + ' deg incrementally up to 360 deg)')
        for path in self.data_paths:
            # Load data from txt file.
            txtfile = open(path, 'r')
            lines = txtfile.read().splitlines()
            data = [line.split() for line in lines]
            # Sort rows by frame id, then transpose so each column is one observation.
            data = np.transpose(sorted(data, key=lambda line: int(line[0]))).astype(float)
            self.raw_data.append(data)
            if self.augmentation:
                # Rotate data by deg_increment deg sequentially for data augmentation (only rotation is considered here)
                deg_increment_int = int(self.rot_deg_increment)
                for deg in range(deg_increment_int, 360, deg_increment_int):
                    data_rotated = np.zeros_like(data)
                    rad = np.radians(deg)
                    c, s = np.cos(rad), np.sin(rad)
                    Rot = np.array(((c,-s), (s, c)))
                    for ii in range(data.shape[1]):
                        # Keep frame/ped ids, rotate only the (x, y) coordinates.
                        data_rotated[0:2, ii] = data[0:2, ii]
                        data_rotated[2:, ii] = np.dot(Rot, data[2:, ii])
                    self.raw_data.append(data_rotated)
        # Find x_max, x_min, y_max, y_min across all the data.
        x_max_global, x_min_global, y_max_global, y_min_global = -1000, 1000, -1000, 1000
        for data in self.raw_data:
            x = data[2,:]
            x_min, x_max = min(x), max(x)
            if x_min < x_min_global:
                x_min_global = x_min
            if x_max > x_max_global:
                x_max_global = x_max
            y = data[3,:]
            y_min, y_max = min(y), max(y)
            if y_min < y_min_global:
                y_min_global = y_min
            if y_max > y_max_global:
                y_max_global = y_max
        # (range)/(2): factor to map the normalized [-1, 1] range back to data units.
        self.scale_factor_x = (x_max_global - x_min_global)/(1 + 1)
        self.scale_factor_y = (y_max_global - y_min_global)/(1 + 1)
        # Normalize all the data to range from -1 to 1.
        for data in self.raw_data:
            x = data[2,:]
            x = (1 + 1)*(x - x_min_global)/(x_max_global - x_min_global)
            x = x - 1.0
            for jj in range(len(x)):
                # Snap near-zero values to exactly 0.0 (0.0 doubles as the
                # "pedestrian absent" marker in preprocess()).
                if abs(x[jj]) < 0.0001:
                    data[2,jj] = 0.0
                else:
                    data[2,jj] = x[jj]
            y = data[3,:]
            y = (1 + 1)*(y - y_min_global)/(y_max_global - y_min_global)
            y = y - 1.0
            for jj in range(len(y)):
                if abs(y[jj]) < 0.0001:
                    data[3,jj] = 0.0
                else:
                    data[3,jj] = y[jj]
        '''
        # Sanity check.
        # Find x_max, x_min, y_max, y_min across all the data.
        x_max_global, x_min_global, y_max_global, y_min_global = -1000, 1000, -1000, 1000
        for data in self.raw_data:
            x = data[2,:]
            x_min, x_max = min(x), max(x)
            if x_min < x_min_global:
                x_min_global = x_min
            if x_max > x_max_global:
                x_max_global = x_max
            y = data[3,:]
            y_min, y_max = min(y), max(y)
            if y_min < y_min_global:
                y_min_global = y_min
            if y_max > y_max_global:
                y_max_global = y_max
        print(x_min_global, x_max_global)
        print(y_min_global, y_max_global)
        '''

    def preprocess(self):
        """Cut each normalized dataset into non-overlapping sequences of
        (input_seq_length + pred_seq_length) consecutive frames, build one
        (input, output) tensor pair per sequence, optionally augment, shuffle,
        and pickle the train/dev/test splits.

        Tensor layout: rows 2*j and 2*j+1 hold pedestrian j's x and y;
        absent pedestrians are left as 0.0 fill.
        """
        random.seed(1) # Random seed for pedestrian permutation and data shuffling
        for data in self.raw_data:
            # Frame IDs of the frames in the current dataset
            frameList = np.unique(data[0, :].astype(int)).tolist()
            #print(frameList)
            numFrames = len(frameList)
            # Frame ID increment for this dataset (smallest gap between consecutive frame ids).
            frame_increment = np.min(np.array(frameList[1:-1]) - np.array(frameList[0:-2]))
            # For this dataset check which pedestrians exist in each frame.
            pedsInFrameList = []
            pedsPosInFrameList = []
            for ind, frame in enumerate(frameList):
                # For this frame check the pedestrian IDs.
                pedsInFrame = data[:, data[0, :].astype(int) == frame]
                pedsList = pedsInFrame[1, :].astype(int).tolist()
                pedsInFrameList.append(pedsList)
                # Position information for each pedestrian.
                pedsPos = []
                for ped in pedsList:
                    # Extract x and y positions
                    current_x = pedsInFrame[2, pedsInFrame[1, :].astype(int) == ped][0]
                    current_y = pedsInFrame[3, pedsInFrame[1, :].astype(int) == ped][0]
                    pedsPos.extend([current_x, current_y])
                    if (current_x == 0.0 and current_y == 0.0):
                        # (0.0, 0.0) is also the fill value for absent pedestrians, hence the warning.
                        print('[WARNING] There exists a pedestrian at coordinate [0.0, 0.0]')
                pedsPosInFrameList.append(pedsPos)
            # Go over the frames in this data again to extract data.
            ind = 0
            while ind < len(frameList) - (self.input_seq_length + self.pred_seq_length):
                # Check if this sequence contains consecutive frames. Otherwise skip this sequence.
                if not frameList[ind + self.input_seq_length + self.pred_seq_length - 1] - frameList[ind] == (self.input_seq_length + self.pred_seq_length - 1)*frame_increment:
                    ind += 1
                    continue
                # List of pedestrians in this sequence.
                pedsList = np.unique(np.concatenate(pedsInFrameList[ind : ind + self.input_seq_length + self.pred_seq_length])).tolist()
                # Print the Frame numbers and pedestrian IDs in this sequence for sanity check.
                # print(str(int(self.input_seq_length + self.pred_seq_length)) + ' frames starting from Frame ' + str(int(frameList[ind])) + ' contain pedestrians ' + str(pedsList))
                # Initialize numpy arrays for input-output pair
                data_input = np.zeros((2*len(pedsList), self.input_seq_length))
                data_output = np.zeros((2*len(pedsList), self.pred_seq_length))
                for ii in range(self.input_seq_length):
                    for jj in range(len(pedsList)):
                        if pedsList[jj] in pedsInFrameList[ind + ii]:
                            datum_index = pedsInFrameList[ind + ii].index(pedsList[jj])
                            data_input[2*jj:2*(jj + 1), ii] = np.array(pedsPosInFrameList[ind + ii][2*datum_index:2*(datum_index + 1)])
                for ii in range(self.pred_seq_length):
                    for jj in range(len(pedsList)):
                        if pedsList[jj] in pedsInFrameList[ind + self.input_seq_length + ii]:
                            datum_index = pedsInFrameList[ind + self.input_seq_length + ii].index(pedsList[jj])
                            data_output[2*jj:2*(jj + 1), ii] = np.array(pedsPosInFrameList[ind + self.input_seq_length + ii][2*datum_index:2*(datum_index + 1)])
                processed_pair = (torch.from_numpy(data_input), torch.from_numpy(data_output))
                self.processed_input_output_pairs.append(processed_pair)
                # Jump past this sequence (non-overlapping windows).
                ind += self.input_seq_length + self.pred_seq_length
        print('--> Data Size: ' + str(len(self.processed_input_output_pairs)))
        if self.augmentation:
            # Perform data augmentation
            self.augment_flip()
            self.augment_permute()
        else:
            print('--> Skipping data augmentation')
        # Shuffle data.
        print('--> Shuffling all data before saving')
        random.shuffle(self.processed_input_output_pairs)
        # Split data into train, dev, and test sets.
        dev_size = int(len(self.processed_input_output_pairs)*self.dev_ratio)
        test_size = int(len(self.processed_input_output_pairs)*self.test_ratio)
        processed_dev_set = self.processed_input_output_pairs[:dev_size]
        processed_test_set = self.processed_input_output_pairs[dev_size:dev_size+test_size]
        processed_train_set = self.processed_input_output_pairs[dev_size+test_size:]
        print('--> Dumping dev data with size ' + str(len(processed_dev_set)) + ' to pickle file')
        f_dev = open(self.processed_dev_data_file, 'wb')
        pickle.dump(processed_dev_set, f_dev, protocol=2)
        f_dev.close()
        print('--> Dumping test data with size ' + str(len(processed_test_set)) + ' to pickle file')
        f_test = open(self.processed_test_data_file, 'wb')
        pickle.dump(processed_test_set, f_test, protocol=2)
        f_test.close()
        print('--> Dumping train data with size ' + str(len(processed_train_set)) + ' to pickle file')
        f_train = open(self.processed_train_data_file, 'wb')
        pickle.dump(processed_train_set, f_train, protocol=2)
        f_train.close()
        # Clear buffer
        self.raw_data = []
        self.processed_input_output_pairs = []

    def augment_flip(self):
        """Append a y-flipped copy of every processed pair (negate every
        pedestrian's y row; x rows unchanged)."""
        print('--> Data Augmentation: Y Flip')
        augmented_input_output_pairs = []
        for processed_input_output_pair in tqdm(self.processed_input_output_pairs):
            data_input, data_output = processed_input_output_pair[0].numpy(), processed_input_output_pair[1].numpy()
            num_peds = int(data_input.shape[0]/2)
            # Flip y
            data_input_yflipped = np.zeros_like(data_input)
            data_output_yflipped = np.zeros_like(data_output)
            for kk in range(num_peds):
                data_input_yflipped[2*kk, :] = data_input[2*kk, :]
                data_input_yflipped[2*kk+1, :] = -1*data_input[2*kk+1, :]
                data_output_yflipped[2*kk, :] = data_output[2*kk, :]
                data_output_yflipped[2*kk+1, :] = -1*data_output[2*kk+1, :]
            processed_pair_yflipped = (torch.from_numpy(data_input_yflipped), torch.from_numpy(data_output_yflipped))
            augmented_input_output_pairs.append(processed_pair_yflipped)
        self.processed_input_output_pairs.extend(augmented_input_output_pairs)
        print('--> Augmented Data Size: ' + str(len(self.processed_input_output_pairs)))

    def augment_permute(self):
        """Append self.permutations randomly pedestrian-permuted copies of
        every processed pair (the same row permutation is applied to input
        and output so pairs stay aligned)."""
        # Specify how many pedestrian permutations to consider per input-output pair
        print('--> Data Augmentation: Pedestrian Permutation (' + str(self.permutations) + ' random permutations per input-output pair)')
        augmented_input_output_pairs = []
        for processed_input_output_pair in tqdm(self.processed_input_output_pairs):
            data_input, data_output = processed_input_output_pair[0].numpy(), processed_input_output_pair[1].numpy()
            num_peds = int(data_input.shape[0]/2)
            for ii in range(self.permutations):
                perm = np.random.permutation(num_peds)
                data_input_permuted = np.zeros_like(data_input)
                data_output_permuted = np.zeros_like(data_output)
                for jj in range(len(perm)):
                    data_input_permuted[2*jj:2*(jj+1), :] = data_input[2*perm[jj]:2*(perm[jj]+1), :]
                    data_output_permuted[2*jj:2*(jj+1), :] = data_output[2*perm[jj]:2*(perm[jj]+1), :]
                processed_pair_permuted = (torch.from_numpy(data_input_permuted), torch.from_numpy(data_output_permuted))
                augmented_input_output_pairs.append(processed_pair_permuted)
        self.processed_input_output_pairs.extend(augmented_input_output_pairs)
        print('--> Augmented Data Size: ' + str(len(self.processed_input_output_pairs)))


# Run the full preprocessing pipeline (forced, with augmentation).
processed = CustomDataPreprocessorForCNN(forcePreProcess=True, augmentation=True)

# Inspect the computed scale factors (notebook cell outputs).
processed.scale_factor_x
processed.scale_factor_y

# Re-open the pickled splits to sanity-check their sizes.
train_file = open(processed.processed_train_data_file, 'rb')
dev_file = open(processed.processed_dev_data_file, 'rb')
test_file = open(processed.processed_test_data_file, 'rb')
processed.processed_train_data_file
train = pickle.load(train_file)
dev = pickle.load(dev_file)
test = pickle.load(test_file)
len(train)
len(dev)
len(test)


class CustomDatasetForCNN(torch.utils.data.Dataset):
    """Thin torch Dataset over one pickled list of (input, output) pairs."""

    def __init__(self, file_path):
        # Load the whole pickle into memory once; the file handle is closed immediately.
        self.file_path = file_path
        self.file = open(self.file_path, 'rb')
        self.data = pickle.load(self.file)
        self.file.close()

    def __getitem__(self, index):
        # Returns the (input_tensor, output_tensor) pair at `index`.
        item = self.data[index]
        return item

    def __len__(self):
        return len(self.data)


# Smoke-test the Dataset / DataLoader wiring.
train_set = CustomDatasetForCNN(processed.processed_train_data_file)
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=1, shuffle=True)
x, y = train_set.__getitem__(9)
x
next(iter(train_loader))
# NOTE(review): duplicate of the cell above — retained as-is from the notebook.
x, y = train_set.__getitem__(9)
len(train_loader)
ipynb/dataloader_for_cnn_test_fill_0_mix_all_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

sns.set(style="white")
sns.set_context("talk")

# Class Central 2016-17 survey export (comma decimal separator, Latin-1 encoded).
df = pd.read_csv('raw/2016-17-ClassCentral-Survey-data-noUserText.csv', decimal=',', encoding = "ISO-8859-1")

# Keep the respondent's region plus the five multi-select "reasons" columns.
reason = df[['Which region of the world are you in?',
             'Reasons: Learning skills for current career',
             'Reasons: Learning skills for new career',
             'Reasons: School credit',
             'Reasons: Personal interest',
             'Reasons: Access to reference materials']]
reason.head()

# Reshape to long format: one row per (region, reason) response.
multi_reason = pd.melt(reason, id_vars='Which region of the world are you in?',
                       var_name='select', value_name='score')
multi_reason.head()

# Sum the scores per (region, reason) combination.
grouped_reason = multi_reason.groupby(['Which region of the world are you in?', 'select'],
                                      as_index=False).sum()
grouped_reason.head()

# FIX: seaborn no longer exposes matplotlib as `sns.plt` — use the `plt` module
# imported above.  `factorplot(..., size=...)` was renamed to
# `catplot(..., height=...)` in seaborn 0.9, so use the modern API.
sns.catplot(x='score', y='select', hue='Which region of the world are you in?',
            data=grouped_reason, kind='bar', height=8, aspect=1.5)
plt.title('Which of the following are important reasons for you to take MOOCs?')
plt.show()
class-central-survey-2016-17/multi-select-statistic.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd

df_original = pd.read_csv('BankChurners.csv')
df_original.head()
# -

# ## Deleting unnecessary columns
#
# These columns are specified in the documentation as not being part of the original data.

columns_to_drop = [
    'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1',
    'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2',
]
df = df_original.drop(columns=columns_to_drop)
df.head()

df.shape

df.describe()

# ## Exploring null values

df.isnull().sum()

df.loc[df['Customer_Age'] == 0, ['Customer_Age']].sum()

df['Education_Level'].unique()

df.loc[df['Education_Level'] == 'Unknown', ['Education_Level']].shape[0] / df.shape[0]

# It appears that the null values in the data are notated as 'Unknown'.
# Therefore, let's make a function which detects and counts the number of nulls in this df.

# +
def null_detector(df, column):
    """Return the fraction of rows in `df` whose `column` equals 'Unknown'."""
    unknown_mask = df[column] == 'Unknown'
    return int(unknown_mask.sum()) / df.shape[0]

null_detector(df, 'Education_Level')
# -

for column in df.columns:
    print(f"{column}={null_detector(df, column)}")

# We can observe that there are three columns which have null values.
# In this case we can try and consider the nulls as part of the data.

df['Income_Category'].unique()
DataExploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="zEwdT0SUugv9" colab_type="text" pycharm={"name": "#%% md\n"}
# # Linear Regression Model
# Date: 2019 09 12
#
# Updated: 2020 01 12

# + id="zoX_-mpVpr68" colab_type="code" colab={}
# importing libraries
import pandas as pd
import seaborn as sns
# %matplotlib inline

# + [markdown] id="-SX3tN0OwXGo" colab_type="text"
# ## Load the dataset and extract independent and dependent variables

# + id="ev_BZPzhrXnp" colab_type="code" colab={}
# Importing the dataset
companies = pd.read_csv('1000_Companies.csv')
x = companies.iloc[:, :-1].values
# FIX: the target is the single last column (Profit, column index 4), not the
# first four columns (the previous `iloc[:, :4]` selected the features again).
y = companies.iloc[:, 4].values
companies.head()

# + [markdown] id="kUbVuGCJwSoA" colab_type="text"
# ## Data Visualization

# + id="sEDcinIvrZ47" colab_type="code" colab={}
# Building the correlation matrix
sns.heatmap(companies.corr())

# + id="JCMKsvHbrdUu" colab_type="code" colab={}
# Encoding categorical data (column 3 = State)
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder = LabelEncoder()
x[:, 3] = labelencoder.fit_transform(x[:, 3])
# NOTE(review): `categorical_features` was deprecated in scikit-learn 0.20 and
# removed in 0.22 — migrate to ColumnTransformer when upgrading sklearn.
onehotencoder = OneHotEncoder( categorical_features = [3] )
X = onehotencoder.fit_transform(x).toarray()

# + id="Dkm3UJXhwpik" colab_type="code" colab={}
# check the output
print(X)
# Drop the first dummy column to avoid the dummy-variable trap.
X = X[:, 1:]

# + id="jbceX2ihrgPK" colab_type="code" colab={}
# Split the dataset into Training set and Test set.
# FIX: train_test_split returns (X_train, X_test, y_train, y_test) in that
# order — the previous unpack (`X_train, Y_test, y_train, x_test`) mislabeled
# the splits, so the model was "predicting" on y and scored against X.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# + id="T0F6-sCCuVl-" colab_type="code" colab={}
# Fitting Multiple Linear Regression to the Training data
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# + id="cFtNRRDMrlqQ" colab_type="code" colab={}
# Predicting the Test set results
y_pred = regressor.predict(X_test)
y_pred

# + [markdown] id="Yja22ZAwweel" colab_type="text"
# ## Calculating the Coefficients and Intercept

# + id="Zxt1AWWDn5H4" colab_type="code" colab={}
# Calculating the coefficients
print(regressor.coef_)
# Calculating the intercept.
# FIX: the fitted attribute is `intercept_` — `intercepts_` raised AttributeError.
print(regressor.intercept_)

# + [markdown] id="JkuSD7IcvqID" colab_type="text"
# ## Evaluating the model

# + id="4QYxctZwrojA" colab_type="code" colab={}
# Calculating the R squared value on the held-out test set
from sklearn.metrics import r2_score
r2_score(y_test, y_pred)
# NOTE: An R squared value of ~0.91 indicates a good fit.
02 Deep Leaning/linear_regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=true editable=true
# # NumPy Exercises - Solutions

# + [markdown] deletable=true editable=true
# #### Import NumPy as np

# + deletable=true editable=true
import numpy as np

# + [markdown] deletable=true editable=true
# #### Create an array of 10 zeros

# + deletable=true editable=true
np.zeros(10)

# + [markdown] deletable=true editable=true
# #### Create an array of 10 ones

# + deletable=true editable=true
np.ones(10)

# + [markdown] deletable=true editable=true
# #### Create an array of 10 fives

# + deletable=true editable=true
# np.full fills a fresh array with the given constant directly
np.full(10, 5.0)

# + [markdown] deletable=true editable=true
# #### Create an array of the integers from 10 to 50

# + deletable=true editable=true
np.arange(10, 51)

# + [markdown] deletable=true editable=true
# #### Create an array of all the even integers from 10 to 50

# + deletable=true editable=true
# take every second element starting at 10
np.arange(10, 51)[::2]

# + [markdown] deletable=true editable=true
# #### Create a 3x3 matrix with values ranging from 0 to 8

# + deletable=true editable=true
np.arange(0, 9).reshape(3, 3)

# + [markdown] deletable=true editable=true
# #### Create a 3x3 identity matrix

# + deletable=true editable=true
np.identity(3)

# + [markdown] deletable=true editable=true
# #### Use NumPy to generate a random number between 0 and 1

# + deletable=true editable=true
np.random.rand(1)

# + [markdown] deletable=true editable=true
# #### Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution

# + deletable=true editable=true
np.random.randn(25)

# + [markdown] deletable=true editable=true
# #### Create the following matrix:

# + deletable=true editable=true
# the values 0.01, 0.02, ..., 1.00 laid out on a 10x10 grid
(np.arange(100) + 1).reshape(10, 10) / 100

# + [markdown] deletable=true editable=true
# #### Create an array of 20 linearly spaced points between 0 and 1:

# + deletable=true editable=true
np.linspace(0, 1, num=20)

# + [markdown] deletable=true editable=true
# ## Numpy Indexing and Selection
#
# Now you will be given a few matrices, and be asked to replicate the resulting matrix outputs:

# + deletable=true editable=true
mat = np.arange(1,26).reshape(5,5)
mat

# + deletable=true editable=true
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE

# + deletable=true editable=true
mat[2:5, 1:5]

# + deletable=true editable=true
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE

# + deletable=true editable=true
mat[3][4]

# + deletable=true editable=true
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE

# + deletable=true editable=true
mat[0:3, 1:2]

# + deletable=true editable=true
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE

# + deletable=true editable=true
mat[4]

# + deletable=true editable=true
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE

# + deletable=true editable=true
mat[3:5]

# + [markdown] deletable=true editable=true
# ### Now do the following

# + [markdown] deletable=true editable=true
# #### Get the sum of all the values in mat

# + deletable=true editable=true
np.sum(mat)

# + [markdown] deletable=true editable=true
# #### Get the standard deviation of the values in mat

# + deletable=true editable=true
np.std(mat)

# + [markdown] deletable=true editable=true
# #### Get the sum of all the columns in mat

# + deletable=true editable=true
np.sum(mat, axis=0)
# -
2.Numpy Exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# RMinimum : Full - Test - Case: $k(n) = n^{\varepsilon}$ - $\varepsilon$ fix

import math
import random
import queue

# Test cases : $k(n) = n^{\varepsilon}$, $X_{n_0} = [0, \cdots, n_0-1], \cdots, X_{n_1}=[0, \cdots, n_1-1]$

# +
# User input : Range [n0, n1]
n0, n1 = 6, 11
eps = 0.5

# Automatic generation: lists X = [0, ..., 2^n - 1] for n = n0, n0+2, ...
lst_X, lst_n = [], []
for n in range(n0, n1 + 1, 2):
    lst_X.append([i for i in range(2**n)])
    lst_n.append(2**n)
lst_k = [int(len(x)**eps) for x in lst_X]

# Show Testcase
print('')
print('Input tuples : (n, k)')
print('=====================')
for i in range(len(lst_X)):
    print(i+1, ':', '(', str(len(lst_X[i])).ljust(5), ',', str(lst_k[i]).ljust(3), ')')
# -

# Algorithm : Full

# +
def rminimum(X, k, cnt=None):
    """Run the randomized RMinimum selection algorithm on X and return the
    per-element comparison counts.

    X   -- list of the integers 0..len(X)-1 (elements double as indices
           into the comparison counter `cnt`)
    k   -- group size parameter, k(n) = n^eps
    cnt -- optional pre-existing counter list (used by the recursion);
           when omitted or empty, a fresh zero counter of len(X) is created.
           FIX: previously `cnt=[]` — a mutable default argument; `None`
           with an explicit guard preserves the old semantics safely.

    Returns cnt, where cnt[e] is the number of comparisons element e
    participated in.
    """
    k = int(k)
    n = len(X)
    if not cnt:
        cnt = [0 for _ in range(len(X))]
    # Base case: three elements are resolved with two direct comparisons.
    if len(X) == 3:
        if X[0] < X[1]:
            cnt[X[0]] += 2
            cnt[X[1]] += 1
            cnt[X[2]] += 1
            if X[0] < X[2]:
                X = X[0]
            else:
                X = X[2]
        else:
            cnt[X[0]] += 1
            cnt[X[1]] += 2
            cnt[X[2]] += 1
            if X[1] < X[2]:
                X = X[1]
            else:
                X = X[2]
        return cnt
    # Phase 1: random pairing into winners W and losers L.
    W, L, cnt = RMinimum_step1(X, cnt)
    # Phase 2: tournament minimum of each k-group of L.
    minele, cnt = RMinimum_step2(L, k, cnt)
    # Phase 3: filter W against the group minima.
    res3, cnt = RMinimum_step3(W, k, minele, cnt)
    # Phase 4: finish directly or recurse on the (small) remainder.
    res4, cnt = RMinimum_step4(res3, k, n, cnt)
    return cnt

# ==================================================

def RMinimum_step1(lst, cnt):
    """Randomly pair the elements of lst; return (winners, losers, cnt).

    Each pair costs one comparison, charged to both participants.
    """
    random.shuffle(lst)
    W = [0 for _ in range(len(lst) // 2)]
    L = [0 for _ in range(len(lst) // 2)]
    for i in range(len(lst) // 2):
        if lst[2 * i] > lst[2 * i + 1]:
            W[i] = lst[2 * i + 1]
            L[i] = lst[2 * i]
        else:
            W[i] = lst[2 * i]
            L[i] = lst[2 * i + 1]
        cnt[lst[2 * i + 1]] += 1
        cnt[lst[2 * i]] += 1
    return W, L, cnt

# ==================================================

def RMinimum_step2(L, k, cnt):
    """Split L into groups of size k and compute each group's minimum by a
    FIFO knockout tournament; return (group minima, cnt)."""
    random.shuffle(L)
    res = [L[i * k:(i + 1) * k] for i in range((len(L) + k - 1) // k)]
    minele = [0 for _ in range(len(res))]
    var = list(res)
    for i in range(len(var)):
        q = queue.Queue()
        for item in var[i]:
            q.put(item)
        # Repeatedly compare two front elements, re-queue the smaller.
        while q.qsize() > 1:
            a = q.get()
            b = q.get()
            if a < b:
                q.put(a)
            else:
                q.put(b)
            cnt[a] += 1
            cnt[b] += 1
        minele[i] = q.get()
    return minele, cnt

# ==================================================

def RMinimum_step3(lst, k, minele, cnt):
    """Filter the winners: group lst by k and keep only elements smaller than
    their group's loser-minimum; return (flattened survivors, cnt)."""
    random.shuffle(lst)
    var = [lst[i * k:(i + 1) * k] for i in range((len(lst) + k - 1) // k)]
    res = [0 for _ in range(len(var))]
    for i in range(len(var)):
        res[i] = [elem for elem in var[i] if elem < minele[i]]
        # Each group element is compared once against the group's minele.
        cnt[minele[i]] += len(var[i])
        for elem in var[i]:
            cnt[elem] += 1
    res = [item for sublist in res for item in sublist]
    return res, cnt

# ==================================================

def RMinimum_step4(newW, k, n, cnt):
    """Finish: if the remainder is small (<= log2(n)^2), find its minimum by a
    direct knockout tournament; otherwise recurse via rminimum.

    newW always contains the overall minimum (the minimum wins its step-1
    pair and beats every group minimum), so the queue is never empty here.
    NOTE(review): in the recursive branch `res` is the counter list returned
    by rminimum, not the minimum element — harmless, as callers use cnt only.
    """
    if len(newW) <= (math.log(n)/math.log(2))**2:
        q = queue.Queue()
        var = list(newW)
        for item in var:
            q.put(item)
        while q.qsize() > 1:
            a = q.get()
            b = q.get()
            if a < b:
                q.put(a)
            else:
                q.put(b)
            cnt[a] += 1
            cnt[b] += 1
        res = q.get()
    else:
        res = rminimum(newW, k, cnt)
    return res, cnt

# ==================================================

# Test case: run the counting experiment for every generated input
lst_cnt = []
for i in range(len(lst_X)):
    lst_cnt.append(rminimum(lst_X[i], lst_k[i]))
# -

# Result : $\mathbb{E}[f_{min}] = \mathcal{O}(\varepsilon^{-1}\log\log(n))$, $\mathbb{E}[f_{rem}]=\mathcal{O}(n^{\varepsilon})$, $\mathbb{E}[work]=\mathcal{O}(n)$.
# + def test(lst_n, lst_k, eps, lst_cnt, n0, n1): print('') print('E[f_min] = O(eps^(-1) * loglog(n)) | E[f_rem] = O(n^eps) | E[work] = O(n)') for i in range(len(lst_n)): n = lst_n[i] f_min = lst_cnt[i][0] f_rem = max(lst_cnt[i][1:]) work = int(sum(lst_cnt[i])/2) E_min = round((eps**(-1) * (math.log(n)/math.log(2)) / (math.log(math.log(n)/math.log(2))/math.log(2))), 2) E_rem = round(n**eps, 2) E_work = n print('') print('Testfall n | k | eps :', lst_n[i], '|', lst_k[i], '|', eps) print('==========================================') print('f_min | E[f_min] | Diff :', f_min, '|', E_min, '|', round(abs(f_min - E_min), 2)) print('------------------------------------------') print('f_rem | E[f_rem] | Diff :', f_rem, '|', E_rem, '|', round(abs(f_rem - E_rem), 2)) print('------------------------------------------') print('Work | E[work] | Diff :', work, '|', E_work, '|', round(abs(work - E_work), 2)) print('==========================================') return # Testfall test(lst_n, lst_k, eps, lst_cnt, n0, n1) # -
jupyter/jupyter_case_eps_fix.eps.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Selecting Data # ## Introduction # # Now that you've gotten a brief introduction to SQL, its time to get some hands-on practice connecting to a database via Python and executing some queries. # ## Objectives # # You will be able to: # - Connect to a SQL database using Python # - Retrieve all information from a SQL table # - Retrieve a subset of records from a table using a `WHERE` clause # - Write SQL queries to filter and order results # - Retrieve a subset of columns from a table # ## Connecting to a Database Using Python # # SQLite databases are stored as files on disk. The one we will be using in this lesson is called `data.sqlite`. # ! ls # (Here the file extension is `.sqlite` but you will also see examples ending with `.db`) # # If we try to read from this file without using any additional libraries, we will get a bunch of garbled nonsense, since this file is encoded as bytes and not plain text: with open("data.sqlite", "rb") as f: print(f.read(100)) # ### Connection # # Instead, we will use the `sqlite3` module ([documentation here](https://docs.python.org/3/library/sqlite3.html)). The way that this module works is that we start by opening a *connection* to the database with `sqlite3.connect`: import sqlite3 conn = sqlite3.connect('data.sqlite') # We will use this connection throughout the lesson, then close it at the end. # # Let's look at some more attributes of the connection: print("Data type:", type(conn)) print("Uncommitted changes:", conn.in_transaction) print("Total changes:", conn.total_changes) # As you can see, since we have only opened the connection and not performed any queries, there are no uncommitted changes and 0 total changes so far. Let's continue on and create a *cursor*. 
# # ### Cursor # # A cursor object is what can actually execute SQL commands. You create it by calling `.cursor()` on the connection. cur = conn.cursor() type(cur) # ### Exploring the Schema # # Let's use the cursor to find out what tables are contained in this database. This requires two steps: # # 1. Executing the query (`.execute()`) # 2. Fetching the results (`.fetchone()`, `.fetchmany()`, or `.fetchall()`) # # This is because some SQL commands (e.g. deleting data) do not require results to be fetched, just commands to be executed. So the interface only fetches the results if you ask for them. # Execute the query cur.execute("""SELECT name FROM sqlite_master WHERE type = 'table';""") # Fetch the result and store it in table_names table_names = cur.fetchall() table_names # So now we know the names of the tables. What if we want to know the schema of the `employees` table? cur.execute("""SELECT sql FROM sqlite_master WHERE type = 'table' AND name = 'employees';""") employee_columns = cur.fetchone() employee_columns # Ok, now we know the names of the columns! # ## ERD Overview # # The database that you've just connected to is the same database you have seen previously, containing data about orders, employees, etc. Here's an overview of the database: # # <img src="images/Database-Schema.png"> # If we want to get all information about the first 5 employee records, we might do something like this (`*` means all columns): cur.execute("""SELECT * FROM employees LIMIT 5;""") cur.fetchall() # Because `.execute()` returns the cursor object, it is also possible to combine the previous two lines into one line, like so: cur.execute("""SELECT * FROM employees LIMIT 5;""").fetchall() # ### Quick Note on String Syntax # # When working with strings, you may have previously seen a `'string'`, a `"string"`, a `'''string'''`, or a `"""string"""`. 
While all of these are strings, the triple quotes have the added functionality of being able to use multiple lines within the same string as well as to use single quotes within the string. Sometimes, SQL queries can be much longer than others, in which case it's helpful to use new lines for readability. Here's the same example, this time with the string spread out onto multiple lines: # + first_five_employees_query = """ SELECT * FROM employees LIMIT 5 ; """ cur.execute(first_five_employees_query).fetchall() # - # ## Wrapping Results into Pandas DataFrames # # Often, a more convenient output will be to turn these results into pandas DataFrames. One way to do this would be to wrap the `c.fetchall()` output with a pandas DataFrame constructor: import pandas as pd df = pd.DataFrame(cur.execute(first_five_employees_query).fetchall()) df # Sadly as you can see this is slightly clunky as we do not have the column names. Pandas just automatically assigns the numbers 0 through 7. # # We can access the column names by calling `cur.description`, like so: cur.description # Then using a list comprehension, assign the column names after we instantiate the dataframe: df.columns = [x[0] for x in cur.description] df # Even better, there is a pandas method directly designed for reading from SQL databases ([documentation here](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html)). Instead of using the cursor, all you need is the connection variable: pd.read_sql(first_five_employees_query, conn) # It is still useful to be aware of the cursor construct in case you ever need to develop Python code that fetches one result at a time, or is a command other than `SELECT`. But in general if you know that the end result is creating a pandas dataframe to display the result, you don't really need to interface with the cursor directly. 
# Note that we can also use `SELECT` to select only certain columns, and those will be reflected in the dataframe column names: pd.read_sql("""SELECT lastname, firstName FROM employees;""", conn) # ## The `WHERE` Clause # # Now that we have the general syntax down, let's try for some more complex queries! # # In general, the `WHERE` clause filters `SELECT` query results by some condition. # ### Selecting Customers from a Specific City # # Note that because the query is surrounded by triple quotes (`"""`) we can use single quotes (`'`) around the string literals within the query, e.g. `'Boston'`. You need to put quotes around strings in SQL just like you do in Python, so that it is interpreted as a string and not a variable name. pd.read_sql("""SELECT * FROM customers WHERE city = 'Boston';""", conn) # ### Selecting Multiple Cities # # As you are starting to see, you can also combine multiple conditions. pd.read_sql("""SELECT * FROM customers WHERE city = 'Boston' OR city = 'Madrid';""", conn) # ## The `ORDER BY` and `LIMIT` Clauses # # Two additional keywords that you can use to refine your searches are the `ORDER BY` and `LIMIT` clauses. # # The `ORDER BY` clause allows you to sort the results by a particular feature. For example, you could sort by the `customerName` column if you wished to get results in alphabetical order. # # By default, `ORDER BY` is ascending. So, to continue the previous example, if you want the customers in reverse alphabetical order, use the additional parameter `DESC` immediately after whatever you are ordering by. # # Finally, the limit clause is typically the last argument in a SQL query and simply limits the output to a set number of results, as seen with the employee data above. This is especially useful when you are performing initial data exploration and do not need to see thousands or millions of results. 
# ### Selecting Specific Columns with Complex Criteria # # This query demonstrates essentially all of the SQL features we have covered so far. It is asking for the number, name, city, and credit limit for all customers located in Boston or Madrid whose credit limit is above 50,000.00. Then it sorts by the credit limit and limits to the top 15 results. complex_query = """ SELECT customerNumber, customerName, city, creditLimit FROM customers WHERE (city = 'Boston' OR city = 'Madrid') AND (creditLimit >= 50000.00) ORDER BY creditLimit DESC LIMIT 15 ;""" df = pd.read_sql(complex_query, conn) df # You might notice that the output of this query doesn't seem to respect our credit limit criterion. There are results here where the credit limit is *not* over 50,000.00. # # A little investigation shows that this is because the number is actually stored as a string! df["creditLimit"].iloc[0] print(df["creditLimit"].dtype) # Let's do some additional investigation to figure out what happened. # # One additional technique we can use to understand the schema of a SQLITE table is the `PRAGMA` `table_info` command. You can read more about it in the [SQLite docs](https://www.sqlite.org/pragma.html#pragma_table_info). Essentially it shows you the full schema of a given table: pd.read_sql("""PRAGMA table_info(customers)""", conn, index_col="cid") # According to this, none of the columns actually have a data type specified (the `type` column is empty) and none of the columns is marked as the primary key (`pk` column). SQLite is defaulting to treating them like strings — even `creditLimit`, which we clearly want to treat as a number — because the schema doesn't specify their types. # # This is an annoying problem to encounter and also underlines the importance of setting up a database in an appropriate manner at the get-go. 
Sometimes you will encounter an issue like this and you won't be able to do all of the desired filtering in SQL, and instead will need to use pandas or some other technique for your final analysis. # # ### Bonus: Database Administration # # In this case, you have full control over the database since it's just a file on disk, on a computer you control. You can do some database administration and make a correctly-typed copy of `creditLimit` called `creditLimitNumeric`, so that the above complex query works. # # ***Important note:*** it is okay if you don't understand this part. Much of the time, data scientists are only given read access (so they can write `SELECT` queries) and are not responsible for database administration. This is just to give an example of what it takes to fix a database that is not set up correctly. # # First, note that because all of our queries so far have been `SELECT` queries, we still have not made any changes. It's a good idea to keep track of these attributes of `conn` as you attempt to perform any database administration. 
print("Uncommitted changes:", conn.in_transaction) print("Total changes:", conn.total_changes) # Now we can write a query that will alter the database structure (adding a new column `creditLimitNumeric`): add_column = """ ALTER TABLE customers ADD COLUMN creditLimitNumeric REAL; """ cur.execute(add_column) # Then copy all of the `creditLimit` values to the new `creditLimitNumeric` column: fill_values = """ UPDATE customers SET creditLimitNumeric = creditLimit ; """ cur.execute(fill_values) # Now if we check the attributes of `conn`, we do have some uncommitted changes: print("Uncommitted changes:", conn.in_transaction) print("Total changes:", conn.total_changes) # So we need to commit them: conn.commit() print("Uncommitted changes:", conn.in_transaction) print("Total changes:", conn.total_changes) # Now we can look at our table info again: pd.read_sql("""PRAGMA table_info(customers)""", conn, index_col="cid") # Ok, all the way at the bottom we see there is a column `creditLimitNumeric` with `type` of `REAL` (the SQLite name for floating point values). Let's try our complex query again: # query edited to refer to creditLimitNumeric complex_query = """ SELECT customerNumber, customerName, city, creditLimitNumeric FROM customers WHERE (city = 'Boston' OR city = 'Madrid') AND (creditLimitNumeric >= 50000.00) ORDER BY creditLimitNumeric DESC LIMIT 15 ;""" df = pd.read_sql(complex_query, conn) df print(df['creditLimitNumeric'].dtype) # It worked! # # Note that this was a fairly conservative, cautious approach to editing the database. We could have dumped the entire contents into a temp database, then read them back in with the appropriate schema, if we wanted to keep the name `creditLimit` while also setting the appropriate data type. But that kind of operation carries more risk compared to making a copy like this. 
Most of the time as a data scientist (not a database administrator), these are the kinds of changes you want to make: not true administrative overhauls, but just enough modification so that your query will work how you need it to. # # Now we can go ahead and close our database connection. Similar to working with CSV or JSON files, it is mainly important to close the connection if you are writing data to the file/database, but it's a best practice to close it regardless. conn.close() # ## Summary # # In this lesson, you saw how to connect to a SQL database via Python and how to subsequently execute queries against that database. Going forward, you'll continue to learn additional keywords for specifying your query parameters!
index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # Installation tips # * Easiest is to do a miniconda + r-essentials install like [here](https://www.continuum.io/blog/developer/jupyter-and-conda-r) (If you want the hard way ask me later - i.e. the way where you can pick your R release version) # # Installing packages # * A lot of the time with the jupyter systems you encounter, you will not have permission to write to the default library, so do this: # Check library location .libPaths() # + # Add a local path (if it doesn't exist nothing will happen) .libPaths(c(.libPaths(), "C:/Users/michhar/Documents/bin/rmds")) # Then recheck .libPaths() # - getwd() install.packages("ggplot2", repos = "http://cloud.r-project.org/", lib = "C:/Users/michhar/Documents/R/win-library/3.2/") # ### Just so you know...you can write R code in a python notebook - here are some links: # # (this has some advantages such as using the more feature rich IPython kernel...but if you don't care about magics and OS interaction that much don't worry about it) # * http://eneskemalergin.github.io/2015/10/01/R_Magic_with_IPython/ # * http://blog.revolutionanalytics.com/2016/01/pipelining-r-python.html - my shameless plug for myself # Created by a Microsoft Employee. # # The MIT License (MIT)<br> # Copyright (c) 2016 <NAME>
notebooks/01.Installing stuff.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import pandas as pd import numpy as np from sklearn.datasets import make_regression X,Y = make_regression(n_samples = 400,n_features = 1,n_informative = 1,noise = 1.8,random_state = 11) Y = Y.reshape((-1,1)) X = (X-X.mean())/X.std() plt.scatter(X,Y) plt.show() ones = np.ones((X.shape[0],1)) X1 = np.hstack((X,ones)) print(X1[:5,:]) def predict(X,theta): return np.dot(X,theta) def get_theta(X1,Y): Y = np.mat(Y) first = np.dot(X1.T,X1) sec = np.dot(X1.T,Y) theta = np.linalg.pinv(first)*sec return theta theta = get_theta(X1,Y) print(theta) print(X.shape) print(predict(X1,theta).shape) plt.figure() plt.scatter(X,Y) plt.plot(X,predict(X1,theta),color="red") plt.show()
regression/closed_form.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Datafaucet
#
# Datafaucet is a productivity framework for ETL, ML application. Simplifying some of the common activities which are typical in Data pipeline such as project scaffolding, data ingesting, start schema generation, forecasting etc.

from kafka import KafkaConsumer, TopicPartition
from json import loads, dumps
from time import sleep

# Consumer for the 'dfc' topic: offsets are managed manually
# (enable_auto_commit=False, group_id=None) and message payloads are
# JSON-decoded by the value_deserializer.
consumer = KafkaConsumer(
    'dfc',
    bootstrap_servers=['kafka:9092'],
    auto_offset_reset='earliest',
    enable_auto_commit=False,
    group_id=None,
    value_deserializer=lambda x: loads(x.decode('utf-8')))

# Enumerate the topic's partitions so they can be addressed explicitly.
p = consumer.partitions_for_topic('dfc')
partitions = [TopicPartition('dfc', x) for x in list(p)]
partitions

consumer.beginning_offsets(partitions)

# +
# Poll once per second until the first partition's end offset is non-zero,
# i.e. until at least one message has been produced to it.
while not list(consumer.end_offsets(partitions).values())[0]:
    print(consumer.end_offsets(partitions))
    sleep(1)

consumer.end_offsets(partitions)
# -

# Rewind the first partition to the start and fetch a single record.
consumer.seek_to_beginning(partitions[0])

message = consumer.poll(timeout_ms=1000, max_records=1)
cnt = len(message.values())

if cnt:
    record = list(message.values())[0][0]
    print(f'[consumer]: topic={record.topic}, partition={record.partition}, offset={record.offset}, timestamp={record.timestamp}')
    print(f'[datafaucet log data]:')
    print(dumps(record.value, indent=2))
else:
    print('No data in the queue')

consumer.beginning_offsets(partitions)

# Jump to offset 7 in the first partition and read one record from there.
consumer.seek(partitions[0], 7)
consumer.position(partitions[0])

# +
message = consumer.poll(timeout_ms=1000, max_records=1)
cnt = len(message.values())

# Default payload used when the poll returns nothing (asserts below would fail).
d = {'severity':None, 'message':None, 'data':None}

if cnt:
    record = list(message.values())[0][0]
    print(f'[consumer]: topic={record.topic}, partition={record.partition}, offset={record.offset}, timestamp={record.timestamp}')
    print(f'[datafaucet log data]:')
    print(dumps(record.value, indent=2))
    d = record.value
else:
    print('No data in the queue')
# -

# Verify that the record at offset 7 is the expected datafaucet log entry.
assert d['severity'] == 'WARNING'
assert d['message'] == 'custom data + message'
assert d['data'] == {'test_value': 42}
examples/tutorial/logging-kafka/kafka-consumer-test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Calendar Module import datetime month, day , year = (int(x) for x in input().split(' ')) ans = datetime.date(year, month, day) print (ans.strftime("%A").upper()) # + # Time Delta import math import os import random import re import sys import datetime # Complete the time_delta function below. def time_delta(t1, t2): format = '%a %d %b %Y %H:%M:%S %z' timeA = datetime.datetime.strptime(t1, format) timeB = datetime.datetime.strptime(t2, format) difference = abs(timeA - timeB) secondi_diff = difference.total_seconds() secondi_diff = int(secondi_diff) secondi_diff = str(secondi_diff) return(secondi_diff) if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') t = int(input()) for t_itr in range(t): t1 = input() t2 = input() delta = time_delta(t1, t2) fptr.write(delta + '\n') fptr.close()
Problem1/Scripts/Date and Time.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np

# Read the low-cardinality string columns as pandas categoricals to save memory.
column_types = {
    'sex': 'category',
    'workclass': 'category',
    'education': 'category',
    'marital-status': 'category',
    'relationship': 'category',
    'race': 'category',
    'native-country': 'category',
    'salary': 'category'
}
adult = pd.read_csv('adult.data.csv', dtype=column_types)

adult['race'].value_counts()

# How many people of each race are represented in this dataset? This should be a Pandas series with race names as the index labels. (race column)
adult['race'].value_counts()

# ## What is the average age of men?
round(adult.loc[(adult.sex == 'Male'), 'age'].mean(), 1)

# ## What is the percentage of people who have a Bachelor's degree?
round(adult[(adult.education == 'Bachelors')]['education'].count() / adult['education'].count() * 100, 1)

# What percentage of people with advanced education (Bachelors, Masters, or Doctorate) make more than 50K?
advanced_education = ['Bachelors', 'Masters', 'Doctorate']
round(
    adult[(adult.education.isin(advanced_education)) & (adult.salary == '>50K')]['education'].count()
    / adult[(adult.education.isin(advanced_education))]['education'].count() * 100,
    1)

# What percentage of people without advanced education make more than 50K?
# FIX: the original referenced the undefined name `advanced_educationx`
# (a NameError at runtime); it must reuse the `advanced_education` list.
round(
    adult[(~adult.education.isin(advanced_education)) & (adult.salary == '>50K')]['education'].count()
    / adult[(~adult.education.isin(advanced_education))]['education'].count() * 100,
    1)

# What is the minimum number of hours a person works per week?
adult['hours-per-week'].min()

# What percentage of the people who work the minimum number of hours per week have a salary of more than 50K?
adult[(adult['hours-per-week'] == adult['hours-per-week'].min()) & (adult['salary'] == '>50K')]['salary'].count() / adult[(adult['hours-per-week'] == adult['hours-per-week'].min())]['salary'].count() * 100

# What country has the highest percentage of people that earn >50K and what is that percentage?
(round(adult.loc[(adult['salary'] == '>50K'), 'native-country'].value_counts() / adult['native-country'].value_counts() * 100, 1)).idxmax()
(round(adult.loc[(adult['salary'] == '>50K'), 'native-country'].value_counts() / adult['native-country'].value_counts() * 100, 1)).max()

# Identify the most popular occupation for those who earn >50K in India.
(adult.loc[(adult['salary'] == '>50K') & (adult['native-country'] == 'India'), 'occupation'].value_counts()).idxmax()
python/demographic-data-analyzer/demographic_data_analyzer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exploring Iris Dataset # ![](https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Machine+Learning+R/iris-machinelearning.png) # >##### Here we're exploring basics of classification with the iris data set. # ##### The first step is to use the scikit-learn python package to import the preloaded data sets. from sklearn.datasets import load_iris #data is saved as a variable iris = load_iris() #view data description and information print(iris.DESCR) # ##### Putting Data into a Data Frame import pandas as pd data = pd.DataFrame(iris.data) data.head() # ##### renaming the columns for clarity. data.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width'] data.head() #put target data into data frame target = pd.DataFrame(iris.target) #rename the column to make it clear that these are the target values target = target.rename(columns = {0: 'target'}) target.head() # The target data frame is only one column, and it gives a list of the values 0, 1, and 2. We will use the information from the feature data to predict if a flower belongs in group 0, 1, or 2. # * 0 is Iris Setosa # * 1 is Iris Versicolour # * 2 is Iris Virginica # ## 1. Exploratory Data Analysis (EDA) df = pd.concat([data, target], axis = 1) df.head() # ### 1.1 Data Cleaning # ##### It's critical to go over the data, make sure it's clean, and then start looking for patterns between characteristics and target variables. df.dtypes # * float = numbers with decimals # * int = integer or whole number without decimals # * obj = object, string, or words # ##### The data types in this data set are all ready to be modelled. #Checking for Missing Values df.isnull().sum() # ##### This data set is not missing any values. df.describe() # ## 2. 
Visualizing import seaborn as sns sns.heatmap(df.corr(), annot = True) # * The target value is most correlated with the length and width of the petals, which means that as these numbers increase, so does the target value. # * In this case, it signifies that flowers in class 2 have petal length and width that are generally longer and wider than flowers in class 0. # * Sepal width is the most anti-correlated, implying that flowers in class 0 have the widest sepals compared to flowers in class 2. # * also see some intercorrelation between features, for example petal width and length are also highly correlated. import matplotlib.pyplot as plt # ##### we can plot scatter plots to further visualize the way the different classes of flowers relate to sepal and petal data. # The indices of the features that we are plotting (class 0 & 1) x_index = 0 y_index = 1 # this formatter will label the colorbar with the correct target names formatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)]) plt.figure(figsize=(5, 4)) plt.scatter(iris.data[:, x_index], iris.data[:, y_index], c=iris.target) plt.colorbar(ticks=[0, 1, 2], format=formatter) plt.xlabel(iris.feature_names[x_index]) plt.ylabel(iris.feature_names[y_index]) plt.tight_layout() plt.show() # ##### Now let’s create the same scatter plot to compare the petal data points. x_index = 2 y_index = 3 # this formatter will label the colorbar with the correct target names formatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)]) plt.figure(figsize=(5, 4)) plt.scatter(iris.data[:, x_index], iris.data[:, y_index], c=iris.target) plt.colorbar(ticks=[0, 1, 2], format=formatter) plt.xlabel(iris.feature_names[x_index]) plt.ylabel(iris.feature_names[y_index]) plt.tight_layout() plt.show() # ## 3. 
Modeling X = df.copy() y = X.pop('target') from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=1, stratify = y) #Standardize from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns) X_test = pd.DataFrame(scaler.transform(X_test), columns=X_test.columns) # ### 3.1 Baseline Prediction # ##### The baseline is the probability of predicting class before the model is implemented. If the data is split into 2 classes evenly, there is already a 50% chance of randomly assigning an element to the correct class. The goal of our model is to improve on this baseline, or random prediction. Also, if there is a strong class imbalance (if 90% of the data was in class 1), then we could alter the proportion of each class to help the model predict more accurately. df.target.value_counts(normalize= True) # #### The baseline prediction for this model is 1/3 # ### 3.2 Logistic Regression Model import numpy as np from sklearn.linear_model import LogisticRegression #create the model instance model = LogisticRegression() #fit the model on the training data model.fit(X_train, y_train) #the score, or accuracy of the model model.score(X_test, y_test) #the test score is already very high, but we can use the cross validated score to ensure the model's strength from sklearn.model_selection import cross_val_score scores = cross_val_score(model, X_train, y_train, cv=10) print(np.mean(scores)) # ##### Without any adjustments or tuning, this model is already performing very well with a test score of .9667 and a cross validation score of .9499. This means that the model is predicting the correct class for the flower about 95% of time. Much higher than the baseline of 33%! # ## 4. 
Understanding the Predictions df_coef = pd.DataFrame(model.coef_, columns=X_train.columns) df_coef # #### Coefficients are often a bit hard to interpret in Logistic Regression, but we can get an idea of how much of an impact each of the features had in deciding if a flower belonged to that class. For instance, petal length was barely a deciding factor for if a flower was in class 1, but petal width was a strong predictor for class 2 predictions = model.predict(X_test) #compare predicted values with the actual scores compare_df = pd.DataFrame({'actual': y_test, 'predicted': predictions}) compare_df = compare_df.reset_index(drop = True) compare_df # The predictions line up almost perfectly, and only once the model incorrectly predicted that a flower belonged to class 1 when it really belonged to class 2. from sklearn.metrics import confusion_matrix pd.DataFrame(confusion_matrix(y_test, predictions, labels=[2, 1, 0]),index=[2, 1, 0], columns=[2, 1, 0]) # ##### We can see that class 0 and 1 were all predicted correctly all 10 times, but the model incorrectly labeled class 2 as class 1 in one instance. # * Precision: Number of correctly predicted Iris Virginica flowers (10) out of total number of predicted Iris Virginica flowers (10). Precision in predicting Iris Virginica =10/10 = 1.0 # * Recall: Number of correctly predicted Iris Virginica out of the number of actual Iris Virginica. Recall = 9/10 = .9 # * F1 Score: This is a harmonic mean of precision and recall. The formula is F1 Score = 2* (precision * recall) / (precision + recall) # * Accuracy: Add all the correct predictions together for all classes and divide by the total number of predictions. 29 correct predictions /30 total values = accuracy of .9667. 
from sklearn.metrics import classification_report print(classification_report(y_test, predictions)) #Predicted Probabilities probs = model.predict_proba(X_test) #put the probabilities into a dataframe for easier viewing Y_pp = pd.DataFrame(model.predict_proba(X_test), columns=['class_0_pp', 'class_1_pp', 'class_2_pp']) Y_pp.head() # ## 5. Conclusion # ##### This is a typical data set since it is simple to work with, but the steps outlined here may be applied to any classification project. # ![](https://media.giphy.com/media/3o6MbudLhIoFwrkTQY/giphy.gif) # twitter: https://twitter.com/jithinharidaas/
_notebooks/2020-06-21-Exploring-Iris-Dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fit a single Gaussian to a two-component Gaussian mixture by minimising
# several discrepancies (kernel divergences, 1-Wasserstein), plot the fitted
# densities, and render the divergence surface over (mu, sigma).

# +
import numpy as np
import scipy.integrate as integrate
from scipy.optimize import minimize
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection
import matplotlib.pyplot as plt
from matplotlib import cm
from scipy.stats import norm

# +
m, s = 0, 1                        # reference Gaussian (unused below)
m1, s1, m2, s2 = -2, 0.3, 2, 0.5   # mixture component means / stds
p = 0.2                            # weight of component 1


def gaussian_cdf(x, mu, sigma):
    """Gaussian CDF with mean ``mu`` and std ``sigma``."""
    return norm.cdf(x, loc=mu, scale=sigma)


def gaussian_pdf(x, mu, sigma):
    """Gaussian density, written explicitly so it broadcasts over array x."""
    return (1 / (np.abs(sigma) * np.sqrt(2 * np.pi))
            * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2)))


def mix_cdf(x):
    """CDF of the mixture p*N(m1, s1) + (1 - p)*N(m2, s2)."""
    return p * gaussian_cdf(x, m1, s1) + (1 - p) * gaussian_cdf(x, m2, s2)


def mix_pdf(x):
    """Density of the mixture p*N(m1, s1) + (1 - p)*N(m2, s2)."""
    return p * gaussian_pdf(x, m1, s1) + (1 - p) * gaussian_pdf(x, m2, s2)


def kernel(x, y, u=1):
    """Squared-exponential kernel with bandwidth ``u``."""
    return np.exp(-(1 / (2 * u)) * np.abs(x - y) ** 2)


def KG(x, mu, sigma, u=1):
    """Kernel mean embedding of N(mu, sigma) under the SE kernel."""
    return (np.exp(-(x - mu) ** 2 / (2 * (u + sigma ** 2)))
            / (sigma * np.sqrt(1 / u + 1 / sigma ** 2)))


def KQ(x, u=1):
    """Kernel mean embedding of the mixture.

    BUGFIX: the first component previously used ``s2`` instead of ``s1``,
    inconsistent with ``mix_pdf``/``mix_cdf`` above.
    """
    return p * KG(x, m1, s1, u=u) + (1 - p) * KG(x, m2, s2, u=u)


def KS(x, mu, sigma, u=1):
    """Kernel mean embedding of the 50/50 blend of N(mu, sigma) and the mixture.

    BUGFIX: same ``s1``/``s2`` inconsistency as in ``KQ``.
    """
    return 0.5 * (KG(x, mu, sigma, u=u)
                  + p * KG(x, m1, s1, u=u)
                  + (1 - p) * KG(x, m2, s2, u=u))


# +
def wasserstein(mu, sigma):
    """1-Wasserstein distance between N(mu, sigma) and the mixture (CDF form)."""
    return integrate.quad(
        lambda x: np.abs(gaussian_cdf(x, mu, sigma) - mix_cdf(x)), -5, 5)[0]


def divergence(mu, sigma, u=1, fwd='fwd'):
    """Kernel divergence between N(mu, sigma) and the mixture.

    fwd='fwd' : D[model || data]
    fwd='bwd' : D[data || model]
    fwd='sym' : symmetrised (Jensen-Shannon-style) variant
    """
    if fwd == 'sym':
        rat = lambda x: KG(x, mu, sigma, u=u) / KS(x, mu, sigma, u=u)
        pl = lambda x: gaussian_pdf(x, mu, sigma)
        pr = lambda x: 0.5 * (gaussian_pdf(x, mu, sigma) + mix_pdf(x))
        t1 = integrate.quad(lambda x: pl(x) * np.log(rat(x)), -5, 5)[0]
        t2 = integrate.quad(lambda x: pr(x) * rat(x), -5, 5)[0]
        rat = lambda x: KQ(x, u=u) / KS(x, mu, sigma, u=u)
        pl = mix_pdf
        pr = lambda x: 0.5 * (gaussian_pdf(x, mu, sigma) + mix_pdf(x))
        v1 = integrate.quad(lambda x: pl(x) * np.log(rat(x)), -5, 5)[0]
        v2 = integrate.quad(lambda x: pr(x) * rat(x), -5, 5)[0]
        return 0.5 * (1 + t1 - t2 + 1 + v1 - v2)
    elif fwd == 'fwd':
        rat = lambda x: KG(x, mu, sigma, u=u) / KQ(x, u=u)
        pl = lambda x: gaussian_pdf(x, mu, sigma)
        pr = mix_pdf
    elif fwd == 'bwd':
        rat = lambda x: KQ(x, u=u) / KG(x, mu, sigma, u=u)
        pl = mix_pdf
        pr = lambda x: gaussian_pdf(x, mu, sigma)
    t1 = integrate.quad(lambda x: pl(x) * np.log(rat(x)), -5, 5)[0]
    t2 = integrate.quad(lambda x: pr(x) * rat(x), -5, 5)[0]
    return 1 + t1 - t2


# +
u = 0.1

from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.sans-serif'] = ['Lucida Grande']
rcParams['font.size'] = 12

plt.figure(figsize=(5, 3))
x = np.linspace(-3, 4, 100)
plt.plot(x, mix_pdf(x), markersize=2, label='True')

# Forward kernel divergence fit.
f = lambda w: divergence(w[0], w[1], u=u, fwd='fwd')
res2 = minimize(f, x0=[1, 0.1], method='Nelder-Mead', tol=1e-6)
plt.plot(x, gaussian_pdf(x, res2.x[0], res2.x[1]), '--', markersize=2,
         label='D(Data, Model)')

# Backward kernel divergence fit.
f = lambda w: divergence(w[0], w[1], u=u, fwd='bwd')
res1 = minimize(f, x0=[0, 2], method='Nelder-Mead', tol=1e-6)
plt.plot(x, gaussian_pdf(x, res1.x[0], res1.x[1]), '-.', markersize=2,
         label='D(Model, Data)')

# BUGFIX: this Wasserstein fit was commented out, yet ``res`` was still used
# in the plot below, raising NameError. Restore the fit.
f = lambda w: wasserstein(w[0], w[1])
res = minimize(f, x0=[1, 0.1], method='Nelder-Mead', tol=1e-6)
plt.plot(x, gaussian_pdf(x, res.x[0], res.x[1]), '-', markersize=2,
         label='W(True, Mod)')

plt.axis('off')
plt.text(-2.1, 0.3, r'$\mathbb{P}$', fontsize=15, color='#1f77b4')
plt.text(2.5, 0.7, r'$\mathbb{D}^{\mathbf{K}}[\cdot \, || \, \mathbb{P}]$',
         fontsize=15, color='#ff7f0e')
plt.text(-1.5, 0.2, r'$\mathbb{D}^{\mathbf{K}}[\mathbb{P} \, || \, \cdot]$',
         fontsize=15, color='#2ca02c')
plt.text(-0.5, 0.45, r'$\mathbb{W}_1[\mathbb{P}, \cdot]$',
         fontsize=15, color='#d62728')
plt.tight_layout()
plt.savefig('mog_divergence', dpi=400)

# f = lambda w: divergence(w[0], w[1], u=u, fwd='sym')
# res = minimize(f, x0=[2, 1], method='Nelder-Mead', tol=1e-6)
# print(res.x)
# plt.plot(x, gaussian_pdf(x, res.x[0], res.x[1]), label='JS(True, Mod)')
# -

# + Divergence surface over a (mu, sigma) grid.
gs = 20
mus = np.linspace(-1, 2, gs)
sigmas = np.linspace(0.1, 2, gs)
M, S = np.meshgrid(mus, sigmas)
Z = np.zeros((gs, gs))
for i in range(gs):
    for j in range(gs):
        Z[i, j] = divergence(M[i, j], S[i, j])
# -

fig = plt.figure(figsize=(5, 5))
# BUGFIX: fig.gca(projection='3d') was removed in Matplotlib >= 3.6; use
# add_subplot with an explicit projection instead.
ax = fig.add_subplot(projection='3d')
ax.view_init(30, 130)
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
surf = ax.plot_surface(M, S, Z, cmap=cm.coolwarm, linewidth=0, antialiased=True)
fig.colorbar(surf, shrink=0.5, aspect=20)
plt.show()
mog_divergence.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Train a ResNet-18 binary image classifier with per-batch SMOTE oversampling
# and cost-sensitive evaluation based on inverse class priors.

# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
import numpy as np
import pandas as pd
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import datasets, models, transforms
from imblearn.over_sampling import SMOTE

torch.manual_seed(42)
np.random.seed(42)

# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
# ResNet implementation adapted from torchvision; the only functional change
# from upstream is the softmax at the end of _forward_impl (see below).
from torch.hub import load_state_dict_from_url

__all__ = ['ResNet', 'resnet18']

model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
}


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding; bias is folded into the following BN."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False,
                     dilation=dilation)


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                     bias=False)


class BasicBlock(nn.Module):
    """Standard two-conv residual block used by ResNet-18/34."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 downsamples spatially when stride != 1.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        # Project the identity when shapes differ (stride/channel change).
        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)
        return out


class Bottleneck(nn.Module):
    """Three-conv bottleneck residual block used by ResNet-50+."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)
        return out


class ResNet(nn.Module):
    """torchvision-style ResNet whose forward pass ends in a softmax.

    The softmax output is consumed by the probability-based ``cross_entropy``
    defined further below, so do NOT pair this model with
    ``nn.CrossEntropyLoss`` (which expects raw logits).
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-init the last BN in each residual branch so blocks start as
        # identity mappings (helps very deep training).
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack ``blocks`` residual blocks, downsampling on the first one."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample,
                            self.groups, self.base_width, previous_dilation,
                            norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width,
                                dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        # Non-standard: return class probabilities, not logits, because the
        # custom loss below works on probabilities.
        x = nn.functional.softmax(x, 1)
        return x

    def forward(self, x):
        return self._forward_impl(x)


def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet and optionally load ImageNet weights from ``model_urls``."""
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model


def resnet18(pretrained=False, progress=True, **kwargs):
    """ResNet-18 (two BasicBlocks per stage)."""
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)


# +
# NOTE(review): these are placeholder split names/paths — fill in before
# running; with all three equal to '' every branch below matches.
data_dir = ""
TEST = ''
TRAIN = ''
VAL = ''


def data_transforms2(phase):
    """Return the torchvision transform pipeline for the given split."""
    if phase == TRAIN:
        transform = transforms.Compose([
            transforms.Resize((50, 50)),
            transforms.ToTensor(),
            # transforms.Normalize([1.2201, 0.4040, 0.7327], [0.3852, 0.5240, 0.5857]),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
    if phase == VAL:
        transform = transforms.Compose([
            transforms.Resize((50, 50)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
    if phase == TEST:
        transform = transforms.Compose([
            transforms.Resize((50, 50)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
    return transform


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms2(x))
                  for x in [TRAIN, VAL, TEST]}
dataloaders = {TRAIN: torch.utils.data.DataLoader(image_datasets[TRAIN],
                                                  batch_size=64, shuffle=True),
               VAL: torch.utils.data.DataLoader(image_datasets[VAL],
                                                batch_size=256, shuffle=True),
               TEST: torch.utils.data.DataLoader(image_datasets[TEST],
                                                 batch_size=256, shuffle=True)}
dataset_sizes = {x: len(image_datasets[x]) for x in [TRAIN, VAL, TEST]}
classes = image_datasets[TRAIN].classes
# -

# Inverse class priors drive the misclassification costs below.
classes, counts = np.unique(dataloaders[TRAIN].dataset.targets, return_counts=True)
print(classes)
print(counts)
class_inverse_priors = {c: 1 / (count / len(dataloaders[TRAIN].dataset.targets))
                        for c, count in zip(list(classes), counts)}
print(class_inverse_priors)

# False negatives (actual positive predicted negative) cost 5x the inverse
# prior; correct predictions cost nothing.
cost_matrix = pd.DataFrame(
    data=[[0, class_inverse_priors[0]],
          [5 * class_inverse_priors[1], 0]],
    index=['actual negative (0)', 'actual positive (1)'],
    columns=['predict negative (0)', 'predict positive (1)'])
print(cost_matrix)


# +
def get_cost(preds, labels, cost_matrix):
    """Total misclassification cost of a batch under ``cost_matrix``."""
    preds = preds.detach().cpu().numpy()
    labels = labels.detach().cpu().numpy()
    cost = []
    for y_hat, y_actual in zip(preds, labels):
        cost.append(get_cost_from_matrix(y_hat=y_hat,
                                         y_actual=y_actual,
                                         cost_matrix=cost_matrix))
    return sum(cost)


def get_cost_from_matrix(y_hat, y_actual, cost_matrix):
    """Look up the cost of a single (prediction, label) pair."""
    if y_actual == y_hat == 0:
        return cost_matrix.loc['actual negative (0)', 'predict negative (0)']
    if y_actual == y_hat == 1:
        return cost_matrix.loc['actual positive (1)', 'predict positive (1)']
    if (y_actual == 1) & (y_hat == 0):
        return cost_matrix.loc['actual positive (1)', 'predict negative (0)']
    if (y_actual == 0) & (y_hat == 1):
        return cost_matrix.loc['actual negative (0)', 'predict positive (1)']


# -
def cross_entropy(output, target):
    """Binary cross-entropy on probability outputs.

    ``output`` is the softmax output of the model (column 1 = P(class 1));
    the clamp keeps log() away from 0 and 1.
    """
    output = torch.clamp(output, min=1e-5, max=1 - 1e-5)
    loss = (target * torch.log(output[:, 1])) + \
           ((1 - target) * torch.log(1 - output[:, 1]))
    return torch.neg(torch.mean(loss))


def train_smote(model, dataloader, optimizer, criterion, epoch, num_epochs):
    """Run one training epoch, oversampling the minority class per batch.

    Batches with at least 6 positives (SMOTE's default k_neighbors=5 needs 6
    minority samples) are balanced with SMOTE before the forward pass.
    Returns (epoch_loss, epoch_accuracy, total_cost).
    """
    print("Epoch: {}/{}".format(epoch + 1, num_epochs))
    print("=" * 10)
    model.train()
    running_loss = 0.0
    running_corrects = 0
    total_cost = 0.0
    for data in dataloader:
        inputs, labels = data
        # BUGFIX: the positive count was read as
        # np.unique(labels, return_counts=True)[1][1], which raises
        # IndexError on single-class batches. Count positives directly, and
        # skip SMOTE when the batch has no negatives (SMOTE needs both classes).
        labels_np = labels.numpy()
        pos_count = int((labels_np == 1).sum())
        if 6 <= pos_count < len(labels_np):
            # SMOTE works on flat feature vectors: flatten, resample, reshape.
            original_shape = list(inputs.shape)
            inputs_numpy = inputs.reshape(inputs.shape[0], -1).numpy()
            smote = SMOTE()
            smote_inputs_numpy, smote_labels_numpy = smote.fit_resample(
                inputs_numpy, labels_np)
            original_shape[0] = smote_inputs_numpy.shape[0]
            smote_inputs_numpy = smote_inputs_numpy.reshape(original_shape)
            inputs = torch.Tensor(smote_inputs_numpy)
            labels = torch.Tensor(smote_labels_numpy)
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        _, preds = torch.max(outputs, 1)
        cost = get_cost(preds, labels, cost_matrix)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item() * inputs.size(0)
        running_corrects += torch.sum(preds == labels.data)
        total_cost += cost
    epoch_loss = running_loss / dataset_sizes[TRAIN]
    epoch_acc = running_corrects.double() / dataset_sizes[TRAIN]
    print('Train: {:.4f}: , Accuracy: {:.4f},\
    Cost: {:.4f}'.format(epoch_loss, epoch_acc, total_cost))
    return epoch_loss, epoch_acc, total_cost


def evaluate(model, dataloader, criterion, phase, epoch, save=True):
    """Evaluate on one split; optionally checkpoint on best validation cost.

    ``dataloader`` is the dict of loaders keyed by split name (callers pass
    the module-level ``dataloaders``). BUGFIX: this parameter was previously
    ignored in favour of the global ``dataloaders``.
    """
    model.eval()
    running_loss = 0.0
    running_corrects = 0
    total_cost = 0.0
    with torch.no_grad():
        for data in dataloader[phase]:
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            cost = get_cost(preds, labels, cost_matrix)
            loss = criterion(outputs, labels)
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
            total_cost += cost
    epoch_loss = running_loss / dataset_sizes[phase]
    epoch_acc = running_corrects.double() / dataset_sizes[phase]
    print('{}: {:.4f}: , Accuracy: {:.4f},\
    Cost: {:.4f}'.format(phase, epoch_loss, epoch_acc, total_cost))
    if save:
        # Checkpoint whenever the (global) best validation cost improves.
        global best_valid_cost
        if total_cost < best_valid_cost:
            best_valid_cost = total_cost
            print('Saving..')
            state = {
                'net': model.state_dict(),
                'epoch': epoch,
                'acc': epoch_acc,
                'cost': total_cost,
            }
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            torch.save(state, './checkpoint/ckpt.pth')
    return epoch_loss, epoch_acc, total_cost


# +
# Load ImageNet weights first, then swap in a 2-class head.
resnet = resnet18(pretrained=True)
resnet.fc = nn.Linear(512, 2)
resnet = resnet.to(device)
criterion = cross_entropy
optimizer = optim.SGD(resnet.parameters(), lr=0.01, weight_decay=0.05)

# +
num_epochs = 20
best_valid_cost = 1e6
train_loss, valid_loss, train_acc, valid_acc, train_cost, valid_cost = [], [], [], [], [], []
for epoch in range(num_epochs):
    tr_epoch_loss, tr_epoch_acc, tr_epoch_cost = train_smote(
        resnet, dataloaders[TRAIN], optimizer, criterion, epoch, num_epochs)
    train_loss.append(tr_epoch_loss)
    train_acc.append(tr_epoch_acc)
    train_cost.append(tr_epoch_cost)
    val_epoch_loss, val_epoch_acc, val_epoch_cost = evaluate(
        resnet, dataloaders, criterion, VAL, epoch, save=True)
    valid_loss.append(val_epoch_loss)
    valid_acc.append(val_epoch_acc)
    valid_cost.append(val_epoch_cost)

# + Restore the best checkpoint before testing.
checkpoint = torch.load('./checkpoint/ckpt.pth')
resnet.load_state_dict(checkpoint['net'])
best_cost = checkpoint['cost']
current_epoch = checkpoint['epoch']
print(best_cost)
print(current_epoch)
# -

evaluate(resnet, dataloaders, criterion, TEST, epoch, save=False)

results = pd.DataFrame({'train loss': train_loss,
                        'train acc': [x.item() for x in train_acc],
                        'train cost': train_cost,
                        'valid loss': valid_loss,
                        'valid acc': [x.item() for x in valid_acc],
                        'valid cost': valid_cost})
notebooks/smote.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # A notebook for the online Turtle
# ## A first turtle:
from ipyturtle import Turtle
t = Turtle()
t

# ## Another turtle:
t2 = Turtle(fixed=False, width=100, height=100)
t2

# ## Back to the first one...
t.back(40)
t.forward(100)
t.position()
t.right(90)
t.heading()
t.forward(150)
t.left(45)
t.back(100)
t.left(45)
t.penup()
t.forward(100)

# ## Square:
t.reset()  # clear canvas and start again


def square(size):
    """Draw a square of side ``size``, turning clockwise."""
    for _ in range(4):
        t.forward(size)
        t.right(90)


square(20)

# ## Triangle:
t.reset()


def triangle():
    """Draw an equilateral triangle with 100-unit sides."""
    for _ in range(3):
        t.forward(100)
        t.right(120)


triangle()

# ## Circle:
t = Turtle(fixed=False, width=120, height=120)


def circle():
    """Approximate a circle with 360 unit-length segments."""
    for _ in range(360):
        t.forward(1)
        t.right(1)


t
circle()

# ## Koch curve:
# open a drawing window
from ipyturtle import Turtle
t = Turtle()
t


def koch(n, longueur):
    """Draw a Koch-curve segment of recursion depth ``n`` and length ``longueur``."""
    if n == 0:
        t.forward(longueur)
        return
    # Replace the segment by four sub-segments with 60/−120/60 degree turns.
    third = longueur / 3
    koch(n - 1, third)
    t.left(60)
    koch(n - 1, third)
    t.right(120)
    koch(n - 1, third)
    t.left(60)
    koch(n - 1, third)


t.reset()  # clear canvas and start again
t.penup()
t.left(90)
t.forward(150)
t.right(180)
t.pendown()
koch(4, 300)

# ## Sierpinski triangle:
# open a drawing window
from ipyturtle import Turtle
t = Turtle()
t


def sierpinski(n, longueur):
    """Draw a Sierpinski triangle of depth ``n`` with side ``longueur``."""
    if n == 0:
        # Base case: a plain triangle.
        for _ in range(3):
            t.forward(longueur)
            t.left(120)
    elif n > 0:
        # Three half-size triangles at the corners of the current one.
        half = longueur / 2
        sierpinski(n - 1, half)
        t.forward(half)
        sierpinski(n - 1, half)
        t.back(half)
        t.left(60)
        t.forward(half)
        t.right(60)
        sierpinski(n - 1, half)
        t.left(60)
        t.back(half)
        t.right(60)


t.reset()  # clear canvas and start again
t.right(90)
t.pendown()
sierpinski(4, 150)
iPyturtle-Un_BN_pour_la_Tortue.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Software defect prediction on the JM1 dataset: a dense Keras network whose
# outputs are fed to a KNN classifier.

# + [markdown] id="NEMlVb_5biF0"
# Importing all necessary packages.

# + id="zWiqWojS2qJW"
import pandas as pd
import tensorflow as tf
import numpy as np
# NOTE(review): aliasing the top-level matplotlib package as ``plt`` is
# unusual; the commented-out cells below use ``plt.pyplot`` accordingly.
import matplotlib as plt
import seaborn as sns

# + [markdown] id="JdCeRHRAcFkb"
# Downloading & printing the dataset

# + id="4pSiCustcEzK"
url = 'https://raw.githubusercontent.com/MilanBinsMathew/Software-Defect-Prediction-JM1/main/jm1_csv.csv'

# + colab={"base_uri": "https://localhost:8080/"} id="cFKX_vJCd1Bk" outputId="603c00a5-2bc5-462e-85a9-50cc01ba0f21"
df = pd.read_csv(url)
print(df.head())
df.isnull().sum()

# + [markdown] id="d2m3WgW3l7zp"
# Splitting into training and test sets (80% - 20%) and scaling features.

# + id="hSG4zkugmM0Y"
from sklearn.model_selection import train_test_split

X = df.drop(['defects'], axis=1).values
y = df['defects'].values

# + [markdown] id="O3VMk2pJri8R"
# Feature Scaling

# + id="4G-KTc-ArmWC"
from sklearn.preprocessing import MinMaxScaler

# BUGFIX: the scaler was previously fit on the full dataset before the split,
# leaking test-set statistics into training. Split first, fit the scaler on
# the training split only, then apply it to the test split.
train_X, test_X, train_Y, test_Y = train_test_split(X, y, test_size=0.2,
                                                    random_state=2)
scaler = MinMaxScaler(feature_range=[0, 1])
train_X = scaler.fit_transform(train_X)
test_X = scaler.transform(test_X)

# + [markdown] id="B2rWS3crnMva"
# # Keras Model Definition

# + id="Wbso4gW0r_zM"
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, BatchNormalization, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import BinaryCrossentropy

# + id="4TZgUlkbxblE"
# 21 input features (JM1 metrics); a stack of ReLU layers with occasional
# batch normalisation, ending in a single linear output unit.
model = Sequential([
    (Dense(32, activation='relu', input_shape=(21,))),
    (BatchNormalization()),
    (Dense(32, activation='relu')),
    (BatchNormalization()),
    (Dense(64, activation='relu')),
    (Dense(64, activation='relu')),
    (Dense(64, activation='relu')),
    (Dense(64, activation='relu')),
    (BatchNormalization()),
    (Dense(32, activation='relu')),
    (Dense(32, activation='relu')),
    (Dense(1, activation='linear')),
])
ad = Adam(learning_rate=0.00001)
# NOTE(review): 'hinge' pairs with the linear (unbounded) output above;
# confirm that the 'accuracy' metric is meaningful for this setup.
model.compile(optimizer=ad, loss='hinge', metrics=['accuracy'])

# + [markdown] id="XGMY88g8rQuO"
# Training

# + id="88pIatPU85m-" colab={"base_uri": "https://localhost:8080/"} outputId="dd6d73cf-aaa9-46dc-ad02-6e18c3aebf14"
model.fit(x=train_X, y=train_Y, batch_size=64, epochs=250)
model.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="sbYMFah4lb7n" outputId="734f829e-ff5f-4d69-b259-efe27563dbfd"
# Use the network's output as a 1-D learned feature and classify it with KNN.
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors=10)
x_train = model.predict(train_X)
knn.fit(x_train, train_Y)
x_test = model.predict(test_X)
y_pred = knn.predict(x_test)

from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score

knn_accuracy = accuracy_score(test_Y, y_pred)
print('Accuracy (RES + KNN): ', "%.2f" % (knn_accuracy*100))
precision = precision_score(test_Y, y_pred)
print('Precision: %f' % precision)
recall = recall_score(test_Y, y_pred)
print('Recall: %f' % recall)
f1 = f1_score(test_Y, y_pred)
print('F1 score: %f' % f1)

# + [markdown] id="NWS1YFLVsRnc"
# Results

# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="XhPBt7l6st0j" outputId="c90600ad-06b1-44a5-c0a6-8c28aa16a876"
'''
from sklearn import metrics
fig = plt.pyplot.plot(figsize=(10,5))
plt.pyplot.bar(test_Y,y_pred)
plt.pyplot.plot(test_Y,test_Y,'r')
'''

# + id="hH_4V2SByjfh" colab={"base_uri": "https://localhost:8080/"} outputId="2c84ded0-0bf7-45a7-ed1a-00cfeb0fa4a4"
'''from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
y_pred = model.predict(test_X)
y_pred = (y_pred >= 0.6).astype(int).ravel()
accuracy = accuracy_score(test_Y, y_pred)
print('Accuracy: %f' % accuracy)
precision = precision_score(test_Y, y_pred)
print('Precision: %f' % precision)
recall = recall_score(test_Y, y_pred)
print('Recall: %f' % recall)
f1 = f1_score(test_Y, y_pred)
print('F1 score: %f' % f1)
'''
Software_Defect_Prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/songqsh/foo1/blob/master/src/linreg_torch_v01.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="5woR4KoCvJ1l" colab_type="code" colab={}
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

# + [markdown] id="G006ruHmhkz6" colab_type="text"
# __Goal__
#
# We will implement a linear regression model using both a linear algebra
# approach and a pytorch neural network.

# + [markdown] id="jfL3p4EuhyCt" colab_type="text"
# __linear regression problem__
#
# Given a sequence of input and output pairs
# $$\{(x_i, y_i): i = 1, 2, \ldots, n\}$$
# the problem is to find a pair of numbers $(w, b)$ to have a linear function
# $$ y = f(x) = wx + b$$
# fitting the given data. That is,
# $$(w, b) = \arg\min_{w, b} \sum_i |y_i - f(x_i)|^2.$$

# + [markdown] id="6X2uvQP-i9po" colab_type="text"
# __input and output data cook up__

# + id="piKakys4vMl2" colab_type="code" colab={}
'''
Create data
'''
batch_size = 50

# Cook data with these parameters, then use regression to recover them.
bias_param = 5.
weight_param = 2.
x_train = torch.linspace(0, batch_size - 1, batch_size).reshape(batch_size, 1)
y_train = bias_param + weight_param * x_train
y_train += torch.randn(batch_size, 1) * 3.0  # additive Gaussian noise

# + id="DVwM97yMvs4E" colab_type="code" outputId="064a8df7-073a-4810-ae55-1849672092e3" colab={"base_uri": "https://localhost:8080/", "height": 286}
plt.scatter(x_train.numpy(), y_train.numpy())

# + [markdown] id="hKWkPW-A0JtW" colab_type="text"
# __Linear Algebra approach__
#
# Set
# $$A = \begin{pmatrix}
# 1 & x_1 \\
# 1 & x_2 \\
# \vdots & \vdots \\
# 1 & x_n
# \end{pmatrix}
# $$
# and
# $$
# y =
# \begin{bmatrix}
# y_1\\
# y_2\\
# \vdots\\
# y_n
# \end{bmatrix}
# .$$
# $(w,b)$ can be solved by
# $$
# A^T A
# \begin{bmatrix}
# b\\
# w
# \end{bmatrix} = A^T y.
# $$

# + id="SXBSrIWozllt" colab_type="code" colab={}
A = torch.cat((torch.ones(batch_size, 1), x_train), 1)

# + id="waGEX5RavwhL" colab_type="code" colab={}
# BUGFIX: torch.solve was deprecated and removed in PyTorch >= 1.13.
# torch.linalg.solve(A, B) solves A X = B and returns the solution directly
# (no LU factor, which was unused anyway).
bw = torch.linalg.solve(A.t() @ A, A.t() @ y_train)
bias = bw[0]
weight = bw[1]

# + id="fY-VfDFAyxln" colab_type="code" colab={}
y_pred = bias + weight * x_train

# + id="ZklDYzX5I7yM" colab_type="code" outputId="75e29ec6-968a-4868-d543-a3faba1ea3e6" colab={"base_uri": "https://localhost:8080/", "height": 34}
loss_la = nn.MSELoss()
print('>>> loss from linear algebra approach is ' + str(loss_la(y_pred, y_train).numpy()))

# + id="9x4pWQfA1bsL" colab_type="code" outputId="666b7da7-6942-4f3f-913e-9892dd127f2e" colab={"base_uri": "https://localhost:8080/", "height": 298}
print('>>>bias:' + str(bias.numpy()) + ' and weight:' + str(weight.numpy()))
plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data')
plt.plot(x_train.numpy(), y_pred.numpy(), label='Fitted line')
plt.title('Linear Regression Result')
plt.legend()
plt.show()

# + [markdown] id="-Rdw2XiP6JOP" colab_type="text"
# __Torch nn__
#
# We implement linear regression by torch.nn

# + [markdown] id="o1PuzC-8J3GZ" colab_type="text"
# Linear regression has three components:
#
# - function space: linear regression is indeed to find the __best__ function in the function space
# $$ \mathcal F =
# \{
# f(x; w, b) = wx + b| w, b\in \mathbb R^1
# \}$$
# This can be implemented by nn.Linear.
#
# - loss function: By best function among $\mathcal F$, we mean the function which minimizes a given loss defined a priori. In this case, the loss function is mean squared error, i.e.
# $$
# l(w,b) = \sum_i |y_i - f(x_i, w, b)|^2
# $$
# This can be defined by nn.MSELoss
#
# - optimization:
# To minimize the loss function over all $(w,b)$, one needs an iterative optimization scheme. In this example, we will use stochastic gradient descent implemented by torch.optim.SGD. see [ref](http://proceedings.mlr.press/v28/sutskever13.html)

# + [markdown] id="Bs0FFJyMXIA4" colab_type="text"
# __nn.Linear__
#
# nn.Linear(input_dim, output_dim) defines a function
# $$f(x, w, b) = x w^T+b$$
# where
#
# - $x \in \mathbb R^{1\times n}$ is the input, where $n$ is equal to input_dim
#
# - $w \in \mathbb R^{m\times n}$ is randomly assigned weight, where $m$ is equal to output_dim
#
# - $b \in \mathbb R^{1\times m}$ is randomly assigned bias.

# + id="h14UiZfcY4fX" colab_type="code" colab={}
# experiment for nn.Linear
in_dim = 2
out_dim = 1
model = nn.Linear(in_dim, out_dim)

# + id="qg0a6L5ZZWQW" colab_type="code" outputId="db62bac0-f21c-42a9-d96c-d959efe100d5" colab={"base_uri": "https://localhost:8080/", "height": 85}
# print model parameters automatically
for p in model.parameters():
    print(p)

# + id="im3_y5ZZZWTG" colab_type="code" outputId="8b8846cf-e0ea-4942-f210-39c05ea16014" colab={"base_uri": "https://localhost:8080/", "height": 85}
# print model parameters manually
print('weight is ' + str(model.weight))
print('bias is ' + str(model.bias))

# + id="jBeYkYohZrLX" colab_type="code" outputId="3ca50e3f-72ff-48b2-95e5-98a2ee5f92be" colab={"base_uri": "https://localhost:8080/", "height": 85}
# one can change parameters
# BUGFIX: use floating-point literals; assigning int64 tensors into float
# nn.Linear parameters silently changes their dtype.
model.weight.data = torch.tensor([[2., -1.]])
model.bias.data = torch.tensor([1.])
for p in model.parameters():
    print(p)

# + id="JKyaJfktaEGM" colab_type="code" outputId="f035218a-f260-418f-ab86-9174097d55c7" colab={"base_uri": "https://localhost:8080/", "height": 34}
# test computation of the linear model defined above
x = torch.tensor([1., 2.])
y = model(x)
print(y)

# + [markdown] id="rARfmRPVa9Rk" colab_type="text"
# __nn.MSELoss__
#
# pytorch provides MSELoss to save our work to define a function

# + id="XPQcU30Qaxzq" colab_type="code" outputId="c9f8edd6-43b4-4a72-b58d-f8e32ecd06b9" colab={"base_uri": "https://localhost:8080/", "height": 51}
input_ = torch.randn(3, 2)
target_ = torch.randn(3, 2)
loss = nn.MSELoss()
output_mseloss = loss(input_, target_)
print(output_mseloss.item())
# the same computation by hand
output_my = (target_ - input_).pow(2).mean()
print(output_my.item())

# + [markdown] id="D2y9jczjfEUR" colab_type="text"
# __Implementation of linear regression by pytorch__

# + id="FRQmLK6X_I9G" colab_type="code" colab={}
# Hyper-parameters
input_size = 1
output_size = 1
num_epochs = 10000
learning_rate = 0.001

# Linear regression model
model = nn.Linear(input_size, output_size)

# Loss and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# + id="K6x2G2eo_0Dh" colab_type="code" outputId="2370bd6d-9ff3-4628-9a07-493af3662482" colab={"base_uri": "https://localhost:8080/", "height": 357}
# Train the model
for epoch in range(num_epochs):
    # Forward pass
    outputs = model(x_train)
    loss = criterion(outputs, y_train)

    # Backward and optimize
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 500 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

# + id="T138NRaz_0GF" colab_type="code" outputId="cddd0f73-0b2f-4e69-e4ac-36e942e30cd4" colab={"base_uri": "https://localhost:8080/", "height": 337}
# Plot the graph
predicted = model(x_train).detach().numpy()
plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data')
plt.plot(x_train.numpy(), predicted, label='Fitted line')
plt.legend()
plt.show()

# print model parameters automatically
for p in model.parameters():
    print(p)

# + id="VuqYTX4i_0Ip" colab_type="code" outputId="9d11138f-400d-48f1-8158-41f88161ba6a" colab={"base_uri": "https://localhost:8080/", "height": 34}
# BUGFIX: this previously printed criterion(y_pred, y_train), i.e. the loss of
# the *linear-algebra* fit, while claiming to report the one-layer model's
# loss. Evaluate the trained nn model instead.
print('>>>loss from one layer model is:', criterion(model(x_train), y_train).item())

# + [markdown] id="BLVacJ5Lmgq_" colab_type="text"
# __Q.__ In this below, we deploy two linear layers and it shall do the same job as a one-layer network. However, it actually blows up. why?

# + id="IEdRtI4xftwt" colab_type="code" outputId="d71f7d1c-3035-4eac-82a7-efb15657ed8a" colab={"base_uri": "https://localhost:8080/", "height": 527}
# Hyper-parameters
input_size = 1
output_size = 1
num_epochs = 30000
learning_rate = 0.00003

# Linear regression model: two stacked linear layers (no nonlinearity), so the
# composition is still linear but the optimization landscape differs.
H = 2
layer2 = nn.Sequential(
    nn.Linear(input_size, H),
    nn.Linear(H, output_size)
)

# Loss and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(layer2.parameters(), lr=learning_rate)

# Train the model
for epoch in range(num_epochs):
    # Forward pass
    outputs = layer2(x_train)
    loss = criterion(outputs, y_train)

    # Backward and optimize
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 1000 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

# + id="W59zUVOohL3f" colab_type="code" colab={}
src/linreg_torch_v01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
from scipy.io import loadmat
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import pandas as pd
import numpy as np
import os
import seaborn as sns
import csv
import random

# <h2> Process Multiple Files</h3>
# <h3>Make F0 raw csv files per syllable

# +
# For each .mat file: extract f0_raw, syll_label and syll_time, then write one
# CSV of rounded/shifted/int-cast f0 values per VOICED syllable (labels that
# contain '#' mark unvoiced regions and are skipped).
# The f0 time step is 0.005 s, so syllable start/end times are divided by it
# to obtain indexes into f0_raw.

# number of decimal places kept when rounding f0 values
dec = 0

# path to input files directory (machine-specific)
directory_path_root = '/Users/robinson/Dropbox/anasynth/_data/emoVC/Olivia2006'
directory = os.fsencode(directory_path_root)

# path to output files directory
directory_path_f0raw = '/Users/robinson/Dropbox/anasynth/_data/emoVC/Olivia2006/f0_raw_syllable'
os.makedirs(directory_path_f0raw, exist_ok=True)

# list collecting every voiced syllable label seen across all files
all_syllables = []

for file in os.listdir(directory):
    filename = os.fsdecode(file)
    if not filename.endswith('.mat'):
        continue
    filepath = os.path.join(directory_path_root, filename)

    # load the file and flatten f0_raw / syll_label to 1-D
    mat_dict = loadmat(filepath)
    f0_raw = mat_dict['f0_raw'].reshape(-1)
    syll_label = mat_dict['syll_label'].reshape(-1)

    # keep syll_time 2-D to preserve the start/end pairing
    # NOTE(review): reshape is NOT a transpose; this assumes the .mat layout
    # already yields (start, end) rows after the reshape - confirm against data.
    syll_time = mat_dict['syll_time']
    syll_time = syll_time.reshape((syll_time.shape[1], syll_time.shape[0]))

    for i, syll in enumerate(syll_label):
        # skip unvoiced regions, marked with '#'
        if '#' in syll[0]:
            continue
        all_syllables.append(syll[0])

        # convert start/end times to f0_raw indexes (time step 0.005 s)
        syll_start_idx = int(syll_time[i, 0] // 0.005)
        syll_end_idx = int(syll_time[i, 1] // 0.005)
        syll_f0 = f0_raw[syll_start_idx:syll_end_idx]

        # round to `dec` dp, shift the decimal point right by `dec` places,
        # then cast to int for a precise in-memory representation
        syll_f0_dec = (np.around(syll_f0, decimals=dec) * (10 ** dec)).astype(int)

        # write CSV named <file>.sNN_<syllable>.csv, values formatted as %u
        filename_noext, _ = os.path.splitext(filename)
        output_file_name = ''.join(
            [filename_noext, '.s', format(i, '02d'), '_', syll[0], '.csv'])
        np.savetxt(os.path.join(directory_path_f0raw, output_file_name),
                   syll_f0_dec, delimiter=',', fmt='%u')

print('done')
# -

# <h3> Make Combo Source and Target Syllable Input Files</h3>

# +
# The files above hold one syllable per file; source and target rows must be
# paired, so for each phrase/syllable every source utterance (neutral, i00,
# e01-e08) is matched with every target utterance (expressive, e02, i01-i05).

input_directory_path = '/Users/robinson/Dropbox/anasynth/_data/emoVC/Olivia2006/f0_raw_syllable'
input_file_root = 'Olivia2006'
input_file_extension = '.csv'

output_directory = os.path.join(input_directory_path, 'out')
os.makedirs(output_directory, exist_ok=True)

filename_source = 'source.txt'
filename_target = 'target.txt'
filename_log = 'log.txt'

# open output files in a subdirectory of the input files directory
fs = open(os.path.join(output_directory, filename_source), 'w')
ft = open(os.path.join(output_directory, filename_target), 'w')
fo = open(os.path.join(output_directory, filename_log), 'w')


def getSet(symbol, num_from=None, num_to=None):
    """Return the set of csv filenames in input_directory_path matching a code.

    Pass symbol='p'/'e'/'i' with an inclusive num_from..num_to range to match
    e.g. '.p01' tokens, or pass a syllable code string alone (num_from=None)
    to match '_<syllable>.' in the filename.
    """
    directory = os.fsencode(input_directory_path)
    filename_list = []
    for file in os.listdir(directory):
        filename = os.fsdecode(file)
        if not filename.endswith('.csv'):
            continue
        if num_from is not None:
            # numbered p/e/i code: try every value in the requested range
            for i in range(num_from, num_to + 1):
                if ''.join(['.', symbol, format(i, '02d')]) in filename:
                    filename_list.append(filename)
        else:
            # syllable code: matched between '_' and the extension dot
            if ''.join(['_', symbol, '.']) in filename:
                filename_list.append(filename)
    return set(filename_list)


# #####################
# DEFINE PARAMETERS
phrase_from = 1
phrase_to = 10

source_emotion_from = 1
source_emotion_to = 8
target_emotion_from = 2
target_emotion_to = 2

source_intensity_from = 0
source_intensity_to = 0
target_intensity_from = 1
target_intensity_to = 5
# END PARAMETERS
# #######################

# filename sets for each emotion / intensity constraint
set_source_emotions = getSet('e', source_emotion_from, source_emotion_to)
set_target_emotions = getSet('e', target_emotion_from, target_emotion_to)
set_source_intensities = getSet('i', source_intensity_from, source_intensity_to)
set_target_intensities = getSet('i', target_intensity_from, target_intensity_to)

# unique syllable labels collected by the extraction cell above
all_syllables_set = set(all_syllables)

for phrase in range(phrase_from, phrase_to + 1):
    set_phrases = getSet('p', phrase, phrase)

    # syllables that actually occur in this phrase
    set_phrase_syllables = {
        s for s in all_syllables_set if getSet(s) & set_phrases}

    for syll in set_phrase_syllables:
        set_one_syllable = getSet(syll)

        # source filenames for this phrase/syllable (may be empty)
        set_sources = set.intersection(
            set_one_syllable, set_phrases,
            set_source_emotions, set_source_intensities)
        if not set_sources:
            continue

        # target filenames for this phrase/syllable (may be empty)
        set_targets = set.intersection(
            set_one_syllable, set_phrases,
            set_target_emotions, set_target_intensities)
        if not set_targets:
            continue

        # pair every source with every target and append one row per file
        for source_file in set_sources:
            for target_file in set_targets:
                source_file_path = os.path.join(input_directory_path, source_file)
                target_file_path = os.path.join(input_directory_path, target_file)

                # missing/empty file: abandon this syllable and try the next
                if not os.path.isfile(target_file_path) \
                        or os.stat(target_file_path).st_size == 0 \
                        or not os.path.isfile(source_file_path) \
                        or os.stat(source_file_path).st_size == 0:
                    break

                # reshape(1, -1) keeps all values on one row and, unlike
                # reshape((1, shape[0])), also handles a single-value file
                # (0-d loadtxt result)
                source_f0_raw = np.loadtxt(source_file_path, dtype='int').reshape(1, -1)
                np.savetxt(fs, source_f0_raw, delimiter=' ', fmt='%u')

                target_f0_raw = np.loadtxt(target_file_path, dtype='int').reshape(1, -1)
                np.savetxt(ft, target_f0_raw, delimiter=' ', fmt='%u')

                # record the pair in the log file
                print(source_file_path + ' ' + target_file_path, file=fo)

fs.close()
ft.close()
fo.close()

print('done')
# -

# +
# Shuffle the source/target pairs and split them into train/val/test files.

# split ratios; test_split may be 0.0, the others must not be
train_split = 0.8
val_split = 0.2
test_split = 0.0
shuffle = False

# read both files line by line, stripping surrounding whitespace/empty lines
# (opened once each - the original opened fs/ft twice and leaked the handles)
with open(os.path.join(output_directory, filename_source)) as fs:
    source_data = fs.read().strip().split('\n')
with open(os.path.join(output_directory, filename_target)) as ft:
    target_data = ft.read().strip().split('\n')

f_lines = len(source_data)
# source and target must pair up row-for-row
if f_lines != len(target_data):
    raise ValueError('Not the same')

# row counts per split; test gets whatever is left
train_lines = int(f_lines * train_split)
val_lines = int(f_lines * val_split)
test_lines = f_lines - train_lines - val_lines

# pair source/target rows so shuffling preserves the pairing
merged_data = list(zip(source_data, target_data))
if shuffle:
    random.shuffle(merged_data)

train_data_source = [x[0] for x in merged_data[:train_lines]]
train_data_target = [x[1] for x in merged_data[:train_lines]]
val_data_source = [x[0] for x in merged_data[train_lines:train_lines + val_lines]]
val_data_target = [x[1] for x in merged_data[train_lines:train_lines + val_lines]]
test_data_source = [x[0] for x in merged_data[train_lines + val_lines:]]
test_data_target = [x[1] for x in merged_data[train_lines + val_lines:]]

print(len(train_data_source))
print(len(train_data_target))

# make train, dev, test and model directories
train_dir = os.path.join(output_directory, 'train')
dev_dir = os.path.join(output_directory, 'dev')
test_dir = os.path.join(output_directory, 'test')
model_dir = os.path.join(output_directory, 'model')
for d in (train_dir, dev_dir, test_dir, model_dir):
    os.makedirs(d, exist_ok=True)


def _write_lines(path, lines):
    """Write each string in `lines` to `path`, one per line."""
    with open(path, 'w') as f:
        for line in lines:
            print(line, file=f)


print(len(train_data_source))
print(len(train_data_target))
_write_lines(os.path.join(train_dir, 'train_source.txt'), train_data_source)
_write_lines(os.path.join(train_dir, 'train_target.txt'), train_data_target)
_write_lines(os.path.join(dev_dir, 'val_source.txt'), val_data_source)
_write_lines(os.path.join(dev_dir, 'val_target.txt'), val_data_target)
_write_lines(os.path.join(test_dir, 'test_source.txt'), test_data_source)
_write_lines(os.path.join(test_dir, 'test_target.txt'), test_data_target)

print('fs_lines = ' + str(f_lines))
print('train_lines = ' + str(train_lines))
print('val_lines = ' + str(val_lines))
print('test_lines = ' + str(test_lines))
print('done')
# -

# <h3> Make Vocabulary Inputs

# +
# For each of source.txt / target.txt, find the min (non-zero) and max values
# and write the full integer range as a vocabulary input file in train_dir.
for file in [filename_source, filename_target]:
    with open(os.path.join(output_directory, file)) as fin:
        source_data = fin.read().strip().split('\n')

    source_data_min = float('inf')
    source_data_max = 0
    for row in source_data:
        source_array = np.array([int(x) for x in row.split(' ')])
        if source_array.max() > source_data_max:
            source_data_max = source_array.max()
        nonzero = source_array[np.nonzero(source_array)]
        # guard: an all-zero row would make np.min crash on an empty array
        if nonzero.size and nonzero.min() < source_data_min:
            source_data_min = nonzero.min()

    # full integer range from min to max, inclusive
    range_size = int(source_data_max - source_data_min) + 1
    samples = np.linspace(source_data_min, source_data_max,
                          num=range_size, endpoint=True, dtype=int)
    print(samples)

    filename_noext, _ = os.path.splitext(file)
    np.savetxt(os.path.join(train_dir, filename_noext + '_vocab_input.txt'),
               samples, delimiter=' ', fmt='%u')

# now run the vocabulary script to make the proper vocab files
# -
preprocessing/archive/tf_seq2seq_data_processing_syllable_by_syllable.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:tutorial_env] * # language: python # name: conda-env-tutorial_env-py # --- # + [markdown] colab_type="text" id="Y57RMM1LEQmR" # # <span style="color:orange">Binary Classification Tutorial (CLF101) - Level Beginner</span> # + [markdown] colab_type="text" id="GM-nQ7LqEQma" # **Date Updated: May 02, 2020** # # # 1.0 Tutorial Objective # Welcome to the Binary Classification Tutorial (CLF101) - Level Beginner. This tutorial assumes that you are new to PyCaret and looking to get started with binary classification using the `pycaret.classification` Module. # # In this tutorial we will learn: # # # * **Getting Data:** How to import data from PyCaret repository # * **Setting up Environment:** How to setup an experiment in PyCaret and get started with building classification models # * **Create Model:** How to create a model, perform stratified cross validation and evaluate classification metrics # * **Tune Model:** How to automatically tune the hyper-parameters of a classification model # * **Plot Model:** How to analyze model performance using various plots # * **Finalize Model:** How to finalize the best model at the end of the experiment # * **Predict Model:** How to make predictions on new / unseen data # * **Save / Load Model:** How to save / load a model for future use # # Read Time : Approx. 30 Minutes # # # ## 1.1 Installing PyCaret # The first step to get started with PyCaret is to install PyCaret. Installation is easy and will only take a few minutes. 
Follow the instructions below: # # #### Installing PyCaret in Local Jupyter Notebook # `pip install pycaret` <br /> # # #### Installing PyCaret on Google Colab or Azure Notebooks # `!pip install pycaret` # # # ## 1.2 Pre-Requisites # - Python 3.x # - Latest version of pycaret # - Internet connection to load data from pycaret's repository # - Basic Knowledge of Binary Classification # # ## 1.3 For Google colab users: # If you are running this notebook on Google Colab, run the following code at top of your notebook to display interactive visuals.<br/> # <br/> # `from pycaret.utils import enable_colab` <br/> # `enable_colab()` # # # ## 1.4 See also: # - __[Binary Classification Tutorial (CLF102) - Intermediate Level](https://github.com/pycaret/pycaret/blob/master/Tutorials/Binary%20Classification%20Tutorial%20Level%20Intermediate%20-%20CLF102.ipynb)__ # - __[Binary Classification Tutorial (CLF103) - Expert Level](https://github.com/pycaret/pycaret/blob/master/Tutorials/Binary%20Classification%20Tutorial%20Level%20Expert%20-%20CLF103.ipynb)__ # + [markdown] colab_type="text" id="2DJaOwC_EQme" # # 2.0 What is Binary Classification? # Binary classification is a supervised machine learning technique where the goal is to predict categorical class labels which are discrete and unoredered such as Pass/Fail, Positive/Negative, Default/Not-Default etc. A few real world use cases for binary classification are listed below: # # - Medical testing to determine if a patient has a certain disease or not - the classification property is the presence of the disease. # - A "pass or fail" test method or quality control in factories, i.e. deciding if a specification has or has not been met – a go/no-go classification. # - Information retrieval, namely deciding whether a page or an article should be in the result set of a search or not – the classification property is the relevance of the article, or the usefulness to the user. 
# # __[Learn More about Binary Classification](https://medium.com/@categitau/in-one-of-my-previous-posts-i-introduced-machine-learning-and-talked-about-the-two-most-common-c1ac6e18df16)__ # + [markdown] colab_type="text" id="XC3kSuueEQmh" # # 3.0 Overview of the Classification Module in PyCaret # PyCaret's classification module (`pycaret.classification`) is a supervised machine learning module which is used for classifying the elements into a binary group based on various techniques and algorithms. Some common use cases of binary classification problems include predicting customer default (yes or no), customer churn (customer will leave or stay), disease found (positive or negative). # # The PyCaret classification module can be used for binary or multi-class classification problems. It has over 18 algorithms and 14 plots to analyze the performance of models. Be it hyper-parameter tuning, ensembling or advanced techniques like stacking, PyCaret's classification module has it all. # + [markdown] colab_type="text" id="aAKRo-EbEQml" # # 4.0 Dataset for the Tutorial # + [markdown] colab_type="text" id="VLKxlFjrEQmq" # For this tutorial we will use a dataset from UCI called **Default of Credit Card Clients Dataset**. This dataset contains information on default payments, demographic factors, credit data, payment history, and billing statements of credit card clients in Taiwan from April 2005 to September 2005. There are 24,000 samples and 25 features. Short descriptions of each column are as follows: # # - **ID:** ID of each client # - **LIMIT_BAL:** Amount of given credit in NT dollars (includes individual and family/supplementary credit) # - **SEX:** Gender (1=male, 2=female) # - **EDUCATION:** (1=graduate school, 2=university, 3=high school, 4=others, 5=unknown, 6=unknown) # - **MARRIAGE:** Marital status (1=married, 2=single, 3=others) # - **AGE:** Age in years # - **PAY_0 to PAY_6:** Repayment status by n months ago (PAY_0 = last month ... 
PAY_6 = 6 months ago) (Labels: -1=pay duly, 1=payment delay for one month, 2=payment delay for two months, ... 8=payment delay for eight months, 9=payment delay for nine months and above) # - **BILL_AMT1 to BILL_AMT6:** Amount of bill statement by n months ago ( BILL_AMT1 = last_month .. BILL_AMT6 = 6 months ago) # - **PAY_AMT1 to PAY_AMT6:** Amount of payment by n months ago ( BILL_AMT1 = last_month .. BILL_AMT6 = 6 months ago) # - **default:** Default payment (1=yes, 0=no) `Target Column` # # #### Dataset Acknowledgement: # <NAME>. (2013). UCI Machine Learning Repository. Irvine, CA: University of California, School of Information and Computer Science. # # The original dataset and data dictionary can be __[found here.](https://archive.ics.uci.edu/ml/datasets/default+of+credit+card+clients)__ # + [markdown] colab_type="text" id="Ui_rALqYEQmv" # # 5.0 Getting the Data # + [markdown] colab_type="text" id="BfqIMeJNEQmz" # You can download the data from the original source __[found here](https://archive.ics.uci.edu/ml/datasets/default+of+credit+card+clients)__ and load it using pandas __[(Learn How)](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html)__ or you can use PyCaret's data respository to load the data using the `get_data()` function (This will require an internet connection). # + colab={"base_uri": "https://localhost:8080/", "height": 211} colab_type="code" id="lUvE187JEQm3" outputId="8741262c-0e33-4ec0-b54d-3c8fb41e52c0" from pycaret.datasets import get_data dataset = get_data('credit') # + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="kMqDGBkJEQnN" outputId="b2015b7a-4c1a-4377-d9cf-3e9ac5ce3ea2" #check the shape of data dataset.shape # + [markdown] colab_type="text" id="LyGFryEhEQne" # In order to demonstrate the `predict_model()` function on unseen data, a sample of 1200 records has been withheld from the original dataset to be used for predictions. 
This should not be confused with a train/test split as this particular split is performed to simulate a real life scenario. Another way to think about this is that these 1200 records are not available at the time when the machine learning experiment was performed. # + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" id="hXmaL1xFEQnj" outputId="f1f62a7d-5d3d-4832-ee00-a4d20ee39c41" data = dataset.sample(frac=0.95, random_state=786) data_unseen = dataset.drop(data.index).reset_index(drop=True) data.reset_index(drop=True, inplace=True) print('Data for Modeling: ' + str(data.shape)) print('Unseen Data For Predictions: ' + str(data_unseen.shape)) # + [markdown] colab_type="text" id="y9s9wNcjEQn0" # # 6.0 Setting up Environment in PyCaret # + [markdown] colab_type="text" id="ZlA01j6NEQn7" # The `setup()` function initializes the environment in PyCaret and creates the transformation pipeline to prepare the data for modeling and deployment. `setup()` must be called before executing any other function in PyCaret. It takes two mandatory parameters: a pandas dataframe and the name of the target column. All other parameters are optional and are used to customize the pre-processing pipeline (we will see them in later tutorials). # # When `setup()` is executed, PyCaret's inference algorithm will automatically infer the data types for all features based on certain properties. The data type should be inferred correctly but this is not always the case. To account for this, PyCaret displays a table containing the features and their inferred data types after `setup()` is executed. If all of the data types are correctly identified, `enter` can be pressed to continue or `quit` can be typed to end the expriment. Ensuring that the data types are correct is of fundamental importance in PyCaret as it automatically performs a few pre-processing tasks which are imperative to any machine learning experiment. 
These tasks are performed differently for each data type which means it is very important for them to be correctly configured. # # In later tutorials we will learn how to overwrite PyCaret's inferred data type using the `numeric_features` and `categorical_features` parameters in `setup()`. # + colab={} colab_type="code" id="BOmRR0deEQoA" from pycaret.classification import * # + colab={"base_uri": "https://localhost:8080/", "height": 803} colab_type="code" id="k2IuvfDHEQoO" outputId="c7754ae9-b060-4218-b6f0-de65a815aa3a" exp_clf101 = setup(data = data, target = 'default', session_id=123) # + [markdown] colab_type="text" id="JJSOhIOxEQoY" # Once the setup has been succesfully executed, it prints the information grid which contains several important pieces of information. Most of the information is related to the pre-processing pipeline which is constructed when `setup()` is executed. The majority of these features are out of scope for the purposes of this tutorial however a few important things to note at this stage include: # # - **session_id :** A pseudo-random number distributed as a seed in all functions for later reproducibility. If no `session_id` is passed, a random number is automatically generated that is distributed to all functions. In this experiment, the `session_id` is set as `123` for later reproducibility.<br/> # <br/> # - **Target Type :** Binary or Multiclass. The Target type is automatically detected and shown. There is no difference in how the experiment is performed for Binary or Multiclass problems. All functionalities are identical.<br/> # <br/> # - **Label Encoded :** When the Target variable is of type string (i.e. 'Yes' or 'No') instead of 1 or 0, it automatically encodes the label into 1 and 0 and displays the mapping (0 : No, 1 : Yes) for reference. In this experiment no label encoding is required since the target variable is of type numeric. <br/> # <br/> # - **Original Data :** Displays the original shape of the dataset. 
In this experiment (22800, 24) means 22,800 samples and 24 features including the target column. <br/> # <br/> # - **Missing Values :** When there are missing values in the original data this will show as `True`. For this experiment there are no missing values in the dataset. # <br/> # <br/> # - **Numeric Features :** The number of features inferred as numeric. In this dataset, 14 out of 24 features are inferred as numeric. <br/> # <br/> # - **Categorical Features :** The number of features inferred as categorical. In this dataset, 9 out of 24 features are inferred as categorical. <br/> # <br/> # - **Transformed Train Set :** Displays the shape of the transformed training set. Notice that the original shape of (22800, 24) is transformed into (15959, 90) for the transformed train set and the number of features have increased to 90 from 24 due to categorical encoding <br/> # <br/> # - **Transformed Test Set :** Displays the shape of the transformed test/hold-out set. There are 6841 samples in test/hold-out set. This split is based on the default value of 70/30 that can be changed using the `train_size` parameter in setup. <br/> # # Notice how a few tasks that are imperative to perform modeling are automatically handled such as missing value imputation (in this case there are no missing values in the training data, but we still need imputers for unseen data), categorical encoding, etc. Most of the parameters in `setup()` are optional and used for customizing the pre-processing pipeline. These parameters are out of scope for this tutorial but as you progress to the intermediate and expert levels, we will cover them in much greater detail. # + [markdown] colab_type="text" id="it_nJo1IEQob" # # 7.0 Comparing All Models # + [markdown] colab_type="text" id="apb_B9bBEQof" # Comparing all models to evaluate performance is the recommended starting point for modeling once the setup is completed (unless you exactly know what kind of model you need, which is often not the case). 
This function trains all models in the model library and scores them using stratified cross validation for metric evaluation. The output prints a score grid that shows average Accuracy, Recall, Precision, F1 and Kappa across the folds (10 by default) of all the available models in the model library. # + colab={} colab_type="code" id="AsG0b1NIEQoj" outputId="a6e3a510-45a1-4782-8ffe-0ec138a64eed" compare_models() # + [markdown] colab_type="text" id="nZAUhQGLEQoz" # Two simple words of code ***(not even a line)*** have created over 15 models using 10 fold stratified cross validation and evaluated the 5 most commonly used classification metrics (Accuracy, Recall, Precision, F1, Kappa). The score grid printed above highlights the highest performing metric for comparison purposes only. The grid by default is sorted using 'Accuracy' (highest to lowest) which can be changed by passing the `sort` parameter. For example `compare_models(sort = 'Recall')` will sort the grid by Recall instead of Accuracy. If you want to change the fold parameter from the default value of `10` to a different value then you can use the `fold` parameter. For example `compare_models(fold = 5)` will compare all models on 5 fold cross validation. Reducing the number of folds will improve the training time. # + [markdown] colab_type="text" id="P5m2pciOEQo4" # # 8.0 Create a Model # + [markdown] colab_type="text" id="u_6cIilfEQo7" # While `compare_models()` is a powerful function and often a starting point in any experiment, it does not return any trained models. PyCaret's recommended experiment workflow is to use `compare_models()` right after setup to evaluate top performing models and finalize a few candidates for continued experimentation. As such, the function that actually allows you to create a model is unimaginatively called `create_model()`. This function creates a model and scores it using stratified cross-validation.
Similar to `compare_models()`, the output prints a score grid that shows Accuracy, Recall, Precision, F1 and Kappa by fold. # # For the remaining part of this tutorial, we will work with the below models as our candidate models. The selections are for illustration purposes only and do not necessarily mean they are the top performing or ideal for this type of data. # # - Decision Tree Classifier ('dt') # - K Neighbors Classifier ('knn') # - Random Forest Classifier ('rf') # # There are 18 classifiers available in the model library of PyCaret. Please view the `create_model()` docstring for the list of all available models. # + [markdown] colab_type="text" id="UWMSeyNhEQo-" # ### 8.1 Decision Tree Classifier # + colab={"base_uri": "https://localhost:8080/", "height": 392} colab_type="code" id="LP896uSIEQpD" outputId="d6d31562-feb5-4052-ee23-0a444fecaacf" dt = create_model('dt') # + colab={} colab_type="code" id="FRat05yGEQpQ" outputId="c8e6a190-8bec-4646-d2c8-8a92b129c484" #trained model object is stored in the variable 'dt'. print(dt) # + [markdown] colab_type="text" id="rWUojqBCEQpb" # ### 8.2 K Neighbors Classifier # + colab={"base_uri": "https://localhost:8080/", "height": 392} colab_type="code" id="2uonD20gEQpe" outputId="560e3cb6-41d5-4293-b1c5-2bd1cf3bc63b" knn = create_model('knn') # + [markdown] colab_type="text" id="nSg3OUjuEQpu" # ### 8.3 Random Forest Classifier # + colab={"base_uri": "https://localhost:8080/", "height": 392} colab_type="code" id="FGCoUiQpEQpz" outputId="212cb736-6dcb-4b77-e45b-14ad895bff43" rf = create_model('rf') # + [markdown] colab_type="text" id="z6F3Fk7TEQp8" # Notice that the mean scores of all models match with the scores printed in `compare_models()`. This is because the metrics printed in the `compare_models()` score grid are the average scores across all CV folds. Similar to `compare_models()`, if you want to change the fold parameter from the default value of 10 to a different value, then you can use the `fold` parameter.
For Example: `create_model('dt', fold = 5)` will create a Decision Tree Classifier using 5 fold stratified CV. # + [markdown] colab_type="text" id="XvpjzbGQEQqB" # # 9.0 Tune a Model # + [markdown] colab_type="text" id="nc_GgksHEQqE" # When a model is created using the `create_model()` function it uses the default hyperparameters. In order to tune hyperparameters, the `tune_model()` function is used. This function automatically tunes the hyperparameters of a model on a pre-defined search space and scores it using stratified cross-validation. The output prints a score grid that shows Accuracy, Recall, Precision, F1 and Kappa by fold. <br/> # <br/> # **Note:** `tune_model()` does not take a trained model object as an input. It instead requires a model name to be passed as an abbreviated string similar to how it is passed in `create_model()`. All other functions in `pycaret.classification` require a trained model object as an argument. # + [markdown] colab_type="text" id="BQlMCxrUEQqG" # ### 9.1 Decision Tree Classifier # + colab={"base_uri": "https://localhost:8080/", "height": 392} colab_type="code" id="of46aj6vEQqJ" outputId="26f7f708-739a-489b-bb76-b33e0a800362" tuned_dt = tune_model('dt') # + colab={} colab_type="code" id="__anDkttEQqV" outputId="7cf46ace-012a-4131-b8b8-370f9d4a63cb" #tuned model object is stored in the variable 'tuned_dt'. 
print(tuned_dt) # + [markdown] colab_type="text" id="CD-f0delEQqq" # ### 9.2 K Neighbors Classifier # + colab={"base_uri": "https://localhost:8080/", "height": 392} colab_type="code" id="xN1nYwFXEQqv" outputId="e4ab669d-bee0-4a9d-f5c7-2ed07ec613b9" tuned_knn = tune_model('knn') # + [markdown] colab_type="text" id="KO3zIfs-EQrA" # ### 9.3 Random Forest Classifier # + colab={"base_uri": "https://localhost:8080/", "height": 392} colab_type="code" id="gmaIfnBMEQrE" outputId="a59cebfa-f81e-477c-f83c-e9443fd80b0f" tuned_rf = tune_model('rf') # + [markdown] colab_type="text" id="IqxEZRi1EQrO" # The `tune_model()` function is a random grid search of hyperparameters over a pre-defined search space. By default, it is set to optimize `Accuracy` but this can be changed using `optimize` parameter. For example: `tune_model('dt', optimize = 'AUC')` will search for the hyperparameters of a Decision Tree Classifier that results in highest `AUC`. For the purposes of this example, we have used the default metric `Accuracy` for the sake of simplicity only. Generally, when the dataset is imbalanced (such as the credit dataset we are working with) `Accuracy` is not a good metric for consideration. The methodology behind selecting the right metric to evaluate a classifier is beyond the scope of this tutorial but if you would like to learn more about it, you can __[click here](https://medium.com/@george.drakos62/how-to-select-the-right-evaluation-metric-for-machine-learning-models-part-3-classification-3eac420ec991)__ to read an article on how to choose the right evaluation metric. # # Notice how the results after tuning have been improved: # # - Decision Tree Classifier (Before: **`0.7294`** , After: **`0.8199`**) # - K Neighbors Classifier (Before: **`0.7505`** , After: **`0.7791`**) # - Random Forest Classifier (Before: **`0.8084`** , After: **`0.8229`**) # # # Metrics alone are not the only criteria you should consider when finalizing the best model for production. 
Other factors to consider include training time, standard deviation of k-folds etc. As you progress through the tutorial series we will discuss those factors in detail at the intermediate and expert levels. For now, let's move forward considering the Tuned Random Forest Classifier as our best model for the remainder of this tutorial. # + [markdown] colab_type="text" id="w_P46O0jEQrT" # # 10.0 Plot a Model # + [markdown] colab_type="text" id="FGM9GOtjEQrV" # Before model finalization, the `plot_model()` function can be used to analyze the performance across different aspects such as AUC, confusion_matrix, decision boundary etc. This function takes a trained model object and returns a plot based on the test / hold-out set. # # There are 15 different plots available, please see the `plot_model()` docstring for the list of available plots. # + [markdown] colab_type="text" id="euqkQYJaEQrY" # ### 10.1 AUC Plot # + colab={} colab_type="code" id="RLbLqvkHEQra" outputId="fe40b5e3-6375-43e8-e97d-1d487e02eb2d" plot_model(tuned_rf, plot = 'auc') # + [markdown] colab_type="text" id="bwyoTUDQEQrm" # ### 10.2 Precision-Recall Curve # + colab={} colab_type="code" id="4IvchQoiEQrr" outputId="fdff2076-86fc-42f5-beee-f0051ea30dd4" plot_model(tuned_rf, plot = 'pr') # + [markdown] colab_type="text" id="_r9rwEw7EQrz" # ### 10.3 Feature Importance Plot # + colab={} colab_type="code" id="nVScSxJ-EQr2" outputId="f44f4b08-b749-4d0e-dcc9-d7e3dc6240c8" plot_model(tuned_rf, plot='feature') # + [markdown] colab_type="text" id="FfWC3NEhEQr9" # ### 10.4 Confusion Matrix # + colab={} colab_type="code" id="OAB5mes-EQsA" outputId="bd82130d-2cc3-4b63-df5d-03b7aa54bf52" plot_model(tuned_rf, plot = 'confusion_matrix') # + [markdown] colab_type="text" id="deClKJrbEQsJ" # *Another* way to analyze the performance of models is to use the `evaluate_model()` function which displays a user interface for all of the available plots for a given model. It internally uses the `plot_model()` function. 
# + colab={"base_uri": "https://localhost:8080/", "height": 436, "referenced_widgets": ["42d5400d235d40b78190016ef0dabe11", "41031579127f4a53b58957e601465083", "12bf8b3c6ae8444a900474912589fdf1", "9bb3600d38c04691b444ff375ad5e3f5", "8886001bc7c1463ba58a8453f5c55073", "0a06fb091bd94ce6b6ab892e2c6faadf", "3cc1e83b91f34b289c7d52003f20a97a", "8d709ec9ec484944b1f9773748857f84", "<KEY>", "<KEY>", "57b94ac505d142769b79de2f1e5c1166", "2a81017413ca4fe789c2272a5831a069", "<KEY>", "9e338844e75b4e17be8483529f5f38fd", "22588a12c0db4067982e62ebbe7e6930"]} colab_type="code" id="OcLV1Ln6EQsN" outputId="7b5b8b4e-8d4a-4371-9a4f-cabb0a96265a" evaluate_model(tuned_rf) # + [markdown] colab_type="text" id="RX5pYUJJEQsV" # # 11.0 Predict on test / hold-out Sample # + [markdown] colab_type="text" id="mFSvRYiaEQsd" # Before finalizing the model, it is advisable to perform one final check by predicting the test/hold-out set and reviewing the evaluation metrics. If you look at the information grid in Section 6 above, you will see that 30% (6,841 samples) of the data has been separated out as test/hold-out sample. All of the evaluation metrics we have seen above are cross validated results based on the training set (70%) only. Now, using our final trained model stored in the `tuned_rf` variable we will predict against the hold-out sample and evaluate the metrics to see if they are materially different than the CV results. # + colab={} colab_type="code" id="nwaZk6oTEQsi" outputId="d30c8533-d347-4fa6-f18e-5b2abc937bec" predict_model(tuned_rf); # + [markdown] colab_type="text" id="E-fHsX2AEQsx" # The accuracy on test/hold-out set is **`0.8126`** compared to **`0.8229`** achieved on the `tuned_rf` CV results (in section 9.3 above). This is not a significant difference. If there is a large variation between the test/hold-out and CV results, then this would normally indicate over-fitting but could also be due to several other factors and would require further investigation. 
In this case, we will move forward with finalizing the model and predicting on unseen data (the 5% that we had separated in the beginning and never exposed to PyCaret). # # (TIP : It's always good to look at the standard deviation of CV results when using `create_model()`.) # + [markdown] colab_type="text" id="r79BGjIfEQs1" # # 12.0 Finalize Model for Deployment # + [markdown] colab_type="text" id="B-6xJ9kQEQs7" # Model finalization is the last step in the experiment. A normal machine learning workflow in PyCaret starts with `setup()`, followed by comparing all models using `compare_models()` and shortlisting a few candidate models (based on the metric of interest) to perform several modeling techniques such as hyperparameter tuning, ensembling, stacking etc. This workflow will eventually lead you to the best model for use in making predictions on new and unseen data. The `finalize_model()` function fits the model onto the complete dataset including the test/hold-out sample (30% in this case). The purpose of this function is to train the model on the complete dataset before it is deployed in production. # + colab={} colab_type="code" id="_--tO4KGEQs-" final_rf = finalize_model(tuned_rf) # + colab={"base_uri": "https://localhost:8080/", "height": 147} colab_type="code" id="U9W6kXsSEQtQ" outputId="794b24a4-9c95-4730-eddd-f82e4925b866" #Final Random Forest model parameters for deployment print(final_rf) # + [markdown] colab_type="text" id="kgdOjxypEQtd" # **Caution:** One final word of caution. Once the model is finalized using `finalize_model()`, the entire dataset including the test/hold-out set is used for training. As such, if the model is used for predictions on the hold-out set after `finalize_model()` is used, the information grid printed will be misleading as you are trying to predict on the same data that was used for modeling. 
In order to demonstrate this point only, we will use `final_rf` under `predict_model()` to compare the information grid with the one above in section 11. # + colab={} colab_type="code" id="NJDk3I-EEQtg" outputId="4d75663a-e86f-4826-c8e4-c9aa722648df" predict_model(final_rf); # + [markdown] colab_type="text" id="V77JC5JVEQtp" # Notice how the AUC in `final_rf` has increased to **`0.8189`** from **`0.7538`**, even though the model is the same. This is because the `final_rf` variable has been trained on the complete dataset including the test/hold-out set. # + [markdown] colab_type="text" id="hUzc6tXNEQtr" # # 13.0 Predict on Unseen Data # + [markdown] colab_type="text" id="dx5vXjChEQtt" # The `predict_model()` function is also used to predict on the unseen dataset. The only difference from section 11 above is that this time we will pass the `data_unseen` parameter. `data_unseen` is the variable created at the beginning of the tutorial and contains 5% (1200 samples) of the original dataset which was never exposed to PyCaret. (see section 5 for explanation) # + colab={"base_uri": "https://localhost:8080/", "height": 211} colab_type="code" id="0y5KWLC6EQtx" outputId="30771f87-7847-43ce-e984-9963cff7d043" unseen_predictions = predict_model(final_rf, data=data_unseen) unseen_predictions.head() # + [markdown] colab_type="text" id="oPYmVpugEQt5" # The `Label` and `Score` columns are added onto the `data_unseen` set. Label is the prediction and score is the probability of the prediction. Notice that predicted results are concatenated to the original dataset while all the transformations are automatically performed in the background. # + [markdown] colab_type="text" id="L__po3sUEQt7" # # 14.0 Saving the model # + [markdown] colab_type="text" id="1sQPT7jrEQt-" # We have now finished the experiment by finalizing the `tuned_rf` model which is now stored in `final_rf` variable. We have also used the model stored in `final_rf` to predict `data_unseen`. 
This brings us to the end of our experiment, but one question is still to be asked: What happens when you have more new data to predict? Do you have to go through the entire experiment again? The answer is no, PyCaret's inbuilt function `save_model()` allows you to save the model along with entire transformation pipeline for later use. # + colab={} colab_type="code" id="ln1YWIXTEQuA" outputId="d3cb0652-f72e-44e8-9455-824b12740bff" save_model(final_rf,'Final RF Model 08Feb2020') # + [markdown] colab_type="text" id="WE6f48AYEQuR" # (TIP : It's always good to use date in the filename when saving models, it's good for version control.) # + [markdown] colab_type="text" id="Z8OBesfkEQuU" # # 15.0 Loading the saved model # + [markdown] colab_type="text" id="V2K_WLaaEQuW" # To load a saved model at a future date in the same or an alternative environment, we would use PyCaret's `load_model()` function and then easily apply the saved model on new unseen data for prediction. # + colab={} colab_type="code" id="Siw_2EIUEQub" outputId="5da8b7c9-01f7-469c-f0c9-b19c8ce11bcc" saved_final_rf = load_model('Final RF Model 08Feb2020') # + [markdown] colab_type="text" id="1zyi6-Q-EQuq" # Once the model is loaded in the environment, you can simply use it to predict on any new data using the same `predict_model()` function. Below we have applied the loaded model to predict the same `data_unseen` that we used in section 13 above. # + colab={} colab_type="code" id="HMPO1ka9EQut" new_prediction = predict_model(saved_final_rf, data=data_unseen) # + colab={} colab_type="code" id="7wyDQQSzEQu8" outputId="23065436-42e3-4441-ed58-a8863f8971f9" new_prediction.head() # + [markdown] colab_type="text" id="bf8I1uqcEQvD" # Notice that the results of `unseen_predictions` and `new_prediction` are identical. # + [markdown] colab_type="text" id="_HeOs8BhEQvF" # # 16.0 Wrap-up / Next Steps? 
# + [markdown] colab_type="text" id="VqG1NnwXEQvK" # This tutorial has covered the entire machine learning pipeline from data ingestion, pre-processing, training the model, hyperparameter tuning, prediction and saving the model for later use. We have completed all of these steps in less than 10 commands which are naturally constructed and very intuitive to remember such as `create_model()`, `tune_model()`, `compare_models()`. Re-creating the entire experiment without PyCaret would have taken well over 100 lines of code in most libraries. # # We have only covered the basics of `pycaret.classification`. In following tutorials we will go deeper into advanced pre-processing, ensembling, generalized stacking and other techniques that allow you to fully customize your machine learning pipeline and are must know for any data scientist. # # See you at the next tutorial. Follow the link to __[Binary Classification Tutorial (CLF102) - Intermediate Level](https://github.com/pycaret/pycaret/blob/master/Tutorials/Binary%20Classification%20Tutorial%20Level%20Intermediate%20-%20CLF102.ipynb)__
Tutorials/Binary Classification Tutorial Level Beginner - CLF101.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import pandas as pd from copy import deepcopy from collections import defaultdict import geopandas as gp from matplotlib import pyplot as plt import numpy as np # - # # PAM - Geometry Sampling # # This notebook shows how to sample geometrical locations from ozone/dzone regions. # trips = pd.read_csv('data/example_data/example_travel_diaries.csv') attributes = pd.read_csv('data/example_data/example_attributes.csv') attributes.set_index('pid', inplace=True) trips.head(20) # ### Sample Geometries # Transform trip ozone/dzone to geographically sampled points # + from pam.samplers.spatial import GeometryRandomSampler geom_sampler = GeometryRandomSampler(geo_df_file="data/example_data/geometry.geojson", geometry_name_column="NAME", default_region="Westminster,City of London") geo_trips = trips.copy() geo_trips['start_loc'] = geo_trips.apply(lambda x: geom_sampler.sample_point(x['ozone']), axis=1) geo_trips['end_loc'] = geo_trips.apply(lambda x: geom_sampler.sample_point(x['dzone']), axis=1) # - # ### Build Activity Plans # # First we convert the travel diary data to Activity Plans: from pam import read population = read.load_travel_diary(geo_trips, attributes, include_loc=True) # Let's check out an example Activity Plan and Attributes: household = population.households['census_120'] person = household.people['census_120'] person.print() person.plot() person.attributes # ### Output Matsim XML # + from pam.write import write_matsim_plans, write_matsim_attributes write_matsim_plans(population, "./outputs/example_output_plans.xml") write_matsim_attributes(population, "./outputs/example_output_attributes.xml")
examples/09_pam-geo-sampling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Clears workspace
# %reset -f

# import packages
import pandas as pd
import numpy as np
import json
from pprint import pprint
import csv

# +
# set maximum number of rows and columns displayed
pd.set_option('display.max_columns', 12)
pd.set_option('display.max_rows', 20)

# Specifies file to load into dataframe;primary pandas data structure
df = pd.read_csv('Complete_Municipalities.csv')
df.head()


# +
def sortbytopic(df, topics):
    '''Group dataframe by array of topics and return the concatenation.

    df - raw dataframe (must contain a 'Topic' column)
    topics - list of topic names to keep, in the order given
    '''
    df_temp = []
    # Collect one sub-frame per requested topic, preserving topic order.
    for i in range(len(topics)):
        df_temp.append(df.loc[df['Topic']==topics[i]])
    return pd.concat(df_temp).reset_index(drop=True)


topics = ['Income of individuals in 2010']
x = sortbytopic(df,topics)
pd.set_option('display.max_rows', 20)
x
x[60:70]
# -

# Index by characteristic to allow label-based lookup below.
x1 = x.set_index('Characteristic')
x1
x1.iloc[x1.index.get_loc(' Couple-with-children economic families')]

# +
df_tempchar = x['Characteristic'].to_frame()
df_tempchar
# df_tempchar_medians = (df_tempchar['Characteristic'] == ' Median family income ($)')
# temp_list = []
# for i in len(range(x['Characteristic'])):
#     if x['Characteristic'] == ' Median family income ($)':
#         append.temp_list
# x['Characteristic'] == ' Median family income ($)'
# -

# Wide table: one row per municipality, one column per characteristic.
df_pivot = pd.pivot_table(x, values='Total', index=['Geo_Code', 'Prov_Name', 'CD_Name', 'CSD_Name'], columns='Characteristic', aggfunc='first')
df_pivot

# NOTE(review): iterating a DataFrame yields column *labels*, so
# x['Characteristic'][i] indexes the Series by column name here; this cell
# looks like abandoned scratch work — confirm before reusing.
# listofrows = []
for i in x:
    if x['Characteristic'][i][0] == ' ':
        print(x['Characteristic'][i])
    else:
        print ('failed')
    # listofrows.append(x['Characteristic'][i])

# +
# Load the list of characteristics of interest (one per CSV row).
with open('mobility_char_list.csv', 'r') as f:
    reader = csv.reader(f)
    characteristics = list(reader)
characteristics
# characteristics[0][0]
# sortbychar(x,characteristics)


# +
def sortbychar(df,characteristics):
    '''Return a list of single-characteristic dataframes.

    df - topic-filtered dataframe
    characteristics - list of 1-element lists (rows read by csv.reader);
                      item[0] is the characteristic label
    '''
    df_list = []
    for item in characteristics:
        df_temp = df.loc[df['Characteristic'] == item[0]]
        # Keep only the geographic keys and the value column, renamed to
        # the characteristic itself.
        df_temp = df_temp.drop(['Prov_Name', 'GNR','Topic','Characteristic','Note','Male','Female'], axis=1)
        df_temp = df_temp.rename(columns={'Total' : item[0]})
        df_list.append(df_temp.reset_index(drop=True))
        # print(len(df_temp),item[0])
    return df_list


# characteristic = [' Median income ($)']
y = sortbychar(x,characteristics)
# y[0]
y[0]
# -

df.head()

# Export one characteristic across all municipalities.
df_temp = df.loc[df['Characteristic'] == ' Median after-tax household income ($)']
df_temp.to_csv('afterTaxHouseholdIncome.csv', index=False);

# + active=""
# def parsedata(df,topics,characteristic):
#     df_temp = sortbytopic(df, topics)
#     df_temp = sortbychar(df_temp, characteristic)
#
#     return df_temp
#
# topic = ['Shelter costs','Income of individuals in 2010','Income of households in 2010']
# characteristic = ' Median income ($)'
src/data/boundary_linked/NHS_Profile_2011/Preprocessing/NHS_DataFormatting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tpn.util as tu import tpn.visualstudio as vs import tpn.visualstudio.project as vp import tpn.visualstudio.datrie as d print(tu.get_source(d.DatrieProject)) p = d.DatrieProject() p.load() vp.CompileFile.instances print(p.vcxproj) print(p.vcxproj_filters) print(p.compiles_props) print(p.props) print(p.props_debug) p.write() # Add datrie.vcxproj to pcbuild.sln in Visual Studio.
lib/tpn/visualstudio/datrie.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import matplotlib.pyplot as plt plt.style.use('seaborn') import numpy as np import pandas as pd import os from urllib.request import urlretrieve # - from jupyterworkflow.data import get_fremont_data #from [package.pyfile] import [function] data = get_fremont_data() pivoted = data.pivot_table('Total', index=data.index.time, columns=data.index.date) #rows by time column by date pivoted.plot(legend=False, alpha = 0.01)
Fixing the bug.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"></ul></div> # + import sys import os import time import datetime import re import numpy as np import pandas as pd import langid import warnings warnings.filterwarnings("ignore") from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # - # --- def detect_language(text): langid.set_languages(["pt", "es"]) result = langid.classify(text) language = result[0] return language # + rawdata_filepath = "/Users/xu.zhu/Desktop/Data/language_detection/mx/mx_raw_20210310.csv" raw_df = pd.read_csv( rawdata_filepath, encoding="utf-8-sig", lineterminator="\n" ) info = """ Filename: {0} Shape: {1} Columns: {2} """.format( os.path.basename(rawdata_filepath), raw_df.shape, raw_df.columns.to_list() ) print(info) raw_df.head(3) # + cols = raw_df.columns.to_list() raw_df["source_language"] = raw_df["item_title"].apply(lambda x: detect_language(str(x))) cols.insert(0, "source_language") raw_df = raw_df[cols] raw_df.head(3) # - pt_df = raw_df[raw_df["source_language"]=="pt"] pt_df # + raw_df.to_csv( "/Users/xu.zhu/Desktop/Data/language_detection/mx/mx_detetced_raw.csv", encoding="utf-8-sig", index=False ) pt_df.to_csv( "/Users/xu.zhu/Desktop/Data/language_detection/mx/mx_detetced_pt.csv", encoding="utf-8-sig", index=False ) # -
tools/language_detection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# default_exp data
# -

# # Data
#
# > Helper functions used to download and extract common time series datasets.

#hide
from nbdev.showdoc import *
from IPython.display import display, HTML
display(HTML("<style>.container { width: 100% !important; }</style>"))

#export
from timeseries.imports import *
from timeseries.utils import *
from timeseries.core import *

#export
from fastai2.data.all import *

#export
import tempfile
try:
    from urllib import urlretrieve
except ImportError:
    from urllib.request import urlretrieve
import shutil
from pyunpack import Archive
from scipy.io import arff


#export
def decompress_from_url(url, target_dir=None, verbose=False):
    """Download an archive from ``url`` and extract it into ``target_dir``.

    Returns ``target_dir`` on success, ``None`` if extraction fails.
    """
    #Download
    try:
        fname = os.path.basename(url)
        tmpdir = tempfile.mkdtemp()
        local_comp_fname = os.path.join(tmpdir, fname)
        urlretrieve(url, local_comp_fname)
    except:
        # NOTE(review): bare except; if the download fails, the code still
        # falls through to the decompress step with a missing file — confirm
        # intended behaviour before relying on the error path.
        shutil.rmtree(tmpdir)
        if verbose: sys.stderr.write("Could not download url. Please, check url.\n")
    #Decompress
    try:
        if not os.path.exists(target_dir): os.makedirs(target_dir)
        Archive(local_comp_fname).extractall(target_dir)
        shutil.rmtree(tmpdir)  # temp download no longer needed
        return target_dir
    except:
        shutil.rmtree(tmpdir)
        if verbose: sys.stderr.write("Could not uncompress file, aborting.\n")
        return None


# +
#export
def get_UCR_univariate_list():
    """Return the 128 dataset names of the UCR univariate archive."""
    return [
        'ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ', 'ArrowHead', 'Beef',
        'BeetleFly', 'BirdChicken', 'BME', 'Car', 'CBF', 'Chinatown', 'ChlorineConcentration', 'CinCECGTorso',
        'Coffee', 'Computers', 'CricketX', 'CricketY', 'CricketZ', 'Crop', 'DiatomSizeReduction',
        'DistalPhalanxOutlineAgeGroup', 'DistalPhalanxOutlineCorrect', 'DistalPhalanxTW', 'DodgerLoopDay',
        'DodgerLoopGame', 'DodgerLoopWeekend', 'Earthquakes', 'ECG200', 'ECG5000', 'ECGFiveDays',
        'ElectricDevices', 'EOGHorizontalSignal', 'EOGVerticalSignal', 'EthanolLevel', 'FaceAll', 'FaceFour',
        'FacesUCR', 'FiftyWords', 'Fish', 'FordA', 'FordB', 'FreezerRegularTrain', 'FreezerSmallTrain', 'Fungi',
        'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3', 'GesturePebbleZ1', 'GesturePebbleZ2',
        'GunPoint', 'GunPointAgeSpan', 'GunPointMaleVersusFemale', 'GunPointOldVersusYoung', 'Ham',
        'HandOutlines', 'Haptics', 'Herring', 'HouseTwenty', 'InlineSkate', 'InsectEPGRegularTrain',
        'InsectEPGSmallTrain', 'InsectWingbeatSound', 'ItalyPowerDemand', 'LargeKitchenAppliances',
        'Lightning2', 'Lightning7', 'Mallat', 'Meat', 'MedicalImages', 'MelbournePedestrian',
        'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect', 'MiddlePhalanxTW',
        'MixedShapesRegularTrain', 'MixedShapesSmallTrain', 'MoteStrain', 'NonInvasiveFetalECGThorax1',
        'NonInvasiveFetalECGThorax2', 'OliveOil', 'OSULeaf', 'PhalangesOutlinesCorrect', 'Phoneme',
        'PickupGestureWiimoteZ', 'PigAirwayPressure', 'PigArtPressure', 'PigCVP', 'PLAID', 'Plane',
        'PowerCons', 'ProximalPhalanxOutlineAgeGroup', 'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW',
        'RefrigerationDevices', 'Rock', 'ScreenType', 'SemgHandGenderCh2', 'SemgHandMovementCh2',
        'SemgHandSubjectCh2', 'ShakeGestureWiimoteZ', 'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances',
        'SmoothSubspace', 'SonyAIBORobotSurface1', 'SonyAIBORobotSurface2', 'StarLightCurves', 'Strawberry',
        'SwedishLeaf', 'Symbols', 'SyntheticControl', 'ToeSegmentation1', 'ToeSegmentation2', 'Trace',
        'TwoLeadECG', 'TwoPatterns', 'UMD', 'UWaveGestureLibraryAll', 'UWaveGestureLibraryX',
        'UWaveGestureLibraryY', 'UWaveGestureLibraryZ', 'Wafer', 'Wine', 'WordSynonyms', 'Worms',
        'WormsTwoClass', 'Yoga'
    ]

test_eq(len(get_UCR_univariate_list()), 128)


# +
#export
def get_UCR_multivariate_list():
    """Return the 30 dataset names of the UCR multivariate archive."""
    return [
        'ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions', 'CharacterTrajectories', 'Cricket',
        'DuckDuckGeese', 'EigenWorms', 'Epilepsy', 'ERing', 'EthanolConcentration', 'FaceDetection',
        'FingerMovements', 'HandMovementDirection', 'Handwriting', 'Heartbeat', 'InsectWingbeat',
        'JapaneseVowels', 'Libras', 'LSST', 'MotorImagery', 'NATOPS', 'PEMS-SF', 'PenDigits',
        'PhonemeSpectra', 'RacketSports', 'SelfRegulationSCP1', 'SelfRegulationSCP2', 'SpokenArabicDigits',
        'StandWalkJump', 'UWaveGestureLibrary'
    ]

test_eq(len(get_UCR_multivariate_list()), 30)


# +
#export
def stack_padding(arr):
    """Stack variable-length rows into a 2d array, zero-padding to the
    longest row (np.resize semantics: missing entries become 0)."""
    def resize(row, size):
        new = np.array(row)
        new.resize(size)
        return new
    row_length = max(arr, key=len).__len__()
    mat = np.array( [resize(row, row_length) for row in arr] )
    return mat


from sktime.utils.load_data import load_from_tsfile_to_dataframe


def get_UCR_data(dsid, path='.', parent_dir='data/UCR', verbose=False, drop_na=False, on_disk=True):
    """Download (if needed), cache as .npy and return a UCR dataset.

    Returns (X_train, y_train, X_valid, y_valid); X arrays have shape
    (samples, features, timesteps) as float32. With ``on_disk=True`` the
    arrays are memory-mapped instead of loaded into RAM.
    NOTE(review): ``drop_na`` is currently unused — confirm whether it
    should filter NaNs or be removed.
    """
    if verbose: print('Dataset:', dsid)
    assert dsid in get_UCR_univariate_list() + get_UCR_multivariate_list(), f'{dsid} is not a UCR dataset'
    full_parent_dir = Path(path)/parent_dir
    full_tgt_dir = full_parent_dir/dsid
    # Only download/convert when the cached .npy files are not all present.
    if not all([os.path.isfile(f'{full_parent_dir}/{dsid}/{fn}.npy') for fn in ['X_train', 'X_valid', 'y_train', 'y_valid']]):
        if dsid in ['InsectWingbeat', 'DuckDuckGeese']:
            # Known-bad archives on the source website.
            if verbose: print('There are problems with the original zip file and data cannot correctly downloaded')
            return None, None, None, None
        src_website = 'http://www.timeseriesclassification.com/Downloads'
        if not os.path.isdir(full_tgt_dir):
            if verbose: print(f'Downloading and decompressing data to {full_tgt_dir}...')
            decompress_from_url(f'{src_website}/{dsid}.zip', target_dir=full_tgt_dir, verbose=verbose)
            if verbose: print('...data downloaded and decompressed')
        X_train_df, y_train = load_from_tsfile_to_dataframe(full_tgt_dir/f'{dsid}_TRAIN.ts')
        X_valid_df, y_valid = load_from_tsfile_to_dataframe(full_tgt_dir/f'{dsid}_TEST.ts')
        X_train_ = []
        X_valid_ = []
        for i in range(X_train_df.shape[-1]):
            X_train_.append(stack_padding(X_train_df[f'dim_{i}'])) # stack arrays even if they have different lengths
            X_valid_.append(stack_padding(X_valid_df[f'dim_{i}']))
        # (samples, timesteps, dims) -> (samples, dims, timesteps)
        X_train = np.transpose(np.stack(X_train_, axis=-1), (0, 2, 1)).astype(np.float32)
        X_valid = np.transpose(np.stack(X_valid_, axis=-1), (0, 2, 1)).astype(np.float32)
        # unique_cats = np.sort(np.unique(y_train))
        # o2i = dict(zip(unique_cats, np.arange(len(unique_cats))))
        # y_train = np.vectorize(o2i.get)(y_train)
        # y_valid = np.vectorize(o2i.get)(y_valid)
        np.save(f'{full_tgt_dir}/X_train.npy', X_train)
        np.save(f'{full_tgt_dir}/y_train.npy', y_train)
        np.save(f'{full_tgt_dir}/X_valid.npy', X_valid)
        np.save(f'{full_tgt_dir}/y_valid.npy', y_valid)
        # Remove everything except the cached .npy files.
        delete_all_in_dir(full_tgt_dir, exception='.npy')
    if on_disk: mmap_mode='r+'
    else: mmap_mode=None
    X_train = np.load(f'{full_tgt_dir}/X_train.npy', mmap_mode=mmap_mode)
    y_train = np.load(f'{full_tgt_dir}/y_train.npy', mmap_mode=mmap_mode)
    X_valid = np.load(f'{full_tgt_dir}/X_valid.npy', mmap_mode=mmap_mode)
    y_valid = np.load(f'{full_tgt_dir}/y_valid.npy', mmap_mode=mmap_mode)
    if verbose:
        print('X_train:', X_train.shape)
        print('y_train:', y_train.shape)
        print('X_valid:', X_valid.shape)
        print('y_valid:', y_valid.shape, '\n')
    return X_train, y_train, X_valid, y_valid
# -

#hide
# Integration checks: download one univariate and one multivariate dataset,
# verify caching, reload speed, shapes, dtype and memmap behaviour.
PATH = Path(os.getcwd()).parent # Path to /data/UCR
dsids = ['OliveOil', 'AtrialFibrillation'] # univariate and multivariate
for dsid in dsids:
    tgt_dir = PATH/f'data/UCR/{dsid}'
    if os.path.isdir(tgt_dir): shutil.rmtree(tgt_dir)
    test_eq(len(get_files(tgt_dir)), 0) # no file left
    X_train, y_train, X_valid, y_valid = get_UCR_data(dsid, PATH, parent_dir='data/UCR')
    test_eq(len(get_files(tgt_dir, '.npy')), 4)
    test_eq(len(get_files(tgt_dir, '.npy')), len(get_files(tgt_dir))) # test no left file/ dir
    del X_train, y_train, X_valid, y_valid
    start = time.time()
    X_train, y_train, X_valid, y_valid = get_UCR_data(dsid, PATH, parent_dir='data/UCR')
    elapsed = time.time() - start
    test_eq(elapsed < 1, True)
    test_eq(X_train.ndim, 3)
    test_eq(y_train.ndim, 1)
    test_eq(X_valid.ndim, 3)
    test_eq(y_valid.ndim, 1)
    test_eq(len(get_files(tgt_dir, '.npy')), 4)
    test_eq(len(get_files(tgt_dir, '.npy')), len(get_files(tgt_dir))) # test no left file/ dir
    test_eq(X_train.ndim, 3)
    test_eq(y_train.ndim, 1)
    test_eq(X_valid.ndim, 3)
    test_eq(y_valid.ndim, 1)
    test_eq(X_train.dtype, np.float32)
    test_eq(X_train.__class__.__name__, 'memmap')
    del X_train, y_train, X_valid, y_valid
    X_train, y_train, X_valid, y_valid = get_UCR_data(dsid, PATH, parent_dir='data/UCR', on_disk=False)
    test_eq(X_train.__class__.__name__, 'ndarray')
    del X_train, y_train, X_valid, y_valid

#hide
from save_nb import *
from nbdev.export import notebook2script
save_nb()
notebook2script()
test_eq(last_saved() < 10, True)
nbs/002_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt


def mandelbrot(h, w, maxit=20):
    """Return an (h, w) integer array of Mandelbrot escape times.

    Each cell holds the iteration index at which that grid point's orbit
    first diverged (|z| > 2); points that never diverge within `maxit`
    iterations keep the value `maxit`.
    """
    # Complex grid spanning the classic Mandelbrot viewport.
    y, x = np.ogrid[-1.4: 1.4: h*1j, -2: 0.8: w*1j]
    c = x + y*1j
    z = c
    divtime = maxit + np.zeros(z.shape, dtype=int)
    for i in range(maxit):
        z = z**2 + c
        diverge = z * np.conj(z) > 2**2          # |z|^2 > 4  <=>  |z| > 2
        div_now = diverge & (divtime == maxit)   # points that diverged this step
        divtime[div_now] = i
        z[diverge] = 2                           # clamp diverged points to avoid overflow
    return divtime


plt.imshow(mandelbrot(400, 400))
plt.show()

# Histogram of 10000 samples drawn from N(mu, sigma).
mu, sigma = 2, 0.5
v = np.random.normal(mu, sigma, 10000)
# BUGFIX: the `normed` keyword was removed from matplotlib (3.1) and
# numpy (1.24); `density=True` is the supported equivalent and
# normalizes the histogram to unit area.
plt.hist(v, bins=50, density=True)
plt.show()
(n, bins) = np.histogram(v, bins=50, density=True)
plt.plot(.5*(bins[1:] + bins[:-1]), n)
plt.show()
numpy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Word Count # # ### Counting the number of occurances of words in a text is a popular first exercise using map-reduce. # # ## The Task # **Input:** A text file consisisting of words separated by spaces. # **Output:** A list of words and their counts, sorted from the most to the least common. # # We will use the book "Moby Dick" as our input. #start the SparkContext from pyspark import SparkContext sc=SparkContext(master="local[4]") # ### Setup a plan for pretty print def pretty_print_plan(rdd): for x in rdd.toDebugString().decode().split('\n'): print(x) # ### Use `textFile()` to read the text # %%time text_file = sc.textFile("Data/Moby-Dick.txt") type(text_file) # ## Steps for counting the words # # * split line by spaces. # * map `word` to `(word,1)` # * count the number of occurances of each word. # %%time words = text_file.flatMap(lambda line: line.split(" ")) not_empty = words.filter(lambda x: x!='') key_values= not_empty.map(lambda word: (word, 1)) counts= key_values.reduceByKey(lambda a, b: a + b) # ### flatMap() # Note the line: # ```python # words = text_file.flatMap(lambda line: line.split(" ")) # ``` # Why are we using `flatMap`, rather than `map`? # # The reason is that the operation `line.split(" ")` generates a **list** of strings, so had we used `map` the result would be an RDD of lists of words. Not an RDD of words. # # The difference between `map` and `flatMap` is that the second expects to get a list as the result from the map and it **concatenates** the lists to form the RDD. # ## The execution plan # In the last cell we defined the execution plan, but we have not started to execute it. # # * Preparing the plan took ~100ms, which is a non-trivial amount of time, # * But much less than the time it will take to execute it. 
# * Lets have a look a the execution plan. # ### Understanding the details # To see which step in the plan corresponds to which RDD we print out the execution plan for each of the RDDs. # # Note that the execution plan for `words`, `not_empty` and `key_values` are all the same. pretty_print_plan(text_file) pretty_print_plan(words) pretty_print_plan(not_empty) pretty_print_plan(key_values) pretty_print_plan(counts) # | Execution plan | RDD | Comments | # | :---------------------------------------------------------------- | :------------: | :--- | # |`(2)_PythonRDD[6] at RDD at PythonRDD.scala:48 []`| **counts** | Final RDD| # |`_/__MapPartitionsRDD[5] at mapPartitions at PythonRDD.scala:436 []`| **---"---** | # |`_/__ShuffledRDD[4] at partitionBy at NativeMethodAccessorImpl.java:0 [`| **---"---** | RDD is partitioned by key | # |`_+-(2)_PairwiseRDD[3] at reduceByKey at <timed exec>:4 []`| **---"---** | Perform mapByKey | # |`____/__PythonRDD[2] at reduceByKey at <timed exec>:4 []`| **words, not_empty, key_values** | The result of partitioning into words| # | | | removing empties, and making into (word,1) pairs| # |`____/__../../Data/Moby-Dick.txt MapPartitionsRDD[1] at textFile at Nat`| **text_file** | The partitioned text | # |`____/__../../Data/Moby-Dick.txt HadoopRDD[0] at textFile at NativeMeth`| **---"---** | The text source | # ## Execution # Finally we count the number of times each word has occured. # Now, finally, the Lazy execution model finally performs some actual work, which takes a significant amount of time. # %%time ## Run #1 Count=counts.count() # Count = the number of different words Sum=counts.map(lambda x:x[1]).reduce(lambda x,y:x+y) # print('Different words=%5.0f, total words=%6.0f, mean no. occurances per word=%4.2f'%(Count,Sum,float(Sum)/Count)) # ### Amortization # When the same commands are performed repeatedly on the same data, the execution time tends to decrease in later executions. 
# # The cells below are identical to the one above, with one exception at `Run #3` # # Observe that `Run #2` take much less time that `Run #1`. Even though no `cache()` was explicitly requested. The reason is that Spark caches (or materializes) `key_values`, before executing `reduceByKey()` because performng reduceByKey requires a shuffle, and a shuffle requires that the input RDD is materialized. In other words, sometime caching happens even if the programmer did not ask for it. # %%time ## Run #2 Count=counts.count() Sum=counts.map(lambda x:x[1]).reduce(lambda x,y:x+y) print('Different words=%5.0f, total words=%6.0f, mean no. occurances per word=%4.2f'%(Count,Sum,float(Sum)/Count)) # ### Explicit Caching # In `Run #3` we explicitly ask for `counts` to be cached. This will reduce the execution time in the following run `Run #4` by a little bit, but not by much. # %%time ## Run #3, cache Count=counts.cache().count() Sum=counts.map(lambda x:x[1]).reduce(lambda x,y:x+y) print('Different words=%5.0f, total words=%6.0f, mean no. occurances per word=%4.2f'%(Count,Sum,float(Sum)/Count)) # %%time #Run #4 Count=counts.count() Sum=counts.map(lambda x:x[1]).reduce(lambda x,y:x+y) print('Different words=%5.0f, total words=%6.0f, mean no. occurances per word=%4.2f'%(Count,Sum,float(Sum)/Count)) # %%time #Run #5 Count=counts.count() Sum=counts.map(lambda x:x[1]).reduce(lambda x,y:x+y) print('Different words=%5.0f, total words=%6.0f, mean no. occurances per word=%4.2f'%(Count,Sum,float(Sum)/Count))
Word_Count.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Class exercises for EarthAI notebooks
#
# This is an adaptation of the in-class exercises for EarthAI notebooks that we went over in class the week of March 3.

# ## Load in libraries

from earthai.init import *
import folium
import geopandas
# from pyspark.sql.functions import lit
import pyspark.sql.functions as F
from shapely.geometry import Point

# ## Load in administrative districts shape
# We can pick out a particular district (2838) and have a look at its shape. We want to use the center point of this district to select a time series of MODIS data.
#
# First we select the district and look at it

geo_admin_url = 'https://raw.githubusercontent.com/datasets/geo-ne-admin1/master/data/admin1.geojson'
adm1 = geopandas.read_file(geo_admin_url)#.drop(columns='id')
# adm1_2838 = adm1[adm1.id.isin(['2838', '3342'])] # more than one
adm1_2838 = adm1[adm1.id.isin(['2838'])] # just one
adm1_2838.plot() # matplotlib, if so desired (better is a folium map below)
# print(adm1_2838)

# We can use the `centroid` function of geopandas to extract the center point of the district. `astype(str)` converts it to string (WKT text like "POINT (x y)").

adm1_2838.centroid.astype(str)

# Note that it has an index value of 700 next to it. We use that to select it by index value to get just the text with the point value.

pt = adm1_2838.centroid.astype(str)[700]
pt

# ## Create a catalog
#
# We can use the point now to help create a catalog. We want all daily MODIS images (mcd43a4) for the year 2019 that intersect that point

catalog = earth_ondemand.read_catalog(
    geo = pt,
    #geo = 'POINT(15.6 0.02)', # we could have specified a point manually also
    #geo = adm1_2838, # or intersected with the district polygon also
    start_datetime = '2019-01-01',
    end_datetime = '2019-12-31',
    max_cloud_cover = 100,
    collections = 'mcd43a4', # from the "id" field
)

# Running the code below gives you a look at the catalog's attributes

print(type(catalog))
print(catalog.columns)
print('DataFrame `catalog` has', len(catalog), 'rows and ', len(catalog.id.unique()), 'distinct scenes ids')

# Running this shows us what bands are there. It is commented out because we don't want to run it, and just know that it tells us that we want the blue channel's quality band ('B03qa')

# +
# earth_ondemand.bands('mcd43a4')
# -

# ## Read the catalog into spark
#
# Now we load up the data into RasterFrames, so we can analyze. We specify the band we want, and rename it to 'qual'

df = spark.read.raster(catalog, catalog_col_names=['B03qa'])\
    .withColumnRenamed('B03qa', 'qual')

# Note that this process is lazy. No analysis is done until we need the results of an analysis. Same with the next step, in which we apply a mask.
#
# ### Find how many pixels are > 0
#
# This is like masking, but filtering pixels > 0 to flag low-quality observations (the new boolean column is named 'clouds' below).

masked = df.withColumn('clouds', rf_local_greater('qual', 0))
print(masked.columns)

# Let's apply a histogram to one of these to see what the distribution of values is like

# +
# NOTE(review): `plt` is not imported in this notebook; it presumably comes
# from the `earthai.init` star-import — confirm before running standalone.
hist_df = masked.select(rf_tile_histogram('clouds')['bins'].alias('bins'))
hist_df.printSchema()

bins_row = hist_df.first()
values = [int(bin['value']) for bin in bins_row.bins]
counts = [int(bin['count']) for bin in bins_row.bins]

plt.hist(values, weights=counts, bins=100)
plt.show()
# -

# Let's look at one of the masked tiles. This is new--we didn't do this in class, but it is more efficient than the process we used, which was to look at the RasterFrame contents, which was a bit slow. We convert here the first image to a tile and display it

t = masked.select(rf_tile('qual').alias('clouds')).first()['clouds']
display(t)

# ## Exercises
#
# The exercises we want to do on your own are as follows.
#
# We have created the derivatives necessary to create a time series to examine how cloudy it is over the course of the year in this part of the Congo.
#
# This will require you to:
#
# 1. Create an image time series, by calculating the average level of cloudiness across weeks.
#
#    - To do this, you will need to:
#      - Apply the following functions on the `masked` object: `weekofyear`, `agg`, and `rf_agg_mean`
#      - The following code, from the `time-series.ipynb`, can help (see the [web version of](https://rasterframes.io//time-series.html) this notebook also):
#
# ```python
# time_series = rf_park_tile \
#     .groupby(
#         year('acquisition_date').alias('year'),
#         weekofyear('acquisition_date').alias('week')) \
#     .agg(rf_agg_mean('ndvi_masked').alias('ndvi'))
# ```
# This shows how to do this over an NDVI time series. We are doing this by applying it to our masked quality time series.
#
# 2. Plot that as a time series
#
#    - The same time-series example has the code you need, which uses plotly to create the time series plot (note: the snippet below actually uses matplotlib). Before doing that, however, you have to convert the RasterFrames object to pandas using `toPandas`, e.g.
#
# ```python
# time_series_pdf = time_series.toPandas()
# ```
#
#    - Then plot. From the same time-series example. Change the titles to relevant text for this cloudiness plot
#
# ```python
# import matplotlib.pyplot as plt
# time_series_pdf.sort_values('week', inplace=True)
# plt.plot(time_series_pdf['week'], time_series_pdf['ndvi'], 'go-')
# plt.ylim([-1, 1])  # note I added this to the example. You can set this [0, 1] for cloudiness
# plt.xlabel('Week of year, 2018')
# plt.ylabel('NDVI')
# plt.title('Cuyahoga Valley NP Green-up')
# ```
#
# 3. Do the same, but calculating the monthly mean cloudiness, and plot the time series of that also
#
# **Note**: The conversion step `toPandas()` is what takes the longest.
#
# 4. **Bonus**, for the adventurous (and because I haven't worked out a solution for this yet). Try and use `rf_agg_local_mean` to get a monthly raster composite showing cloud cover frequency. To do that, you will have to apply this to the time series step. Pointers for how to do that are in the `aggregation.ipynb` notebook and the [online version](https://rasterframes.io/aggregation.html). However, this is not trivial, because it doesn't work when the images have different extents. Even though these are the same MODIS tiles, I guess the extents are different, so to make this work, the extents of the images would have to be made the same. I am guessing that entails cropping and possibly resampling each image to a common extent first, and then analyzing the time series. Maybe best to try this just on one or two months' worth of data to start.
materials/code/earth_ai/class_exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
from sklearn.decomposition import PCA

# Load the four Instacart tables; only the first 100k prior-order rows are
# kept to bound memory use.
prior=pd.read_csv("/home/hhl/Desktop/Python3天快速入门机器学习项目资料/机器学习day1资料/02/instacart/order_products__prior.csv").head(100000)
products=pd.read_csv("/home/hhl/Desktop/Python3天快速入门机器学习项目资料/机器学习day1资料/02/instacart/products.csv")
order=pd.read_csv("/home/hhl/Desktop/Python3天快速入门机器学习项目资料/机器学习day1资料/02/instacart/orders.csv")
aisle=pd.read_csv("/home/hhl/Desktop/Python3天快速入门机器学习项目资料/机器学习day1资料/02/instacart/aisles.csv")

# Merge the four tables into one on their shared key columns.
# (The original passed the same key twice, e.g. on=['product_id','product_id'];
# a single key name expresses the identical join condition.)
_mg=pd.merge(prior,products,on='product_id')
_mg=pd.merge(_mg,order,on='order_id')
_mg=pd.merge(_mg,aisle,on='aisle_id')

# +
_mg.head(10)
# -

# Cross-tabulate users (rows) against aisles (columns):
# one row per user, cell = number of order lines for that user in that aisle.
cross=pd.crosstab(_mg['user_id'],_mg['aisle'])
# Keep only the first 10 users for this demo.
cross=cross.head(10)

# The crosstab is sparse and highly redundant, so reduce its dimensionality
# with PCA, keeping enough components to explain 96% of the variance.
pca=PCA(n_components=0.96)
data= pca.fit_transform(cross)
data

data.shape
instacart.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## Run time # # * Find min/max: $O(1)$ # * Insert: $O(\log n)$ # * Delete: $O(\log n)$ # + deletable=true editable=true from __future__ import print_function import numpy as np from IPython.display import clear_output from pjdiagram import * from ipywidgets import * from heap import binary_heap_allocation_example, insert_item_to_heap_example, percolate_down_example # + [markdown] deletable=true editable=true # # Description # # A heap is a type of binary tree that allows fast insertion and fast traversal. This makes them a good candidate for sorting large amounts of data. Heaps have the following properties: # - the root node has maximum value key # - the key stored at a non-root is at most the value of its parent # # Therefore: # - any path from the root node to a leaf node is in nonincreasing order # # However: # - the left and right sub-trees don't have a formal relationship # # # ## Storage # # A binary tree can be represented using an array with the following indexing: # # $$\texttt{parent}\left(i\right) = (i-1)/2$$ # $$\texttt{left}\left(i\right) = (2i)+1$$ # $$\texttt{right}\left(i\right) = (2i)+2$$ # # ### Example # - root node index: $0$ # - left child: $1$ # - left child: $2*1+1 = 3$ # - right child: $2*1+2 = 4$ # - right child: $2$ # - left child: $2*2 + 1 = 5$ # - right child: $2*2 + 2 = 6$ # # # # The figure below provides a visual demonstration. # + deletable=true editable=true binary_heap_allocation_example() # + [markdown] deletable=true editable=true # # Operations # # ## Inserting a new item into the heap # # To insert a new item into the heap, we start by first adding it to the end. Once added, we will percolate the item up until its parent is larger than the item. 
# + deletable=true editable=true
def parent(i):
    """Index of node i's parent in the array-backed heap."""
    # BUGFIX: use floor division. Under Python 3, `(i-1)/2` is true division
    # and returns a float, which crashes list indexing in percolate_up;
    # `//` is correct under both Python 2 (this notebook's kernel) and 3.
    return (i-1)//2

def left(i):
    """Index of node i's left child."""
    return 2*i+1

def right(i):
    """Index of node i's right child."""
    return 2*i+2

def percolate_up(heap, startpos, pos):
    """Sift heap[pos] up toward startpos until its parent is >= it."""
    ppos = parent(pos)
    while pos > startpos and heap[ppos] < heap[pos]:
        # percolate value up by swapping current position with parent position
        heap[pos], heap[ppos] = heap[ppos], heap[pos]
        # move up one node
        pos = ppos
        ppos = parent(pos)

def heap_insert(heap, value):
    """Insert value into the max-heap `heap` (a list), keeping the heap property."""
    # add value to end
    heap.append(value)
    # move value up heap until the nodes below it are smaller
    percolate_up(heap, 0, len(heap)-1)

# + [markdown] deletable=true editable=true
# To see why this works, we can visualize the algorithm. We start with a new value of `100` (highlighted with red). That is inserted into the bottom of the heap. We percolate `100` up (each swap is highlighted) until it gets placed into the root node. Once finished, the heap's properties are now restored, and every child will have a smaller value than its parent.
#
# To get a good sense of how `percolate_up` works, try putting different values in for the heap. Note that, it won't work correctly if the initial value isn't a proper heap.

# + deletable=true editable=true
heap = [16, 14, 10, 8, 7, 9, 3, 2, 4]
heap.append(100)
insert_item_to_heap_example(heap)

# + [markdown] deletable=true editable=true
# A quick example of using the code:

# + deletable=true editable=true
# NOTE: the expected outputs below were corrected — this is a MAX-heap, so the
# largest value bubbles to index 0 (the original annotations described a min-heap).
heap = []
heap_insert(heap, 20)
print("adding 20: ", heap) # [20]
heap_insert(heap, 5)
print("adding 5: ", heap) # [20, 5]
heap_insert(heap, 1)
print("adding 1: ", heap) # [20, 5, 1]
heap_insert(heap, 50)
print("adding 50: ", heap) # [50, 20, 1, 5]
heap_insert(heap, 6)
print("adding 6: ", heap) # [50, 20, 1, 5, 6]

with Canvas(400, 150) as ctx:
    draw_binary_tree(ctx, (200, 50), heap)

# + [markdown] deletable=true editable=true
# ## Removing an item from the heap
#
# Removing the root node from the heap gives the largest value. In place of the root node, the smallest (i.e.
# last value in the heap) can be placed at the root, and the heap properties
# are then restored.
#
# To restore the heap properties, the function `percolate_down` starts at the root node, and traverses down the tree. At every node it compares the current node's value with the left and right child. If the children are smaller than the current node, because of the heap properties, we know the rest of the tree is correctly ordered. If the current node is less than the left node or right node, it is swapped with the largest value.
#
# To understand why this works, consider the two possibilities:
#
# (1) The current node is largest. This meets the definition of a heap.

# + deletable=true editable=true
heap = [10, 5, 3]
with Canvas(400, 80) as ctx:
    draw_binary_tree(ctx, (200, 20), heap)

# + [markdown] deletable=true editable=true
# (2) The left child is largest. In the case if we swap the parent node with the child, the heap properties are restored (i.e. the top node is larger than either of its children).

# + deletable=true editable=true
heap1 = [5, 10, 3]
heap2 = [10, 5, 3]
with Canvas(400, 80) as ctx:
    draw_binary_tree(ctx, (100, 20), heap1)
    draw_binary_tree(ctx, (300, 20), heap2)

# + [markdown] deletable=true editable=true
# We have to do this recursively down the tree, as every swap we make can potentially cause a violation of the heap below. The code for the algorithm is given below:

# + deletable=true editable=true
def percolate_down(heap, i, size):
    """Sift heap[i] down until both children are <= it (max-heap order).

    Only the first `size` entries of `heap` are treated as part of the heap.
    """
    l = left(i)
    r = right(i)
    # pick the larger of node i and its left child
    if l < size and heap[l] > heap[i]:
        largest = l
    else:
        largest = i
    # BUGFIX: compare the right child against the current largest candidate,
    # not against the left child. The old `heap[r] > heap[l]` could promote a
    # right child smaller than the parent (e.g. [10, 2, 5] became [5, 2, 10]).
    if r < size and heap[r] > heap[largest]:
        largest = r
    # if left or right is greater than current index
    if largest != i:
        # swap values
        heap[i], heap[largest] = heap[largest], heap[i]
        # continue downward, respecting the caller-supplied heap size
        # (the old code passed len(heap) here, ignoring `size`)
        percolate_down(heap, largest, size)
# Finally, we restore the heap with a call to `percolate_down`. In the demo below the highlighted nodes show the two nodes that will be swapped (i.e. parent node and the largest child).

# + deletable=true editable=true
heap = [16, 14, 10, 8, 7, 9, 3, 2, 4]

# swap root with last value (4 is now root, and 16 is at the bottom)
heap[0], heap[-1] = heap[-1], heap[0]

# remove `16` from heap, and restore the heap properties
value = heap.pop()
percolate_down_example(heap)

# + [markdown] deletable=true editable=true
# Finally, putting everything together, we have `heap_pop`:

# + deletable=true editable=true
def heap_pop(heap):
    """Remove and return the largest value (the root) of the max-heap.

    Raises IndexError on an empty heap, like list.pop().
    """
    # swap root with last value
    heap[0], heap[-1] = heap[-1], heap[0]
    # remove last value
    result = heap.pop()
    # restore heap properties: a single sift-down from the root suffices.
    # (The old code repeated the identical percolate_down(heap, 0, len(heap))
    # call len(heap) times, doing the same work over and over.)
    if heap:
        percolate_down(heap, 0, len(heap))
    return result

# + [markdown] deletable=true editable=true
# To see `heap_pop` in action:

# + deletable=true editable=true
heap = []
heap_insert(heap, 1)
heap_insert(heap, 100)
heap_insert(heap, 20)
heap_insert(heap, 5)
heap_insert(heap, 3)
print(heap)
print(heap_pop(heap))
print(heap_pop(heap))
print(heap_pop(heap))
print(heap_pop(heap))
print(heap_pop(heap))
Heaps.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from __future__ import division, print_function import logging import warnings import numpy as np from astropy.stats import sigma_clip import os from astropy.coordinates import SkyCoord, Angle import astropy.units as u import matplotlib.pyplot as plt import pandas as pd import scipy as sp import lightkurve as lk from scipy import stats from astropy import units as u import scipy.optimize as opt from photutils import centroids as cent import lmfit as lm from lmfit import Minimizer, Parameters, report_fit class PixelMapFit: """Object to hold 2D array of lk.periodgram object data . Attributes ---------- targetpixelfile : targetpixelfile object Optional keywords accepted if ``method='lombscargle'`` are: ``minimum_frequency``, ``maximum_frequency``, ``minimum_period``, ``maximum_period``, ``frequency``, ``period``, ``nterms``, ``nyquist_factor``, ``oversample_factor``, ``freq_unit``, ``normalization``, ``ls_method``. 
""" def __init__(self, targetpixelfile, gaia=True, magnitude_limit=18, frequencies=[], frequnit=u.uHz, principle_components = 5,aperture=None, **kwargs): #Defining an aperture that will be used in plotting and making empty 2-d arrays of the correct size for masks if targetpixelfile.pipeline_mask.any() == False: self.aperture = aperture else: self.aperture = targetpixelfile.pipeline_mask self.tpf = targetpixelfile # Make a design matrix and pass it to a linear regression corrector self.raw_lc = self.tpf.to_lightcurve(aperture_mask=self.aperture) self.dm = lk.DesignMatrix(self.tpf.flux[:, ~self.tpf.create_threshold_mask()], name='regressors').pca(principle_components) rc = lk.RegressionCorrector(self.raw_lc) corrected_lc = rc.correct(self.dm.append_constant()) corrected_lc[np.where(corrected_lc.quality == 0)] self.corrected_lc = corrected_lc.remove_outliers() self.frequency_list = np.asarray((frequencies*frequnit).to(1/u.d)) self.principle_components = principle_components def Obtain_Initial_Phase(tpf,corrected_lc,frequency_list): flux = corrected_lc.flux.value times = corrected_lc.time.value - np.mean(corrected_lc.time.value) pg = corrected_lc.to_periodogram(frequency = np.append([0.0001],frequency_list),ls_method='slow') initial_flux= np.asarray(pg.power[1:]) initial_phase = np.zeros(len(frequency_list)) def lc_model(time,amp,freq,phase): return amp*np.sin(2*np.pi*freq*time + phase) def background_model(time,height): return np.ones(len(time))*height for j in np.arange(len(frequency_list)): for i in np.arange(len(frequency_list)): if (i == 0): model = lm.Model(lc_model,independent_vars=['time'],prefix='f{0:d}'.format(i)) model += lm.Model(background_model, independent_vars=['time']) else: model += lm.Model(lc_model,independent_vars=['time'],prefix='f{0:d}'.format(i)) model.set_param_hint('f{0:d}phase'.format(i), min = -np.pi, max = np.pi ,value= initial_phase[i],vary = False) model.set_param_hint('f{0:d}amp'.format(i), value = initial_flux[i],vary=False) 
model.set_param_hint('height', value= np.mean(flux),vary=False) model.set_param_hint('f{0:d}freq'.format(i),value = frequency_list[i], vary = False) params = model.make_params() params['f{0:d}phase'.format(j)].set(vary=True) params['f{0:d}phase'.format(j)].set(value = initial_phase[j]) params['f{0:d}phase'.format(j)].set(brute_step=np.pi/10) result = model.fit(corrected_lc.flux.value,params,time=times,method = 'brute') initial_phase[j]=result.best_values['f{0:d}phase'.format(j)] return initial_phase self.initial_phases = Obtain_Initial_Phase(self.tpf,self.corrected_lc,self.frequency_list) def Obtain_Final_Phase(tpf,corrected_lc,frequency_list,initial_phases): flux = corrected_lc.flux.value times = corrected_lc.time.value - np.mean(corrected_lc.time.value) pg = corrected_lc.to_periodogram(frequency = np.append([0.0001],frequency_list),ls_method='slow') initial_flux= np.asarray(pg.power[1:]) def lc_model(time,amp,freq,phase): return amp*np.sin(2*np.pi*freq*time + phase) def background_model(time,height): return np.ones(len(time))*height for i in np.arange(len(frequency_list)): if (i == 0): model = lm.Model(lc_model,independent_vars=['time'],prefix='f{0:d}'.format(i)) model += lm.Model(background_model, independent_vars=['time']) else: model += lm.Model(lc_model,independent_vars=['time'],prefix='f{0:d}'.format(i)) model.set_param_hint('f{0:d}phase'.format(i), min = -np.pi, max = np.pi ,value= initial_phases[i],vary = True) model.set_param_hint('f{0:d}amp'.format(i), value = initial_flux[i],vary=True) model.set_param_hint('height', value= np.mean(flux),vary=True) model.set_param_hint('f{0:d}freq'.format(i),value = frequency_list[i], vary = False) params = model.make_params() result = model.fit(corrected_lc.flux.value,params,time=times) final_phases = [result.best_values['f{0:d}phase'.format(j)] for j in np.arange(len(frequency_list))] return final_phases self.final_phases = Obtain_Final_Phase(self.tpf,self.corrected_lc,self.frequency_list,self.initial_phases) def 
Obtain_Final_Fit(tpf,corrected_lc,frequency_list,final_phases): flux = corrected_lc.flux.value times = corrected_lc.time.value - np.mean(corrected_lc.time.value) pg = corrected_lc.to_periodogram(frequency = np.append([0.0001],frequency_list),ls_method='slow') initial_flux= np.asarray(pg.power[1:]) def lc_model(time,amp,freq,phase): return amp*np.sin(2*np.pi*freq*time + phase) def background_model(time,height): return np.ones(len(time))*height for i in np.arange(len(frequency_list)): if (i == 0): model = lm.Model(lc_model,independent_vars=['time'],prefix='f{0:d}'.format(i)) model += lm.Model(background_model, independent_vars=['time']) else: model += lm.Model(lc_model,independent_vars=['time'],prefix='f{0:d}'.format(i)) model.set_param_hint('f{0:d}phase'.format(i), value= final_phases[i],vary = False) model.set_param_hint('f{0:d}amp'.format(i), value = initial_flux[i],vary=True) model.set_param_hint('height', value= np.mean(flux),vary=True) model.set_param_hint('f{0:d}freq'.format(i),value = frequency_list[i], vary = False) params = model.make_params() result = model.fit(corrected_lc.flux.value,params,time=times) return result heats = [] heats_error =[] #Iterating through columns of pixels for i in np.arange(0,len(self.aperture)): #Iterating through rows of pixels for j in np.arange(0,len(self.aperture[0])): #Making an empty 2-d array mask = np.zeros((len(self.aperture),len(self.aperture[0])), dtype=bool) #Iterating to isolate pixel by pixel to get light curves mask[i][j] = True #Getting the light curve for a pixel and excluding any flagged data lightcurve = self.tpf.to_lightcurve(aperture_mask=mask) rcc = lk.RegressionCorrector(lightcurve) lc = rcc.correct(self.dm.append_constant()) #lc = lc[np.where(lc.quality == 0)] #lc = lc.remove_outliers() bestfit = Obtain_Final_Fit(self.tpf,lc,self.frequency_list,self.final_phases) heat = np.asarray([bestfit.best_values['f{0:d}amp'.format(n)] for n in np.arange(len(self.frequency_list))]) #heat = bestfit.best_values['f0amp']# 
/ bestfit.params['f0amp'].stderr heat_error = np.asarray([bestfit.params['f{0:d}amp'.format(n)].stderr for n in np.arange(len(self.frequency_list))]) #Extending the list of fitting data for each pixel heats.extend([heat]) heats_error.extend([heat_error]) #Taking the final list and turning it into a 2-d numpy array with the same dimensions of the full postage stamp #heats = np.reshape(np.asarray(heats),(len(self.aperture),len(self.aperture[0]))) #heats_error = np.reshape(np.asarray(heats_error),(len(self.aperture),len(self.aperture[0]))) heats = np.asarray(heats) heats_error = np.asarray(heats_error) #Defining self.periodogram as this 2-d array of periodogram data self.heatmap = heats.T self.heatmap_error = heats_error.T self.timeserieslength = (self.tpf.time.max()-self.tpf.time.min()).value self.gaiadata = None if (gaia == True): """Make the Gaia Figure Elements""" # Get the positions of the Gaia sources c1 = SkyCoord(self.tpf.ra, self.tpf.dec, frame='icrs', unit='deg') # Use pixel scale for query size pix_scale = 4.0 # arcseconds / pixel for Kepler, default if self.tpf.mission == 'TESS': pix_scale = 21.0 # We are querying with a diameter as the radius, overfilling by 2x. 
from astroquery.vizier import Vizier Vizier.ROW_LIMIT = -1 result = Vizier.query_region(c1, catalog=["I/345/gaia2"],radius=Angle(np.max(self.tpf.shape[1:]) * pix_scale, "arcsec")) no_targets_found_message = ValueError('Either no sources were found in the query region ' 'or Vizier is unavailable') too_few_found_message = ValueError('No sources found brighter than {:0.1f}'.format(magnitude_limit)) if result is None: raise no_targets_found_message elif len(result) == 0: raise too_few_found_message result = result["I/345/gaia2"].to_pandas() result = result[result.Gmag < magnitude_limit] if len(result) == 0: raise no_targets_found_message year = ((self.tpf.time[0].jd - 2457206.375) * u.day).to(u.year) pmra = ((np.nan_to_num(np.asarray(result.pmRA)) * u.milliarcsecond/u.year) * year).to(u.deg).value pmdec = ((np.nan_to_num(np.asarray(result.pmDE)) * u.milliarcsecond/u.year) * year).to(u.deg).value result.RA_ICRS += pmra result.DE_ICRS += pmdec radecs = np.vstack([result['RA_ICRS'], result['DE_ICRS']]).T coords = self.tpf.wcs.all_world2pix(radecs, 0) # Gently size the points by their Gaia magnitude sizes = 64.0 / 2**(result['Gmag']/5.0) one_over_parallax = 1.0 / (result['Plx']/1000.) 
source = dict(ra=result['RA_ICRS'], dec=result['DE_ICRS'], source=result['Source'].astype(str), Gmag=result['Gmag'], plx=result['Plx'], one_over_plx=one_over_parallax, x=coords[:, 0], y=coords[:, 1], size=sizes) self.gaiadata = source class frequency_heatmap: def __init__(self,tpf,heats,heats_error,frequencies,gaia_data): self.heat_stamp = heats self.gaiadata=gaia_data self.heatmap_error = heats_error self.size = tpf.pipeline_mask.shape self.frequencies= frequencies def centroid(self): #Residuals to minimize relative to the error bars def residual(params, amp, amperr): x = params['x'] y = params['y'] sigma = params['sigma'] xpix,ypix = np.meshgrid(np.arange(self.size[0]),np.arange(self.size[1])) res = [] for i in np.arange(len(self.frequencies)): height = params['height{0:d}'.format(i)] model = height*np.exp(-(((x-xpix)/sigma)**2+((y-ypix)/sigma)**2)/2) res.extend( [(amp[i].reshape(self.size)-model) / amperr[i].reshape(self.size)]) return np.asarray(res) #Set starting values to converge from self.heatmap_error[np.where(self.heatmap_error==None)]=np.nan composite_heatmap = self.heat_stamp.sum(axis=0).reshape(self.size) / ((np.nansum(self.heatmap_error**2,axis=0))**(1/2)).reshape(self.size)#issue with numpy using sqrt? 
c = np.where(composite_heatmap==composite_heatmap.max()) params = Parameters() for i in np.arange(len(frequencies)): params.add('height{0:d}'.format(i), value=np.max(self.heat_stamp[i])) params.add('x', value=c[1][0]) params.add('y', value=c[0][0]) params.add('sigma', value=1) #Do the fit minner = Minimizer(residual, params, fcn_args=(self.heat_stamp, self.heatmap_error)) self.result = minner.minimize() fit = self.result.params.valuesdict() self.x = fit['x'] self.y = fit['y'] def star_list(self): gaia_data = self.gaiadata no_gaia_data_message = ValueError('No gaia data initialized in PixelMapPeriodogram class') if gaia_data ==None : raise no_gaia_data_message else: distances = np.square(self.x-gaia_data['x'])+np.square(self.y-gaia_data['y']) closest_star_mask = np.where(np.square(self.x-gaia_data['x'])+np.square(self.y-gaia_data['y'])==(np.square(self.x-gaia_data['x'])+np.square(self.y-gaia_data['y'])).min()) stars = dict(ra = np.asarray(gaia_data['ra']), dec = np.asarray(gaia_data['dec']), source = np.asarray(gaia_data['source']), x = np.asarray(gaia_data['x']), y = np.asarray(gaia_data['y']), distance = distances, probability = 2*stats.norm.sf(distances,scale=np.sqrt(self.result.params['x'].stderr**2 +self.result.params['y'].stderr**2 )))#I believe mutiply by 2 since we wont have a negative distance starlist = pd.DataFrame.from_dict(stars) self.stars = starlist.sort_values(by=[r'distance']) fh = frequency_heatmap(self.tpf,self.heatmap,self.heatmap_error,self.frequency_list,self.gaiadata) fh.centroid() fh.star_list() self.centroid = [fh.x,fh.y] self.heatmap = self.heatmap.sum(axis=0).reshape(self.aperture.shape[0],self.aperture.shape[1]) / np.sqrt((self.heatmap_error**2).sum(axis=0)).reshape(self.aperture.shape[0],self.aperture.shape[1]) self.starfit= fh.stars.reset_index() self.result = fh.result def info(self): plt.imshow(self.heatmap,origin='lower') #plot the centroid if (self.gaiadata != None): 
plt.scatter(self.gaiadata['x'],self.gaiadata['y'],s=self.gaiadata['size']*5,c='white',alpha=.6) plt.scatter(self.centroid[0],self.centroid[1],marker='X',s=100) plt.xlim(-.5,self.aperture.shape[1]-1+.5) plt.ylim(-.5,self.aperture.shape[0]-1+.5) print(self.starfit) report_fit(self.result) def pca(self): plt.figure(figsize=(12,5)) plt.plot(self.tpf.time.value, self.dm.values + np.arange(self.principle_components)*0.2) plt.title('Principle Components Contributions') plt.xlabel('Offset') g2 = self.raw_lc.plot(label='Raw light curve') self.corrected_lc.plot(ax=g2, label='Corrected light curve') plt.show() # - frequency_list = [9.51112996, 19.02225993, 28.53338989, 38.04451986, 47.55564982, 57.06677979, 66.57790975, 76.08903972] search_result = lk.search_targetpixelfile('TIC117070953') tpf = search_result.download(quality_bitmask='default') test = PixelMapFit(targetpixelfile=tpf, gaia=True, magnitude_limit=18, frequencies=frequency_list, frequnit=u.uHz, principle_components = 3) test.pca() test.info()
TESTINGNEWLK.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Image segmentation of Synthetic Unity data

# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline

from fastai.vision import *
from fastai.callbacks.hooks import *

path = Path('captures')
path.ls()

# ## Data

fnames = list((path/'train').glob('*img*'))
fnames[:3]

lbl_names = list((path/'train').glob('*layer*'))
lbl_names[:3]

img_f = fnames[0]
img = open_image(img_f)
img.show(figsize=(5,5))


# FIX: was `get_y_fn = lambda x: ...` -- assigning a lambda to a name
# (PEP 8 E731); a def is clearer and names the function in tracebacks.
def get_y_fn(x):
    """Map an image path to its segmentation-mask path ('img' -> 'layer')."""
    return str(x).replace('img', 'layer')


open_image(get_y_fn(img_f), convert_mode='L').data.unique()

mask = open_mask(get_y_fn(img_f))
mask

src_size = np.array(mask.shape[1:])
src_size,mask.data

codes = np.array(["0", "1", "2", "3", "4", "5", "6", "7", "Cube", "Sphere", "Cylinder"])

# ## Datasets

size = src_size//2
bs = 8


# +
def just_images(x):
    """Keep only the raw image files (mask files contain 'layer' instead)."""
    return 'img' in str(x)


src = (SegmentationItemList.from_folder(path)
       .filter_by_func(just_images)
       .split_by_folder(train='train', valid='val')
       .label_from_func(get_y_fn, classes=codes))
src
# -

data = (src.transform(get_transforms(), size=size, tfm_y=True)
        .databunch(bs=bs)
        .normalize(imagenet_stats))

data.show_batch(2, figsize=(10,7))

data.show_batch(2, figsize=(10,7), ds_type=DatasetType.Valid)

# ## Model

# +
name2id = {v: k for k, v in enumerate(codes)}
void_code = name2id['0']


def acc_segmentation(preds, target):
    """Per-pixel accuracy ignoring the void class ('0').

    FIX: the first parameter was named `input`, shadowing the builtin;
    fastai calls metrics positionally, so renaming it is backward compatible.
    The local filter was renamed from `mask` to `keep` to avoid clashing
    with the module-level `mask` variable above.
    """
    target = target.squeeze(1)
    keep = target != void_code
    return (preds.argmax(dim=1)[keep] == target[keep]).float().mean()
# -

metrics = acc_segmentation
wd = 1e-2

learn = unet_learner(data, models.resnet34, metrics=metrics, wd=wd)

lr_find(learn)
learn.recorder.plot()

lr = 1e-3
learn.fit_one_cycle(10, slice(lr), pct_start=0.9)

learn.save('stage-1')

learn.load('stage-1');

learn.show_results(rows=3, figsize=(16,16))

img = open_image((path/'val').ls()[0])
display(img)
plt.imshow(learn.predict(img)[1].squeeze())

learn.unfreeze()

lrs = slice(lr/400,lr/4)
learn.fit_one_cycle(12, lrs, pct_start=0.8)

learn.save('stage-2');

# ## Go big

# You may have to restart your kernel and come back to this stage if you run out of memory, and may also need to decrease `bs`.

size = src_size
bs = 3

data = (src.transform(get_transforms(), size=size, tfm_y=True)
        .databunch(bs=bs)
        .normalize(imagenet_stats))

learn = unet_learner(data, models.resnet34, metrics=metrics, wd=wd)
learn.load('stage-2');

lr_find(learn)
learn.recorder.plot()

lr = 1e-3
learn.fit_one_cycle(10, slice(lr), pct_start=0.8)

learn.fit_one_cycle(10, slice(lr), pct_start=0.8)

learn.save('stage-1-big')
learn.load('stage-1-big');

learn.unfreeze()

lrs = slice(1e-6,lr/10)
learn.fit_one_cycle(10, lrs)

learn.save('stage-2-big')
learn.load('stage-2-big');

learn.show_results(rows=3, figsize=(10,10))

# ## fin
.ipynb_checkpoints/Unity-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Forecasting with DMD

from dmdTrading.core import DMD
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt

# ## Import Data

# Price matrix: one row per node, one column per hourly time step.
df = pd.read_csv('./../datasets/all_prices.csv', header=None)
df.shape

df.columns = [f't_{i}' for i in range(len(df.columns))]
df.index = [f'node_{i}' for i in range(len(df.index))]
df.head()

# ## Prepare Dataset and Time Vectors

num_train = 2 * 24    # two days of hourly samples used for fitting
num_predict = 2 * 24  # forecast horizon: the following two days
nodes = 100           # restrict to the first 100 nodes

t_in = np.arange(num_train)
t_out = np.arange(num_predict) + num_train

X_train = df.values[:nodes, :num_train]
X_test = df.values[:nodes, num_train:num_train + num_predict]

# ## Decompose

# Rank-truncated (40 singular values) dynamic mode decomposition of the
# training window.
model = DMD.decomp(Xf=X_train, time=t_in, verbose=True, svd_cut=True, num_svd=40)

# ## Predict

# One DMD prediction per future time step, stacked as columns (nodes x time).
y_test = np.array([DMD.predict(model, t) for t in t_out]).T

# ## Visualize

node_vis_id = 7

fig, ax = plt.subplots()
ax.plot(t_in, X_train[node_vis_id], label='Training')
ax.plot(t_out, X_test[node_vis_id], label='Testing')
ax.plot(t_out, y_test[node_vis_id], label='Forecast')
ax.legend()
ax.set_xlabel('Time (hours)')
ax.set_ylabel('Price')
plt.show()
ExampleNotebooks/DMD-Forecast.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="0mjcy-LgJdUc" colab_type="code" colab={}
import numpy as np
import os
import pandas as pd
import time
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from datetime import timezone
import datetime
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import requests
import pprint

# + id="1BLj3ONHmjKL" colab_type="code" colab={}
# Alpha Vantage daily-adjusted time series for a single ticker.
comp = "AAPL"
apiKey = "<KEY>"
url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}&apikey={}&outputsize=full".format(comp, apiKey)

# + id="YE502oaCmjX-" colab_type="code" outputId="41616d8b-b844-4c1b-c659-0963a2c08185" colab={"base_uri": "https://localhost:8080/", "height": 1000}
r = requests.get(url)
data = r.json()
pprint.pprint(data)
# confirmed that data request generates a dictionary of kind:
"""
{'Meta Data': {'1. Information': 'Weekly Prices (open, high, low, close) and '
                                 'Volumes',
               '2. Symbol': 'TSLA',
               '3. Last Refreshed': '2020-02-21',
               '4. Time Zone': 'US/Eastern'},
 'Weekly Time Series': {'2010-07-09': {'1. open': '20.0000',
                                       '2. high': '20.0000',
                                       '3. low': '14.9800',
                                       '4. close': '17.4000',
                                       '5. volume': '25550600'},
"""

# + id="_fksOWnc1h1G" colab_type="code" colab={}

# + id="S2lERM3ZmjiB" colab_type="code" outputId="f2541bc7-54ff-4131-a1e9-c1ded18c1db9" colab={"base_uri": "https://localhost:8080/", "height": 402}
stock_data = pd.DataFrame.from_dict(data["Time Series (Daily)"])
transposed_stock_data = stock_data.T  # one row per trading day
transposed_stock_data.index = pd.to_datetime(transposed_stock_data.index)
transposed_stock_data['year'] = transposed_stock_data.index.year
transposed_stock_data['month'] = transposed_stock_data.index.month
transposed_stock_data['day'] = transposed_stock_data.index.day
transposed_stock_data.reset_index(inplace=True)
# NOTE: the original `transposed_stock_data.drop(transposed_stock_data.index)`
# was a no-op (the returned frame was discarded), so it has been removed.
transposed_stock_data.drop(["3. low"], axis=1, inplace=True)
transposed_stock_data.drop(["5. adjusted close"], axis=1, inplace=True)
transposed_stock_data.drop(["6. volume", "7. dividend amount", "8. split coefficient"], axis=1, inplace=True)
transposed_stock_data.drop(["2. high"], axis=1, inplace=True)

# FIX: the original built this column with `for j in [(...generator...)]:`,
# a single-iteration loop over a one-element list, and used
# `date.strftime('%s')` -- a non-portable glibc extension (fails on Windows).
# time.mktime() produces the same local-time epoch seconds portably.
transposed_stock_data["utc"] = [
    int(time.mktime(datetime.date(transposed_stock_data["year"][i],
                                  transposed_stock_data["month"][i],
                                  transposed_stock_data["day"][i]).timetuple()))
    for i in range(transposed_stock_data.shape[0])
]
transposed_stock_data

# + id="g3BOfRHYD6kb" colab_type="code" outputId="7e34a257-e88a-4ecb-e0f8-385bef42afd2" colab={"base_uri": "https://localhost:8080/", "height": 54}
# Debug cell: show the computed epoch timestamps.
print(transposed_stock_data["utc"].tolist())

# + id="ACm02CVSpP9t" colab_type="code" outputId="cdcedc38-f742-48a2-dfc3-3e9653849016" colab={"base_uri": "https://localhost:8080/", "height": 34}
os.listdir()

# + id="hmYr7WjAmjlc" colab_type="code" colab={}
transposed_stock_data.to_csv("stock_market.csv", index=None, header=True)

# + id="PB0sUQWemjnf" colab_type="code" colab={}

# + id="0CJteHMemju7" colab_type="code" colab={}

# + id="yiSh_DLVmj1S" colab_type="code" colab={}

# + id="tx6hmsBNmjzG" colab_type="code" colab={}

# + id="59x2no84mjxa" colab_type="code" colab={}
02_FinancialDataScraper.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # V 0.3
#
# to see how num of hours of absence will influence the final grade of students.

# # packages

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.tree import export_graphviz
# FIX: `sklearn.externals.six` was deprecated in scikit-learn 0.21 and removed
# in 0.23; the standard library provides the identical StringIO.
from io import StringIO
from IPython.display import Image
import pydotplus
import graphviz

# +
stdrecord = pd.read_csv('stdset.csv')
stdrecord.head()
# -

# # Preparing data for modeling, pre-processing

# # Pandas dummy

# One indicator column per final-grade category.
grade = pd.get_dummies(stdrecord['Finalgrades'])
grade.head()

# # Using labelEncoder

# +
#from sklearn.preprocessing import LabelEncoder
#labgrade = preprocessing.LabelEncoder()
#stdrecord.Finalgrades = labgrade.fit_transform(stdrecord.Finalgrades)
#stdrecord.Finalgrades.head()
# -

stdrecords = pd.concat([stdrecord, grade], axis='columns')
stdrecords

cols = ['Grades', 'Hrabsence']
x = stdrecords[cols]
y = stdrecord.Finalgrades

x.head()

y.head()

# # Splitting Data

# 70% training data and 30% test data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=3)

# # Building decision tree model

# +
dt = DecisionTreeClassifier()
dtfit = dt.fit(x_train, y_train)
# -

y_pred = dtfit.predict(x_test)
print(y_pred)

# # Evaluating model

print("Accuracy:", metrics.accuracy_score(y_test, y_pred))

# # Confusion Matrix

a = pd.DataFrame(confusion_matrix(y_test, y_pred))
print("confusion Matrix")
print(a)

# # classification Report

print(classification_report(y_test, y_pred))

# # Visualizing Decision Tree

dot_data = StringIO()
export_graphviz(dtfit, out_file=dot_data, filled=True, rounded=True,
                special_characters=True, feature_names=cols,
                class_names=["Pass", "Retake", "Redo"])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('grades.png')
Image(graph.create_png())

# # Optimizing Decision Tree performance

# +
dtfit = DecisionTreeClassifier(criterion="entropy", max_depth=5)
dtfit = dtfit.fit(x_train, y_train)
y_pred = dtfit.predict(x_test)
# -

# FIX: typo in the printed label ("Accuray" -> "Accuracy").
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
print(y_pred)

dot_data = StringIO()
export_graphviz(dtfit, out_file=dot_data, filled=True, rounded=True, special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
Untitled2.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # #### 11기 머신러닝 수업목차에 나오는 교육영상과 책을 미리 읽어오면 더 이해가 잘되니까 틈나는대로 보고 그 단원을 완성하는 것은 코드로 관련 머신러닝 알고리즘을 구현하면 완성 # ## ■ R # ### 뉴질랜드 aukland 대학의 <NAME>과 <NAME>가 1995년에 개발한 소프트웨어 # ### 데이터 분석을 위한 통계 및 그래픽스를 지원하는 무료 소프트웨어 # ## ■ 작업 디렉토리 설정 및 emp.csv 로드하는 방법 setwd("d:/R") getwd() emp<-read.csv('emp3.csv',header=T) emp # ## ■ SQL과 R의 차이 # ### 아주 긴 SQL 코드를 R 코드로는 아주 단순하게 작성할 수 있다. # ```sql # select deptno, sum(decode(job,'SALESMAN',sal,0)), # sum(decode(job,'ANALYST',sal,0)), # sum(decode(job,'CLERK',sal,0)), # sum(decode(job,'MANAGER',sal,0)), # sum(decode(job,'PRESIDENT',sal,0)) # from emp # group by deptno; # ``` attach(emp) tapply(sal,list(deptno,job),sum) # ※ attach(emp)를 안하면 다음과 같이 작성해야한다. # ```R # tapply(emp$sal,list(emp$deptno, emp$job),sum) # ``` # ### SQL로는 시각화를 할 수 없지만 R은 데이터 시각화가 아주 강력하다 # ## ■ R의 자료구조 (R에서 데이터를 저장하는 구조) # ### 1. vector # #### 같은 데이터 타입을 갖는 1차원 배열구조 # ### 2. matrix # #### 같은 데이터 타입을 갖는 2차원 배열구조 # ### 3. array # #### 같은 데이터 타입을 갖는 다차원 배열구조 # ### 4. data.frame # #### 같은 데이터 타입을 갖는 컬럼으로 이루어진 2차원 배열구조(RDBMS의 table과 유사) # ### 5. list # #### 서로 다른 데이터 구조(vector, matrix, array, data frame)인 데이터 타입이 중첩된 구조 # # ![구조](r자료구조.png) # # ``` # Oracle vs R # desc emp str(emp) # ``` str(emp) # ## ■ R에서의 기본 데이터 검색 # ### ※ 문제1. emp 데이터 프레임에서 이름과 월급을 출력하시오 emp[,c('ename','sal')] # c() : combine # ### ※ 문제2. 월급이 3000 이상인 사원들의 이름과 월급을 출력하시오 emp=read.csv('emp3.csv',header=T) emp[emp$sal>=3000,c('ename','sal')] # ### ※ 문제3. 월급이 2000 이상인 사원들의 이름과 월급을 출력하시오 emp[emp$sal>=2000,c('ename','sal')] # ### ※ R에서 사용하는 연산자 3가지 # #### 1. 산술연산자 : +, _, *, / # #### 2. 비교연산자 : >, <, >=, <=, ==, != # #### 3. 논리연산자 : & (벡터화 됐을 때 연산) / &&(벡터화 되지 않았을 때 연산) / | (벡터화 됐을 때 연산)/ || (벡터화 되지 않았을 때 연산)/ ! 
(not) # ## ■ 벡터화 된 연산 vs 벡터화 되지 않은 연산 # 벡터화 된 연산 x=c(1,2,3) print(x) print(x > c(1,1,1) & x < c(3,3,3)) # 벡터화 되지 않은 연산 x2=1 print(x2 > -2 && x2 < 2) # ### ※ 문제4. 직업이 SALESMAN인 사원들의 이름과 월급과 직업을 출력하시오 emp[emp$job=='SALESMAN',c('ename','sal','job')] # ### ※ 문제5. 직업이 SALESMAN이고 월급이 1000 이상인 사원들의 이름과 월급과 직업을 출력하시오 emp[emp$job=='SALESMAN' & emp$sal>=1000, c('ename','sal','job')] # #### ※ emp\\$job이나 emp\$sal은 벡터이다. 벡터란 같은 데이터 타입을 갖는 1차원 구조이다 # ## ■ 연결 연산자 # ``` # Oracle vs R # || paste # ``` # ### ※ 문제6. 아래와 같이 결과를 출력하시오 # ```sql # select ename||'의 직업은'|| job # from emp; # ``` paste(emp$ename,'의 직업은',emp$job) install.packages("data.table") data.table(paste(emp$ename,'의 직업은',emp$job)) # ## ■ 기타 비교 연산자 # ``` # SQL vs R # in %in% # like grep # is null is.na # between .. and >= & <= # ``` # ### ※ 문제7. 직업이 SALESMAN, ANALYST인 사원들의 이름, 월급, 직업을 출력하시오 emp[emp$job %in% c('SALESMAN','ANALYST'),c('ename','sal','job')] # ### ※ 문제8. 직업이 SALESMAN, ANALYST가 아닌 사원들의 이름, 월급, 직업을 출력하시오 emp[!emp$job %in% c('SALESMAN','ANALYST'),c('ename','sal','job')] # ### ※ 문제9. 커미션이 null인 사원들의 이름, 월급, 커미션을 출력하시오 emp[is.na(emp$comm),c('ename','sal','comm')] # ### ※ 문제10. 커미션이 null이 아닌 사원들의 이름, 월급, 커미션을 출력하시오 emp[!is.na(emp$comm),c('ename','sal','comm')] # ### ※ 문제11. 이름의 첫번째 철자가 A로 시작하는 사원들의 이름, 월급을 출력 # 정규식 표현> # ^ 첫번째 # $ 마지막 # . 한자리수 # * 여러개(wild card) emp[grep("^A",emp$ename),c('ename','sal')] # ### ※ 문제12. 이름의 끝글자가 T로 끝나는 사원들의 이름을 출력 print(emp[grep("T$",emp$ename),'ename']) # ### ※ 문제13. 이름의 두번째 철자가 M인 사원들의 이름, 월급을 출력 emp[grep('^.M',emp$ename),c('ename','sal')] # ### ※ 문제14. 이름의 세번째 철자가 L인 사원들의 이름, 월급을 출력 emp[grep('^..L',emp$ename),c('ename','sal')] # ## ■ 중복제거 # ``` # SQL vs R # distinct unique # ``` # ### ※ 문제15. 부서번호를 출력하는데 중복을 제거해서 출력하시오 print(data.frame("부서번호"=unique(emp$deptno))) # ## ■ 정렬작업 # ``` # SQL vs R # order by data frame의 order 옵션 # doBy 패키지 -> orderBy 함수 # ``` install.packages("doBy") # ### ※ 문제16. 
이름과 월급을 출력하는데 월급이 높은 사원부터 출력하시오 emp[order(emp$sal, decreasing = T),c('ename','sal')] # ### ※ 문제17. 직업이 SALESMAN인 사원들의 이름, 월급, 직업을 출력하는데 월급이 높은 사원부터 출력 rs=emp[emp$job=='SALESMAN',c('ename','sal','job')] rs[order(rs$sal,decreasing = T),] # R에서 모든 변수들을 다 보고싶을 때 ls() ls() # x, x2 변수를 지우고 싶다면? rm(x, x2) ls() # ### ※ 문제18. (점심시간 문제) 부서번호가 20번인 사원들의 이름과 월급과 부서번호를 출력하는데 월급이 높은 사원부터 출력 rs=emp[emp$deptno==20,c('ename','sal','deptno')] rs[order(rs$sal, decreasing = T),] # ### ※ 문제19. 위의 문제를 doBy 패키지를 이용해서 수행하시오 library(doBy) orderBy(~-sal,emp[emp$deptno==20,c('ename','sal')]) # 내림차순 orderBy(~sal,emp[emp$deptno==20,c('ename','sal')]) # 오름차순 # ### ※ 문제20. crime_loc.csv를 워킹 디렉토리에 내려받고 R로 로드해서 결과를 출력하는데 살인이 일어나는 장소와 건수를 출력하는데 건수가 높은 것 부터 출력하시오 c_loc=read.csv('crime_loc.csv',header=T) c_loc rs=c_loc[c_loc$범죄=='살인',] orderBy(~-건수,rs) # ### ※ 문제21. c_loc에서 범죄유형을 출력하는데 중복을 제거해서 출력하시오 data.frame('범죄'=unique(c_loc$범죄)) # ## ■ SQL과 R 함수 비교 # ### 함수 # #### 1. 문자함수 # #### 2. 숫자함수 # #### 3. 날짜함수 # #### 4. 변환함수 # #### 5. 일반함수 # #### ■ 문자함수 # ``` # SQL vs R # upper toupper # lower tolower # substr substr # replace gsub # ``` # ### ※ 문제22.이름과 직업을 출력하는데 소문자로 출력하시오 # cbind(tolower(emp$ename),tolower(emp$job)) data.frame(이름=tolower(emp$ename),직업=tolower(emp$job)) # ##### ■ substr 함수 # ### ※ 문제23. 이름의 두번째 철자가 M인 사원들의 이름과 월급을 출력하는데 substr을 이용해서 수행하시오 # ```sql # select ename, sal # from emp # where substr(ename,2,1)='M'; # ``` emp[substr(emp$ename,2,2)=='M',c('ename','sal')] # ### ※ 문제24. SMITH의 이름을 출력하는데 MIT만 출력하시오 print(substr(emp[emp$ename=='SMITH','ename'],2,4)) # ##### ■ gsub 함수 # gsub('h','H',text) # 특정 text에서 소문자 h를 대문자 H로 변경해라 # ### ※ 문제25. 이름과 월급을 출력하는데 월급을 출력할 때 숫자 0을 별표(*)로 출력하시오 # ```sql # select ename, replace(sal,0,'*') # from emp; # ``` data.frame(emp$ename,gsub(0,'*',emp$sal)) # ### ※ 문제26. 
아래의 SQL을 R로 구현하시오 # ```sql # select ename, regexp_replace(sal,'[0-2]','*') # from emp; # ``` data.frame(이름=emp$ename, 월급=gsub('[0-2]','*',emp$sal)) # #### ■ 숫자함수 # ``` # SQL vs R # round round # trunc trunc # mod %% # power ^ # ``` # ### ※ 문제27. 6의 9승을 출력하시오 6^9 # ### ※ 문제28. 10을 3으로 나눈 나머지값을 출력하시오 print(10 %% 3) # ### ※ 문제29. 이름과 연봉을 출력하시오 orderBy(~-연봉, data.frame(이름=emp$ename, 연봉=emp$sal*12)) orderBy(~-연봉,data.frame(이름=emp$ename,연봉=round(emp$sal*12,-3))) # ### ※ 문제30. 백단위 자리를 포함해서 다 잘라내고 0으로 출력되게 하시오 orderBy(~-연봉, data.frame(이름=emp$ename, 연봉=trunc(emp$sal*12))) # ※ R에서 trunc 함수는 소숫점 이하만 버리고 정수부분은 버리지 않는다. round는 반올림이 어느 자리에서든 가능 # #### ■ 날짜함수 # ``` # Oracle vs R # sysdate Sys.date() # add_months() # months_between 사용자 정의 함수 # last_day # next_day # ``` # ### ※ 문제31. 오늘 날짜를 출력하시오 Sys.Date() # ### ※ 문제32. 이름, 입사한 날짜부터 오늘까지 총 몇일 근무했는지 출력하시오 data.frame(근무일=Sys.Date()-as.Date(emp$hiredate)) # ### ※ 문제33. 오늘날짜의 달의 마지막 날짜를 출력하시오 # ```sql # select last_day(sysdate) # from dual; # ``` library(lubridate) print(ceiling_date(Sys.Date(),'month')) print(floor_date(Sys.Date(),'month')) print(ceiling_date(Sys.Date(),'month')-1) # #### ※ 함수 생성 last_day=function(x){ ceiling_date(x,'months')-days(1) } print(last_day(Sys.Date())) # ### ※ 문제34. first_day 함수를 아래와 같이 생성하시오 first_day=function(x){ floor_date(x,'month') } print(first_day(Sys.Date())) # ### ※ 문제35. 오늘부터 100달뒤에 돌아오는 날짜를 출력하시오 # ```sql # select add_months(sysdate,100) # from dual; # ``` print(Sys.Date()+months(100)) print(Sys.Date()+days(100)) print(Sys.Date()+years(100)) # #### ■ 변환함수 # ``` # Oracle vs R # to_char as.character # to_number as.integer # to_date as.Date # as.Factor # ``` # #### ■ 날짜형 --> 문자형으로 변환 # ``` # format 함수 %Y, %m, %d, %A(요일) # ``` # ### ※ 문제36. 이름, 입사한 요일을 출력하시오 data.frame(이름=emp$ename, 요일=format(as.Date(emp$hiredate),"%A")) # ### ※ 문제37. 내가 무슨 요일에 태어났는지 출력하시오 print(format(as.Date('1987-02-18',format='%Y-%m-%d'),'%A')) # ### ※ 문제38. 
이름, 입사한 연도(4자리)를 출력하시오 data.frame(이름=emp$ename,입사연도=format(as.Date(emp$hiredate),'%Y')) # ### ※ 문제39. 1981년도에 입사한 사원들의 이름, 입사일을 출력하시오 emp[format(as.Date(emp$hiredate),'%Y')=='1981',c('ename','hiredate')] # ### ※ 문제40. 수요일에 입사한 사원들의 이름, 입사일을 출력하시오 emp[format(as.Date(emp$hiredate),'%A')=='수요일',c('ename','hiredate')]
02. R basic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] deletable=false # # **Tutorial 08: Practical Examples of the OOP Paradigm** 👀 # + [markdown] deletable=false # Computer games are quite suitable to practice **object-oriented modelling** and **design**. Making simple games can demonstrate the effectiveness of **object orientation** to easily scale projects and build larger software components. In these special themed mini-projects, we will develop simple games using an **object-oriented approach**. # + [markdown] deletable=false editable=false # <br><br><a id='t1cls'></a> # ## ▙▂ **Part 1: Guess the Number Game ▂▂** # + [markdown] deletable=false # In the first tutorial, we will implement a simple guess-the-number game. This game was already part of the exercises for the Analysis 1 course, and here we will show one more way to implement it; this time using an object-oriented approach. # # The objective of the tutorial is to **practice design and implementation of simple game classes**. Also, you will **get familiar with a basic game loop**. 
# + [markdown] deletable=false editable=false
# <a id='t4p1toc'></a>
# #### Contents: ####
# - [`GuessTheNumber` Class](#t4p1gtn)
# - [Test and Experiment](#t4p1tegtn)
# - [`AutoGame` Class](#t4p1agc)
# - [Exercise](#t4p1agcex)
# - [Game Loop](#t4p1gameloop)
# - [Game Class](#t4p1gameclass)
# - [Test and Experiment](#t4p1tegameclass)
# - [GuessTheNumber Game](#t4p1guessthenumber)
# - [Implement, Test and Experiment](#t4p1iteguessthenumber)
# - [BotGame](#t4p1botgame)
#

# + [markdown] deletable=false
# <a id='t4p1gtn'></a>
# #### **▇▂ `GuessTheNumber` Class ▂▂**
# We start with the basic class that should do the following:
# - generate a random number in a given interval upon initialization,
# - implement a method `guess()` that accepts an integer and prints "lower" or "higher" depending on the difference between the generated number and the one inputted; if the guess is correct, it should print so,
# - `guess()` should also track the number of attempts,
# - `guess()` should give an appropriate return value -1, 0 or 1 that can be interpreted by other functions.
#
# Other class methods are used to set / get values and as helper methods.

# + [markdown] deletable=false editable=false
# An UML class diagram for `GuessTheNumber` class can be found below.

# + [markdown] deletable=false
# <br>⚠ <b>NOTE</b><br>
# >You will learn about the UML and Class diagram in Lesson 7. At the moment, it is enough to understand the structure of the class in the figure below.<br>

# + [markdown] deletable=false editable=false
# ![game1-guessthenumber1.png](attachment:game1-guessthenumber1.png)

# + [markdown] deletable=false editable=false
# Import `randint` from `random` module.

# + deletable=false
from random import randint


# + [markdown] deletable=false editable=false
# Implement `GuessTheNumber` class.

# + deletable=false
class GuessTheNumber:
    """Guess-the-number game backend.

    On initialization (and on every ``reset()``) a secret integer between
    ``min`` and ``max`` (1-100 by default, both inclusive) is generated with
    ``random.randint()``.  ``guess()`` accepts an integer, prints 'lower' /
    'higher' hints, reports a correct guess, and tracks the number of
    attempts.
    """

    def __init__(self, min=1, max=100):
        """Initialize and set the [min, max] range for the secret number."""
        self.max = max
        self.min = min
        self.reset()

    def reset(self):
        """Call reset to enable the game to be played again.

        Regenerates the secret number and clears the attempt counter and
        guessed flag.
        """
        self.__number = randint(self.min, self.max)
        self.attempts = 0
        self.guessed = False

    def get_attempts(self):
        """Return how many guesses were made since the last reset."""
        return self.attempts

    def get_guessed(self):
        """Return True once the secret number has been guessed."""
        return self.guessed

    def debug_number(self):
        """Reveal the secret number (debugging helper)."""
        return "Generated number is {}.".format(self.__number)

    def guess(self, value):
        """Check *value* against the secret number.

        Increments the attempt counter and prints the appropriate outcome.
        Also returns a value to be interpreted by other functions if needed:
        0 - guessed, 1 - the guess was too high, -1 - the guess was too low.
        """
        self.attempts += 1
        if value == self.__number:
            print("You guessed the number!")
            self.guessed = True
            return 0
        else:
            if value > self.__number:
                print("Your guess is higher than the generated number")
                return 1
            else:
                print("Your guess is lower than the generated number")
                return -1


# + [markdown] deletable=false
# <a id='t4p1tegtn'></a>
# ##### **Test and Experiment**
# Good programming practice suggests testing a class once implemented. Instantiate the `GuessTheNumber` class.

# + deletable=false
gtn = GuessTheNumber()

# + [markdown] deletable=false
# Change the input to guess-the-number.

# + deletable=false
gtn.guess(50)

# + [markdown] deletable=false editable=false
# Call `get_attempts()` to show how many guesses it took to find the number.

# + deletable=false
gtn.get_attempts()

# + [markdown] deletable=false editable=false
# Reset it and try again.

# + deletable=false
gtn.reset()

# + deletable=false
# BUGFIX: the original placeholder `your_guess = ` (no right-hand side) is a
# SyntaxError when the notebook is run top-to-bottom; a default value keeps
# the cell runnable -- replace it with your own guess.
your_guess = 50  # type your guess here
gtn.guess(your_guess)

# + [markdown] deletable=false
# Play with different parameters (e.g. generate a number between 20 and 1000). Use the `debug_number()` method to determine which number was generated.
# + deletable=false # + deletable=false # + [markdown] deletable=false editable=false # <br>[back to top ↥](#t4p1toc) # + [markdown] deletable=false # <a id='t4p1agc'></a> # #### **▇▂ `AutoGame` Class ▂▂** # Next, we want a class that can have the same functionality, plus iteratively guess the number (one try per call). We can reuse the `GuessTheNumber` class and extend it with attributes and methods we need. One way to design such a class would be like the one given in the UML class diagram below. # + [markdown] deletable=false editable=false # ![game1-autogame1.png](attachment:game1-autogame1.png) # + [markdown] deletable=false # <a id='t4p1agcex'></a> # ##### **Exercise** # Implement the `AutoGame` class. # The `auto_guess()` method should return 0 if number is guessed, 1 if the guess was higher than the generated number, and -1 if it was lower. It is up to you how you want to determine the `next_guess` value. # + [markdown] deletable=false editable=false # <br>⚠ <b>NOTE</b><br> # >it is perfectly normal and even desirable to extend the class design with helper attributes and methods.<br> # + deletable=false # class AutoGame # add what is missing # def reset(self): # ... implement ... # def auto_guess(self): # ... implement ... # ... add any other helper method if needed ... # + deletable=false # + [markdown] deletable=false editable=false # If you have implemented `AutoGame` as required, then the following code can be used to simulate one game run and test if everything is working as intended. # + deletable=false ag = AutoGame() while not ag.get_guessed(): print("This bot will try with number", ag.next_guess, "next.") ag.auto_guess() print() print("The number was guessed after", ag.get_attempts(), "attempts.") # + [markdown] deletable=false editable=false # We recommend that you try and implement `AutoGame` by yourself. If you still have difficulty, you can take a look at the design we opted for, as well as the code. 
# Normally, helper attributes and methods are private or protected, but here,
# for clarity, we made everything public.
#
# Note: teachers may want to explain which methods are called when
# `AutoGame.__init__()` is triggered. This is an example of **polymorphism**,
# but it is not our focus at this point.

# + [markdown] deletable=false editable=false
# ![game1-autogame2.png](attachment:game1-autogame2.png)

# + deletable=false
class AutoGame(GuessTheNumber):
    """A child of GuessTheNumber that can guess on its own.

    ``auto_guess()`` performs a binary search: each call tries the midpoint
    of the interval of values that are still possible.
    """

    def __init__(self):
        super().__init__()
        # Redundant (GuessTheNumber.__init__ already reaches our overridden
        # reset() through polymorphism) -- kept for code clarity.
        self.reset()

    def _midpoint(self):
        """Midpoint of the currently possible [possible_min, possible_max] range."""
        return int((self.possible_max + self.possible_min) / 2)

    def reset(self):
        """Call reset to enable the game to be played again."""
        super().reset()
        self.possible_max = self.max
        self.possible_min = self.min
        self.next_guess = self._midpoint()

    def adjust(self, previous_outcome):
        """Narrow the search interval based on the previous guess outcome."""
        if previous_outcome == 0:
            return  # number already guessed; nothing left to narrow
        if previous_outcome > 0:
            self.possible_max = self.next_guess - 1  # last guess was too high
        else:
            self.possible_min = self.next_guess + 1  # last guess was too low
        self.next_guess = self._midpoint()

    def auto_guess(self):
        """Guess the number by trying the middle of the available range."""
        outcome = self.guess(self.next_guess)
        self.adjust(outcome)
        return outcome

    def status(self):
        """For debugging purpose. Prints attributes' values."""
        print("Generated number is {}; next guess is {} and no. attempts is {}"
              .format(self._GuessTheNumber__number, self.next_guess, self.attempts))
        print("max: {} min: {}".format(self.possible_max, self.possible_min))


# + [markdown] deletable=false editable=false
# <br>[back to top ↥](#t4p1toc)

# + [markdown] deletable=false
# <a id='t4p1gameloop'></a>
# #### **▇▂ Game Loop ▂▂**
# At the heart of every game is its **game loop**.
Depending on the complexity of each game, game loops can contain different steps, but every game has three distinct tasks that must be handled: # - get (user) input, # - update the game state, # - display the game. # + [markdown] deletable=false editable=false # <img src="attachment:game%20loop%201.jpeg" width=500></img> # + [markdown] deletable=false editable=false # (Source: Game loop; taken from: [https://gameprogrammingpatterns.com/game-loop.html](https://gameprogrammingpatterns.com/game-loop.html) # + [markdown] deletable=false # You can also consider using an initialization step before entering into **game loop** and a clean-up step once it is exited. # + [markdown] deletable=false editable=false # <img src="attachment:game%20loop%202.png" width=600></img> # + [markdown] deletable=false editable=false # Source: Game loop; taken from: [http://nepos.games/nebuchadnezzar/blog/4](http://nepos.games/nebuchadnezzar/blog/4) # + [markdown] deletable=false editable=false # As every game will contain those steps, good design practice is to make a parent template class, that would contain all these **game loop** steps and run them accordingly. Then, we can ***inherit from such a class***, and ***extend*** and ***override only the methods needed***. # # Since our first games will writing to console, we will start the game loop with draw, following with user input and finally updating the game states. # + [markdown] deletable=false editable=false # <br>[back to top ↥](#t4p1toc) # + [markdown] deletable=false # <a id='t4p1gameclass'></a> # #### **▇▂ Game Class ▂▂** # At its core, the game class should be treated as an *abstract class*. But, since we want to test and run it (even though it does nothing special), we need to implement methods: `run()`, as well as `user_input()` and `update()` to allow us to terminate the game loop. 
class Game:
    """Base template class implementing a minimal game loop.

    Subclass it and override draw() / user_input() / update() /
    intro() / outro() as needed; run() drives the loop until quit.
    """

    def __init__(self):
        self.__running = True

    def get_state(self):
        """Return True while the game loop is allowed to continue."""
        return self.__running

    def start(self):
        """ Internal message to start the game. """
        self.__running = True

    def quit(self):
        """ Internal message to end the game. """
        self.__running = False

    def draw(self):
        """Render the game. Does nothing by default. Override."""
        pass

    def user_input(self):
        """ Expects input from the user and returns the result. Override."""
        print("Enter Q or QUIT to terminate the game")
        command = input("Please enter your command:")
        return command

    def update(self, keys):
        """ Processes given input. By default quits on Q or QUIT. Override. """
        if keys.upper() in ("QUIT", "Q"):
            self.quit()

    def intro(self):
        """ The first method to be called (once) when the game starts. Override. """
        pass

    def outro(self):
        """ The last method to be called (once) when the game ends. Override. """
        pass

    def run(self):
        """Drive the loop: intro once, then draw/input/update until quit, outro once."""
        self.intro()
        while self.__running:
            self.draw()
            self.update(self.user_input())
        self.outro()


class HelloWorldGame(Game):
    """Trivial Game subclass: greets the player, then relies on the default loop."""

    def intro(self):
        print("Welcome to the Hello World game!")
        print("Type anything you want")
# # The recommended design is given below via UML class diagram. # + [markdown] deletable=false editable=false # ![game1-alltogether1.png](attachment:game1-alltogether1.png) # + [markdown] deletable=false editable=false # <a id='t4p1iteguessthenumber'></a> # ##### **Implement, Test and Experiment** # # Implement `GuessTheNumberGame` class. # + deletable=false # Your implementation goes here ... # + deletable=false # + deletable=false # + [markdown] deletable=false editable=false # Test your game by running it. # + deletable=false g = GuessTheNumberGame() g.run() # + [markdown] deletable=false editable=false # <br>[back to top ↥](#t4p1toc) # + [markdown] deletable=false editable=false # ##### **Solution** # + [markdown] deletable=false editable=false # Implementing `GuessTheNumberGame` class is a crucial step which shows if you are able to read and implement given designs, as well as if you understand object-oriented relations such as inheritance and aggregation, including method overriding. Therefore, it is strongly recommended not to look at the solution, but make extra effort to implement it on your own. Once done, you can compare your solution with the one we had in mind, and note different programming styles. 
# + deletable=false class GuessTheNumberGame(Game): def __init__(self): super().__init__() self.game_logic = GuessTheNumber() self.max_attempts = 5 def intro(self): print("Welcome to Guess the number game!") print("The computer will generate a number in range 1-100.") print("You have 5 attempts to find the target number.") print() self.game_logic.reset() self.start() def user_input(self): res = int(input("Enter your number:")) return res def draw(self): print() print("Try to guess the number.") print("You have {} attempts left".format(self.max_attempts - self.game_logic.get_attempts())) def update(self, keys): res = self.game_logic.guess(keys) if res == 0 or self.game_logic.get_attempts() == self.max_attempts: self.quit() def outro(self): print() if self.game_logic.get_guessed(): print("Congratulations! You have guessed the number!") else: print("Game over! Please try again.") # + [markdown] deletable=false editable=false # <br>[back to top ↥](#t4p1toc) # + [markdown] deletable=false # <a id='t4p1botgame'></a> # #### **▇▂ BotGame - Extra Practice ▂▂** # You can go further and use the available classes to make a bot game - let the computer generate a number and then try to guess it in a given number of attempts. The easier option is to follow the same approach as in the `GuessTheNumberGame` class above. But, for extra practice, try to inherit from the `GuessTheNumberGame` class and only override the methods that need to be changed. # + deletable=false # Your implementation goes here ... # class BotGame # ... # + deletable=false # + deletable=false # + [markdown] deletable=false editable=false # You can test the game in the same manner like done previously. # + deletable=false g2 = BotGame() g2.run() # + [markdown] deletable=false editable=false # ##### **Solution** # + deletable=false class BotGame(GuessTheNumberGame): def __init__(self): # # ! 
We don't want to call father's init, but we need grandfather's init super(GuessTheNumberGame, self).__init__() self.game_logic = AutoGame() self.max_attempts = 5 def user_input(self): bot_guess = self.game_logic.next_guess print("I will guess: {}".format(bot_guess)) return bot_guess def update(self, keys): res = self.game_logic.guess(keys) self.game_logic.adjust(res) if res == 0 or self.game_logic.get_attempts() == self.max_attempts: self.quit() # + deletable=false # + [markdown] deletable=false editable=false # <br>[back to top ↥](#t4p1toc)
ipynb/T08-01-Practical-Examples-OOP-Part-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!L backend = 'None' # 'YC', 'Colab' if backend == 'Colab': # !pip install lpips # !git clone https://github.com/yandexdataschool/Practical_DL.git # !sudo apt install -y ninja-build # %cd /content/Practical_DL/seminar07-gen_models_2 # !wget https://www.dropbox.com/s/2kpsomtla61gjrn/pretrained.tar # !tar -xvf pretrained.tar elif backend == 'YC': # Yandex Cloud (temporary unavailable) # %wget https://www.dropbox.com/s/2kpsomtla61gjrn/pretrained.tar # %tar -xvf pretrained.tar # + # !wget https://www.dropbox.com/s/2kpsomtla61gjrn/pretrained.tar # !tar -xvf pretrained.tar import sys sys.path.append('/content/Practical_DL/seminar08-gen_models_3') # + # #!L import torch from torch import nn from torch.nn import functional as F import numpy as np from matplotlib import pyplot as plt print (torch.cuda.device_count()) print (torch.__version__) import torchvision from torchvision.utils import make_grid from torchvision.transforms import ToPILImage from tqdm.auto import tqdm, trange from PIL import Image from gans.gan_load import make_stylegan2 def to_image(tensor, adaptive=False): if len(tensor.shape) == 4: tensor = tensor[0] if adaptive: tensor = (tensor - tensor.min()) / (tensor.max() - tensor.min()) else: tensor = ((tensor + 1) / 2).clamp(0, 1) return ToPILImage()((255 * tensor.cpu().detach()).to(torch.uint8)) def to_image_grid(tensor, adaptive=False, **kwargs): return to_image(make_grid(tensor, **kwargs), adaptive) # - # !cd /content/Practical_DL # + # #!L G = make_stylegan2(resolution=1024, weights='pretrained/stylegan2-ffhq-config-f.pt', target_key='g').eval() with torch.no_grad(): z = torch.randn([4, 512]).cuda() imgs = G(z) plt.figure(dpi=200) plt.axis('off') plt.imshow(to_image_grid(imgs, nrow=6)) # - # # Naive inversions # + # download image 
def portrait_crop(img, h_percent, w_percent):
    """Crop *img* symmetrically in width to w_percent of its width and keep
    the top h_percent of its height (a simple portrait/face crop)."""
    w, h = img.size
    w_offset = int(0.5 * (1 - w_percent) * w)
    return img.crop([w_offset, 0, w - w_offset, int(h_percent * h)])


def load_image(img_url, zoom=1.0, w=1.0, h=1.0):
    """Download an image and prepare it as a 1 x 3 x 1024 x 1024 CUDA tensor in [-1, 1].

    zoom scales the image before the final 1024 center crop; w / h give the
    width / height fractions for the initial portrait crop.
    """
    crop = lambda x: portrait_crop(x, w, h)
    normalization = transforms.Compose([
        crop,
        # The original pipeline applied this identical Resize twice in a row;
        # the second call was a copy-paste no-op and has been removed.
        transforms.Resize(int(zoom * 1024)),
        transforms.CenterCrop(1024),
        transforms.ToTensor(),
        lambda x: 2 * x - 1,   # [0, 1] -> [-1, 1], the generator's input range
    ])

    img_data = requests.get(img_url).content
    img = Image.open(BytesIO(img_data))
    return normalization(img).unsqueeze(0).cuda()


def invert(img, G, latent_init, n_steps=500, lr=0.025,
           l2_loss_scale=0.1, lpips_loss_scale=1.0, id_loss_scale=1.0,
           latent_map=lambda x: x,
           **g_kwargs):
    """Optimize a latent so that G(latent) reconstructs *img*.

    Minimizes a weighted sum of pixel L2, LPIPS perceptual distance and a
    face-feature (identity) L2, each term skipped when its scale is 0.
    latent_map transforms the raw parameter before it is fed to G (e.g. to
    wrap a w+ code in a list); extra g_kwargs are forwarded to G.
    Returns (last reconstruction, optimized latent Parameter, combined-loss history).
    """
    latent = nn.Parameter(latent_init.cuda())
    opt = torch.optim.Adam([latent,], lr=lr)

    l2_losses = []
    perceptual_losses = []
    id_losses = []
    losses = []
    for i in trange(n_steps):
        opt.zero_grad()
        reconstruction = G(latent_map(latent), **g_kwargs)

        # Three names bound to the same scalar-zero tensor; safe because each
        # is only ever rebound below, never mutated in place.
        l2_loss, perceptual_loss, id_loss = [torch.zeros([])] * 3
        if l2_loss_scale > 0.0:
            l2_loss = F.mse_loss(img, reconstruction).mean()
        if lpips_loss_scale > 0.0:
            perceptual_loss = lpips_dist(img, reconstruction).mean()
        if id_loss_scale > 0.0:
            id_loss = F.mse_loss(face_fe(img), face_fe(reconstruction)).mean()
        loss = l2_loss_scale * l2_loss + lpips_loss_scale * perceptual_loss + id_loss_scale * id_loss
        loss.backward()

        l2_losses.append(l2_loss.item())
        perceptual_losses.append(perceptual_loss.item())
        id_losses.append(id_loss.item())
        losses.append(loss.item())
        opt.step()

        # Report running means over the last (up to) 100 steps.
        if i % 100 == 0:
            print(f'{i}: loss: {np.mean(losses[-100:]): 0.2f}; '
                  f'l2-loss: {np.mean(l2_losses[-100:]): 0.2f}; '
                  f'lpips loss: {np.mean(perceptual_losses[-100:]): 0.2f}; '
                  f'id-loss: {np.mean(id_losses[-100:]): 0.2f}')

    return reconstruction, latent, losses


def show_inversion_result(img, reconstruction, losses=None):
    """Plot the original and the reconstruction side by side, plus the loss
    curve when a loss history is supplied."""
    _, axs = plt.subplots(1, 3, dpi=250)
    for ax in axs[:2]:
        ax.axis('off')
    axs[0].imshow(to_image_grid(img))
    axs[1].imshow(to_image_grid(reconstruction))
    if losses is not None:
        # Square-ish aspect for the loss panel regardless of the value range.
        axs[2].set_aspect(1.0 / np.max(losses) * len(losses))
        axs[2].set_title('Loss')
        axs[2].plot(losses)
GradualStyleEncoder(50, 'ir_se', opts=Namespace(**encoder_chkpt['opts'])) encoder_state = {name[len('encoder.'):]: val for name, val in encoder_chkpt['state_dict'].items() \ if name.startswith('encoder')} encoder.load_state_dict(encoder_state) encoder.cuda().eval(); latent_mean = encoder_chkpt['latent_avg'].cuda() # - with torch.no_grad(): w_inversion = encoder(F.interpolate(img, 256, mode='bilinear')) + latent_mean[None] rec = G([w_inversion], w_space=True) show_inversion_result(img, rec) # ### pix2style2pix with optimization rec, w_plus, losses = invert(img, G, w_inversion, n_steps=100, lr=0.005, latent_map=lambda w_plus: [w_plus], w_space=True) show_inversion_result(img, rec, losses) # # Style Mix # + with torch.no_grad(): w_target = G.style_gan2.style(torch.randn([1, G.dim_z], device='cuda')).unsqueeze(1).repeat(1, 18, 1) w_source = G.style_gan2.style(torch.randn([1, G.dim_z], device='cuda')) target = G(w_target, w_space=True) source = G(w_source, w_space=True) plt.axis('off') plt.imshow(to_image_grid(torch.cat([target, source]))) # - styled_rec = [] for i in range(18): w_styled = w_target.clone() w_styled[:, :i] = w_source styled_rec.append(G([w_styled], w_space=True).cpu().detach()) plt.figure(dpi=250) plt.axis('off') plt.imshow(to_image_grid(torch.cat(styled_rec)))
seminar08-gen_models_3/inversion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Data Preprocessing # + import numpy as np import pandas as pd import sys import sklearn.neighbors._base sys.modules['sklearn.neighbors.base'] = sklearn.neighbors._base from missingpy import MissForest from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, LabelEncoder import pycountry_convert as pc import pycountry from difflib import SequenceMatcher import warnings warnings.filterwarnings('ignore') # - # ## Load Data file df = pd.read_csv(r'data/Train.csv') df.shape # ### Null Values df.isnull().sum()/df.shape[0] # ## Imputing Missing Values # # The following features need imputing for its missing values - # 1. travel_with | Categorical | 1114 | 23% # 2. most_impressing | Categorical | 313 | 6.5% # 3. total_male | Numerical | 5 | <0% # 4. total_female | Numerical | 3 | <0% # # Categorical variables : # Imputing all the categorical fields using MissForest Imputer from missingpy library since more than 20% of the data is missing for the feature 'travel_with'. For using MissForest Imputer we need to first encode categorical data for the algorithm to function. Hence we will manually encode the categorical variables for ease of inversion to categorical data type. # # Numerical variables : # The variables total male and total female have a few null values but for a few rows both variables show 0 number of tourists attending the tour. These ghost numbers will be replaced with the mean for computing purposes. 
# + # Dictionary for encoding the categorical features # Most Impressing Feature mi = { 'Friendly People' : 0, 'Wonderful Country, Landscape, Nature': 1, 'Excellent Experience': 2, 'No comments': 3, ' Wildlife': 4, 'Good service': 5, 'Satisfies and Hope Come Back': 6 } # Travel With feature trw = { 'Friends/Relatives': 0, 'Alone': 1, 'Spouse': 2, 'Children': 3, 'Spouse and Children': 4 } # + def find_value(x, dictionary): """Find the value from dictionary for a key""" for k, v in dictionary.items(): if x == k: return int(v) def find_key(x, dictionary): """Find the key from dictionary for a value""" for k, v in dictionary.items(): if x == v: return k # + # Most Impressing df['most_impressing'] = df['most_impressing'].apply(lambda x : find_value(x, mi)) # Travel With df['travel_with'] = df['travel_with'].apply(lambda x : find_value(x, trw)) ## Ghost Tourists cond1 = df['total_male']==0 # Condition: Male Tourist is 0 cond2 = df['total_female']==0 # Condition: Female Tourist is 0 male_avg = round(df['total_male'].mean()) # Average Make tourists in the data female_avg = round(df['total_female'].mean()) # Average Female tourists in the data # Total Male df.loc[cond1 & cond2, 'total_male'] = male_avg # Total Female df.loc[cond1 & cond2, 'total_female'] = female_avg # + # Imputer imputer = MissForest(random_state=7) X = df.drop(['ID'], axis=1) cat_cols = X.select_dtypes(include='object').columns # Encode categorical data le = LabelEncoder() for i in cat_cols: X[i] = le.fit_transform(X[i]) # Impute Missing Values in 'travel_with' feature X_imputed = imputer.fit_transform(X) # - X_clean = pd.DataFrame(X_imputed, columns=X.columns) # + # Most Impressing df['most_impressing'] = X_clean['most_impressing'].apply(lambda x : find_key(round(x), mi)) # Travel With df['travel_with'] = X_clean['travel_with'].apply(lambda x : find_key(round(x), trw)) # Male Tourists df['total_male'].fillna(male_avg, inplace=True) # Female Tourists df['total_female'].fillna(female_avg, inplace=True) # - 
df['travel_with'].value_counts() # ## Country Name Anomalies # # The following country name inputs were entered incorrectly. We will have to correct it for the data analysis # # 1. SWIZERLAND : SWITZERLAND # 2. MALT : MALTA # 3. UKRAIN : UKRAINE # 4. BURGARIA : BULGARIA # 5. TRINIDAD TOBACCO : TRINIDAD AND TOBAGO # 6. COMORO : COMOROS # 7. PHILIPINES : PHILIPPINES # 8. DJIBOUT : DJIBOUTI # 9. MORROCO : MOROCCO # 10. SCOTLAND : UNITED KINGDOM # # Since we need to match the country names provided in the pycountry library for Geospatial analysis we convert them using a function. df['country'] = df['country'].str.title() [{country.alpha_2: country.name} for country in sorted(pycountry.countries, key=lambda x: x.name)] # + # Corrected country names c_name = { 'Swizerland': 'Switzerland', 'Malt': 'Malta', 'Ukrain': 'Ukraine', 'Burgaria': 'Bulgaria', 'Trinidad Tobacco': 'Trinidad and Tobago', 'Comoro': 'Comoros', 'Philipines': 'Philippines', 'Djibout': 'Djibouti', 'Morroco': 'Morocco', 'Scotland': 'United Kingdom', 'United States Of America': 'United States', 'Costarica': 'Costa Rica', 'Uae': 'United Arab Emirates', 'Drc': 'Congo', 'Korea': 'Korea, Republic of', 'Russia': 'Russian Federation', 'Iran': 'Iran', 'Czech Republic': 'Czechia' } # Applying correction on the function df['country'] = df['country'].apply(lambda x: c_name.get(x, x)) # - df['country'].unique() # ### Output df.to_csv(r'data/clean_train.csv', index=False)
Tanzania Tourism Prediction/Preprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Ejercicio Precipitacion # # Para el ejercicio vamos a ocupar la base de datos de precipitacion de la RNEAA la cual incluye los siguientes campos: # # * index : variable entera, indice del valor # * name : variable texto, nombre de la estacion # * number : variable entera: numero de la estacion # * lat : variable flotante : latitud # * long : variable flotante : longitud # * year : variable entera : anio # * month : variable entera : mes # * day : variable entera : dia # * rain : variable flotante : precipitacion # # Esta base de datos se encuentra en la carpeta **`data`** con el nombre de **`data_course_prec.csv`** # # ** Nota: Si ejecutas alguna celda de codigo el resultado puede desaparecer** # ** Importar numpy y pandas como np y pd ** import numpy as np import pandas as pd # ** Leer el archivo csv en un dataframe de nombre data ** data = pd.read_csv('../../data/data_course_prec.csv') # ** Checar los 5 primeros registros ** data.head() # ** Checar las columnas del dataframe ** data.columns # ## Preguntas basicas # **Numero total de estaciones en la base de datos?** data['name'].nunique() # **Cuales son los 5 anios con mas datos disponibles?** data['year'].value_counts().head(5) # ** Precipitacion acumulada en la base de datos?** data['rain'].sum() # ** Precipitacion acumulada por anios?** data.groupby('year').sum()['rain'] # ** Numero de dias con lluvia en la base de datos? ** len(data.loc[data['rain']>0]) # ** Numero de dias sin lluvia en la base de datos? ** len(data.loc[data['rain'] == 0]) # ** Numero de dias con valores nulos en la base de datos? 
** # NaN len(data.loc[data['rain'].isnull()]) # ** Numero de registros en la base de datos?** data['name'].count() # ### Bonus # ** Los 5 registros con la acumulacion mas alta (anio-mes)** dataTemp = data[['year','month','rain']] dataTemp = dataTemp.dropna() group = dataTemp.groupby(['year','month']) group.sum().sort_values(by='rain')[-5:]
ejercicios/3_Pandas/Ejercicio_Precipitacion_Solucion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/yahyanh21/Machine-Learning-Homework/blob/main/Week9_Dropout.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="1z6oy-4HOcEk" outputId="84e0e608-40b4-4439-84ab-0d438ad5c465" # !pip install d2l # + colab={"base_uri": "https://localhost:8080/", "height": 471} id="U1R0Tot8RywO" outputId="ae4d5877-22dc-4905-be5c-4a7b669c3dd1" pip install matplotlib==3.0.2 # + id="QggI01jLOzue" import torch from torch import nn from d2l import torch as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 # In this case, all elements are dropped out if dropout == 1: return torch.zeros_like(X) # In this case, all elements are kept if dropout == 0: return X mask = (torch.rand(X.shape) > dropout).float() return mask * X / (1.0 - dropout) # + colab={"base_uri": "https://localhost:8080/"} id="QFY_REUyO0_B" outputId="4ea80b3e-bbaa-493b-af6a-d40acdabecab" X= torch.arange(16, dtype = torch.float32).reshape((2, 8)) print(X) print(dropout_layer(X, 0.)) print(dropout_layer(X, 0.5)) print(dropout_layer(X, 1.)) # + id="VIQX3jcHO28I" num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256 # + id="uBNjFZjnPOMk" dropout1, dropout2 = 0.2, 0.5 class Net(nn.Module): def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True): super(Net, self).__init__() self.num_inputs = num_inputs self.training = is_training self.lin1 = nn.Linear(num_inputs, num_hiddens1) self.lin2 = nn.Linear(num_hiddens1, num_hiddens2) self.lin3 = nn.Linear(num_hiddens2, num_outputs) self.relu = nn.ReLU() def forward(self, X): H1 = 
self.relu(self.lin1(X.reshape((-1, self.num_inputs)))) # Use dropout only when training the model if self.training == True: # Add a dropout layer after the first fully connected layer H1 = dropout_layer(H1, dropout1) H2 = self.relu(self.lin2(H1)) if self.training == True: # Add a dropout layer after the second fully connected layer H2 = dropout_layer(H2, dropout2) out = self.lin3(H2) return out net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2) # + colab={"base_uri": "https://localhost:8080/", "height": 262} id="unNiA4ZtPPh5" outputId="73f2fc34-712a-4b0f-d17f-523fb4ce0618" num_epochs, lr, batch_size = 10, 0.5, 256 loss = nn.CrossEntropyLoss(reduction='none') train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) trainer = torch.optim.SGD(net.parameters(), lr=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) # + id="YlEAJfOoPP-G" net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), # Add a dropout layer after the first fully connected layer nn.Dropout(dropout1), nn.Linear(256, 256), nn.ReLU(), # Add a dropout layer after the second fully connected layer nn.Dropout(dropout2), nn.Linear(256, 10)) def init_weights(m): if type(m) == nn.Linear: nn.init.normal_(m.weight, std=0.01) net.apply(init_weights); # + colab={"base_uri": "https://localhost:8080/", "height": 262} id="anUH0x8rPSA8" outputId="70cd6368-22ea-4de5-d69f-b37d8b64beb4" trainer = torch.optim.SGD(net.parameters(), lr=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) # + id="3VYOJs8mPTXW"
Week9_Dropout.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Scaling up ML using Cloud AI Platform # # In this notebook, we take a previously developed TensorFlow model to predict taxifare rides and package it up so that it can be run in Cloud AI Platform. For now, we'll run this on a small dataset. The model that was developed is rather simplistic, and therefore, the accuracy of the model is not great either. However, this notebook illustrates *how* to package up a TensorFlow model to run it within Cloud AI Platform. # # Later in the course, we will look at ways to make a more effective machine learning model. # ## Environment variables for project and bucket # # Note that: # <ol> # <li> Your project id is the *unique* string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: <b>Project ID:</b> cloud-training-demos </li> # <li> Cloud training often involves saving and restoring model files. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available). A common pattern is to prefix the bucket name by the project id, so that it is unique. Also, for cost reasons, you might want to use a single region bucket. </li> # </ol> # <b>Change the cell below</b> to reflect your Project ID and bucket name. # import os PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. 
us-central1 # For Python Code # Model Info MODEL_NAME = 'taxifare' # Model Version MODEL_VERSION = 'v1' # Training Directory name TRAINING_DIR = 'taxi_trained' # For Bash Code os.environ['PROJECT'] = PROJECT os.environ['BUCKET'] = BUCKET os.environ['REGION'] = REGION os.environ['MODEL_NAME'] = MODEL_NAME os.environ['MODEL_VERSION'] = MODEL_VERSION os.environ['TRAINING_DIR'] = TRAINING_DIR os.environ['TFVERSION'] = '1.14' # Tensorflow version # + language="bash" # gcloud config set project $PROJECT # gcloud config set compute/region $REGION # - # ### Create the bucket to store model and training data for deploying to Google Cloud Machine Learning Engine Component # + language="bash" # # The bucket needs to exist for the gsutil commands in next cell to work # gsutil mb -p ${PROJECT} gs://${BUCKET} # - # ### Enable the Cloud Machine Learning Engine API # The next command works with Cloud AI Platform API. In order for the command to work, you must enable the API using the Cloud Console UI. Use this [link.](https://console.cloud.google.com/project/_/apis/library) Then search the API list for Cloud Machine Learning and enable the API before executing the next cell. # Allow the Cloud AI Platform service account to read/write to the bucket containing training data. # + language="bash" # # This command will fail if the Cloud Machine Learning Engine API is not enabled using the link above. # echo "Getting the service account email associated with the Cloud AI Platform API" # # AUTH_TOKEN=$(gcloud auth print-access-token) # SVC_ACCOUNT=$(curl -X GET -H "Content-Type: application/json" \ # -H "Authorization: Bearer $AUTH_TOKEN" \ # https://ml.googleapis.com/v1/projects/${PROJECT}:getConfig \ # | python -c "import json; import sys; response = json.load(sys.stdin); \ # print (response['serviceAccount'])") # If this command fails, the Cloud Machine Learning Engine API has not been enabled above. 
# # echo "Authorizing the Cloud AI Platform account $SVC_ACCOUNT to access files in $BUCKET" # gsutil -m defacl ch -u $SVC_ACCOUNT:R gs://$BUCKET # gsutil -m acl ch -u $SVC_ACCOUNT:R -r gs://$BUCKET # error message (if bucket is empty) can be ignored. # gsutil -m acl ch -u $SVC_ACCOUNT:W gs://$BUCKET # - # ## Packaging up the code # # Take your code and put into a standard Python package structure. <a href="taxifare/trainer/model.py">model.py</a> and <a href="taxifare/trainer/task.py">task.py</a> containing the Tensorflow code from earlier (explore the <a href="taxifare/trainer/">directory structure</a>). # + language="bash" # find ${MODEL_NAME} # + language="bash" # cat ${MODEL_NAME}/trainer/model.py # - # ## Find absolute paths to your data # Note the absolute paths below. # + language="bash" # echo "Working Directory: ${PWD}" # echo "Head of taxi-train.csv" # head -1 $PWD/taxi-train.csv # echo "Head of taxi-valid.csv" # head -1 $PWD/taxi-valid.csv # - # ## Running the Python module from the command-line # #### Clean model training dir/output dir # + language="bash" # # This is so that the trained model is started fresh each time. However, this needs to be done before # # tensorboard is started # rm -rf $PWD/${TRAINING_DIR} # - # #### Monitor using Tensorboard # + language="bash" # # Setup python so it sees the task module which controls the model.py # export PYTHONPATH=${PYTHONPATH}:${PWD}/${MODEL_NAME} # # Currently set for python 2. To run with python 3 # # 1. Replace 'python' with 'python3' in the following command # # 2. 
Edit trainer/task.py to reflect proper module import method # python -m trainer.task \ # --train_data_paths="${PWD}/taxi-train*" \ # --eval_data_paths=${PWD}/taxi-valid.csv \ # --output_dir=${PWD}/${TRAINING_DIR} \ # --train_steps=1000 --job-dir=./tmp # + language="bash" # ls $PWD/${TRAINING_DIR}/export/exporter/ # - # %%writefile ./test.json {"pickuplon": -73.885262,"pickuplat": 40.773008,"dropofflon": -73.987232,"dropofflat": 40.732403,"passengers": 2} # + language="bash" # # This model dir is the model exported after training and is used for prediction # # # model_dir=$(ls ${PWD}/${TRAINING_DIR}/export/exporter | tail -1) # # predict using the trained model # gcloud ai-platform local predict \ # --model-dir=${PWD}/${TRAINING_DIR}/export/exporter/${model_dir} \ # --json-instances=./test.json # - # #### Clean model training dir/output dir # + language="bash" # # This is so that the trained model is started fresh each time. However, this needs to be done before # # tensorboard is started # rm -rf $PWD/${TRAINING_DIR} # - # ## Running locally using gcloud # + hiddenCell=true language="bash" # # Use Cloud Machine Learning Engine to train the model in local file system # gcloud ai-platform local train \ # --module-name=trainer.task \ # --package-path=${PWD}/${MODEL_NAME}/trainer \ # -- \ # --train_data_paths=${PWD}/taxi-train.csv \ # --eval_data_paths=${PWD}/taxi-valid.csv \ # --train_steps=1000 \ # --output_dir=${PWD}/${TRAINING_DIR} # - # Use TensorBoard to examine results. When I ran it (due to random seeds, your results will be different), the ```average_loss``` (Mean Squared Error) on the evaluation dataset was 187, meaning that the RMSE was around 13. # + language="bash" # ls $PWD/${TRAINING_DIR} # - # ## Submit training job using gcloud # # First copy the training data to the cloud. Then, launch a training job. # # After you submit the job, go to the cloud console (http://console.cloud.google.com) and select <b>AI Platform | Jobs</b> to monitor progress. 
# # <b>Note:</b> Don't be concerned if the notebook stalls (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. Use the Cloud Console link (above) to monitor the job. # + language="bash" # # Clear Cloud Storage bucket and copy the CSV files to Cloud Storage bucket # echo $BUCKET # gsutil -m rm -rf gs://${BUCKET}/${MODEL_NAME}/smallinput/ # gsutil -m cp ${PWD}/*.csv gs://${BUCKET}/${MODEL_NAME}/smallinput/ # + language="bash" # OUTDIR=gs://${BUCKET}/${MODEL_NAME}/smallinput/${TRAINING_DIR} # JOBNAME=${MODEL_NAME}_$(date -u +%y%m%d_%H%M%S) # echo $OUTDIR $REGION $JOBNAME # # Clear the Cloud Storage Bucket used for the training job # gsutil -m rm -rf $OUTDIR # gcloud ai-platform jobs submit training $JOBNAME \ # --region=$REGION \ # --module-name=trainer.task \ # --package-path=${PWD}/${MODEL_NAME}/trainer \ # --job-dir=$OUTDIR \ # --staging-bucket=gs://$BUCKET \ # --scale-tier=BASIC \ # --runtime-version=$TFVERSION \ # -- \ # --train_data_paths="gs://${BUCKET}/${MODEL_NAME}/smallinput/taxi-train*" \ # --eval_data_paths="gs://${BUCKET}/${MODEL_NAME}/smallinput/taxi-valid*" \ # --output_dir=$OUTDIR \ # --train_steps=10000 # - # Don't be concerned if the notebook appears stalled (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. # # <b>Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.</b> # ## Deploy model # # Find out the actual name of the subdirectory where the model is stored and use it to deploy the model. Deploying model will take up to <b>5 minutes</b>. # + language="bash" # gsutil ls gs://${BUCKET}/${MODEL_NAME}/smallinput/${TRAINING_DIR}/export/exporter # - # #### Deploy model : step 1 - remove version info # Before an existing cloud model can be removed, it must have any version info removed. 
If an existing model does not exist, this command will generate an error but that is ok. # + language="bash" # MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/${MODEL_NAME}/smallinput/${TRAINING_DIR}/export/exporter | tail -1) # # echo "MODEL_LOCATION = ${MODEL_LOCATION}" # # gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME} # - # #### Deploy model: step 2 - remove existing model # Now that the version info is removed from an existing model, the actual model can be removed. If an existing model is not deployed, this command will generate an error but that is ok. It just means the model with the given name is not deployed. # + language="bash" # gcloud ai-platform models delete ${MODEL_NAME} # - # #### Deploy model: step 3 - deploy new model # + language="bash" # gcloud ai-platform models create ${MODEL_NAME} --regions $REGION # - # #### Deploy model: step 4 - add version info to the new model # + language="bash" # MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/${MODEL_NAME}/smallinput/${TRAINING_DIR}/export/exporter | tail -1) # # echo "MODEL_LOCATION = ${MODEL_LOCATION}" # # gcloud ai-platform versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION # - # ## Prediction # + language="bash" # gcloud ai-platform predict --model=${MODEL_NAME} --version=${MODEL_VERSION} --json-instances=./test.json # + from googleapiclient import discovery from oauth2client.client import GoogleCredentials import json credentials = GoogleCredentials.get_application_default() api = discovery.build('ml', 'v1', credentials=credentials, discoveryServiceUrl='https://storage.googleapis.com/cloud-ml/discovery/ml_v1_discovery.json') request_data = {'instances': [ { 'pickuplon': -73.885262, 'pickuplat': 40.773008, 'dropofflon': -73.987232, 'dropofflat': 40.732403, 'passengers': 2, } ] } parent = 'projects/%s/models/%s/versions/%s' % (PROJECT, MODEL_NAME, MODEL_VERSION) response = api.projects().predict(body=request_data, 
name=parent).execute() print ("response={0}".format(response)) # - # ## Train on larger dataset # # I have already followed the steps below and the files are already available. <b> You don't need to do the steps in this comment. </b> In the next chapter (on feature engineering), we will avoid all this manual processing by using Cloud Dataflow. # # Go to http://bigquery.cloud.google.com/ and type the query: # <pre> # SELECT # (tolls_amount + fare_amount) AS fare_amount, # pickup_longitude AS pickuplon, # pickup_latitude AS pickuplat, # dropoff_longitude AS dropofflon, # dropoff_latitude AS dropofflat, # passenger_count*1.0 AS passengers, # 'nokeyindata' AS key # FROM # [nyc-tlc:yellow.trips] # WHERE # trip_distance > 0 # AND fare_amount >= 2.5 # AND pickup_longitude > -78 # AND pickup_longitude < -70 # AND dropoff_longitude > -78 # AND dropoff_longitude < -70 # AND pickup_latitude > 37 # AND pickup_latitude < 45 # AND dropoff_latitude > 37 # AND dropoff_latitude < 45 # AND passenger_count > 0 # AND ABS(HASH(pickup_datetime)) % 1000 == 1 # </pre> # # Note that this is now 1,000,000 rows (i.e. 100x the original dataset). Export this to CSV using the following steps (Note that <b>I have already done this and made the resulting GCS data publicly available</b>, so you don't need to do it.): # <ol> # <li> Click on the "Save As Table" button and note down the name of the dataset and table. # <li> On the BigQuery console, find the newly exported table in the left-hand-side menu, and click on the name. # <li> Click on "Export Table" # <li> Supply your bucket name and give it the name train.csv (for example: gs://cloud-training-demos-ml/taxifare/ch3/train.csv). Note down what this is. Wait for the job to finish (look at the "Job History" on the left-hand-side menu) # <li> In the query above, change the final "== 1" to "== 2" and export this to Cloud Storage as valid.csv (e.g. 
gs://cloud-training-demos-ml/taxifare/ch3/valid.csv) # <li> Download the two files, remove the header line and upload it back to GCS. # </ol> # # <p/> # <p/> # # ## Run Cloud training on 1-million row dataset # # This took 60 minutes and uses as input 1-million rows. The model is exactly the same as above. The only changes are to the input (to use the larger dataset) and to the Cloud MLE tier (to use STANDARD_1 instead of BASIC -- STANDARD_1 is approximately 10x more powerful than BASIC). At the end of the training the loss was 32, but the RMSE (calculated on the validation dataset) was stubbornly at 9.03. So, simply adding more data doesn't help. # + language="bash" # # XXXXX this takes 60 minutes. if you are sure you want to run it, then remove this line. # # OUTDIR=gs://${BUCKET}/${MODEL_NAME}/${TRAINING_DIR} # JOBNAME=${MODEL_NAME}_$(date -u +%y%m%d_%H%M%S) # CRS_BUCKET=cloud-training-demos # use the already exported data # echo $OUTDIR $REGION $JOBNAME # gsutil -m rm -rf $OUTDIR # gcloud ai-platform jobs submit training $JOBNAME \ # --region=$REGION \ # --module-name=trainer.task \ # --package-path=${PWD}/${MODEL_NAME}/trainer \ # --job-dir=$OUTDIR \ # --staging-bucket=gs://$BUCKET \ # --scale-tier=STANDARD_1 \ # --runtime-version=$TFVERSION \ # -- \ # --train_data_paths="gs://${CRS_BUCKET}/${MODEL_NAME}/ch3/train.csv" \ # --eval_data_paths="gs://${CRS_BUCKET}/${MODEL_NAME}/ch3/valid.csv" \ # --output_dir=$OUTDIR \ # --train_steps=100000 # - # ## Challenge Exercise # # Modify your solution to the challenge exercise in d_trainandevaluate.ipynb appropriately. Make sure that you implement training and deployment. Increase the size of your dataset by 10x since you are running on the cloud. Does your accuracy improve? # ### Clean-up # #### Delete Model : step 1 - remove version info # Before an existing cloud model can be removed, it must have any version info removed. 
# + language="bash" # gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME} # - # #### Delete model: step 2 - remove existing model # Now that the version info is removed from an existing model, the actual model can be removed. # + language="bash" # gcloud ai-platform models delete ${MODEL_NAME} # - # Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
courses/machine_learning/deepdive/03_tensorflow/e_ai_platform.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Why You Should Hedge Beta and Sector Exposures (Part II) # by <NAME> and <NAME> # # Part of the Quantopian Lecture Series: # # * [www.quantopian.com/lectures](https://www.quantopian.com/lectures) # * [github.com/quantopian/research_public](https://github.com/quantopian/research_public) # # # --- # # In the first lecture on [Why You Should Hedge Beta and Sector Exposure](quantopian.com/lectures/why-you-should-hedge-beta-and-sector-exposures-part-i), we covered the information coefficient (IC) and effective breadth, providing yet more reasons to make as many independent bets as possible. Here we expand upon the concepts detailed there by decomposing portfolios of varying numbers of securities to further explore the effects of systematic risk. import numpy as np import matplotlib.pyplot as plt # ## Hedging Beta and Sector Risk is Good for Allocators (Which is Good for You!) # # Let's work from two basic beliefs: # - You would like someone to fund your algorithm # - The institution that funds your algorithm is not going to allocate 100% of its money to you. In other words, your algorithm is one in a portfolio of algorithms. # # The implication of the second belief is subtle. Why should it matter that your high Sharpe algo is part of a portfolio? The key to understanding the importance of this and what it has to do with beta and sector exposure is the following mathematical result: # # **In a portfolio, stock specific risk can be diversified out while common factor risk cannot.** # <div class="alert alert-warning"> # <b>TL;DR:</b> Beta and sector exposure are **common factors**, i.e., they are among a handful of risk characteristics that are shared among all stocks. Risk exposure to common factors does not diversify away in a portfolio of algos. 
An allocator will not be able to make a large allocation to you if your algo presents common factor risk. The combination of many algos with modest common factor risk can lead to overwhelming common factor risk at the portfolio level. Allocators do not like this. If you want to get a large capital allocation, you must have low beta and sector exposure consistently over time. # </div> # # # Foundations # # ### Single Stock Risk Decomposition # # To build intuition, let's posit a single factor model: # # $$r_i = \alpha_i + \beta_i r_m + \epsilon_i$$ # # where $\alpha_i$ is the intercept, $\epsilon_i$ is the error, and $r_m$ is the market return. This is the [Capital Asset Pricing Model (CAPM)](https://www.quantopian.com/lectures/the-capital-asset-pricing-model-and-arbitrage-pricing-theory), which posits that the returns to a stock can be attributable to its beta-weighted exposure to the market and a return which is idiosyncratic to that stock. Two important assumptions here are that the $\epsilon_i$s are uncorrelated to the market and each other across stocks. See the [Lecture on Beta Hedging](https://www.quantopian.com/lectures/beta-hedging) for more background. # # In this case, the "risk", as measured by the variance, for an individual stock is: # # $$\sigma_i^2 = \beta_i^2 \sigma_m^2 + \sigma_{\epsilon_i}^2$$ # # A stocks variance is broken into the **common risk**, $\beta_i^2\sigma_m^2$, and **specific risk**, $\sigma_{\epsilon_i}$. **Common risk** is risk in the stock driven by market risk which is common among all stocks proportionate to the stock's beta. **Specific risk** is the risk that is unique to that individual stock. # # Let's look at two examples and decompose the risk into the percent due to common factor risk. 
def stock_risk(beta, market_vol, idio_vol):
    """Decompose a single stock's variance under a one-factor (CAPM) model.

    Parameters
    ----------
    beta : float
        The stock's market beta.
    market_vol : float
        Annualized market volatility (standard deviation).
    idio_vol : float
        Annualized idiosyncratic (stock-specific) volatility.

    Returns
    -------
    tuple of (float, float)
        (total variance, fraction of total variance due to common risk).
    """
    common_risk = (beta**2)*(market_vol**2)
    specific_risk = idio_vol**2
    total_risk = common_risk + specific_risk
    return total_risk, common_risk/total_risk

# We take two separate stocks, each with different market beta exposures and idiosyncratic volatility.

# Betas
b1 = 1.2
b2 = 1.1

# Market volatility
market_vol = 0.15

# Idiosyncratic volatilities
idio_vol_1 = 0.10
idio_vol_2 = 0.07

# +
total_1, pct_common_1 = stock_risk(b1, market_vol, idio_vol_1)
total_2, pct_common_2 = stock_risk(b2, market_vol, idio_vol_2)

# Parenthesized single-argument print behaves identically under Python 2
# (this notebook's declared kernel) and Python 3, unlike the original
# Python 2-only print statements.
print("Stock 1 risk (annualized standard deviation): %0.4f " % np.sqrt(total_1))
print("Stock 1: percent of total risk due to common risk: %0.4f " % pct_common_1)
print("\nStock 2 risk (annualized standard deviation): %0.4f " % np.sqrt(total_2))
print("Stock 2: percent of total risk due to common risk: %0.4f " % pct_common_2)
# -

# This is just looking at the breakdown of the risk associated with each individual stock. We can combine these into a portfolio to see how their combined volatility is affected by common factor risk.

# ### Two Stock Portfolio Risk Decomposition
#
# Now let's imagine you have a two stock portfolio with percentage weights $w_1$ and $w_2$. The risk of the portfolio (derived below), $\Pi$, under the one-factor model is then:
#
# $$\sigma_{\Pi}^2 = \overbrace{\sigma_m^2\left( w_1^2\beta_1^2 + w_2^2\beta_2^2 + 2w_1w_2\beta_1\beta_2 \right)}^{\text{common risk}} + \overbrace{w_1^2\epsilon_1^2 + w_2^2 \epsilon_2^2}^{\text{specific risk}}$$
#
# This is the simplest possible example of portfolio factor risk, one factor and two assets, yet we can already use it to gain intuition about portfolio risk and hedging.
# The weights for each security in our portfolio
w1 = 0.5
w2 = 0.5

def two_stocks_one_factor(w1, w2, b1, b2, market_vol, idio_vol_1, idio_vol_2):
    """Variance decomposition of a two-stock portfolio under a one-factor model.

    Parameters
    ----------
    w1, w2 : float
        Portfolio weights of stocks 1 and 2.
    b1, b2 : float
        Market betas of stocks 1 and 2.
    market_vol : float
        Annualized market volatility (standard deviation).
    idio_vol_1, idio_vol_2 : float
        Annualized idiosyncratic volatilities of stocks 1 and 2.

    Returns
    -------
    tuple of (float, float)
        (total portfolio variance, fraction of that variance due to common risk).
    """
    # The common-risk term carries the 2*w1*w2*b1*b2 cross term; the specific
    # risks are assumed uncorrelated, so they contribute only their own
    # weighted variances.
    common_risk = (market_vol**2)*(w1*w1*b1*b1 + w2*w2*b2*b2 + 2*w1*w2*b1*b2)
    specific_risk = w1*w1*idio_vol_1**2 + w2*w2*idio_vol_2**2
    total_risk = common_risk + specific_risk
    return total_risk, common_risk/total_risk

# The risk for a two stock, equally-weighted, long-only portfolio:
total, pct_common = two_stocks_one_factor(w1, w2, b1, b2, market_vol, idio_vol_1, idio_vol_2)

# Parenthesized print works identically under Python 2 (this notebook's
# declared kernel) and Python 3, unlike the original print statements.
print("Portfolio risk (annualized standard deviation): %0.4f " % np.sqrt(total))
print("Percent of total risk due to common risk: %0.4f" % pct_common)

# The astute reader will notice that the proportion of risk in the portfolio due to common factor risk is **larger for the portfolio** than for the weighted sum of the common risk proportion for the two components. To repeat the key point in this lecture: **In a portfolio, stock specific risk diversifies while common factor risk does not.**

# The risk for a two stock, beta-hedged long-short portfolio:

# +
w2 = -w1*b1/b2  # set weight 2 such that the portfolio has zero beta

total, pct_common = two_stocks_one_factor(w1, w2, b1, b2, market_vol, idio_vol_1, idio_vol_2)

print("Portfolio risk (annualized standard deviation): %0.4f " % np.sqrt(total))
print("Percent of total risk due to common risk: %0.4f" % pct_common)
# -

# Note that we eliminated **all** the common risk with a perfect beta hedge.
#
# # Portfolio Risk
#
# If $X$ is a column vector of n random variables, $X_1,\dots,X_n$, and $c$ is a column vector of coefficients (constants), then the [variance of the weighted sum](https://en.wikipedia.org/wiki/Variance) $c'X$ is
#
# $$\text{Var}(c'X) = c'\Sigma c$$
#
# where $\Sigma$ is the covariance matrix of the $X$'s.
#
# In our application, $c$ is our stock weight vector $w$ and $\Sigma$ is the covariance matrix of stock returns.
#
# $$\sigma_{\Pi}^2 = w' \Sigma w$$
#
# Just as we decompose the single stock risk above, we can decompose the covariance matrix to separate *common risk* and *specific risk*
#
# $$\Sigma = BFB' + D$$
#
# Thus
#
# $$\sigma_{\Pi}^2 = w'(BFB' + D)w$$
# $$\sigma_{\Pi}^2 = w'BFB'w + w'Dw$$
#
# Which for the two stock portfolio above works out to
#
# \begin{equation}
# \sigma_{\Pi}^2 =
# \overbrace{
# \begin{bmatrix} w_1 & w_2 \end{bmatrix}
# \begin{bmatrix} \beta_{1} \\ \beta_{2} \end{bmatrix}
# \sigma_m^2
# \begin{bmatrix} \beta_{1} & \beta_{2} \end{bmatrix}
# \begin{bmatrix} w_1 \\ w_2 \end{bmatrix}
# }^{\text{common risk}}
# + \overbrace{\begin{bmatrix} w_1 & w_2 \end{bmatrix}
# \begin{bmatrix} \epsilon_1^2 & 0\\ 0 & \epsilon_2^2 \end{bmatrix}
# \begin{bmatrix} w_1 \\ w_2 \end{bmatrix}}^{\text{specific risk}}
# \end{equation}
#
# If you work through this matrix multiplication, you get the stated result above
#
# $$\sigma_{\Pi}^2 = \overbrace{\sigma_m^2\left( w_1^2\beta_1^2 + w_2^2\beta_2^2 + 2w_1w_2\beta_1\beta_2 \right)}^{\text{common risk}} + \overbrace{w_1^2\epsilon_1^2 + w_2^2 \epsilon_2^2}^{\text{specific risk}}$$
#
# ### Multi-Factor Models
#
# Of course, we can expand the CAPM to include *additional* risk factors besides market beta. We could posit that there are in total $m$ risks which are *common* to all stocks.
#
# $$r_i = \alpha_i + \beta_{1,i} f_1 + \dots + \beta_{m,i} f_m + \epsilon_i$$
#
# or more concisely
#
# $$r_i = \alpha_i + \sum_{j=1}^m \beta_{j,i} f_j + \epsilon_i$$
#
# or, considering all stocks, $i$, from 1 to N, even more concisely, for a given period $t$,
#
# $$r = \alpha + Bf + \epsilon$$
#
# where $r$ is the Nx1 column vector of returns, $B$ is the Nx$m$ matrix of factor betas, $f$ is the Nx1 column of factor returns, and $\epsilon$ is the Nx1 column vector of idiosyncratic returns.
Finally,
#
# $$\sigma_{\Pi}^2 = w'BFB'w + w'Dw$$
#
# where $B$ is the Nx$m$ matrix of factor betas, $F$ is the $m$x$m$ covariance matrix of factor returns, and $D$ is a NxN matrix with the $\epsilon_i$'s on diagonal, and zeros everywhere else.
#
# With this result, *assuming we had a suitable risk model giving us the matrices $B$, $F$, and $D$*, we could calculate our portfolio risk and the proportion of risk coming from common risk.
#
# Likewise, just as we set $w_2$ above in the two stock case to the value that neutralized the exposure to the single factor $\beta$, in the multi-factor case we could use the factor betas matrix $B$ to construct a portfolio which is neutral to **all** common factors. **A portfolio which is neutral to all common factors has zero common factor risk.**
#
# # Portfolios of Algos
#
# Even without a risk model, we can get some intuition as to how the risk of a portfolio of algos looks.
#
# What does a resulting portfolio of algos look like when the individual algos have non-zero common risk? Taking some inspiration from a recent journal article [The Dangers of Diversification](http://www.iijournals.com/doi/abs/10.3905/jpm.2017.43.2.013?journalCode=jpm) by Garvey, Kahn, and Savi, imagine that each algo has a certain *budget of common risk* it can take. This budget is defined as the percent common risk of total risk in the algo.
#
# In the first case, we assume that all algos have this same budget (and use all the budget!) and the correlation between their common risks is 1.0. This is similar to the case of a single factor model.
#
#

def portfolio_risk_decomposition(budget=0.2, correl=1.0, algo_count=2, algo_total_risk=0.04):
    """Decompose the risk of an equal-weighted portfolio of algos.

    Parameters
    ----------
    budget : float
        Fraction of each algo's total variance that is common risk.
    correl : float
        Pairwise correlation between the algos' common-risk components.
    algo_count : int or numpy array
        Number of algos in the portfolio (an array vectorizes the result,
        as in the plot below).
    algo_total_risk : float
        Each algo's total risk (annualized standard deviation).

    Returns
    -------
    tuple
        (total portfolio variance, fraction of that variance due to common risk).
    """
    N = algo_count
    algo_common_risk = budget*(algo_total_risk**2)
    algo_idio_risk = algo_total_risk**2 - algo_common_risk
    # Equal weights 1/N; pairwise common-risk covariance is correl * common.
    w = 1./N
    covar = correl*algo_common_risk
    # N own-variance terms plus N^2 - N cross terms for the common part;
    # the idiosyncratic part diversifies: N * w^2 * idio = idio * w.
    common_risk = N*w*w*algo_common_risk + (N*N - N)*w*w*covar
    idio_risk = algo_idio_risk*w
    total_risk = common_risk + idio_risk
    return total_risk, common_risk/total_risk

a, b = portfolio_risk_decomposition(budget=0.2, algo_count=20, correl=1.0, algo_total_risk=0.04)
# Parenthesized print works identically under Python 2 (this notebook's
# declared kernel) and Python 3, unlike the original print statements.
print("Portfolio total risk: %.4f " % np.sqrt(a))
print("Portfolio percent of common risk: %.4f " % b)

algos = np.linspace(1,20)
plt.plot(algos, portfolio_risk_decomposition(budget=0.2, correl=1.0, algo_count=algos)[1])
plt.plot(algos, portfolio_risk_decomposition(budget=0.4, correl=1.0, algo_count=algos)[1])
plt.ylim([0,1]);
plt.title('Percent of Portfolio Risk due to Common Risk')
plt.xlabel('Number of Algos in Portfolio')
plt.ylabel('Percent of Portfolio of Algos Risk due to Common Risk')
plt.legend(['20% Single Algo Common Risk Budget', '40% Single Algo Common Risk Budget']);

# From this plot, you can see that from the allocator's perspective, a "small" budget that allows for 20% of individual algo total risk to be driven by common risk leads to a 20 algo portfolio **with 83%** of its risk driven by common risk! Ideally an allocator wants you to have **zero common factor risk**.

# <div class="alert alert-warning">
# <b>TL;DR:</b> Even if you can't predict portfolio risk and don't have a risk model to decompose risk, you can form a portfolio with **zero common risk** by hedging the beta exposure to common factors. The most important common factors in the US Equity market are market beta and sector beta. Hedge your beta and be sector neutral if you want a large allocation from any allocator.
# </div> # *This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
quantopian/lectures/Why_Hedge_II/notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Sales Analysis

# Import Libraries
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from itertools import combinations
from collections import Counter

# ## Load the Data

# +
# Read every monthly CSV in ./Sales_Data and stack them into a single frame.
# Collecting the frames in a list and concatenating once avoids the quadratic
# copying of calling pd.concat inside the loop, and drops the loop variable
# that shadowed the (Python 2) builtin name `file`.
files = os.listdir('./Sales_Data')
sales_data = pd.concat([pd.read_csv(f'./Sales_Data/{name}') for name in files])
# -

sales_data.head()

# ## Data Cleaning and Processing

sales_data.info()

# ### Deal with missing values

# Missing-value count per column, most-affected columns first.
missing_count = sales_data.isna().sum().sort_values(ascending=False)
missing_count

# Drop only the rows that are missing in every column.
sales_data = sales_data.dropna(how='all')
sales_data.info()

# ### Change column datatype

# +
# Get rid of header column in middle of data
# (each concatenated CSV repeats its header row as a data row).
sales_data = sales_data[sales_data.Product != 'Product']

# Change column datatype
sales_data['Quantity Ordered'] = pd.to_numeric(sales_data['Quantity Ordered'])
sales_data['Price Each'] = pd.to_numeric(sales_data['Price Each'])
# NOTE(review): the datetime format is inferred here; confirm against the raw
# CSVs if parsing ever becomes ambiguous or slow.
sales_data['Order Date'] = pd.to_datetime(sales_data['Order Date'])
sales_data.info()
# -

# ### Add additional columns

# #### Add Time Columns

sales_data['Month'] = sales_data['Order Date'].dt.month
sales_data['Year'] = sales_data['Order Date'].dt.year
sales_data['Day of Week'] = sales_data['Order Date'].dt.dayofweek
sales_data['Hour'] = sales_data['Order Date'].dt.hour
sales_data.head()

# #### Add City

# +
def get_city(address):
    """Return the city part of an address formatted 'street, city, ST zip'."""
    return address.split(",")[1].strip(" ")

def get_state(address):
    """Return the two-letter state code of an address formatted 'street, city, ST zip'."""
    return address.split(",")[2].split(" ")[1]

# A city name alone can be ambiguous across states, so append the state code.
sales_data['City'] = sales_data['Purchase Address'].apply(lambda x: f"{get_city(x)}, {get_state(x)}")
sales_data.head()
# -

# #### Add sales amount column

sales_data['Sales'] = sales_data['Quantity Ordered'] * sales_data['Price Each']
sales_data.head()

# ##
Exploratory Analysis and Visualization

# #### Number of orders

# One row per order: total units and revenue per Order ID.
orders = sales_data[['Order ID', 'Quantity Ordered', 'Sales']].groupby('Order ID').sum()
len(orders)

# #### Distribution of Quantity Ordered and Sales per Order

# +
# Side-by-side histograms of per-order quantity and per-order revenue.
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
fig.suptitle('Distribution per Order')
sns.histplot(ax=axes[0], data=orders, x="Quantity Ordered")
axes[0].set_title('Quantity')
# binwidth = 100 -> $100-wide revenue bins.
sns.histplot(ax=axes[1], data=orders, x="Sales", binwidth = 100)
axes[1].set_title('Sales Amount')
plt.show()
# -

# ## Ask and Answer Questions

# #### Question 1: What was the best month for sales? How much was earned that month?

# Total units and revenue per calendar month (1-12).
sales_by_month = sales_data[['Month', 'Quantity Ordered', 'Sales']].groupby(['Month']).sum()
sales_by_month['Sales in thousands'] = sales_by_month['Sales']/1000
sales_by_month

sns.set_palette("Set2")
ax = sns.barplot(x=sales_by_month.index, y='Sales in thousands', data=sales_by_month)
ax.set_ylabel('Sales (in thousands)')
plt.title('Sales By Month')
plt.show()

# #### Question 2: What was the best day of the week for sales? How much was earned?

# 'Day of Week' comes from pandas dt.dayofweek: 0 = Monday ... 6 = Sunday.
sales_by_day = sales_data[['Day of Week', 'Quantity Ordered', 'Sales']].groupby(['Day of Week']).sum()
sales_by_day['Sales in thousands'] = sales_by_day['Sales']/1000
sales_by_day

ax = sns.barplot(x=sales_by_day.index, y='Sales in thousands', data=sales_by_day)
ax.set_ylabel('Sales (in thousands)')
plt.title('Sales By Day of the Week')
plt.show()

# #### Question 3: What time of the day has the best sales?

# Total units and revenue per hour of the day (0-23).
sales_by_time = sales_data[['Hour', 'Quantity Ordered', 'Sales']].groupby(['Hour']).sum()
sales_by_time['Sales in thousands'] = sales_by_time['Sales']/1000
sales_by_time

ax = sns.lineplot(x=sales_by_time.index, y='Sales in thousands', data=sales_by_time)
ax.set_ylabel('Sales (in thousands)')
# Force a tick for every hour so the x-axis is readable.
plt.xticks(range(24))
plt.grid()
plt.title('Sales By Hour')
plt.show()

# #### Question 4: What city had the best sales?
# Total units and revenue per city (the City column already includes the state).
sales_by_city = sales_data[['City', 'Quantity Ordered', 'Sales']].groupby(['City']).sum()
sales_by_city['Sales in thousands'] = sales_by_city['Sales']/1000
sales_by_city = sales_by_city.sort_values(by=['Sales in thousands'], ascending = False)
sales_by_city

# Horizontal bars: city names read better on the y-axis.
ax = sns.barplot(y=sales_by_city.index, x='Sales in thousands', data=sales_by_city)
ax.set_xlabel('Sales (in thousands)')
plt.title('Sales By City')
plt.show()

# #### Question 5: Which product sold the most?

sales_by_product = sales_data[['Product', 'Quantity Ordered', 'Sales']].groupby(['Product']).sum()
sales_by_product['Quantity in thousands'] = sales_by_product['Quantity Ordered']/1000
sales_by_product['Sales in thousands'] = sales_by_product['Sales']/1000
# Average only the column we need. The original
# `sales_data.groupby('Product').mean()['Price Each']` averaged every column
# first, which raises a TypeError on non-numeric columns in pandas >= 2.0;
# selecting the column before .mean() gives the identical Series on all versions.
sales_by_product['Price Each'] = sales_data.groupby('Product')['Price Each'].mean()
sales_by_product = sales_by_product.sort_values(by=['Quantity Ordered'], ascending = False)
sales_by_product

# +
# Bars: units sold (left axis); line: average unit price (right axis).
ax = sns.barplot(x=sales_by_product.index, y='Quantity in thousands', data=sales_by_product)
ax.set_ylabel('Quantity Ordered (in thousands)')
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)

ax2 = ax.twinx()
ax2 = sns.lineplot(x=sales_by_product.index, y='Price Each', data=sales_by_product)
ax2.set_ylabel('Price ($USD)')
plt.title('Sales By Product')
plt.show()
# -

# #### Question 6: What products are often sold together?
# Keep only line items from orders that contain more than one product
# (an Order ID that appears multiple times).
df = sales_data[sales_data['Order ID'].duplicated(keep=False)]
grouped_products = df.groupby('Order ID')['Product'].apply(', '.join).reset_index()
# Count how often each exact product bundle occurs, most common first.
grouped_products = (
    grouped_products.groupby(['Product'])
    .size()
    .reset_index(name='Count')
    .sort_values(by='Count', ascending = False)
)
grouped_products.head(10)

# +
df = sales_data[sales_data['Order ID'].duplicated(keep=False)]
grouped_products2 = df.groupby('Order ID')['Product'].apply(', '.join).reset_index()

# Tally every unordered pair of products that appears within the same order.
count = Counter(
    pair
    for row in grouped_products2['Product']
    for pair in combinations(row.split(', '), 2)
)

for key, value in count.most_common(10):
    print(key, value)
SalesAnalysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.append("../../") import numpy as np from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import WhiteKernel, RBF, ConstantKernel as C import matplotlib.pyplot as plt from core.vhgpr import VHGPR from fourbranches import S, f, r from core.inputs import GaussianInputs import time plt.rcParams.update({'font.size': 16}) def plot_prediction(vhgp): x = np.linspace(-5, 5, 80) y = np.linspace(-5, 5, 80) meshx, meshy = np.meshgrid(x,y) truefuncf = [[f(np.array([[j,i]]))[0] for j in x]for i in y] truefuncg = [[r(np.array([[j,i]]))[0] for j in x]for i in y] meshsample = np.array([[[j,i] for j in x] for i in y]) predresults = vhgp.predict(meshsample.reshape(6400,2)) predfuncf = predresults[0].reshape(80,80) predfuncg = np.sqrt(np.exp(predresults[2].reshape(80,80))) plt.figure(figsize = (4,4)) axes1 = plt.contour(meshx, meshy, truefuncf, colors = "Black") axes2 = plt.contour(meshx, meshy, predfuncf, colors = "Red") plt.clabel(axes1) plt.clabel(axes2) plt.title('mean') plt.xlabel('$x_1$') plt.ylabel('$x_2$') plt.show() plt.figure(figsize = (4,4)) axes1 = plt.contour(meshx, meshy, truefuncg, colors = "Black") axes2 = plt.contour(meshx, meshy, predfuncg, colors = "Red") plt.plot(DX[:,0],DX[:,1],'o', markersize=3) plt.clabel(axes1) plt.clabel(axes2) plt.title('std') plt.xlabel('$x_1$') plt.ylabel('$x_2$') plt.show() dim = 2 mean, cov = np.zeros(dim), np.eye(dim) domain = np.array([[-5,5]]*dim) inputs = GaussianInputs(mean, cov, domain, dim) np.random.seed(0) DX = inputs.sampling(300, True) DY = S(DX) # + tags=[] kernelf = C(10.0, (1e-1, 1e2)) * RBF((5), (1e-1, 1e2)) kernelg = C(2, (1e-1, 1e1)) * RBF((2), (1e-1, 1e1)) vhgpr = VHGPR(kernelf, kernelg) vhgpr.fit(DX, DY) # - plot_prediction(vhgpr)
HGPextreme/examples/fourbranches/func_prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np

# Symmetric 2x2 matrix: eigenvalues are 5 and 3.
A = np.array([[4, 1], [1, 4]])
A_eigens = np.linalg.eig(A)
print(A_eigens)

# %matplotlib inline
import matplotlib.pyplot as plt


def plot_vector2d(vector2d, origin=(0, 0), **options):
    """Draw *vector2d* as an arrow starting at *origin* on the current axes."""
    # NOTE: default was a mutable list; a tuple avoids the shared-default pitfall.
    return plt.arrow(origin[0], origin[1], vector2d[0], vector2d[1],
                     head_width=0.2, head_length=0.3,
                     length_includes_head=True, **options)


# Unit vectors along the four axis directions, as 2x1 column vectors.
vector_original_1 = np.array([[1], [0]])
vector_original_2 = np.array([[0], [1]])
vector_original_3 = np.array([[0], [-1]])
vector_original_4 = np.array([[-1], [0]])
# Their images under A, flattened back to 1-D for plotting.
vector_transformed_1 = np.matmul(A, vector_original_1).T[0]
vector_transformed_2 = np.matmul(A, vector_original_2).T[0]
vector_transformed_3 = np.matmul(A, vector_original_3).T[0]
vector_transformed_4 = A.dot(vector_original_4).T[0]

# BUGFIX: np.linalg.eig returns eigenvectors as the COLUMNS of A_eigens[1]
# (v[:, i] pairs with eigenvalue w[i]); the original code plotted ROWS
# (A_eigens[1][0]) and paired them with the wrong eigenvalue labels.
plot_vector2d(A_eigens[1][:, 0] * A_eigens[0][0], color="r",
              label=f"Eigenvalue={A_eigens[0][0]}", linestyle="dotted")
plot_vector2d(A_eigens[1][:, 1] * A_eigens[0][1], color="b",
              label=f"Eigenvalue={A_eigens[0][1]}", linestyle="dotted")
plot_vector2d(vector_original_1.T[0], color="k", label="(1, 0)")
plot_vector2d(vector_original_2.T[0], color="g", label="(0, 1)")
plot_vector2d(vector_original_3.T[0], color="y", label="(0, -1)")
plot_vector2d(vector_original_4.T[0], color="m", label="(-1, 0)")
# plot_vector2d(vector_transformed_1, color="k", label="(1, 0) transformed")
# plot_vector2d(vector_transformed_2, color="g", label="(0, 1) transformed")
# plot_vector2d(vector_transformed_3, color="y", label="(0, -1) transformed")
# plot_vector2d(vector_transformed_4, color="m", label="(-1, 0) transformed")
# plt.axis([-10, 10, -10, 10])
plt.axis('equal')
x_ticks = np.arange(-8, 8, 1)
y_ticks = np.arange(-5, 5, 1)
plt.xticks(x_ticks)
plt.yticks(y_ticks)
# plt.grid(axis='x', linewidth=1, linestyle='--', color='0.75')
# plt.grid(axis='y', linewidth=1, linestyle='--', color='0.75')
plt.legend()
plt.grid()
# plt.show()
plt.savefig('Fig_00.png', dpi=300)
# -

# SVD of a symmetric 3x3 matrix.
import numpy as np
A = np.array([[2, 1, 1], [1, 2, 1], [1, 1, 2]])
A_svd = np.linalg.svd(A)
print(A_svd)
Ch5-Singular Value Decomposition/class examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # <center><img src="images/data.png" alt="graph" style="width:250px"></center> # # # # Generating synthetic data # *** # <br> # # ## Table of Contents # # #### [1. Introduction](#Intro) # # #### [2.Data Exploration - Palmer Archipelago (Antartica) Penguin Dataset](#Exploration) # &nbsp;&nbsp;&nbsp;&nbsp;[- Data Cleaning](#Clean)<br> # &nbsp;&nbsp;&nbsp;&nbsp;[- Visualise the data](#Visualise)<br> # # # #### [3. ](#) # &nbsp;&nbsp;&nbsp;&nbsp;[- ](#)<br> # # #### [4. ](#) # &nbsp;&nbsp;&nbsp;&nbsp;[- ](#)<br> # # #### [5. ](#) # # <br> # # *** # # <center> 1. Introduction <center> # *** # Synthetic data is that produced by an algorithm which serves as an alternative to real-world or "authentic" data. That is, it is computer generated data rather than the collection of real-world measurements. Although technically articifial, it is modelled on and represents real-world phenomena in a mathematical and statistical sense. For this reason, it is as valuable as real data within its context. # # Synthetic data is being used to train machine learning algorithms and to validate mathematical models. Some specific examples, include Amazon's alexa which uses synthetic data to train its language system, while American Express uses it for the improvement of fraud detection. [https://www.statice.ai/post/types-synthetic-data-examples-real-life-examples] # # This purpose of this project is to generate synthetic data for learning purposes, using the popular Palmer Archipelago (Antartica) Penguin Data Set. # # <br> # *** # # <br> # # <center><img src="images/palmer_penguin.png" alt="Palmer Penguins" style="width:200px"></center> # # # <center>2. 
Data Exploration</center> # # ### <center><i>The Palmer Archipelago (Antarctica) Penguin Data Set</i></center> # # # *** # # <br> # # The Palmer Archipelago (Antartica) Penguin Data Set is often termed the new Iris data set due to its popularity amongst those new to data science. The data was originally collected by Dr. <NAME> and the Palmer Station, Antarctica LTER and focuses on three penguin species found in the Palmer Archipelago Islands, Antartica.[Gorman KB, Williams TD, Fraser WR (2014) Ecological Sexual Dimorphism and Environmental Variability within a Community of Antarctic Penguins (Genus Pygoscelis). PLoS ONE 9(3): e90081. doi:10.1371/journal.pone.0090081] The [data set](https://www.kaggle.com/parulpandey/palmer-archipelago-antarctica-penguin-data) which will be used in this project was sourced from [Kaggle](https://www.kaggle.com/). # # There are 17 data points in the data set and for the purpose of this project, the <b>seven attributes</b> below will be used: # # - <b>species</b>: penguin species (Chinstrap, Adélie, or Gentoo) # - <b>culmen_length_mm</b>: culmen length in mm # - <b>culmen_depth_mm</b>: culmen depth in mm # - <b>flipper_length_mm</b>: flipper length in mm # - <b>body_mass_g</b>: body mass in grams # - <b>island</b>: the island name (Dream, Torgersen, or Biscoe) in the Palmer Archipelago, Antarctica # - <b>sex</b>: penguin sex # # <br> # # The <b>culmen</b> is the upper margin of the beak, the length and width measurements are depicted below. # # <img src="images/culmen.jpeg" alt="Culmen" style="width:400px"> # # # <br> # # ### Import libraries # *** # + # Dataframes. import pandas as pd # Machine learning library. import sklearn as sk # Fill missing values. from sklearn.impute import SimpleImputer # Plotting. import matplotlib.pyplot as plt # Stylish plots. import seaborn as sns # - # <br> # # ### Load data # *** # Read in csv data. 
# Load the Palmer Penguins CSV (pd imported in the notebook's import cell).
data = pd.read_csv('data/penguins_lter.csv')
data.head()

# <br>
#
# Removing columns that will not be used.

# Remove columns that will not be used.
data = data.drop(labels=['studyName', 'Sample Number', 'Region', 'Stage',
                         'Individual ID', 'Clutch Completion', 'Date Egg',
                         'Delta 15 N (o/oo)', 'Delta 13 C (o/oo)', 'Comments'],
                 axis=1)

# Get basic info about dataset.
data.info()

# Statistical summary.
data.describe()

# <br>
#
# ## Clean the data
# ***

# Check for null values.
data.isnull().sum()

# Eyeball data to see where Null values are.
pd.set_option('display.max_rows', None)

# <br>
#
# After a visual check of the data, it was found that the penguins at index 3
# and 339 had no values entered in any of the columns other than species and
# the decision was made to fill the missing values rather than delete the rows
# entirely. The same process will be applied with the missing sex values with
# the implementation below.
#
# <br>
#
# #### Fill missing values

# Code adapted from https://www.kaggle.com/parulpandey/penguin-dataset-the-new-iris/notebook
# Fill in missing values with the most frequent occurrence in each column.
# SimpleImputer comes from the notebook's import cell.
imputer = SimpleImputer(strategy='most_frequent')
data.iloc[:, :] = imputer.fit_transform(data)

# Check data again.
data.isnull().sum()

# <br>
#
# #### Convert values in Sex column from strings to integers

# BUGFIX: `sk.preprocessing` is not guaranteed to exist after a bare
# `import sklearn as sk` — submodules must be imported explicitly.
from sklearn.preprocessing import LabelEncoder

# Convert sex from a string category to an integer code.
lb = LabelEncoder()
data["Sex"] = lb.fit_transform(data["Sex"])

# Check the Sex column.
data.Sex.head()

# <br>
#
# ##### NOTE(review): codes are assigned alphabetically by LabelEncoder, so
# presumably 2 denotes MALE and 1 FEMALE (with 0 for any '.' placeholder) —
# verify with lb.classes_ rather than relying on this comment.

# <br>
#
# #### Check species count

# Count of each species.
data['Species'].value_counts()

# ***
#
# End
Generating-synthetic-data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Importing the necessary libraries

from pandasdmx import Request as rq

# ## Connecting to the UNESCO web service

# Subscription key is redacted; supply your own UIS API key.
auth = {'Ocp-Apim-Subscription-Key': '<KEY>'}
uis = rq('unesco', headers = auth)

# ## Downloading the SDMX dataflow

uis_dflow = uis.dataflow(url = "http://api.uis.unesco.org/sdmx/dataflow/UNESCO/all/latest")

# ### The datasets of the UNESCO data service

uis_dflow.write().dataflow

# We are interested in the **Research and experimental development**, or **RD** dataset

# ## The RD dataset

dsd_response = uis.get(url = 'http://api.uis.unesco.org/sdmx/datastructure/UNESCO/all/latest?references=children')

# ### Cross domain concepts

dsd_response.write().conceptscheme.loc['CROSS_DOMAIN_CONCEPTS']

# ### RD Concepts

dsd_response.write().conceptscheme.loc['RD']

# ### Datastructure

dflow_response = uis.get(url = 'http://api.uis.unesco.org/sdmx/dataflow/UNESCO/All/latest?references=datastructure')
dsd = dflow_response.datastructure['RD']
dsd

# ### Measures
# Measures differ based on having so-called primary and observational value

dsd.measures.aslist()

# ### Dimension groups

dsd.dimensions.aslist()

# ### Attributes

dsd.attributes.aslist()

# ### Codelist

rd_response = uis.get(url = 'http://api.uis.unesco.org/sdmx/datastructure/UNESCO/RD/latest?references=children')
codelist = rd_response.write().codelist
codelist

# ## The dimensions under the 'UNIT_MEASURE' category

codelist.loc['UNIT_MEASURE']

# From these, first I am interested in the 'Per capita - constant PPP $' values

# ## Downloading the table

resource = 'RD'
uis_keys = {'CAP_PPP_CONST'}
dtables = '.PPP_CONST...........' # 'GERD...........' # 'ALL'
api_format = 'sdmx-generic-2.1' # 'compact' or 'generic'
period = [2015, 2016]
url = 'https://api.uis.unesco.org/sdmx//data/UNESCO,RD,1.0/.CAP_PPP_CONST..........?format=sdmx-generic-2.1&startPeriod=2016&endPeriod=2016&locale=en&subscription-key=<KEY>'
dsd

uis_resp = uis.get(resource_type = 'data', resource_id = 'RD',
                   params = {'key': '.CAP_PPP_CONST...........',
                             'format': 'sdmx-generic-2.1',
                             'subscription-key' : '<KEY>'})
uis_resp.url

uis_data = uis_resp.data
len(list(uis_data.series))

uis_iter = (s for s in uis_data.series) # Creating an iterator from the Series

data = uis_resp.write(list(uis_iter)) # Creating a dataframe
data


def dl_rd(tables):
    """Download the requested RD tables for 2015-2016 and return the response.

    *tables* is an SDMX key string (e.g. '.CAP_PPP_CONST...........').
    """
    uis_resp = uis.get(resource_type = 'data', resource_id = 'RD',
                       url = 'https://api.uis.unesco.org/sdmx//data/UNESCO,' + 'RD' + ',1.0/' + tables,
                       params = {'startPeriod': 2015, 'endPeriod': 2016,
                                 'format': 'sdmx-generic-2.1',
                                 'subscription-key' : '<KEY>',
                                 'dsd' : dsd})
    # BUGFIX: the response was built but never returned, so the function
    # always yielded None and the download result was discarded.
    return uis_resp


example = uis(url = 'http://api.uis.unesco.org/sdmx/dataflow/UNESCO/INNOV/latest?references=datastructure&format=sdmx-json')
UNESCO R&D/UNESCO R&D.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Waterfall charts of an electricity-saving plan, drawn first with
# matplotlib (stacked bars with a hidden "Base" offset) and then with
# Plotly's native Waterfall trace.

# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode
init_notebook_mode(connected = True)
# -

# ### With matplotlib

# +
# Sheet layout: first 3 rows are headers to skip; a "Base" column holds the
# invisible offset that floats each bar — TODO confirm against the workbook.
df = pd.read_excel("../data/electricity_plan.xlsx", skiprows = 3)
#fill nan value with 0
df = df.fillna(0)
df

# +
# Each column from "Current level" on becomes one bar; bottom=df["Base"]
# floats it to waterfall height. width=35 makes the bars touch.
plt.figure(figsize = (20,10))
df.loc[:,"Current level":].plot(kind = "bar", bottom = df["Base"], width = 35)
plt.xticks([])
plt.title("My electricity saving plan")
plt.ylabel("kWh consumption")
plt.tight_layout()
plt.savefig("../output/waterfall chart1.jpeg", dpi = 300)
plt.show()

# +
# Color-code the bars: blue = totals, green = rises, red = falls.
colors = ["royalblue","green","green","red","red","red","royalblue", "red","red","red","royalblue"]
df.loc[:,"Current level":].plot(kind = "bar", bottom = df["Base"], width = 35, color = colors)
plt.xticks([])
plt.legend(bbox_to_anchor = (1.1, -0.05), ncol = 2)
plt.title("My electricity saving plan")
plt.ylabel("kWh consumption")
#plt.savefig("output/waterfall chart1.jpeg", dpi = 300)

# +
colors = ["royalblue","green","green","red","red","red","royalblue", "red","red","red","royalblue"]
# NOTE: despite the name, `fig` is the Axes returned by DataFrame.plot.
fig = df.loc[:,"Current level":].plot(kind = "bar", bottom = df["Base"], width = 1, color = colors)
# Pick one representative patch per legend category; indices 0/20/40 assume
# the patch layout produced by this particular frame — verify if data changes.
selected_patches = fig.patches[0], fig.patches[20], fig.patches[40]
plt.legend(selected_patches, ["Base", "Rise", "Fall"], loc = "upper right")
plt.xticks(ticks = np.arange(0, len(df)), labels = df.columns[1:], rotation = 90)
plt.title("My electricity saving plan")
plt.ylabel("kWh consumption")
plt.savefig("../output/waterfall chart2.jpeg", dpi = 300)
# -

# ### Restructure the pandas dataframe and plot it

# +
plt.figure(figsize = (6, 6))
colors = ["royalblue","green","green","red","red","red", "royalblue", "red", "red", "red", "royalblue"]
# Transpose so each plan step is a row, then take the row-wise max as bar height.
fig = df.T[1:].max(axis = 1).plot(kind = "bar", bottom = df["Base"], width = 0.8, color = colors)
# Patch indices 0/2/4 again depend on this exact layout — TODO confirm.
selected_patches = fig.patches[0], fig.patches[2], fig.patches[4]
plt.legend(selected_patches, ["Base value", "Rise", "Fall"], loc = "upper right")
plt.title("My electricity saving plan")
plt.ylabel("kWh consumption")
plt.tight_layout()
plt.savefig("../output/waterfall chart2.jpeg", dpi = 300)
# -

# ### With Plotly
#
# https://plotly.com/python/waterfall-charts/#horizontal-waterfall-chart

# +
# Sheet2 holds the long-format data Plotly's Waterfall expects:
# a label column ("Values"), a value column ("kWh") and a "measure" column.
df = pd.read_excel("../data/electricity_plan.xlsx", sheet_name = "Sheet2", skiprows = 3)
df

# +
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode
init_notebook_mode(connected = True)

fig = go.Figure()
fig.add_trace(go.Waterfall(x = df["Values"],
                           y = df["kWh"],
                           measure = df["measure"].tolist(),
                           base = 0, #by default
                           connector = {"line":{"dash":"dot"}}, #get dotted line as connector
                           textposition = "outside",
                           text = df["kWh"].tolist(),
                           orientation = "v" #by default
                           ))
fig.update_layout(title = "My electricity saving plan for next six months")
#Set y-limit
fig.update_yaxes(range = (50, 200), title = "kWh")
fig.show()

# +
# Same chart with a two-level x-axis (phase grouping) and custom colors
# for decreasing / increasing / total bars.
fig = go.Figure()
fig.add_trace(go.Waterfall(x = [["Initial","Short-term measure","Short-term measure", "Short-term measure","Short-term measure","Short-term measure", "Intermediate", "Long-term measure","Long-term measure", "Long-term measure","Final"], df["Values"]],
                           y = df["kWh"],
                           measure = df["measure"].tolist(),
                           base = 0, #by default
                           #get dotted line as connector
                           connector = {"line":{"dash":"dot"}},
                           textposition = "outside",
                           text = df["kWh"].tolist(),
                           orientation = "v",
                           decreasing = {"marker":{"color":"Maroon", "line":{"color":"red", "width":2}}},
                           increasing = {"marker":{"color":"Teal", "line":{"color":"Aquamarine","width": 3}}},
                           totals = {"marker":{"color":"deep sky blue", "line":{"color":"blue", "width":3}}}
                           ))
fig.update_layout(title = "My electricity saving plan for next six months", height = 600, width = 800)
#Set y-limit
fig.update_yaxes(range = (0, 200), title = "kWh")
fig.show()
# -
notebooks/.ipynb_checkpoints/Waterfall Charts-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Annotate BED intervals with their nearest RefSeq gene (by TSS) and the
# genomic feature they overlap (exon / other feature / intragenic / intergenic).

# +
import bisect
from collections import defaultdict
import gzip
import io
import os

import pandas as pd
from tqdm.notebook import tqdm
# -

wd = '~'
hd = os.getcwd() + '/'
# Reference GTF lives two directories above the working directory.
hg38_gtf = f'{"/".join(hd.split("/")[0:-2])}/hg38.ncbiRefSeq.gtf.gz'

# +
# Collect every .bed file under wd (recursively).
bed_files = []
for head, directory, files in os.walk(wd):
    for file in files:
        if file.endswith('.bed'):
            bed_files.append(f'{head}/{file}')
# -


def binary_search(site, feature_spans):
    """Locate the genes flanking *site* = (chrom, position).

    *feature_spans* maps chrom -> sorted list of (tss_position, gene_name).
    Returns a dict with the upstream/downstream gene ids, their TSS
    positions, signed distances, and the nearer of the two as
    'bookmark_gene'.

    NOTE(review): when the site precedes every TSS on the chromosome,
    bisect returns 0 and index -1 wraps to the LAST gene — confirm this
    wrap-around is intended.
    """
    search_features = [x[0] for x in feature_spans[site[0]]]
    bisect_index = bisect.bisect(search_features, site[1])
    upstream_gene = feature_spans[site[0]][bisect_index - 1]
    # Negative when the upstream TSS lies before the site.
    upstream_dist = upstream_gene[0] - site[1]
    try:
        downstream_gene = feature_spans[site[0]][bisect_index]
    except IndexError:
        # Site is past the last TSS: no downstream gene.
        # BUGFIX: was the string 'NA', which the return below indexed,
        # yielding 'N' and 'A' instead of 'NA'/'NA'.
        downstream_gene = ('NA', 'NA')
        downstream_dist = 'NA'
        bookmark = upstream_gene
    else:
        downstream_dist = downstream_gene[0] - site[1]
        bookmark = upstream_gene if abs(upstream_dist) < downstream_dist else downstream_gene
    return dict(upstream_gene_id=upstream_gene[1],
                upstream_gene_start=upstream_gene[0],
                feature_upstream_dist=upstream_dist,
                downstream_gene_id=downstream_gene[1],
                downstream_gene_start=downstream_gene[0],
                feature_downstream_dist=downstream_dist,
                bookmark_gene=bookmark[1])


def get_book_mark_info(site, bookmark_gene):
    """Classify *site* relative to *bookmark_gene*'s records.

    *bookmark_gene* is the list of GTF records for one gene, with the
    transcript record first. Returns 'exon', the first other overlapping
    feature, 'intragenic' (inside the gene but no sub-feature), or
    'intergenic' (outside the gene span).
    """
    if bookmark_gene[0]['start'] <= site[1] <= bookmark_gene[0]['end']:
        feature_hits = []
        for exon in bookmark_gene[1:]:
            if exon['start'] <= site[1] <= exon['end']:
                if exon['feature'] != 'transcript':
                    feature_hits.append(exon['feature'])
        if feature_hits:
            # Prefer 'exon' over any other overlapping feature type.
            if 'exon' in feature_hits:
                return 'exon'
            else:
                return feature_hits[0]
        return 'intragenic'
    else:
        return 'intergenic'


def update_coefs_meta(coefs, gene_reference, feature_spans):
    """Annotate each 'chrom:start:end' key of *coefs* in place.

    Adds nearest-gene info (binary_search) and the hit type
    (get_book_mark_info) to each site's dict.
    """
    for site in coefs:
        chrom, start, end = site.split(':')
        # BUGFIX: this function previously referenced an undefined name
        # `pos` (NameError); the site's start coordinate is what both
        # lookups need.
        bookmark_info = binary_search((chrom, int(start)), feature_spans)
        bookmark_info['hit_type'] = get_book_mark_info(
            (chrom, int(start)), gene_reference[bookmark_info['bookmark_gene']])
        bookmark_info['chrom'] = chrom
        bookmark_info['pos'] = start
        coefs[site].update(bookmark_info)


def format_annotations(sites, output_path=None):
    """Build a DataFrame from *sites*; optionally write it as TSV."""
    sites_df = pd.DataFrame(sites)
    if output_path:
        sites_df.to_csv(output_path, sep='\t', index=False)
    return sites_df


# ### Site Annotation and Export

# +
# Import reference annotations: group GTF records by gene_name.
hg38_refgene_annotations = {}
with io.BufferedReader(gzip.open(hg38_gtf, 'rb')) as ref:
    for b_line in tqdm(ref):
        line = b_line.decode()
        line_info = line.strip().split('\t')
        chrom, _, feature_type, start, end, score, strand, frame = line_info[0:-1]
        # Parse the semicolon-separated attribute column into a dict.
        transcript_info = {}
        for info in line_info[-1].split(';'):
            seg = info.strip().split(' ')
            if len(seg) == 2:
                transcript_info[seg[0]] = seg[1].replace('"', '')
        info = dict(chrom=chrom, feature=feature_type, start=int(start),
                    end=int(end), strand=strand, frame=frame,
                    attributes=transcript_info)
        if transcript_info['gene_name'] not in hg38_refgene_annotations:
            hg38_refgene_annotations[transcript_info['gene_name']] = [info]
        else:
            hg38_refgene_annotations[transcript_info['gene_name']].append(info)
# -

# +
# One (TSS, gene_name) tuple per gene, keyed by chromosome; the TSS is the
# start for '+' strand genes and the end for '-' strand genes.
feature_spans = defaultdict(list)
count = 0
for feature, feature_info in tqdm(hg38_refgene_annotations.items()):
    transcript_info = feature_info[0]
    if transcript_info['feature'] != 'transcript':
        continue
    tss_start = transcript_info['start'] if transcript_info['strand'] == '+' else transcript_info['end']
    feature_spans[transcript_info['chrom']].append(
        (tss_start, transcript_info['attributes']['gene_name']))
# -

# Sort each chromosome's TSS list so bisect can be used.
for feature_list in feature_spans.values():
    feature_list.sort(key=lambda x: x[0])

# Annotate every interval of every BED file and write '<name>.anno.tsv'.
for bed_file in tqdm(bed_files):
    bed_info = []
    with open(bed_file, 'r') as bed:
        for line in bed:
            chrom, start, end = line.strip().split('\t')
            start, end = int(start), int(end)
            info = dict(chrom=chrom, start=start, end=end)
            info.update(binary_search((chrom, int(start)), feature_spans))
            info['hit_type'] = get_book_mark_info(
                (chrom, int(start)), hg38_refgene_annotations[info['bookmark_gene']])
            bed_info.append(info)
    _ = format_annotations(bed_info, output_path=bed_file.replace('.bed', '.anno.tsv'))
BedAnnotation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.3 64-bit (''SNP_enrich'': conda)' # name: python373jvsc74a57bd09a218f4a58cb44d8651aa34d01362d0132a49afe72d8eee3976ac6216d69f221 # --- # + import os import sys import numpy as np import pandas as pd #from scipy.stats import norm import pybedtools import re #import matplotlib as plt import pyBigWig as pbw import time import urllib.request import matplotlib.pyplot as plt import json import requests imputed_urls_path="../configs/Imputed_urls.txt" observed_urls_path="../configs/Observed_urls.txt" Enhancer_urls=pd.read_csv("../configs/enhancers_urls.txt", header=None, names=['filename']) Promoter_urls=pd.read_csv("../configs/promoters_urls.txt", header=None, names=['filename']) epimap_meta_path="../configs/main_metadata_table.tsv" epimap_meta=pd.read_csv(epimap_meta_path, sep="\t") def strip_to_sample(input_url): return(input_url.split(sep="_")[0]) Promoter_urls_samples=Promoter_urls.apply(lambda x: strip_to_sample(x['filename']), axis=1) Enhancer_urls_samples=Enhancer_urls.apply(lambda x: strip_to_sample(x['filename']), axis=1) epimap_meta=epimap_meta.loc[epimap_meta['id'].isin(Enhancer_urls_samples) & epimap_meta['id'].isin(Promoter_urls_samples)] # -
notebooks/Plotting_results.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://colab.research.google.com/github/r-lomba/covid-19-charts/blob/master/covid-19.ipynb"> # <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> # Run in Google Colab</a> # </td> # </table> # # COVID-19 Charts Notebook # # This notebook generates a variety of charts starting from the latest daily data regarding COVID-19, made publicly available by "Johns Hopkins University CSSE" in their Git Repository (see URL reference below). # # Dataframes are created in both a cumulative and an incremental fashion starting from the original data. This allows to plot them revealing a few interesting insights. # # A plotting function capable of presenting data in complex ways is the core of this notebook. # # You can: # # - Present data from 8 aggregated Dataframe sources: Cumulative Active cases, Daily Active (new cases), Cumulative Confirmed cases, Daily Confirmed (new cases), Cumulative Recoveries, Daily Recoveries (new cases), Cumulative Fatalities, Daily Fatalities (new cases) # - There are also 3 further aggregated Dataframe sources for drawing Pie Charts: Confirmed cases, Recoveries and Fatalities. 
Active Cases not present here because they can (correctly) have negative values at times, being a derived quantity # - Easily filter the Dataframe Sources specifying an initial date # - Easily filter the Dataframe Sources by Country at the same time # - Combine the above 2 points # - Draw multiple line charts on the same plot # - Draw multiple bar chart on the same plot # - Draw pie charts # - Specify linear or logarithmic scale for the Y axis # - Select single or multiple Coutries as the source of data for a particular plot # - Specify if you want to keep the Country data separated (e.g. for charts comparison) or aggregated (e.g. to check global trends) # - Generate Chart Images in PNG format inside a "charts" subdirectory, together with a simple "Index.html" page to directly show the images on a browser # # ### Contacts: # You can contact me here:<br> # <EMAIL> # # ### Data Sources Reference: # 2019 Novel Coronavirus COVID-19 (2019-nCoV) Data Repository by Johns Hopkins CSSE:<br> https://github.com/CSSEGISandData/COVID-19<br> # # ### Terms of use: # Please see the Terms of Use extensively described at the above links for reference # # ### Disclaimer: # This GitHub repo and its contents herein, including all data, mapping, and analysis is provided to the public strictly for educational and academic research purposes. It is hereby disclaimed any and all representations and warranties with respect to the Website, including accuracy, fitness for use, and merchantability. Reliance on the Website for medical guidance or use of the Website in commerce is strictly prohibited. 
# --- # # Section 1 - Initializations # ### Let's import a few Libraries we'll be using later: # + # IMPORT LIBRARIES import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn.apionly as sns import dateutil import os import io import requests import copy import time import shutil from matplotlib.colors import LogNorm from matplotlib.pyplot import figure from datetime import datetime, timedelta, date from itertools import cycle, islice from scipy.interpolate import UnivariateSpline from sklearn.metrics import mean_squared_error, r2_score # %matplotlib inline # - # ### Here, we create a few Working Directories, if they don't exist yet: # + # CREATES WORKING DIRECTORY TO SAVE CHARTS IF IT DOESN'T EXIST path = os.getcwd() try: os.mkdir(path + '/charts') except OSError: print ('Directory ./charts already exists, proceed anyway') else: print ('Successfully created Directory ./charts') # - # ### A few hardcoded Dictionaries are created below. They'll be used later to fix a number of known issues on the original streams of data: # + ############################################################# ## ## HARDCODED DICTIONARIES - FIXES TO MISSING DATA ON THE ORIGINAL ## DATASOURCES THAT NEVER GOT CORRECTED AND THAT WOULD SHOW ## INCONSISTENCIES IF LEFT UNTOUCHED (E.G. CUMULATIVE STATS ## DECREASING IN TIME INSTEAD OF INCREASING, ETC) ## ## PLEASE NOTE THAT IN SOME CASES A DECREASING CUMULATIVE STAT ## COULD BE CORRECT AND EXPECTABLE, AS SOMETIMES THE AUTHOTITIES ## CAN REVISE E.G. A STAT REGARDING A RECOVERY, RETREATING IT ## BUT IN THE VAST MAJORITY OF THE CASES, A MORE REASONABLE ## QUANTITY CAN BE FOUND ONLINE FOR THAT PARTICULAR STAT FOR ## THAT DAY. 
IF THIS IS THE CASE, IT CAN BE LISTED IN THE ## FOLLOWING DICTIONARIES AND IT WILL BE STRUCTURALLY USED WHEN ## A NEW CALCULATION LOOP IS LAUNCHED TO GENERATE CHARTS FOR THE DAY ## ## SOURCES: ## https://www.worldometers.info/coronavirus/ ## https://ourworldindata.org/coronavirus-source-data ## ############################################################# confirmed_fixes_dict = {'Italy|2020-03-12': 15113, 'Spain|2020-03-12': 3146, 'France|2020-03-12': 2876, 'United Kingdom|2020-03-12': 590, 'Germany|2020-03-12': 2745, 'Argentina|2020-03-12': 19, 'Australia|2020-03-12': 150, 'Belgium|2020-03-12': 314, 'Chile|2020-03-12': 23, 'Colombia|2020-03-12': 9, 'Greece|2020-03-12': 98, 'Indonesia|2020-03-12': 34, 'Ireland|2020-03-12': 43, 'Japan|2020-03-12': 620, 'Netherlands|2020-03-12': 503, 'Qatar|2020-03-12': 262, 'Singapore|2020-03-12': 178, 'United Kingdom|2020-03-15': 1391, 'France|2020-03-15': 5423, 'Switzerland|2020-03-16': 2353, 'United Kingdom|2020-03-19': 3269, 'Azerbaijan|2020-03-16': 28, 'Bahrain|2020-03-13': 210, 'Cruise Ship|2020-03-06': 706, 'Cruise Ship|2020-03-07': 706, 'Cruise Ship|2020-03-08': 706, 'Cruise Ship|2020-03-09': 706, 'Cruise Ship|2020-03-10': 706, 'Cruise Ship|2020-03-11': 706, 'Cruise Ship|2020-03-12': 706, 'Cruise Ship|2020-03-13': 706, 'Cruise Ship|2020-03-14': 706, 'Cruise Ship|2020-03-15': 706, 'Cruise Ship|2020-03-16': 706, 'Cruise Ship|2020-03-17': 706, 'Japan|2020-01-23': 2, 'Japan|2020-02-06': 25, 'Japan|2020-03-12': 701, 'Japan|2020-03-16': 878, 'Lebanon|2020-03-16': 120, 'Montenegro|2020-03-18': 3, 'US|2020-04-13': 582619, 'Italy|2020-06-19': 238200 } deaths_fixes_dict = {'Italy|2020-03-12': 1016, 'Spain|2020-03-12': 86, 'France|2020-03-12': 61, 'Germany|2020-03-12': 6, 'Argentina|2020-03-12': 1, 'Australia|2020-03-12': 3, 'Greece|2020-03-12': 1, 'Indonesia|2020-03-12': 1, 'Ireland|2020-03-12': 1, 'Japan|2020-03-12': 15, 'Netherlands|2020-03-12': 5, 'Switzerland|2020-03-12': 4, 'United Kingdom|2020-03-15': 35, 'France|2020-03-15': 
127, 'Switzerland|2020-03-16': 19, 'France|2020-03-17': 175, 'France|2020-03-18': 264, 'France|2020-03-19': 372, 'Iceland|2020-03-15': 1, 'Iceland|2020-03-16': 1, 'Iceland|2020-03-20': 1, 'Philippines|2020-03-18': 17, 'Kazakhstan|2020-03-20': 0 } recovered_fixes_dict = {'Korea, South|2020-03-08': 135, 'Korea, South|2020-03-09': 135, 'Italy|2020-03-12': 1258, 'Spain|2020-03-12': 189, 'France|2020-03-12': 12, 'Germany|2020-03-12': 25, 'US|2020-02-21': 2, 'US|2020-02-22': 2, 'US|2020-02-23': 2, 'US|2020-02-24': 2, 'US|2020-02-25': 3, 'US|2020-02-26': 3, 'US|2020-02-27': 3, 'US|2020-02-28': 3, 'US|2020-02-29': 4, 'US|2020-03-01': 6, 'US|2020-03-02': 6, 'US|2020-03-03': 6, 'US|2020-03-04': 6, 'US|2020-03-05': 6, 'US|2020-03-06': 12, 'US|2020-03-07': 12, 'US|2020-03-08': 12, 'US|2020-03-09': 12, 'US|2020-03-10': 12, 'US|2020-03-11': 12, 'US|2020-03-12': 28, 'US|2020-03-13': 38, 'US|2020-03-14': 53, 'US|2020-03-15': 56, 'US|2020-03-16': 71, 'US|2020-03-17': 103, 'US|2020-03-18': 103, 'US|2020-03-19': 105, 'US|2020-03-20': 144, 'Italy|2020-03-20': 5129, 'France|2020-03-17': 590, 'France|2020-03-18': 590, 'France|2020-03-19': 1283, 'France|2020-03-20': 1575, 'France|2020-03-21': 1575, 'Belgium|2020-03-19': 165, 'Belgium|2020-03-20': 204, 'Andorra|2020-03-13': 1, 'Poland|2020-03-16': 1, 'Poland|2020-03-17': 1, 'Poland|2020-03-18': 1, 'Poland|2020-03-23': 1, 'Iceland|2020-03-16': 8, 'Iceland|2020-03-17': 8, 'Iceland|2020-03-18': 8, 'Iceland|2020-03-19': 8, 'Iceland|2020-03-20': 8, 'Austria|2020-03-17': 7, 'Egypt|2020-03-15': 27, 'US|2020-03-21': 178, 'US|2020-03-22': 178, 'Sri Lanka|2020-03-22': 1, 'Pakistan|2020-03-22': 13, 'Togo|2020-03-20': 0, 'Togo|2020-03-21': 0, 'Togo|2020-03-22': 0, 'Trinidad and Tobago|2020-03-21': 0, 'Trinidad and Tobago|2020-03-22': 0, 'Canada|2020-03-23': 112, 'US|2020-03-23': 178, 'Algeria|2020-03-24': 65 } # - # ### Next, we define the core functions we'll be using later when plotting charts: # + # GENERATES BASIC "index.html" PAGE FOR A SPECIFIC 
# COUNTRY IN ITS SPECIFIC SUBDIRECTORY
# READS A DATE IN THE ORIGINAL FORMAT IN INPUT
# RETURNS A DATE FORMATED AS 'YYYY-MM-DD'
# ---------------------------------------------------------------------------------------------
# THIS UTILITY FUNCTION TAKES IN INPUT:
# A SUBDIRECTORY NAME WHERE TO GENERATE "index.html" PAGE
# A COUNTRY NAME TO PROCESS
# A DICTIONARY OF COUNTRIES HOLDING ADDITIONAL INFO SUCH AS COUNTRY NAME, START DATE, ETC
# A FLAG INDICATING THIS IS THE ENTRY PAGE ("/index.html"). OPTIONAL
def generate_country_index_page(countrydir, country, detailed_countries_dict, flg_top_page='N'):
    """Generate an "index.html" page showing the saved charts for one country.

    When flg_top_page == 'Y' the page is the site root ("charts/index.html"):
    it shows three large entry icons (World, European Union, Italy) and no
    per-country charts. Otherwise the function enters "charts/<countrydir>"
    and builds a page listing every ".png" chart found there, ordered by file
    modification time.

    Parameters:
        countrydir              -- subdirectory (under "charts") holding this country's charts
        country                 -- display name used in the page heading
        detailed_countries_dict -- dict whose keys are the country names used to build the flag navbar
        flg_top_page            -- 'Y' for the site root page, 'N' (default) for a country page

    Side effects: writes "./index.html" in the directory it ends up in, and
    changes the process working directory (see NOTE at the bottom).
    """
    # GENERATES "index.html" PAGE IN "./charts/<COUNTRY>" TO SHOW SAVED CHARTS FOR SPECIFIC COUNTRY IN BROWSER
    # Static page skeleton: fixed top navbar with the flag table, scrollable main area below it.
    html_str = """<!DOCTYPE html>
<html>
<head>
<meta charset='UTF-8'>
<title>COVID-19 Charts</title>
</head>
<style>
body {margin:0;}

.navbar {
  overflow: hidden;
  background-color: white;
  position: fixed;
  top: 0;
  width: 100%;
}

.navbar a {
  float: left;
  display: block;
  color: #f2f2f2;
  text-align: center;
  padding: 5px 5px;
  text-decoration: none;
  font-size: 17px;
}

.navbar a:hover {
  background: #ddd;
  color: black;
}

.main {
  padding: 16px;
  margin-top: 30px;
  height: 1500px; /* Used in this example to enable scrolling */
}
</style>
<body>

<div class="navbar">
<table border=0>
<tr>
<td align='center'>
<font face='Impact' size='5' color='black'>COVID-19 CHARTS</font>
<br>
<font face='Impact' size='3' color='black'>Choose desired Country</font>
</td>
<td>
<table border='0'>"""

    # CHANGES DIRECTORY AND ENTERS CHARTS FOLDER
    # NOTE(review): this first timestamp is recomputed in both branches below,
    # so this assignment is effectively dead — confirm before removing.
    now = datetime.now()
    last_updated = now.strftime("%d %b, %Y - %H:%M:%S")
    os.chdir('charts')
    ###table = sorted(filter(os.path.isdir, os.listdir('.')), key=os.path.getmtime)
    # The navbar lists the dict keys, split over two <tr> rows of flags.
    table = list(detailed_countries_dict.keys())
    table_splitted = np.array_split(table,2) # HERE, "2" IS THE NUMBER OF LINES WE WANT OUR TABLE TO SPAN. FIX ACCORDING TO YOUR NEEDS

    for i in range(len(table_splitted)):
        html_str = html_str + '<tr>'
        for j in range(len(table_splitted[i])):
            # IF THIS IS THE ROOT PAGE OF THE SITE, USES PROPER LINKS TO IMAGES
            # (relative paths differ by one level between root and country pages)
            if (flg_top_page=='Y'):
                html_str = html_str + '<td><a href="./' + table_splitted[i][j] + '/index.html"><img src="../demo-images/flags/Flag_of_' + table_splitted[i][j] + '.png" width="40" height="40" alt="' + table_splitted[i][j] + '" title="' + table_splitted[i][j] + '"></a></td>'
            # IF THIS IS NOT THE ROOT PAGE OF THE SITE, USES PROPER LINKS TO IMAGES
            else:
                html_str = html_str + '<td><a href="../' + table_splitted[i][j] + '/index.html"><img src="../../demo-images/flags/Flag_of_' + table_splitted[i][j] + '.png" width="40" height="40" alt="' + table_splitted[i][j] + '" title="' + table_splitted[i][j] + '"></a></td>'
        html_str = html_str + '</tr>'
    html_str = html_str + '</table>'

    # Closes the navbar and opens the scrollable main area.
    html_str = html_str + """</td>
</tr>
</table>
<hr>
</div>"""

    html_str = html_str + """<div class="main"><br><br><br><br><table border="0">"""

    # IF THIS IS THE ROOT PAGE OF THE SITE, JUST DRAWS THREE MAIN ICONS TO START FROM
    if (flg_top_page=='Y'):
        # STAYS IN "charts" DIRECTORY AND DOES NOT ENTER ANY SUBFOLDER (THIS IS THE ROOT PAGE)
        now = datetime.now()
        last_updated = now.strftime("%d %b, %Y - %H:%M:%S")
        html_str += """<tr>
<td colspan='3' align='center'>
<font face='Impact' size='3' color='black'>
<h1>COVID-19 Charts - Last updated """ + last_updated + """ (CET)</h1>
</font>
</td></tr>"""
        # Three large dashboard entry points: World, European Union, Italy.
        html_str = html_str + '<tr><td><a href="./World/index.html"><img src="../demo-images/flags/Flag_of_World.png" width="250" height="250" alt="World" title="World"></a></td>'
        html_str = html_str + '<td><a href="./European_Union/index.html"><img src="../demo-images/flags/Flag_of_European_Union.png" width="250" height="250" alt="European_Union" title="European_Union"></a></td>'
        html_str = html_str + '<td><a href="./Italy/index.html"><img src="../demo-images/flags/Flag_of_Italy.png" width="250" height="250" alt="Italy" title="Italy"></a></td></tr>'
        html_str += """<tr>
<td colspan='3' align='center'><font face='Impact' size='3' color='black'>
<h2>Choose the Dashboard you want to start from</h2>
</font>
</td></tr>"""
        html_str = html_str + "<tr><td colspan='3'><hr></td></tr>"
    # ELSE THIS IS A DEDICATED COUNTRY PAGE OF THE SITE, DRAWS THE WHOLE PAGE WITH CHARTS
    else:
        # CHANGES DIRECTORY AND ENTERS THIS COUNTRY'S FOLDER
        now = datetime.now()
        last_updated = now.strftime("%d %b, %Y - %H:%M:%S")
        os.chdir(countrydir)
        i = 0
        # Charts are listed oldest-first by file modification time; the first
        # ".png" encountered is treated as the country flag (i == 0 below).
        for file in sorted(filter(os.path.isfile, os.listdir('.')), key=os.path.getmtime):
            filename = os.fsdecode(file)
            if (filename.endswith('.png')):
                if (i == 0):
                    # IF IT'S THE FIRST LOOP, PRINTS FLAG + HEADING FOR THE PAGE
                    html_str = html_str + "<tr><td colspan='2'>&nbsp</td></tr>"
                    html_str = html_str + "<tr><td><img src='" + filename.replace(' ', '%20') + "' width='100' height='100'></td><td><font face='Impact' size='3' color='black'><h1 align='left'>" + country + " at a glance - Grandtotals Summary and Most relevant Charts</h1></font><font face='Impact' size='5' color='black'>Last updated " + last_updated + " (CET)</font></td></tr>"
                    html_str = html_str + "<tr><td colspan='2'><hr></td></tr>"
                else:
                    # IF IT'S NOT THE COUNTRY FLAG, IT'S A CHART
                    if (i > 1): # NO ADDITIONAL SPACE UNDER GENERAL PAGE HEADING
                        html_str = html_str + "<tr><td colspan='2'><p><br></p></td></tr>" # EMPTY LINE TO CREATE SPACE BETWEEN SINGLE CHARTS
                    # Spaces in file names must be URL-encoded for the <img> src.
                    html_str = html_str + "<tr><td colspan='2'><img src='" + filename.replace(' ', '%20') + "'></td></tr>"
                    html_str = html_str + "<tr><td colspan='2'><hr></td></tr>"
                i = i + 1

    # Static footer: repository link, contacts, data sources, terms, disclaimer.
    html_str += "</table>"
    html_str += """<p>
<b>GitHub Repository:</b><br>
Please visit the GitHub Repository containing the full source code (Jupyter Notebook) used to generate the charts:
<br>
<a href='https://github.com/r-lomba/covid-19-charts'>https://github.com/r-lomba/covid-19-charts</a>
</p>
<p>
<b>Contacts:</b><br>
You can contact me here:
<br>
<a href='mailto:<EMAIL>'><EMAIL></a>
</p>
<p>
<b>Data Source Reference:</b>
<br>
2019 Novel Coronavirus COVID-19 (2019-nCoV) Data Repository by Johns Hopkins CSSE:
<br>
<a href='https://github.com/CSSEGISandData/COVID-19'>https://github.com/CSSEGISandData/COVID-19</a>
<br>
Worldometer - World Counters and Stats:
<br>
<a href='https://www.worldometers.info/coronavirus'>https://www.worldometers.info/coronavirus</a>
</p>
<p>
<b>Terms of use:</b><br>
Please see the Terms of Use extensively described at the above link for reference
</p>
<p>
<b>Disclaimer:</b><br>
This Website, the related GitHub repo and its contents, including all data, mapping, and analysis is provided to the public strictly for educational and academic research purposes. It is hereby disclaimed any and all representations and warranties with respect to the Website and related Git Repo, including accuracy, fitness for use, and merchantability. Reliance on the Website for medical guidance or use of the Website in commerce is strictly prohibited.
</p>"""
    html_str += "</div>"
    html_str += "</body></html>"

    # Writes the page in the current directory ("charts" for the root page,
    # "charts/<countrydir>" for a country page), then climbs back up.
    Html_file= open('./index.html','w')
    Html_file.write(html_str)
    Html_file.close()
    # NOTE(review): climbs two levels in BOTH branches, but the root-page
    # branch only descended one level (into "charts") — confirm the caller's
    # working-directory expectations before relying on the final cwd.
    os.chdir('../..')


# FORMATS A DATE
# READS A DATE IN THE ORIGINAL FORMAT IN INPUT
# RETURNS A DATE FORMATED AS 'YYYY-MM-DD'
# ---------------------------------------------------------------------------------------------
# SPLITS ORIGINAL COLUMN NAME IN TOKENS DELIMITED BY "/"
# ZERO-PADS ALL THE RESULTING TOKENS TO BE OF LENGTH 2
# CONCATENATES THE NEW TOKENS USING "-" AND PREPENDS "20" TO THE NEW
# DATE TO BE RETURNED
def reformat_date(date_to_be_formatted):
    """Convert a 'month/day/year' date string (e.g. '3/7/20') to 'YYYY-MM-DD' (e.g. '2020-03-07').

    The year token keeps only its last two digits and is prefixed with '20',
    so the function is only meaningful for 20xx dates.
    """
    date_split = date_to_be_formatted.split('/')
    date_padded_split=[str(item).zfill(2) for item in date_split]
    date_formatted = '20' + date_padded_split[2][-2:] + '-' + date_padded_split[0] + '-' + date_padded_split[1]
    return(date_formatted)


# MAKES SURE ALL THE NUMERIC COLUMNS IN CONFIRMED DATAFRAME ARE "Int64"
# THIS IS A FIX THAT CORRECT ISSUES IN THE DATASET SINCE 2020/03/07
# ---------------------------------------------------------------------------------------------
# CORRECTS IN-PLACE COLUMNS THAT MAY BE EMPTY AND ENSURES THAT THEIR
# DATATYPE IS "Int64" AS ALL THE OTHER COLUMNS, AS OPPOSED TO FLOAT
# AS THIS ISSUE SEEMS TO HAVE INTRODUCED
def fix_na_columns(dataframe_to_be_fixed_DF):
    """Return a copy of the dataframe with NaNs replaced by 0 and every column cast to np.int64.

    Works on a copy, so the caller's dataframe is not modified.
    Assumes every column is numeric-coercible — TODO confirm against callers.
    """
    dataframe_fixed_DF = dataframe_to_be_fixed_DF.copy()
    # The enumerate index "i" is only used by the commented-out guard below,
    # kept for reference (it used to skip the first two columns).
    for i, key in enumerate(dataframe_fixed_DF.keys()):
        # if (i >= 2):
        #     dataframe_fixed_DF[key] = dataframe_fixed_DF[key].fillna(0).astype(np.int64)
        dataframe_fixed_DF[key] = dataframe_fixed_DF[key].fillna(0).astype(np.int64)
    return(dataframe_fixed_DF)


# FORMATS A DATAFRAME
# READS DATA FOR ALL NATIONS
# A DICTIONARY OF HARDCODED CORRECTIONS TO THE ORIGINAL DATA IS ALSO READ
# RETURNS A FINAL DATAFRAME DEVELOPING HORIZONTALLY AS THE ORIGINAL DATA
# ---------------------------------------------------------------------------------------------
# DROPS 'Province/State', 'Lat' AND 'Long' COLUMNS AS NOT USED IN OUR CONTEXT
# GROUPS DATA BY NATION
# IF NEEDED (E.G. US OR CHINA WILL LOOSE STATE INFORMATION)
# THEREFORE MULTI-RECORDS NATIONS WILL RESULT IN HAVING JUST ONE RECORD AFTER GROUPING
# ALSO SUMS UP NUMERIC QUANTITIES BY NATION IF NEEDED (FOR THE SAME REASONS AS EXPLAINED ABOVE)
# FINALLY, RENAMES DATE COLUMNS NAMES USING THE "reformat_date" HELPER FUNCTION
def reformat_dataframe(dataframe_to_be_formatted_DF, fixes_dict):
    """Reshape a raw time-series dataframe into one row per country with ISO date columns.

    Drops the 'Province/State', 'Lat' and 'Long' columns, sums all records of
    the same 'Country/Region' into a single row, renames every date column via
    "reformat_date" ('M/D/YY' -> 'YYYY-MM-DD'), then applies the hardcoded
    corrections through "fix_dataframe".

    Parameters:
        dataframe_to_be_formatted_DF -- raw dataframe as read from the source CSV
        fixes_dict -- corrections keyed as '<COUNTRY>|<YYYY-MM-DD>' -> value

    Returns the reformatted dataframe (countries as index, dates as columns).
    """
    # DROPS COLUMNS NOT USED IN OUR CONTEXT, THEN COLLAPSES MULTI-RECORD NATIONS
    dataframe_to_be_formatted_DF = dataframe_to_be_formatted_DF.drop(columns=['Province/State', 'Lat', 'Long'])
    dataframe_formatted_DF = dataframe_to_be_formatted_DF.groupby(['Country/Region']).sum()

    # RENAMES ALL DATE COLUMNS IN ONE PASS: a single rename() with the full
    # mapping instead of one rename() call (and one full-frame copy) per column
    dataframe_formatted_DF = dataframe_formatted_DF.rename(
        columns={column: reformat_date(column) for column in dataframe_formatted_DF})

    # APPLIES THE HARDCODED CORRECTIONS, DELEGATING TO "fix_dataframe" SO THE
    # FIX LOGIC LIVES IN EXACTLY ONE PLACE (it used to be duplicated here)
    dataframe_formatted_DF = fix_dataframe(dataframe_formatted_DF, fixes_dict)

    return(dataframe_formatted_DF)


# APPLIES FIXES TO A DATAFRAME WITHOUT REFORMATTING IT
# A DICTIONARY OF HARDCODED CORRECTIONS TO THE ORIGINAL DATA IS READ
# RETURNS A FINAL DATAFRAME DEVELOPING HORIZONTALLY AS THE ORIGINAL DATA
# ---------------------------------------------------------------------------------------------
# FIXES KNOWN WRONG QUANTITIES IN A DATAFRAME USING A HARDCODED DICTIONARY
def fix_dataframe(dataframe_to_be_fixed_DF, fixes_dict):
    """Apply hardcoded cell corrections to an already-formatted dataframe, in place.

    Each key of fixes_dict is '<COUNTRY>|<YYYY-MM-DD>'; the mapped value
    overwrites that cell via .at. The input dataframe is modified in place
    and also returned for convenience.
    """
    for key, value_to_be_fixed in fixes_dict.items():
        # KEY FORMAT IS '<COUNTRY>|<DATE>': split once instead of twice per key
        key_parts = key.split('|')
        country_to_be_fixed = key_parts[0]
        date_to_be_fixed = key_parts[1]
        dataframe_to_be_fixed_DF.at[country_to_be_fixed, date_to_be_fixed] = value_to_be_fixed
    return(dataframe_to_be_fixed_DF)


# TOTALIZES A DATAFRAME SUMMING UP VALUES FOR A LIST OF NATIONS
# READS DATA FOR ALL NATIONS
# RETURNS A FINAL DATAFRAME DEVELOPING HORIZONTALLY AS THE ORIGINAL DATA
# CONTAINING JUST ONE RECORD PER COUNTRY WITH TOTAL VALUES
# ---------------------------------------------------------------------------------------------
# FILTERS
# DATAFRAME IN INPUT, WHICH HAS BEEN PREVIOUSLY FORMATTED
# EXTRACTS RECORDS FROM THE PROVIDED LIST OF NATIONS OF INTEREST TO BE TOTALIZED
def formatted_dataframe_totalize_countries(list_of_countries, dataframe_to_be_formatted_DF):
    """Collapse the rows of the requested countries into a single row of totals.

    Keeps only the rows whose index appears in list_of_countries, sums them
    column by column, and returns the result as a one-row dataframe.
    """
    selected_rows_DF = dataframe_to_be_formatted_DF[dataframe_to_be_formatted_DF.index.isin(list_of_countries)]
    # Sum every column, then turn the resulting Series back into a one-row frame
    totals_series = selected_rows_DF.sum()
    return(totals_series.to_frame().T)


# TOTALIZES A DATAFRAME SUMMING UP VALUES FOR A LIST OF NATIONS
# READS DATA FOR ALL NATIONS
# RETURNS A FINAL DATAFRAME DEVELOPING VERTICALLY, DIFFERENTLY FROM THE ORIGINAL DATA
# SO THE COUNTRIES WILL BE THE COLUMN HEADERS AND WE'LL HAVE
# JUST ONE RECORD OF TOTALS FOR ALL THE SELECTED COUNTRIES
# THIS BECAUSE TO DRAW A PIE CHART WE NEED TO SHAPE THE DATA IN THIS FASHION
# ---------------------------------------------------------------------------------------------
# FILTERS DATAFRAME IN INPUT, WHICH HAS BEEN PREVIOUSLY FORMATTED
# EXTRACTS RECORDS FROM THE PROVIDED LIST OF NATIONS OF INTEREST TO BE TOTALIZED
def reformat_dataframe_for_pie_chart(dataframe_to_be_formatted_DF):
    """Reshape raw data for pie charts: one row per country, daily (non-cumulative) values."""
    # Drop the unused columns, then collapse multi-record nations into one row each
    grouped_DF = (dataframe_to_be_formatted_DF
                  .drop(columns=['Province/State', 'Lat', 'Long'])
                  .groupby(['Country/Region'])
                  .sum())

    # Relabel every date column as 'YYYY-MM-DD'
    for original_column in grouped_DF:
        grouped_DF = grouped_DF.rename(columns={original_column: reformat_date(original_column)})

    # REDUCES CUMULATIVE VALUES TO DAILY NEW VALUES. THIS IS PECULIAR FOR PIE CHARTS
    # OTHERWISE WE WOULD OBTAIN COUNTRY SHARES THAT WOULD BE THE TOTAL OF A RUNNING TOTAL (INCORRECT!)
    return(undo_cum_dataframe(grouped_DF))


# TOTALIZES A DATAFRAME EXTRACTING JUST A LIST OF SPECIFIED NATIONS
# READS DATA FROM THE PRE-PROCESSED DATAFRAME WITH TOTAL DATA FOR PIE CHARTS FOR ALL NATIONS
# RETURNS A FINAL DATAFRAME DEVELOPING VERTICALLY, DIFFERENTLY FROM THE ORIGINAL DATA
# SO THE COUNTRIES WILL BE THE COLUMN HEADERS AND WE'LL HAVE
# JUST ONE RECORD OF TOTALS FOR ALL THE SELECTED COUNTRIES
# THIS BECAUSE TO DRAW A PIE CHART WE NEED TO SHAPE THE DATA IN THIS FASHION
# ---------------------------------------------------------------------------------------------
# FILTERS DATAFRAME IN INPUT, WHICH HAS BEEN PREVIOUSLY FORMATTED
# EXTRACTS RECORDS FROM THE PROVIDED LIST OF NATIONS OF INTEREST TO BE TOTALIZED
def formatted_dataframe_totalize_countries_for_pie_chart(list_of_countries, dataframe_to_be_formatted_DF):
    """Project the pre-totalized pie-chart dataframe onto the requested countries only."""
    return(dataframe_to_be_formatted_DF[list_of_countries])


# TRANSFORMS AN ALREADY FORMATTED DATAFRAME
# READS THE ALREADY FORMATTED DATAFRAME IN INPUT
# RETURNS A FINAL DATAFRAME CONTAINING DAILY PROGRESSIVE DATA (E.G. "NEW CASES ADDING EVERYDAY") INSTEAD OF CUMULATIVE DATA
# ---------------------------------------------------------------------------------------------
# THE ORIGINAL DATA CONTAIN A CUMULATIVE TOTAL THAT GROWS UP AT EACH NEXT SAMPLED PERIOD
# THIS FUNCTION REDUCES THIS INCREASING GRANDTOTAL TO A SEQUENCE OF DAILY QUANTITIES
# ADDING UP ALL THESE DAILY QUANTITIES WILL RESULT, IN THE END, IN THE SAME GRANDTOTAL AS
# IN THE ORIGINAL DATA.
# THIS WILL ALLOW TO PLOT DAILY CHARTS WITH DAILY QUANTITIES
def undo_cum_dataframe(dataframe_to_be_reduced_DF):
    """Turn a row of running totals into daily increments, column by column.

    The input holds cumulative values (each date column >= the previous one);
    the output holds, for each date, only the amount added on that day, so the
    row-wise sum of the output equals the last column of the input. The
    caller's dataframe is not modified.
    """
    # Work on a copy so the caller's cumulative dataframe is left untouched
    daily_DF = dataframe_to_be_reduced_DF.copy()
    # Seed a zero column in front so that, after diff(), the first real
    # column keeps its own value instead of becoming NaN
    daily_DF.insert(0, '2020-01-01', 0)
    daily_DF = daily_DF.diff(axis=1).fillna(0).astype(int)
    # Discard the artificial seed column (now all zeros after fillna)
    daily_DF.drop(daily_DF.columns[0], axis=1, inplace=True)
    return(daily_DF.copy())


# PLOTS A SINGLE PLOT REPRESENTING THE DATA AS ASKED
# READS DATA FROM A LIST OF NATIONS PASSED IN INPUT
# THE LIST CAN BE MADE UP OF A SINGLE NATION OR MULTIPLE NATIONS
# RETURNS A SINGLE PLOT ON SCREEN CONTAINING SINGLE OR MULTIPLE LINE CHARTS
# AND/OR SINGLE OR MULTIPLE BAR CHARTS
# THE PRESENTED CHARTS CAN BE MULTIPLE SPLITS ORIGINATING FROM EVERY SINGLE
# NATION IN THE LIST OF COUNTRIES (E.G. TO PERFORM COMPARISON)
# OR CAN BE SINGLE SUMMED-UP ENTITIES ORIGINATING FROM THE SUM OF
# ALL THE NATIONS IN THE LIST OF COUNTRIES (E.G. TO CHECK GLOBAL TRENDS)
# ALSO, THE GENERATED PLOT IS SAVED IN ".png" FORMAT IN THE "charts" SUBDIR
# ---------------------------------------------------------------------------------------------
# THIS PLOTTING UTILITY FUNCTION TAKES IN INPUT:
# A LIST OF NATIONS
# A STARTING DATE OF INTEREST (FORMATTED "YYYY-MM-DD")
# A TITLE FOR THE FINAL CHART
# A SCALE FOR THE Y AXIS (E.G. "plain" OR "log")
# A DICTIONARY OF FORMATTED DATAFRAMES CONTAINING THE DATA TO BE PLOTTED
# A DICTIONARY OF CHART TYPES THAT WE WANT TO USE TO PLOT EACH OF THE DATAFRAMES SPECIFIED ABOVE
# A FLAG SPECIFYING IF WE WANT TO GROUP THE SINGLE CHARTS FOR ALL NATIONS (E.G.
"Y" OR "N") # A FLAG SPECIFYING IF WE WANT TO SAVE THE CHARTS IN THE LOCAL DIR ("Y") OR IN A SEPARATED "./charts" DIR ("N") def plot_complete_chart(list_of_countries, start_date, title, scale, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked = 10, flg_advanced_chart = 'N', flg_save_localdir = 'N' ): # CHART SCALE TO BE USED, PLAIN OR LOGARITHMIC if(scale == 'plain'): logy=False else: logy=True # INITIALIZES FILE NAME TO BE USED WHEN SAVING CHART TO FILESYSTEM filename = title.replace('\n','').replace('\t','').replace(' ', '_')[:250] # HELPER VARIABLES AND HELPER DATAFRAME TO BE USED IF WE HAVE HEATMAPS OR BAR CHARTS TO BE PLOTTED flg_barcharts = 0 flg_heatmaps = 0 d_barcharts = pd.DataFrame() # COMMON FIGURE FOR ALL PLOTS, COMMON AX. COULD BE "f = plt.figure()" IF SINGLE DATAFRAME PLOT f, ax = plt.subplots() # SETS CHART TITLE AND OTHER PARAMS plt.title(title, color='black', size=18) #plt.suptitle(title, color='black', size=18) #plt.subplots_adjust(top=0.8) #plt.legend(loc='upper left', bbox_to_anchor=(1, 1)) #plt.tight_layout() # IF WE REQUESTED A SEPARATE CHART FOR EACH COUNTRY # LET'S NORMALLY LOOP THROUGH THE LIST OF COUNTRIES if flg_totalize_countries == 'N': for i, country in enumerate(list_of_countries): # LET'S LOOP OVER THE KEYS OF THE DATAFRAMES DICTIONARY WE PASSED IN INPUT for key in dict_of_dataframes.keys(): # EXTRACTS DESIRED CHART TYPE FOR THIS DATAFRAME, FROM THE CORRESPONDING # KEY IN CHART TYPES DICTIONARY (FIXED AND INITIALIZED ONCE AND FOR ALL # ON TOP OF OUR PROGRAM) kind = dict_of_charttypes[key] # LINE CHART if kind == 'line': # IF THIS IS AN ADVANCED CHART, LINE WIDTH OF THE BASE DATA CHART MUST BE THICKER if (flg_advanced_chart == 'Y'): alpha = 0.5 style = 'bx-' linewidth = 6 # USED TO BE 12 markersize = 15 spline_alpha = 0.5 spline_style = 'go--' spline_linewidth = 6 spline_markersize = 12 figsize = (17,15.5) label_caption_trailer = ' - ORIGINAL SAMPLED DATA' # SPECIFY THAT THIS IS "ORIGINAL SAMPLED DATA" IN THE 
LEGEND else: linewidth = 6 style = 'x-' # USED TO BE '-' markersize = 15 # USED TO BE 0 alpha = 0.5 # USED TO BE 1 figsize = (17,11.5) label_caption_trailer = '' # DO NOT SPECIFY THAT THIS IS "ORIGINAL SAMPLED DATA" IN THE LEGEND # PREPROCESSES AND FILTERS THE DATAFRAME UNDER EXAM IN CURRENT LOOP d = dict_of_dataframes[key][dict_of_dataframes[key].index.isin([country])].T d = d.rename(columns={d.columns[0]: country + ' - ' + key + label_caption_trailer}) d = d[d.index > start_date] # LINE CHARTS ARE PLOTTED AS THEY FLOW IN fig_to_be_saved = d.plot(kind=kind, logy=logy, figsize=figsize, alpha=alpha, style=style, linewidth=linewidth, markersize=markersize, legend=True, grid=True, rot=90, ax=ax ) # REFRESH LEGEND TO WORKAROUND MISSING MARKERS FOR SOME LINES ax.legend() # IF THIS IS AN ADVANCED CHART WITH CUBIC SPLINE FITTING if (flg_advanced_chart == 'Y'): # CREATES A SPACE UNDER THE CHART FOR THE SPECIAL CAPTIONS UNDER THE X AXIS SCALE plt.subplots_adjust(bottom=0.30) # DEEP COPIES THE DATAFRAME UNDER EXAM IN CURRENT LOOP data_DF = d.copy() # ADDS A PROGRESSIVE NUMBER COLUMN FOR POLYNOMIAL FITTING # AND RENAMES THE SAMPLED VALUES COLUMN WITH A PROPER NAME FOR DISPLAYING LEGEND LATER data_DF.insert(0, 'x', np.arange(len(data_DF))) data_DF.rename(columns={country + ' - ' + key + label_caption_trailer:'y'}, inplace=True) # PLS SEE ABOVE TO CHECK HOW COLUMN NAME WAS ASSIGNED # WE LOOP SEVERAL DEGREES TO PRE-FIT SPLINES TO SAMPLED DATA AND FIND THE BEST FITTING DEGREE ACCORDING TO MSE steps_tmp = np.linspace(2,19,18) steps = [int(i) for i in steps_tmp] best_degree = 1 best_error = 999999999999999 for degree in steps: try: #this_error = np.sum((np.polyval(np.polyfit(data_DF['x'], data_DF['y'], degree), data_DF['x']) - data_DF['y'])**2) ppp, this_error, _, _, _ = np.polyfit(data_DF['x'], data_DF['y'], degree, full=True) if (this_error < best_error): best_error = this_error best_degree = degree print('degree: ' + str(degree) + ' - error: ' + str(this_error)) #rmse = 
np.sqrt(mean_squared_error(data_DF['y'],np.polyfit(data_DF['x'], data_DF['y'], degree))) #print('rmse: ' + str(rmse)) #r2 = r2_score(y,y_poly_pred) except: print('exception') pass best_degree = 9 print('best_degree: ' + str(best_degree) + ' - best error: ' + str(best_error)) # CALCULATES THE TEMPORARY POLYNOMIAL WITH BEST DEGREE ACCORDING TO MSE z = np.polyfit(data_DF['x'], data_DF['y'], best_degree) ff = np.poly1d(z) # EXTRACTS THE NEW LIST OF "X" AND "Y" FROM THE TEMPORARY DEGREE 9 POLYNOMIAL x_new = np.linspace(data_DF['x'][0], data_DF['x'][-1], len(data_DF['x'])) # WITH THE LAST PARAMETER WE CREATE A LINESPACE OF THE SAME NUMER OF SAMPLES ON THE "X" AXIS AS THE LENGTH OF OUR SAMPLED VALUES ARRAY y_new = ff(x_new) # FINALLY, WE FIT THE FINAL CUBIC SPLINE ("k=4" MEANS DEGREE 3) # TO THE LINESPACE SAMPLES WE EXTRACTED FROM THE PREVIUSLY FIT # TEMPORARY DEGREE 9 SPLINE y_spl = UnivariateSpline(x_new,y_new,s=0,k=4) # CALCULATES SECOND DERIVATIVE FROM FITTED CUBIC SPLINE y_spl_2d = y_spl.derivative(n=2) # CREATES X AXIS LINESPACE RANGE FOR SECOND DERIVATIVE PLOT x_range = np.linspace(data_DF['x'][0],data_DF['x'][-1], len(data_DF['x'])) # WITH THE LAST PARAMETER WE CREATE A LINESPACE OF THE SAME NUMER OF SAMPLES ON THE "X" AXIS AS THE LENGTH OF OUR SAMPLED VALUES ARRAY # PLOTS FITTED CUBIC SPLINE ax.plot(x_range,y_spl(x_range), spline_style, label= country + ' - ' + key + ' - BEST FIT CUBIC SPLINE', alpha=spline_alpha, linewidth=spline_linewidth, markersize=spline_markersize ) # ACTIVATE LEGEND FOR SUBPLOT OF CUBIC SPLINE DATA ON TOP LEFT ax.legend(loc="upper left") # ENRICHES DATAFRAME TO PLOT WITH 2ND DERIVATIVE COORDS AND CUBIC SPLINE COORDS data_DF.insert(2, 'spline_fit', y_spl(x_range)) data_DF.insert(3, '2nd_der', y_spl_2d(x_range)) # IDENTIFIES LAST MEANINGFULE INFLECTION POINT (IF ANY) check_2ndder = data_DF['2nd_der'] flg_first_loop = 'Y' # LOOPS IN REVERSE ORDER OVER THE ENRICHED DATAFRAME. 
WHEN THE SECOND DERIVATIVE # CHANGES IN SIGN, THAT'S WHERE THE MOST RECENT INFLECTION POINT HAPPENED # AND WE MUST ANNOTATE IT ON THE CHART for day_in_exam, value_2nd_der in reversed(list(enumerate(check_2ndder))): if (flg_first_loop == 'Y'): flg_first_loop = 'N' prev_value_2nd_der = value_2nd_der # DETECTS CHANGE IN SECOND DERIVATIVE DIRECTION - INFLECTION POINT FOUND if (((prev_value_2nd_der > 0) and (value_2nd_der < 0)) or ((prev_value_2nd_der < 0) and (value_2nd_der > 0))): inflection_point_coords = (day_in_exam, value_2nd_der) # CHECKS IF CURVE INCREASES OR DECREASES AFTER LAST INFLECTION POINT if (prev_value_2nd_der < 0): inflection_point_direction = 'DOWN' else: inflection_point_direction = 'UP' break # LAST INFLECTION POINT FOUND, LEAVES LOOP # CREATES AN ANNOTATION TRAILED TO BE USED TO ANNOTATE THE TREND # FOR THE CURVE ORIGINALLY CREATED WITH THE "REAL WORLD" SAMPLES if (inflection_point_direction == 'UP'): caption_trailer = 'Curve Trend is INCREASING' caption_color = 'red' else: caption_trailer = 'Curve Trend is DECREASING' caption_color = 'green' # INITIALIZES A FEW OUT OF LOOP VARIABLES TO BE USED # TO CALCULATE THE FINE-GRAINED COORDINATES OF THE LAST # INFLECTION POINT FOR THE CURVE ORIGINALLY CREATED WITH # THE "REAL WORLD" SAMPLES lowest_inflection_point_y = 9999999 lowest_inflection_point_x = 9999999 lowest_inflection_point_2ndder_y = 9999999 lowest_inflection_point_2ndder_x = 9999999 steps = np.linspace(0,1,11) # CALCULATES ACTUAL INFLECTION POINT CLOSEST COORDINATES LOOPING # BETWEEN THE DAYS WHERE IT LIES AT 0.1 STEPS TO FIND THE X WHERE 2ND DERIVATIVE # IS CLOSEST TO ZERO for inflection_point_x in steps: # CHECK FITTED CUBIC SPLINE AND SECOND DERIVATIVE ABSOLUTE VALUES AT THIS STEP inflection_point_y = ff(day_in_exam + inflection_point_x) inflection_point_2ndder_y = y_spl_2d(day_in_exam + inflection_point_x) # EVENTUALLY UPDATES NEW CLOSEST INFLECTION POINT COORDS # IF WE ARE CLOSER TO THE ABSOLUTE ZERO THAN IN THE PREVIOUS STEPS if 
(abs(inflection_point_2ndder_y) < abs(lowest_inflection_point_2ndder_y)): lowest_inflection_point_2ndder_y = abs(inflection_point_2ndder_y) lowest_inflection_point_y = inflection_point_y lowest_inflection_point_2ndder_x = (day_in_exam + inflection_point_x) lowest_inflection_point_x = (day_in_exam + inflection_point_x) # ANNOTATES THE LAST INFLECTION POINT ON OUR CHART ax.annotate('Last Inflection Point\nof best fit Cubic Spline\n' + caption_trailer, xy=(lowest_inflection_point_x, lowest_inflection_point_y), # COORDS OF THE POINT TO HIGHLIGHT (TUPLE). ADDING 0.5 TO FIND MOST APPROX POINT WHERE DERIVATIVE HAS CHANGED IN SIGN xycoords='data', # COORDS OF THE POINT TO HIGHLIGHT ARE EXPRESSED AS DATA COORDS xytext=(0.2, 0.8), # COORDS OF TEXT TO ANNOTATE textcoords='axes fraction', # COORDS OF TEXT TO ANNOTATE ARE EXPRESSED IN AXES FRACTION (INDEPENDENT FROM PIXELS) arrowprops=dict(edgecolor='black', facecolor='red', shrink=0.00), # PROPERTIES OF THE ARROW TO DRAW fontsize=20, color='red', horizontalalignment='center', verticalalignment='center' # ARROW STARTS BOTTOM RIGHT CORNER OF TEXT ) # INITIALIZES A FEW OUT OF LOOP VARIABLES TO BE USED # TO CALCULATE THE FINE-GRAINED COORDINATES OF THE # ROOT POINTS FOR THE CURVE ORIGINALLY CREATED WITH # THE "REAL WORLD" SAMPLES root_points = y_spl.derivative().roots() flg_first_loop = 'Y' # LOOPS OVER THE ROOT POINTS ARRAY, WE MUST ANNOTATE THEM ON THE CHART # THIS PART IS COMMENTED OUT BECAUSE EVEN IF IT WORKS CORRECTLY, ADDING # ROOT POINTS ON THE ADVANCED CHART ADDS CONFUSION TO THE VISUALIZATION # AND BEING THIS INFO NOT STRICTLY IMPORTANT IN THIS CONTEXT IT HAS # BEEN HIDDEN AT LEAST FOR NOW ###for root_point in root_points: ### if (flg_first_loop == 'Y'): ### flg_first_loop = 'N' ### annotation_text = 'Root Points of\nbest fit Cubic Spline' ### else: ### annotation_text = '\n' ### ### # ACTUALLY ANNOTATES THE ROOT POINT UNDER EXAM ON THE CHART ### ax.annotate(annotation_text, ### xy=(root_point, ff(root_point)), # COORDS 
OF ROOT POINT UNDER EXAM (TUPLE). WE REUSE FITTED SPLINE FUNCTION TO INFER Y COORDINATES OF THE ROOT POINTS ### xycoords='data', # COORDS OF THE POINT TO HIGHLIGHT ARE EXPRESSED AS DATA COORDS ### xytext=(0.2, -0.2), # COORDS OF TEXT TO ANNOTATE ### textcoords='axes fraction', # COORDS OF TEXT TO ANNOTATE ARE EXPRESSED IN AXES FRACTION (INDEPENDENT FROM PIXELS) ### arrowprops=dict(edgecolor='black', facecolor='red', shrink=0.00), # PROPERTIES OF THE ARROW TO DRAW ### fontsize=20, ### color='red', ### verticalalignment='center' # ARROW STARTS BOTTOM RIGHT CORNER OF TEXT ### ) # ANNOTATES DESCRIPTIVE TEXTBOX EXPLAINUNG ADVANCED CHART JUST BELOW THE "X" AXIS ax.text(0.0, -0.40, 'ADVANCED CHART - Starting from the real-world Data Samples Curve (BLUE), a best ' + \ '\n' + \ 'fitting Cubic Polynomial (GREEN) is calculated and plotted. Subsequently, the ' + \ '\n' + \ 'Second Derivative of the best fitting Polynomial is found and plotted (RED).' + \ '\n' + \ 'Finally, this Second Derivative curve is checked to find the most recent point ' + \ '\n' + \ 'in time where it crosses its Zero Value. We can use this point to infer the last ' + \ '\n' + \ 'INFLECTION POINT in the original real-world Data Samples Curve, and to determine ' + \ '\n' + \ 'if its actual Trend is INCREASING or DECREASING, according to the direction the ' + \ '\n' + \ 'Second Derivative crosses Zero (UPWARDS or DOWNWARDS). Please note that you have ' + \ '\n' + \ 'two Scales in this Chart, No. 
of Cases (LEFT) and Second Derivative value (RIGHT)' + \ '\n' + \ 'CAREFULLY READ THE LEGEND TO CHECK IF THIS IS AN INSTANT TREND ON PLAIN RAW VALUES' + \ '\n' + \ 'OR IF THIS IS A TREND CALCULATED ON A 5 DAYS MOVING AVERAGE OF RAW VALUES', transform=ax.transAxes, # TRANSFORMS SPECIFIED TEXT COORDS AS AXES FRACTION color='black', size=12, bbox=dict(facecolor='none', edgecolor='black', pad=5.0) ) # INSTANTIATE A SECOND AXES SET TO PLOT A SECOND DERIVATIVE LINE # SHARING THE SAME X-AXIS AS THE SAMPLED DATA CHART ax2 = ax.twinx() # SET LIMITS FOR THE SECOND AXES SET TO MATCH THE EXISTING UNDERLYING CHART ax2.set_xlim(ax.get_xlim()) # ACTUALLY ANNOTATES THE SECOND DERIVATIVE OF FITTED CUBIC SPLINE PLOT ax2.color = 'tab:black' ax2.set_ylabel('Second Derivative of fitted Cubic Spline', color='red', size=14) # WE ALREADY HANDLED X LABEL WITH "ax" ax2.plot(x_range,y_spl_2d(x_range), 'r--', label= country + ' - ' + key + ' - Second Derivative of BEST FIT CUBIC SPLINE', alpha=spline_alpha, linewidth=spline_linewidth ) # PLOTS SECOND DERIVATIVE ax2.tick_params(axis='y', labelcolor='red') # ADDITIONAL PARAMS FOR THE SECOND Y-AXIS SCALE ON THE RIGHT HAND SIDE ax2.axhline(y=0.0, color='black', linestyle='-.', label= country + ' - ' + key + ' - Second Derivative BASELINE') # SECOND DERIVATIVE HORIZONTAL BASELINE ax2.axvline(x=lowest_inflection_point_x, color='black', linestyle='-.') # SECOND DERIVATIVE VERTICAL INFLECTION POINT LINE # ACTUALLY ANNOTATES THE POINT WHERE THE SECOND DERIVATIVE OF # FITTED CUBIC SPLINE PLOT CROSSES ZERO (ON ITS OWN Y SCALE OF VALUES) ax2.annotate('Second Derivative of\nbest fit Cubic Spline\ncrossing Zero here', xy=(lowest_inflection_point_x, 0), # COORDS OF THE POINT TO HIGHLIGHT (TUPLE). 
ADDING 0.5 TO FIND MOST APPROX POINT WHERE DERIVATIVE HAS CHANGED IN SIGN xycoords='data', # COORDS OF THE POINT TO HIGHLIGHT ARE EXPRESSED AS DATA COORDS xytext=(0.8, -0.25), # COORDS OF TEXT TO ANNOTATE textcoords='axes fraction', # COORDS OF TEXT TO ANNOTATE ARE EXPRESSED IN AXES FRACTION (INDEPENDENT FROM PIXELS) arrowprops=dict(edgecolor='black', facecolor='red', shrink=0.00), # PROPERTIES OF THE ARROW TO DRAW fontsize=20, color='red', horizontalalignment='center', verticalalignment='center' # ARROW STARTS BOTTOM RIGHT CORNER OF TEXT ) # ACTIVATE LEGEND FOR SUBPLOT OF SECOND DERIVATIVE AND FOR # DRAWING THE BASELINES WHERE IT CROSSES ZERO (ON ITS OWN Y SCALE OF VALUES) ax2.legend(loc="upper right") # PIE CHARTS AGGREGATE MORE COUNTRIES ON A SINGLE CHART, SO THEY CAN'T BE CALLED # IN THIS SECTION E.G. RECURSIVELY! JUST RETURN elif kind == 'pie': return() elif kind == 'heatmap': # HEATMAP CHART if (flg_heatmaps != 1): # CHOOSE A COLOR FOR THE HEATMAP ACCORDING TO WHAT'S IN THE CHART TITLE if ("ACTIVE" in title.upper()): base_color = 'blue' elif ("CONFIRMED" in title.upper()): base_color = 'orange' elif ('RECOVERED' in title.upper()): base_color = 'green' elif ('FATALITIES' in title.upper()): base_color = 'red' else: base_color = 'grey' # HOUSTON, WE HAVE A HEATMAP! WE DON'T WANT TO LOOP MORE THAN ONE TIME HERE # (OK, THIS IS A VERY BAD SOLUTION TO AVOID THIS PIECE OF CODE BEING CALLED # MULTIPLE TIMES WHEN WE HAVE A HEATMAP AND WE DON'T WANT TO COLLAPSE ALL # THE COUNTRIES IN THE LIST IN JUST ONE RECORD) flg_heatmaps = 1 # IN HEATMAP CHARTS WE RECEIVE A READY-TO-BE-PLOTTED DATAFRAME IN INPUT # AS IT HAS ALREADY BEEN RANKED AND FILTERED E.G. 
TOP 20 COUNTRIES d = dict_of_dataframes[key] # WE FIRST FILTER OUR INITIAL DATAFRAMES KEEPING ALL THE COUNTRIES IN THE REQUESTED LIST d = d[d.index.isin(list_of_countries)] # THEN WE RESHAPE IT KEEPING THE LAST "REQUESTED DAYS" COLUMNS AND THE FIRST # "REQUESTED RANK" ROWS d = d.iloc[:,-start_date:].nlargest(num_ranked, d.columns[-1:], keep='first') # ARBITRARY AND APPROPRIATE LOWER BOUND FOR LOGARITHMIC COLORSCALE TO AVOID DIV BY ZERO LOGMIN = 0.1 # MINIMUM AND MAXIMUM NUMERIC VALUES IN OUR DATAFRAME, USED LATER TO INFER # THE LOGARITHMIC COLOR SCALE TO USE COLORS IN A BALANCED WAY ###mi, ma = dict_of_dataframes[key].values.min(), dict_of_dataframes[key].values.max() mi, ma = d.values.min(), d.values.max() #plot pivot table as heatmap using seaborn #ax = sns.heatmap(heatmap, square=True) #sns.heatmap(df1.iloc[:, 1:6:], annot=True, linewidths=.5, ax=ax) sns.heatmap(d, square=False, # HEATMAP CELLS DO NOT NEED TO BE SQUARE annot=True, # WE ARE GOING TO WRITE QUANTITIES INSIDE CELLS annot_kws={"size": 14}, # ANNOTATIONS FONT SIZE linewidths=0.01, # NO INTERLINE BETWEEN HEATMAP CELLS fmt='d', # ANNOTATIONS INSIDE OUR HEATMAP CELLS ARE DECIMAL NUMBERS cbar=True, # WE WANT A COLOR LEGEND WITH RELATED QUANTITIES TO SHOW UP cmap = sns.light_palette(base_color, n_colors=6), # WE USE A 6 TONES PALETTE OF THE SPECIFIED BASE COLOR xticklabels=True, # WE WANT TICKS ON THE X AXIS E.G. DAY AND DATE yticklabels=True, # WE WANT TICKS ON THE Y AXIS TOO E.G. 
COUNTRY NAMES norm=LogNorm(), # WE USE A LOGARITHMIC COLOR SCALE, OTHERWISE HEATMAP WOULD BE MOSTLY OF THE SAME COLOR vmin=max(mi, LOGMIN), # WE SET A MIN FOR OUR LOG SCALE ACCORDING TO MIN VALUE IN OUR DATAFRAME vmax=ma, # WE SET A MAX FOR OUR LOG SCALE ACCORDING TO MAX VALUES IN OUR DATAFRAME ax=ax ) # LET'S FIX A BUG IN SEABORN CAUSING FIRST AND LAST LINE OF A CHART TO BE TRUNCATED ax.set_ylim(len(d)+0.5, -0.5) # HEATMAPS NEED A BIGGER LAYOUT AND A TIGHT OUTPUT # Y INCHES DIMENSION IS MINIMUM 4 PLUS 0.5 * NUMBER OF ROWS IN OUR DATAFRAME f.set_size_inches(17, (4 + (0.5 * d.shape[0]))) plt.tight_layout() # BAR CHART else: # HOUSTON, WE HAVE A BARCHART! flg_barcharts = 1 d_tmp = dict_of_dataframes[key][dict_of_dataframes[key].index.isin([country])].T d_tmp = d_tmp.rename(columns={d_tmp.columns[0]: country + ' - ' + key}) d_tmp = d_tmp[d_tmp.index > start_date] d_barcharts = pd.concat([d_barcharts, d_tmp], axis=1) # IF INSTEAD WE REQUESTED TO COLLAPSE MULTIPLE COUNTRIES IN JUST ONE CHART SUMMING THEIR DATA UP # LET'S DO SOME PREPARATIONS ON THE ORIGINAL DATA FIRST else: for key in dict_of_dataframes.keys(): # LOOPS OVER DATAFRAMES PASSED IN INPUT kind = dict_of_charttypes[key] # EXTRACTS DESIRED CHART TYPE FOR THIS DATAFRAME, FROM THE CORRESPONDING KEY IN CHART TYPES DICTIONARY # REFORMATS DATA OF INTEREST USING THE PROPER FUNCTION # DEPENDING ON THE TYPE OF CHART BEING GENERATED if (kind == 'line' or kind == 'bar' or kind == 'heatmap'): # LINE, BAR OR HEATMAP CHART totalized_by_countries_dataframe = formatted_dataframe_totalize_countries(list_of_countries, dict_of_dataframes[key]) elif (kind == 'pie'): # PIE CHART pie_dataframe_filtered_by_date = dict_of_dataframes[key] for column_name in pie_dataframe_filtered_by_date: if (column_name < start_date): pie_dataframe_filtered_by_date = pie_dataframe_filtered_by_date.drop(columns=[column_name]) pie_dataframe_filtered_by_date = pie_dataframe_filtered_by_date.sum(axis=1).to_frame().T # EXTRACTS JUST THE LIST OF COUNTRIES 
OF INTEREST FOR OUR PIE CHART FROM THE EXPLICIT # LIST OF COUNTRIES THAT WE HAVE PASSED AS INPUT PARAMETER (E.G "list_of_world_confirmed_top5") totalized_by_countries_dataframe = formatted_dataframe_totalize_countries_for_pie_chart(list_of_countries, pie_dataframe_filtered_by_date) else: # OTHER CASES, NOT MANAGED. JUST RETURN return() if kind == 'line': # LINE CHART # IF THIS IS AN ADVANCED CHART, LINE WIDTH OF THE BASE DATA CHART MUST BE THICKER if (flg_advanced_chart == 'Y'): alpha = 0.5 style = 'bx-' linewidth = 6 # USED TO BE 12 markersize = 15 spline_alpha = 0.5 spline_style = 'go--' spline_linewidth = 6 spline_markersize = 12 figsize = (17,15.5) label_caption_trailer = ' - ORIGINAL SAMPLED DATA' # SPECIFY THAT THIS IS "ORIGINAL SAMPLED DATA" IN THE LEGEND else: linewidth = 6 style = 'x-' # USED TO BE '-' markersize = 15 # USED TO BE 0 alpha = 0.5 # USED TO BE 1.0 figsize = (17,11.5) label_caption_trailer = '' # DO NOT SPECIFY THAT THIS IS "ORIGINAL SAMPLED DATA" IN THE LEGEND # PREPROCESSES AND FILTERS THE DATAFRAME UNDER EXAM IN CURRENT LOOP d = totalized_by_countries_dataframe[totalized_by_countries_dataframe.index.isin(['0'])].T d = d.rename(columns={d.columns[0]: 'Many Countries' + ' - ' + key + label_caption_trailer}) d = d[d.index > start_date] # LINE CHARTS ARE PLOTTED AS THEY FLOW IN fig_to_be_saved = d.plot(kind=kind, logy=logy, figsize=figsize, grid=True, rot=90, alpha=alpha, style=style, linewidth=linewidth, markersize=markersize, legend=True, ax=ax ) # REFRESH LEGEND TO WORKAROUND MISSING MARKERS FOR SOME LINES ax.legend() # IF THIS IS AN ADVANCED CHART WITH CUBIC SPLINE FITTING if (flg_advanced_chart == 'Y'): # CREATES A SPACE UNDER THE CHART FOR THE SPECIAL CAPTIONS UNDER THE X AXIS SCALE plt.subplots_adjust(bottom=0.30) # DEEP COPIES THE DATAFRAME UNDER EXAM IN CURRENT LOOP data_DF = d.copy() # ADDS A PROGRESSIVE NUMBER COLUMN FOR POLYNOMIAL FITTING # AND RENAMES THE SAMPLED VALUES COLUMN WITH A PROPER NAME FOR DISPLAYING LEGEND LATER 
data_DF.insert(0, 'x', np.arange(len(data_DF))) data_DF.rename(columns={'Many Countries' + ' - ' + key + label_caption_trailer:'y'}, inplace=True) # PLS SEE ABOVE TO CHECK HOW COLUMN NAME WAS ASSIGNED # WE LOOP SEVERAL DEGREES TO PRE-FIT SPLINES TO SAMPLED DATA AND FIND THE BEST FITTING DEGREE ACCORDING TO MSE steps_tmp = np.linspace(2,19,18) steps = [int(i) for i in steps_tmp] best_degree = 1 best_error = 999999999999999 for degree in steps: try: #this_error = np.sum((np.polyval(np.polyfit(data_DF['x'], data_DF['y'], degree), data_DF['x']) - data_DF['y'])**2) ppp, this_error, _, _, _ = np.polyfit(data_DF['x'], data_DF['y'], degree, full=True) if (this_error < best_error): best_error = this_error best_degree = degree print('degree: ' + str(degree) + ' - error: ' + str(this_error)) #rmse = np.sqrt(mean_squared_error(data_DF['y'],np.polyfit(data_DF['x'], data_DF['y'], degree))) #print('rmse: ' + str(rmse)) #r2 = r2_score(y,y_poly_pred) except: print('exception') pass best_degree = 9 print('best_degree: ' + str(best_degree) + ' - best error: ' + str(best_error)) # CALCULATES THE TEMPORARY POLYNOMIAL WITH BEST DEGREE ACCORDING TO MSE z = np.polyfit(data_DF['x'], data_DF['y'], best_degree) ff = np.poly1d(z) # EXTRACTS THE NEW LIST OF "X" AND "Y" FROM THE TEMPORARY DEGREE 9 POLYNOMIAL x_new = np.linspace(data_DF['x'][0], data_DF['x'][-1], len(data_DF['x'])) # WITH THE LAST PARAMETER WE CREATE A LINESPACE OF THE SAME NUMER OF SAMPLES ON THE "X" AXIS AS THE LENGTH OF OUR SAMPLED VALUES ARRAY y_new = ff(x_new) # FINALLY, WE FIT THE FINAL CUBIC SPLINE ("k=4" MEANS DEGREE 3) # TO THE LINESPACE SAMPLES WE EXTRACTED FROM THE PREVIUSLY FIT # TEMPORARY DEGREE 9 SPLINE y_spl = UnivariateSpline(x_new,y_new,s=0,k=4) # CALCULATES SECOND DERIVATIVE FROM FITTED CUBIC SPLINE y_spl_2d = y_spl.derivative(n=2) # CREATES X AXIS LINESPACE RANGE FOR SECOND DERIVATIVE PLOT x_range = np.linspace(data_DF['x'][0],data_DF['x'][-1], len(data_DF['x'])) # WITH THE LAST PARAMETER WE CREATE A LINESPACE 
OF THE SAME NUMER OF SAMPLES ON THE "X" AXIS AS THE LENGTH OF OUR SAMPLED VALUES ARRAY # PLOTS FITTED CUBIC SPLINE ax.plot(x_range,y_spl(x_range), spline_style, label= 'Many Countries' + ' - ' + key + ' - BEST FIT CUBIC SPLINE', alpha=spline_alpha, linewidth=spline_linewidth, markersize=spline_markersize ) # ACTIVATE LEGEND FOR SUBPLOT OF CUBIC SPLINE DATA ON TOP LEFT ax.legend(loc="upper left") # ENRICHES DATAFRAME TO PLOT WITH 2ND DERIVATIVE COORDS AND CUBIC SPLINE COORDS data_DF.insert(2, 'spline_fit', y_spl(x_range)) data_DF.insert(3, '2nd_der', y_spl_2d(x_range)) # IDENTIFIES LAST MEANINGFULE INFLECTION POINT (IF ANY) check_2ndder = data_DF['2nd_der'] flg_first_loop = 'Y' # LOOPS IN REVERSE ORDER OVER THE ENRICHED DATAFRAME. WHEN THE SECOND DERIVATIVE # CHANGES IN SIGN, THAT'S WHERE THE MOST RECENT INFLECTION POINT HAPPENED # AND WE MUST ANNOTATE IT ON THE CHART for day_in_exam, value_2nd_der in reversed(list(enumerate(check_2ndder))): if (flg_first_loop == 'Y'): flg_first_loop = 'N' prev_value_2nd_der = value_2nd_der # DETECTS CHANGE IN SECOND DERIVATIVE DIRECTION - INFLECTION POINT FOUND if (((prev_value_2nd_der > 0) and (value_2nd_der < 0)) or ((prev_value_2nd_der < 0) and (value_2nd_der > 0))): inflection_point_coords = (day_in_exam, value_2nd_der) # CHECKS IF CURVE INCREASES OR DECREASES AFTER LAST INFLECTION POINT if (prev_value_2nd_der < 0): inflection_point_direction = 'DOWN' else: inflection_point_direction = 'UP' break # LAST INFLECTION POINT FOUND, LEAVES LOOP # CREATES AN ANNOTATION TRAILED TO BE USED TO ANNOTATE THE TREND # FOR THE CURVE ORIGINALLY CREATED WITH THE "REAL WORLD" SAMPLES if (inflection_point_direction == 'UP'): caption_trailer = 'Curve Trend is INCREASING' caption_color = 'red' else: caption_trailer = 'Curve Trend is DECREASING' caption_color = 'green' # INITIALIZES A FEW OUT OF LOOP VARIABLES TO BE USED # TO CALCULATE THE FINE-GRAINED COORDINATES OF THE LAST # INFLECTION POINT FOR THE CURVE ORIGINALLY CREATED WITH # THE "REAL 
WORLD" SAMPLES lowest_inflection_point_y = 9999999 lowest_inflection_point_x = 9999999 lowest_inflection_point_2ndder_y = 9999999 lowest_inflection_point_2ndder_x = 9999999 steps = np.linspace(0,1,11) # CALCULATES ACTUAL INFLECTION POINT CLOSEST COORDINATES LOOPING # BETWEEN THE DAYS WHERE IT LIES AT 0.1 STEPS TO FIND THE X WHERE 2ND DERIVATIVE # IS CLOSEST TO ZERO for inflection_point_x in steps: # CHECK FITTED CUBIC SPLINE AND SECOND DERIVATIVE ABSOLUTE VALUES AT THIS STEP inflection_point_y = ff(day_in_exam + inflection_point_x) inflection_point_2ndder_y = y_spl_2d(day_in_exam + inflection_point_x) # EVENTUALLY UPDATES NEW CLOSEST INFLECTION POINT COORDS # IF WE ARE CLOSER TO THE ABSOLUTE ZERO THAN IN THE PREVIOUS STEPS if (abs(inflection_point_2ndder_y) < abs(lowest_inflection_point_2ndder_y)): lowest_inflection_point_2ndder_y = abs(inflection_point_2ndder_y) lowest_inflection_point_y = inflection_point_y lowest_inflection_point_2ndder_x = (day_in_exam + inflection_point_x) lowest_inflection_point_x = (day_in_exam + inflection_point_x) # ANNOTATES THE LAST INFLECTION POINT ON OUR CHART ax.annotate('Last Inflection Point\nof best fit Cubic Spline\n' + caption_trailer, xy=(lowest_inflection_point_x, lowest_inflection_point_y), # COORDS OF THE POINT TO HIGHLIGHT (TUPLE). 
ADDING 0.5 TO FIND MOST APPROX POINT WHERE DERIVATIVE HAS CHANGED IN SIGN xycoords='data', # COORDS OF THE POINT TO HIGHLIGHT ARE EXPRESSED AS DATA COORDS xytext=(0.2, 0.8), # COORDS OF TEXT TO ANNOTATE textcoords='axes fraction', # COORDS OF TEXT TO ANNOTATE ARE EXPRESSED IN AXES FRACTION (INDEPENDENT FROM PIXELS) arrowprops=dict(edgecolor='black', facecolor='red', shrink=0.00), # PROPERTIES OF THE ARROW TO DRAW fontsize=20, color='red', horizontalalignment='center', verticalalignment='center' # ARROW STARTS BOTTOM RIGHT CORNER OF TEXT ) # INITIALIZES A FEW OUT OF LOOP VARIABLES TO BE USED # TO CALCULATE THE FINE-GRAINED COORDINATES OF THE # ROOT POINTS FOR THE CURVE ORIGINALLY CREATED WITH # THE "REAL WORLD" SAMPLES root_points = y_spl.derivative().roots() flg_first_loop = 'Y' # LOOPS OVER THE ROOT POINTS ARRAY, WE MUST ANNOTATE THEM ON THE CHART # THIS PART IS COMMENTED OUT BECAUSE EVEN IF IT WORKS CORRECTLY, ADDING # ROOT POINTS ON THE ADVANCED CHART ADDS CONFUSION TO THE VISUALIZATION # AND BEING THIS INFO NOT STRICTLY IMPORTANT IN THIS CONTEXT IT HAS # BEEN HIDDEN AT LEAST FOR NOW ###for root_point in root_points: ### if (flg_first_loop == 'Y'): ### flg_first_loop = 'N' ### annotation_text = 'Root Points of\nbest fit Cubic Spline' ### else: ### annotation_text = '\n' ### ### # ACTUALLY ANNOTATES THE ROOT POINT UNDER EXAM ON THE CHART ### ax.annotate(annotation_text, ### xy=(root_point, ff(root_point)), # COORDS OF ROOT POINT UNDER EXAM (TUPLE). 
WE REUSE FITTED SPLINE FUNCTION TO INFER Y COORDINATES OF THE ROOT POINTS ### xycoords='data', # COORDS OF THE POINT TO HIGHLIGHT ARE EXPRESSED AS DATA COORDS ### xytext=(0.2, -0.2), # COORDS OF TEXT TO ANNOTATE ### textcoords='axes fraction', # COORDS OF TEXT TO ANNOTATE ARE EXPRESSED IN AXES FRACTION (INDEPENDENT FROM PIXELS) ### arrowprops=dict(edgecolor='black', facecolor='red', shrink=0.00), # PROPERTIES OF THE ARROW TO DRAW ### fontsize=20, ### color='red', ### verticalalignment='center' # ARROW STARTS BOTTOM RIGHT CORNER OF TEXT ### ) # ANNOTATES DESCRIPTIVE TEXTBOX EXPLAINUNG ADVANCED CHART JUST BELOW THE "X" AXIS ax.text(0.0, -0.40, 'ADVANCED CHART - Starting from the real-world Data Samples Curve (BLUE), a best ' + \ '\n' + \ 'fitting Cubic Polynomial (GREEN) is calculated and plotted. Subsequently, the ' + \ '\n' + \ 'Second Derivative of the best fitting Polynomial is found and plotted (RED).' + \ '\n' + \ 'Finally, this Second Derivative curve is checked to find the most recent point ' + \ '\n' + \ 'in time where it crosses its Zero Value. We can use this point to infer the last ' + \ '\n' + \ 'INFLECTION POINT in the original real-world Data Samples Curve, and to determine ' + \ '\n' + \ 'if its actual Trend is INCREASING or DECREASING, according to the direction the ' + \ '\n' + \ 'Second Derivative crosses Zero (UPWARDS or DOWNWARDS). Please note that you have ' + \ '\n' + \ 'two Scales in this Chart, No. 
of Cases (LEFT) and Second Derivative value (RIGHT)' + \ '\n' + \ 'CAREFULLY READ THE LEGEND TO CHECK IF THIS IS AN INSTANT TREND ON PLAIN RAW VALUES' + \ '\n' + \ 'OR IF THIS IS A TREND CALCULATED ON A 5 DAYS MOVING AVERAGE OF RAW VALUES', transform=ax.transAxes, # TRANSFORMS SPECIFIED TEXT COORDS AS AXES FRACTION color='black', size=12, bbox=dict(facecolor='none', edgecolor='black', pad=5.0) ) # INSTANTIATE A SECOND AXES SET TO PLOT A SECOND DERIVATIVE LINE # SHARING THE SAME X-AXIS AS THE SAMPLED DATA CHART ax2 = ax.twinx() # SET LIMITS FOR THE SECOND AXES SET TO MATCH THE EXISTING UNDERLYING CHART ax2.set_xlim(ax.get_xlim()) # ACTUALLY ANNOTATES THE SECOND DERIVATIVE OF FITTED CUBIC SPLINE PLOT ax2.color = 'tab:black' ax2.set_ylabel('Second Derivative of fitted Cubic Spline', color='red', size=14) # WE ALREADY HANDLED X LABEL WITH "ax" ax2.plot(x_range,y_spl_2d(x_range), 'r--', label= 'Many Countries' + ' - ' + key + ' - Second Derivative of BEST FIT CUBIC SPLINE', alpha=spline_alpha, linewidth=spline_linewidth ) # PLOTS SECOND DERIVATIVE ax2.tick_params(axis='y', labelcolor='red') # ADDITIONAL PARAMS FOR THE SECOND Y-AXIS SCALE ON THE RIGHT HAND SIDE ax2.axhline(y=0.0, color='black', linestyle='-.', label= 'Many Countries' + ' - ' + key + ' - Second Derivative BASELINE') # SECOND DERIVATIVE HORIZONTAL BASELINE ax2.axvline(x=lowest_inflection_point_x, color='black', linestyle='-.') # SECOND DERIVATIVE VERTICAL INFLECTION POINT LINE # ACTUALLY ANNOTATES THE POINT WHERE THE SECOND DERIVATIVE OF # FITTED CUBIC SPLINE PLOT CROSSES ZERO (ON ITS OWN Y SCALE OF VALUES) ax2.annotate('Second Derivative of\nbest fit Cubic Spline\ncrossing Zero here', xy=(lowest_inflection_point_x, 0), # COORDS OF THE POINT TO HIGHLIGHT (TUPLE). 
ADDING 0.5 TO FIND MOST APPROX POINT WHERE DERIVATIVE HAS CHANGED IN SIGN xycoords='data', # COORDS OF THE POINT TO HIGHLIGHT ARE EXPRESSED AS DATA COORDS xytext=(0.8, -0.25), # COORDS OF TEXT TO ANNOTATE textcoords='axes fraction', # COORDS OF TEXT TO ANNOTATE ARE EXPRESSED IN AXES FRACTION (INDEPENDENT FROM PIXELS) arrowprops=dict(edgecolor='black', facecolor='red', shrink=0.00), # PROPERTIES OF THE ARROW TO DRAW fontsize=20, color='red', horizontalalignment='center', verticalalignment='center' # ARROW STARTS BOTTOM RIGHT CORNER OF TEXT ) # ACTIVATE LEGEND FOR SUBPLOT OF SECOND DERIVATIVE AND FOR # DRAWING THE BASELINES WHERE IT CROSSES ZERO (ON ITS OWN Y SCALE OF VALUES) ax2.legend(loc="upper right") elif kind == 'pie': # PIE CHART d = totalized_by_countries_dataframe[list_of_countries].T # CREATES A "country" COLUMN WITH THE LIST OF COUNTRIES CURRENTLY IN THE INDEX d['country'] = d.index # STILL NEED TO FIX DATE FILTERING HERE AS INDEX DOESN'T CONTAIN DATES AS IN THE OTHER CASES #d = d[d.index > start_date] # RENAMES DATA COLUMN WITH A BLANK STRING SO THAT Y AXIS LABEL FOR PIE # CHARTS WON'T BE DRAWN. THIS LABEL COULD OF COURSE BE SET TO SOMETHING # MEANINGFUL E.G. 
"Share" OR OTHER STRINGS DESCRIBING THE PLOTTED QTYS d = d.rename(columns={d.columns[0]: ' '}) # PIE CHARTS ARE PLOTTED AS THEY FLOW IN fig_to_be_saved = d.plot(kind = "pie", y=" ", legend=False, autopct="%.2f%%", figsize=(15,10), ax=ax ) elif kind == 'heatmap': # HEATMAP CHART # CHOOSE A COLOR FOR THE HEATMAP ACCORDING TO WHAT'S IN THE CHART TITLE if ("ACTIVE" in title.upper()): base_color = 'blue' elif ("CONFIRMED" in title.upper()): base_color = 'orange' elif ('RECOVERED' in title.upper()): base_color = 'green' elif ('FATALITIES' in title.upper()): base_color = 'red' else: base_color = 'grey' # FIRST WE READ OUR INITIAL DATAFRAME, ALREADY TOTALIZED BY COUNTRY AT THE BEGINNING # OF THIS CODE BLOCK d = totalized_by_countries_dataframe[totalized_by_countries_dataframe.index.isin(['0'])] d = d.rename(index={d.index[0]: 'Many Countries' + ' - ' + key}) #d = d[d.index > start_date] # DATAFRAME FOR HEATMAPS KEEPS LAST 2 WKS ALREADY! # WE RESHAPE OUR INITIAL DATAFRAME KEEPING THE LAST "REQUESTED DAYS" COLUMNS AND THE FIRST # "REQUESTED RANK" ROWS d = d.iloc[:,-start_date:].nlargest(num_ranked, d.columns[-1:], keep='first') # ARBITRARY AND APPROPRIATE LOWER BOUND FOR LOGARITHMIC COLORSCALE TO AVOID DIV BY ZERO LOGMIN = 0.1 # MINIMUM AND MAXIMUM NUMERIC VALUES IN OUR DATAFRAME, USED LATER TO INFER # THE LOGARITHMIC COLOR SCALE TO USE COLORS IN A BALANCED WAY ###mi, ma = dict_of_dataframes[key].values.min(), dict_of_dataframes[key].values.max() mi, ma = d.values.min(), d.values.max() #plot pivot table as heatmap using seaborn #ax = sns.heatmap(heatmap, square=True) #sns.heatmap(df1.iloc[:, 1:6:], annot=True, linewidths=.5, ax=ax) sns.heatmap(d, square=False, # HEATMAP CELLS DO NOT NEED TO BE SQUARE annot=True, # WE ARE GOING TO WRITE QUANTITIES INSIDE CELLS annot_kws={"size": 1}, # ANNOTATIONS FONT SIZE linewidths=0.01, # NO INTERLINE BETWEEN HEATMAP CELLS fmt='d', # ANNOTATIONS INSIDE OUR HEATMAP CELLS ARE DECIMAL NUMBERS cbar=True, # WE WANT A COLOR LEGEND WITH RELATED 
QUANTITIES TO SHOW UP cmap = sns.light_palette(base_color, n_colors=6), # WE USE A 6 TONES PALETTE OF THE SPECIFIED BASE COLOR xticklabels=True, # WE WANT TICKS ON THE X AXIS E.G. DAY AND DATE yticklabels=False, # FOR THIS AGGREGATED MULTI-COUNTRIES HEATMAP, WE DO NOT WANT THE Y LABELS! NO RELEVANT COUNTRY NAME HERE norm=LogNorm(), # WE USE A LOGARITHMIC COLOR SCALE, OTHERWISE HEATMAP WOULD BE MOSTLY OF THE SAME COLOR vmin=max(mi, LOGMIN), # WE SET A MIN FOR OUR LOG SCALE ACCORDING TO MIN VALUE IN OUR DATAFRAME vmax=ma, # WE SET A MAX FOR OUR LOG SCALE ACCORDING TO MAX VALUES IN OUR DATAFRAME ax=ax ) # LET'S FIX A BUG IN SEABORN CAUSING FIRST AND LAST LINE OF A CHART TO BE TRUNCATED ax.set_ylim(len(d)+0.5, -0.5) # A FEW SETTINGS REGARDING AXIS TICKS AND LABELS FOR HEATMAPS ###plt.setp(ax.xaxis.get_majorticklabels(), rotation=90) # Y AXIS LABEL, CENTERED plt.ylabel('Many Countries' + ' - ' + key, fontsize = 10) # HEATMAPS NEED A BIGGER LAYOUT AND A TIGHT OUTPUT # Y INCHES DIMENSION IS MINIMUM 4 PLUS 0.5 * NUMBER OF ROWS IN OUR DATAFRAME f.set_size_inches(17, (4 + (0.5 * d.shape[0]))) plt.tight_layout() elif kind == 'bar': # BAR CHART flg_barcharts = 1 # HOUSTON, WE HAVE A BARCHART d_tmp = totalized_by_countries_dataframe[totalized_by_countries_dataframe.index.isin(['0'])].T d_tmp = d_tmp.rename(columns={d_tmp.columns[0]: 'Many Countries' + ' - ' + key}) d_tmp = d_tmp[d_tmp.index > start_date] d_barcharts = pd.concat([d_barcharts, d_tmp], axis=1) else: # OTHER CASES, NOT MANAGED. 
JUST RETURN return() # IF WE HAVE PROCESSED BAR CHARTS IN OUR MAIN LOOP, WE HAVE TO PLOT THEM OUT OF IT # BECAUSE IN THE MAIN LOOP WE JUST CONCATENATE DATAFRAMES if flg_barcharts == 1: fig_to_be_saved = d_barcharts.plot(kind='bar', logy=logy, legend=True, figsize=(17,11.5), grid=True, rot=90, stacked=False, ax=ax ) # REFRESH LEGEND TO WORKAROUND MISSING MARKERS FOR SOME LINES ax.legend() # IF LINE OR BAR CHART, DRAW AXES LABELS if (kind == 'line' or kind == 'bar'): ax.set_xlabel("Date", size=14) ax.set_ylabel("Number of Cases", size=14) # SHOWS THE CHART ON SCREEN plt.show() # SAVES RESULTING BAR CHARTS if (flg_save_localdir == 'N'): f.savefig('charts/' + filename + '.png') # SAVES CHART IN "charts" SUBDIRECTORY else: f.savefig(filename + '.png') # SAVES CHART IN LOCAL DIRECTORY # PLOTS A SINGLE PLOT PRESENTING (AS BIG FONT NUMBERS) THE GRANDTOTALS SUMMARY OF: # - CONFIRMED CASES # - ACTIVE CASES # - RECOVERED CASES # - DEATHS # ALSO, THE GENERATED PLOT IS SAVED IN ".png" FORMAT IN THE "charts" SUBDIR # --------------------------------------------------------------------------------------------- # THIS PLOTTING UTILITY FUNCTION TAKES IN INPUT: # A LIST OF NUMBERS TO PLOT (E.G. CONFIRMED, ACTIVE, RECOVERED, DEATHS) # A LIST OF PERCENTAGES OVER TOTAL TO PLOT (E.G. CONFIRMED, ACTIVE, RECOVERED, DEATHS) # A LIST OF SUBTITLES TO PLOT IN EACH OUTPUT BOX (E.G. 
# CONFIRMED, ACTIVE, RECOVERED, DEATHS)
# A LIST OF COLORS TO PLOT THE OUTPUT BOXES
# A LIST OF COLORS TO PLOT THE OUTPUT BOXES TEXTS
# A TITLE FOR THE FINAL CHART
# A FLAG SPECIFYING IF WE WANT TO SAVE THE CHARTS IN THE LOCAL DIR ("Y") OR IN A SEPARATED "./charts" DIR ("N")
def plot_summary_headings_chart(list_of_numbers, list_of_percs, list_of_subtitles,
                                list_of_colors, list_of_textcolors, title,
                                flg_save_localdir='N'):
    """Plot a one-row summary of big-font grandtotal numbers, one colored box each.

    Each box shows a subtitle, a large scalar quantity and (optionally) a
    percentage below it.  The figure is shown on screen and saved as a ".png".

    Parameters:
        list_of_numbers:    scalar quantities, one per box (e.g. Confirmed, Active...)
        list_of_percs:      percentages over total, one per box; a value that
                            rounds to 0 intentionally hides the percentage text
        list_of_subtitles:  box titles, one per box
        list_of_colors:     box background colors, one per box
        list_of_textcolors: box text colors, one per box
        title:              overall figure title, also used to build the file name
        flg_save_localdir:  'N' saves under "charts/", anything else saves in
                            the local directory
    """
    # FILE NAME FOR SAVING: TITLE WITH NEWLINES/TABS REMOVED, SPACES -> "_",
    # TRUNCATED TO 250 CHARS TO STAY WITHIN FILESYSTEM LIMITS
    filename = title.replace('\n', '').replace('\t', '').replace(' ', '_')[:250]

    # ONE SUBPLOT ("BOX") PER QUANTITY. squeeze=False GUARANTEES A 2D AXES
    # ARRAY EVEN FOR A SINGLE BOX, SO "ax1[0, i]" INDEXING ALWAYS WORKS
    f, ax1 = plt.subplots(1, len(list_of_numbers), figsize=(17, 4), squeeze=False)

    # SETS CHART TITLE AND LAYOUT PARAMS
    plt.subplots_adjust(top=0.4)
    f.suptitle(title, color='black', size=18, y=0.98)

    for i, number in enumerate(list_of_numbers):
        ax1[0, i].set_title(list_of_subtitles[i], color='black', size=16)
        ax1[0, i].set_facecolor(list_of_colors[i])
        ax1[0, i].text(0.07, 0.5, str(number), fontsize=35,
                       color=list_of_textcolors[i])

        # BUGFIX: the original read the global "summary_percs" here, silently
        # ignoring the "list_of_percs" parameter (which was never used).
        # IF THE PERCENTAGE ROUNDS TO 0 (INTENTIONAL, TO HIDE THE PERC TEXT)
        # WE SKIP DRAWING IT, OTHERWISE IT GOES JUST BELOW THE MAIN QUANTITY
        if round(list_of_percs[i], 2) > 0:
            ax1[0, i].text(0.07, 0.15, str(round(list_of_percs[i], 2)) + '%',
                           fontsize=25, color=list_of_textcolors[i])

        # REMOVE TICKS AND TICK LABELS FROM ALL BOX EDGES
        ax1[0, i].tick_params(
            axis='both',        # CHANGES APPLY TO BOTH AXES
            which='both',       # BOTH MAJOR AND MINOR TICKS AFFECTED
            bottom=False,       # BOTTOM EDGE TICKS OFF
            top=False,          # TOP EDGE TICKS OFF
            left=False,         # LEFT EDGE TICKS OFF
            labelbottom=False,  # BOTTOM EDGE LABELS OFF
            labelleft=False)    # LEFT EDGE LABELS OFF

    # SAVES RESULTING SUMMARY
    # (NOTE: assumes the "charts/" subdirectory already exists -- TODO confirm)
    if flg_save_localdir == 'N':
        plt.savefig('charts/' + filename + '.png')  # SAVES CHART IN "charts" SUBDIRECTORY
    else:
        plt.savefig(filename + '.png')              # SAVES CHART IN LOCAL DIRECTORY
    plt.show()
# -

# ---
# # Section 2 - Pull Confirmed, Recovered and Deaths stats from JHU CSSE

# ### Reads and parses JHU CSSE COVID-19 stats Git Repo, saving the contents of the daily updated CSV files of interest straight into Pandas Dataframes:

# +
# PREPARES DATAFRAMES READING ORIGINAL DATA FROM LATEST FILES @ JOHNS HOPKINS GIT REPO
url_confirmed_DF = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
confirmed_orig_DF = pd.read_csv(url_confirmed_DF, sep=',')

url_deaths_DF = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
deaths_orig_DF = pd.read_csv(url_deaths_DF, sep=',')

url_recovered_DF = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'
recovered_orig_DF = pd.read_csv(url_recovered_DF, sep=',')
# -

# ---
# # Section 3 - Data consolidation

# ### Now we can start to put all the pieces together.
# ### With regards to the Recovered stats, the first thing to do is to integrate the data we just read from our daily increasing "recovered_DF.csv" file with the data we pulled from Worldometers:

# ### Then, we can perform all the initial aggregations needed to have a number of homogeneous Dataframes, with all the data we need, ready to be queried to produce charts:

# +
#############################################################
##
## AGGREGATED DATAFRAMES - CUMULATIVE AND DAILY QUANTITIES
##
#############################################################

# CUMULATIVE CONFIRMED TOTALS, ONE RECORD PER COUNTRY.
# "reformat_dataframe" APPLIES THE PER-COUNTRY FIXES DICTIONARY;
# "fix_na_columns" MAKES SURE ALL NUMERIC COLUMNS ARE "Int64"
# (CORRECTS ISSUES IN THE DATASET SINCE 2020/03/07)
confirmed_DF = reformat_dataframe(confirmed_orig_DF, confirmed_fixes_dict)
confirmed_DF = fix_na_columns(confirmed_DF)

# DAILY-SPLITTED (NON-CUMULATIVE) VERSION OF THE CONFIRMED TOTALS, ONE RECORD PER COUNTRY
reduced_confirmed_DF = undo_cum_dataframe(confirmed_DF)

# CUMULATIVE DEATHS TOTALS, ONE RECORD PER COUNTRY (SAME PIPELINE AS CONFIRMED)
deaths_DF = reformat_dataframe(deaths_orig_DF, deaths_fixes_dict)
deaths_DF = fix_na_columns(deaths_DF)

# DAILY-SPLITTED VERSION OF THE DEATHS TOTALS, ONE RECORD PER COUNTRY
reduced_deaths_DF = undo_cum_dataframe(deaths_DF)

# CUMULATIVE RECOVERED TOTALS, ONE RECORD PER COUNTRY (SAME PIPELINE AS CONFIRMED)
recovered_DF = reformat_dataframe(recovered_orig_DF, recovered_fixes_dict)
recovered_DF = fix_na_columns(recovered_DF)

# DAILY-SPLITTED VERSION OF THE RECOVERED TOTALS, ONE RECORD PER COUNTRY
reduced_recovered_DF = undo_cum_dataframe(recovered_DF)

# CUMULATIVE ACTIVE CASES, ONE RECORD PER COUNTRY.
# BY DEFINITION: ACTIVE = CONFIRMED - (RECOVERED + DEATHS)
active_DF = (confirmed_DF.sub(recovered_DF)).sub(deaths_DF)
active_DF = fix_na_columns(active_DF)

# DAILY-SPLITTED VERSION OF THE ACTIVE CASES, ONE RECORD PER COUNTRY
reduced_active_DF = undo_cum_dataframe(active_DF)

# DAILY-SPLITTED CONFIRMED CASES SMOOTHED WITH A ROLLING MEAN, ONE RECORD PER COUNTRY.
# NOTE(review): the window is 7 days here, although labels elsewhere in this
# notebook say "5 Days Avg" -- confirm which is intended.
reduced_moving_avg_confirmed_DF = reduced_confirmed_DF.T.rolling(7).mean().T.fillna(0)

# PROGRESSIVE CUMULATIVE TOTAL OF THE SMOOTHED CONFIRMED CASES, ONE RECORD PER COUNTRY
moving_avg_confirmed_DF = reduced_moving_avg_confirmed_DF.cumsum(axis=1)

# DAILY-SPLITTED RECOVERED CASES SMOOTHED WITH THE SAME 7-DAY ROLLING MEAN
reduced_moving_avg_recovered_DF = reduced_recovered_DF.T.rolling(7).mean().T.fillna(0)

# PROGRESSIVE CUMULATIVE TOTAL OF THE SMOOTHED RECOVERED CASES
moving_avg_recovered_DF = reduced_moving_avg_recovered_DF.cumsum(axis=1)

# DAILY-SPLITTED DEATHS SMOOTHED WITH THE SAME 7-DAY ROLLING MEAN
reduced_moving_avg_deaths_DF = reduced_deaths_DF.T.rolling(7).mean().T.fillna(0)

# PROGRESSIVE CUMULATIVE TOTAL OF THE SMOOTHED DEATHS
moving_avg_deaths_DF = reduced_moving_avg_deaths_DF.cumsum(axis=1)

# DAILY-SPLITTED ACTIVE CASES (ALREADY CONFIRMED - (RECOVERED + DEATHS))
# SMOOTHED WITH THE SAME 7-DAY ROLLING MEAN
reduced_moving_avg_active_DF = reduced_active_DF.T.rolling(7).mean().T.fillna(0)

# PROGRESSIVE CUMULATIVE TOTAL OF THE SMOOTHED ACTIVE CASES
moving_avg_active_DF = reduced_moving_avg_active_DF.cumsum(axis=1)

# CUMULATIVE CONFIRMED TOTALS REFORMATTED FOR PIE CHARTS:
# JUST ONE RECORD OF VALUES FOR ALL COUNTRIES (COLUMNS)
confirmed_pie_DF = reformat_dataframe_for_pie_chart(confirmed_orig_DF)

# CUMULATIVE DEATHS TOTALS REFORMATTED FOR PIE CHARTS, SAME SHAPE AS ABOVE
deaths_pie_DF = reformat_dataframe_for_pie_chart(deaths_orig_DF)

# BEFORE JHU DISMISSED THE RECOVERED STATS, "recovered_orig_DF" WAS REFORMATTED
# WITH "reformat_dataframe_for_pie_chart" LIKE THE TWO DATAFRAMES ABOVE.
# NOW "recovered_DF" HAS ALREADY BEEN PROCESSED WHEN PULLING DATA FROM
# WORLDOMETERS AND IS READY TO BE USED, SO WE JUST COPY IT
recovered_pie_DF = recovered_DF.copy()

# CUMULATIVE ACTIVE CASES FOR PIE CHARTS: CONFIRMED - (RECOVERED + DEATHS)
active_pie_DF = (confirmed_pie_DF.sub(recovered_pie_DF)).sub(deaths_pie_DF)

# HEATMAP CHART SOURCE DATAFRAMES. THE LAST-14-DAYS / TOP-20-COUNTRIES
# FILTERING MENTIONED IN THE ORIGINAL COMMENTS IS NOT DONE HERE -- THESE ARE
# PLAIN ALIASES; the slicing presumably happens at plot time (see the heatmap
# branch of the plotting function) -- TODO confirm.
confirmed_heatmap_DF = confirmed_DF
reduced_confirmed_heatmap_DF = reduced_confirmed_DF
deaths_heatmap_DF = deaths_DF
reduced_deaths_heatmap_DF = reduced_deaths_DF
recovered_heatmap_DF = recovered_DF
reduced_recovered_heatmap_DF = reduced_recovered_DF
active_heatmap_DF = active_DF
reduced_active_heatmap_DF = reduced_active_DF
# -

# ### Before further proceeding, we can peek inside the latest "recovered_DF" and "reduced_recovered_DF" Dataframes to check especially the last two columns, to see if discrepancies arose since yesterday e.g. if the number of Recoveries for a certain Country dropped instead of increasing etc. Remember that that could be a correct situation too, e.g. if the Authorities revised some reports from the day before and decided that they wish to retire a certain number of Recoveries because of (to us) unknown reasons!

# WIDEN PANDAS DISPLAY LIMITS SO THE FULL DATAFRAMES ARE SHOWN IN THE NOTEBOOK
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)

recovered_DF.head(500)

reduced_recovered_DF.head(500)

# ### Time to create a few customized lists of Countries, to be use for fast reference later when we'll be drawing aggregated charts (e.g. for EU Countries) without the need to manually write the list of EU Countries all the times:

# +
# WORLD COUNTRIES UPDATED LIST OF ALL NATIONS AVAILABLE IN LATEST DATA @ JOHNS HOPKINS
list_of_countries_world = confirmed_DF.index.tolist()

# WORLD COUNTRIES EXCEPT CHINA. DEEP COPY SO THE ".remove" BELOW DOES NOT
# MUTATE THE ORIGINAL WORLD LIST
list_of_countries_world_nochina = copy.deepcopy(list_of_countries_world)
list_of_countries_world_nochina.remove('China')

# EUROPEAN COUNTRIES LIST IS HARDCODED.
# UPDATE MANUALLY IF NEEDED
list_of_countries_europe = ['Austria', 'Belgium', 'Croatia', 'Czechia', 'Denmark',
                            'Estonia', 'Finland', 'France', 'Germany', 'Greece',
                            'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg',
                            'Netherlands', 'Portugal', 'Romania', 'Spain', 'Sweden',
                            'Norway', 'Switzerland', 'United Kingdom', 'San Marino',
                            'Monaco']

# EUROPEAN COUNTRIES EXCEPT ITALY. DEEP COPY SO THE ".remove" BELOW DOES NOT
# MUTATE THE ORIGINAL EUROPEAN LIST
list_of_countries_europe_noitaly = copy.deepcopy(list_of_countries_europe)
list_of_countries_europe_noitaly.remove('Italy')
# -

# ### VERY IMPORTANT - Now we create a dictionary of chart types. This dictionary will map all the aggregated Dataframes we created before to the relevant chart types it can be used for. When later, for instance, we'll call the plotting function for the "Cumulative Confirmed Heatmap" statistic, the plotting function will infer that the chart to be produced is a Heatmap:

# +
# POPULATES CHART TYPES DICTIONARY FOR EACH OF THE POSSIBLE DATAFRAMES TYPES WE ARE GOING TO PLOT LATER
# VALID TYPES ARE 'line', 'bar', 'heatmap' AND 'pie'
# DICTIONARY KEYS MUST MATCH THE CORRESPONDING DATAFRAME KEY THAT WE'LL USE LATER WHEN PLOTTING THE CHARTS
dict_of_charttypes = {'Daily Confirmed': 'bar',
                      'Daily Recovered': 'bar',
                      'Daily Fatalities': 'bar',
                      'Daily Active': 'bar',
                      'Daily Confirmed 5 Days Avg': 'bar',
                      'Daily Recovered 5 Days Avg': 'bar',
                      'Daily Fatalities 5 Days Avg': 'bar',
                      'Daily Active 5 Days Avg': 'bar',
                      'Daily Confirmed Heatmap': 'heatmap',
                      'Daily Recovered Heatmap': 'heatmap',
                      'Daily Fatalities Heatmap': 'heatmap',
                      'Daily Active Heatmap': 'heatmap',
                      'Cumulative Confirmed': 'line',
                      'Cumulative Recovered': 'line',
                      'Cumulative Fatalities': 'line',
                      'Cumulative Active': 'line',
                      'Cumulative Confirmed 5 Days Avg': 'line',
                      'Cumulative Recovered 5 Days Avg': 'line',
                      'Cumulative Fatalities 5 Days Avg': 'line',
                      'Cumulative Active 5 Days Avg': 'line',
                      'Cumulative Confirmed Heatmap': 'heatmap',
                      'Cumulative Recovered Heatmap':
'heatmap', 'Cumulative Fatalities Heatmap': 'heatmap', 'Cumulative Active Heatmap': 'heatmap', 'Cumulative Confirmed Shares': 'pie', 'Cumulative Recovered Shares': 'pie', 'Cumulative Fatalities Shares': 'pie'} # ABOVE, WE DO NOT ADD "Cumulative Active Shares" (THAT WOULD BE A PIE CHART) BECAUSE # IT MAY CONTAIN NEGATIVE VALUES THAT WOULD BE A PROBLEM WHEN PLOTTING PIE CHARTS # ALSO, THERE ARE NO DAILY-SPLITTED PIE CHARTS, BECAUSE IT WOULD NOT MAKE MUCH # SENSE TO PUT TOGETHER IN A PIE CHART DAILY-SPLITTED DATA (TOO MANY SLICES) # - # ### Time to calculate all the Grandtotals that we'll display at Country level or at Continental/World level when we'll actually plot our charts of choice. We'll make use of the aggregated lists of Countries that we previously populated, too: # + ############################################################# ## ## GRANDTOTALS DICTIONARIES INITIALIZATION ## ############################################################# # SCALAR QUANTITIES GRANDTOTALS DICTIONARIES dict_of_active_grandtotals = {} dict_of_confirmed_grandtotals = {} dict_of_recovered_grandtotals = {} dict_of_deaths_grandtotals = {} # PERCENTAGES GRANDTOTALS DICTIONARIES dict_of_active_grandtotals_percs = {} dict_of_recovered_grandtotals_percs = {} dict_of_deaths_grandtotals_percs = {} ############################################################# ## ## GRANDTOTALS SCALAR COUNTERS OF INTEREST ## ############################################################# # AFFECTED COUNTRIES WORLDWIDE IS THE "Y" SHAPE OF THE "confirmed_DF" DATAFRAME num_affected_countries = confirmed_DF.shape[0] ############################################################# ## ## GRANDTOTALS - SPECIFIC "OUT OF LOOP" LISTS OF COUNTRIES PREVIUOSLY ASSIGNED TO CUSTOM LISTS ## ############################################################# # CALCULATES WORLD GRANDTOTALS TO BE SHOWN ON TOP OF THE CHARTS LIST # AND ASSIGNS THEM TO THE RELEVANT DICTIONARY OF GRANDTOTALS DATAFRAMES dict_of_active_grandtotals['world'] = 
(formatted_dataframe_totalize_countries(list_of_countries_world, reduced_active_DF).sum(axis=1))[0]
# NOTE(review): formatted_dataframe_totalize_countries is defined earlier in the notebook;
# it appears to return a single-row DataFrame, so .sum(axis=1)[0] yields one scalar - confirm
dict_of_confirmed_grandtotals['world'] = (formatted_dataframe_totalize_countries(list_of_countries_world, reduced_confirmed_DF).sum(axis=1))[0]
dict_of_recovered_grandtotals['world'] = (formatted_dataframe_totalize_countries(list_of_countries_world, reduced_recovered_DF).sum(axis=1))[0]
dict_of_deaths_grandtotals['world'] = (formatted_dataframe_totalize_countries(list_of_countries_world, reduced_deaths_DF).sum(axis=1))[0]
# PERCENTAGES ARE EXPRESSED OVER THE CONFIRMED GRANDTOTAL (0-100 SCALE)
dict_of_active_grandtotals_percs['world'] = (dict_of_active_grandtotals['world'] / dict_of_confirmed_grandtotals['world']) * 100
dict_of_recovered_grandtotals_percs['world'] = (dict_of_recovered_grandtotals['world'] / dict_of_confirmed_grandtotals['world']) * 100
dict_of_deaths_grandtotals_percs['world'] = (dict_of_deaths_grandtotals['world'] / dict_of_confirmed_grandtotals['world']) * 100

# CALCULATES WORLD EXCEPT CHINA GRANDTOTALS TO BE SHOWN ON TOP OF THE CHARTS LIST
# AND ASSIGNS THEM TO THE RELEVANT DICTIONARY OF GRANDTOTALS DATAFRAMES
dict_of_active_grandtotals['world_nochina'] = (formatted_dataframe_totalize_countries(list_of_countries_world_nochina, reduced_active_DF).sum(axis=1))[0]
dict_of_confirmed_grandtotals['world_nochina'] = (formatted_dataframe_totalize_countries(list_of_countries_world_nochina, reduced_confirmed_DF).sum(axis=1))[0]
dict_of_recovered_grandtotals['world_nochina'] = (formatted_dataframe_totalize_countries(list_of_countries_world_nochina, reduced_recovered_DF).sum(axis=1))[0]
dict_of_deaths_grandtotals['world_nochina'] = (formatted_dataframe_totalize_countries(list_of_countries_world_nochina, reduced_deaths_DF).sum(axis=1))[0]
dict_of_active_grandtotals_percs['world_nochina'] = (dict_of_active_grandtotals['world_nochina'] / dict_of_confirmed_grandtotals['world_nochina']) * 100
dict_of_recovered_grandtotals_percs['world_nochina'] = (dict_of_recovered_grandtotals['world_nochina'] / dict_of_confirmed_grandtotals['world_nochina']) * 100
dict_of_deaths_grandtotals_percs['world_nochina'] = (dict_of_deaths_grandtotals['world_nochina'] / dict_of_confirmed_grandtotals['world_nochina']) * 100

# CALCULATES EUROPE INCLUDING ITALY GRANDTOTALS TO BE SHOWN ON TOP OF THE CHARTS LIST
# AND ASSIGNS THEM TO THE RELEVANT DICTIONARY OF GRANDTOTALS DATAFRAMES
dict_of_active_grandtotals['europe'] = (formatted_dataframe_totalize_countries(list_of_countries_europe, reduced_active_DF).sum(axis=1))[0]
dict_of_confirmed_grandtotals['europe'] = (formatted_dataframe_totalize_countries(list_of_countries_europe, reduced_confirmed_DF).sum(axis=1))[0]
dict_of_recovered_grandtotals['europe'] = (formatted_dataframe_totalize_countries(list_of_countries_europe, reduced_recovered_DF).sum(axis=1))[0]
dict_of_deaths_grandtotals['europe'] = (formatted_dataframe_totalize_countries(list_of_countries_europe, reduced_deaths_DF).sum(axis=1))[0]
dict_of_active_grandtotals_percs['europe'] = (dict_of_active_grandtotals['europe'] / dict_of_confirmed_grandtotals['europe']) * 100
dict_of_recovered_grandtotals_percs['europe'] = (dict_of_recovered_grandtotals['europe'] / dict_of_confirmed_grandtotals['europe']) * 100
dict_of_deaths_grandtotals_percs['europe'] = (dict_of_deaths_grandtotals['europe'] / dict_of_confirmed_grandtotals['europe']) * 100

# CALCULATES EUROPE EXCEPT ITALY GRANDTOTALS TO BE SHOWN ON TOP OF THE CHARTS LIST
# AND ASSIGNS THEM TO THE RELEVANT DICTIONARY OF GRANDTOTALS DATAFRAMES
# (comment previously said "INCLUDING ITALY" - stale copy/paste; this block uses the no-Italy list)
dict_of_active_grandtotals['europe_noitaly'] = (formatted_dataframe_totalize_countries(list_of_countries_europe_noitaly, reduced_active_DF).sum(axis=1))[0]
dict_of_confirmed_grandtotals['europe_noitaly'] = (formatted_dataframe_totalize_countries(list_of_countries_europe_noitaly, reduced_confirmed_DF).sum(axis=1))[0]
dict_of_recovered_grandtotals['europe_noitaly'] = (formatted_dataframe_totalize_countries(list_of_countries_europe_noitaly, reduced_recovered_DF).sum(axis=1))[0]
dict_of_deaths_grandtotals['europe_noitaly'] = (formatted_dataframe_totalize_countries(list_of_countries_europe_noitaly, reduced_deaths_DF).sum(axis=1))[0]
dict_of_active_grandtotals_percs['europe_noitaly'] = (dict_of_active_grandtotals['europe_noitaly'] / dict_of_confirmed_grandtotals['europe_noitaly']) * 100
dict_of_recovered_grandtotals_percs['europe_noitaly'] = (dict_of_recovered_grandtotals['europe_noitaly'] / dict_of_confirmed_grandtotals['europe_noitaly']) * 100
dict_of_deaths_grandtotals_percs['europe_noitaly'] = (dict_of_deaths_grandtotals['europe_noitaly'] / dict_of_confirmed_grandtotals['europe_noitaly']) * 100

#############################################################
##
## GRANDTOTALS - ALL SINGLE COUNTRIES CALCULATION LOOP
##
#############################################################

# ITERATES OVER SINGLE COUNTRIES AND CALCULATES GRANDTOTALS FOR EACH OF THEM
# TO BE SHOWN ON TOP OF THE RELEVANT CHARTS LIST
for country in list_of_countries_world:
    dict_of_active_grandtotals[country] = (formatted_dataframe_totalize_countries([country], reduced_active_DF).sum(axis=1))[0]
    dict_of_confirmed_grandtotals[country] = (formatted_dataframe_totalize_countries([country], reduced_confirmed_DF).sum(axis=1))[0]
    dict_of_recovered_grandtotals[country] = (formatted_dataframe_totalize_countries([country], reduced_recovered_DF).sum(axis=1))[0]
    dict_of_deaths_grandtotals[country] = (formatted_dataframe_totalize_countries([country], reduced_deaths_DF).sum(axis=1))[0]
    # THE LAMBDAS IN THE NEXT 3 LINES JUST CHECK IF "dict_of_confirmed_grandtotals[country]" IS 0
    # TO AVOID DIVISION BY ZERO PROBLEMS. THE LAMBDA ITSELF IS AS FOLLOWS:
    # (lambda x: x if x>0 else 1)(dict_of_confirmed_grandtotals[country])
    # AND IT MEANS THAT WE PASS "(dict_of_confirmed_grandtotals[country])" AS INPUT TO SUBSTITUTE "x"
    # THEN IT'S EASY TO CHECK ON-THE-FLY IF WE ARE TRYING TO USE A ZERO GRANDTOTAL FOR THAT COUNTRY
    # AND IF THIS IS THE CASE, WE JUST SUBSTITUTE A "1" INSTEAD (SO THE PERCENTAGE BECOMES 0, NOT NaN)
    dict_of_active_grandtotals_percs[country] = (dict_of_active_grandtotals[country] / (lambda x: x if x>0 else 1)(dict_of_confirmed_grandtotals[country])) * 100
    dict_of_recovered_grandtotals_percs[country] = (dict_of_recovered_grandtotals[country] / (lambda x: x if x>0 else 1)(dict_of_confirmed_grandtotals[country])) * 100
    dict_of_deaths_grandtotals_percs[country] = (dict_of_deaths_grandtotals[country] / (lambda x: x if x>0 else 1)(dict_of_confirmed_grandtotals[country])) * 100
# -

# ### Following, we'll create a few Rankings of Top 5 Countries with regards to several measurables,
# to be able to later plot Pie charts easily also for these predefined aggregations:

# +
# CALCULATES TOP 5 WORLD COUNTRIES AS ACTIVE TO BE ABLE TO DRAW PIE CHARTS FOR THAT
# (sum across the date columns -> one total per country; sort descending; transpose so
# countries become columns and the first 5 column labels are the ranking)
world_active_top5 = active_DF.sum(axis=1).to_frame()
world_active_top5 = world_active_top5.rename(columns={world_active_top5.columns[0]: 'tmp'}).sort_values(['tmp'], ascending=[0]).T
list_of_world_active_top5 = world_active_top5.columns[:5].tolist()

# CALCULATES TOP 5 WORLD COUNTRIES AS CONFIRMED TO BE ABLE TO DRAW PIE CHARTS FOR THAT
world_confirmed_top5 = confirmed_DF.sum(axis=1).to_frame()
world_confirmed_top5 = world_confirmed_top5.rename(columns={world_confirmed_top5.columns[0]: 'tmp'}).sort_values(['tmp'], ascending=[0]).T
list_of_world_confirmed_top5 = world_confirmed_top5.columns[:5].tolist()

# CALCULATES TOP 5 WORLD COUNTRIES AS RECOVERED TO BE ABLE TO DRAW PIE CHARTS FOR THAT
world_recovered_top5 = recovered_DF.sum(axis=1).to_frame()
world_recovered_top5 = world_recovered_top5.rename(columns={world_recovered_top5.columns[0]: 'tmp'}).sort_values(['tmp'],
ascending=[0]).T
list_of_world_recovered_top5 = world_recovered_top5.columns[:5].tolist()

# CALCULATES TOP 5 WORLD COUNTRIES AS DEATHS TO BE ABLE TO DRAW PIE CHARTS FOR THAT
world_deaths_top5 = deaths_DF.sum(axis=1).to_frame()
world_deaths_top5 = world_deaths_top5.rename(columns={world_deaths_top5.columns[0]: 'tmp'}).sort_values(['tmp'], ascending=[0]).T
list_of_world_deaths_top5 = world_deaths_top5.columns[:5].tolist()

# CALCULATES TOP 5 EUROPE COUNTRIES AS ACTIVE TO BE ABLE TO DRAW PIE CHARTS FOR THAT
# (same ranking recipe as above, but rows are first filtered to the Europe list)
europe_active_top5 = active_DF[active_DF.index.isin(list_of_countries_europe)].sum(axis=1).to_frame()
europe_active_top5 = europe_active_top5.rename(columns={europe_active_top5.columns[0]: 'tmp'}).sort_values(['tmp'], ascending=[0]).T
list_of_europe_active_top5 = europe_active_top5.columns[:5].tolist()

# CALCULATES TOP 5 EUROPE COUNTRIES AS CONFIRMED TO BE ABLE TO DRAW PIE CHARTS FOR THAT
europe_confirmed_top5 = confirmed_DF[confirmed_DF.index.isin(list_of_countries_europe)].sum(axis=1).to_frame()
europe_confirmed_top5 = europe_confirmed_top5.rename(columns={europe_confirmed_top5.columns[0]: 'tmp'}).sort_values(['tmp'], ascending=[0]).T
list_of_europe_confirmed_top5 = europe_confirmed_top5.columns[:5].tolist()

# CALCULATES TOP 5 EUROPE COUNTRIES AS RECOVERED TO BE ABLE TO DRAW PIE CHARTS FOR THAT
europe_recovered_top5 = recovered_DF[recovered_DF.index.isin(list_of_countries_europe)].sum(axis=1).to_frame()
europe_recovered_top5 = europe_recovered_top5.rename(columns={europe_recovered_top5.columns[0]: 'tmp'}).sort_values(['tmp'], ascending=[0]).T
list_of_europe_recovered_top5 = europe_recovered_top5.columns[:5].tolist()

# CALCULATES TOP 5 EUROPE COUNTRIES AS DEATHS TO BE ABLE TO DRAW PIE CHARTS FOR THAT
europe_deaths_top5 = deaths_DF[deaths_DF.index.isin(list_of_countries_europe)].sum(axis=1).to_frame()
europe_deaths_top5 = europe_deaths_top5.rename(columns={europe_deaths_top5.columns[0]: 'tmp'}).sort_values(['tmp'], ascending=[0]).T
list_of_europe_deaths_top5 =
europe_deaths_top5.columns[:5].tolist()
# -

# ---
# # Section 4 - Finally ready to plot all the charts we wish!

# +
#############################################################
##
## HERE WE DEFINE A LIST OF COUNTRIES AND LOGICAL GROUPS OF
## COUNTRIES FOR WHICH WE WANT TO CREATE SUBFOLDERS CONTAINING
## DETAILED SITUATION WITH DEDICATED CHARTS
##
#############################################################

# MAPS: subfolder name -> [[label(s) as they appear in the source data], chart start date 'YYYY-MM-DD']
detailed_countries_dict = {'World': [['Whole World'], '2020-01-21'],
                           'European_Union': [['Europe'], '2020-02-19'],
                           'Italy': [['Italy'], '2020-02-19'],
                           'Spain': [['Spain'], '2020-02-19'],
                           'Germany': [['Germany'], '2020-02-19'],
                           'France': [['France'], '2020-02-19'],
                           'United_Kingdom': [['United Kingdom'], '2020-02-19'],
                           'South_Korea': [['Korea, South'], '2020-01-21'],
                           'United_States': [['US'], '2020-02-19'],
                           'China': [['China'], '2020-01-21'],
                           'Sweden': [['Sweden'], '2020-02-19'],
                           'Norway': [['Norway'], '2020-02-19'],
                           'Denmark': [['Denmark'], '2020-02-19'],
                           'Finland': [['Finland'], '2020-02-19'],
                           'Canada': [['Canada'], '2020-03-01'],
                           'Romania': [['Romania'], '2020-03-01'],
                           'Iran': [['Iran'], '2020-02-19'],
                           'Russia': [['Russia'], '2020-03-01'],
                           'Japan': [['Japan'], '2020-02-19'],
                           'Australia': [['Australia'], '2020-02-19'],
                           'New_Zealand': [['New Zealand'], '2020-02-19'],
                           'Singapore': [['Singapore'], '2020-02-19'],
                           'Brazil': [['Brazil'], '2020-02-19'],
                           'Israel': [['Israel'], '2020-02-19']
                           }

# CREATES WORKING DIRECTORY TO SAVE CHARTS FOR EACH COUNTRY/GROUP IF IT DOESN'T EXIST
# ALSO COPIES PROPER FLAG IMAGE FILE INTO THE CREATED DIRECTORY FOR LATER DISPLAY
path = os.getcwd()
dir_to_create = '/charts'
dir_to_create_tot = dir_to_create
for country_key in detailed_countries_dict.keys():
    try:
        # CREATES WORKING DIRECTORY OF COUNTRY IN CURRENT LOOP CYCLE
        # (sanitizes the key into a filesystem-safe folder name: quotes, commas,
        # dots and spaces all become underscores)
        country = country_key.replace('\'', '_').replace('\"', '_').replace(', ', '_').replace(',', '_').replace('.'
, '_').replace(' ' , '_') dir_to_create_tot = dir_to_create + '/' + country os.mkdir(path + dir_to_create_tot) except OSError: print ('Directory .' + dir_to_create_tot + ' already exists, proceed anyway') pass else: print('Successfully created Directory .' + dir_to_create_tot) pass """ try: # CREATES WORKING CHARTS SUBDIRECTORY OF COUNTRY IN CURRENT LOOP CYCLE country = country_key.replace('\'', '_').replace('\"', '_').replace(', ' ,'_').replace(',' ,'_').replace('.' , '_').replace(' ' , '_') dir_to_create_tot = dir_to_create_tot + '/charts' os.mkdir(path + dir_to_create_tot) except OSError: print ('Directory .' + dir_to_create_tot + ' already exists, proceed anyway') pass else: print('Successfully created Directory .' + dir_to_create_tot) pass """ try: # COPIES FLAG OF COUNTRY IN CURRENT LOOP CYCLE INTO DESTINATION DIR shutil.copy('./demo-images/flags/Flag_of_' + country + '.png', './charts/' + country) #os.system('cp ./demo-images/flags/Flag_of_' + country + '.png ./charts/' + country) except OSError: print ('Error during copy of Country Flag File: "cp ./demo-images/flags/Flag_of_' + country + '.png ./charts/' + country + '" - proceed anyway') pass else: print('Successfully copied Country Flag File: "cp ./demo-images/flags/Flag_of_' + country + '.png ./charts/' + country + '"') pass try: # CREATES COMPLETE SET OF CHARTS FOR COUNTRY IN CURRENT LOOP CYCLE os.chdir('charts' + '/' + country) country_name_in_exam = detailed_countries_dict[country_key][0][0] country_start_date_in_exam = detailed_countries_dict[country_key][1] country_start_date_in_exam_string = datetime.strptime(country_start_date_in_exam, '%Y-%m-%d').strftime('%d %b %Y') ############################################################# ## ## WORLD CHARTS ## ############################################################# # DRAWS STANDARD LIST OF CHARTS FOR WHOLE WORLD IN THE DEDICATED SUBDIRECTORY if (country_key == 'World'): # LET'S PLOT THE WORLD/CONTINENT/COUNTRY SUMMARY AT A GLANCE # PUTS RELEVANT VALUES 
TOGETHER IN LISTS HOLDING THEM ALL summary_countries = 'world' summary_figures = [dict_of_active_grandtotals[summary_countries], dict_of_confirmed_grandtotals[summary_countries], dict_of_recovered_grandtotals[summary_countries], dict_of_deaths_grandtotals[summary_countries]] summary_percs = [dict_of_active_grandtotals_percs[summary_countries], 100, dict_of_recovered_grandtotals_percs[summary_countries], dict_of_deaths_grandtotals_percs[summary_countries]] # CHOOSE SUB TITLES FOR EACH BOX summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities'] # CHOOSE COLORS FOR EACH BOX summary_colors = ['blue', 'yellow', 'green', 'red'] # CHOOSE TEXT COLORS FOR EACH BOX summary_text_colors = ['white', 'black', 'black', 'black'] # LET'S CHOOSE A TITLE (USED TO SAVE FILE) summary_title = country_name_in_exam.upper() + ' - SUMMARY GRANDTOTALS' + \ '\n' + \ 'ACTIVE means CURRENTLY OPEN CASES, under treatment' + \ '\n' + \ 'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED' + \ '\n' + \ 'RECOVERED means CLOSED CASES - healing' + \ '\n' + \ 'FATALITIES means CLOSED CASES - negative outcome' + \ '\n' + \ 'Since start of Outbreak' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_summary_headings_chart( summary_figures, summary_percs, summary_subtitles, summary_colors, summary_text_colors, summary_title, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Active'] = active_DF dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF dict_of_dataframes['Cumulative Recovered'] = recovered_DF dict_of_dataframes['Cumulative Fatalities'] = deaths_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Active Cases INSTANT TOTAL' + \ '\n' + \ 'vs. Confirmed Affected CUMULATIVE TOTAL vs. Recoveries CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Fatalities CUMULATIVE TOTAL' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! 
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'Advanced Chart - TREND ON 5 DAYS AVERAGE VALUES - Pls. read explanation below the Chart itself' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'Y' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! 
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Active'] = reduced_active_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Active Cases DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! 
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Confirmed Affected DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! 
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Recovered DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! 
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Fatalities DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! 
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = ['Italy', 'Germany', 'France', 'Spain', 'US', 'United Kingdom', 'Korea, South', 'China'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'ITALY, GERMANY, FRANCE, SPAIN, U.S.A., U.K., SOUTH KOREA AND CHINA COMPARED' + \ '\n' + \ 'Confirmed Affected CUMULATIVE TOTAL - Linear Analysis on Log Scale' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Logarithmic Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'log' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. 
DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_world_confirmed_top5 # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'TOP FIVE WORLD COUNTRIES - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'Relative Percentages over the Sum of JUST their CUMULATIVE Confirmed Cases' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # PIE CHATYS ARE A SPECIAL CASE AS THEY AGGREGATE MORE COUNTRIES IN A SINGLE CHART BY # DEFINITION, SO THEY NEED THIS FLAG TO BE SET TO "Y" # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. 
WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # HERE WE DRAW A HEATMAP, THEREFORE THE FORMAT IS NOT 'YYYY-MM-DD' # INSTEAD WE INDICATE HOW MANY DAYS BACK WE WANT TO GO start_date = 14 # IN THE CONTEXT OF HEATMAPS, THIS MEANS TO DRAW DATA SINCE X DAYS AGO # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'TOP 20 WORLD COUNTRIES - Active Cases INSTANT TOTAL' + \ '\n' + \ 'Heatmap comparing Countries evoution in time' + \ '\n' + \ 'Since 2 Weeks ago - Logarithmic Color Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'log' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # PIE CHATYS ARE A SPECIAL CASE AS THEY AGGREGATE MORE COUNTRIES IN A SINGLE CHART BY # DEFINITION, SO THEY NEED THIS FLAG TO BE SET TO "Y" # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT 
TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 20 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # HERE WE DRAW A HEATMAP, THEREFORE THE FORMAT IS NOT 'YYYY-MM-DD' # INSTEAD WE INDICATE HOW MANY DAYS BACK WE WANT TO GO start_date = 14 # IN THE CONTEXT OF HEATMAPS, THIS MEANS TO DRAW DATA SINCE X DAYS AGO # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'TOP 20 WORLD COUNTRIES - Confirmed Cases CUMULATIVE TOTAL' + \ '\n' + \ 'Heatmap comparing Countries evoution in time' + \ '\n' + \ 'Since 2 Weeks ago - Logarithmic Color Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'log' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # PIE CHATYS ARE A SPECIAL CASE AS THEY AGGREGATE MORE 
COUNTRIES IN A SINGLE CHART BY # DEFINITION, SO THEY NEED THIS FLAG TO BE SET TO "Y" # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 20 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # HERE WE DRAW A HEATMAP, THEREFORE THE FORMAT IS NOT 'YYYY-MM-DD' # INSTEAD WE INDICATE HOW MANY DAYS BACK WE WANT TO GO start_date = 14 # IN THE CONTEXT OF HEATMAPS, THIS MEANS TO DRAW DATA SINCE X DAYS AGO # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'TOP 20 WORLD COUNTRIES - Recovered CUMULATIVE TOTAL' + \ '\n' + \ 'Heatmap comparing Countries evoution in time' + \ '\n' + \ 'Since 2 Weeks ago - Logarithmic Color Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'log' # CHOOSE WHETHER 
YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # PIE CHATYS ARE A SPECIAL CASE AS THEY AGGREGATE MORE COUNTRIES IN A SINGLE CHART BY # DEFINITION, SO THEY NEED THIS FLAG TO BE SET TO "Y" # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 20 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = 14 # IN THE CONTEXT OF HEATMAPS, THIS MEANS TO DRAW DATA SINCE X DAYS AGO # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'TOP 20 WORLD COUNTRIES - Fatalities CUMULATIVE TOTAL' + \ '\n' + \ 'Heatmap comparing Countries evoution in time' + \ '\n' + \ 'Since 2 Weeks ago - Logarithmic Color Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'log' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # PIE CHATYS ARE A SPECIAL CASE AS THEY AGGREGATE MORE COUNTRIES IN A SINGLE CHART BY # DEFINITION, SO THEY NEED THIS FLAG TO BE SET TO "Y" # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 20 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! 
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Confirmed Affected DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! 
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_world # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Recoveries DAILY NEW' + \ '\n' + \ 'vs. Fatalities DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! 
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # BACK TO HOME DIRECTORY TO POSITION FOR NEXT LOOP CYCLE os.chdir(path) # CREATES INDEX PAGE FOR COUNTRY OR GROUP OF COUNTRIES IN CURRENT LOOP CYCLE try: generate_country_index_page(country, country_name_in_exam, detailed_countries_dict) except OSError: print ('Error during creation of Country Index Page: ' + country_name_in_exam + ', proceed anyway') os.chdir(path) pass else: print('Successfully created Country Index Page: ' + country_name_in_exam) os.chdir(path) pass ############################################################# ## ## EUROPE CHARTS ## ############################################################# # DRAWS STANDARD LIST OF CHARTS FOR EUROPEAN COUNTRIES AGGREGATIONS IN THE DEDICATED SUBDIRECTORY elif (country_key == 'European_Union'): # LET'S PLOT THE WORLD/CONTINENT/COUNTRY SUMMARY AT A GLANCE # PUTS RELEVANT VALUES TOGETHER IN LISTS HOLDING THEM ALL summary_countries = 'europe' summary_figures = [dict_of_active_grandtotals[summary_countries], dict_of_confirmed_grandtotals[summary_countries], dict_of_recovered_grandtotals[summary_countries], dict_of_deaths_grandtotals[summary_countries]] summary_percs = [dict_of_active_grandtotals_percs[summary_countries], 100, dict_of_recovered_grandtotals_percs[summary_countries], dict_of_deaths_grandtotals_percs[summary_countries]] # CHOOSE SUB TITLES FOR EACH BOX summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities'] # CHOOSE COLORS FOR EACH BOX summary_colors = ['blue', 'yellow', 'green', 'red'] # CHOOSE TEXT COLORS FOR EACH BOX summary_text_colors = ['white', 'black', 'black', 'black'] # LET'S CHOOSE A TITLE (USED TO SAVE FILE) summary_title = country_name_in_exam.upper() + ' - SUMMARY GRANDTOTALS' + \ '\n' + \ 'ACTIVE means CURRENTLY OPEN CASES, under 
treatment' + \ '\n' + \ 'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED' + \ '\n' + \ 'RECOVERED means CLOSED CASES - healing' + \ '\n' + \ 'FATALITIES means CLOSED CASES - negative outcome' + \ '\n' + \ 'Since start of Outbreak' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_summary_headings_chart( summary_figures, summary_percs, summary_subtitles, summary_colors, summary_text_colors, summary_title, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Active'] = active_DF dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF dict_of_dataframes['Cumulative Recovered'] = recovered_DF dict_of_dataframes['Cumulative Fatalities'] = deaths_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_europe # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Active Cases INSTANT TOTAL' + \ '\n' + \ 'vs. Confirmed Affected CUMULATIVE TOTAL vs. Recoveries CUMULATIVE TOTAL' + \ '\n' + \ 'vs. 
Fatalities CUMULATIVE TOTAL' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_countries_europe # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'Advanced Chart - TREND ON 5 DAYS AVERAGE VALUES - Pls. 
read explanation below the Chart itself' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'Y' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! 
# --- Chart: Europe aggregate, Active cases DAILY NEW ----------------------
# Legend label (dict key) -> source dataframe.
dict_of_dataframes = {'Daily Active': reduced_active_DF}

# Aggregate over all European countries, from the date under exam onward.
list_of_countries = list_of_countries_europe
start_date = country_start_date_in_exam

# Chart title: the lines are joined with newlines.
title = '\n'.join([
    country_name_in_exam.upper() + ' - Active Cases DAILY NEW',
    'Since ' + country_start_date_in_exam_string + ' - Linear Scale',
])

# Plot options: linear axis ('plain' or 'log'), totalize the country list
# into a single series ('Y'/'N'), top-N cap (only used by heatmaps; 0 = all),
# no advanced fitting overlays, and save into the current directory ('Y')
# rather than a "charts" subdirectory ('N').
scale_type = 'plain'
flg_totalize_countries = 'Y'
num_ranked = 10
flg_advanced_chart = 'N'
flg_save_localdir = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart,
    flg_save_localdir)

###############################################
# LET'S PLOT A COMPLETE CHART!
# --- Chart: Europe aggregate, Confirmed affected DAILY NEW ----------------
# Legend label (dict key) -> source dataframe.
dict_of_dataframes = {'Daily Confirmed': reduced_confirmed_DF}

# Aggregate over all European countries, from the date under exam onward.
list_of_countries = list_of_countries_europe
start_date = country_start_date_in_exam

# Chart title: the lines are joined with newlines.
title = '\n'.join([
    country_name_in_exam.upper() + ' - Confirmed Affected DAILY NEW',
    'Since ' + country_start_date_in_exam_string + ' - Linear Scale',
])

# Plot options: linear axis, totalized across the list, top-10 cap
# (heatmaps only), no advanced overlays, save in the current directory.
scale_type = 'plain'
flg_totalize_countries = 'Y'
num_ranked = 10
flg_advanced_chart = 'N'
flg_save_localdir = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart,
    flg_save_localdir)

###############################################
# LET'S PLOT A COMPLETE CHART!
# --- Chart: Europe aggregate, Recovered DAILY NEW -------------------------
# Legend label (dict key) -> source dataframe.
dict_of_dataframes = {'Daily Recovered': reduced_recovered_DF}

# Aggregate over all European countries, from the date under exam onward.
list_of_countries = list_of_countries_europe
start_date = country_start_date_in_exam

# Chart title: the lines are joined with newlines.
title = '\n'.join([
    country_name_in_exam.upper() + ' - Recovered DAILY NEW',
    'Since ' + country_start_date_in_exam_string + ' - Linear Scale',
])

# Plot options: linear axis, totalized across the list, top-10 cap
# (heatmaps only), no advanced overlays, save in the current directory.
scale_type = 'plain'
flg_totalize_countries = 'Y'
num_ranked = 10
flg_advanced_chart = 'N'
flg_save_localdir = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart,
    flg_save_localdir)

###############################################
# LET'S PLOT A COMPLETE CHART!
# --- Chart: Europe aggregate, Fatalities DAILY NEW ------------------------
# Legend label (dict key) -> source dataframe.
dict_of_dataframes = {'Daily Fatalities': reduced_deaths_DF}

# Aggregate over all European countries, from the date under exam onward.
list_of_countries = list_of_countries_europe
start_date = country_start_date_in_exam

# Chart title: the lines are joined with newlines.
title = '\n'.join([
    country_name_in_exam.upper() + ' - Fatalities DAILY NEW',
    'Since ' + country_start_date_in_exam_string + ' - Linear Scale',
])

# Plot options: linear axis, totalized across the list, top-10 cap
# (heatmaps only), no advanced overlays, save in the current directory.
scale_type = 'plain'
flg_totalize_countries = 'Y'
num_ranked = 10
flg_advanced_chart = 'N'
flg_save_localdir = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart,
    flg_save_localdir)

###############################################
# LET'S PLOT A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL
# DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
list_of_countries = ['Italy',
                     'Germany',
                     'France',
                     'Spain',
                     'Portugal',
                     'United Kingdom',
                     'Sweden',
                     'Norway',
                     'Denmark']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# FORMAT MUST MATCH 'YYYY-MM-DD'
start_date = country_start_date_in_exam

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
# FIX: the first title line previously read "..., SPAIN, ,PORTUGAL, ..." --
# a doubled comma and missing space in the user-facing chart title.
title = 'ITALY, GERMANY, FRANCE, SPAIN, PORTUGAL, U.K., SWEDEN, NORWAY AND DENMARK COMPARED' + \
        '\n' + \
        'Confirmed Affected CUMULATIVE TOTAL - Linear Analysis on Log Scale' + \
        '\n' + \
        'Since ' + country_start_date_in_exam_string + ' - Logarithmic Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'log'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN
# USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10
num_ranked = 10

# LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING
# SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N'
flg_advanced_chart = 'N'

# LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N")
# OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y")
flg_save_localdir = 'Y'

# LET'S PLOT!
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = list_of_europe_confirmed_top5 # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'TOP FIVE EUROPEAN COUNTRIES - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'Relative Percentages over the Sum of JUST their CUMULATIVE Confirmed Cases' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # PIE CHATYS ARE A SPECIAL CASE AS THEY AGGREGATE MORE COUNTRIES IN A SINGLE CHART BY # DEFINITION, SO THEY NEED THIS FLAG TO BE SET TO "Y" # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. 
# NOTE(review): this region was whitespace-mangled (newlines lost, so most
# statements were swallowed by preceding '#' comments and the code could not
# run).  Reconstructed below, one statement per line.  The original nesting
# level (these sections appear to sit inside a per-country loop) could not be
# recovered from the mangled text — TODO confirm indentation.
# (cont.) DEFAULT 'N'
flg_advanced_chart = 'N'

# 'Y' saves charts in the current directory, 'N' in a "charts" subdirectory
flg_save_localdir = 'Y'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart,
    flg_save_localdir)

###############################################
# CHART: top-20 European countries — ACTIVE cases heatmap, log color scale.
# Dictionary keys are the legend entries that will be plotted.
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF

list_of_countries = list_of_countries_europe

# Heatmaps do NOT take a 'YYYY-MM-DD' date: the value is "days back from today".
start_date = 14

title = 'TOP 20 EUROPEAN COUNTRIES - Active Cases INSTANT TOTAL' + \
        '\n' + \
        'Heatmap comparing Countries evoution in time' + \
        '\n' + \
        'Since 2 Weeks ago - Logarithmic Color Scale'

scale_type = 'log'              # 'plain' or 'log'
flg_totalize_countries = 'N'    # 'Y' totalizes the listed countries into one chart
num_ranked = 20                 # keep top-N ranked records (0 = all); used by heatmaps
flg_advanced_chart = 'N'        # 'Y' adds cubic fit / derivatives / inflection points
flg_save_localdir = 'Y'         # save in current dir instead of "charts" subdir

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart,
    flg_save_localdir)

###############################################
# CHART: top-20 European countries — CONFIRMED cases heatmap, log color scale.
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF

list_of_countries = list_of_countries_europe
start_date = 14  # days back (heatmap convention)

title = 'TOP 20 EUROPEAN COUNTRIES - Confirmed Cases CUMULATIVE TOTAL' + \
        '\n' + \
        'Heatmap comparing Countries evoution in time' + \
        '\n' + \
        'Since 2 Weeks ago - Logarithmic Color Scale'

scale_type = 'log'
flg_totalize_countries = 'N'
num_ranked = 20
flg_advanced_chart = 'N'
flg_save_localdir = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart,
    flg_save_localdir)

###############################################
# CHART: top-20 European countries — RECOVERED heatmap, log color scale.
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF

list_of_countries = list_of_countries_europe
start_date = 14  # days back (heatmap convention)

title = 'TOP 20 EUROPEAN COUNTRIES - Recovered CUMULATIVE TOTAL' + \
        '\n' + \
        'Heatmap comparing Countries evoution in time' + \
        '\n' + \
        'Since 2 Weeks ago - Logarithmic Color Scale'

scale_type = 'log'
flg_totalize_countries = 'N'
num_ranked = 20
flg_advanced_chart = 'N'
flg_save_localdir = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart,
    flg_save_localdir)

###############################################
# LET'S PLOTS A COMPLETE CHART!
# NOTE(review): reconstructed from whitespace-mangled source (newlines lost,
# statements swallowed by '#' comments).  Original nesting level unknown —
# TODO confirm indentation against the enclosing per-country loop.

# CHART: top-20 European countries — FATALITIES heatmap, log color scale.
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF

list_of_countries = list_of_countries_europe

# NOTE(review): the original comment here claimed "FORMAT MUST MATCH
# 'YYYY-MM-DD'", but heatmap sections use "days back" — the value below
# follows the heatmap convention used by the sibling sections.
start_date = 14

title = 'TOP 20 EUROPEAN COUNTRIES - Fatalities CUMULATIVE TOTAL' + \
        '\n' + \
        'Heatmap comparing Countries evoution in time' + \
        '\n' + \
        'Since 2 Weeks ago - Logarithmic Color Scale'

scale_type = 'log'              # 'plain' or 'log'
flg_totalize_countries = 'N'
num_ranked = 20                 # keep top-N ranked records (0 = all)
flg_advanced_chart = 'N'
flg_save_localdir = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart,
    flg_save_localdir)

###############################################
# CHART: Europe totalized — confirmed cumulative vs. confirmed daily new.
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF

list_of_countries = list_of_countries_europe
start_date = country_start_date_in_exam  # 'YYYY-MM-DD'

title = country_name_in_exam.upper() + ' - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since ' + country_start_date_in_exam_string + ' - Linear Scale'

scale_type = 'plain'
flg_totalize_countries = 'Y'    # totalize data across the listed countries
num_ranked = 10
flg_advanced_chart = 'N'
flg_save_localdir = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart,
    flg_save_localdir)

###############################################
# CHART: Europe totalized — daily recoveries vs. daily fatalities.
dict_of_dataframes = {}
dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF

list_of_countries = list_of_countries_europe
start_date = country_start_date_in_exam

title = country_name_in_exam.upper() + ' - Recoveries DAILY NEW' + \
        '\n' + \
        'vs. Fatalities DAILY NEW' + \
        '\n' + \
        'Since ' + country_start_date_in_exam_string + ' - Linear Scale'

scale_type = 'plain'
flg_totalize_countries = 'Y'
num_ranked = 10
flg_advanced_chart = 'N'
flg_save_localdir = 'Y'

# LET'S PLOT!
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # BACK TO HOME DIRECTORY TO POSITION FOR NEXT LOOP CYCLE os.chdir(path) # CREATES INDEX PAGE FOR COUNTRY OR GROUP OF COUNTRIES IN CURRENT LOOP CYCLE try: generate_country_index_page(country, country_name_in_exam, detailed_countries_dict) except OSError: print ('Error during creation of Country Index Page: ' + country_name_in_exam + ', proceed anyway') os.chdir(path) pass else: print('Successfully created Country Index Page: ' + country_name_in_exam) os.chdir(path) pass ############################################################# ## ## SINGLE COUNTRIES IN LOOP CHARTS ## ############################################################# # DRAWS STANDARD LIST OF CHARTS FOR SINGLE SPECIFIC COUNTRIES IN THE DEDICATED SUBDIRECTORY else: # LET'S PLOT THE WORLD/CONTINENT/COUNTRY SUMMARY AT A GLANCE # PUTS RELEVANT VALUES TOGETHER IN LISTS HOLDING THEM ALL summary_countries = country_name_in_exam summary_figures = [dict_of_active_grandtotals[summary_countries], dict_of_confirmed_grandtotals[summary_countries], dict_of_recovered_grandtotals[summary_countries], dict_of_deaths_grandtotals[summary_countries]] summary_percs = [dict_of_active_grandtotals_percs[summary_countries], 100, dict_of_recovered_grandtotals_percs[summary_countries], dict_of_deaths_grandtotals_percs[summary_countries]] # CHOOSE SUB TITLES FOR EACH BOX summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities'] # CHOOSE COLORS FOR EACH BOX summary_colors = ['blue', 'yellow', 'green', 'red'] # CHOOSE TEXT COLORS FOR EACH BOX summary_text_colors = ['white', 'black', 'black', 'black'] # LET'S CHOOSE A TITLE (USED TO SAVE FILE) summary_title = country_name_in_exam.upper() + ' - SUMMARY GRANDTOTALS' + \ '\n' + \ 'ACTIVE means CURRENTLY OPEN CASES, under treatment' 
+ \ '\n' + \ 'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED' + \ '\n' + \ 'RECOVERED means CLOSED CASES - healing' + \ '\n' + \ 'FATALITIES means CLOSED CASES - negative outcome' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_summary_headings_chart( summary_figures, summary_percs, summary_subtitles, summary_colors, summary_text_colors, summary_title, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Active'] = active_DF dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF dict_of_dataframes['Cumulative Recovered'] = recovered_DF dict_of_dataframes['Cumulative Fatalities'] = deaths_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = [country_name_in_exam] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Active Cases INSTANT TOTAL' + \ '\n' + \ 'vs. Confirmed Affected CUMULATIVE TOTAL vs. Recoveries CUMULATIVE TOTAL' + \ '\n' + \ 'vs. 
Fatalities CUMULATIVE TOTAL' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: list_of_countries = [country_name_in_exam] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'Advanced Chart - TREND ON 5 DAYS AVERAGE VALUES - Pls. 
read explanation below the Chart itself' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'Y' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = [country_name_in_exam] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Confirmed Affected DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Active'] = reduced_active_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = [country_name_in_exam] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Active Cases DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = [country_name_in_exam] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Recovered DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = [country_name_in_exam] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Fatalities DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = [country_name_in_exam] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Confirmed Affected DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! 
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = [country_name_in_exam] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = country_start_date_in_exam # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = country_name_in_exam.upper() + ' - Recoveries DAILY NEW' + \ '\n' + \ 'vs. Fatalities DAILY NEW' + \ '\n' + \ 'Since ' + country_start_date_in_exam_string + ' - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. 
DEFAULT 'N' flg_advanced_chart = 'N' # LET'S INDICATE IF WE WANT TO SAVE THE CHARTS IN A SEPARATE "charts" SUBDIRECTORY ("N") # OR IF WE WANT TO SAVE IN THE CURRENT DIRECTORY ("Y") flg_save_localdir = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart, flg_save_localdir) ############################################### # BACK TO HOME DIRECTORY TO POSITION FOR NEXT LOOP CYCLE os.chdir(path) # CREATES INDEX PAGE FOR COUNTRY OR GROUP OF COUNTRIES IN CURRENT LOOP CYCLE try: generate_country_index_page(country, country_name_in_exam, detailed_countries_dict) except OSError: print ('Error during creation of Country Index Page: ' + country_name_in_exam + ', proceed anyway') os.chdir(path) pass else: print('Successfully created Country Index Page: ' + country_name_in_exam) os.chdir(path) pass except OSError: print ('Error during creation of Country Set of Charts: ' + country + ', stopping!') # BLOCKING ERROR, STOPS LOOP os.chdir(path) else: print('Successfully created Country Set of Charts: ' + country) os.chdir(path) pass # - # ### Whole world situation: # + # LET'S PLOT THE WORLD SUMMARY AT A GLANCE # PUTS RELEVANT VALUES TOGETHER IN LISTS HOLDING THEM ALL summary_countries = 'world' summary_figures = [dict_of_active_grandtotals[summary_countries], dict_of_confirmed_grandtotals[summary_countries], dict_of_recovered_grandtotals[summary_countries], dict_of_deaths_grandtotals[summary_countries], num_affected_countries] summary_percs = [dict_of_active_grandtotals_percs[summary_countries], 100, dict_of_recovered_grandtotals_percs[summary_countries], dict_of_deaths_grandtotals_percs[summary_countries], 0] # CHOOSE SUB TITLES FOR EACH BOX summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities', 'Affected Countries'] # CHOOSE COLORS FOR EACH BOX summary_colors = ['blue', 'yellow', 'green', 'red', 'white'] # CHOOSE TEXT 
# NOTE(review): reconstructed from whitespace-mangled source.
# (cont.) text colors for each summary box
summary_text_colors = ['white', 'black', 'black', 'black', 'red']

# Title (also used to name the saved file).
summary_title = 'ALL WORLD INCLUDING CHINA - SUMMARY GRANDTOTALS' + \
    '\n' + \
    'ACTIVE means CURRENTLY OPEN CASES, under treatment' + \
    '\n' + \
    'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED' + \
    '\n' + \
    'RECOVERED means CLOSED CASES - healing' + \
    '\n' + \
    'FATALITIES means CLOSED CASES - negative outcome' + \
    '\n' + \
    'Since 21 Jan 2020'

# NOTE(review): called WITHOUT flg_save_localdir here, unlike the per-country
# calls — presumably the parameter has a default; confirm against the helper.
plot_summary_headings_chart(
    summary_figures,
    summary_percs,
    summary_subtitles,
    summary_colors,
    summary_text_colors,
    summary_title)
# +
# CHART: whole world totalized — cumulative recovered vs fatalities vs active.
# The commented-out keys below are the other series this cell can plot.
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
#dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
dict_of_dataframes['Cumulative Recovered'] = recovered_DF
dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# Explicit list, or one of the prebuilt lists:
# list_of_countries_world / list_of_countries_world_nochina /
# list_of_countries_europe / list_of_countries_europe_noitaly
list_of_countries = list_of_countries_world

start_date = '2020-01-21'  # 'YYYY-MM-DD'

title = 'ALL WORLD INCLUDING CHINA - SUMMARY GRANDTOTALS' + \
    '\n' + \
    'Active Cases (OPEN CASES, UNDER TREATMENT)' + \
    '\n' + \
    'vs. Recovered (CLOSED CASES) vs. Fatalities (CLOSED CASES)' + \
    '\n' + \
    'Since 21 Jan 2020 - Linear Scale'

scale_type = 'plain'            # 'plain' or 'log'
flg_totalize_countries = 'Y'    # totalize across all world countries

# NOTE(review): this call omits num_ranked / flg_advanced_chart /
# flg_save_localdir, unlike the per-country calls — presumably those
# parameters default; confirm against plot_complete_chart's signature.
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# +
# LET'S PLOTS A COMPLETE CHART!
# COMPLETE CHART: WORLD CUMULATIVE ACTIVE, ADVANCED ANALYSIS

# DATAFRAMES TO DRAW; DICTIONARY KEYS BECOME THE LEGEND ENTRIES
# (any other prepared dataframe - daily, 5-days moving average, shares - may be added here)
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Active'] = active_DF

# COUNTRIES TO DRAW - EXPLICIT LIST OR ONE OF THE PREDEFINED LISTS:
# list_of_countries_world / list_of_countries_world_nochina /
# list_of_countries_europe / list_of_countries_europe_noitaly
list_of_countries = list_of_countries_world

# FIRST DATE TO DRAW, FORMAT 'YYYY-MM-DD'
start_date = '2020-01-21'

# CHART TITLE
title = ('ALL WORLD INCLUDING CHINA - SUMMARY GRANDTOTALS'
         '\nActive Cases (OPEN CASES, UNDER TREATMENT)'
         '\nAdvanced Chart - INSTANT TREND ON RAW VALUES - Pls. read explanation below the Chart itself'
         '\nSince 21 Jan 2020 - Linear Scale')

# VERTICAL SCALE: 'plain' OR 'log'
scale_type = 'plain'

# 'Y' TOTALIZES ALL LISTED COUNTRIES INTO ONE CURVE, 'N' DRAWS EACH SEPARATELY
flg_totalize_countries = 'Y'

# HOW MANY TOP-RANKED RECORDS TO KEEP (USEFUL FOR HEATMAPS; 0 = ALL; DEFAULT 10)
num_ranked = 10

# 'Y' DRAWS THE ADVANCED CHART: CUBIC POLYNOMIAL FITTING, SECOND DERIVATIVES,
# INFLECTION POINTS AND ROOT POINTS (DEFAULT 'N')
flg_advanced_chart = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart)

# +
# LET'S PLOTS A COMPLETE CHART!
# COMPLETE CHART: WORLD DAILY NEW ACTIVE CASES

# DATAFRAMES TO DRAW; DICTIONARY KEYS BECOME THE LEGEND ENTRIES
# (any other prepared dataframe - cumulative, 5-days moving average, shares - may be added here)
dict_of_dataframes = {}
dict_of_dataframes['Daily Active'] = reduced_active_DF

# COUNTRIES TO DRAW - EXPLICIT LIST OR ONE OF THE PREDEFINED LISTS:
# list_of_countries_world / list_of_countries_world_nochina /
# list_of_countries_europe / list_of_countries_europe_noitaly
# NOTE: the original cell repeated this whole selection block twice back-to-back;
# the redundant duplicate assignment has been removed (behavior unchanged).
list_of_countries = list_of_countries_world

# FIRST DATE TO DRAW, FORMAT 'YYYY-MM-DD'
start_date = '2020-01-21'

# CHART TITLE
title = ('ALL WORLD INCLUDING CHINA - Active Cases DAILY NEW'
         '\nSince 21 Jan 2020 - Linear Scale')

# VERTICAL SCALE: 'plain' OR 'log'
scale_type = 'plain'

# 'Y' TOTALIZES ALL LISTED COUNTRIES INTO ONE CURVE, 'N' DRAWS EACH SEPARATELY
flg_totalize_countries = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# COMPLETE CHART: SIX MAJOR COUNTRIES, CUMULATIVE CONFIRMED, LOG SCALE

# DATAFRAMES TO DRAW; DICTIONARY KEYS BECOME THE LEGEND ENTRIES
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF

# COUNTRIES TO DRAW (explicit list here)
list_of_countries = ['Italy', 'Germany', 'France', 'Spain', 'US', 'United Kingdom']

# FIRST DATE TO DRAW, FORMAT 'YYYY-MM-DD'
start_date = '2020-02-19'

# CHART TITLE
title = ('ITALY, GERMANY, FRANCE, SPAIN, U.S.A. AND U.K. COMPARED - Confirmed Affected CUMULATIVE TOTAL'
         '\nLinear Analysis on Log scale shows similar Growth Rate taking time shift into account'
         '\nItaly showing Growth Rate decrease in most recent period, U.S.A. Growth Rate increasing'
         '\nSince 19 Feb 2020 - Logarithmic Scale')

# VERTICAL SCALE: 'plain' OR 'log'
scale_type = 'log'

# 'N': ONE SEPARATE CURVE PER COUNTRY
flg_totalize_countries = 'N'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# -

# ### Top 5 Pie charts at Global and Continental level:

# +
# LET'S PLOTS A COMPLETE CHART!
# PIE CHART: TOP 5 WORLD COUNTRIES, SHARES OF CUMULATIVE CONFIRMED

# DATAFRAMES TO DRAW; DICTIONARY KEYS BECOME THE LEGEND ENTRIES
# (any other prepared dataframe - daily, cumulative, moving average, shares - may be added here)
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF

# COUNTRIES TO DRAW - HERE THE PRECOMPUTED WORLD TOP-5 BY CONFIRMED CASES
list_of_countries = list_of_world_confirmed_top5

# FIRST DATE TO DRAW, FORMAT 'YYYY-MM-DD'
start_date = '2020-01-21'

# CHART TITLE
title = ('TOP FIVE WORLD COUNTRIES - Confirmed Affected CUMULATIVE TOTAL'
         '\nRelative Percentages over the Sum of JUST their CUMULATIVE Confirmed Cases'
         '\nSince 21 Jan 2020 - Linear Scale')

# VERTICAL SCALE: 'plain' OR 'log'
scale_type = 'plain'

# PIE CHARTS AGGREGATE MANY COUNTRIES INTO ONE CHART BY DEFINITION,
# SO THIS FLAG MUST BE 'Y' FOR THEM
flg_totalize_countries = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# PIE CHART: TOP 5 EUROPEAN COUNTRIES, SHARES OF CUMULATIVE CONFIRMED

# DATAFRAMES TO DRAW; DICTIONARY KEYS BECOME THE LEGEND ENTRIES
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF

# COUNTRIES TO DRAW - HERE THE PRECOMPUTED EUROPEAN TOP-5 BY CONFIRMED CASES
list_of_countries = list_of_europe_confirmed_top5

# FIRST DATE TO DRAW, FORMAT 'YYYY-MM-DD'
start_date = '2020-01-21'

# CHART TITLE
title = ('TOP FIVE EUROPEAN COUNTRIES - Confirmed Affected CUMULATIVE TOTAL'
         '\nRelative Percentages over the Sum of JUST their CUMULATIVE Confirmed Cases'
         '\nSince 21 Jan 2020 - Linear Scale')

# VERTICAL SCALE: 'plain' OR 'log'
scale_type = 'plain'

# PIE CHARTS AGGREGATE MANY COUNTRIES INTO ONE CHART BY DEFINITION,
# SO THIS FLAG MUST BE 'Y' FOR THEM
flg_totalize_countries = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# -

# ### Various Heatmaps for Active Cases, Confirmed, Recovered and Deaths:

# +
# LET'S PLOTS A COMPLETE CHART!
# HEATMAP: TOP 20 WORLD COUNTRIES, CUMULATIVE ACTIVE CASES

# DATAFRAMES TO DRAW; DICTIONARY KEYS BECOME THE LEGEND ENTRIES
# (any other prepared dataframe - daily/cumulative values, heatmaps, averages, shares - may be added here)
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF

# COUNTRIES TO DRAW - EXPLICIT LIST OR ONE OF THE PREDEFINED LISTS:
# list_of_countries_world / list_of_countries_world_nochina /
# list_of_countries_europe / list_of_countries_europe_noitaly /
# list_of_world_confirmed_top5 / list_of_europe_confirmed_top5
list_of_countries = list_of_countries_world

# FOR HEATMAPS start_date IS NOT A 'YYYY-MM-DD' STRING:
# IT IS THE NUMBER OF DAYS BACK FROM TODAY TO DRAW
start_date = 14

# CHART TITLE
# (typo fix: original said "evoution")
title = ('TOP 20 WORLD COUNTRIES - Active Cases CUMULATIVE TOTAL'
         '\nHeatmap comparing Countries evolution in time'
         '\nSince 2 Weeks ago - Logarithmic Color Scale')

# COLOR SCALE: 'plain' OR 'log'
scale_type = 'log'

# 'N': ONE HEATMAP ROW PER COUNTRY (PIE CHARTS ARE THE SPECIAL CASE NEEDING 'Y')
flg_totalize_countries = 'N'

# HOW MANY TOP-RANKED RECORDS TO KEEP (USEFUL FOR HEATMAPS; 0 = ALL; DEFAULT 10)
num_ranked = 20

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked)

# +
# LET'S PLOTS A COMPLETE CHART!
# HEATMAP: TOP 20 WORLD COUNTRIES, CUMULATIVE CONFIRMED CASES

# DATAFRAMES TO DRAW; DICTIONARY KEYS BECOME THE LEGEND ENTRIES
# (any other prepared dataframe - daily/cumulative values, heatmaps, averages, shares - may be added here)
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF

# COUNTRIES TO DRAW - EXPLICIT LIST OR ONE OF THE PREDEFINED LISTS:
# list_of_countries_world / list_of_countries_world_nochina /
# list_of_countries_europe / list_of_countries_europe_noitaly /
# list_of_world_confirmed_top5 / list_of_europe_confirmed_top5
list_of_countries = list_of_countries_world

# FOR HEATMAPS start_date IS NOT A 'YYYY-MM-DD' STRING:
# IT IS THE NUMBER OF DAYS BACK FROM TODAY TO DRAW
start_date = 14

# CHART TITLE
# (typo fix: original said "evoution")
title = ('TOP 20 WORLD COUNTRIES - Confirmed Cases CUMULATIVE TOTAL'
         '\nHeatmap comparing Countries evolution in time'
         '\nSince 2 Weeks ago - Logarithmic Color Scale')

# COLOR SCALE: 'plain' OR 'log'
scale_type = 'log'

# 'N': ONE HEATMAP ROW PER COUNTRY (PIE CHARTS ARE THE SPECIAL CASE NEEDING 'Y')
flg_totalize_countries = 'N'

# HOW MANY TOP-RANKED RECORDS TO KEEP (USEFUL FOR HEATMAPS; 0 = ALL; DEFAULT 10)
num_ranked = 20

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked)

# +
# LET'S PLOTS A COMPLETE CHART!
# HEATMAP: TOP 20 WORLD COUNTRIES, CUMULATIVE RECOVERED

# DATAFRAMES TO DRAW; DICTIONARY KEYS BECOME THE LEGEND ENTRIES
# (any other prepared dataframe - daily/cumulative values, heatmaps, averages, shares - may be added here)
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF

# COUNTRIES TO DRAW - EXPLICIT LIST OR ONE OF THE PREDEFINED LISTS:
# list_of_countries_world / list_of_countries_world_nochina /
# list_of_countries_europe / list_of_countries_europe_noitaly /
# list_of_world_confirmed_top5 / list_of_europe_confirmed_top5
list_of_countries = list_of_countries_world

# FOR HEATMAPS start_date IS NOT A 'YYYY-MM-DD' STRING:
# IT IS THE NUMBER OF DAYS BACK FROM TODAY TO DRAW
start_date = 14

# CHART TITLE
# (typo fix: original said "evoution")
title = ('TOP 20 WORLD COUNTRIES - Recovered CUMULATIVE TOTAL'
         '\nHeatmap comparing Countries evolution in time'
         '\nSince 2 Weeks ago - Logarithmic Color Scale')

# COLOR SCALE: 'plain' OR 'log'
scale_type = 'log'

# 'N': ONE HEATMAP ROW PER COUNTRY (PIE CHARTS ARE THE SPECIAL CASE NEEDING 'Y')
flg_totalize_countries = 'N'

# HOW MANY TOP-RANKED RECORDS TO KEEP (USEFUL FOR HEATMAPS; 0 = ALL; DEFAULT 10)
num_ranked = 20

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked)

# +
# LET'S PLOTS A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} #dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF #dict_of_dataframes['Daily Confirmed Heatmap'] = reduced_confirmed_heatmap_DF #dict_of_dataframes['Daily Recovered Heatmap'] = reduced_recovered_heatmap_DF #dict_of_dataframes['Daily Fatalities Heatmap'] = reduced_deaths_heatmap_DF #dict_of_dataframes['Daily Active Heatmap'] = reduced_active_heatmap_DF #dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF 
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
# - list_of_countries_world
# - list_of_countries_world_nochina
# - list_of_countries_europe
# - list_of_countries_europe_noitaly
# - list_of_world_confirmed_top5
# - list_of_europe_confirmed_top5
list_of_countries = list_of_countries_world

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# NOTE: this cell draws a HEATMAP, so the value is NOT a 'YYYY-MM-DD' date
# (the previous comment was copy-pasted from a non-heatmap cell);
# it is the number of days back from today to include.
start_date = 14  # IN THE CONTEXT OF HEATMAPS, THIS MEANS TO DRAW DATA SINCE X DAYS AGO

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
# (fixed typo in the displayed title: "evoution" -> "evolution")
title = 'TOP 20 WORLD COUNTRIES - Fatalities CUMULATIVE TOTAL' + \
        '\n' + \
        'Heatmap comparing Countries evolution in time' + \
        '\n' + \
        'Since 2 Weeks ago - Logarithmic Color Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'log'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# PIE CHARTS ARE A SPECIAL CASE AS THEY AGGREGATE MORE COUNTRIES IN A SINGLE CHART BY
# DEFINITION, SO THEY NEED THIS FLAG TO BE SET TO "Y"
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN
# USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10
num_ranked = 20

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked)

# -

# ### More stats for the whole world:

# +
# LET'S PLOT A COMPLETE CHART!
# Pick the series to draw: each dictionary key becomes a legend entry.
# This cell overlays the three cumulative world curves (confirmed / recovered / fatalities).
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
dict_of_dataframes['Cumulative Recovered'] = recovered_DF
dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# Country selection: explicit list, or one of the prebuilt lists
# (list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly).
list_of_countries = list_of_countries_world

# Start date for this chart, 'YYYY-MM-DD' format.
start_date = '2020-01-21'

# Chart title (also used when saving the figure).
title = 'ALL WORLD INCLUDING CHINA - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Recoveries CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Fatalities CUMULATIVE TOTAL' + \
        '\n' + \
        'Since 21 Jan 2020 - Linear Scale'

# Y-axis scale: 'plain' or 'log'.
scale_type = 'plain'

# 'Y' totalizes all listed nations into one curve; 'N' draws one chart per nation.
flg_totalize_countries = 'Y'

# Draw it.
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# Pick the series to draw: each dictionary key becomes a legend entry.
# This cell draws the advanced-trend chart on the cumulative world confirmed curve.
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# Country selection: explicit list, or one of the prebuilt lists
# (list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly).
list_of_countries = list_of_countries_world

# Start date for this chart, 'YYYY-MM-DD' format.
start_date = '2020-01-21'

# Chart title (also used when saving the figure).
title = 'ALL WORLD INCLUDING CHINA - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'Advanced Chart - INSTANT TREND ON RAW VALUES - Pls. read explanation below the Chart itself' + \
        '\n' + \
        'Since 21 Jan 2020 - Linear Scale'

# Y-axis scale: 'plain' or 'log'.
scale_type = 'plain'

# 'Y' totalizes all listed nations into one curve; 'N' draws one chart per nation.
flg_totalize_countries = 'Y'

# How many top-ranked records to keep (heatmaps mostly); 0 keeps everything. Default 10.
num_ranked = 10

# 'Y' enables the advanced chart: cubic polynomial fit, second derivative,
# inflection and root points. Default 'N'.
flg_advanced_chart = 'Y'

# Draw it.
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart)

# +
# LET'S PLOT A COMPLETE CHART!
# Pick the series to draw: each dictionary key becomes a legend entry.
# This cell shows the world's daily-new confirmed cases.
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
#dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# Country selection: explicit list, or one of the prebuilt lists
# (list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly).
list_of_countries = list_of_countries_world

# Start date for this chart, 'YYYY-MM-DD' format.
start_date = '2020-01-21'

# Chart title (also used when saving the figure).
title = 'ALL WORLD INCLUDING CHINA - Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 21 Jan 2020 - Linear Scale'

# Y-axis scale: 'plain' or 'log'.
scale_type = 'plain'

# 'Y' totalizes all listed nations into one curve; 'N' draws one chart per nation.
flg_totalize_countries = 'Y'

# Draw it.
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!

# Pick the series to draw: each dictionary key becomes a legend entry.
# This cell overlays world cumulative confirmed with daily-new confirmed.
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# Country selection: explicit list, or one of the prebuilt lists
# (list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly).
list_of_countries = list_of_countries_world

# Start date for this chart, 'YYYY-MM-DD' format.
start_date = '2020-01-21'

# Chart title (also used when saving the figure).
title = 'ALL WORLD INCLUDING CHINA - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 21 Jan 2020 - Linear Scale'

# Y-axis scale: 'plain' or 'log'.
scale_type = 'plain'

# 'Y' totalizes all listed nations into one curve; 'N' draws one chart per nation.
flg_totalize_countries = 'Y'

# Draw it.
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# Pick the series to draw: each dictionary key becomes a legend entry.
# This cell compares the world's daily-new recoveries against daily-new fatalities.
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
#dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# Country selection: explicit list, or one of the prebuilt lists
# (list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly).
list_of_countries = list_of_countries_world

# Start date for this chart, 'YYYY-MM-DD' format.
start_date = '2020-01-21'

# Chart title (also used when saving the figure).
title = 'ALL WORLD INCLUDING CHINA - Recoveries DAILY NEW' + \
        '\n' + \
        'vs. Fatalities DAILY NEW' + \
        '\n' + \
        'Since 21 Jan 2020 - Linear Scale'

# Y-axis scale: 'plain' or 'log'.
scale_type = 'plain'

# 'Y' totalizes all listed nations into one curve; 'N' draws one chart per nation.
flg_totalize_countries = 'Y'

# Draw it.
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# -

# ### Now a complete list of visualizations for the whole world, but excluding China:

# +
# WORLD-AT-A-GLANCE SUMMARY BOXES (world excluding China).
# Collect the grand totals and their percentages for the four headline figures.
summary_countries = 'world_nochina'
summary_figures = [dict_of_active_grandtotals[summary_countries],
                   dict_of_confirmed_grandtotals[summary_countries],
                   dict_of_recovered_grandtotals[summary_countries],
                   dict_of_deaths_grandtotals[summary_countries]]
summary_percs = [dict_of_active_grandtotals_percs[summary_countries],
                 100,
                 dict_of_recovered_grandtotals_percs[summary_countries],
                 dict_of_deaths_grandtotals_percs[summary_countries]]

# One subtitle, box color and text color per summary box.
summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities']
summary_colors = ['blue', 'yellow', 'green', 'red']
summary_text_colors = ['white', 'black', 'black', 'black']

# Title (also used as the saved-file name).
summary_title = 'ALL WORLD EXCLUDING CHINA - SUMMARY GRANDTOTALS' + \
                '\n' + \
                'ACTIVE means CURRENTLY OPEN CASES, under treatment' + \
                '\n' + \
                'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED' + \
                '\n' + \
                'RECOVERED means CLOSED CASES - healing' + \
                '\n' + \
                'FATALITIES means CLOSED CASES - negative outcome' + \
                '\n' + \
                'Since 21 Jan 2020'

# Draw the summary boxes.
plot_summary_headings_chart(
    summary_figures,
    summary_percs,
    summary_subtitles,
    summary_colors,
    summary_text_colors,
    summary_title)

# +
# LET'S PLOT A COMPLETE CHART!

# Pick the series to draw: each dictionary key becomes a legend entry.
# This cell overlays the three cumulative curves for the world excluding China.
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
dict_of_dataframes['Cumulative Recovered'] = recovered_DF
dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# Country selection: explicit list, or one of the prebuilt lists
# (list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly).
list_of_countries = list_of_countries_world_nochina

# Start date for this chart, 'YYYY-MM-DD' format.
start_date = '2020-01-21'

# Chart title (also used when saving the figure).
title = 'WORLD EXCLUDING CHINA - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Recoveries CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Fatalities CUMULATIVE TOTAL' + \
        '\n' + \
        'Since 21 Jan 2020 - Linear Scale'

# Y-axis scale: 'plain' or 'log'.
scale_type = 'plain'

# 'Y' totalizes all listed nations into one curve; 'N' draws one chart per nation.
flg_totalize_countries = 'Y'

# Draw it.
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# Pick the series to draw: each dictionary key becomes a legend entry.
# This cell draws the advanced-trend chart on cumulative confirmed, world excluding China.
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# Country selection: explicit list, or one of the prebuilt lists
# (list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly).
list_of_countries = list_of_countries_world_nochina

# Start date for this chart, 'YYYY-MM-DD' format.
start_date = '2020-01-21'

# Chart title (also used when saving the figure).
title = 'WORLD EXCLUDING CHINA - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'Advanced Chart - INSTANT TREND ON RAW VALUES - Pls. read explanation below the Chart itself' + \
        '\n' + \
        'Since 21 Jan 2020 - Linear Scale'

# Y-axis scale: 'plain' or 'log'.
scale_type = 'plain'

# 'Y' totalizes all listed nations into one curve; 'N' draws one chart per nation.
flg_totalize_countries = 'Y'

# How many top-ranked records to keep (heatmaps mostly); 0 keeps everything. Default 10.
num_ranked = 10

# 'Y' enables the advanced chart: cubic polynomial fit, second derivative,
# inflection and root points. Default 'N'.
flg_advanced_chart = 'Y'

# Draw it.
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart)

# +
# LET'S PLOT A COMPLETE CHART!
# Pick the series to draw: each dictionary key becomes a legend entry.
# This cell shows daily-new confirmed cases for the world excluding China.
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
#dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# Country selection: explicit list, or one of the prebuilt lists
# (list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly).
list_of_countries = list_of_countries_world_nochina

# Start date for this chart, 'YYYY-MM-DD' format.
start_date = '2020-01-21'

# Chart title (also used when saving the figure).
title = 'WORLD EXCLUDING CHINA - Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 21 Jan 2020 - Linear Scale'

# Y-axis scale: 'plain' or 'log'.
scale_type = 'plain'

# 'Y' totalizes all listed nations into one curve; 'N' draws one chart per nation.
flg_totalize_countries = 'Y'

# Draw it.
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!

# Pick the series to draw: each dictionary key becomes a legend entry.
# This cell overlays cumulative confirmed with daily-new confirmed, world excluding China.
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# Country selection: explicit list, or one of the prebuilt lists
# (list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly).
list_of_countries = list_of_countries_world_nochina

# Start date for this chart, 'YYYY-MM-DD' format.
start_date = '2020-01-21'

# Chart title (also used when saving the figure).
title = 'WORLD EXCLUDING CHINA - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 21 Jan 2020 - Linear Scale'

# Y-axis scale: 'plain' or 'log'.
scale_type = 'plain'

# 'Y' totalizes all listed nations into one curve; 'N' draws one chart per nation.
flg_totalize_countries = 'Y'

# Draw it.
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} #dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF #dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - 
list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = list_of_countries_world_nochina # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-01-21' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'WORLD EXCLUDING CHINA - Recoveries DAILY NEW' + \ '\n' + \ 'vs. Fatalities DAILY NEW' + \ '\n' + \ 'Since 21 Jan 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # - # ### Following is a Section of visualizations dedicated to China only: # + # LET'S PLOT THE WORLD SUMMARY AT A GLANCE # PUTS RELEVANT VALUES TOGETHER IN LISTS HOLDING THEM ALL summary_countries = 'China' summary_figures = [dict_of_active_grandtotals[summary_countries], dict_of_confirmed_grandtotals[summary_countries], dict_of_recovered_grandtotals[summary_countries], dict_of_deaths_grandtotals[summary_countries]] summary_percs = [dict_of_active_grandtotals_percs[summary_countries], 100, dict_of_recovered_grandtotals_percs[summary_countries], dict_of_deaths_grandtotals_percs[summary_countries]] # CHOOSE SUB TITLES FOR EACH BOX summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities'] # CHOOSE COLORS FOR EACH BOX summary_colors = ['blue', 'yellow', 'green', 'red'] # CHOOSE TEXT COLORS FOR EACH BOX summary_text_colors = ['white', 'black', 'black', 'black'] # LET'S CHOOSE A TITLE (USED TO SAVE FILE) summary_title = 'CHINA ONLY - SUMMARY GRANDTOTALS' + \ '\n' + \ 'ACTIVE means CURRENTLY OPEN CASES, under treatment' + \ '\n' + \ 'CONFIRMED means 
CUMULATIVE OF CONFIRMED AFFECTED' + \ '\n' + \ 'RECOVERED means CLOSED CASES - healing' + \ '\n' + \ 'FATALITIES means CLOSED CASES - negative outcome' + \ '\n' + \ 'Since 21 Jan 2020' # LET'S PLOT! plot_summary_headings_chart( summary_figures, summary_percs, summary_subtitles, summary_colors, summary_text_colors, summary_title) # + # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} #dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF dict_of_dataframes['Cumulative Recovered'] = recovered_DF dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative 
Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['China'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-01-21' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'CHINA ONLY - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Recoveries CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Fatalities CUMULATIVE TOTAL' + \ '\n' + \ 'Since 21 Jan 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # + # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} #dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF #dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - 
list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['China'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-01-21' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'CHINA ONLY - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'Advanced Chart - TREND ON 5 DAYS AVERAGE VALUES - Pls. read explanation below the Chart itself' + \ '\n' + \ 'Since 21 Jan 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart) # + # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF #dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - 
list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['China'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-01-21' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'CHINA ONLY - Confirmed Affected DAILY NEW' + \ '\n' + \ 'Since 21 Jan 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # + # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF 
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['China'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-01-21' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'CHINA ONLY - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Confirmed Affected DAILY NEW' + \ '\n' + \ 'Since 21 Jan 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # + # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} #dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF #dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - 
list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['China'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-01-21' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'CHINA ONLY - Recoveries DAILY NEW' + \ '\n' + \ 'vs. Fatalities DAILY NEW' + \ '\n' + \ 'Since 21 Jan 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # - # ### The next Secion of visualizations is dedicated to South Korea: # + # LET'S PLOT THE WORLD SUMMARY AT A GLANCE # PUTS RELEVANT VALUES TOGETHER IN LISTS HOLDING THEM ALL summary_countries = 'Korea, South' summary_figures = [dict_of_active_grandtotals[summary_countries], dict_of_confirmed_grandtotals[summary_countries], dict_of_recovered_grandtotals[summary_countries], dict_of_deaths_grandtotals[summary_countries]] summary_percs = [dict_of_active_grandtotals_percs[summary_countries], 100, dict_of_recovered_grandtotals_percs[summary_countries], dict_of_deaths_grandtotals_percs[summary_countries]] # CHOOSE SUB TITLES FOR EACH BOX summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities'] # CHOOSE COLORS FOR EACH BOX summary_colors = ['blue', 'yellow', 'green', 'red'] # CHOOSE TEXT COLORS FOR EACH BOX summary_text_colors = ['white', 'black', 'black', 'black'] # LET'S CHOOSE A TITLE (USED TO SAVE FILE) summary_title = 'SOUTH KOREA ONLY - SUMMARY GRANDTOTALS' + \ '\n' + \ 'ACTIVE means CURRENTLY OPEN CASES, under treatment' + \ '\n' + \ 'CONFIRMED means CUMULATIVE OF CONFIRMED 
AFFECTED' + \ '\n' + \ 'RECOVERED means CLOSED CASES - healing' + \ '\n' + \ 'FATALITIES means CLOSED CASES - negative outcome' + \ '\n' + \ 'Since 21 Jan 2020' # LET'S PLOT! plot_summary_headings_chart( summary_figures, summary_percs, summary_subtitles, summary_colors, summary_text_colors, summary_title) # + # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} #dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF dict_of_dataframes['Cumulative Recovered'] = recovered_DF dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = 
confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['Korea, South'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-01-21' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'SOUTH KOREA ONLY - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Recoveries CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Fatalities CUMULATIVE TOTAL' + \ '\n' + \ 'Since 21 Jan 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # + # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} #dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF #dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - 
list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['Korea, South'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-01-21' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'SOUTH KOREA ONLY - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'Advanced Chart - TREND ON 5 DAYS AVERAGE VALUES - Pls. read explanation below the Chart itself' + \ '\n' + \ 'Since 21 Jan 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart) # + # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF #dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - 
list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['Korea, South'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-01-21' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'SOUTH KOREA ONLY - Confirmed Affected DAILY NEW' + \ '\n' + \ 'Since 21 Jan 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # + # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = 
reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['Korea, South'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-01-21' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'SOUTH KOREA ONLY - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Confirmed Affected DAILY NEW' + \ '\n' + \ 'Since 21 Jan 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # + # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys become the legend entries that will be plotted.
# Other available dataframes (enable by adding them to dict_of_dataframes):
#   daily:            reduced_confirmed_DF, reduced_active_DF
#   cumulative:       confirmed_DF, recovered_DF, deaths_DF, active_DF
#   heatmaps:         confirmed_heatmap_DF, recovered_heatmap_DF,
#                     deaths_heatmap_DF, active_heatmap_DF
#   daily 5-day avg:  reduced_moving_avg_confirmed_DF, reduced_moving_avg_recovered_DF,
#                     reduced_moving_avg_deaths_DF, reduced_moving_avg_active_DF
#   cum. 5-day avg:   moving_avg_confirmed_DF, moving_avg_recovered_DF,
#                     moving_avg_deaths_DF, moving_avg_active_DF
#   shares (pies):    confirmed_pie_DF, recovered_pie_DF, deaths_pie_DF
dict_of_dataframes = {}
dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# (explicit list, or one of: list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly)
list_of_countries = ['Korea, South']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART ('YYYY-MM-DD')
start_date = '2020-01-21'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'SOUTH KOREA ONLY - Recoveries DAILY NEW' + \
        '\n' + \
        'vs. Fatalities DAILY NEW' + \
        '\n' + \
        'Since 21 Jan 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' or 'log'
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# -

# ### Whole Europe stats section:

# +
# LET'S PLOT THE EUROPE SUMMARY AT A GLANCE.
# Gather the relevant grandtotal values and percentages into parallel lists.
summary_countries = 'europe'
summary_figures = [dict_of_active_grandtotals[summary_countries],
                   dict_of_confirmed_grandtotals[summary_countries],
                   dict_of_recovered_grandtotals[summary_countries],
                   dict_of_deaths_grandtotals[summary_countries]]
# Confirmed is the reference total, hence the fixed 100 percent entry.
summary_percs = [dict_of_active_grandtotals_percs[summary_countries],
                 100,
                 dict_of_recovered_grandtotals_percs[summary_countries],
                 dict_of_deaths_grandtotals_percs[summary_countries]]

# Sub titles, box colors and text colors for each summary box (same order as above)
summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities']
summary_colors = ['blue', 'yellow', 'green', 'red']
summary_text_colors = ['white', 'black', 'black', 'black']

# Title (also used to name the saved file)
summary_title = 'EUROPE - SUMMARY GRANDTOTALS' + \
                '\n' + \
                'ACTIVE means CURRENTLY OPEN CASES, under treatment' + \
                '\n' + \
                'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED' + \
                '\n' + \
                'RECOVERED means CLOSED CASES - healing' + \
                '\n' + \
                'FATALITIES means CLOSED CASES - negative outcome' + \
                '\n' + \
                'Since 21 Jan 2020'

# LET'S PLOT!
plot_summary_headings_chart(
    summary_figures,
    summary_percs,
    summary_subtitles,
    summary_colors,
    summary_text_colors,
    summary_title)

# +
# LET'S PLOT A COMPLETE CHART!

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys become the legend entries that will be plotted
# (see the dataframe menu in the first chart cell for all options).
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
dict_of_dataframes['Cumulative Recovered'] = recovered_DF
dict_of_dataframes['Cumulative Fatalities'] = deaths_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
list_of_countries = list_of_countries_europe

# CHOOSE A START DATE FOR THIS PARTICULAR CHART ('YYYY-MM-DD')
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'EUROPE - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Recoveries CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Fatalities CUMULATIVE TOTAL' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' or 'log'
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation
flg_totalize_countries = 'Y'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys become the legend entries that will be plotted
# (see the dataframe menu in the first chart cell for all options).
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# (explicit list, or one of: list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly)
list_of_countries = list_of_countries_europe

# CHOOSE A START DATE FOR THIS PARTICULAR CHART ('YYYY-MM-DD')
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'EUROPE - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'Advanced Chart - INSTANT TREND ON RAW VALUES - Pls. read explanation below the Chart itself' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' or 'log'
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation
flg_totalize_countries = 'Y'

# How many records to keep from the top ranked down (useful for heatmaps;
# 0 keeps all records; default 10)
num_ranked = 10

# 'Y' draws an advanced chart (cubic polynomial fitting, second derivatives,
# inflection points and root points); default 'N'
flg_advanced_chart = 'Y'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart)

# +
# LET'S PLOT A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys become the legend entries that will be plotted
# (see the dataframe menu in the first chart cell for all options).
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# (explicit list, or one of: list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly)
list_of_countries = list_of_countries_europe

# CHOOSE A START DATE FOR THIS PARTICULAR CHART ('YYYY-MM-DD')
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'EUROPE - Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' or 'log'
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation
flg_totalize_countries = 'Y'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys become the legend entries that will be plotted.
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
list_of_countries = list_of_countries_europe

# CHOOSE A START DATE FOR THIS PARTICULAR CHART ('YYYY-MM-DD')
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'EUROPE - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' or 'log'
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation
flg_totalize_countries = 'Y'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys become the legend entries that will be plotted
# (see the dataframe menu in the first chart cell for all options).
dict_of_dataframes = {}
dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# (explicit list, or one of: list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly)
list_of_countries = list_of_countries_europe

# CHOOSE A START DATE FOR THIS PARTICULAR CHART ('YYYY-MM-DD')
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'EUROPE - Recoveries DAILY NEW' + \
        '\n' + \
        'vs. Fatalities DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' or 'log'
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation
flg_totalize_countries = 'Y'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# -

# ### Section dedicated to Italy:

# +
# LET'S PLOT THE ITALY SUMMARY AT A GLANCE.
# Gather the relevant grandtotal values and percentages into parallel lists.
summary_countries = 'Italy'
summary_figures = [dict_of_active_grandtotals[summary_countries],
                   dict_of_confirmed_grandtotals[summary_countries],
                   dict_of_recovered_grandtotals[summary_countries],
                   dict_of_deaths_grandtotals[summary_countries]]
# Confirmed is the reference total, hence the fixed 100 percent entry.
summary_percs = [dict_of_active_grandtotals_percs[summary_countries],
                 100,
                 dict_of_recovered_grandtotals_percs[summary_countries],
                 dict_of_deaths_grandtotals_percs[summary_countries]]

# Sub titles, box colors and text colors for each summary box (same order as above)
summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities']
summary_colors = ['blue', 'yellow', 'green', 'red']
summary_text_colors = ['white', 'black', 'black', 'black']

# Title (also used to name the saved file)
summary_title = 'ITALY ONLY - SUMMARY GRANDTOTALS' + \
                '\n' + \
                'ACTIVE means CURRENTLY OPEN CASES, under treatment' + \
                '\n' + \
                'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED' + \
                '\n' + \
                'RECOVERED means CLOSED CASES - healing' + \
                '\n' + \
                'FATALITIES means CLOSED CASES - negative outcome' + \
                '\n' + \
                'Since 21 Jan 2020'

# LET'S PLOT!
plot_summary_headings_chart(
    summary_figures,
    summary_percs,
    summary_subtitles,
    summary_colors,
    summary_text_colors,
    summary_title)

# +
# LET'S PLOT A COMPLETE CHART!

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys become the legend entries that will be plotted.
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
dict_of_dataframes['Cumulative Recovered'] = recovered_DF
dict_of_dataframes['Cumulative Fatalities'] = deaths_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
list_of_countries = ['Italy']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART ('YYYY-MM-DD')
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'ITALY ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Recoveries CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Fatalities CUMULATIVE TOTAL' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' or 'log'
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys become the legend entries that will be plotted
# (see the dataframe menu in the first chart cell for all options).
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# (explicit list, or one of: list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly)
list_of_countries = ['Italy']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART ('YYYY-MM-DD')
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'ITALY ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'Advanced Chart - TREND ON 5 DAYS AVERAGE VALUES - Pls. read explanation below the Chart itself' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' or 'log'
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation
flg_totalize_countries = 'N'

# How many records to keep from the top ranked down (useful for heatmaps;
# 0 keeps all records; default 10)
num_ranked = 10

# 'Y' draws an advanced chart (cubic polynomial fitting, second derivatives,
# inflection points and root points); default 'N'
flg_advanced_chart = 'Y'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart)

# +
# LET'S PLOT A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys become the legend entries that will be plotted
# (see the dataframe menu in the first chart cell for all options).
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# (explicit list, or one of: list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly)
list_of_countries = ['Italy']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART ('YYYY-MM-DD')
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'ITALY ONLY - Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' or 'log'
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys become the legend entries that will be plotted.
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['Italy'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-02-19' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'ITALY ONLY - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Confirmed Affected DAILY NEW' + \ '\n' + \ 'Since 19 Feb 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # + # LET'S PLOTS A COMPLETE CHART! 
# Chart: ITALY - daily new recoveries vs. daily new fatalities, linear scale.
# dict_of_dataframes maps legend labels to the dataframes to plot; other
# series variants defined in the file can be added as extra entries.
dict_of_dataframes = {'Daily Recovered': reduced_recovered_DF,
                      'Daily Fatalities': reduced_deaths_DF}

# Countries: an explicit list, or one of the prebuilt lists
# (list_of_countries_world, list_of_countries_world_nochina,
#  list_of_countries_europe, list_of_countries_europe_noitaly).
list_of_countries = ['Italy']

start_date = '2020-02-19'               # 'YYYY-MM-DD'
title = '\n'.join(('ITALY ONLY - Recoveries DAILY NEW',
                   'vs. Fatalities DAILY NEW',
                   'Since 19 Feb 2020 - Linear Scale'))
scale_type = 'plain'                    # 'plain' or 'log'
flg_totalize_countries = 'N'            # 'Y' sums nations, 'N' one chart each

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# Chart: Italy vs. other European countries - cumulative confirmed, one
# chart per nation, linear scale.
dict_of_dataframes = {'Cumulative Confirmed': confirmed_DF}

list_of_countries = list_of_countries_europe
start_date = '2020-02-19'
title = '\n'.join(('ITALY COMPARED TO EUROPEAN COUNTRIES - Confirmed Affected CUMULATIVE TOTAL',
                   'Since 19 Feb 2020 - Linear Scale'))
scale_type = 'plain'
flg_totalize_countries = 'N'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# -

# ### Here below a Section dedicated to European Countries as a whole, excluding Italy:

# +
# Summary-at-a-glance boxes for European countries excluding Italy.
summary_countries = 'europe_noitaly'

# Grand totals and their percentages (confirmed is the 100% reference).
summary_figures = [dict_of_active_grandtotals[summary_countries],
                   dict_of_confirmed_grandtotals[summary_countries],
                   dict_of_recovered_grandtotals[summary_countries],
                   dict_of_deaths_grandtotals[summary_countries]]
summary_percs = [dict_of_active_grandtotals_percs[summary_countries],
                 100,
                 dict_of_recovered_grandtotals_percs[summary_countries],
                 dict_of_deaths_grandtotals_percs[summary_countries]]

# One subtitle / box color / text color per box.
summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities']
summary_colors = ['blue', 'yellow', 'green', 'red']
summary_text_colors = ['white', 'black', 'black', 'black']

# Title (also used to name the saved file).
summary_title = '\n'.join(('EUROPEAN COUNTRIES EXCLUDING ITALY - SUMMARY GRANDTOTALS',
                           'ACTIVE means CURRENTLY OPEN CASES, under treatment',
                           'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED',
                           'RECOVERED means CLOSED CASES - healing',
                           'FATALITIES means CLOSED CASES - negative outcome',
                           'Since 21 Jan 2020'))

plot_summary_headings_chart(
    summary_figures,
    summary_percs,
    summary_subtitles,
    summary_colors,
    summary_text_colors,
    summary_title)

# +
# LET'S PLOT A COMPLETE CHART!
# Chart: European countries excluding Italy (totalized) - cumulative
# confirmed vs. cumulative recovered vs. cumulative fatalities, linear scale.
# dict_of_dataframes maps legend labels to the dataframes to plot; other
# series variants defined in the file can be added as extra entries.
dict_of_dataframes = {'Cumulative Confirmed': confirmed_DF,
                      'Cumulative Recovered': recovered_DF,
                      'Cumulative Fatalities': deaths_DF}

# Countries: one of the prebuilt lists or an explicit list.
list_of_countries = list_of_countries_europe_noitaly

start_date = '2020-02-19'               # 'YYYY-MM-DD'
title = '\n'.join(('EUROPEAN COUNTRIES EXCLUDING ITALY - Confirmed Affected CUMULATIVE TOTAL',
                   'vs. Recoveries CUMULATIVE TOTAL',
                   'vs. Fatalities CUMULATIVE TOTAL',
                   'Since 19 Feb 2020 - Linear Scale'))
scale_type = 'plain'                    # 'plain' or 'log'
flg_totalize_countries = 'Y'            # 'Y' sums nations, 'N' one chart each

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# Advanced chart: same totalized cumulative-confirmed series, with cubic
# polynomial fitting, second derivatives, inflection and root points.
dict_of_dataframes = {'Cumulative Confirmed': confirmed_DF}

list_of_countries = list_of_countries_europe_noitaly
start_date = '2020-02-19'
title = '\n'.join(('EUROPEAN COUNTRIES EXCLUDING ITALY - Confirmed Affected CUMULATIVE TOTAL',
                   'Advanced Chart - INSTANT TREND ON RAW VALUES - Pls. read explanation below the Chart itself',
                   'Since 19 Feb 2020 - Linear Scale'))
scale_type = 'plain'
flg_totalize_countries = 'Y'

# Keep only the top-ranked records (useful for heatmaps); 0 keeps all.
num_ranked = 10
# 'Y' enables the advanced chart decorations described above.
flg_advanced_chart = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart)

# +
# LET'S PLOT A COMPLETE CHART!
# Chart: European countries excluding Italy (totalized) - daily new
# confirmed cases, linear scale.
# dict_of_dataframes maps legend labels to the dataframes to plot; other
# series variants defined in the file can be added as extra entries.
dict_of_dataframes = {'Daily Confirmed': reduced_confirmed_DF}

# Countries: one of the prebuilt lists or an explicit list.
list_of_countries = list_of_countries_europe_noitaly

start_date = '2020-02-19'               # 'YYYY-MM-DD'
title = '\n'.join(('EUROPEAN COUNTRIES EXCLUDING ITALY - Confirmed Affected DAILY NEW',
                   'Since 19 Feb 2020 - Linear Scale'))
scale_type = 'plain'                    # 'plain' or 'log'
flg_totalize_countries = 'Y'            # 'Y' sums nations, 'N' one chart each

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# Chart: same countries (totalized) - cumulative confirmed vs. daily new
# confirmed, linear scale.
dict_of_dataframes = {'Daily Confirmed': reduced_confirmed_DF,
                      'Cumulative Confirmed': confirmed_DF}

list_of_countries = list_of_countries_europe_noitaly
start_date = '2020-02-19'
title = '\n'.join(('EUROPEAN COUNTRIES EXCLUDING ITALY - Confirmed Affected CUMULATIVE TOTAL',
                   'vs. Confirmed Affected DAILY NEW',
                   'Since 19 Feb 2020 - Linear Scale'))
scale_type = 'plain'
flg_totalize_countries = 'Y'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# Chart: European countries excluding Italy (totalized) - daily new
# recoveries vs. daily new fatalities, linear scale.
# dict_of_dataframes maps legend labels to the dataframes to plot; other
# series variants defined in the file can be added as extra entries.
dict_of_dataframes = {'Daily Recovered': reduced_recovered_DF,
                      'Daily Fatalities': reduced_deaths_DF}

# Countries: one of the prebuilt lists or an explicit list.
list_of_countries = list_of_countries_europe_noitaly

start_date = '2020-02-19'               # 'YYYY-MM-DD'
title = '\n'.join(('EUROPEAN COUNTRIES EXCLUDING ITALY - Recoveries DAILY NEW',
                   'vs. Fatalities DAILY NEW',
                   'Since 19 Feb 2020 - Linear Scale'))
scale_type = 'plain'                    # 'plain' or 'log'
flg_totalize_countries = 'Y'            # 'Y' sums nations, 'N' one chart each

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# Chart: same countries, one chart per nation - cumulative confirmed,
# linear scale.
dict_of_dataframes = {'Cumulative Confirmed': confirmed_DF}

list_of_countries = list_of_countries_europe_noitaly
start_date = '2020-02-19'
title = '\n'.join(('EUROPEAN COUNTRIES EXCLUDING ITALY - Confirmed Affected CUMULATIVE TOTAL',
                   'Since 19 Feb 2020 - Linear Scale'))
scale_type = 'plain'
flg_totalize_countries = 'N'

plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# -

# ### Germany visualizations:

# +
# Summary-at-a-glance boxes for Germany.
summary_countries = 'Germany'

# Grand totals and their percentages (confirmed is the 100% reference).
summary_figures = [dict_of_active_grandtotals[summary_countries],
                   dict_of_confirmed_grandtotals[summary_countries],
                   dict_of_recovered_grandtotals[summary_countries],
                   dict_of_deaths_grandtotals[summary_countries]]
summary_percs = [dict_of_active_grandtotals_percs[summary_countries],
                 100,
                 dict_of_recovered_grandtotals_percs[summary_countries],
                 dict_of_deaths_grandtotals_percs[summary_countries]]

# One subtitle / box color / text color per box.
summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities']
summary_colors = ['blue', 'yellow', 'green', 'red']
summary_text_colors = ['white', 'black', 'black', 'black']

# Title (also used to name the saved file).
summary_title = '\n'.join(('GERMANY ONLY - SUMMARY GRANDTOTALS',
                           'ACTIVE means CURRENTLY OPEN CASES, under treatment',
                           'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED',
                           'RECOVERED means CLOSED CASES - healing',
                           'FATALITIES means CLOSED CASES - negative outcome',
                           'Since 21 Jan 2020'))

plot_summary_headings_chart(
    summary_figures,
    summary_percs,
    summary_subtitles,
    summary_colors,
    summary_text_colors,
    summary_title)

# +
# LET'S PLOT A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys are the legend entries that will be plotted. Other
# selectable dataframes follow the notebook's naming scheme:
#   reduced_*_DF (daily new), *_DF (cumulative), *_heatmap_DF (heatmaps),
#   (reduced_)moving_avg_*_DF (5-day averages), *_pie_DF (shares).
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
dict_of_dataframes['Cumulative Recovered'] = recovered_DF
dict_of_dataframes['Cumulative Fatalities'] = deaths_DF

# CHOOSE A LIST OF COUNTRIES (explicit list or a predefined list_of_countries_*).
list_of_countries = ['Germany']

# CHOOSE A START DATE ('YYYY-MM-DD').
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART.
title = 'GERMANY ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Recoveries CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Fatalities CUMULATIVE TOTAL' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' OR 'log'.
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation.
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# +
# LET'S PLOT A COMPLETE CHART!

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# (dictionary keys are the legend entries that will be plotted).
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF

# CHOOSE A LIST OF COUNTRIES (explicit list or a predefined list_of_countries_*).
list_of_countries = ['Germany']

# CHOOSE A START DATE ('YYYY-MM-DD').
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART.
title = 'GERMANY ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'Advanced Chart - TREND ON 5 DAYS AVERAGE VALUES - Pls. read explanation below the Chart itself' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' OR 'log'.
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation.
flg_totalize_countries = 'N'

# HOW MANY RECORDS TO KEEP FROM THE TOP RANKED DOWN.
# Useful for heatmaps; put zero for all records. Default 10.
num_ranked = 10

# 'Y' draws an advanced chart, e.g. with cubic polynomial fitting,
# second derivatives, inflection points and root points. Default 'N'.
flg_advanced_chart = 'Y'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart)
# +
# LET'S PLOT A COMPLETE CHART!

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# (dictionary keys are the legend entries that will be plotted).
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF

# CHOOSE A LIST OF COUNTRIES (explicit list or a predefined list_of_countries_*).
list_of_countries = ['Germany']

# CHOOSE A START DATE ('YYYY-MM-DD').
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART.
title = 'GERMANY ONLY - Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' OR 'log'.
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation.
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# +
# LET'S PLOT A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys are the legend entries that will be plotted. Other
# selectable dataframes follow the notebook's naming scheme:
#   reduced_*_DF (daily new), *_DF (cumulative), *_heatmap_DF (heatmaps),
#   (reduced_)moving_avg_*_DF (5-day averages), *_pie_DF (shares).
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF

# CHOOSE A LIST OF COUNTRIES (explicit list or a predefined list_of_countries_*).
list_of_countries = ['Germany']

# CHOOSE A START DATE ('YYYY-MM-DD').
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART.
title = 'GERMANY ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' OR 'log'.
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation.
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# +
# LET'S PLOT A COMPLETE CHART!

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# (dictionary keys are the legend entries that will be plotted).
dict_of_dataframes = {}
dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF

# CHOOSE A LIST OF COUNTRIES (explicit list or a predefined list_of_countries_*).
list_of_countries = ['Germany']

# CHOOSE A START DATE ('YYYY-MM-DD').
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART.
title = 'GERMANY ONLY - Recoveries DAILY NEW' + \
        '\n' + \
        'vs. Fatalities DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' OR 'log'.
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation.
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# -
# ### Section dedicated to United Kingdom:
# +
# LET'S PLOT THE UNITED KINGDOM SUMMARY AT A GLANCE.
# Gather the grand-total figures and percentages for the chosen country.
summary_countries = 'United Kingdom'
summary_figures = [dict_of_active_grandtotals[summary_countries],
                   dict_of_confirmed_grandtotals[summary_countries],
                   dict_of_recovered_grandtotals[summary_countries],
                   dict_of_deaths_grandtotals[summary_countries]]
# Confirmed cases act as the 100% reference, hence the literal 100.
summary_percs = [dict_of_active_grandtotals_percs[summary_countries],
                 100,
                 dict_of_recovered_grandtotals_percs[summary_countries],
                 dict_of_deaths_grandtotals_percs[summary_countries]]

# CHOOSE SUB TITLES FOR EACH BOX.
summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities']

# CHOOSE COLORS FOR EACH BOX.
summary_colors = ['blue', 'yellow', 'green', 'red']

# CHOOSE TEXT COLORS FOR EACH BOX.
summary_text_colors = ['white', 'black', 'black', 'black']

# CHOOSE A TITLE (USED TO SAVE FILE).
summary_title = 'U.K. ONLY - SUMMARY GRANDTOTALS' + \
                '\n' + \
                'ACTIVE means CURRENTLY OPEN CASES, under treatment' + \
                '\n' + \
                'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED' + \
                '\n' + \
                'RECOVERED means CLOSED CASES - healing' + \
                '\n' + \
                'FATALITIES means CLOSED CASES - negative outcome' + \
                '\n' + \
                'Since 21 Jan 2020'

# LET'S PLOT!
plot_summary_headings_chart(
    summary_figures,
    summary_percs,
    summary_subtitles,
    summary_colors,
    summary_text_colors,
    summary_title)
# +
# LET'S PLOT A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys are the legend entries that will be plotted. Other
# selectable dataframes follow the notebook's naming scheme:
#   reduced_*_DF (daily new), *_DF (cumulative), *_heatmap_DF (heatmaps),
#   (reduced_)moving_avg_*_DF (5-day averages), *_pie_DF (shares).
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
dict_of_dataframes['Cumulative Recovered'] = recovered_DF
dict_of_dataframes['Cumulative Fatalities'] = deaths_DF

# CHOOSE A LIST OF COUNTRIES (explicit list or a predefined list_of_countries_*).
list_of_countries = ['United Kingdom']

# CHOOSE A START DATE ('YYYY-MM-DD').
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART.
title = 'U.K. ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Recoveries CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Fatalities CUMULATIVE TOTAL' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' OR 'log'.
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation.
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# +
# LET'S PLOT A COMPLETE CHART!

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# (dictionary keys are the legend entries that will be plotted).
dict_of_dataframes = {}
dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF

# CHOOSE A LIST OF COUNTRIES (explicit list or a predefined list_of_countries_*).
list_of_countries = ['United Kingdom']

# CHOOSE A START DATE ('YYYY-MM-DD').
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART.
title = 'U.K. ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'Advanced Chart - TREND ON 5 DAYS AVERAGE VALUES - Pls. read explanation below the Chart itself' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' OR 'log'.
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation.
flg_totalize_countries = 'N'

# HOW MANY RECORDS TO KEEP FROM THE TOP RANKED DOWN.
# Useful for heatmaps; put zero for all records. Default 10.
num_ranked = 10

# 'Y' draws an advanced chart, e.g. with cubic polynomial fitting,
# second derivatives, inflection points and root points. Default 'N'.
flg_advanced_chart = 'Y'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart)
# +
# LET'S PLOT A COMPLETE CHART!

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# (dictionary keys are the legend entries that will be plotted).
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF

# CHOOSE A LIST OF COUNTRIES (explicit list or a predefined list_of_countries_*).
list_of_countries = ['United Kingdom']

# CHOOSE A START DATE ('YYYY-MM-DD').
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART.
title = 'U.K. ONLY - Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' OR 'log'.
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation.
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# +
# LET'S PLOT A COMPLETE CHART!
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART.
# Dictionary keys are the legend entries that will be plotted. Other
# selectable dataframes follow the notebook's naming scheme:
#   reduced_*_DF (daily new), *_DF (cumulative), *_heatmap_DF (heatmaps),
#   (reduced_)moving_avg_*_DF (5-day averages), *_pie_DF (shares).
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF

# CHOOSE A LIST OF COUNTRIES (explicit list or a predefined list_of_countries_*).
list_of_countries = ['United Kingdom']

# CHOOSE A START DATE ('YYYY-MM-DD').
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART.
# NOTE: fixed 'U.K ONLY' -> 'U.K. ONLY' for consistency with the other U.K. cells.
title = 'U.K. ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' OR 'log'.
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation.
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# +
# LET'S PLOT A COMPLETE CHART!

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# (dictionary keys are the legend entries that will be plotted).
dict_of_dataframes = {}
dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF

# CHOOSE A LIST OF COUNTRIES (explicit list or a predefined list_of_countries_*).
list_of_countries = ['United Kingdom']

# CHOOSE A START DATE ('YYYY-MM-DD').
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART.
title = 'U.K. ONLY - Recoveries DAILY NEW' + \
        '\n' + \
        'vs. Fatalities DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE: 'plain' OR 'log'.
scale_type = 'plain'

# 'Y' totalizes data across all listed nations; 'N' draws one chart per nation.
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)
# -
# ### France Section of visualizations:
# +
# LET'S PLOT THE FRANCE SUMMARY AT A GLANCE.
# Gather the grand-total figures and percentages for the chosen country.
summary_countries = 'France'
summary_figures = [dict_of_active_grandtotals[summary_countries],
                   dict_of_confirmed_grandtotals[summary_countries],
                   dict_of_recovered_grandtotals[summary_countries],
                   dict_of_deaths_grandtotals[summary_countries]]
# Confirmed cases act as the 100% reference, hence the literal 100.
summary_percs = [dict_of_active_grandtotals_percs[summary_countries],
                 100,
                 dict_of_recovered_grandtotals_percs[summary_countries],
                 dict_of_deaths_grandtotals_percs[summary_countries]]

# CHOOSE SUB TITLES FOR EACH BOX.
summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities']

# CHOOSE COLORS FOR EACH BOX.
summary_colors = ['blue', 'yellow', 'green', 'red']

# CHOOSE TEXT COLORS FOR EACH BOX.
summary_text_colors = ['white', 'black', 'black', 'black']

# CHOOSE A TITLE (USED TO SAVE FILE).
summary_title = 'FRANCE ONLY - SUMMARY GRANDTOTALS' + \
                '\n' + \
                'ACTIVE means CURRENTLY OPEN CASES, under treatment' + \
                '\n' + \
                'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED' + \
                '\n' + \
                'RECOVERED means CLOSED CASES - healing' + \
                '\n' + \
                'FATALITIES means CLOSED CASES - negative outcome' + \
                '\n' + \
                'Since 21 Jan 2020'

# LET'S PLOT!
plot_summary_headings_chart(
    summary_figures,
    summary_percs,
    summary_subtitles,
    summary_colors,
    summary_text_colors,
    summary_title)
# +
# LET'S PLOT A COMPLETE CHART!
# France cell: cumulative confirmed vs. recovered vs. fatalities.
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL
# DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
dict_of_dataframes['Cumulative Recovered'] = recovered_DF
dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
# - list_of_countries_world
# - list_of_countries_world_nochina
# - list_of_countries_europe
# - list_of_countries_europe_noitaly
list_of_countries = ['France']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# FORMAT MUST MATCH 'YYYY-MM-DD'
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'FRANCE ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Recoveries CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Fatalities CUMULATIVE TOTAL' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'plain'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# France cell: advanced chart (5-day average trend) on the cumulative confirmed series.

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL
# DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
#dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
# - list_of_countries_world
# - list_of_countries_world_nochina
# - list_of_countries_europe
# - list_of_countries_europe_noitaly
list_of_countries = ['France']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# FORMAT MUST MATCH 'YYYY-MM-DD'
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'FRANCE ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'Advanced Chart - TREND ON 5 DAYS AVERAGE VALUES - Pls. read explanation below the Chart itself' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'plain'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN
# USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10
num_ranked = 10

# LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING
# SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N'
flg_advanced_chart = 'Y'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart)

# +
# LET'S PLOT A COMPLETE CHART!
# France cell: daily new confirmed cases only.

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL
# DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
#dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
# - list_of_countries_world
# - list_of_countries_world_nochina
# - list_of_countries_europe
# - list_of_countries_europe_noitaly
list_of_countries = ['France']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# FORMAT MUST MATCH 'YYYY-MM-DD'
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'FRANCE ONLY - Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'plain'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# France cell: cumulative confirmed total vs. daily new confirmed.
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL
# DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
# - list_of_countries_world
# - list_of_countries_world_nochina
# - list_of_countries_europe
# - list_of_countries_europe_noitaly
list_of_countries = ['France']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# FORMAT MUST MATCH 'YYYY-MM-DD'
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'FRANCE ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'plain'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# France cell: daily new recoveries vs. daily new fatalities.

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL
# DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
#dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
# - list_of_countries_world
# - list_of_countries_world_nochina
# - list_of_countries_europe
# - list_of_countries_europe_noitaly
list_of_countries = ['France']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# FORMAT MUST MATCH 'YYYY-MM-DD'
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'FRANCE ONLY - Recoveries DAILY NEW' + \
        '\n' + \
        'vs. Fatalities DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'plain'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # - # ### Spain Section: # + # LET'S PLOT THE WORLD SUMMARY AT A GLANCE # PUTS RELEVANT VALUES TOGETHER IN LISTS HOLDING THEM ALL summary_countries = 'Spain' summary_figures = [dict_of_active_grandtotals[summary_countries], dict_of_confirmed_grandtotals[summary_countries], dict_of_recovered_grandtotals[summary_countries], dict_of_deaths_grandtotals[summary_countries]] summary_percs = [dict_of_active_grandtotals_percs[summary_countries], 100, dict_of_recovered_grandtotals_percs[summary_countries], dict_of_deaths_grandtotals_percs[summary_countries]] # CHOOSE SUB TITLES FOR EACH BOX summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities'] # CHOOSE COLORS FOR EACH BOX summary_colors = ['blue', 'yellow', 'green', 'red'] # CHOOSE TEXT COLORS FOR EACH BOX summary_text_colors = ['white', 'black', 'black', 'black'] # LET'S CHOOSE A TITLE (USED TO SAVE FILE) summary_title = 'SPAIN ONLY - SUMMARY GRANDTOTALS' + \ '\n' + \ 'ACTIVE means CURRENTLY OPEN CASES, under treatment' + \ '\n' + \ 'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED' + \ '\n' + \ 'RECOVERED means CLOSED CASES - healing' + \ '\n' + \ 'FATALITIES means CLOSED CASES - negative outcome' + \ '\n' + \ 'Since 21 Jan 2020' # LET'S PLOT! plot_summary_headings_chart( summary_figures, summary_percs, summary_subtitles, summary_colors, summary_text_colors, summary_title) # + # LET'S PLOTS A COMPLETE CHART! 
# Spain cell: cumulative confirmed vs. recovered vs. fatalities.
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL
# DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
dict_of_dataframes['Cumulative Recovered'] = recovered_DF
dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
# - list_of_countries_world
# - list_of_countries_world_nochina
# - list_of_countries_europe
# - list_of_countries_europe_noitaly
list_of_countries = ['Spain']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# FORMAT MUST MATCH 'YYYY-MM-DD'
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'SPAIN ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Recoveries CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Fatalities CUMULATIVE TOTAL' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'plain'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# Spain cell: advanced chart (5-day average trend) on the cumulative confirmed series.

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL
# DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
#dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
# - list_of_countries_world
# - list_of_countries_world_nochina
# - list_of_countries_europe
# - list_of_countries_europe_noitaly
list_of_countries = ['Spain']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# FORMAT MUST MATCH 'YYYY-MM-DD'
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'SPAIN ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'Advanced Chart - TREND ON 5 DAYS AVERAGE VALUES - Pls. read explanation below the Chart itself' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'plain'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN
# USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10
num_ranked = 10

# LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. WITH CUBIC POLYNOMIAL FITTING
# SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N'
flg_advanced_chart = 'Y'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries,
    num_ranked,
    flg_advanced_chart)

# +
# LET'S PLOT A COMPLETE CHART!
# Spain cell: daily new confirmed cases only.

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL
# DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
#dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
# - list_of_countries_world
# - list_of_countries_world_nochina
# - list_of_countries_europe
# - list_of_countries_europe_noitaly
list_of_countries = ['Spain']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# FORMAT MUST MATCH 'YYYY-MM-DD'
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'SPAIN ONLY - Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'plain'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# Spain cell: cumulative confirmed total vs. daily new confirmed.
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL
# DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED
dict_of_dataframes = {}
dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
#dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
#dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
# - list_of_countries_world
# - list_of_countries_world_nochina
# - list_of_countries_europe
# - list_of_countries_europe_noitaly
list_of_countries = ['Spain']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# FORMAT MUST MATCH 'YYYY-MM-DD'
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'SPAIN ONLY - Confirmed Affected CUMULATIVE TOTAL' + \
        '\n' + \
        'vs. Confirmed Affected DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'plain'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart(
    list_of_countries,
    start_date,
    title,
    scale_type,
    dict_of_dataframes,
    dict_of_charttypes,
    flg_totalize_countries)

# +
# LET'S PLOT A COMPLETE CHART!
# Spain cell: daily new recoveries vs. daily new fatalities.

# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART
# THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL
# DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED
dict_of_dataframes = {}
#dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF
dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF
dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF
#dict_of_dataframes['Daily Active'] = reduced_active_DF
#dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF
#dict_of_dataframes['Cumulative Recovered'] = recovered_DF
#dict_of_dataframes['Cumulative Fatalities'] = deaths_DF
#dict_of_dataframes['Cumulative Active'] = active_DF
#dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF
#dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF
#dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF
#dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF
#dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF
#dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF
#dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF
#dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF
#dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF
#dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF
#dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF
#dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF
#dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF
#dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF

# CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART
# THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES:
# - list_of_countries_world
# - list_of_countries_world_nochina
# - list_of_countries_europe
# - list_of_countries_europe_noitaly
list_of_countries = ['Spain']

# CHOOSE A START DATE FOR THIS PARTICULAR CHART
# FORMAT MUST MATCH 'YYYY-MM-DD'
start_date = '2020-02-19'

# CHOOSE A TITLE FOR THIS PARTICULAR CHART
title = 'SPAIN ONLY - Recoveries DAILY NEW' + \
        '\n' + \
        'vs. Fatalities DAILY NEW' + \
        '\n' + \
        'Since 19 Feb 2020 - Linear Scale'

# CHOOSE A SCALE TYPE FOR THIS PARTICULAR CHART
# VALID VALUES ARE 'plain' OR 'log'
scale_type = 'plain'

# CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST
# OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION
# VALID VALUES ARE 'Y' OR 'N'
flg_totalize_countries = 'N'

# LET'S PLOT!
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # - # ### Finally, a Section dedicated to U.S.A.: # + # LET'S PLOT THE WORLD SUMMARY AT A GLANCE # PUTS RELEVANT VALUES TOGETHER IN LISTS HOLDING THEM ALL summary_countries = 'US' summary_figures = [dict_of_active_grandtotals[summary_countries], dict_of_confirmed_grandtotals[summary_countries], dict_of_recovered_grandtotals[summary_countries], dict_of_deaths_grandtotals[summary_countries]] summary_percs = [dict_of_active_grandtotals_percs[summary_countries], 100, dict_of_recovered_grandtotals_percs[summary_countries], dict_of_deaths_grandtotals_percs[summary_countries]] # CHOOSE SUB TITLES FOR EACH BOX summary_subtitles = ['Active Cases', 'Confirmed Cases', 'Recovered', 'Fatalities'] # CHOOSE COLORS FOR EACH BOX summary_colors = ['blue', 'yellow', 'green', 'red'] # CHOOSE TEXT COLORS FOR EACH BOX summary_text_colors = ['white', 'black', 'black', 'black'] # LET'S CHOOSE A TITLE (USED TO SAVE FILE) summary_title = 'U.S.A. ONLY - SUMMARY GRANDTOTALS' + \ '\n' + \ 'ACTIVE means CURRENTLY OPEN CASES, under treatment' + \ '\n' + \ 'CONFIRMED means CUMULATIVE OF CONFIRMED AFFECTED' + \ '\n' + \ 'RECOVERED means CLOSED CASES - healing' + \ '\n' + \ 'FATALITIES means CLOSED CASES - negative outcome' + \ '\n' + \ 'Since 21 Jan 2020' # LET'S PLOT! plot_summary_headings_chart( summary_figures, summary_percs, summary_subtitles, summary_colors, summary_text_colors, summary_title) # + # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} #dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF dict_of_dataframes['Cumulative Recovered'] = recovered_DF dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - 
list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['US'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-02-19' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'U.S.A. ONLY - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Recoveries CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Fatalities CUMULATIVE TOTAL' + \ '\n' + \ 'Since 19 Feb 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # + # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} #dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF #dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = 
reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['US'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-02-19' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'U.S.A. ONLY - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'Advanced Chart - TREND ON 5 DAYS AVERAGE VALUES - Pls. read explanation below the Chart itself' + \ '\n' + \ 'Since 19 Feb 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S INDICATE HOW MANY RECORDS WE WANT TO KEEP FROM THE TOP RANKED DOWN # USEFUL FOR HEATMAPS. PUT ZERO FOR ALL RECORDS. DEFAULT 10 num_ranked = 10 # LET'S INDICATE IF WE WANT TO DRAW AN ADVANCED CHART E.G. 
WITH CUBIC POLYNOMIAL FITTING # SECOND DERIVATIVES, INFLECTION POINTS AND ROOT POINTS. DEFAULT 'N' flg_advanced_chart = 'Y' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries, num_ranked, flg_advanced_chart) # + # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF #dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = 
confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['US'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-02-19' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'U.S.A. ONLY - Confirmed Affected DAILY NEW' + \ '\n' + \ 'Since 19 Feb 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # + # LET'S PLOTS A COMPLETE CHART! 
# CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF #dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF #dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - 
list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['US'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-02-19' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'U.S.A. ONLY - Confirmed Affected CUMULATIVE TOTAL' + \ '\n' + \ 'vs. Confirmed Affected DAILY NEW' + \ '\n' + \ 'Since 19 Feb 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # + # LET'S PLOTS A COMPLETE CHART! # CHOOSE DATAFRAMES OF INTEREST FOR THIS PARTICULAR CHART # THEY WILL BE PUT IN A DICTIONARY HOLDING THEM ALL # DICTIONARY KEYS ARE THE LEGEND ENTRIES THAT WILL BE PLOTTED dict_of_dataframes = {} #dict_of_dataframes['Daily Confirmed'] = reduced_confirmed_DF dict_of_dataframes['Daily Recovered'] = reduced_recovered_DF dict_of_dataframes['Daily Fatalities'] = reduced_deaths_DF #dict_of_dataframes['Daily Active'] = reduced_active_DF #dict_of_dataframes['Cumulative Confirmed'] = confirmed_DF #dict_of_dataframes['Cumulative Recovered'] = recovered_DF #dict_of_dataframes['Cumulative Fatalities'] = deaths_DF #dict_of_dataframes['Cumulative Active'] = active_DF #dict_of_dataframes['Cumulative Confirmed Heatmap'] = confirmed_heatmap_DF #dict_of_dataframes['Cumulative Recovered Heatmap'] = recovered_heatmap_DF #dict_of_dataframes['Cumulative Fatalities Heatmap'] = deaths_heatmap_DF #dict_of_dataframes['Cumulative Active Heatmap'] = active_heatmap_DF #dict_of_dataframes['Daily Confirmed 5 Days Avg'] = reduced_moving_avg_confirmed_DF #dict_of_dataframes['Daily 
Recovered 5 Days Avg'] = reduced_moving_avg_recovered_DF #dict_of_dataframes['Daily Fatalities 5 Days Avg'] = reduced_moving_avg_deaths_DF #dict_of_dataframes['Daily Active 5 Days Avg'] = reduced_moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed 5 Days Avg'] = moving_avg_confirmed_DF #dict_of_dataframes['Cumulative Recovered 5 Days Avg'] = moving_avg_recovered_DF #dict_of_dataframes['Cumulative Fatalities 5 Days Avg'] = moving_avg_deaths_DF #dict_of_dataframes['Cumulative Active 5 Days Avg'] = moving_avg_active_DF #dict_of_dataframes['Cumulative Confirmed Shares'] = confirmed_pie_DF #dict_of_dataframes['Cumulative Recovered Shares'] = recovered_pie_DF #dict_of_dataframes['Cumulative Fatalities Shares'] = deaths_pie_DF # CHOOSE A LIST OF COUNTRIES FOR THIS PARTICULAR CHART # THE LIST CAN BE EXPLICIT OR CAN BE A REFERENCE TO THESE PREVIOUSLY INITIALIZED VARIABLES: # - list_of_countries_world # - list_of_countries_world_nochina # - list_of_countries_europe # - list_of_countries_europe_noitaly list_of_countries = ['US'] # CHOOSE A START DATE FOR THIS PARTICULAR CHART # FORMAT MUST MATCH 'YYYY-MM-DD' start_date = '2020-02-19' # CHOOSE A TITLE FOR THIS THIS PARTICULAR CHART title = 'U.S.A. ONLY - Recoveries DAILY NEW' + \ '\n' + \ 'vs. Fatalities DAILY NEW' + \ '\n' + \ 'Since 19 Feb 2020 - Linear Scale' # CHOOSE A SCALE TYPE FOR THIS THIS PARTICULAR CHART # VALID VALUES ARE 'plain' OR 'log' scale_type = 'plain' # CHOOSE WHETHER YOU WANT TO TOTALIZE DATA ACROSS MULTIPLE NATIONS IN THE SPECIFIED LIST # OR IF YOU WANT SEPARATE CHARTS FOR EACH NATION # VALID VALUES ARE 'Y' OR 'N' flg_totalize_countries = 'N' # LET'S PLOT! 
plot_complete_chart( list_of_countries, start_date, title, scale_type, dict_of_dataframes, dict_of_charttypes, flg_totalize_countries) # - # --- # # Section 5 - Generates a basic "index.html" page to display the Charts # ### Below, we loop over the charts images that have been generated in the dedicated directory "./charts" in order of date/time of creation, and for each image we generate the HTML code to display the charts straight in a browser: # + # GENERATES "index.html" PAGE IN "./charts" TO SHOW SAVED CHARTS ON BROWSER home_directory = os.getcwd() now = datetime.now() last_updated = now.strftime("%d %b, %Y - %H:%M:%S") os.chdir('charts') html_str = """<!DOCTYPE html> <html> <head> <meta charset='UTF-8'> <title>COVID-19 Charts</title> </head> <body> <font face='Impact' size='3' color='black'> <h1>COVID-19 Charts - Last updated """ + last_updated + """ (CET)</h1> </font> <p> <b>Data Source Reference:</b> <br> 2019 Novel Coronavirus COVID-19 (2019-nCoV) Data Repository by Johns Hopkins CSSE: <br> <a href='https://github.com/CSSEGISandData/COVID-19'>https://github.com/CSSEGISandData/COVID-19</a> <br> Worldometer - World Counters and Stats: <br> <a href='https://www.worldometers.info/coronavirus'>https://www.worldometers.info/coronavirus</a> </p> <p> <b>Terms of use:</b><br> Please see the Terms of Use extensively described at the above link for reference </p> <p> <b>Disclaimer:</b><br> This Website, the related GitHub repo and its contents, including all data, mapping, and analysis is provided to the public strictly for educational and academic research purposes. It is hereby disclaimed any and all representations and warranties with respect to the Website and related Git Repo, including accuracy, fitness for use, and merchantability. Reliance on the Website for medical guidance or use of the Website in commerce is strictly prohibited. 
</p> <p> <b>GitHub Repository:</b><br> Please visit the GitHub Repository containing the full source code (Jupyter Notebook) used to generate the charts: <br> <a href='https://github.com/r-lomba/covid-19-charts'>https://github.com/r-lomba/covid-19-charts</a> </p> <p> <b>Contacts:</b><br> You can contact me here: <br> <a href='mailto:<EMAIL>'><EMAIL></a> </p> <hr> <font face='Impact' size='3' color='black'> <h1 align='center'>The World at a glance - Grandtotals Summary - """ + last_updated + """ (CET)</h1> </font> <table border=0>""" i = 0 for file in sorted(filter(os.path.isfile, os.listdir('.')), key=os.path.getmtime): filename = os.fsdecode(file) if filename.endswith('.png'): html_str = html_str + "<tr><p><br><br><br></p></tr>" # EMPTY LINE TO CREATE SPACE BETWEEN SINGLE CHARTS html_str = html_str + "<tr><img src='" + filename.replace(' ', '%20') + "'></tr>" if (i == 0): # COMMENTS TO THE SUMMARY CHART i = 1 html_str += "</table></body></html>" Html_file= open('./index.html','w') Html_file.write(html_str) Html_file.close() os.chdir('..') # + # GENERATES "index.html" PAGE IN "./charts" TO SHOW SAVED CHARTS ON BROWSER home_directory = os.getcwd() now = datetime.now() last_updated = now.strftime("%d %b, %Y - %H:%M:%S") os.chdir('charts') html_str = """<!DOCTYPE html> <html> <head> <meta charset='UTF-8'> <title>COVID-19 Charts</title> </head> <body> <font face='Impact' size='3' color='black'> <h1>COVID-19 Charts - Last updated """ + last_updated + """ (CET)</h1> </font> <p> <b>Data Source Reference:</b> <br> 2019 Novel Coronavirus COVID-19 (2019-nCoV) Data Repository by Johns Hopkins CSSE: <br> <a href='https://github.com/CSSEGISandData/COVID-19'>https://github.com/CSSEGISandData/COVID-19</a> <br> Worldometer - World Counters and Stats: <br> <a href='https://www.worldometers.info/coronavirus'>https://www.worldometers.info/coronavirus</a> </p> <p> <b>Terms of use:</b><br> Please see the Terms of Use extensively described at the above link for reference </p> <p> 
<b>Disclaimer:</b><br> This Website, the related GitHub repo and its contents, including all data, mapping, and analysis is provided to the public strictly for educational and academic research purposes. It is hereby disclaimed any and all representations and warranties with respect to the Website and related Git Repo, including accuracy, fitness for use, and merchantability. Reliance on the Website for medical guidance or use of the Website in commerce is strictly prohibited. </p> <p> <b>GitHub Repository:</b><br> Please visit the GitHub Repository containing the full source code (Jupyter Notebook) used to generate the charts: <br> <a href='https://github.com/r-lomba/covid-19-charts'>https://github.com/r-lomba/covid-19-charts</a> </p> <p> <b>Contacts:</b><br> You can contact me here: <br> <a href='mailto:<EMAIL>'><EMAIL></a> </p> <hr> <font face='Impact' size='3' color='black'> <h1 align='center'>The World at a glance - Grandtotals Summary - """ + last_updated + """ (CET)</h1> </font> <table border='0'>""" table = sorted(filter(os.path.isdir, os.listdir('.')), key=os.path.getmtime) table_splitted = np.array_split(table,2) # HERE, "2" IS THE NUMBER OF LINES WE WANT OUR TABLE TO SPAN for i in range(len(table_splitted)): html_str = html_str + '<tr>' for j in range(len(table_splitted[i])): html_str = html_str + '<td><img src="./' + table_splitted[i][j] + '/Flag_of_' + table_splitted[i][j] + '.png" width="30" height="30"></td>' html_str = html_str + '</tr>' html_str = html_str + '</table>' for directory in sorted(filter(os.path.isdir, os.listdir('.')), key=os.path.getmtime): directoryname = os.fsdecode(directory) if filename.endswith('.png'): html_str = html_str + "<tr><p><br><br><br></p></tr>" # EMPTY LINE TO CREATE SPACE BETWEEN SINGLE CHARTS html_str = html_str + "<tr><img src='" + filename.replace(' ', '%20') + "'></tr>" if (i == 0): # COMMENTS TO THE SUMMARY CHART i = 1 html_str += "</table></body></html>" Html_file= open('./index.html','w') 
Html_file.write(html_str) Html_file.close() os.chdir('..') # + ############################################### # BACK TO HOME DIRECTORY TO POSITION FOR NEXT LOOP CYCLE os.chdir(path) flg_top_page = 'Y' # WE ARE CREATING THE ROOT PAGE HERE country = 'World' country_name_in_exam = 'World' # CREATES INDEX PAGE FOR COUNTRY OR GROUP OF COUNTRIES IN CURRENT LOOP CYCLE try: generate_country_index_page(country, country_name_in_exam, detailed_countries_dict, flg_top_page) except OSError: print ('Error during creation of Country Index Page: ' + country_name_in_exam + ', proceed anyway') os.chdir(path) pass else: print('Successfully created Country Index Page: ' + country_name_in_exam) os.chdir(path) pass # -
covid-19.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: soccer_rating_pressing-3.6.2
#     language: python
#     name: soccer_rating_pressing-3.6.2
# ---

# +
# %load_ext autoreload
# %autoreload 2
import os
import sys
sys.path.append('../')

import pandas as pd
import tqdm
import warnings
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)

import socceraction.classification.features as fs
import socceraction.classification.labels as lab
# -

## Configure file and folder names
datafolder = "../data"
spadl_h5 = os.path.join(datafolder, "spadl-statsbomb.h5")
features_h5 = os.path.join(datafolder, "features.h5")
labels_h5 = os.path.join(datafolder, "labels.h5")
predictions_h5 = os.path.join(datafolder, "predictions.h5")

# Restrict the analysis to World Cup games only.
games = pd.read_hdf(spadl_h5, "games")
games = games[games.competition_name == "FIFA World Cup"]
print("nb of games:", len(games))

# +
# 1. Select feature set X
xfns = [fs.actiontype,
        fs.actiontype_onehot,
        # fs.bodypart,
        fs.bodypart_onehot,
        fs.result,
        fs.result_onehot,
        fs.goalscore,
        fs.startlocation,
        fs.endlocation,
        fs.movement,
        fs.space_delta,
        fs.startpolar,
        fs.endpolar,
        fs.team,
        # fs.time,
        fs.time_delta,
        # fs.actiontype_result_onehot
        ]
nb_prev_actions = 1

# generate the columns of the selected features
Xcols = fs.feature_column_names(xfns, nb_prev_actions)

X = []
for game_id in tqdm.tqdm(games.game_id, desc="selecting features"):
    Xi = pd.read_hdf(features_h5, f"game_{game_id}")
    X.append(Xi[Xcols])
X = pd.concat(X)

# 2. Select label Y
Ycols = ["scores", "concedes"]
Y = []
for game_id in tqdm.tqdm(games.game_id, desc="selecting label"):
    Yi = pd.read_hdf(labels_h5, f"game_{game_id}")
    Y.append(Yi[Ycols])
Y = pd.concat(Y)

print("X:", list(X.columns))
print("Y:", list(Y.columns))

# +
# %%time
# 3. train classifiers F(X) = Y
# (one binary classifier per label column; the redundant Y_hat
# initialization that used to precede this loop has been removed -- Y_hat
# is built from scratch in the evaluation cell below)
import xgboost

models = {}
for col in list(Y.columns):
    model = xgboost.XGBClassifier()
    model.fit(X, Y[col])
    models[col] = model

# +
from sklearn.metrics import brier_score_loss, roc_auc_score

# Evaluate on the training set: probability of the positive class per label.
Y_hat = pd.DataFrame()
for col in Y.columns:
    Y_hat[col] = [p[1] for p in models[col].predict_proba(X)]
    print(f"Y: {col}")
    print("  Brier score: %.4f" % brier_score_loss(Y[col], Y_hat[col]))
    print("  ROC AUC: %.4f" % roc_auc_score(Y[col], Y_hat[col]))
# -

# ### Save predictions

# +
# get rows with game id per action
A = []
for game_id in tqdm.tqdm(games.game_id, "loading game ids"):
    Ai = pd.read_hdf(spadl_h5, f"actions/game_{game_id}")
    A.append(Ai[["game_id"]])
A = pd.concat(A)
A = A.reset_index(drop=True)

# concatenate action game id rows with predictions and save per game
grouped_predictions = pd.concat([A, Y_hat], axis=1).groupby("game_id")
for k, df in tqdm.tqdm(grouped_predictions, desc="saving predictions per game"):
    df = df.reset_index(drop=True)
    df[Y_hat.columns].to_hdf(predictions_h5, f"game_{int(k)}")
public-notebooks/4-estimate-scoring-and-conceding-probabilities.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + tags=["settings"]
import matplotlib.style as style
import pandas as pd

style.use('seaborn-whitegrid')
# %matplotlib inline

# +
# Change format of charts to .svg
# %config InlineBackend.figure_format = 'svg'
# %xmode Plain
# -

df = pd.read_csv('telco_dataset.csv')

df.info()

# Check for unique record ID #s - all were unique
df['customerID'].nunique()

# Drop this column since we no longer need it
df.drop('customerID', axis=1, inplace=True)

copies = df.duplicated(subset=['gender', 'SeniorCitizen', 'Partner',
                               'Dependents', 'tenure', 'PhoneService',
                               'MultipleLines', 'InternetService',
                               'OnlineSecurity', 'OnlineBackup',
                               'DeviceProtection', 'TechSupport',
                               'StreamingTV', 'StreamingMovies', 'Contract',
                               'PaperlessBilling', 'PaymentMethod',
                               'MonthlyCharges', 'TotalCharges', 'Churn'])
copies.value_counts()

# I subsequently explored those 22 'duplicates' but determined they were all
# due to having the same characteristics - people in their first month of
# payments who had the same services and had paid the same amount.

df.columns

# Look at data to see if we need to recode any variables
df.head(2)

# +
# Coded binary categories as 0 and 1 in preparation for regression
binary_dict = {'No': 0, 'No internet service': 0, 'No phone service': 0, 'Yes': 1}


def convert_cols_to_binary(col_names: list):
    """Map Yes/No-style values of the named columns of df to 0/1 in place."""
    for col in col_names:
        df[col] = df[col].map(binary_dict)


cols = ['Partner', 'Dependents', 'PhoneService', 'MultipleLines',
        'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
        'StreamingTV', 'StreamingMovies', 'PaperlessBilling', 'Churn']
convert_cols_to_binary(cols)

# +
# Code gender as binary
gender_dict = {'Male': 0, 'Female': 1}
df['gender'] = df['gender'].map(gender_dict)

# Rename column to 'female' since 1's now represent females.
# BUG FIX: rename() returns a new frame unless inplace=True; the original
# call discarded its result, so the column silently kept its old name.
df.rename(columns={'gender': 'female'}, inplace=True)
# -

# Confirm that our column renamed correctly
df.columns

# Recast binary columns as integers, as some of them were floats.
# BUG FIX: astype() has no 'inplace' parameter (older pandas silently
# swallowed unknown kwargs), so the original statement was a no-op --
# assign the converted columns back instead.
int_cols = ['OnlineSecurity', 'OnlineBackup', 'DeviceProtection',
            'TechSupport', 'StreamingTV', 'StreamingMovies']
df[int_cols] = df[int_cols].astype('int')

# Tried recasting TotalCharges to float to divide it by tenure, but got an
# error as there are 11 values in the column showing as ' '.  This occurred
# because some people had not had any charges yet.  I then dropped those
# records (only 11 of them) in order to allow us to cast the column as float.

# +
# Dropping the 11 ' ' values from TotalCharges column
df = df[df['TotalCharges'] != ' ']

# Then recast the column as float
df['TotalCharges'] = df['TotalCharges'].astype('float')

# Created new feature column to measure average monthly charges
df['AvgMonthlyCharges'] = df['TotalCharges'] / df['tenure']
# -

# Create dummy variables in preparation for regression.
# Drop first binary column for dummy variables to prevent redundant columns.
test = pd.get_dummies(data=df,
                      columns=['InternetService', 'Contract', 'PaymentMethod'],
                      drop_first=True)

# +
# Change name of columns for both dataframes to make them easier to read
# and work with
df.columns = ['gender', 'senior', 'partner', 'dependents', 'tenure',
              'phone_service', 'multiple_lines', 'internet_service',
              'online_security', 'online_backup', 'device_protection',
              'tech_support', 'streaming_tv', 'streaming_movies', 'contract',
              'paperless_billing', 'payment_method', 'monthly_charges',
              'total_charges', 'churn', 'avg_monthly_charges']

test.columns = ['gender', 'senior', 'partner', 'dependents', 'tenure',
                'phone_service', 'multiple_lines', 'online_security',
                'online_backup', 'device_protection', 'tech_support',
                'streaming_tv', 'streaming_movies', 'paperless_billing',
                'monthly_charges', 'total_charges', 'churn',
                'avg_monthly_charges', 'internet_service-fiber_optic',
                'internet_service-no', 'contract-one_year',
                'contract-two_year', 'payment_method-credit_card_auto',
                'payment_method-electronic_check',
                'payment_method-mailed_check']
# -

# Saving clean data and clean data + dummy variables for regression.
# Commenting code out below so we don't overwrite our current data files
# unless we mean to.

# +
# # Save output as CSV
# df.to_csv('data/clean_data.csv', index=False)
# test.to_csv('data/clean_data_encoded_for_regression.csv', index=False)
# -

# ### Feature engineering

df = pd.read_csv('data/clean_data.csv')

df.head()

df['monthly_charges'].describe()

df['tenure'].describe()

df['internet_service'].value_counts()

df.groupby(df['internet_service'])['monthly_charges'].mean()

x = df.groupby(df['tenure'])['monthly_charges'].mean()

df.groupby(df['phone_service'])['monthly_charges'].mean()

df.columns

# +
# BUG FIX: the dummy columns below only exist in the encoded dataset, not
# in clean_data.csv loaded above, so selecting them from df raised a
# KeyError.  Load the encoded file for the regression features instead.
encoded = pd.read_csv('data/clean_data_encoded_for_regression.csv')

X = encoded[['phone_service', 'multiple_lines', 'online_security',
             'online_backup', 'device_protection', 'tech_support',
             'streaming_tv', 'streaming_movies',
             'internet_service-fiber_optic', 'internet_service-no']]
y = encoded['monthly_charges']
# -
1 - Data Cleaning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Single-Cell Analysis # v0.1 - 22/03/25 # The purpose of this notebook is to give a basic example of how to perform single-cell analysis of SpaceM data, more precisely the following steps: # # - Loading multiple SpaceM datasets # - QC & Preprocessing # - Dimensionality reduction (i.e. creating a UMAP) # - Clustering # - Differential analysis # # These steps are demonstrated using a SpaceM perturbation dataset generously donated by Luísa. # If you want to run this notebook yourself, you can download the dataset [here](https://oc.embl.de/index.php/s/1eMkXRIS8HE2qoY). # The dataset contains the metabolomes of cells subjected to one of four different treatments in order to induce metabolic changes, with five replicates for each treatment. # # You are also invited to also use this notebook as a basis for your own analysis - keep in mind however that each analysis often needs to be custom-tailored to your data. Feel free to reach out to Alex if you need help! # # Before you read this notebook, also consider checking out [this primer](https://scanpy.readthedocs.io/en/stable/usage-principles.html) on the core usage principles of Scanpy, the main Python package we're going to use for single-cell analysis. # ### Setup # # # <div class="alert alert-info"> # # **Note:** Make sure to install both [Scanpy](https://scanpy.readthedocs.io/en/stable/installation.html) and [Outer SpaceM](https://mattausc.embl-community.io/outer-spacem/installation.html) before running this notebook! 
# # </div> # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import scanpy as sc import outer_spacem as osm import os # %matplotlib inline # %config InlineBackend.figure_formats = ['retina'] sns.set( rc={ "figure.figsize":(5, 5), "legend.frameon": False }, style="ticks", context="talk" ) # - # ### Setup of paths and names well = "Drug_W8" analysis_name = "v1" # ### Loading the data # + main_dir = os.path.join("/Users/alberto-mac/EMBL_ATeam/projects/gastrosome", well, "reprocessing") adata = sc.read(os.path.join(main_dir, "single_cell_analysis/spatiomolecular_adata.h5ad")) # intracell_ions = pd.read_csv("/Users/alberto-mac/EMBL_ATeam/projects/gastrosome/molecules_databases/reannotated/AB_Gastrosome_DrugW8_intra_ions_v2.tsv", # sep="\t", index_col=0) proj_dir = os.path.join(main_dir, "analysis", analysis_name) # - # #### Mark gastrosomes # + cond_col = "cell_type" adata.obs[cond_col] = np.where(adata.obs["max_intensity-Annotations"] > 0., "Gastrosomes", "Other cells") adata.obs = adata.obs.astype({cond_col: "category"}) # - nb_marked_cells = (adata.obs[cond_col] == "Gastrosomes").sum() total_nb_cells = adata.obs[cond_col].shape[0] print("Gastrosomes: {}/{} cells".format(nb_marked_cells, total_nb_cells)) # ### Preprocessing # #### Quality control # Let's first perform some quality control. # For starters, let's check the size of our dataset: adata.shape # Our dataset consists of roughly 17000 cells described by 547 unique ions. # Let's get some more detailed QC metrics: cell_qc, ion_qc = sc.pp.calculate_qc_metrics(adata) sns.displot(data=cell_qc, x="n_genes_by_counts", bins=50, linewidth=0) plt.xlabel('Unique ions') plt.ylabel('N cells') plt.xlim(left=0) plt.axvline(x=10, linestyle=":", c="#CC3333") plt.show() # Seems like most of our cells have 100-200 unique ions, and there is close to 0 cells that only have very few annotations. 
sns.displot(data=ion_qc, x="n_cells_by_counts", bins=50, linewidth=0) plt.xlabel('Unique cells') plt.ylabel('N ions') plt.xlim(left=0) plt.axvline(x=200, linestyle=":", c="#CC3333") plt.show() # There seems to be plenty of ions however that only occur in a low number of cells. # #### Filtering cells and ions # Cells with only few annotations may not have been sampled by the MALDI laser sufficiently, and should therefore be removed. # Ions that are only present in a few cells will contain only little biological information, and removing them reduces dataset complexity. # # Keep in mind however that the threshold for both filtering steps should be adapted to your own analysis. adata.raw = adata # keep raw values for diff. analysis # + print("Cells before filtering:", adata.shape[0]) sc.pp.filter_cells(adata, min_genes=10) print("Cells after filtering:", adata.shape[0]) # + print("Ions before filtering:", adata.shape[1]) sc.pp.filter_genes(adata, min_cells=200) print("Ions after filtering:", adata.shape[1]) # - # #### Scaling # Other single-cell omics may require you to scale intensities to achieve more normally distributed counts. For SpaceM data this is not recommended however: # # - Log (or log1p) scaling is commonly used in sequencing-based single-cell analyses to reduce data skewness, but has shown to actually drown out biological information in SpaceM data. # - Z scoring (i.e. centering to 0 and scaling to as standard deviation of 1) has no consensus even in other single-cell omics, from experience it can severely distort SpaceM data. # #### Normalization # # Technical aspects of MS imaging for SpaceM cause significant variance in how much biological material is sampled, similarly to varying sequencing depth in scRNA-seq. # As this sampling variance can severely distort downstream analysis, we're going to apply some normalization to mitigate for it. 
# # The simplest way to do this is by scaling a cell's ion counts using a size factor porportional to the total ion count (TIC) of the cells - commonly referred to as TIC normalization: # sc.pp.normalize_total(adata) sc.pp.normalize_total(adata, target_sum=1., key_added='tic') # ### Dimensionality reduction # We'll perform some dimensionality reduction in order to get a visual overview of our data. # In short, we'll create a **U**niform **M**anifold **A**pproximation and **P**rojection (UMAP) representation of our data that will give us a overview of the # biological (i.e. wanted) and technical (i.e. unwanted) variance within the sample e.g. similarity of cell types, conditions or replicates. # # You can read more about dimensionality reduction in single-cell analysis [here](https://chanzuckerberg.github.io/scRNA-python-workshop/analysis/03-dimensionality-reduction.html). sc.pp.pca(adata) sc.pp.neighbors(adata) sc.tl.umap(adata) adata.obs[["UMAP1", "UMAP2"]] = adata.obsm["X_umap"] # transfer to .obs # + f = osm.pl.highlight_scatterplot( data = adata.obs, x = "UMAP1", y = "UMAP2", hue = cond_col, col = cond_col, palette = "tab10", height = 5, scatter_kwargs = dict(s=5) ) f.add_legend(markerscale=3) plt.xticks([]) plt.yticks([]) plt.show() # - # Excellent! Seems like different treatments seem to be a major driver of variance in our UMAP! # Let's take a deeper look however and also check how much variance we have between our replicates. # ### Differential Analysis # After identifying groups of cells we might want to know what makes these groups unique, for which we are going to perform differential analysis. # # <div class="alert alert-warning"> # # **A fair warning:** there are no data-backed best practices for differential analysis of SpaceM data yet. The steps currently shown here therefore use the most simplistic methods, which may not be universally applicable. # A future update to this notebook will aim to change that and provide more robust methods. 
# # </div> # # To identify which ions are differentially abundant between between groups of cells, we will use a series of 1-vs-rest wilcoxon rank-sum tests for each ion: sc.tl.rank_genes_groups(adata, groupby=cond_col, method="wilcoxon", use_raw=True) from singlecelltools.nonzero_wilcoxon import nonzero_wilcoxon nonzero_wilcoxon(adata, groupby=cond_col, use_raw=False, key_added="zero_rank_genes_groups") # Let's check out which ions are most associated with each of our four groups: # We can also visualize the abundance of these ions on our UMAP: # Furthermore, we can check the distribution of p-values and logfoldchanges in a volcano plot: # + groupname = adata.uns["rank_genes_groups"]["params"]["groupby"] # = "leiden" pval_thres = 0.05 # upper threshold for p-values fc_thres = 2 # lower threshold for fold changes for group in adata.obs[groupname].unique().categories: df = sc.get.rank_genes_groups_df(adata, group) df = df.sort_values("scores", ascending=False) df.insert(0, groupname, group) df["significance"] = (df["pvals_adj"] < pval_thres) & (df["logfoldchanges"].abs() > np.log2(fc_thres)) df["pvals_adj_nlog10"] = -np.log10(df["pvals_adj"] + np.finfo("f8").eps) plt.figure(figsize=[15, 5]) sns.scatterplot( data = df, x = "logfoldchanges", y = "pvals_adj_nlog10", s = 10, linewidth = 0, hue = "significance", palette = "tab10" ) plt.xlabel("Log fold change") plt.ylabel("-log10(p)") plt.legend(loc="lower left", title="Significance") plt.title(f"{groupname}={group}", fontsize=20) plt.show() # - # We can see that most ions have either very high or low log fold changes, as well as very low p-values, and it is currently not clearly known what causes these extreme values. 
# # Nonetheless, we can now export these markers to use them in other analyses: # + output_dir = "C:/Users/ama/data/220325_Luisa_ScSeahorse" for group in adata.obs[groupname].unique().categories: df = sc.get.rank_genes_groups_df(adata, group) df = df.sort_values("scores", ascending=False) df.insert(0, groupname, group) df = df[ (df["pvals"] < pval_thres) & (df["logfoldchanges"].abs() > np.log2(fc_thres)) ] df_path = f"{output_dir}/{groupname}_{group}_markers.tsv" print(df_path) df.to_csv(df_path, index=False, sep='\t') # -
projects/gastrosome_processing/new_analysis/sc_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Regression exploration # # There are various versions of regression around. Here we will explore linear, ridge and lasso regression first. I will use sklearn and astroML to illustrate their use and strengths/weaknesses. import numpy as np import matplotlib.pyplot as plt from astroML.datasets import fetch_sdss_sspp from astroML.plotting import hist import seaborn as sns import pandas as pd import sys import cPickle from sklearn.linear_model import Ridge, Lasso # %matplotlib inline # I quite like to use Seaborn for statistical visualisation but I find the default setting sub-optimal for figures and prefer the 'white' style. I also set the palette to be friendly to colour blind users. sns.set(style="white") sns.set_palette('colorblind') # The following two functions are simple convenience functions for saving data to file and getting them back. They can be convenient to have around # + def pickle_to_file(data, fname): """ Dump some data to a file using pickle """ try: fh = open(fname, 'w') cPickle.dump(data, fh) fh.close() except: print "Pickling failed!", sys.exc_info()[0] def pickle_from_file(fname): """ Retrieve data from file that were saved using cPickle. """ try: fh = open(fname, 'r') data = cPickle.load(fh) fh.close() except: print "Loading pickled data failed!", sys.exc_info()[0] data = None return data # - # ## Get stellar photometry from SDSS # # The following routine will load stellar data from the SDSS using the `astroML` package. For more details, see [the astroML web-site](http://www.astroml.org/). This will download the data the first time you call the function - this amounts to 37Mb, so do not get it over a mobile link. 
You can also control where it is downloaded, see the documentation of the `fetch_sdss_sspp` function for more details. # # The following call is just to see what is kept within this file. data = fetch_sdss_sspp() data.dtype.names # ## Calculate colours and visualise data # # It is always important to have a look at the data before plotting them. Here I use [the Pandas library](http://pandas.pydata.org/) to create a dataframe since this is needed for use with `seaborn`. You can use other approaches if you prefer but I think this is very handy. # # The particular plot is a bit slow here. ug = data['upsf']-data['gpsf'] gr = data['gpsf']-data['rpsf'] ri = data['rpsf']-data['ipsf'] iz = data['ipsf']-data['zpsf'] T = data['Teff'] X = np.vstack((ug, gr, ri, iz, T)).T M = np.vstack((ug, gr, ri, iz)).T df = pd.DataFrame(X[0:1000, :]) g = sns.PairGrid(df, diag_sharey=False) g.map_lower(sns.kdeplot, cmap="Blues_d") g.map_upper(plt.scatter) g.map_diag(sns.kdeplot) # There is plenty of co-linearity in this data. Let me now assume that we can write the effective temperature as a linear combination of data: # # $$T = \theta_0 + \theta_1 (u-g) + \theta_2 (g-r) + \theta_3 (r-i) + \theta_5 (i-z) $$ # # and we want to constrain $\theta$ to create a simple predictor of temperature. # ## Regular linear regression # # This is the simplest version. I use the `LinearRegression` function in `astroML` because this supports uncertainties - even though I do not use them here! from astroML.linear_model import LinearRegression # ### Create the model # # Note that this is independent of the data model = LinearRegression() # ### Fit the model to the data # # This is where we introduce the data. res = model.fit(M, T/1e4) # ### Extract best-fit coefficients and calculate the predicted temperature # res.coef_ Tpred = model.predict(M) # We can now plot these results. I am not putting much effort into this! 
In a real application you would of course look at residuals with respect to all different input quantities. plt.plot(T, (Tpred*1e4-T)/T, ',') plt.xlabel(r'$T_{\mathrm{eff}}$ [K]') plt.ylabel(r'$\Delta T_{\mathrm{eff}}$ [predicted-true] $10^4$K') fig = plt.figure() ax = fig.add_axes([0.1, 0.1, 0.9, 0.9]) hist((Tpred*1e4-T)/T, bins='knuth', ax=ax) ax.set_xlabel(r'$\Delta T_{\mathrm{eff}}$ [predicted-true] [$10^4$K]') ax.set_xlim(-0.5, 0.2) # ## Ridge regression # # The next up is ridge regression. In this case we fit a model in the same way as for linear regression, but we now have a regularisation condition on the coefficients $\theta_i$. In practice it tries to minimize # # $$\sum_i (y_i - \sum_j \theta_j x_{ij})^2 + \lambda_i \sum \theta_j^2 $$ # # so you can see it as a linear regression problem where you also want to ensure that the coefficients don't get too large or too small. model = Ridge() res = model.fit(M, T/1e4) Tpred = model.predict(M) res.coef_ plt.plot(T, (Tpred*1e4-T)/T, ',') plt.xlabel(r'$T_{\mathrm{eff}}$ [K]') plt.ylabel(r'$\Delta T_{\mathrm{eff}}$ [predicted-true] $10^4$K') # ## LASSO regression # # This is very similar to ridge regression, but in this case we now have a regularisation condition on the coefficients $\theta_i$ that sets less important coefficients to zero: # # $$\sum_i (y_i - \sum_j \theta_j x_{ij})^2 + \lambda_i \sum |\theta_j| $$ # # If you are curious why some parameters are set to zero in some cases (as we will see below), take a look at section 6.2 in "An Introduction to Statistical Learning". 
The practical use of the LASSO is near identical to that of ridge regression: model = Lasso(alpha=0.001) res = model.fit(M, T.astype(np.float)/1e4) Tpred = model.predict(M) res.coef_ plt.plot(T, (Tpred*1e4-T)/T, ',') plt.xlabel(r'$T_{\mathrm{eff}}$ [K]') plt.ylabel(r'$\Delta T_{\mathrm{eff}}$ [predicted-true] $10^4$K') # # Choosing the regularisation parameter # # Both LASSO and ridge regression take a separate parameter, $\lambda$, which determines the degree of regularisation. How do you decide on this? You can try trial and error, but the most common way is to divide your data into subsets, and do what is called cross-validation between these sets. # # Stability of estimates # # The next step is to try this for very small subsets and store the coefficients. For simplicity I will just shuffle the data and then split into 10 objects. This will help us see how stable the estimates of the different coefficients are with respect to the sample size. # + def fit_one_subset(M, T, indices, ridge=False, lasso=False, standard=True, alpha=0.05): """ Fit one subset of the data using one of several methods Arguments: ---------- M: Design matrix for the model T: The independent variable (in the examples here the temperature) Keywords: --------- ridge: Whether to do ridge regression (default=False) lasso: Whether to do LASSO regression (default=False) standard: Whether to do standard linear regression (default=True) alpha: The regularization parameter for LASSO and ridge regression. (default=0.05) """ if standard: model = LinearRegression(fit_intercept=True) elif ridge: model = Ridge(alpha=alpha, fit_intercept=True, normalize=True) elif lasso: model = Lasso(alpha=alpha, fit_intercept=True, normalize=True) res = model.fit(M[indices, :], T[indices].astype(np.float)/1e4) return res.coef_ def run_many_subsets(M, T, N_per=10, N_sets=1000, ridge=False, lasso=False, standard=True, alpha=0.05): """ Fit N_sets of N_per objects. 
Arguments: ---------- M: Design matrix for the model T: The independent variable (in the examples here the temperature) Keywords: --------- N_per: The number of objects per subset. N_sets: The number of subsets to create. ridge: Whether to do ridge regression (default=False) lasso: Whether to do LASSO regression (default=False) standard: Whether to do standard linear regression (default=True) alpha: The regularization parameter for LASSO and ridge regression. (default=0.05) """ if (N_per*N_sets > len(T)): print "It is not possible to have this combination of N_per and N_sets" return None # This is the index array inds = np.arange(len(T)) # Now shuffle it. np.random.shuffle(inds) subset_coefs = np.zeros((N_sets, 5)) for i in range(N_sets): subset = inds[i*N_per:(i+1)*N_per] subset_coefs[i, :] = fit_one_subset(M, T, subset, ridge=ridge, lasso=lasso, standard=standard, alpha=alpha) return subset_coefs # - coefs6 = run_many_subsets(M, T, N_per=6) coefs100 = run_many_subsets(M, T, N_per=100) coefs6[0, :] xaxis = np.arange(coefs100.shape[0]) fig, axes = plt.subplots(1, 2, sharey=True) axes[0].plot(xaxis, coefs100[:, 2]-np.median(coefs100[:, 2]), '.') axes[0].set_title('100 per subset') axes[0].set_ylabel('g-r coefficient') axes[1].plot(xaxis, coefs6[:, 2]-np.median(coefs6[:, 2]), '.') axes[1].set_title('6 per subset') plt.savefig('g-r-coeff-variation.pdf', format='pdf') # What this tells us, is that prediction of the coefficient is very unstable when we have a small number of data points. In statistics lingo the coefficient has large *variance*. # # Regularising the situation # # Ridge regression and the lasso (and other techniques), allow one to run the same fits, but with some regularisation (constraints on the $\theta_i$). This helps obtain better values for the parameters, but at the cost of a slight bias in their values. # # I save the output below, but in fact it is so quick it does have to be. 
def run_many_lambdas(M, T, ridge=True, lasso=False): """ Fit N_sets of N_per objects. This is near identical to """ # I want repeatability np.random.seed(0) # This is the index array inds = np.arange(len(T)) # Now shuffle it. np.random.shuffle(inds) n_alpha = 100 alphas = np.logspace(-4, 2, n_alpha) # Get 20 stars out. subset = inds[0:20] subset_coefs = np.zeros((n_alpha, 4)) for i in range(n_alpha): subset_coefs[i, :] = fit_one_subset(M, T, subset, ridge=ridge, lasso=lasso,\ standard=False, alpha=alphas[i]) return alphas, subset_coefs, subset # ## Ridge regression a_ridge, coefs_ridge, subset = run_many_lambdas(M, T, ridge=True) pickle_to_file((M[subset, :], T[subset]), "T-vs-colour-regression.pkl") # Next, let us plot the coefficients as a function of the regularisation parameter. In this case, remember, we have 10 objects per subset because that is the default. plt.semilogx(a_ridge, coefs_ridge[:, 0], label='u-g') plt.semilogx(a_ridge, coefs_ridge[:, 1], label='g-r') plt.semilogx(a_ridge, coefs_ridge[:, 2], label='r-i') plt.semilogx(a_ridge, coefs_ridge[:, 3], label='i-z') plt.plot([1e-4, 1e2], [0, 0], 'k--') plt.legend() plt.xlabel(r'$\lambda$') plt.ylabel('coefficient') plt.title('Ridge regression') plt.savefig('ridge-coeffs.pdf', format='pdf') # ## LASSO regression a_lasso, coefs_lasso, subset = run_many_lambdas(M, T, ridge=False, lasso=True) plt.semilogx(a_lasso, coefs_lasso[:, 0], label='u-g') plt.semilogx(a_lasso, coefs_lasso[:, 1], label='g-r') plt.semilogx(a_lasso, coefs_lasso[:, 2], label='r-i') plt.semilogx(a_lasso, coefs_lasso[:, 3], label='i-z') plt.plot([1e-4, 1e2], [0, 0], 'k--') plt.legend() plt.xlabel(r'$\lambda$') plt.ylabel('coefficient') plt.title('LASSO regression') plt.savefig('LASSO-coeffs.pdf', format='pdf') data = (M[subset, :], T[:]) pickle_to_file(data, 'T-vs-colour-regression.pkl')
notebook/Regression - CAUP Programming Club.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- reset from geopy.geocoders import Nominatim import pandas as pd import numpy as np import os from os import listdir from os.path import isfile, join import time # + ###set working directory os.chdir('insert directory here') #import origin city data data = pd.read_csv(os.getcwd() + '/raw_data/ICLR_registrants_work_location.csv') # + #define API geolocator = Nominatim(user_agent="GoogleV3") #geolocator tends to time out due to connectivity reasons. #needs to be used with strong internet connection #thus I define and query a list of unique cities from registrants data to reduce number of required queries #define dataframe of unique cities cities=pd.DataFrame(list(set(list(data['Work Location']))),columns=['Work Location']) cities['Origin_Latitude']=0 cities['Origin_Longitude']=0 # - #cycle through unique cities and query names to retrieve coordinates for row in cities.index: print(row) #query unique city location = geolocator.geocode(cities.loc[row,'Work Location']) #if coordinates are found, assign coordinates to city in unique cities list if location is not None: cities.loc[row,'Origin_Latitude']=location.latitude cities.loc[row,'Origin_Longitude']=location.longitude #the queries take longer to execute than the for loop so they accumulate. #If too many queries open at once the API crashes #Delay for 1 second to avoid issue. 
Sleep time can be adjusted based on script performance time.sleep(1) #cycle through rows of registrants and assign city coordinates to each unique city for row in data.index: print(row) #isolate row from dataframe of unique cities/coordinates that matches registrant city city_row=cities.loc[cities['Work Location']==data.loc[row,'Work Location']] #assign unique city coordinates to registrant data.loc[row,'Origin_Latitude']=float(city_row['Origin_Latitude']) data.loc[row,'Origin_Longitude']=float(city_row['Origin_Longitude']) if data.loc[row,'Work Location']=='Unknown': data.loc[row,'Origin_Latitude']=0 data.loc[row,'Origin_Longitude']=0 #export data data.to_csv(os.getcwd() + '/raw_data/latitude_and_longitude_ICLR.csv')
scripts/travel_footprint/ICLR/repository_coordinate_calculator_iclr.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/KarthikGogisetty07/Covid19_CNN/blob/main/Covid_Detection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="-Hi5IRdIbu6e" import numpy as np import matplotlib.pyplot as plt import keras from keras.layers import Dense, Conv2D, MaxPool2D, Dropout, Flatten from keras.models import Sequential from keras.preprocessing import image # + id="wXY-MYRycisU" train_datagen = image.ImageDataGenerator( rescale = 1/225, horizontal_flip = True, zoom_range = 0.2, shear_range = 0.2 ) # zoom range less than 1 implies that the image will be zoomed in x% in 0.x # value we assgin, While zooms out if it's greater than 1.0 # Shear range is the distortion in a particular axis: augment image # + id="TqdNIvGgi3gb" colab={"base_uri": "https://localhost:8080/"} outputId="eac97ade-70c9-44c1-bedb-4889b1522afb" train_data = train_datagen.flow_from_directory(directory = "/content/drive/MyDrive/xray_dataset_covid19/train", target_size = (256, 256), batch_size = 16, class_mode = "binary") # + colab={"base_uri": "https://localhost:8080/"} id="xEruaGTCfPYf" outputId="6e0fd37d-ac89-46c8-a207-dd5e02a73a72" train_data.class_indices # Discoving of classes in the folder. # + colab={"base_uri": "https://localhost:8080/"} id="5gz4gymOfuY2" outputId="2a095ace-a906-47e1-a81f-9255d649157f" test_datagen = image.ImageDataGenerator( rescale = 1/225 ) # No agumentation for test set: Cause your testing the data set irrespective of it's # orientation using the model you built. 
test_data = test_datagen.flow_from_directory(directory = "/content/drive/MyDrive/xray_dataset_covid19/test", target_size = (256, 256), batch_size = 16, class_mode = "binary") # + id="r5XWQdXDfuq6" # CNN Model model = Sequential() model.add(Conv2D(filters = 32, kernel_size = (3,3), activation = 'relu', input_shape = (256, 256, 3))) # Layer -1: model.add(Conv2D(filters = 64, kernel_size = (3,3), activation = 'relu')) model.add(MaxPool2D()) model.add(Dropout(rate = 0.25)) # Layer -2: model.add(Conv2D(filters = 64, kernel_size = (3,3), activation = 'relu')) model.add(MaxPool2D()) model.add(Dropout(rate = 0.25)) # Layer -3: model.add(Conv2D(filters = 128, kernel_size = (3,3), activation = 'relu')) model.add(MaxPool2D()) model.add(Dropout(rate = 0.25)) model.add(Flatten()) model.add(Dense(units = 64, activation = 'relu')) model.add(Dropout(rate = 0.50)) model.add(Dense(units = 1, activation = 'sigmoid')) model.compile(loss = keras.losses.binary_crossentropy, optimizer = 'adam', metrics = ['acc']) # + colab={"base_uri": "https://localhost:8080/"} id="922CcZs-fuzu" outputId="d1a2c82c-f8bf-4213-8187-22ab1324da6c" model.summary() # + colab={"base_uri": "https://localhost:8080/"} id="HswPUIDQClrd" outputId="a708edea-0564-423e-c075-4f8cc70232b0" cnn = model.fit_generator(train_data, steps_per_epoch = 8, epochs = 15, validation_steps = 2, validation_data = test_data ) # + colab={"base_uri": "https://localhost:8080/"} id="jqr10wNGzZPs" outputId="5608947d-c4fc-4802-b3ec-05c9a997dd49" h = cnn.history h.keys() # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="fUfUbCFTjw4q" outputId="44e8aef8-2ec8-469b-dd21-f8d588c9d4ef" losstrain = cnn.history['loss'] lossval = cnn.history['val_loss'] epochs = range(1,16) plt.plot(epochs, losstrain, "g", label = 'Training Loss Graph vs Iterations') plt.plot(epochs, lossval, "r", label = 'Validation Loss Graph vs Iterations') plt.title('Training and validation Model Result') plt.xlabel('ITERATIONS') plt.ylabel('LOSS') plt.legend() 
plt.show() # + id="L0Ba7nYmjwra" # #!unzip "" # + id="DiDVg7wtjww8" colab={"base_uri": "https://localhost:8080/"} outputId="d70c1433-a7bf-4d03-d3fe-cf8c22128011" path = "/content/drive/MyDrive/predict pics/Normal-31.png" # Load path of image (X-ray u want to predict) img = image.load_img(path, target_size = (256, 256)) img = image.img_to_array(img)/255 img = np.array([img]) img.shape # + id="dBK3TF5cjw0o" colab={"base_uri": "https://localhost:8080/"} outputId="e8c2e294-321f-44ab-ce77-e29c92f2e117" # Prediction gives 0 - Normal, 1 - Corona model.predict_classes(img) # + id="lF7Chuo9jw8v" # Must increase epoch to 100 - 1000 to get better results, It works for few sample pics fails for few Non - Covid cases.
Covid_Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import numpy as np from IPython import display from tqdm.notebook import tqdm import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap import torch import torch.nn as nn import torch.optim as optim import random import time from torchtext.legacy import data from torchtext.legacy import datasets import torchtext.vocab from torchtext.vocab import Vocab import gensim from scipy.spatial.distance import cosine from sklearn.neighbors import NearestNeighbors import seaborn as sns sns.set(style='whitegrid', font_scale=3.0) # %matplotlib inline # + # Установка random seed для воспроизводимости эксперимента SEED = 1234 torch.manual_seed(SEED) torch.backends.cudnn.deterministic = True # - # Загрузим данные. Разобъём на `train`, `valid`, `test` # Загрузка данных и разбивка на токены TEXT = data.Field(tokenize = 'spacy', tokenizer_language = 'en_core_web_sm') LABEL = data.LabelField(dtype = torch.float) # Разделение данных на train, test train_data, test_data = datasets.IMDB.splits(TEXT, LABEL) # Разделение данных на train, valid train_data, valid_data = train_data.split(random_state = random.seed(SEED)) def cut_words(ddata, max_length=20): '''Обрезать тексты по первым max_length словам''' for i in range(len(ddata.examples)): ddata.examples[i].text = ddata.examples[i].text[:max_length] # Укорачивание текстов cut_words(train_data) cut_words(valid_data) cut_words(test_data) # + # Построение словаря по данным # Используются предобученные эмбединги для построения векторов MAX_VOCAB_SIZE = 10_000 TEXT.build_vocab(train_data, vectors = "glove.6B.100d", unk_init = torch.Tensor.normal_) LABEL.build_vocab(train_data) # - # Размер эмбединга EMB_DIM = TEXT.vocab.vectors[0].shape[0] # + def get_idx_embedding(idx): '''Получить эмбединг по 
индексу''' return TEXT.vocab.vectors[idx] def get_word_embedding(word): '''Получить эмбединг по слову''' idx = TEXT.vocab.stoi[word] return get_idx_embedding(idx) def get_text_embeddings(text): '''Получить эмбединги по тексту из индексов''' return TEXT.vocab.vectors[text] def get_texts_embeddings(texts): '''Получить эмбединги по текстам из индексов''' return torch.stack([get_text_embeddings(text) for text in texts]) def get_word_texts_embeddings(texts): '''Получить эмбединги по текстам из слов''' texts = TEXT.process([TEXT.tokenize(text) for text in texts]) return get_texts_embeddings(texts) # + # Поиск ближайшего вектора по косинусному расстоянию neigh = NearestNeighbors(n_neighbors=3, metric='cosine', algorithm='brute', n_jobs=-1) neigh.fit(TEXT.vocab.vectors) def get_synonim(word, tol=0.5): '''Получить синоним слова word с косинусным расстоянием между эмбедингами не более tol''' wv = get_word_embedding(word) cosine_dist, idxs = neigh.kneighbors(wv.reshape(1, -1)) cosine_dist = cosine_dist[0] idxs = idxs[0] for i in range(len(idxs)): if cosine_dist[i] > tol: continue sim_word = TEXT.vocab.itos[idxs[i]] if sim_word != word: return sim_word return word # + # Генерация пакетов (батчей) данных для обучения, валидации и тесто BATCH_SIZE = 64 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits( (train_data, valid_data, test_data), batch_size = BATCH_SIZE, sort_within_batch = True, device = device) # - # Опишем модель $-$ базовая `RNN` class RNN(nn.Module): '''RNN модель из rnn слоя и fully-connected слоя''' def __init__(self, embedding_dim, hidden_dim, output_dim): super().__init__() self.rnn = nn.RNN(embedding_dim, hidden_dim) self.fc = nn.Linear(hidden_dim, output_dim) def forward(self, embedded): #embedded = [sent len, batch size, emb dim] output, hidden = self.rnn(embedded) #output = [sent len, batch size, hid dim] #hidden = [1, batch size, hid dim] assert 
torch.equal(output[-1,:,:], hidden.squeeze(0)) return self.fc(hidden.squeeze(0)) # + # Инициализация модели с заданными параметрами EMB_DIM = 100 HIDDEN_DIM = 128 OUTPUT_DIM = 1 model = RNN(EMB_DIM, HIDDEN_DIM, OUTPUT_DIM) # - # Инициализация оптимизатора и критерия(функции потерь) optimizer = optim.Adam(model.parameters(), lr=1e-3) criterion = nn.BCEWithLogitsLoss().to(device) model = model.to(device) def binary_accuracy(preds, y): """ Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8 """ #round predictions to the closest integer rounded_preds = torch.round(torch.sigmoid(preds)) correct = (rounded_preds == y).float() #convert into float for division acc = correct.sum() / len(correct) return acc def train(model, iterator, optimizer, criterion): '''Одна эпоха обучения модели''' epoch_loss = 0 epoch_acc = 0 model.train() for batch in iterator: optimizer.zero_grad() embedded = get_texts_embeddings(batch.text) predictions = model(embedded).squeeze(1) loss = criterion(predictions, batch.label) acc = binary_accuracy(predictions, batch.label) loss.backward() optimizer.step() epoch_loss += loss.item() epoch_acc += acc.item() return epoch_loss / len(iterator), epoch_acc / len(iterator) def evaluate(model, iterator, criterion): '''Валидация модели''' epoch_loss = 0 epoch_acc = 0 model.eval() with torch.no_grad(): for batch in iterator: embedded = get_texts_embeddings(batch.text) predictions = model(embedded).squeeze(1) loss = criterion(predictions, batch.label) acc = binary_accuracy(predictions, batch.label) epoch_loss += loss.item() epoch_acc += acc.item() return epoch_loss / len(iterator), epoch_acc / len(iterator) def epoch_time(start_time, end_time): '''Время вычисления эпохи''' elapsed_time = end_time - start_time elapsed_mins = int(elapsed_time / 60) elapsed_secs = int(elapsed_time - (elapsed_mins * 60)) return elapsed_mins, elapsed_secs # + # Обучение модели N_EPOCHS = 5 best_valid_loss = float('inf') for epoch in range(N_EPOCHS): 
start_time = time.time() train_loss, train_acc = train(model, train_iterator, optimizer, criterion) valid_loss, valid_acc = evaluate(model, valid_iterator, criterion) end_time = time.time() epoch_mins, epoch_secs = epoch_time(start_time, end_time) if valid_loss < best_valid_loss: best_valid_loss = valid_loss torch.save(model.state_dict(), 'imdb20.pt') print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s') print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%') print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%') # + # Загрузка модели model.load_state_dict(torch.load('imdb20.pt')) test_loss, test_acc = evaluate(model, test_iterator, criterion) print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%') # + def generate_sample(data, size=1000, random_state=42): '''Генерация выборки из заданных данных''' np.random.seed(random_state) sample_idx = np.random.randint(0, len(data), sample_size) sample_texts = [data[idx].text for idx in sample_idx] return sample_texts def generate_similar(text, p=0.1): '''Генерация текста из синонимов с вероятностью замены p''' sim_text = [] for i in range(len(text)): word = text[i] if np.random.random() <= p: word = get_synonim(word) sim_text.append(word) return sim_text def make_pred_chart(true_val, pred_val, label, save_path): '''Отобразить график предсказаний''' args = np.argsort(true_val)[::-1] plt.figure(figsize=(10, 8)) plt.plot(pred_val[args], lw=2, label=label) plt.plot(true_val[args], lw=3, label='Истинные предсказания') plt.xlabel('Индекс объекта') plt.legend(fontsize=24) plt.savefig(save_path, format='svg') plt.show() def make_cosine_chart(true_val, pred_val, label, save_path): '''Отобразить график косинусного расстояния''' dists = [cosine([true_val[i], 1 - true_val[i]], [pred_val[i], 1 - pred_val[i]]) for i in range(len(true_val))] dists = np.sort(dists) plt.figure(figsize=(10, 8)) plt.plot(dists, lw=3, label=label) plt.xlabel('Индекс объекта') 
plt.legend(fontsize=24) plt.savefig(save_path, format='svg') plt.show() def predict_proba_lime(texts): '''Предсказания вероятностьй для метода LIME''' with torch.no_grad(): embedded = get_word_texts_embeddings(texts) model.eval() pred = torch.sigmoid(model(embedded)).squeeze() return torch.stack([1 - pred, pred]).T def print_metrics(true_val, pred_val): '''Напечатать метрики качества''' rmse = np.sqrt(np.mean((true_val - pred_val) ** 2)) mae = np.mean(np.abs(true_val - pred_val)) mape = np.mean(np.abs(true_val - pred_val) / true_val) print(f'RMSE: {rmse:.5f}') print(f'MAE: {mae:.5f}') print(f'MAPE: {mape:.5f}') def make_experiment_openbox(texts, sim_texts): '''Запустить эксперимент для метода OpenBox''' indexed = TEXT.process(texts) sim_indexed = TEXT.process(sim_texts) embedded = get_texts_embeddings(indexed) embedded.requires_grad = True sim_embedded = get_texts_embeddings(sim_indexed) model.eval() pred = torch.sigmoid(model(embedded)).squeeze() sim_pred = torch.sigmoid(model(sim_embedded)).squeeze() pred.sum().backward() grad = embedded.grad delta_x = sim_embedded - embedded delta_pred = torch.sum(delta_x * grad, axis=[0, 2]) openbox_pred = pred + delta_pred pred_np = pred.detach().cpu().numpy() sim_pred_np = sim_pred.detach().cpu().numpy() openbox_pred_np = openbox_pred.detach().cpu().numpy() make_pred_chart(sim_pred_np, openbox_pred_np, 'OpenBox', '../figures/openbox_proba_est.svg') make_cosine_chart(sim_pred_np, openbox_pred_np, 'OpenBox', '../figures/openbox_cosine.svg') print_metrics(sim_pred_np, openbox_pred_np) def make_experiment_lime(texts): '''Запустить эксперимент для метода LIME''' with torch.no_grad(): indexed = TEXT.process(texts) embedded = get_texts_embeddings(indexed) model.eval() pred = torch.sigmoid(model(embedded)).squeeze() lime_pred = [] for text in tqdm(texts): exp = explainer.explain_instance(" ".join(text), predict_proba_lime, num_features=6) lime_pred.append(exp.predict_proba[1].item()) pred_np = pred.detach().cpu().numpy() lime_pred_np 
= np.array(lime_pred) make_pred_chart(pred_np, lime_pred_np, 'LIME', '../figures/lime_proba_est.svg') make_cosine_chart(pred_np, lime_pred_np, 'LIME', '../figures/lime_cosine.svg') print_metrics(pred_np, lime_pred_np) # - sample_texts = generate_sample(test_data) sim_texts = [generate_similar(text, p=0.2) for text in tqdm(sample_texts)] make_experiment_openbox(sample_texts, sim_texts) make_experiment_lime(sample_texts)
code/experiment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7
#     language: python
#     name: py37
# ---

# + tags=[]
import numpy as np
import torch
import matplotlib.pyplot as plt

print(torch.__version__)
print(torch.cuda.is_available())
# -

# # Model - Manual
# - Cell: $y_t = tanh(W_x \cdot X_t + W_y \cdot y_{t-1} + b)$
# - System
#     - $y_0 = tanh(W_x \cdot X_0)$
#     - $y_1 = tanh(W_x \cdot X_1 + W_y \cdot y_0)$
#
# <img src="./assets/1.png" width="700"/>
#

# +
import torch.nn as nn


class SingleRNN(nn.Module):
    # Minimal hand-rolled two-step RNN for illustration. The weights are plain
    # tensors (not nn.Parameter), so they are NOT registered for training.
    def __init__(self, n_inputs, n_neurons):
        super(SingleRNN, self).__init__()
        self.Wx = torch.randn(n_inputs, n_neurons)   # input -> hidden weights
        self.Wy = torch.randn(n_neurons, n_neurons)  # hidden -> hidden weights
        self.b = torch.zeros(1, n_neurons)           # shared bias

    def forward(self, X0, X1):
        # y0 = tanh(X0·Wx + b); y1 = tanh(y0·Wy + X1·Wx + b) — the two-step
        # unrolling of the cell equation in the markdown above.
        self.y0 = torch.tanh(torch.mm(X0, self.Wx) + self.b)
        self.y1 = torch.tanh(torch.mm(self.y0, self.Wy) + torch.mm(X1, self.Wx) + self.b)
        return self.y0, self.y1
# -

# #### Fit test - 1 neuron

# + tags=[]
# data: two time steps, 5 samples, 4 features each
X0 = torch.tensor([
    [0,1,2,0],
    [3,4,5,0],
    [6,7,8,0],
    [9,0,1,0],
    [0,5,0,0]], dtype = torch.float)
X1 = torch.tensor([
    [9,8,7,0],
    [0,0,0,0],
    [6,5,4,0],
    [3,2,1,0],
    [0,0,1,0]], dtype = torch.float)
print("X0 shape:", X0.shape)
print("X1 shape:", X1.shape)

# model
N_INPUT = 4
N_NEURONS = 1
model = SingleRNN(N_INPUT, N_NEURONS)

# fit
y0, y1 = model(X0, X1)
print("y0 shape:", y0.shape)
print("y1 shape:", y1.shape)
# -

# #### Fit test - 5 neurons

# + tags=[]
# data: two time steps, 3 samples, 4 features each
X0 = torch.tensor([
    [0,1,2,0],
    [3,4,5,0],
    [6,7,8,0]], dtype = torch.float)
X1 = torch.tensor([
    [9,8,7,0],
    [0,0,0,0],
    [6,5,4,0]], dtype = torch.float)
print("X0 shape:", X0.shape)
print("X1 shape:", X1.shape)

# model
N_INPUT = 4
N_NEURONS = 5
model = SingleRNN(N_INPUT, N_NEURONS)

# fit
y0, y1 = model(X0, X1)
print("y0 shape:", y0.shape)
print("y1 shape:", y1.shape)
# -

# # 2. Model - Torch builtin
# - For dimensions reference

# +
import torch.nn as nn


class SimpleRNN(nn.Module):
    # Wraps nn.RNN plus a Linear head; D/M/K follow the naming in the cells below.
    def __init__(self, n_inputs, n_hidden, n_outputs):
        super(SimpleRNN, self).__init__()
        self.D = n_inputs    # number of input features
        self.M = n_hidden    # number of hidden units
        self.K = n_outputs   # number of output units
        self.rnn = nn.RNN(
            input_size=self.D,
            hidden_size=self.M,
            nonlinearity='tanh',
            batch_first=True)
        self.fc = nn.Linear(self.M, self.K)

    def forward(self, X):
        # initial hidden states
        h0 = torch.zeros(1, X.size(0), self.M)

        # get RNN unit output
        out, _ = self.rnn(X, h0)

        # we only want h(T) at the final time step
        # out = self.fc(out[:, -1, :])
        out = self.fc(out)
        return out
# -

# #### Data

# +
# N = number of samples
N = 2
# T = sequence length
T = 10
# D = number of input features
D = 3

# + tags=[]
X = np.random.randn(N, T, D)
print(X.shape)
print(X)
# -

# #### Model

# +
# M = number of hidden units
M = 5
# K = number of output units
K = 2

model = SimpleRNN(n_inputs=D, n_hidden=M, n_outputs=K)

# + tags=[]
# nn.RNN yields parameters in the order: weight_ih, weight_hh, bias_ih, bias_hh
W_xh, W_hh, b_xh, b_hh = model.rnn.parameters()
print(W_xh.shape)
print(b_xh.shape)
print(W_hh.shape)
print(b_hh.shape)
# -

# #### Output

# + tags=[]
X_torch = torch.from_numpy(X.astype(np.float32))
y_ = model(X_torch)
print(y_.shape)
print(y_)
Pytorch/Practices/5A_RNN_Basics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(context = 'notebook', #mostly controls relative sizes of things on plot
        #The base context is “notebook”, and the other contexts are “paper”, “talk”, and “poster”
        style = 'darkgrid', #dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
        palette = 'deep', # Should be something that color_palette() can process.
        font_scale = 1,
        color_codes = False,
        rc = None)

#from IPython.core.interactiveshell import InteractiveShell
#InteractiveShell.ast_node_interactivity = 'last_expr'
#setting = "all" allows multiple outputs to be displayed for a given input cell. don't use w plotting!
from IPython.display import display

import ast

# %matplotlib notebook
# #%matplotlib inline

pd.__version__ , np.__version__ #, matplotlib.__version__, sns.__version__

# +
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn >= 0.23;
# modern code should `import joblib` directly. Left unchanged to match the
# environment the saved .joblib models were produced in.
from sklearn.externals import joblib
# -

pwd

% cd '/Users/DonBunk/Desktop/Google Drive/data_science/Python_Projects/Home_Credit_Default_Risk/'

from Home_Credit_package.master_pipeline import master_pipeline
from Home_Credit_package.Dons_functions import balanced_sample

pwd

# +
# Locations of the trained level-1 models and where their outputs are written.
path_to_models = 'saved_models/level_1_models/'
level_1_metafeats_save_path = 'final_metafeats_and_predictions/level_1/level_1_meta_feats/'
level_1_preds_save_path = 'final_metafeats_and_predictions/level_1/level_1_final_predictions/'
# -

# # load this

# ## load dfs

# +
path = 'wrangling/TRAINING_DATA_create_final_wrangled_csv/'
TRAIN_df = pd.read_csv(path + 'complete_initial_wrangled_data.csv', index_col = 'SK_ID_CURR')

path = 'wrangling/TEST_DATA_create_final_wrangled_csv/'
TEST_df = pd.read_csv(path + 'complete_initial_wrangled_data.csv', index_col = 'SK_ID_CURR')
# -

# # new models to run

# Collects one prediction column per level-1 model, indexed by SK_ID_CURR.
raw_level_1_new_features_df = pd.DataFrame(TEST_df.index)
raw_level_1_new_features_df.set_index('SK_ID_CURR', inplace=True)

# + [markdown] heading_collapsed=true
# ### random forest random, EXT sources, with poly interactions.
# Kaggle scores:
# Public: 0.70550
# Private: 0.71763

# + hidden=true

# + hidden=true
# Fit the preprocessing pipeline on TRAIN so TEST is transformed consistently.
total_df_piped, final_feature_list, total_pipeline, trans_list = \
    master_pipeline(df_in = TRAIN_df[['EXT_SOURCE_1','EXT_SOURCE_2', 'EXT_SOURCE_3']],
                    int_cutoff=20,
                    poly_deg=4,
                    feats_with_interaction=['EXT_SOURCE_1','EXT_SOURCE_2', 'EXT_SOURCE_3']
                   )

# + hidden=true
TEST_piped = total_pipeline.transform(TEST_df)

# + hidden=true
RanFor_EXTpoly_level_1 = joblib.load(path_to_models + 'RanFor_EXTpoly_level_1.joblib')

# + hidden=true
RanFor_EXTpoly_level_1

# + hidden=true
# Keep the positive-class probability only.
preds = RanFor_EXTpoly_level_1.predict_proba(TEST_piped)
val_scores = [x[1] for x in preds]

# + hidden=true

# + hidden=true
raw_level_1_new_features_df['RanFor_EXTpoly'] = val_scores

# + hidden=true

# + hidden=true
this_model = 'RanFor_EXTpoly'
raw_level_1_new_features_df[[this_model]].to_csv(level_1_preds_save_path + this_model + '.csv',
                                                 columns = list(raw_level_1_new_features_df[[this_model]].columns),
                                                 header = ['TARGET'],
                                                )

# + hidden=true
# -

# ### random forest, all features
# Kaggle scores: Public: 0.74025 Private:0.74177

total_df_piped, final_feature_list, total_pipeline, trans_list = \
    master_pipeline(df_in = TRAIN_df,
                    int_cutoff=20,
                    poly_deg=4,
                    feats_with_interaction=[]
                   )

TEST_piped = total_pipeline.transform(TEST_df)

RanFor_AllFeats_level_1 = joblib.load(path_to_models + 'RanFor_AllFeats_level_1.joblib')

RanFor_AllFeats_level_1

preds = RanFor_AllFeats_level_1.predict_proba(TEST_piped)
val_scores = [x[1] for x in preds]

raw_level_1_new_features_df['RanFor_AllFeats'] = val_scores

# +
this_model = 'RanFor_AllFeats'
raw_level_1_new_features_df[[this_model]].to_csv(level_1_preds_save_path + this_model + '.csv',
                                                 columns = list(raw_level_1_new_features_df[[this_model]].columns),
                                                 header = ['TARGET'],
                                                )
# -

# ### plot feature importances for random forest model above

# all features and importances
feat_imports_raw = list(zip(final_feature_list, RanFor_AllFeats_level_1.feature_importances_))

# strip off details (polynomial order etc)
feat_imports_stripped_details_df = pd.DataFrame(
    [(ast.literal_eval(x[0])[0], x[1]) for x in feat_imports_raw],
    columns = ['Feature','Importance'])

# group by 'base' feature and sum
feats_grouped = feat_imports_stripped_details_df['Importance'].groupby(feat_imports_stripped_details_df['Feature'])
final_feat_imports = feats_grouped.sum().to_frame()

# make index into column, and order by Importance
final_feat_imports.reset_index(inplace=True)
final_feat_imports.sort_values(by = 'Importance', ascending=False, inplace=True)

sns.barplot(x="Feature", y="Importance", data=final_feat_imports[:20])
plt.xticks(rotation=90)
plt.tight_layout()

# + [markdown] heading_collapsed=true
# ### log reg, on EXT sources with poly interactions
# Kaggle: Public: 0.70579 Private:0.71612

# + hidden=true
total_df_piped, final_feature_list, total_pipeline, trans_list = \
    master_pipeline(df_in = TRAIN_df[['EXT_SOURCE_1','EXT_SOURCE_2', 'EXT_SOURCE_3']],
                    int_cutoff=20,
                    poly_deg=4,
                    feats_with_interaction=['EXT_SOURCE_1','EXT_SOURCE_2', 'EXT_SOURCE_3']
                   )

# + hidden=true

# + hidden=true
TEST_piped = total_pipeline.transform(TEST_df)

# + hidden=true
LogReg_EXTpoly_level_1 = joblib.load(path_to_models + 'LogReg_EXTpoly_level_1.joblib')
LogReg_EXTpoly_level_1

# + hidden=true

# + hidden=true
preds = LogReg_EXTpoly_level_1.predict_proba(TEST_piped)
val_scores = [x[1] for x in preds]

# + hidden=true

# + hidden=true
raw_level_1_new_features_df['LogReg_EXTpoly'] = val_scores

# + hidden=true

# + hidden=true
this_model = 'LogReg_EXTpoly'
raw_level_1_new_features_df[[this_model]].to_csv(level_1_preds_save_path + this_model + '.csv',
                                                 columns = list(raw_level_1_new_features_df[[this_model]].columns),
                                                 header = ['TARGET'],
                                                )

# + hidden=true
# + [markdown] heading_collapsed=true
# ### log reg, on all features
# Kaggle: Public: 0.75913 Private: 0.76233

# + hidden=true

# + hidden=true
total_df_piped, final_feature_list, total_pipeline, trans_list = \
    master_pipeline(df_in = TRAIN_df,
                    int_cutoff=20,
                    poly_deg=4,
                    feats_with_interaction=[]
                   )

# + hidden=true

# + hidden=true
TEST_piped = total_pipeline.transform(TEST_df)

# + hidden=true
LogReg_AllFeats_level_1 = joblib.load(path_to_models + 'LogReg_AllFeats_level_1.joblib')
LogReg_AllFeats_level_1

# + hidden=true

# + hidden=true
preds = LogReg_AllFeats_level_1.predict_proba(TEST_piped)
val_scores = [x[1] for x in preds]

# + hidden=true

# + hidden=true
raw_level_1_new_features_df['LogReg_AllFeats'] = val_scores

# + hidden=true

# + hidden=true
this_model = 'LogReg_AllFeats'
raw_level_1_new_features_df[[this_model]].to_csv(level_1_preds_save_path + this_model + '.csv',
                                                 columns = list(raw_level_1_new_features_df[[this_model]].columns),
                                                 header = ['TARGET'],
                                                )

# + hidden=true
# + [markdown] heading_collapsed=true
# ### MLP Classifier, all features.
# Kaggle: Public: 0.75187 Private: 0.75392

# + hidden=true

# + hidden=true
total_df_piped, final_feature_list, total_pipeline, trans_list = \
    master_pipeline(df_in = TRAIN_df,
                    int_cutoff=20,
                    poly_deg=4,
                    feats_with_interaction=[]
                   )

# + hidden=true

# + hidden=true
TEST_piped = total_pipeline.transform(TEST_df)

# + hidden=true
MLP_AllFeats_level_1 = joblib.load(path_to_models + 'MLP_AllFeats_level_1.joblib')
MLP_AllFeats_level_1

# + hidden=true

# + hidden=true
preds = MLP_AllFeats_level_1.predict_proba(TEST_piped)
val_scores = [x[1] for x in preds]

# + hidden=true

# + hidden=true
raw_level_1_new_features_df['MLP_AllFeats'] = val_scores

# + hidden=true

# + hidden=true
this_model = 'MLP_AllFeats'
raw_level_1_new_features_df[[this_model]].to_csv(level_1_preds_save_path + this_model + '.csv',
                                                 columns = list(raw_level_1_new_features_df[[this_model]].columns),
                                                 header = ['TARGET'],
                                                )

# + hidden=true

# + hidden=true

# + hidden=true
# + [markdown] heading_collapsed=true
# # final new level 1 meta-features

# + hidden=true

# + hidden=true
def pwr_and_rescale(df_col, pwr):
    # Raise the column to `pwr`, then min-max rescale to [0, 1].
    temp_col = df_col**pwr
    return (temp_col - min(temp_col)) /( max(temp_col) - min(temp_col))

# + hidden=true

# + hidden=true
FINAL_level_1_new_features_df = pd.DataFrame()

# + hidden=true

# + code_folding=[9, 13] hidden=true
# Power-transform + rescale each model's raw probabilities; exponents were
# hand-tuned per model.
feat = 'RanFor_EXTpoly'
FINAL_level_1_new_features_df['pwr_rescale_'+ feat] = pwr_and_rescale(+raw_level_1_new_features_df[feat], 1/5)
#total_df.drop(columns=[feat], inplace = True)

feat = 'RanFor_AllFeats'
FINAL_level_1_new_features_df['pwr_rescale_'+ feat] = pwr_and_rescale(+raw_level_1_new_features_df[feat], 1/3.5)
#total_df.drop(columns=[feat], inplace = True)

feat = 'LogReg_EXTpoly'
FINAL_level_1_new_features_df['pwr_rescale_'+ feat] = pwr_and_rescale(+raw_level_1_new_features_df[feat], 1/7)
#total_df.drop(columns=[feat], inplace = True)

feat = 'LogReg_AllFeats'
FINAL_level_1_new_features_df['pwr_rescale_'+ feat] = pwr_and_rescale(+raw_level_1_new_features_df[feat], 1/4)
#total_df.drop(columns=[feat], inplace = True)

feat = 'MLP_AllFeats'
FINAL_level_1_new_features_df['pwr_rescale_'+ feat] = pwr_and_rescale(+raw_level_1_new_features_df[feat], 1/4)
#total_df.drop(columns=[feat], inplace = True)

# + hidden=true

# + hidden=true
pwd

# + hidden=true

# + hidden=true
FINAL_level_1_new_features_df.to_csv(level_1_metafeats_save_path + 'FINAL_level_1_meta_features_df.csv',
                                     columns = list(FINAL_level_1_new_features_df.columns))

# + hidden=true
Kaggle_Home_Credit_Default_Risk/final_metafeats_and_predictions/level_1/TEST_final_level_1_run_folding_and_metafeature_generation.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Ruby 2.5.3 # language: ruby # name: ruby # --- require 'daru/view' Daru::View.plotting_library = :highcharts # + opts = { chart: { type: 'column' }, title: { text: 'Styling axes' }, yAxis: [{ className: 'highcharts-color-0', title: { text: 'Primary axis' } }, { className: 'highcharts-color-1', opposite: true, title: { text: 'Secondary axis' } }], plotOptions: { column: { borderRadius: 5 } } } user_opts = { css: ['.highcharts-color-0 {fill: #7cb5ec;stroke: #7cb5ec;}', '.highcharts-axis.highcharts-color-0 .highcharts-axis-line {stroke: #7cb5ec;}', '.highcharts-color-1 {fill: #90ed7d;stroke: #90ed7d;}', '.highcharts-axis.highcharts-color-1 .highcharts-axis-line {stroke: #90ed7d;}', '.highcharts-yaxis .highcharts-axis-line {stroke-width: 2px;}' ] } series_dt = [{ data: [1, 3, 2, 4] }, { data: [324, 124, 547, 221], yAxis: 1 }] column = Daru::View::Plot.new(series_dt, opts, user_opts) column.show_in_iruby # + opts = { chart: { type: 'line' }, chart_class: 'stock', title: { text: 'Chart border and background by CSS' }, xAxis: { categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] }, legend: { layout: 'vertical', floating: true, align: 'left', x: 100, verticalAlign: 'top', y: 70 } } user_opts = { css: ['.highcharts-background {fill: #efefef;stroke: #a4edba;stroke-width: 2px;}'] } data = Daru::Vector.new([29.9, 71.5, 106.4, 129.2, 144.0, 176.0, 135.6, 148.5, 216.4, 194.1, 95.6, 54.4]) line = Daru::View::Plot.new(data, opts, user_opts) line.show_in_iruby # + opts = { chart: { type: 'line' }, title: { text: 'Chart border and background by CSS' }, xAxis: { categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] }, legend: { layout: 'vertical', floating: true, align: 'left', x: 100, verticalAlign: 'top', 
y: 70 } } user_opts = { css: ['.highcharts-button-symbol{fill: #90ed7d;stroke: #90ed7d;}'] } data = Daru::Vector.new([29.9, 71.5, 106.4, 129.2, 144.0, 176.0, 135.6, 148.5, 216.4, 194.1, 95.6, 54.4]) line = Daru::View::Plot.new(data, opts, user_opts) line.show_in_iruby # + opts = { rangeSelector: { selected: 1 }, chart: { type: 'candlestick' }, title: { text: 'AAPL Stock Price' }, plotOptions: { series: { dataGrouping: "{ units: [ [ 'week', // unit name [1] // allowed multiples ], [ 'month', [1, 2, 3, 4, 6] ] ] }".js_code } } } data = [ [1294617600000,48.40,49.03,48.17,48.92], [1294704000000,49.27,49.28,48.50,48.81], [1294790400000,49.04,49.20,48.86,49.20], [1294876800000,49.31,49.52,49.12,49.38], [1294963200000,49.41,49.78,49.21,49.78], [1295308800000,47.07,49.25,46.57,48.66], [1295395200000,49.76,49.80,48.13,48.41], [1295481600000,48.06,48.33,47.16,47.53], [1295568000000,47.68,47.84,46.66,46.67], [1295827200000,46.70,48.21,46.67,48.21], [1295913600000,48.05,48.78,47.80,48.77], [1296000000000,48.99,49.37,48.79,49.12], [1296086400000,49.11,49.24,48.98,49.03], [1296172800000,49.17,49.20,47.65,48.01], [1296432000000,47.97,48.58,47.76,48.47], [1309478400000,47.99,49.07,47.74,49.04], [1309824000000,49.00,49.98,48.93,49.92], [1309910400000,49.85,50.59,49.53,50.25], [1309996800000,50.67,51.14,50.57,51.03], [1310083200000,50.48,51.43,50.31,51.39], [1310342400000,50.91,51.40,50.40,50.57], [1310428800000,50.50,51.10,49.80,50.54], [1310515200000,51.19,51.43,50.91,51.15], [1310601600000,51.57,51.66,50.91,51.11], [1310688000000,51.60,52.14,51.31,52.13], [1310947200000,52.20,53.52,52.18,53.40], [1311033600000,54.00,54.09,53.33,53.84], [1311120000000,56.59,56.61,55.14,55.27], [1311206400000,55.28,55.72,54.84,55.33], [1311292800000,55.47,56.44,55.39,56.19], [1311552000000,55.76,57.14,55.66,56.93], [1311638400000,57.14,57.79,57.10,57.63], [1311724800000,57.23,57.52,56.02,56.08], [1311811200000,55.95,56.71,55.45,55.97], [1311897600000,55.38,56.45,54.86,55.78], 
[1417392000000,118.81,119.25,111.27,115.07], [1417478400000,113.50,115.75,112.75,114.63], [1417564800000,115.75,116.35,115.11,115.93], [1417651200000,115.77,117.20,115.29,115.49], [1417737600000,115.99,116.08,114.64,115.00], [1417996800000,114.10,114.65,111.62,112.40], [1418083200000,110.19,114.30,109.35,114.12], [1418169600000,114.41,114.85,111.54,111.95], [1418256000000,112.26,113.80,111.34,111.62], [1418342400000,110.46,111.87,109.58,109.73], [1418601600000,110.70,111.60,106.35,108.22], [1418688000000,106.37,110.16,106.26,106.74], [1418774400000,107.12,109.84,106.82,109.41], [1418860800000,111.87,112.65,110.66,112.65], [1418947200000,112.26,113.24,111.66,111.78], [1419206400000,112.16,113.49,111.97,112.94], [1419292800000,113.23,113.33,112.46,112.54], [1419379200000,112.58,112.71,112.01,112.01], [1419552000000,112.10,114.52,112.01,113.99], [1419811200000,113.79,114.77,113.70,113.91], [1419897600000,113.64,113.92,112.11,112.52], [1419984000000,112.82,113.13,110.21,110.38], [1514851200000,170.16,172.30,169.26,172.26], [1514937600000,172.53,174.55,171.96,172.23], [1515024000000,172.54,173.47,172.08,173.03], [1515110400000,173.44,175.37,173.05,175.00] ] user_opts = { chart_class: 'stock', css: ['.highcharts-candlestick-series .highcharts-point {stroke: #2f7ed8;}', '.highcharts-candlestick-series .highcharts-point-up {stroke: silver;fill: silver;}'] } candle_stick = Daru::View::Plot.new(data, opts, user_opts) candle_stick.show_in_iruby # + opts = { title: { text: 'Pie point CSS' }, chart: { type: 'pie' }, xAxis: { categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] }, plotOptions: { series: { allowPointSelect: true, keys: ['name', 'y', 'selected', 'sliced'], showInLegend: true } } } user_opts = { css: ['.highcharts-point.highcharts-color-2,', '.highcharts-legend-item.highcharts-color-2 .highcharts-point,', '.highcharts-tooltip .highcharts-color-2 {fill: #78a8d1;}', '.highcharts-tooltip.highcharts-color-2,', 
'.highcharts-data-label-connector.highcharts-color-2 {stroke: #78a8d1;}'] } data = [ ['Apples', 29.9, false], ['Pears', 71.5, false], ['Oranges', 106.4, false], ['Plums', 129.2, false], ['Bananas', 144.0, false], ['Peaches', 176.0, false], ['Prunes', 135.6, true, true], ['Avocados', 148.5, false] ] pie = Daru::View::Plot.new(data, opts, user_opts) pie.show_in_iruby # + opts = { chart: { type: 'line' }, title: { text: 'Crosshair by CSS' }, xAxis: { categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] }, legend: { layout: 'vertical', floating: true, align: 'left', x: 100, verticalAlign: 'top', y: 70 }, yAxis: { crosshair: true } } user_opts = { css: ['.highcharts-crosshair{fill: #90ed7d;stroke: #90ed7d;}'] } data = Daru::Vector.new([29.9, 71.5, 106.4, 129.2, 144.0, 176.0, 135.6, 148.5, 216.4, 194.1, 95.6, 54.4]) line = Daru::View::Plot.new(data, opts, user_opts) line.show_in_iruby # + opts = { chart: { type: 'gauge' }, title: { text: 'Speedometer' }, pane: { startAngle: -150, endAngle: 150, background: [{ className: 'outer-pane', outerRadius: '115%' }, { className: 'middle-pane', outerRadius: '112%' }, { # default background }, { className: 'inner-pane', outerRadius: '105%', innerRadius: '103%' }] }, # the value axis yAxis: { min: 0, max: 200, minorTickInterval: 'auto', minorTickLength: 10, minorTickPosition: 'inside', tickPixelInterval: 30, tickPosition: 'inside', tickLength: 10, labels: { step: 2, rotation: 'auto' }, title: { text: 'km/h' }, plotBands: [{ from: 0, to: 120, className: 'green-band' }, { from: 120, to: 160, className: 'yellow-band' }, { from: 160, to: 200, className: 'red-band' }] }, plotOptions: { series: { tooltip: { valueSuffix: ' km/h' }, name: 'speed' } } } user_options = { modules: ['highcharts-more'], css: ['.outer-pane {fill: #EFEFEF;}', '.middle-pane {stroke-width: 1px;stroke: #AAA;}', '.inner-pane {fill: #DDDDDD;}', '.green-band {fill: #55BF3B;fill-opacity: 1;}', '.yellow-band {fill: 
#DDDF0D;fill-opacity: 1;}', '.red-band {fill: #DF5353;fill-opacity: 1;}'] } data = Daru::Vector.new([80]) gauge = Daru::View::Plot.new(data, opts, user_options) gauge.show_in_iruby # + opts = { chart: { type: 'scatter' }, title: { text: 'Styling grid and ticks' }, xAxis: { minorTickInterval: 'auto', startOnTick: true, endOnTick: true } } user_opts = { css: ['.highcharts-xaxis-grid .highcharts-grid-line {stroke-width: 2px;stroke: #d8d8d8;}', '.highcharts-xaxis .highcharts-tick {stroke-width: 2px;stroke: #d8d8d8;}'] } data = [[161.2, 51.6], [167.5, 59.0], [159.5, 49.2], [157.0, 63.0], [155.8, 53.6], [170.0, 59.0], [159.1, 47.6], [166.0, 69.8], [176.2, 66.8], [160.2, 75.2], [172.5, 55.2], [170.9, 54.2], [172.9, 62.5], [153.4, 42.0], [160.0, 50.0], [147.2, 49.8], [168.2, 49.2], [175.0, 73.2], [157.0, 47.8], [167.6, 68.8], [159.5, 50.6], [175.0, 82.5], [166.8, 57.2], [176.5, 87.8], [170.2, 72.8], [174.0, 54.5], [173.0, 59.8], [179.9, 67.3], [170.5, 67.8], [160.0, 47.0], [154.4, 46.2], [162.0, 55.0], [176.5, 83.0], [160.0, 54.4], [152.0, 45.8], [162.1, 53.6], [170.0, 73.2], [160.2, 52.1], [161.3, 67.9], [166.4, 56.6], [168.9, 62.3], [163.8, 58.5], [167.6, 54.5], [160.0, 50.2], [161.3, 60.3], [167.6, 58.3], [165.1, 56.2], [160.0, 50.2], [170.0, 72.9], [157.5, 59.8], [167.6, 61.0], [160.7, 69.1], [163.2, 55.9], [152.4, 46.5], [157.5, 54.3], [168.3, 54.8], [180.3, 60.7], [165.5, 60.0], [165.0, 62.0], [164.5, 60.3], [156.0, 52.7], [160.0, 74.3], [163.0, 62.0], [165.7, 73.1], [161.0, 80.0], [162.0, 54.7], [166.0, 53.2], [174.0, 75.7], [172.7, 61.1], [167.6, 55.7], [151.1, 48.7], [164.5, 52.3], [163.5, 50.0], [152.0, 59.3], [169.0, 62.5], [164.0, 55.7], [161.2, 54.8], [155.0, 45.9], [170.0, 70.6], [176.2, 67.2], [170.0, 69.4], [162.5, 58.2], [170.3, 64.8], [164.1, 71.6], [169.5, 52.8], [163.2, 59.8], [154.5, 49.0], [159.8, 50.0], [173.2, 69.2], [170.0, 55.9]] df = Daru::DataFrame.new( { Data1: data.map {|row| row[0]}, Data2: data.map {|row| row[1]} } ) scatter = 
Daru::View::Plot.new(df, opts, user_opts) scatter.show_in_iruby # + opts = { title: { text: 'Pie point CSS' }, chart: { type: 'pie' }, xAxis: { categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] }, plotOptions: { series: { allowPointSelect: true, keys: ['name', 'y', 'selected', 'sliced'], showInLegend: true } } } user_opts = { css: ['.highcharts-legend-box {fill: black;fill-opacity: 0.3;stroke: black;stroke-width: 1px;}'] } data = [ ['Apples', 29.9, false], ['Pears', 71.5, false], ['Oranges', 106.4, false], ['Plums', 129.2, false], ['Bananas', 144.0, false], ['Peaches', 176.0, false], ['Prunes', 135.6, true, true], ['Avocados', 148.5, false] ] pie = Daru::View::Plot.new(data, opts, user_opts) pie.show_in_iruby # + opts = { chart: { type: 'pie', width: 500, borderWidth: 2 }, title: { text: 'Legend styled by CSS' }, credits: { enabled: false }, legend: { layout: 'vertical', align: 'right', verticalAlign: 'top', y: 30, title: { text: 'Male name' } } } user_opts = { css: ['.highcharts-legend-box {fill: black;fill-opacity: 0.3;stroke: black;stroke-width: 1px;}', '.highcharts-legend-item text {fill: #e0e0e0;transition: fill 250ms;}', '.highcharts-legend-item:hover text {fill: white;}', '.highcharts-legend-item-hidden * {fill: gray !important;stroke: gray !important;}', '.highcharts-legend-title {fill: white;font-style: italic;}', '.highcharts-legend-navigation {fill: white;}', '.highcharts-legend-nav-active {fill: white;}', '.highcharts-legend-nav-inactive {fill: gray;}'] } series_dt = [{ data: "(function () { var names = 'Ari,Bjartur,Bogi,Bragi,Dánjal,Dávur,Eli,Emil,Fróði,Hákun,Hanus,Hjalti,Ísakur,' + 'Johan,Jóhan,Julian,Kristian,Leon,Levi,Magnus,Martin,Mattias,Mikkjal,Nóa,Óli,Pauli,Petur,Rói,Sveinur,Teitur', arr = []; Highcharts.each(names.split(','), function (name) { arr.push({ name: name, y: Math.round(Math.random() * 100) }); }); return arr; }())".js_code, showInLegend: true }] pie = Daru::View::Plot.new(series_dt, 
opts, user_opts) pie.show_in_iruby # + opts = { chart: { type: 'scatter' }, title: { text: 'Styling grid and ticks' }, xAxis: { minorTickInterval: 'auto', startOnTick: true, endOnTick: true } } user_opts = { css: ['.highcharts-minor-grid-line {stroke-dasharray: 2, 2;stroke-width: 2px;stroke: #d8d8d8;}'] } data = [[161.2, 51.6], [167.5, 59.0], [159.5, 49.2], [157.0, 63.0], [155.8, 53.6], [170.0, 59.0], [159.1, 47.6], [166.0, 69.8], [176.2, 66.8], [160.2, 75.2], [172.5, 55.2], [170.9, 54.2], [172.9, 62.5], [153.4, 42.0], [160.0, 50.0], [147.2, 49.8], [168.2, 49.2], [175.0, 73.2], [157.0, 47.8], [167.6, 68.8], [159.5, 50.6], [175.0, 82.5], [166.8, 57.2], [176.5, 87.8], [170.2, 72.8], [174.0, 54.5], [173.0, 59.8], [179.9, 67.3], [170.5, 67.8], [160.0, 47.0], [154.4, 46.2], [162.0, 55.0], [176.5, 83.0], [160.0, 54.4], [152.0, 45.8], [162.1, 53.6], [170.0, 73.2], [160.2, 52.1], [161.3, 67.9], [166.4, 56.6], [168.9, 62.3], [163.8, 58.5], [167.6, 54.5], [160.0, 50.2], [161.3, 60.3], [167.6, 58.3], [165.1, 56.2], [160.0, 50.2], [170.0, 72.9], [157.5, 59.8], [167.6, 61.0], [160.7, 69.1], [163.2, 55.9], [152.4, 46.5], [157.5, 54.3], [168.3, 54.8], [180.3, 60.7], [165.5, 60.0], [165.0, 62.0], [164.5, 60.3], [156.0, 52.7], [160.0, 74.3], [163.0, 62.0], [165.7, 73.1], [161.0, 80.0], [162.0, 54.7], [166.0, 53.2], [174.0, 75.7], [172.7, 61.1], [167.6, 55.7], [151.1, 48.7], [164.5, 52.3], [163.5, 50.0], [152.0, 59.3], [169.0, 62.5], [164.0, 55.7], [161.2, 54.8], [155.0, 45.9], [170.0, 70.6], [176.2, 67.2], [170.0, 69.4], [162.5, 58.2], [170.3, 64.8], [164.1, 71.6], [169.5, 52.8], [163.2, 59.8], [154.5, 49.0], [159.8, 50.0], [173.2, 69.2], [170.0, 55.9]] df = Daru::DataFrame.new( { Data1: data.map {|row| row[0]}, Data2: data.map {|row| row[1]} } ) scatter = Daru::View::Plot.new(df, opts, user_opts) scatter.show_in_iruby # + # line with markers and shadow opts = { rangeSelector: { selected: 1 }, title: { text: 'AAPL Stock Price' }, plotOptions: { series: { marker: { enabled: true, 
radius: 3 }, shadow: true, tooltip: { valueDecimals: 2 } } } } user_opts = { chart_class: 'stock', css: ['.highcharts-navigator-handle{fill: #90ed7d;stroke: #90ed7d;}'] } data = [ [1147651200000,67.79], [1147737600000,64.98], [1147824000000,65.26], [1149120000000,62.17], [1149206400000,61.66], [1149465600000,60.00], [1149552000000,59.72], [1157932800000,72.50], [1158019200000,72.63], [1158105600000,74.20], [1158192000000,74.17], [1158278400000,74.10], [1158537600000,73.89], [1170288000000,84.74], [1170374400000,84.75], [1174953600000,95.46], [1175040000000,93.24], [1175126400000,93.75], [1175212800000,92.91], [1180051200000,113.62], [1180396800000,114.35], [1180483200000,118.77], [1180569600000,121.19], ] df = Daru::DataFrame.new( { data1: data.map {|row| row[0]}, data2: data.map {|row| row[1]} } ) line_series_shadow_markers = Daru::View::Plot.new(df, opts, user_opts) line_series_shadow_markers.show_in_iruby # + # line with markers and shadow opts = { rangeSelector: { selected: 1 }, title: { text: 'AAPL Stock Price' }, plotOptions: { series: { marker: { enabled: true, radius: 3 }, shadow: true, tooltip: { valueDecimals: 2 } } } } data = [ [1147651200000,67.79], [1147737600000,64.98], [1147824000000,65.26], [1149120000000,62.17], [1149206400000,61.66], [1149465600000,60.00], [1149552000000,59.72], [1157932800000,72.50], [1158019200000,72.63], [1158105600000,74.20], [1158192000000,74.17], [1158278400000,74.10], [1158537600000,73.89], [1170288000000,84.74], [1170374400000,84.75], [1174953600000,95.46], [1175040000000,93.24], [1175126400000,93.75], [1175212800000,92.91], [1180051200000,113.62], [1180396800000,114.35], [1180483200000,118.77], [1180569600000,121.19], ] user_opts = { chart_class: 'stock', css: ['.highcharts-navigator-mask-inside{fill: #90ed7d;stroke: #90ed7d;}', '.highcharts-navigator-outline{stroke: #90ed7d;}'] } df = Daru::DataFrame.new( { data1: data.map {|row| row[0]}, data2: data.map {|row| row[1]} } ) line_series_shadow_markers = 
Daru::View::Plot.new(df, opts, user_opts) line_series_shadow_markers.show_in_iruby # + # Area chart : negative values opts = { title: { text: 'Monthly temperatures in a random cold place' }, subtitle: { text: 'All series should be blue below zero' }, xAxis: { type: 'datetime' }, plotOptions: { series: { className: 'main-color', negativeColor: true } } } user_opts = { css: ['.highcharts-point {stroke: white;}', '.main-color .highcharts-graph {stroke: red;}', '.main-color, .main-color .highcharts-point {fill: red;}', '.highcharts-graph.highcharts-negative {stroke: blue;}', '.highcharts-area.highcharts-negative {fill: blue;}', '.highcharts-point.highcharts-negative {fill: blue;}'] } series_dt = [ { name: 'Spline', type: 'spline', data: [-6.4, -5.2, -3.0, 0.2, 2.3, 5.5, 8.4, 8.3, 5.1, 0.9, -1.1, -4.0], pointStart: 'Date.UTC(2010, 0)'.js_code, pointInterval: '31 * 24 * 36e5'.js_code }, { name: 'Area', type: 'area', data: [-6.4, -5.2, -3.0, 0.2, 2.3, 5.5, 8.4, 8.3, 5.1, 0.9, -1.1, -4.0], pointStart: 'Date.UTC(2011, 0)'.js_code, pointInterval: '30 * 24 * 36e5'.js_code }, { name: 'Column', type: 'column', data: [-6.4, -5.2, -3.0, 0.2, 2.3, 5.5, 8.4, 8.3, 5.1, 0.9, -1.1, -4.0], pointStart: 'Date.UTC(2012, 0)'.js_code, pointInterval: '30 * 24 * 36e5'.js_code } ] area_neg = Daru::View::Plot.new(series_dt, opts, user_opts) area_neg.show_in_iruby # + opts = { chart: { type: 'line' }, title: { text: 'Chart border and background by CSS' }, xAxis: { categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] }, legend: { layout: 'vertical', floating: true, align: 'left', x: 100, verticalAlign: 'top', y: 70 }, yAxis: { crosshair: true } } user_opts = { css: ['.highcharts-plot-background {fill: #efffff;}', '.highcharts-plot-border {stroke-width: 2px;stroke: #7cb5ec;}'] } data = Daru::Vector.new([29.9, 71.5, 106.4, 129.2, 144.0, 176.0, 135.6, 148.5, 216.4, 194.1, 95.6, 54.4]) line = Daru::View::Plot.new(data, opts, user_opts) line.show_in_iruby # 
+ opts = { chart: { type: 'line' }, title: { text: 'Title styles ...' }, subtitle: { text: '... and subtitle styles' }, xAxis: { categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] }, legend: { layout: 'vertical', floating: true, align: 'left', x: 100, verticalAlign: 'top', y: 70 }, yAxis: { crosshair: true } } user_opts = { css: ['.highcharts-title {fill: #434348;font-weight: bold;}', ".highcharts-subtitle {font-family: 'Courier New', monospace;font-style: italic;fill: #7cb5ec;}"] } data = Daru::Vector.new([29.9, 71.5, 106.4, 129.2, 144.0, 176.0, 135.6, 148.5, 216.4, 194.1, 95.6, 54.4]) line = Daru::View::Plot.new(data, opts, user_opts) line.show_in_iruby # + opts = { chart: { type: 'line' }, title: { text: 'Title styles ...' }, xAxis: { categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] }, legend: { layout: 'vertical', floating: true, align: 'left', x: 100, verticalAlign: 'top', y: 70 }, yAxis: { crosshair: true } } user_opts = { css: ['.highcharts-tooltip-box {fill: black;fill-opacity: 0.1;stroke-width: 0;}', '.highcharts-title {fill: #434348;font-weight: bold;}'] } data = Daru::Vector.new([29.9, 71.5, 106.4, 129.2, 144.0, 176.0, 135.6, 148.5, 216.4, 194.1, 95.6, 54.4]) line = Daru::View::Plot.new(data, opts, user_opts) line.show_in_iruby # + opts = { title: { text: 'Styled color zones' }, yAxis: { min: -10 }, plotOptions: { series: { zones: [{ value: 0, className: 'zone-0' }, { value: 10, className: 'zone-1' }, { className: 'zone-2' }], threshold: -10 } } } user_opts = { css: ['.highcharts-point {stroke: white;}', '.highcharts-graph.zone-0 {stroke: #f7a35c;}', '.highcharts-area.zone-0 {fill: #f7a35c;}', '.highcharts-point.zone-0 {fill: #f7a35c;}', '.highcharts-graph.zone-1 {stroke: #7cb5ec;}', '.highcharts-area.zone-1 {fill: #7cb5ec;}', '.highcharts-point.zone-1 {fill: #7cb5ec;}', '.highcharts-graph.zone-2 {stroke: #90ed7d;}', '.highcharts-area.zone-2 {fill: #90ed7d;}', 
'.highcharts-point.zone-2 {fill: #90ed7d;}'] } series_dt = [{ type: 'areaspline', data: [-10, -5, 0, 5, 10, 15, 10, 10, 5, 0, -5] }, { type: 'column', data: [1, 13, 2, -4, 6, 7, 5, 3, 2, -1, 2] }] area_spline = Daru::View::Plot.new(series_dt, opts, user_opts) area_spline.show_in_iruby # + opts = { chart: { type: 'column' }, title: { text: 'Column chart CSS' } } user_opts = { css: ['.highcharts-plot-background {fill: #efffff;}'] } data_frame = Daru::DataFrame.new( { 'Beer' => ['Kingfisher', 'Snow', 'Bud Light', 'Tiger Beer', 'Budweiser'], 'Gallons sold' => [500, 400, 450, 200, 250] } ) chart = Daru::View::Plot.new(data_frame, opts, user_opts) chart.show_in_iruby # + opts = { chart: { type: 'line' }, title: { text: 'Column chart CSS' } } user_opts = { css: ['.highcharts-xaxis-grid .highcharts-grid-line {stroke-width: 2px;stroke: #d8d8d8;}', '.highcharts-xaxis .highcharts-tick {stroke-width: 2px;stroke: #d8d8d8;}'] } chart = Daru::View::Plot.new(data_frame.first(3), opts, user_opts) chart.show_in_iruby # -
spec/dummy_iruby/Highcharts - Custom styling in CSS.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.4 64-bit
#     name: python374jvsc74a57bd0021d9f4f6a0c9e23e32c4246ac82593951ffad9baab3e58c0c69e8a8c06b339b
# ---

# # 04 - Grouping

# ### Step 1. Import the necessary libraries

import pandas as pd

# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/drinks.csv).

# ### Step 3. Assign it to a variable called drinks.

# Pull the raw CSV straight from GitHub into a DataFrame.
drinks = pd.read_csv('https://raw.githubusercontent.com/justmarkham/DAT8/master/data/drinks.csv')
drinks

# ### Step 4. Which continent drinks more beer on average?

drinks.groupby('continent')['beer_servings'].mean()

# ### Step 5. For each continent print the statistics for wine consumption.

drinks.groupby('continent')['wine_servings'].describe()

# ### Step 6. Print the mean alcohol consumption per continent for every column

drinks.groupby('continent').aggregate('mean')

# ### Step 7. Print the median alcohol consumption per continent for every column

drinks.groupby('continent').aggregate('median')

# ### Step 8. Print the mean, min and max values for spirit consumption.

# +
# One grouped aggregation with three statistics for the single column of interest.
drinks.groupby('continent').aggregate({"spirit_servings": ['max', 'min', "mean"]})
# -
week4_EDA_np_pd_json_apis_regex/pandas2/Alcohol_Consumption.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Python basics for beginners
# Fundamentals of basic programming
# Loops in the Python language

# RANGE
# range(N) yields 0, 1, 2, ..., N-1 (start defaults to 0, step to +1).
ra1 = range(10)
print("range (10): ", ra1)
# Iterating over ra1 yields: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9

ra2 = range(4)
print("range (4): ", ra2)  # fixed: previously printed ra1 by mistake
# Iterating over ra2 yields: 0, 1, 2, 3

ra2 = range(1)
print("range (1): ", ra2)  # fixed: previously printed ra1 by mistake
# Iterating over ra2 yields: 0

# You can set both the start point and the end point:
# range(start, stop) counts from start up to stop - 1 in steps of +1
# (stop itself is excluded).
ra1 = range(4, 9)
print("range (4, 9): ", ra1)
# Iterating over ra1 yields: 4, 5, 6, 7, 8

ra2 = range(154, 160)
print("range (154, 160): ", ra2)  # fixed: previously printed ra1 by mistake
# Iterating over ra2 yields: 154, 155, 156, 157, 158, 159

ra2 = range(4000, 4003)  # fixed: 40003 contradicted the expected output below
print("range (4000, 4003): ", ra2)  # fixed: label said "range (1)" and printed ra1
# Iterating over ra2 yields: 4000, 4001, 4002

# +
# LOOPS
# We want to change the color of some boxes to white.
# First, make a list of the boxes.
box1 = ["red", "blue", "green", "black", "orange", "pink", "yellow"]
# With a loop we can change the color of each box:
for i in range(0, 7):
    box1[i] = "white"
    print("box1 in loop", box1)
print("box1 after loop:", box1)

# Example: vector of binary codes, as strings
bin1 = ["0010", "0011", "1001", "1010", "0001", "1000"]
for i in range(0, 6):
    bin1[i] = "1111"
    print("bin1 in loop", bin1)
print("bin1 after loop:", bin1)

# Example: vector of integers
Num1 = [1, 56, 23, 51, 70, 18, 90, 87]
for i in range(0, 8):
    Num1[i] = 25
    print("Num1 in loop", Num1)
print("Num1 after loop:", Num1)  # fixed: closing parenthesis was missing

# Direct iteration over lists and tuples
# We can iterate through lists or tuples directly instead of using range():
box1 = ["red", "blue", "green", "black", "orange", "pink", "yellow"]
for box in box1:
    print("directly iteration: ", box)
print("box1 after loop: ", box1)

# Example: vector of binary codes, as strings
binx1 = ["0010", "0011", "1001", "1010", "0001", "1000"]
for binx in binx1:
    print("directly iteration: ", binx)
print("binx1 after loop, directly iteration: ", binx1)

# Example: vector of integers
Num1 = [1, 56, 23, 51, 70, 18, 90, 87]
for Num in Num1:
    print("directly iteration: ", Num)
print("Num1 after loop, directly iteration:", Num1)

# enumerate() adds a counter to an iterable and returns an enumerate object.
# It can be used to obtain both the index and the element:
#     for index, value in enumerate(some_list)
# index is the position in the list, value is the element at that position.
list1 = ["red", "blue", "green", "black", "orange", "pink", "yellow"]
for index, value in enumerate(list1):
    # fixed: "Applocation" typo in the message
    print("Application of Enumerate in 'for', index of element in the list:", index, " value of the related index:", value)

# Example: vector of integers
Num1 = [1, 56, 23, 51, 70, 18, 90, 87]
for i, num in enumerate(Num1):
    print("Enumerate, index: ", i, " value: ", num)

binx1 = ["0010", "0011", "1001", "1010", "0001", "1000"]
for i, binx in enumerate(binx1):
    print("Enumerate, index: ", i, " value: ", binx)

# Loop through the list and iterate on both index and element value
squares = ['red', 'yellow', 'green', 'purple', 'blue']
for i, square in enumerate(squares):
    print(i, square)

# +
# WHILE
# while (condition is true): ...
binx1 = ["0010", "0011", "1001", "1010", "0001", "1111", "1001", "1010", "0000", "1000"]
binx2 = []
i = 0
# Copy elements until the sentinel "1111" is reached.
while binx1[i] != "1111":
    binx2.append(binx1[i])
    print("binx1 ith", binx1[i])
    print("binx2 ith", binx2[i])
    i = i + 1
    if i > 50:  # safety guard against an endless loop
        break
print("binx1:", binx1)
print("binx2:", binx2)

# Example
dates = [1982, 1980, 1973, 2000]
i = 0
year = 0
while year != 1973:
    year = dates[i]
    i = i + 1
print(year)
print("It took ", i, "repetitions to get out of loop.")

# Same idea, now stopping at "1010" (binx2 keeps its previous contents).
i = 0
while binx1[i] != "1010":
    binx2.append(binx1[i])
    print("binx1 ith", binx1[i])
    print("binx2 ith", binx2[i])
    i = i + 1
    if i > 50:
        break
print("binx1:", binx1)
print("binx2:", binx2)

i = 0
box1 = ["red", "blue", "green", "black", "orange", "pink", "yellow"]
box2 = []
while box1[i] != "green":
    box2.append(box1[i])
    print("box1 ith", box1[i])
    print("box2 ith", box2[i])
    i = i + 1
    if i > 50:
        break
print("box1:", box1)
print("box2:", box2)
PythonBasics_Loops.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from scipy.optimize import brentq

from lightshifts.consts import h, hbar, c, eps0
from lightshifts.auxiliary import smart_gen_array, laser_intensity, \
    plot_total_lightshift_around_hyperfine_state, \
    plot_scalar_lightshift
import lightshifts.lightshift_solver as ls
# -

# ## Calculate potential for the clock states 1S0, 3P0 and for 3P1

# ** Transitions relevant for clock states and intercombination line light shift **

# One solver per electronic state, loaded from the local transition tables.
ls_1S0 = ls('atom_yb173.json', 'transitions_1S0.json')
ls_3P0 = ls('atom_yb173.json', 'transitions_3P0.json')
ls_3P1 = ls('atom_yb173.json', 'transitions_3P1.json')

# ** find magic wavelength **

# The magic wavelength is where the two clock states have equal scalar
# polarizability, i.e. a root of their difference; bracket it in 660-800 nm.
lambda_m = brentq(
    lambda w: ls_3P0.scalar_polarizability(w) - ls_1S0.scalar_polarizability(w),
    660*1e-9,
    800*1e-9,
)
print('magic wavelength is %1.2f nm'%(lambda_m*1e9))

# lattice depth there in kHz/(W/cm^2)
print('lattice depth is %1.2f Hz/(W/cm^2)'%ls_1S0.scalar_lightshift(lambda_m))

# for a given laser beam (L3)
laser_power = 100e-3  # in Watts
beam_waist = 127e-6   # in meters
l_int = laser_intensity(laser_power, beam_waist)
print('lattice depth for %1.2f W/cm^2 is %1.2f Hz'%(l_int, ls_1S0.scalar_lightshift(lambda_m, l_int)))

# ** SDL polarizability ratio **

lambda_sdl = 670e-9
ls_3P0.scalar_lightshift(lambda_sdl)/ls_1S0.scalar_lightshift(lambda_sdl)

# **Yellow probe light shift**

# +
lambda_clock = 578e-9
clock_laser_intensity = laser_intensity(laser_power=50e-3, beam_waist=127e-6)

# Differential shift between the two clock states at the probe wavelength.
probe_shift = ls_3P0.scalar_lightshift(lambda_clock, clock_laser_intensity) \
    - ls_1S0.scalar_lightshift(lambda_clock, clock_laser_intensity)
print('yellow probe shift for %1.2f W/cm^2 is %1.2f Hz'%(clock_laser_intensity, probe_shift))
# -

# ** generate and plot light shift table for clock states and 3P1 **

plot_scalar_lightshift('atom_yb173.json', 'transitions_1S0.json')

plot_scalar_lightshift('atom_yb173.json', 'transitions_3P0.json')
plt.ylim(-100, 100)

# ## OSG ground state polarizability including vector and tensor shifts

plt.figure(figsize=(9, 6))
plot_total_lightshift_around_hyperfine_state('atom_yb173.json', 'transitions_1S0.json',
                                             ['6s6p', '3P1'], Ff=7/2, q=1,
                                             df_min=10e9, df_max=10e9, n=100)
plt.ylim(-100e3, 100e3)
plt.show()

plt.figure(figsize=(9, 6))
plot_total_lightshift_around_hyperfine_state('atom_yb173.json', 'transitions_1S0.json',
                                             ['6s6p', '3P1'], Ff=7/2, q=0,
                                             df_min=10e9, df_max=10e9, n=100)
plt.ylim(-100e3, 100e3)
plt.show()

# ## "Repumper OSG" excited clock state polarizability including vector and tensor shifts

plt.figure(figsize=(9, 6))
plot_total_lightshift_around_hyperfine_state('atom_yb173.json', 'transitions_3P0.json',
                                             ['6s5d', '3D1'], Ff=3/2, q=1,
                                             df_min=10e9, df_max=10e9, n=100)
plt.ylim(-1e6, 1e6)
plt.show()

plt.figure(figsize=(9, 6))
plot_total_lightshift_around_hyperfine_state('atom_yb173.json', 'transitions_3P0.json',
                                             ['6s5d', '3D1'], Ff=3/2, q=0,
                                             df_min=10e9, df_max=10e9, n=100)
plt.ylim(-1e6, 1e6)
plt.show()
examples/example_lightshifts_yb173.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # DS Automation Assignment # Using our prepared churn data from week 2: # - use pycaret to find an ML algorithm that performs best on the data # - Choose a metric you think is best to use for finding the best model; by default, it is accuracy but it could be AUC, precision, recall, etc. The week 3 FTE has some information on these different metrics. # - save the model to disk # - create a Python script/file/module with a function that takes a pandas dataframe as an input and returns the probability of churn for each row in the dataframe # - your Python file/function should print out the predictions for new data (new_churn_data.csv) # - the true values for the new data are [1, 0, 0, 1, 0] if you're interested # - test your Python module and function with the new data, new_churn_data.csv # - write a short summary of the process and results at the end of this notebook # - upload this Jupyter Notebook and Python file to a Github repository, and turn in a link to the repository in the week 5 assignment dropbox # # *Optional* challenges: # - return the probability of churn for each new prediction, and the percentile where that prediction is in the distribution of probability predictions from the training dataset (e.g. a high probability of churn like 0.78 might be at the 90th percentile) # - use other autoML packages, such as TPOT, H2O, MLBox, etc, and compare performance and features with pycaret # - create a class in your Python module to hold the functions that you created # - accept user input to specify a file using a tool such as Python's `input()` function, the `click` package for command-line arguments, or a GUI # - Use the unmodified churn data (new_unmodified_churn_data.csv) in your Python script. 
This will require adding the same preprocessing steps from week 2 since this data is like the original unmodified dataset from week 1. # + import pandas as pd df = pd.read_csv('Prepped_Churn_Data.csv') df = df.drop('Unnamed: 0', axis=1) df # - from pycaret.classification import setup, compare_models, predict_model, save_model, load_model, create_model automl = setup(df, target='Churn', fix_imbalance = True) automl[6] new_df= df.drop('Churn', axis=1) new_df gbc= create_model('gbc') best_model = gbc import pickle save_model(best_model, 'GBC') with open('GBC_model.pk', 'wb') as f: pickle.dump(best_model, f) with open('GBC_model.pk', 'rb') as f: loaded_model = pickle.load(f) new_data = new_df.iloc[-2:-1].copy() loaded_model.predict(new_data) loaded_lda = load_model('GBC') predict_model(loaded_lda, new_data) # + from IPython.display import Code Code('predict_churn.py') # - # %run predict_churn.py # # Summary # # This is not an excuse, but I have had a terrible week. Death in my extended family, increasing pressure from work (mandatory overtime), etc. I did not take the proper time this week to learn all I could before sitting down and finishing this assignment. I have run into the few errors you can see ahead and after battling errors for over 7 hours I have to throw in the towel. # # Prior to calling it, I have done what you can see above. In addition to that I had several issues with my data as I tried to model it. Eventually I had to go back and re-clean it all and changed a few of my categorical columns to numerical via one_hot_encoding as well as drop a few columns. # # Despite the cleaned up data I kept running into several different errors. My best model prediction continued to be the catbooster. Following along with the FTE I attempted to make that my best model, but everytime I ran it, it gave me an error that basically boiled down to saying "The filepath changes your column 1 from PhoneService to PaymentMethod, and that's not what it should be." 
# # I spent a good chunk of time trying to resolve that issue via google, stack overflow, and github. None of my attempts at fixing it worked, so I eventually settled with GBC. Which gave me several errors of having 10 features when I should only be having 9. Eventually I was able to find a solution through the Pycaret documentation that I corrected during the setup with the fix_imbalance=True addition that you can see above. # # With the model seemingly figured out I moved onto VS Code and wrote the script that you can see above and attached with this assignment. I struggled for some time trying to understand how to truly write a script. The book exercises did not compute, especially when compared to the (to me) large scope of this particular script. I eventually managed to make something that made a little sense to me through googling and YouTube tutorials. # # Despite all that I have consistently run into the error seen above, and I no longer have the will to continue fighting it and hopefully you will be able to make heads or tails out of it and can point me in a better direction. # # I apologize for not reaching out sooner, but as I said earlier, I didn't know I would struggle this hard and I didn't have the time to make it to the zoom meeting (I work every monday), nor the free time to schedule another time with you. #
Justin Racine week 5 .ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
from nltk.tag import pos_tag
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras import layers
import time

# assumes data/sample.csv has 'text' and 'label' columns -- TODO confirm
df = pd.read_csv('data/sample.csv')


def data_cleaning(text_list):
    """Lowercase, tokenize and POS-aware lemmatize each text.

    Parameters
    ----------
    text_list : iterable of str
        Raw texts to normalize.

    Returns
    -------
    list of str
        One space-joined string of lemmatized tokens per input text.
    """
    stopwords_rem = False  # stopword removal is deliberately disabled
    stopwords_en = stopwords.words('english')
    lemmatizer = WordNetLemmatizer()
    tokenizer = TweetTokenizer()
    reconstructed_list = []
    for each_text in text_list:
        lemmatized_tokens = []
        tokens = tokenizer.tokenize(each_text.lower())
        pos_tags = pos_tag(tokens)
        for each_token, tag in pos_tags:
            # Map Penn Treebank tags onto WordNet POS codes; anything that is
            # not a noun or verb is lemmatized as an adjective.
            if tag.startswith('NN'):
                pos = 'n'
            elif tag.startswith('VB'):
                pos = 'v'
            else:
                pos = 'a'
            lemmatized_token = lemmatizer.lemmatize(each_token, pos)
            if stopwords_rem:  # False
                if lemmatized_token not in stopwords_en:
                    lemmatized_tokens.append(lemmatized_token)
            else:
                lemmatized_tokens.append(lemmatized_token)
        reconstructed_list.append(' '.join(lemmatized_tokens))
    return reconstructed_list


# Small demo of the cleaning + tokenizing pipeline.
demo_text = ['Today is a good day', 'Today is a bad day', 'Today is ok']
demo_text = data_cleaning(demo_text)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(demo_text)
display(tokenizer.word_index)
demo_ary = pad_sequences(tokenizer.texts_to_sequences(demo_text))
# word_index=tokenizer.word_index
# pad_sequences(demo_ary, maxlen=5)
demo_ary

# +
X = df['text']  # .sample(200)
y = df['label']  # .sample(200)
X = data_cleaning(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.9)

# Fit the vocabulary on the training split only, then pad both splits to a
# fixed length of 40 tokens.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X_train)
vocab_size = len(tokenizer.word_index)+1
print(f'Vocab Size: {vocab_size}')
X_train = pad_sequences(tokenizer.texts_to_sequences(X_train), maxlen=40)
X_test = pad_sequences(tokenizer.texts_to_sequences(X_test), maxlen=40)

# One-hot encode the labels exactly once (see note at the end of the file).
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# +
current_time = time.time()
model = Sequential()
model.add(layers.Embedding(input_dim=vocab_size,
                           output_dim=100,
                           input_length=40))
model.add(layers.Bidirectional(layers.LSTM(128)))
model.add(layers.Dense(2, activation='softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train,
          y_train,
          batch_size=256,
          epochs=5,
          validation_data=(X_test, y_test))
print(f'Time to train: {time.time()-current_time}')
# -

demo_df = pd.DataFrame(demo_ary)
demo_df['text'] = demo_text
demo_df

pad_sequences(tokenizer.texts_to_sequences(['Today is a good day']))

# NOTE(review): the original notebook ended by calling
#     y_train = to_categorical(y_train)
#     y_test = to_categorical(y_test)
# a second time. Both arrays are already one-hot encoded above, and a second
# pass would one-hot encode each row again, producing wrong 3-D arrays.
# The redundant calls were removed.
notebooks/lstm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Artificial Neural Network (ANN) - Part I # # ## 1. ANN Definition # # Artificial Neural Networks are models that try to mimic some of the human brain behaviors, specifically how to acquire and store that knowledge. From a process called training, the ANN can extract relationships and patterns found on a set of historical samples, and generalize this to samples not seen before. For example, imagine one can train an ANN to model the patterns that characterize a chronic disease from a set of past medical exams. Once the model is trained, it can predict the chance of a new exam be from an ill patient. Besides classification, ANN has been successfully applied in several other domains like Regression, clustering, time-series forecasting, and so on. Nowadays, ANN has evolved into Deep Learning, the cutting-edge technology used to control self-driving cars and to perform image classification. # ## 1. Perceptron # # The Perceptron is the fundamental rock to anyone who wants to goes deeper into Artificial Neural Network and Deep Learning. The Perceptron network is classified as a Feedforward single-layer network, is based on the McCulloch & Pitts (1943). Figure 1 depicts the MPC neuron: # # <img src="images/perceptron01.png" width="55%" title="MCP Neuron"> # # The MCP neuron has the main parts depicted next: # # **a) Input**: $[x_1, x_2,..., x_n]$ # # Signs or values from the environment (application). For example, in an application to predict houses prices, $x_1$ could be the size of the house at $m^2$, while $x_2$ could be the number of bedrooms. # # **b) Weights**: $[w_1, w_2,..., w_n]$ # # Values used to weight the importance of each input variable over the neuron output. 
# For example, considering the house prices prediction application, $w_1$ and $w_2$ would weight how significantly the size and the number of bedrooms (respectively) contribute to the predicted price.
#
# **c) Linear combination**: $\sum$
#
# Sums the weighted inputs to generate an activation potential.
#
# **d) Bias (Activation threshold)**: $-\theta$
#
# A threshold to limit the value of the linear combination.
#
# **e) Action Potential**: $u$
#
# The result of the difference between the linear combination and the bias.
#
# **f) Activation function**: $g(u)$
#
# This function is responsible for generating the output in an appropriate interval range for the application.
#
# **g) Output**: $y$
#
# The final value generated by the neuron.

# ## 2. Perceptron code
#
# First, we have to compute the action potential, which is given by $u = \sum_{i=1}^n x_i \cdot w_i - \theta$.
#
# Since the values of the inputs and weights are in the form of a matrix, we can simply perform a matrix multiplication, given by:
#
# $\Sigma = [w_1, w_2, ..., w_n]^T \cdot [x_1, x_2, ..., x_n]$
#
# After that, it would be necessary to subtract $\theta$ from $\Sigma$:
#
# $u = \Sigma - \theta$
#
# In order to simplify, we can add $-1$ to the input matrix: $[-1, w_1, w_2,..., w_n]$ and $\theta$ to the weights matrix: $[\theta, w_1, w_2,..., w_n]$. At the end, we have:
#
# $u = \sum_{i=0}^n x_i \cdot w_i$
#
# ### 2.1. Numpy library
#
# In Python, the Numpy library gives us a powerful set of resources to perform linear algebra operations. For example, consider we have the input given by $x = [0.1, 0.9, 0.5]$, the weights given by $[0.4, 0.3, 0.2]$ and $\theta = 1.5$. The basic operation of our neuron can be as simple as:

# +
import numpy as np

x = np.array([-1, 0.1, 0.9, 0.5])
weights = np.array([1.5,0.4,0.3,0.2])

u = np.matmul(x, weights)
print(round(u,2))
# -

# ### 2.2.
# Version 1 of our Perceptron class

# +
import numpy as np


class Perceptron:
    """Minimal Perceptron: random weights, bipolar step activation, no training.

    The activation threshold (theta) is folded into the weight vector:
    weights[0] plays the role of theta and every input vector is prepended
    with the constant -1.
    """

    def __init__(self, input_size):
        # input_size + 1 weights: index 0 is the threshold term (theta).
        self.weights = np.random.normal(size=input_size+1)
        self.inputs = []

    def bi_step(self, u):
        """Bipolar step activation: 1 for u >= 0, otherwise -1."""
        return 1 if u >= 0 else -1

    def output(self, inputs):
        """Return the neuron's output for one sample (1-D array of inputs)."""
        # Prepend the constant -1 so that weights[0] acts as -theta.
        inputs = np.append(-1, inputs)
        u = np.matmul(self.weights, inputs)
        return self.bi_step(u)


sample = np.array([0.1, 0.9, 0.5])
perceptron = Perceptron(3)
y = perceptron.output(sample)
print(y)
# -

# ## 2.3. Training our model
#
# As said before, an algorithm is necessary to train a model based on a set
# of samples. In this tutorial, we cover a basic algorithm called the Hebb rule.

# ## 2.4. The complete code

# +
import numpy as np


class Perceptron:
    """Single-layer Perceptron trained with the Hebb (perceptron) rule.

    The threshold theta is learned as weights[0]; every sample is prepended
    with the constant input -1. Targets are expected to be bipolar (+1/-1)
    to match the `bi_step` activation.
    """

    def __init__(self, input_size, learning_rate = 0.05):
        self.input_size = input_size
        self.weights = np.random.normal(size=self.input_size+1)
        self.inputs = []              # kept for backward compatibility (unused)
        self.epoch = 0                # epochs consumed by the last train() call
        self.learning_rate = learning_rate
        self.samples = np.array([])   # training inputs, shape (n_samples, input_size)
        self.outputs = np.array([])   # bipolar targets, shape (n_samples,)

    def bi_step(self, u):
        """Bipolar step activation: 1 for u >= 0, otherwise -1."""
        return 1 if u >= 0 else -1

    def train(self, max_epochs=None):
        """Fit the weights until every training sample is classified correctly.

        Parameters
        ----------
        max_epochs : int or None
            Optional safety cap on the number of epochs. The perceptron rule
            only converges for linearly separable data; without a cap the
            loop would spin forever on non-separable sets. None (the
            default) keeps the original unbounded behaviour.

        Raises
        ------
        ValueError
            If no training samples/outputs were assigned beforehand.
            (ValueError is a subclass of Exception, so callers catching the
            previous generic Exception keep working.)
        """
        if len(self.samples) == 0 or len(self.outputs) == 0:
            raise ValueError('Please, you must provide a dataset for training')
        # Fresh random start for every training run, as in the tutorial text.
        self.weights = np.random.normal(size=self.input_size+1)
        error = True
        self.epoch = 0
        while error:
            error = False
            for i, sample in enumerate(self.samples):
                sample = np.append(-1, sample)  # -1 pairs with the theta weight
                u = np.matmul(sample, self.weights)
                y = self.bi_step(u)
                if y != self.outputs[i]:
                    error = True
                    # Hebb rule: nudge the weights toward the desired output.
                    self.weights = self.weights + self.learning_rate * (self.outputs[i] - y) * sample
            self.epoch += 1
            if max_epochs is not None and self.epoch >= max_epochs:
                break

    def output(self, inputs):
        """Classify one sample (1-D array of length input_size)."""
        inputs = np.append(-1, inputs)
        u = np.matmul(self.weights, inputs)
        return self.bi_step(u)


samples = np.array([[0.1, 0.4], [0.3, 0.7], [0.6, 0.9], [0.5, 0.7]])
outputs = np.array([1, -1, -1, 1])

perceptron = Perceptron(len(samples[0]))
perceptron.samples = samples
perceptron.outputs = outputs
perceptron.train()
print(perceptron.weights)

y = perceptron.output(np.array([0.3, 0.7]))
print(y)
# -
ANN-Part-I-Introduction-to-Perceptron.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="fbtMNMXAoGxY" # # Общий финансовый анализ на Python (Часть 1) # > General Financial Analysis in Python (Part 1) # # - toc: true # - branch: master # - badges: true # - comments: true # - author: Zmey56 # - categories: [finance, investment, python] # + [markdown] id="teM5_eTBlI5t" # В прошлой статье рассмотрено как можно получить информацию по финансовым инструментам. Дальше будет опубликовано несколько статей о том, что первоначально можно делать с полученными данными, как проводить анализ и составлять стратегию. Материалы составлены на основании публикаций в иностранных источниках и курсах на одной из онлайн платформ. # # В этой статье будет рассмотрено, как рассчитывать доходность, волатильность и построить один из основных индикаторов. # + import pandas as pd import yfinance as yf import numpy as np import matplotlib.pyplot as plt sber = yf.download('SBER.ME','2016-01-01') # - # **Доходность** # # Данная величина представляет собой процентное изменение стоимости акции за один торговый день. Оно не учитывает дивиденды и комиссии. Его легко рассчитать используя функцию pct_change () из пакета Pandas. # # Как правило используют лог доходность, так как она позволяет лучше понять и исследовать изменения с течением времени. # + # Скорректированая цена закрытия` daily_close = sber[['Adj Close']] # Дневная доходность daily_pct_change = daily_close.pct_change() # Заменить NA значения на 0 daily_pct_change.fillna(0, inplace=True) print(daily_pct_change.head()) # Дневная лог доходность daily_log_returns = np.log(daily_close.pct_change()+1) print(daily_log_returns.head()) # - # Чтобы из полученных данных узнать недельную и/или месячную доходность, используют функцию resample(). 
# + # Взять у `sber` значения за последний рабочий день месяца monthly = sber.resample('BM').apply(lambda x: x[-1]) # Месячная доходность print(monthly.pct_change().tail()) # Пересчитать `sber` по кварталам и взять среднее значение за квартал quarter = sber.resample("4M").mean() # Квартальную доходность print(quarter.pct_change().tail()) # - # Функция pct_change () удобна для использования, но в свою очередь скрывает то, как получается значение. Схожее вычисление, которое поможет понять механизм, можно выполнить при помощи shift() из пакета из пакета Pandas. Дневная цена закрытия делится на прошлую (сдвинутую на один) цену и из полученного значения вычитается единица. Но есть один незначительный минус – первое значение в результате получается NA. # # Расчет доходности основан на формуле: # + [markdown] id="wHqxba1UlI5v" # ![](images/20210110_formula_1.png) # - # Дальше строится диаграмма распределения доходности и рассчитывается основная статистика: # Для значений по российским акциям есть небольшая тонкость. К названию акцию добавляется точка и заглавными буквами ME. Спасибо знатоки на смартлабе подсказали. # + # Дневная доходность daily_pct_change = daily_close / daily_close.shift(1) - 1 print(daily_pct_change.head()) # + # Диаграмма `daily_pct_c` daily_pct_change.hist(bins=50) plt.show() # Общая статистика print(daily_pct_change.describe()) # - # Распределение выглядит очень симметрично и нормально распределённым вокруг значения 0,00. Для получения других значений статистики используется функция description (). В результате видно, что среднее значение немного больше нуля, а стандартное отклонение составляет практически 0,02. # # **Кумулятивная доходность** # # Кумулятивная дневная прибыль полезна для определения стоимости инвестиций через определенные промежуток времени. Ее можно рассчитать, как приводится в коде ниже. 
# + # Кумулютивная дневная доходность cum_daily_return = (1 + daily_pct_change).cumprod() print(cum_daily_return.tail()) # + # Построение кумулятивной дневной доходности cum_daily_return.plot(figsize=(8,5)) plt.show() # - # Можно пересчитать доходность в месячном периоде: # + # Месячная кумулятивная доходность cum_monthly_return = cum_daily_return.resample("M").mean() print(cum_monthly_return.tail()) # - # Знание того, как рассчитать доходность, является ценным при анализе акции. Но еще большую ценность оно представляет при сравнении с другими акциями. # # Возьмем некоторые акции (выбор их совершенно случайный) и построим их диаграмму. # + ticker = ['AFLT.ME','DSKY.ME','IRAO.ME','PIKK.ME', 'PLZL.ME','SBER.ME','ENRU.ME'] stock = yf.download(ticker,'2018-01-01') # Дневная доходность в `daily_close_px` daily_pct_change = stock['Adj Close'].pct_change() # Распределение daily_pct_change.hist(bins=50, sharex=True, figsize=(20,8)) plt.show() # - # Еще один полезный график —матрица рассеяния. Ее можно легко построить при помощи функции scatter_matrix (), входящей в библиотеку pandas. В качестве аргументов используется daily_pct_change и устанавливается параметр Ядерной оценки плотности — Kernel Density Estimation. Кроме того, можно установить прозрачность с помощью параметра alpha и размер графика с помощью параметра figsize. # + from pandas.plotting import scatter_matrix # Матрица рассеивания `daily_pct_change` scatter_matrix(daily_pct_change, diagonal='kde', alpha=0.1,figsize=(20,20)) plt.show() # - # На этом пока все. В следующей статье будет рассмотрено вычисление волатильности, средней и использование метода наименьших квадратов.
_notebooks/2020-03-14-general-financial-analysis-python-part-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import tensorflow as tf x = tf.Variable(3, name="x") y = tf.Variable(4, name="y") f = x*x*y+y+2 f # no computation has been performed until this point. To actually do something we need # to run a session which will place the operations onto CPU or GPU. sess = tf.Session() sess.run(x.initializer) sess.run(y.initializer) result = sess.run(f) result sess.close()
tensorflow/tensorflow_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'   # silence TensorFlow info/warning logs
import tensorflow as tf
import numpy as np
from tensorflow import keras
from os import mkdir
from tensorflow.keras import optimizers, metrics, initializers
from tensorflow.keras import backend as K
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# NOTE(review): the next two cells reference thudohanoi_df, which is only
# built in a later cell below — the notebook cells are out of execution order.
thudohanoi_df.loc[thudohanoi_df.index.get_level_values(0) == 47, 'PM25'].values

fig = plt.figure(figsize=(16,16))
gs = fig.add_gridspec(2, 1)
axe_1 = fig.add_subplot(gs[0,0])
axe_2 = fig.add_subplot(gs[1,0])
# NOTE(review): Axes.plot has no `y=` keyword — this call looks broken; confirm.
axe_1.plot(y=thudohanoi_df.loc[thudohanoi_df.index.get_level_values(0) == 47, 'PM25'].values)
thudohanoi_df.loc[thudohanoi_df.index.get_level_values(0) == 48, 'PM25'].plot()
plt.tight_layout()
plt.show()

# +
import pandas as pd
import numpy as np
import glob
import xarray as xr

idx = pd.IndexSlice

root_path = "/mnt/4ba37af6-51fd-47bc-8321-8c500c229114/study/School/KHOA LUAN TOT NGHIEP"
_thudohanoi_data_path = root_path + '/Data/thudohanoi/refined_data'
_thudohanoi_files = glob.glob(_thudohanoi_data_path + '/*.csv')

# Concatenate every refined CSV into one frame indexed by (site_id, time).
# NOTE(review): DataFrame.append and np.int are deprecated/removed in newer
# pandas/NumPy releases — pin versions or migrate to pd.concat / int.
thudohanoi_df = pd.DataFrame()
for file in _thudohanoi_files:
    print('Currently reading file \n{}'.format(file))
    thudohanoi_df = thudohanoi_df.append(pd.read_csv(file, parse_dates=True, index_col=['site_id', 'time'], dtype={'CO': np.float64, 'NO2': np.float64,'PM25': np.float64, 'AQI_h': np.float64, 'AQI_h_I': np.int, 'site_id': np.int}))

# Remove site 16 because of some inconsistency in data
# NOTE(review): the filter actually drops site_id 49, not 16 — confirm which
# site the comment means.
thudohanoi_df = thudohanoi_df[(thudohanoi_df.index.get_level_values(0) != 49)]
# -

from sklearn.preprocessing import MinMaxScaler

# Fit one (-1, 1) scaler per signal on the full data set.
scaler = MinMaxScaler(feature_range=(-1,1))
scaler_PM25 = MinMaxScaler(feature_range=(-1,1))
scaler_PM25.fit(thudohanoi_df['PM25'].values.reshape(-1, 1))
scaler_AQI = MinMaxScaler(feature_range=(-1,1))
scaler_AQI.fit(thudohanoi_df['AQI_h'].values.reshape(-1, 1))


def reshape_array_and_save_to_path(arr_data, arr_label, path, timesteps, target_hour, data_type="Train"):
    """Flatten (samples, timesteps, features) arrays to 2-D, save them as
    compressed .npz files, then reload and verify the round trip by printing
    a shape/equality report. Returns None.
    """
    # reshaping the array from 3D
    # matrice to 2D matrice.
    arr_data_reshaped = arr_data.reshape(arr_data.shape[0], -1)
    arr_label_reshaped = arr_label.reshape(arr_label.shape[0], -1)
    # saving reshaped array to file.
    saved_data = np.savez_compressed(path + "/{}_{}_{}_data.npz".format(timesteps, target_hour, data_type), arr_data_reshaped)
    saved_label = np.savez_compressed(path + "/{}_{}_{}_label.npz".format(timesteps, target_hour, data_type), arr_label_reshaped)
    # retrieving data from file.
    loaded_arr_data_file = np.load(path + "/{}_{}_{}_data.npz".format(timesteps, target_hour, data_type), allow_pickle=True)
    loaded_arr_label_file = np.load(path + "/{}_{}_{}_label.npz".format(timesteps, target_hour, data_type), allow_pickle=True)
    loaded_arr_data = loaded_arr_data_file['arr_0']
    loaded_arr_data_file.close()
    loaded_arr_label = loaded_arr_label_file['arr_0'].ravel()
    loaded_arr_label_file.close()
    # This loadedArr is a 2D array, therefore we need to convert it back to
    # the original 3-D shape using the known feature count (last axis).
    loaded_arr_data = loaded_arr_data.reshape(
        loaded_arr_data.shape[0],
        loaded_arr_data.shape[1] // arr_data.shape[2],
        arr_data.shape[2])
    # check the shapes:
    print("Data array:")
    print("shape of arr: ", arr_data.shape)
    print("shape of loaded_array: ", loaded_arr_data.shape)
    # check if both arrays are same or not:
    if (arr_data == loaded_arr_data).all():
        print("Yes, both the arrays are same")
    else:
        print("No, both the arrays are not same")
    # check the shapes:
    print("Label array:")
    print("shape of arr: ", arr_label.shape)
    print("shape of loaded_array: ", loaded_arr_label.shape)
    # check if both arrays are same or not:
    if (arr_label == loaded_arr_label).all():
        print("Yes, both the arrays are same")
    else:
        print("No, both the arrays are not same")
    return None


def load_reshaped_array(timesteps, target_hour, folder_path, data_type="train"):
    """Load a saved (data, label) pair and restore the data's 3-D shape.

    The per-sample feature count is read from features.npy in folder_path.
    """
    features = np.load(folder_path + "/features.npy", allow_pickle=True).ravel()[0]
    loaded_file = np.load(folder_path + "/{}_{}_{}_data.npz".format(timesteps, target_hour, data_type), allow_pickle=True)
    loaded_data = loaded_file['arr_0']
    loaded_data = loaded_data.reshape(
        loaded_data.shape[0],
        loaded_data.shape[1] // features,
        features).astype(float)
    loaded_file.close()
    loaded_file_label = np.load(folder_path + "/{}_{}_{}_label.npz".format(timesteps, target_hour, data_type), allow_pickle=True)
    loaded_label = loaded_file_label['arr_0'].ravel().astype(float)
    loaded_file_label.close()
    return loaded_data, loaded_label


def create_tensorflow_dataset(arr_data, arr_label, batch_size):
    """Wrap the arrays in a repeating, fixed-batch tf.data pipeline.

    drop_remainder=True keeps every batch exactly batch_size long; the
    matching steps-per-epoch count is returned alongside the dataset.
    """
    tf_dataset = tf.data.Dataset.from_tensor_slices((arr_data, arr_label))
    tf_dataset = tf_dataset.repeat().batch(batch_size, drop_remainder=True)
    steps_per_epochs = len(arr_data) // batch_size
    return tf_dataset, steps_per_epochs


# +
# What we need to do:
# For each hour
#   For each timestep
#     Load the data
#     Create the model
#     Load the saved weights
#     Create all 4 metrics
#     Evaluate on the test set with 4 metrics.
# Plot that evaluation on a canvas
# -


class Custom_ModelCheckPoint(keras.callbacks.Callback):
    """Checkpoint callback: save weights when validation loss improves AND
    train/validation losses are close (gap <= 0.05), a crude overfitting
    guard. (Boilerplate commented-out callback stubs removed.)
    """

    def __init__(self, path, timesteps, target_hour):
        super(Custom_ModelCheckPoint, self).__init__()
        self.path = path
        self.timesteps = timesteps
        self.target_hour = target_hour

    def on_train_begin(self, logs=None):
        # Best validation loss observed so far.
        self.best_val = np.Inf

    def on_epoch_end(self, epoch, logs=None):
        current_train = logs.get('loss')
        current_val = logs.get('val_loss')
        # Only checkpoint when the generalization gap is small and val loss
        # beats the best recorded value.
        if np.less_equal(abs(current_train - current_val), 0.05):
            if np.less_equal(current_val, self.best_val):
                self.best_val = current_val
                self.model.save_weights(self.path + get_model_weigts_name(self.timesteps, self.target_hour))


class EarlyStoppingAtMinLossAndMinValLoss(keras.callbacks.Callback):
    """Stop training when neither loss nor val_loss improves any more.

    Arguments:
        patience: Number of epochs to wait after the last improvement of
            either monitored quantity. After that many epochs with no
            improvement, training stops and the best weights are restored.

    Fixes over the previous revision: `best_loss` was never initialized
    (a `bet_loss` typo, causing AttributeError on the first epoch),
    `self.best = current` referenced an undefined name, and the `wait`
    counter was never initialized/reset consistently.
    """

    def __init__(self, patience=0):
        super(EarlyStoppingAtMinLossAndMinValLoss, self).__init__()
        self.patience = patience
        # best_weights stores the weights at the epoch with minimum val_loss.
        self.best_weights = None

    def on_train_begin(self, logs=None):
        self.wait = 0              # epochs since the last improvement
        self.stopped_epoch = 0     # the epoch training stopped at
        self.best_loss = np.Inf
        self.best_val_loss = np.Inf

    def on_epoch_end(self, epoch, logs=None):
        current_loss = logs.get("loss")
        current_val_loss = logs.get("val_loss")
        improved = False
        if np.less_equal(current_loss, self.best_loss):
            self.best_loss = current_loss
            improved = True
        if np.less_equal(current_val_loss, self.best_val_loss):
            self.best_val_loss = current_val_loss
            # Track the best-generalizing weights for later restoration.
            self.best_weights = self.model.get_weights()
            improved = True
        if improved:
            self.wait = 0
        else:
            self.wait += 1
            if self.wait >= self.patience:
                self.stopped_epoch = epoch
                self.model.stop_training = True
                if self.best_weights is not None:
                    print("Restoring model weights from the end of the best epoch.")
                    self.model.set_weights(self.best_weights)

    def on_train_end(self, logs=None):
        if self.stopped_epoch > 0:
            print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))


# +
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import optimizers, metrics, initializers
from tensorflow.keras import backend as K
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
import matplotlib.pyplot as plt
import random
import pandas as pd
from os import mkdir

root_path = "/mnt/4ba37af6-51fd-47bc-8321-8c500c229114/study/School/KHOA LUAN TOT NGHIEP/"
_data_to_model_path = root_path + "Data/thudohanoi/data_to_model_hanoi/"
_model_path = root_path + "program/saved_models/latest_model/Hanoi/"

try:
    mkdir('{}program/saved_models/fig/'.format(root_path))
except FileExistsError:
    pass


def get_model_name(timesteps, target_hour):
    """File name of the saved model for a (timesteps, target_hour) pair."""
    return 'model_{}_{}.h5'.format(timesteps, target_hour)


def get_model_weigts_name(timesteps, target_hour):
    """Checkpoint file name for a (timesteps, target_hour) pair.

    NOTE(review): the function name keeps the historical "weigts" spelling
    because existing callers and checkpoints use it.
    """
    return 'weights_{}_{}.ckpt'.format(timesteps, target_hour)


def root_mean_squared_error(y_true, y_pred):
    """RMSE as a Keras-backend metric (needed to deserialize the models)."""
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=0))


def mean_absolute_percentage_error(y_true, y_pred, sample_weight=None, multioutput='uniform_average'):
    """NumPy MAPE mirroring sklearn's signature (y_true FIRST).

    `epsilon` avoids division by zero when a target value is 0.
    """
    epsilon = np.finfo(np.float64).eps
    mape = np.abs(y_pred - y_true) / np.maximum(np.abs(y_true), epsilon)
    output_errors = np.average(mape, weights=sample_weight, axis=0)
    if isinstance(multioutput, str):
        if multioutput == 'raw_values':
            return output_errors
        elif multioutput == 'uniform_average':
            # pass None as weights to np.average: uniform mean
            multioutput = None
    return np.average(output_errors, weights=multioutput)


# Evaluate every saved model (one per timestep) for each target hour.
for hour in [1]:
    rmse = []
    r2 = []
    mae = []
    mape = []
    for timestep in range(1, 13):
        batch_size = 700
        test, y_test = load_reshaped_array(timestep, target_hour=hour, folder_path=_data_to_model_path, data_type="test")
        rand = random.randint(0, len(y_test)-40)
        # Drop the leading remainder so the sample count divides batch_size
        # (the dataset below batches with drop_remainder=True).
        if len(y_test) % batch_size != 0:
            remain_count = len(y_test) % batch_size
            test = test[remain_count:]
            y_test = y_test[remain_count:]
        test_data_tf, test_steps_per_epochs = create_tensorflow_dataset(test, y_test, batch_size)
        # model = create_model(batch_size=batch_size, timestep=timestep, features=7, dropout=0.2)
        model = keras.models.load_model(
            root_path + 'program/saved_models/latest_model/Hanoi/{}/model_of_{}_hour/{}'.format(
                timestep, hour, get_model_name(timestep, hour)),
            custom_objects={'LeakyReLU': layers.LeakyReLU(alpha=0.01),
                            'root_mean_squared_error': root_mean_squared_error})
        model.load_weights('{}{}/model_of_{}_hour/{}'.format(
            _model_path, timestep, hour,
            get_model_weigts_name(timesteps=timestep, target_hour=hour))).expect_partial()
        predict = model.predict(test_data_tf, steps=test_steps_per_epochs).ravel()
        # Undo the (-1, 1) scaling so the metrics are in real AQI units.
        scaler_AQI = MinMaxScaler(feature_range=(-1, 1))
        scaler_AQI.fit(thudohanoi_df['AQI_h'].values.reshape(-1, 1))
        y_test = scaler_AQI.inverse_transform(y_test.reshape(-1, 1))
        predict = scaler_AQI.inverse_transform(predict.reshape(-1, 1))
        print("=============================================\n")
        # Fixed: sklearn metrics take (y_true, y_pred); the arguments were
        # previously swapped, which silently changes R2 and MAPE.
        print("R2: {}".format(r2_score(y_test, predict)))
        print("Root mean squared error: {}".format(mean_squared_error(y_test, predict, squared=False)))
        print("Mean absolute percentage error: {}".format(mean_absolute_percentage_error(y_test, predict)))
        print("Mean absolute error: {}".format(mean_absolute_error(y_test, predict)))
        rmse.append(mean_squared_error(y_test, predict, squared=False))
        r2.append(r2_score(y_test, predict))
        mape.append(mean_absolute_percentage_error(y_test, predict))
        mae.append(mean_absolute_error(y_test, predict))
    # One curve per metric across the 12 timesteps.
    fig = plt.figure(figsize=(16, 9))
    ax = fig.add_subplot()
    ax.plot(range(1, 13), rmse)
    ax.plot(range(1, 13), r2)
    ax.plot(range(1, 13), mape)
    ax.plot(range(1, 13), mae)
    plt.show()
# -

model = create_model(128, 24, 7, 0.2, 2, True)
keras.utils.plot_model(model, "sample model.png", show_shapes=True)

model = keras.models.load_model(
    root_path + '/program/saved_models/latest_model/model_1_24.h5',
    custom_objects={'LeakyReLU': layers.LeakyReLU(alpha=0.01),
                    'root_mean_squared_error': root_mean_squared_error})
keras.utils.plot_model(model, "sample model.png", show_shapes=True)

model = keras.models.load_model(
    root_path + '/program/saved_models/experimental/{}/model_of_1_hour/model_{}.h5'.format(24, 1),
    custom_objects={'LeakyReLU': layers.LeakyReLU(alpha=0.01),
                    'root_mean_squared_error': root_mean_squared_error})
keras.utils.plot_model(model, "sample model.png", show_shapes=True)

y_test.shape

predict.shape

predict = model.predict(test_data_tf, steps=test_steps_per_epochs)

# Fixed: load_reshaped_array takes (timesteps, target_hour, folder_path, ...);
# the stray third positional argument previously collided with folder_path=
# and raised a TypeError.
test, y_test = load_reshaped_array(24, 1, folder_path=_data_to_model_path, data_type="test")

test_data_tf, test_steps_per_epochs = create_tensorflow_dataset(test, y_test, batch_size)
notebooks/Load model and plot result.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:remote-sensing] # language: python # name: conda-env-remote-sensing-py # --- # # Example of a DEM interpolation from pathlib import Path import skimage.io import verde as vd import numpy as np import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec # Make some fake data using the Kilauea DEM. kilauea = skimage.io.imread(Path("../data/kilauea/kilauea2009.tif"))[::-10, ::10] kilauea[kilauea == -9999] = np.nan x, y = np.meshgrid(np.arange(kilauea.shape[1]), np.arange(kilauea.shape[0])) x = x[~np.isnan(kilauea)].ravel() y = y[~np.isnan(kilauea)].ravel() kilauea = kilauea[~np.isnan(kilauea)].ravel() (x, y), kilauea = vd.BlockReduce(np.median, spacing=4).filter((x, y), kilauea) np.random.seed(0) x += np.random.uniform(-2, 2, size=x.size) y += np.random.uniform(-2, 2, size=y.size) kilauea.shape # Use Verde to interpolate the points into a DEM. kilauea_grid = vd.Spline(damping=1e-8, mindist=1).fit((x, y), kilauea).grid(spacing=4) kilauea_grid = vd.distance_mask((x, y), maxdist=5, grid=kilauea_grid) # Make a nice figure. 
# + fig = plt.figure(figsize=(10, 4)) grid = GridSpec(1, 61, figure=fig, wspace=0, hspace=0) axes =[ fig.add_subplot(grid[0, 0:30]), fig.add_subplot(grid[0, 30:60]), ] ax3 = fig.add_subplot(grid[0, 60]) ax1, ax2 = axes vmin, vmax = kilauea.min(), kilauea.max() ax1.scatter(x, y, c=kilauea, s=3, cmap="viridis", vmin=vmin, vmax=vmax) ax1.text( 3, 201, f"Point cloud", color="black", fontweight="normal", fontsize=14, verticalalignment="top", backgroundcolor="white", ) tmp = kilauea_grid.scalars.plot(ax=ax2, vmin=vmin, vmax=vmax, add_colorbar=False) ax2.text( 3, 201, f"Interpolated DEM", color="black", fontweight="normal", fontsize=14, verticalalignment="top", backgroundcolor="white", ) fig.colorbar(tmp, cax=ax3, label="elevation (meters)") for ax in axes: ax.axis("off") ax.set_xlim(x.min(), x.max()) ax.set_ylim(y.min(), y.max()) plt.tight_layout(pad=0, w_pad=0, h_pad=0) plt.savefig("../lectures/images/dem-generation.png", dpi=200, bbox_inches="tight", pad_inches=0)
code/dem_interpolation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: LVV-RI # language: python # name: lvv-ri # --- # # Lateral ventricle volume trajectories and response inhibition - prep # # Copyright (c) 2019, <NAME> # # *<NAME>, <NAME>, <NAME>* <br> # **Lateral ventricle volume trajectories predict response inhibition in older age - a # longitudinal brain imaging and machine learning approach** (to appear in PLOS ONE) # # ## Prepare # Data being used is derived from the original data processed from Freesurfer longitudinal stream, <br> # and results form the four conditions on the CWIT from D-KEFS test at Wave 3. # ### Packages and libraries # Enable inline plotting # %matplotlib inline import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # ## Reading and exploring data from IPython.display import Image Image(filename='../assets/Figure1.png', width=800) fn_data = '../data/lvv_ri_data.csv' fn_cwit = '../data/cwit_data.csv' df = pd.read_csv(fn_data) df_cwit = pd.read_csv(fn_cwit) # ### Explore data df.info() df_cwit.info() df.head(5).T df_cwit.head(5).T df.describe(percentiles = [.5]).T.round(2) df['Sex'].value_counts() rename_dict = { 'Subject': 'subj', 'Sex': 'gender', 'AcquisitionYearsW1': 'yrW1', 'AcquisitionYearsW2': 'yrW2', 'AcquisitionYearsW3': 'yrW3', 'Left-Lateral-Ventricle_W1': 'left_lvvW1', 'Left-Lateral-Ventricle_W2': 'left_lvvW2', 'Left-Lateral-Ventricle_W3': 'left_lvvW3', 'Right-Lateral-Ventricle_W1': 'right_lvvW1', 'Right-Lateral-Ventricle_W2': 'right_lvvW2', 'Right-Lateral-Ventricle_W3': 'right_lvvW3', 'EstimatedTotalIntraCranialVol_W3': 'eTIV', 'Stroop_3_R_W3': 'RI' } df.rename(columns = rename_dict, inplace = True) df.head(5).T # Preliminary exploration across gender pd.DataFrame(df.describe(percentiles=[0.5]).T.round(1)) # Preliminary exploration across gender 
pd.DataFrame(df_cwit.describe(percentiles=[0.5]).T.round(1)) # Preliminary exploration by gender pd.DataFrame(df.groupby('gender').describe(percentiles=[0.5]).T.unstack().T.round(1)) # Save data frame with renamed columns df.to_csv('../data/01_lvv_ri_renamed_data.csv', index=False) # ## Calculate eTIV-normalized volumes # + eTIV_NORMALIZED = True df_eTIV = df.copy() if eTIV_NORMALIZED: for col in ['left_lvvW1', 'left_lvvW2', 'left_lvvW3', 'right_lvvW1', 'right_lvvW2','right_lvvW3']: df_eTIV[col] = df[col]/df['eTIV'] df_eTIV.head().T # - pd.DataFrame(df_eTIV.groupby('gender').describe(percentiles=[0.5]).T.unstack().T.round(4)) # ### Save eTIV-normalized data to disk # Save data frame with renamed columns df_eTIV.to_csv('../data/01_lvv_ri_renamed_data_eTIV_norm.csv', index=False)
notebooks/01_lvv_ri_prep.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # One Type in multiple Tables
#
# The repository with the original R code does not provide code for this case but only refers to other projects that can no longer be replicated (the source website is *no longer* available).

# ## Messy Data
#
# > It’s also common to find data values about a single type of observational unit spread out over multiple tables or files. These tables and files are often split up by another variable, so that each represents a single year, person, or location.

# ## Tidy Data
#
# > As long as the format for individual records is consistent, this is an easy problem to fix:
# 1. Read the files into a list of tables.
# 2. For each table, add a new column that records the original file name (because the file name is often the value of an important variable).
# 3. Combine all tables into a single table.
5_one_type_in_multiple_tables.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # GradientBoostingClassifier with MaxAbsScaler # This Code template is for the Classification tasks using a GradientBoostingClassifier based on the Gradient Boosting Ensemble Learning Technique and feature rescaling technique MaxAbsScaler # ### Required Packages import warnings as wr import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import MaxAbsScaler from sklearn.model_selection import train_test_split from sklearn.ensemble import GradientBoostingClassifier from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report wr.filterwarnings('ignore') # ### Initialization # # Filepath of CSV file #filepath file_path="" # List of features which are required for model training . #x_values features=[] # Target feature for prediction. #y_value target='' # ### Data Fetching # # Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. # # We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. df=pd.read_csv(file_path) #reading file df.head()#displaying initial entries print('Number of rows are :',df.shape[0], ',and number of columns are :',df.shape[1]) df.columns.tolist() # ### Data Preprocessing # # Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. 
def NullClearner(df):
    """Impute missing values in a pandas Series, in place.

    Numeric (float64/int64) series are filled with their mean, every other
    series with its mode. Non-Series inputs are returned untouched.
    """
    if not isinstance(df, pd.Series):
        return df
    if df.dtype in ["float64", "int64"]:
        df.fillna(df.mean(), inplace=True)
    else:
        df.fillna(df.mode()[0], inplace=True)
    return df


def EncodeX(df):
    """One-hot encode the categorical columns of the feature frame."""
    encoded = pd.get_dummies(df)
    return encoded


def EncodeY(df):
    """Label-encode the target when it has more than two classes.

    Binary (or single-class) targets are returned unchanged. Otherwise the
    sorted unique values are mapped onto 0..n-1 via LabelEncoder and the
    mapping is printed for reference.
    """
    if len(df.unique()) <= 2:
        return df
    un_EncodedT = np.sort(pd.unique(df), axis=-1, kind='mergesort')
    df = LabelEncoder().fit_transform(df)
    EncodedT = list(range(len(un_EncodedT)))
    print("Encoded Target: {} to {}".format(un_EncodedT, EncodedT))
    return df
# Scale training and testing sets with MaxAbsScaler.
# FIX: the scaler must be fit on the training set only and then applied to the
# test set with transform(). The original called fit_transform() on X_test as
# well, which refits the scaler on test statistics (data leakage) and makes
# the two splits scaled by different factors.
scaler = MaxAbsScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
Supported criteria are ‘friedman_mse’ for the mean squared error with improvement score by Friedman, ‘mse’ for mean squared error, and ‘mae’ for the mean absolute error. The default value of ‘friedman_mse’ is generally the best as it can provide a better approximation in some cases. # # # 5. max_depth : int, default=3 # > The maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables. # # 6. max_features : {‘auto’, ‘sqrt’, ‘log2’}, int or float, default=None # > The number of features to consider when looking for the best split: # # 7. random_state : int, RandomState instance or None, default=None # > Controls both the randomness of the bootstrapping of the samples used when building trees (if <code>bootstrap=True</code>) and the sampling of the features to consider when looking for the best split at each node (if `max_features < n_features`). # # 8. verbose : int, default=0 # > Controls the verbosity when fitting and predicting. # # 9. n_iter_no_change : int, default=None # > n_iter_no_change is used to decide if early stopping will be used to terminate training when validation score is not improving. By default it is set to None to disable early stopping. If set to a number, it will set aside validation_fraction size of the training data as validation and terminate training when validation score is not improving in all of the previous n_iter_no_change numbers of iterations. The split is stratified. # # 10. tol : float, default=1e-4 # > Tolerance for the early stopping. When the loss is not improving by at least tol for <code>n_iter_no_change</code> iterations (if set to a number), the training stops. 
#training the GradientBoostingClassifier model = GradientBoostingClassifier(random_state = 50) model.fit(X_train, y_train) # #### Model Accuracy # score() method return the mean accuracy on the given test data and labels. # # In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100)) #prediction on testing set prediction=model.predict(X_test) # #### Confusion Matrix # # A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known. #ploting_confusion_matrix(model,X_test,y_test,cmap=plt.cm.Blues) cf_matrix=confusion_matrix(y_test,prediction) plt.figure(figsize=(7,6)) sns.heatmap(cf_matrix,annot=True,fmt="d") # #### Classification Report # # A Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False. # # * **where**: # - Precision:- Accuracy of positive predictions. # - Recall:- Fraction of positives that were correctly identified. # - f1-score:- percent of positive predictions were correct # - support:- Support is the number of actual occurrences of the class in the specified dataset. print(classification_report(y_test,model.predict(X_test))) # #### Feature Importances. # # The Feature importance refers to techniques that assign a score to features based on how useful they are for making the prediction. plt.figure(figsize=(8,6)) n_features = len(X.columns) plt.barh(range(n_features), model.feature_importances_, align='center') plt.yticks(np.arange(n_features), X.columns) plt.xlabel("Feature importance") plt.ylabel("Feature") plt.ylim(-1, n_features) # #### Creator:<NAME> , Github: [Profile](https://github.com/shreepad-nade) #
Classification/Gradient Boosting Machine/GradientBoostingClassifier_MaxAbsScaler.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # TDA with Python using the Gudhi Library # # # Building simplicial complexes from a point cloud : Rips and Alpha complexes # **Authors:** <NAME> and <NAME> import numpy as np import pickle as pickle import gudhi as gd from pylab import * from mpl_toolkits.mplot3d import Axes3D # %matplotlib inline # TDA typically aims at extracting topological signatures from a point cloud in $\mathbb R^d$ or in a general metric space. [Simplicial complexes](https://en.wikipedia.org/wiki/Simplicial_complex) are used in computational geometry to infer topological signatures from a point cloud. # # This tutorial explains how to build [Vietoris-Rips complexes](https://en.wikipedia.org/wiki/Vietoris%E2%80%93Rips_complex) and [alpha complexes](https://en.wikipedia.org/wiki/Alpha_shape#Alpha_complex) from a data point in $\mathbb R ^d$. # # ![title](https://upload.wikimedia.org/wikipedia/commons/thumb/d/d0/VR_complex.svg/600px-VR_complex.svg.png) # # The walk of 3 persons A, B and C, has been recorded using the accelerometer sensor of a smartphone in their pocket, giving rise to 3 multivariate time series in $\mathbb R ^d$: each time series represents the 3 coordinates of the acceleration of the corresponding walker in a coordinate system attached to the sensor. # # Notice that the smartphone was carried in a possibly different position for each person and thus these time series cannot be compared coordinates by coordinates. # # Using a sliding window, each serie have been splitted in a list of 100 times series made of 200 consecutive points, that are stored in `data_A`, `data_B` and `data_C`. 
# Load the pre-windowed accelerometer data for the three walkers.
# Improvement: a context manager closes the file handle even if pickle.load
# raises, instead of the manual open()/close() pair.
with open("./datasets/data_acc", "rb") as f:
    data = pickle.load(f)

data_A = data[0]  # windows of walker A's 3d acceleration
data_B = data[1]  # windows of walker B
data_C = data[2]  # windows of walker C
label = data[3]   # labels for each window
print(label)
In GUDHI, this is performed via the `RipsComplex()` function (see the [Documentation](http://gudhi.gforge.inria.fr/python/latest/rips_complex_user.html) for details on the syntax): skeleton = gd.RipsComplex(points = data_A_sample, max_edge_length = 0.2) # The `max_edge_length` parameter is the maximal diameter: only edges of length smaller than or equal to this value are included in the 1-skeleton. # # From the $\alpha$-*truncated* 1-skeleton, it is then possible to add higher dimensional simplices to the simplicial complex, subject to the condition that all their faces are already in the complex. Their filtration value is then defined as the maximum filtration value of its faces which matches their diameter by design. This process necessarily yields the desired $\alpha$-Rips complex since filtration values can never exceeds $\alpha$ in this way. # # In practice, one should define a maximal dimension of simplicies to be added to the Rips complex for computational reasons. In GUDHI, this is achieved through the `create_simplex_tree()` method of the `RipsComplex` class which takes the argument `max_dimension` to limit the tree: Rips_simplex_tree_sample = skeleton.create_simplex_tree(max_dimension = 3) # The `max_dimension` parameter is the maximum dimension of the simplices included in the filtration. The result is a simplex tree, of dimension 3 in this example: Rips_simplex_tree_sample.dimension() # We can use the methods of the simplex tree object to describe the Rips filtration. For instance, we can check that the 200 points of `data_A_sample` are all vertices of the Rips filtration: Rips_simplex_tree_sample.num_vertices() # The number of simplices in the Rips complex is: Rips_simplex_tree_sample.num_simplices() # Note that this is actually the number of simplices in the "last" Rips complex of the filtration, namely with parameter $\alpha=$ `max_edge_length=`0.2. 
# # Notice that the number of simplices in a Rips complex increases very fast with the number of points and the dimension. AND THE $\alpha$ ?? # Now let's compute the list of simplices in the Rips complex with the `get_filtration() ` function: rips_generator = Rips_simplex_tree_sample.get_filtration() # Let's print the 300 first elements in the list: rips_list = list(rips_generator) for splx in rips_list[0:300] : print(splx) # The filtration value is the diameter of the simplex, which is zero for the vertices of course. The first edge in the filtration is `[6, 34]`, these two points are the two closest points in `data_A_sample`, at distance $0.0100$ of each other. If you scroll down, you will see that the first triangle is `[4, 53, 191]`, for the filtration value $0.0327$. # ### Alpha complex # The [alpha complex](https://en.wikipedia.org/wiki/Alpha_shape#Alpha_complex) is a simplicial complex built from the finite cells of a [Delaunay triangulation](https://en.wikipedia.org/wiki/Delaunay_triangulation). Alpha complexes contain less simplices than Rips complexes and so they can be a better option. They are sub-complexes of the Delaunay complex and, as such, they are geometric simpicial complexes. # # The `AlphaComplex()` function directly computes the simplex tree representing the alpha complex: alpha_complex = gd.AlphaComplex(points = data_A_sample) st_alpha = alpha_complex.create_simplex_tree(max_alpha_square = 0.2**2) # Talk about `max_alpha_square` ? # # Talk about `default_filtration_value` ? # # The point cloud `data_A_sample` belongs to $\mathbb R^3$ and so does the Alpha Complex: st_alpha.dimension() # As for the Rips complex, the 200 points of `data_A_sample` are all vertices of the Alpha complex : st_alpha.num_vertices() # Note that the number of simplices in the Alpha complex is smaller than for the Rips complex: st_alpha.num_simplices()
Tuto-GUDHI-simplicial-complexes-from-data-points.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # MLB Modern Era Salary Analysis # # [Lahman’s Baseball Database](http://seanlahman.com/baseball-archive/statistics/) contains complete batting and pitching statistics from 1871 to 2013, plus fielding statistics, standings, team stats, managerial records, post-season data, and more. # # # ## Objective # # One of the topics covered in **Lahman's Baseball Database** is MLB annual salaries. This notebook provides a sample analysis that explores trends in player salaries. Several questions are addressed: # # 1. What is the average salary increase per league since 1985? # 2. What is the average salary increase per league since 1985? # 3. Can we predict future average salary increase per league? # # # ## Prepare Environment # Bootstrap notebook with necessary notebook and library dependencies. # # ### Prerequisites # This notebook requires the installation of the following software dependencies: # ``` # # !pip install statsmodels # ``` # + # Provide the inline code necessary for loading any required libraries # - import matplotlib.pyplot as plt # %matplotlib inline # ## Load the Data # # 1. Visit the [Lahman’s Baseball Database](http://seanlahman.com/baseball-archive/statistics/) and download the latest data. After unzipping the package, upload the **Salaries.csv** file to your workbench. # 2. Import the latest MLB Salary data and take a peak at the dataset. 
# <div class="alert" style="border: 1px solid #aaa; background: radial-gradient(ellipse at center, #ffffff 50%, #eee 100%);"> # <div class="row"> # <div class="col-sm-1"><img src="https://knowledgeanyhow.org/static/images/favicon_32x32.png" style="margin-top: -6px"/></div> # <div class="col-sm-11">In IBM Knowledge Anyhow Workbench, you can drag/drop the file on your workbench browser tab to simplify the uploading process.</div> # </div> # </div> import pandas as pd df_mlb_Salaries = pd.read_csv('/resources/mlb_Salaries_2011.csv').dropna() df_mlb_Salaries.describe() df_mlb_Salaries.plot() df_mlb_Salaries.tail() # ### Observations # Based on the 2013 data: # # * Between 1985 and 2013 there are 23,956 Salary records. # * The average annual Salary across all of MLB for the last 28 years is 1.8M USD. # * The largest annual Salary across all of MLB for the last 28 years is 33M USD. # ## Question 1: Average Salary Per League # What is the average salary increase per league since 1985? # # ### Data Munging # #### Step A: Partition the data by league american_league = df_mlb_Salaries.query('lgID == "AL"') national_league = df_mlb_Salaries.query('lgID == "NL"') # #### Step B: Create a pivot table for salaries per year per league american_league_avg_annual_salary = american_league.groupby(['yearID']).mean()[['salary']] national_league_avg_annual_salary = national_league.groupby(['yearID']).mean()[['salary']] # #### Step C: Create a Dataframe depicting average annual salaries per league per year. al = pd.Series(american_league_avg_annual_salary.salary, name="American League") nl = pd.Series(national_league_avg_annual_salary.salary, name="National League") df_league_salary_history = pd.concat([al, nl], axis=1) df_league_salary_history.head() # ### Data Exploration # Plot our results and compare annual league salary averages. 
def annual_salary_delta(previous_year_salary, current_year_salary):
    '''Compute the year-over-year salary delta.'''
    return current_year_salary - previous_year_salary


def annual_league_salary_increases(salaries):
    '''Return a list of annual salary deltas for a series of league salaries.

    The first entry is 0 because there is no prior year to compare against.
    BUG FIX: the original also forced the *last* entry to 0 (a copy-paste
    artifact), silently dropping the final year's real increase; the last
    delta is computable and is now computed like any other year.
    '''
    salincperyr = []
    for idx in range(len(salaries)):
        if idx == 0:
            salincperyr.append(0)
        else:
            salincperyr.append(annual_salary_delta(salaries.iloc[idx - 1],
                                                   salaries.iloc[idx]))
    return salincperyr
tablecols = [] tablecols.append(yvalues) tablecols.append(df_league_salary_history["American League"].tolist()) tablecols.append(df_league_salary_history["National League"].tolist()) tablecols.append(annual_league_salary_increases(df_league_salary_history["American League"])) tablecols.append(annual_league_salary_increases(df_league_salary_history["National League"])) df_league_salary_detailed_history = pd.DataFrame(tablecols).T df_league_salary_detailed_history.columns = ['Year','alAvgSalary', 'nlAvgSalary','alAnnualSalaryIncrease', 'nlAnnualSalaryIncrease'] df_league_salary_detailed_history.set_index(df_league_salary_detailed_history['Year']) df_league_salary_detailed_history # ### Explore Results # Plot comparison between annual league salary increases per year. x = df_league_salary_detailed_history['Year'] plt.plot(x,df_league_salary_detailed_history["alAnnualSalaryIncrease"], label="American League") plt.plot(x,df_league_salary_detailed_history["nlAnnualSalaryIncrease"], label="National League") plt.title('MLB Year-to-Year Salary Deltas') plt.xlabel('Year') plt.ylabel('Average Salary') xvalues = american_league_avg_annual_salary.index plt.xticks(xvalues, rotation='vertical') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.show() # ### Observations # # * Between 1985 and 2002 the leagues on average tended to offer similiar annual salary increases. # * After 2002 it would seem that the leagues were more reactionary as they tend to handle increases in opposite manners. # ## Question 3: Future Predictions # Can we predict future average salary increase per league? 
def create_season_opener_datetime_index(date_series):
    '''Return a DatetimeIndex of April 1st "season opener" dates, one per year.

    Each value in `date_series` is treated as a season year; April 1st is
    assumed as the opener date for that season.
    '''
    import datetime
    import pandas as pd
    openers = [datetime.date(int(year), 4, 1) for year in date_series]
    return pd.DatetimeIndex(openers)
plt.plot(predict_al_increases.index,predict_al_increases, label="American League") plt.plot(predict_nl_increases.index,predict_nl_increases, label="National League") plt.rcParams['xtick.major.pad']='15' plt.title('MLB Predictive Annual Averages Salary Increase') plt.xlabel('Year') plt.ylabel('Average Salary Increases') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.show() # #### Step B: Plot comparison between annual league salary increases per year and the predicted growth for each league. data_to_compare = {'AL Prediction': predict_al_increases, 'AL Actual': df_al_annual_salary_inc.alAnnualSalaryIncrease, 'NL Prediction': predict_nl_increases, 'NL Actual': df_nl_annual_salary_inc.nlAnnualSalaryIncrease } df_annual_growth_predictive_analysis = pd.DataFrame(data_to_compare) ax = df_annual_growth_predictive_analysis.plot() ax.set_ylabel('Average Salary Increases') plt.rcParams['xtick.major.pad']='15' plt.title('MLB Annual Salary Growth Predictive Analysis') plt.xlabel('Year') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.show() # ## Summary # # * Over a 28 year period the actual average increases are a bit sporadic. # * Given the degree of historic fluctuation and the small amount of data, it is difficult to predict future growth. # * The prediction models seem to imply (given the small sample set) a linear regression. # # ## References # # * [Lahman’s Baseball Database](http://seanlahman.com/baseball-archive/statistics/) # * [Data Analysis Workflow Navigation repository](https://github.com/vinomaster/dawn): This notebook outline was derived from the **Research Analysis Navigation Template**. 
# # <div class="alert" style="border: 1px solid #aaa; background: radial-gradient(ellipse at center, #ffffff 50%, #eee 100%);"> # <div class="row"> # <div class="col-sm-1"><img src="https://knowledgeanyhow.org/static/images/favicon_32x32.png" style="margin-top: -6px"/></div> # <div class="col-sm-11">This notebook was created using <a href="https://knowledgeanyhow.org">IBM Knowledge Anyhow Workbench</a>. To learn more, visit us at <a href="https://knowledgeanyhow.org">https://knowledgeanyhow.org</a>.</div> # </div> # </div>
mlb/mlb-salaries.ipynb
# Preprocess the graph attributes loaded from MutagenicityDataset.
# FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the one-hot
# cast now uses the builtin `int` dtype, which is what `np.int` aliased.
for i in range(len(labels)):
    # edge_indices[i], edges[i] = add_self_loops_to_edge_indices(edge_indices[i], np.expand_dims(edges[i],axis=-1))
    edges[i] = np.expand_dims(edges[i], axis=-1).astype(np.float32)  # Make edge feature dimension

for i in range(len(labels)):
    # One-hot encode atomic numbers against the 14 element types present in Mutagenicity.
    nodes[i] = np.array(
        np.expand_dims(nodes[i], axis=-1) == np.array([[1, 3, 6, 7, 8, 9, 11, 15, 16, 17, 19, 20, 35, 53]]),
        dtype=int)
edges_train, edge_indices_train = ragged_tensor_from_nested_numpy( nodes_train), ragged_tensor_from_nested_numpy(edges_train), ragged_tensor_from_nested_numpy( edge_indices_train) nodes_test, edges_test, edge_indices_test = ragged_tensor_from_nested_numpy( nodes_test), ragged_tensor_from_nested_numpy(edges_test), ragged_tensor_from_nested_numpy( edge_indices_test) xtrain = nodes_train, edges_train, edge_indices_train xtest = nodes_test, edges_test, edge_indices_test ytrain = np.expand_dims(labels_train, axis=-1) ytest = np.expand_dims(labels_test, axis=-1) print([x.shape for x in xtrain]) print([x.shape for x in xtest]) print(ytrain.shape, ytest.shape) # - # ## Make and train GCN # + model_args = {'name': "GCN", 'inputs': [{'shape': (None, 14), 'name': "node_attributes", 'dtype': 'float32', 'ragged': True}, {'shape': (None, 1), 'name': "edge_attributes", 'dtype': 'float32', 'ragged': True}, {'shape': (None, 2), 'name': "edge_indices", 'dtype': 'int64', 'ragged': True}], 'input_embedding': {"node": {"input_dim": 55, "output_dim": 64}, "edge": {"input_dim": 10, "output_dim": 64}}, 'output_embedding': 'graph', 'output_mlp': {"use_bias": [True, True, False], "units": [140, 70, 1], "activation": ['relu', 'relu', 'sigmoid']}, 'gcn_args': {"units": 64, "use_bias": True, "activation": 'relu', "pooling_method": 'mean', "is_sorted": False, "has_unconnected": True}, 'depth': 3, 'verbose': 1 } model = make_model(**model_args) # Set learning rate and epochs learning_rate_start = 1e-3 learning_rate_stop = 1e-4 epo = 150 epomin = 100 epostep = 10 # Compile model with optimizer and loss optimizer = tf.keras.optimizers.Adam(lr=learning_rate_start) cbks = LinearLearningRateScheduler(learning_rate_start, learning_rate_stop, epomin, epo) model.compile(loss='binary_crossentropy', optimizer=optimizer, weighted_metrics=['accuracy']) print(model.summary()) # Start and time training start = time.process_time() hist = model.fit(xtrain, ytrain, epochs=epo, batch_size=32, callbacks=[cbks], 
class ExplainableGCN(GNNInterface):
    """Adapter that exposes the trained GCN to kgcnn's GNNExplainer.

    Implements the GNNInterface hooks: plain prediction, prediction under
    soft edge/feature/node masks, input-size queries, and conversion of the
    learned masks into a networkx graph plus a matplotlib presentation.
    """

    def __init__(self, gnn_model, **kwargs):
        # Wrap an already-trained Keras GCN model; **kwargs are accepted but unused.
        super(ExplainableGCN, self).__init__()
        self.gnn_model = gnn_model

    def predict(self, gnn_input, masking_info=None):
        # Unmasked forward pass; [0] selects the single graph in the batch.
        return self.gnn_model(gnn_input, training=False)[0]

    def masked_predict(self, gnn_input, edge_mask, feature_mask, node_mask, training=False):
        # Apply the three soft masks multiplicatively on the ragged inputs
        # before the forward pass, as the GNNExplainer optimization requires.
        node_input, edge_input, edge_index_input = gnn_input
        masked_edge_input = tf.ragged.map_flat_values(tf.math.multiply, edge_input, edge_mask)
        # The feature mask is shared across nodes, hence the transpose so it
        # broadcasts over the flat node dimension.
        masked_feature_input = tf.ragged.map_flat_values(tf.math.multiply,
                                                         tf.dtypes.cast(node_input, tf.float32),
                                                         tf.transpose(feature_mask))
        masked_node_feature_input = tf.ragged.map_flat_values(tf.math.multiply, masked_feature_input, node_mask)
        masked_pred = self.gnn_model([masked_node_feature_input, masked_edge_input, edge_index_input],
                                     training=training)[0]
        return masked_pred

    def get_number_of_nodes(self, gnn_input):
        # Node count of the (single) graph in the ragged batch.
        node_input, _, _ = gnn_input
        return node_input[0].shape[0]

    def get_number_of_node_features(self, gnn_input):
        # Width of the one-hot node feature dimension.
        node_input, _, _ = gnn_input
        return node_input.shape[2]

    def get_number_of_edges(self, gnn_input):
        # Edge count of the (single) graph in the ragged batch.
        _, edge_input, _ = gnn_input
        return edge_input[0].shape[0]

    def get_explanation(self, gnn_input, edge_mask, feature_mask, node_mask):
        # Build a networkx graph carrying per-node and per-edge relevance,
        # and return it together with the per-feature relevance vector.
        edge_relevance = np.array(edge_mask[:, 0])
        node_relevance = np.array(node_mask[:, 0])
        feature_relevance = np.array(feature_mask[:, 0])
        features = np.array(gnn_input[0][0])
        edges = np.array(gnn_input[2][0])
        graph = nx.Graph()
        for i, f in enumerate(features):
            graph.add_node(i, features=f, relevance=node_relevance[i])
        for i, e in enumerate(edges):
            # NOTE(review): edge_relevance is always an ndarray here (never
            # None), so this branch appears to be dead — confirm before removal.
            if edge_relevance is None:
                graph.add_edge(e[0], e[1])
            else:
                graph.add_edge(e[0], e[1], relevance=edge_relevance[i])
        return graph, feature_relevance

    def present_explanation(self, explanation, threshold=0.5):
        # Draw the explanation graph: edge alpha encodes edge relevance, node
        # alpha encodes node relevance, and node labels show element symbols.
        # NOTE(review): `threshold` and `important_edges` are unused.
        graph = explanation[0]
        # element_labels = np.array([[ 1, 3, 6, 7, 8, 9, 11, 15, 16, 17, 19, 20, 35, 53]])
        # Element symbols in the same order as the one-hot encoding above.
        element_labels = ['H', 'Li', 'C', 'N', 'O', 'F', 'Na', 'P', 'S', 'Cl', 'K', 'Ca', 'Br', 'I']
        important_edges = []
        color_map = []
        node_color_map = []
        node_labels = {}
        for (u, v, relevance) in graph.edges.data('relevance'):
            # Lift edge alpha slightly so even low-relevance edges stay visible.
            relevance = min(relevance + 0.1, 1.0)
            color_map.append((0, 0, 0, relevance))
        for n, f in graph.nodes.data('features'):
            element = np.argmax(f)
            r, g, b, a = plt.get_cmap('tab20')(element)
            node_color_map.append((r, g, b, graph.nodes[n]['relevance']))
            node_labels[n] = (element_labels[element])
        if np.all(explanation[1] == 1):
            # Feature mask was not optimized: draw the graph alone.
            nx.draw_kamada_kawai(graph, edge_color=color_map, labels=node_labels,
                                 node_color=node_color_map)
        else:
            # Otherwise add a bar chart of per-element feature relevance.
            f, axs = plt.subplots(2, figsize=(8, 12))
            nx.draw_kamada_kawai(graph, ax=axs[0], edge_color=color_map, labels=node_labels,
                                 node_color=node_color_map)
            bar_colors = [plt.get_cmap('tab20')(element) for element in np.arange(14)]
            axs[1].bar(np.array(element_labels), explanation[1], color=bar_colors)
'edge_mask_norm_ord': 1, 'feature_mask_loss_weight': 0, 'feature_mask_norm_ord': 1, 'node_mask_loss_weight': 0, 'node_mask_norm_ord': 1} explainer = GNNExplainer(explainable_gcn, compile_options=compile_options, fit_options=fit_options, gnnexplaineroptimizer_options=gnnexplaineroptimizer_options) inspection_result = explainer.explain([tensor[776:777] for tensor in xtest], inspection=True) # - explainer.present_explanation(explainer.get_explanation(), threshold=0.5) # Plot predicion plt.figure() plt.plot(inspection_result['predictions']) plt.xlabel('Iterations') plt.ylabel('GNN output') plt.show() # PLot loss plt.figure() plt.plot(inspection_result['total_loss']) plt.xlabel('Iterations') plt.ylabel('Total Loss') plt.show() # Plot Edge Mask loss plt.figure() plt.plot(inspection_result['edge_mask_loss']) plt.xlabel('Iterations') plt.ylabel('Node Mask Loss') plt.show() # sample 200 mutagenic molecules: pred = model.predict(xtest)[:,0] sampled_mutagenic_molecules = np.random.choice(np.argwhere(pred < 0.5)[:,0], 200) print(sampled_mutagenic_molecules) # Generate explanations for all those 50 molecules (this will take a while): explanations = [] for i,mol_index in enumerate(sampled_mutagenic_molecules): explainer.explain([tensor[mol_index:mol_index+1] for tensor in xtest]) print(i, end=',') explanations.append(explainer.get_explanation()) # We transform the explanation graphs to vectors, in order to apply a cluster algorithm on the explanation vectors: def explanation_to_vector(explanation): graph = explanation[0] bond_matrix = np.zeros((14,14)) for (u, v, relevance) in graph.edges.data('relevance'): atom1 = np.argwhere(graph.nodes[u]['features']==1)[0] atom2 = np.argwhere(graph.nodes[v]['features']==1)[0] bond_matrix[atom1, atom2] += relevance bond_matrix[atom2, atom1] += relevance bond_vector = bond_matrix[np.triu_indices(bond_matrix.shape[0])] bond_vector = bond_vector / np.sum(bond_vector) return bond_vector explanation_vectors = [explanation_to_vector(expl) for expl 
in explanations] # a dendogram of the explanation vectors: plt.figure() linked = linkage(explanation_vectors, 'complete', metric='cityblock') dendrogram(linked, orientation='top', distance_sort='descending', show_leaf_counts=True) plt.show() # Print one representative graph explanation for each cluster: num_clusters = 7 db = AgglomerativeClustering(n_clusters=num_clusters, affinity='manhattan', linkage='complete').fit(explanation_vectors) vector_clusters = [] explanation_clusters = [] for cluster_ind in range(num_clusters): plt.figure() vector_cluster = np.array([explanation_vectors[i] for i in np.argwhere(db.labels_ == cluster_ind)[:,0]]) vector_clusters.append(vector_cluster) explanation_cluster = [explanations[i] for i in np.argwhere(db.labels_ == cluster_ind)[:,0]] explanation_clusters.append(explanation_cluster) cluster_mean = np.mean(vector_cluster, axis=0) dist = cdist(np.array([cluster_mean]), vector_cluster)[0] print(vector_cluster.shape) ax = plt.subplot() explainer.present_explanation(explanation_cluster[np.argmin(dist)]) plt.show()
notebooks/graph_explainer/explain_GNNExplain_mutagenicity_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Demo notebook for accessing NLCD data on Azure # # This notebook provides an example of accessing NLCD (National Land Cover Database) data from blob storage on Azure, including (1) looking at the directory where data is stored, (2) load the cloud optimized geotiff for a aprticular year, and (3) doing some simple processing and plotting of the data. # # The U.S. Geological Survey (USGS), in partnership with several federal agencies, has developed and released four National Land Cover Database (NLCD) products over the past two decades: NLCD 1992, 2001, 2006, and 2011. These products provide spatially explicit and reliable information on the Nation’s land cover and land cover change. # + import fsspec import matplotlib.pyplot as plt import numpy as np import xarray as xr # + # view the directory of available data account_name = 'carbonplan' # change to your institution! container = 'carbonplan-data' # change! area = 'conus' # alaska can be accessed through 'ak' fs = fsspec.get_filesystem_class('az')(account_name=account_name) fs.ls(path=f'{container}/raw/nlcd/{area}/30m/') # + # load data for a single year year = 2001 da = xr.open_rasterio(f'https://{account_name}.blob.core.windows.net/{container}/raw/nlcd/{area}/30m/{year}.tif', chunks=dict(x=2560, y=2560)) # a trick to use dask and chuck the data to get better performance # transform our data array to dataset by selecting the only data variable band # rename variable to something useful ds = da.to_dataset(dim='band').rename({1: 'landcover'}) ds # + # let's look at forest lands in the U.S. 
# land cover data are stored as categorical variables, and the legend for the code can be found in # https://www.mrlc.gov/data/legends/national-land-cover-database-2016-nlcd2016-legend # forest lands corresponds to codes 41, 42, and 43 deciduous = ds.landcover.isin([41]).astype(int) evergreen = ds.landcover.isin([42]).astype(int) mixed = ds.landcover.isin([43]).astype(int) # coarsen by 100x on each dimension to get to a reasonable plotting size factor = 100 deciduous = deciduous.coarsen(dim={'x': factor, 'y': factor}, boundary='trim').mean().compute() evergreen = evergreen.coarsen(dim={'x': factor, 'y': factor}, boundary='trim').mean().compute() mixed = mixed.coarsen(dim={'x': factor, 'y': factor}, boundary='trim').mean().compute() # - # let's now plot the different forests and see their distributions in the continental US plt.figure(figsize=(15,4)) ax = plt.subplot(1, 3, 1) deciduous.plot(cmap='YlGn') plt.title('Deciduous forest fraction') plt.subplot(1, 3, 2) evergreen.plot(cmap='YlGn') plt.title('Evergreen forest fraction') plt.subplot(1, 3, 3) mixed.plot(cmap='YlGn') plt.title('Mixed forest fraction') plt.show() plt.close()
nlcd.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6.9 64-bit
#     language: python
#     name: python36964biteb906d34f39c46cdba4549d9faab3c2f
# ---

# # Modified CKY

# +
import numpy as np


def CKY(s):
    """Return True iff string *s* is derivable from S0 in the CNF grammar below.

    Uses a reduced CKY-style pass: a 3-D table with one layer per non-terminal,
    a terminal row, and a diagonal filled right-to-left by the binary rules.

    Fixes over the original version:
    - ``non_term`` was a dict literal with a duplicated ``'S0'`` key, so the
      production S0 -> 'a' was silently dropped; it is now a list of pairs.
    - The span loop ran to ``N`` inclusive, driving ``k`` to ``-1``; the
      negative index wrapped around to row ``N-1`` and overwrote terminal
      entries. The loop now stops at ``N-1`` (same results, no wraparound).
    - The empty string is rejected explicitly instead of raising IndexError
      (the grammar has no epsilon production).
    """
    # Binary productions, (head, body) in Chomsky normal form.
    rules = [
        ['S0', 'AS'], ['S0', 'BS'], ['S0', 'BA'], ['S0', 'AB'], ['S0', 'AA'], ['S0', 'BB'],
        ['S', 'AS'], ['S', 'BS'], ['S', 'BA'], ['S', 'AB'], ['S', 'AA'], ['S', 'BB'],
    ]
    # Terminal productions, head -> terminal (list of pairs so one head may
    # produce several terminals).
    non_term = [('S0', 'a'), ('S0', 'b'), ('A', 'a'), ('B', 'b')]
    # Layer index for each non-terminal in the 3-D table.
    index = {'S0': 0, 'A': 1, 'B': 2, 'S': 3}

    N = len(s)
    if N == 0:
        return False  # no epsilon production in the grammar
    matrix = np.zeros((len(index), N, N))
    # Terminal pass: mark, in row N-1, which non-terminals produce each symbol.
    for i in range(N):
        for key, value in non_term:
            if value == s[i]:
                matrix[index[key], N - 1, i] = 1
    # Binary pass, adapted from the pseudo-code to start from 0 instead of 1.
    for span in range(1, N):
        k = N - 1 - span
        for key, value in rules:
            left = matrix[index[value[0]], N - 1, k]
            right = matrix[index[value[1]], k + 1, k + 1]
            matrix[index[key], k, k] += right * left
    return matrix[0, 0, 0] > 0


print('SHOULD BE TRUE')
s = "babbabbbb"
print(CKY(s))
print('SHOULD BE FALSE')
s = "babbabCbbb"
print(CKY(s))
A2/Q3/Q3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Regression Discontinuity Design # # # We don't stop to think about it much, but it is impressive how smooth nature is. You can't grow a tree without first getting a bud, you can't teleport from one place to another, a wound takes its time to heal. Even in the social realm, smoothness seems to be the norm. You can't grow a business in one day, consistency and hard work are required to build wealth and it takes years before you learn how linear regression works. Under normal circumstances, nature is very cohesive and doesn't jump around much. # # # > When the intelligent and animal souls are held together in one embrace, they can be kept from separating. # # \- <NAME>, <NAME>. # # Which means that **when we do see jumps and spikes, they are probably artificial** and often man made situations. These events are usually accompanied by counterfactuals to the normal way of things: if a weird thing happens, this gives us some insight into what would have happened if nature was to work in a different way. Exploring these artificial jumps is at the core of Regression Discontinuity Design. # # ![img](./data/img/rdd/smooth.png) # # The basic setup goes like this. Imagine that you have a treatment variable \\(T\\) and potential outcomes \\(Y_0\\) and \\(Y_1\\). The treatment T is a discontinuous function of an observed running variable \\(R\\) such that # # $ # D_i = \mathcal{1}\{R_i>c\} # $ # # In other words, this is saying that treatment is zero when \\(R\\) is below a threshold \\(c\\) and one otherwise. This means that we get to observe \\(Y_1\\) when \\(R>c\\) and \\(Y_0\\) when \\(R<c\\). To wrap our head around this, think about the potential outcomes as 2 functions that we can't observe entirely. 
Both \\(Y_0(R)\\) and \\(Y_1(R)\\) are there, we just can't see that. The threshold acts as a switch that allows us to see one or the other of those function, but never both, much like in the image below: # # ![img](./data/img/rdd/rdd.png) # # The idea of regression discontinuity is to compare the outcome just above and just below the threshold to identify the treatment effect at the threshold. This is called a **sharp RD** design, since the probability of getting the treatment jumps from 0 to 1 at the threshold, but we could also think about a **fuzzy RD** design, where the probability also jumps, but is a less dramatic manner. # # ## Is Alcohol Killing You? # # A very relevant public policy question is what should be the minimal drinking age. Most countries, Brazil included, set it to be 18 year, but in the US (most states) it is currently 21. So, is it the case that the US is being overly prudent and that they should lower their minimal drinking age? Or is it the case that other countries should make their legal drinking age higher? # # One way to look at this question is from a [mortality rate perspective (Carpenter and Dobkin, 2009)](https://www.aeaweb.org/articles?id=10.1257/app.1.1.164). From the public policy standpoint, one could argue that we should lower the mortality rate as much as possible. If alcohol consumption increases the mortality rate by a lot, we should avoid lowering the minimum drinking age. This would be consistent with the objective of lowering deaths caused by alcohol consumption. # # To estimate the impacts of alcohol on death, we could use the fact that legal drinking age imposes a discontinuity on nature. In the US, those just under 21 years don't drink (or drink much less) while those just older than 21 do drink. This means that the probability of drinking jumps at 21 years and that is something we can explore with an RDD. 
# + import warnings warnings.filterwarnings('ignore') import pandas as pd import numpy as np from matplotlib import style from matplotlib import pyplot as plt import seaborn as sns import statsmodels.formula.api as smf # %matplotlib inline style.use("fivethirtyeight") # - # To do so we can grab some mortality data aggregated by age. Each row is the average age of a group of people and the average mortality by all causes (`all`), by moving veiche accident (`mva`) and by suicide (`suicide`). drinking = pd.read_csv("./data/drinking.csv") drinking.head()[["agecell", "all", "mva", "suicide"]] # Just to aid visibility (and for another important reason we will see later) we will centralize the running variable `agecell` at the threshold 21. drinking["agecell"] -= 21 # If we plot the multiple outcome variables (`all`, `mva`, `suicide`) with the runing variable on the x axis, we get some visual cue about some sort of jump in mortality as we cross the legal drinking age. # + plt.figure(figsize=(8,8)) ax = plt.subplot(3,1,1) drinking.plot.scatter(x="agecell", y="all", ax=ax) plt.title("Death Cause by Age (Centered at 0)") ax = plt.subplot(3,1,2, sharex=ax) drinking.plot.scatter(x="agecell", y="mva", ax=ax) ax = plt.subplot(3,1,3, sharex=ax) drinking.plot.scatter(x="agecell", y="suicide", ax=ax); # - # There are some cues, but we need more than that. What exactly is the effect of drinking on mortality at the threshold? And what is the standard error on that estimate? # # ## RDD Estimation # # The key assumption that RDD relies on is the smoothness of the potential outcome at the threshold. Formally, the limits of the potential outcomes as the running variable approaches the threshold from the right and from the left should be the same. 
# # $$ # \lim_{r \to c^-} E[Y_{ti}|R_i=r] = \lim_{r \to c^+} E[Y_{ti}|R_i=r] # $$ # # If this holds true, we can find the causal effect at the threshold # # \begin{align} # \lim_{r \to c^+} E[Y_{ti}|R_i=r] - \lim_{r \to c^-} E[Y_{ti}|R_i=r]=&\lim_{r \to c^+} E[Y_{1i}|R_i=r] - \lim_{r \to c^-} E[Y_{0i}|R_i=r] \\ # =& E[Y_{1i}|R_i=r] - E[Y_{0i}|R_i=r] \\ # =& E[Y_{1i} - Y_{0i}|R_i=r] # \end{align} # # This is, in its own way, a sort of Local Average Treatment Effect (LATE), since we can only know it at the threshold. In this setting, we can think of RDD as a local randomized trial. For those at the threshold, the treatment could have gone either way and, by chance, some people fell below the threshold, and some people fell above. In our example, at the same point in time, some people are just above 21 years and some people are just below 21. What determines this is if someone was born some days later or not, which is pretty random. For this reason, RDD provides a very compelling causal story. It is not the golden standard of RCT, but it is close. # # Now, to estimate the treatment effect at the threshold, all we need to do is estimate both of the limits in the formula above and compare them. The simplest way to do that is by running a linear regression # # ![img](./data/img/rdd/ols.png) # # To make it work, we interact a dummy for being above the threshold with the running variable # # $ # y_i = \beta_0 + \beta_1 r_i + \beta_2 \mathcal{1}\{r_i>c\} + \beta_3 \mathcal{1}\{r_i>c\} r_i # $ # # Essentially, this is the same as fitting a linear regression above the threshold and another below it. The parameter \\(\beta_0\\) is the intercept of the regression below the threshold and \\(\beta_0+\beta_2\\) is the intercept for the regression above the threshold. # # Here is where the trick of centering the running variable at the threshold comes into play. After this pre-processing step, the threshold becomes zero. 
This causes the intercept \\(\beta_0\\) to be the predicted value at the threshold, for the regression below it. In other words, \\(\beta_0=\lim_{r \to c^-} E[Y_{ti}|R_i=r]\\). By the same reasoning, \\(\beta_0+\beta_2\\) is the limit of the outcome from above. Wich means, that # # $ # \lim_{r \to c^+} E[Y_{ti}|R_i=r] - \lim_{r \to c^-} E[Y_{ti}|R_i=r]=\beta_2=E[ATE|R=c] # $ # # Here is what this looks like in code for the case where we want to estimate the effect of alcohol consumption on death by all causes at 21 years. # + rdd_df = drinking.assign(threshold=(drinking["agecell"] > 0).astype(int)) model = smf.wls("all~agecell*threshold", rdd_df).fit() model.summary().tables[1] # - # This model is telling us that mortality increases by 7.6627 points with the consumption of alcohol. Another way of putting this is that alcohol increases the chance of death by all causes by 8% ((7.6627+93.6184)/93.6184). Notice that this also gives us standard errors for our causal effect estimate. In this case, the effect is statistically significant, since the p-value is below 0.01. # # If we want to verify this model visually, we can show the predicted values on the data that we have. You can see that it is as though we had 2 regression models: one for those above the threshold and one for below it. ax = drinking.plot.scatter(x="agecell", y="all", color="C0") drinking.assign(predictions=model.fittedvalues).plot(x="agecell", y="predictions", ax=ax, color="C1") plt.title("Regression Discontinuity"); # If we do the same for the other causes, this is what we get. 
# + plt.figure(figsize=(8,8)) for p, cause in enumerate(["all", "mva", "suicide"], 1): ax = plt.subplot(3,1,p) drinking.plot.scatter(x="agecell", y=cause, ax=ax) m = smf.wls(f"{cause}~agecell*threshold", rdd_df).fit() ate_pct = 100*((m.params["threshold"] + m.params["Intercept"])/m.params["Intercept"] - 1) drinking.assign(predictions=m.fittedvalues).plot(x="agecell", y="predictions", ax=ax, color="C1") plt.title(f"Impact of Alcohol on Death: {np.round(ate_pct, 2)}%") plt.tight_layout() # - # RDD is telling us that alcohol increases the chance of deth by suicide and car accidents by 15%, wich is a pretty significant ammount. These results are compelling arguments to not lower the drinking age, if we want to minimize mortality rates. # # ### Kernel Weighting # # Regression Discontinuity relies heavily on the extrapolations properties of linear regression. Since we are looking at the values at the beginning and end of 2 regression lines, we better get those limits right. What can happen is that regression might focus too much on fitting the other data points at the cost of a poor fit at the threshold. If this happens, we might get the wrong measure of the treatment effect. # # One way to solve this is to give higher weights for the points that are closer to the threshold. There are many ways to do this, but a popular one is to reweight the samples with the **triangular kernel** # # $ # K(R, c, h) = \mathcal{1}\{|R-c| \leq h\} * \bigg(1-\frac{|R-c|}{h}\bigg) # $ # # The first part of this kernel is an indicator function to whether we are close to the threshold. How close? This is determined by a bandwidth parameter \\(h\\). The second part of this kernel is a weighting function. As we move away from the threshold, the weights get smaller and smaller. These weights are divided by the bandwidth. If the bandwidth is large, the weights get smaller at a slower rate. If the bandwidth is small, the weights quickly go to zero. 
# # To make it easier to understand, here is what the weights look like for this kernel applied to our problem. I've set the bandwidth to be 1 here, meaning we will only consider data from people that are no older than 22 years and no younger than 20 years. def kernel(R, c, h): indicator = (np.abs(R-c) <= h).astype(float) return indicator * (1 - np.abs(R-c)/h) plt.plot(drinking["agecell"], kernel(drinking["agecell"], c=0, h=1)) plt.xlabel("agecell") plt.ylabel("Weight") plt.title("Kernel Weight by Age"); # If we apply these weights to our original problem, the impact of alcohol gets bigger, at least for all causes. It jumps from 7.6627 to 9.7004. The result remains very significant. Also, notice that I'm using `wls` instead of `ols` # + model = smf.wls("all~agecell*threshold", rdd_df, weights=kernel(drinking["agecell"], c=0, h=1)).fit() model.summary().tables[1] # - ax = drinking.plot.scatter(x="agecell", y="all", color="C0") drinking.assign(predictions=model.fittedvalues).plot(x="agecell", y="predictions", ax=ax, color="C1") plt.title("Regression Discontinuity (Local Regression)"); # And here is what it looks like for the other causes of death. Notice how the regression on the right is more negatively sloped since it disconsiders the right most points. # + plt.figure(figsize=(8,8)) weights = kernel(drinking["agecell"], c=0, h=1) for p, cause in enumerate(["all", "mva", "suicide"], 1): ax = plt.subplot(3,1,p) drinking.plot.scatter(x="agecell", y=cause, ax=ax) m = smf.wls(f"{cause}~agecell*threshold", rdd_df, weights=weights).fit() ate_pct = 100*((m.params["threshold"] + m.params["Intercept"])/m.params["Intercept"] - 1) drinking.assign(predictions=m.fittedvalues).plot(x="agecell", y="predictions", ax=ax, color="C1") plt.title(f"Impact of Alcohol on Death: {np.round(ate_pct, 2)}%") plt.tight_layout() # - # With the exception of suicide, it looks like adding the kernel weight made the negative impact on alcohol bigger. 
Once again, if we want to minimize the death rate, we should NOT recommend lowering the legal drinking age, since there is a clear impact of alcohol on the death rates. # # This simple case covers what happens when regression discontinuity design works perfectly. Next, we will see some diagnostics that we should run in order to check how much we can trust RDD and talk about a topic that is very dear to our heart: the effect of education on earnings. # # ## Sheepskin Effect and Fuzzy RDD # # When it comes to the effect of education on earnings, there are two major views in economics. The first one is the widely known argument that education increases human capital, increasing productivity and thus, earnings. In this view, education actually changes you for the better. Another view is that education is simply a signaling mechanism. It just puts you through all these hard tests and academic tasks. If you can make it, it signals to the market that you are a good employee. In this way, education doesn't make you more productive. It only tells the market how productive you have always been. What matters here is the diploma. If you have it, you will be paid more. We refer to this as the **sheepskin effect**, since diplomas were printed in sheepskin in the past. # # To test this hypothesis, [<NAME>](https://faculty.smu.edu/millimet/classes/eco7321/papers/clark%20martorell%202014.pdf) used regression discontinuity to measure the effect of graduating 12th grade on earnings. In order to do that, they had to think about some running variable where students that fall above it graduate and those who fall below it, don't. They found such data in the Texas education system. # # In order to graduate in Texas, one has to pass an exam. Testing starts at 10th grade and students can do it multiple times, but eventually, they face a last chance exam at the end of 12th grade. 
The idea was to get data from students who took those last chance exams and compare those that had barely failed it to those that barely passed it. These students will have very similar human capital, but different signaling credentials. Namely, those that barely passed it, will receive a diploma. sheepskin = pd.read_csv("./data/sheepskin.csv")[["avgearnings", "minscore", "receivehsd", "n"]] sheepskin.head() # One again, this data is grouped by the running variable. It contains not only the running variable (`minscore`, already centered at zero) and the outcome (`avgearnings`), but it also has the probability of receiving a diploma in that score cell and the size of the call (`n`). So, for example, out of the 12 students in the cell -30 below the score threshold, only 5 were able to get the diploma (12 * 0,416). # # This means that there is some slippage in the treatment assignment. Some students that are below the passing threshold managed to get the diploma anyway. Here, the regression discontinuity is **fuzzy**, rather than sharp. Notice how the probability of getting the diploma doesn't jump from zero to one at the threshold. But it does jump from something like 50% to 90%. sheepskin.plot.scatter(x="minscore", y="receivehsd", figsize=(10,5)) plt.xlabel("Test Scores Relative to Cut off") plt.ylabel("Fraction Receiving Diplomas") plt.title("Last-chance Exams"); # We can think of fuzzy RD as a sort of non compliance. Passing the threshold should make everyone receive the diploma, but some students, the never takers, don't get it. Likewise, being below the threshold should prevent you from getting a diploma, but some students, the allways takers, manage to get it anyway. # # Just like when we have the potential outcome, we have the potential treatment status in this situation. \\(T_1\\) is the treatment everyone would have received had they been above the threshold. \\(T_0\\) is the treatment everyone would have received had they been below the threshold. 
As you've might have noticed, we can think of the **threshold as an Instrumental Variable**. Just as in IV, if we naively estimate the treatment effect, it will be biased towards zero. # # ![img](./data/img/rdd/rdd_fuzzy.png) # # The probability of treatment being less than one, even above the threshold, makes the outcome we observe less than the true potential outcome \\(Y_1\\). By the same token, the outcome we observe below the threshold is higher than the true potential outcome \\(Y_0\\). This makes it look like the treatment effect at the threshold is smaller than it actually is and we will have to use IV techniques to correct for that. # # Just like when we've assumed smoothness on the potential outcome, we now assume it for the potential treatment. Also, we need to assume monotonicity, just like in IV. In case you don't remember, it states that \\(T_{i1}>T_{i0} \ \forall i\\). This means that crossing the threshold from the left to the right only increases your chance of getting a diploma (or that there are no defiers). With these 2 assumptions, we have a Wald Estimator for LATE. # # $$ # \dfrac{\lim_{r \to c^+} E[Y_i|R_i=r] - \lim_{r \to c^-} E[Y_i|R_i=r]}{\lim_{r \to c^+} E[T_i|R_i=r] - \lim_{r \to c^-} E[T_i|R_i=r]} = E[Y_{1i} - Y_{0i} | T_{1i} > T_{0i}, R_i=c] # $$ # # Notice how this is a local estimate in two senses. First, it is local because it only gives the treatment effect at the threshold \\(c\\). This is the RD locality. Second, it is local because it only estimates the treatment effect for the compliers. This is the IV locality. # # To estimate this, we will use 2 linear regression. The numerator can be estimated just like we've done before. To get the denominator, we simply replace the outcome with the treatment. But first, let's talk about a sanity check we need to run to make sure we can trust our RDD estimates. 
# # ### The McCrary Test # # One thing that could break our RDD argument is if people can manipulate where they stand at the threshold. In the sheepskin example this could happen if students just below the threshold found a way around the system to increase their test score by just a bit. Another example is when you need to be below a certain income level to get a government benefit. Some families might lower their income on purpose, just to be just eligible for the program. # # In these sorts of situations, we tend to see a phenomenon called bunching on the density of the running variable. This means that we will have a lot of entities just above or just below the threshold. To check for that, we can plot the density function of the running variable and see if there are any spikes around the threshold. For our case, the density is given by the `n` column in our data. # + plt.figure(figsize=(8,8)) ax = plt.subplot(2,1,1) sheepskin.plot.bar(x="minscore", y="n", ax=ax) plt.title("McCrary Test") plt.ylabel("Smoothness at the Threshold") ax = plt.subplot(2,1,2, sharex=ax) sheepskin.replace({1877:1977, 1874:2277}).plot.bar(x="minscore", y="n", ax=ax) plt.xlabel("Test Scores Relative to Cut off") plt.ylabel("Spike at the Threshold"); # - # The first plot shows how our data density looks like. As we can see, there are no spikes around the threshold, meaning there is no bunching. Students are not manipulating where they fall on the threshold. Just for illustrative purposes, the second plot shows what bunching would look like if students could manipulate where they fall on the threshold. We would see a spike in the density for the cells just above the threshold, since many students would be on that cell, barely passing the exam. # # Getting this out of the way, we can go back to estimate the sheepskin effect. As I've said before, the numerator of the Wald estimator can be estimated just like we did in the Sharp RD. 
Here, we will use as weight the kernel with a bandwidth of 15. Since we also have the cell size, we will multiply the kernel by the sample size to get a final weight for the cell. # + sheepsking_rdd = sheepskin.assign(threshold=(sheepskin["minscore"]>0).astype(int)) model = smf.wls("avgearnings~minscore*threshold", sheepsking_rdd, weights=kernel(sheepsking_rdd["minscore"], c=0, h=15)*sheepsking_rdd["n"]).fit() model.summary().tables[1] # - # This is telling us that the effect of a diploma is -97.7571, but this is not statistically significant (P-value of 0.5). If we plot these results, we get a very continuous line at the threshold. More educated people indeed make more money, but there isn't a jump at the point where they receive the 12th grade diploma. This is an argument in favor of the view that says that education increases earnings by making people more productive, rather than being just a signal to the marker. In other words, there is no sheepskin effect. ax = sheepskin.plot.scatter(x="minscore", y="avgearnings", color="C0") sheepskin.assign(predictions=model.fittedvalues).plot(x="minscore", y="predictions", ax=ax, color="C1", figsize=(8,5)) plt.xlabel("Test Scores Relative to Cutoff") plt.ylabel("Average Earnings") plt.title("Last-chance Exams"); # However, as we know from the way non compliance bias works, this result is biased towards zero. To correct for that, we need to scale it by the first stage and get the Wald estimator. Unfortunately, there isn't a good Python implementation for this, so we will have to do it manually and use bootstrap to get the standard errors. # # The code below runs the numerator of the Wald estimator just like we did before and also constructs the denominator by replacing the target variable with the treatment variable `receivehsd`. The final step just divides the numerator by the denominator. 
def wald_rdd(data): weights=kernel(data["minscore"], c=0, h=15)*data["n"] denominator = smf.wls("receivehsd~minscore*threshold", data, weights=weights).fit() numerator = smf.wls("avgearnings~minscore*threshold", data, weights=weights).fit() return numerator.params["threshold"]/denominator.params["threshold"] # + from joblib import Parallel, delayed np.random.seed(45) bootstrap_sample = 1000 ates = Parallel(n_jobs=4)(delayed(wald_rdd)(sheepsking_rdd.sample(frac=1, replace=True)) for _ in range(bootstrap_sample)) ates = np.array(ates) # - # With the bootstrap samples, we can plot the distribution of ATEs and see where the 95% confidence interval is. sns.distplot(ates, kde=False) plt.vlines(np.percentile(ates, 2.5), 0, 100, linestyles="dotted") plt.vlines(np.percentile(ates, 97.5), 0, 100, linestyles="dotted", label="95% CI") plt.title("ATE Bootstrap Distribution") plt.xlim([-10000, 10000]) plt.legend(); # As you can see, even when we scale the effect by the first stage, it is still not statistically different from zero. This means that education doesn't increase earnings by a simple sheepskin effect, but rather by increasing one's productivity. # # ## Key Ideas # # We learned how to take advantage of artificial discontinuities to estimate causal effects. The idea is that we will have some artificial threshold that makes the probability of treatment jump. One example that we saw was how age makes the probability of drinking jump at 21 years. We could use that to estimate the impact of drinking on mortality rate. We use the fact that very close to the threshold, we have something close to a randomized trial. Entities very close to the threshold could have gone either way and what determines where they've landed is essentially random. With this, we can compare those just above and just below to get the treatment effect. We saw how we could do that with weighted linear regression using a kernel and how this even gave us, for free, standard errors for our ATE. 
# # Then, we look at what would happen in the fuzzy RD design, where we have non compliance. We saw how we could approach the situation much like we did with IV. # # # ## References # # I like to think of this entire book as a tribute to <NAME>, <NAME> and <NAME> for their amazing Econometrics class. Most of the ideas here are taken from their classes at the American Economic Association. Watching them is what is keeping me sane during this tough year of 2020. # * [Cross-Section Econometrics](https://www.aeaweb.org/conference/cont-ed/2017-webcasts) # * [Mastering Mostly Harmless Econometrics](https://www.aeaweb.org/conference/cont-ed/2020-webcasts) # # I'll also like to reference the amazing books from Angrist. They have shown me that Econometrics, or 'Metrics as they call it, is not only extremely useful but also profoundly fun. # # * [Mostly Harmless Econometrics](https://www.mostlyharmlesseconometrics.com/) # * [Mastering 'Metrics](https://www.masteringmetrics.com/) # # Other important reference is <NAME> and <NAME>' book. It has been my trustworthy companion in the most thorny causal questions I had to answer. # # * [Causal Inference Book](https://www.hsph.harvard.edu/miguel-hernan/causal-inference-book/) # # ![img](./data/img/poetry.png)
causal-inference-for-the-brave-and-true/16-Regression-Discontinuity-Design.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Making Choices # # ## Learning Objectives # # * Explain the similarities and differences between tuples and lists. # * Write conditional statements including `if`, `elif`, and `else` branches. # * Correctly evaluate expressions containing `and` and `or`. # # In our last lesson, we discovered something suspicious was going on # in our inflammation data by drawing some plots. # How can we use Python to automatically recognize the different features we saw, # and take a different action for each? In this lesson, we'll learn how to write code that # runs only when certain conditions are true. # # ## Conditionals # # We can ask Python to take different actions, depending on a condition, with an if statement: # + attributes={"classes": [" {.python"], "id": ""} num = 37 if num > 100: print('greater') else: print('not greater') print('done') # - # The second line of this code uses the keyword `if` to tell Python that we want to make a choice. # If the test that follows the `if` statement is true, # the body of the `if` # (i.e., the lines indented underneath it) are executed. # If the test is false, # the body of the `else` is executed instead. # Only one or the other is ever executed: # # ![Executing a Conditional](fig/python-flowchart-conditional.svg) # # Conditional statements don't have to include an `else`. # If there isn't one, # Python simply does nothing if the test is false: # + attributes={"classes": [" {.python"], "id": ""} num = 53 print('before conditional...') if num > 100: print('53 is greater than 100') print('...after conditional') # - # We can also chain several tests together using `elif`, # which is short for "else if". # The following Python code uses `elif` to print the sign of a number. 
# + attributes={"classes": [" {.python"], "id": ""} num = -3 if num > 0: print(num, "is positive") elif num == 0: print(num, "is zero") else: print(num, "is negative") # - # One important thing to notice in the code above is that we use a double equals sign `==` to test for equality # rather than a single equals sign # because the latter is used to mean assignment. # # We can also combine tests using `and` and `or`. # `and` is only true if both parts are true: # + attributes={"classes": [" {.python"], "id": ""} if (1 > 0) and (-1 > 0): print('both parts are true') else: print('one part is not true') # - # while `or` is true if at least one part is true: # + attributes={"classes": [" {.python"], "id": ""} if (1 < 0) or (-1 < 0): print('at least one test is true') # - # ## Checking our Data # # Now that we've seen how conditionals work, # we can use them to check for the suspicious features we saw in our inflammation data. # In the first couple of plots, the maximum inflammation per day # seemed to rise like a straight line, one unit per day. # We can check for this inside the `for` loop we wrote with the following conditional: # # ```python # if data.min(axis=0)[0] == 0 and data.max(axis=0)[20] == 20: # print('Suspicious looking maxima!') # ``` # # We also saw a different problem in the third dataset; # the minima per day were all zero (looks like a healthy person snuck into our study). # We can also check for this with an `elif` condition: # # ```python # elif data.min(axis=0).sum() == 0: # print 'Minima add up to zero!' # ``` # # And if neither of these conditions are true, we can use `else` to give the all-clear: # # ```python # else: # print 'Seems OK!' 
# ``` # Let's test that out: # + attributes={"classes": [" {.python"], "id": ""} import numpy data = numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',') if data.max(axis=0)[0] == 0 and data.max(axis=0)[20] == 20: print('Suspicious looking maxima!') elif data.min(axis=0).sum() == 0: print('Minima add up to zero!') else: print('Seems OK!') # + attributes={"classes": [" {.python"], "id": ""} data = numpy.loadtxt(fname='data/inflammation-03.csv', delimiter=',') if data.max(axis=0)[0] == 0 and data.max(axis=0)[20] == 20: print('Suspicious looking maxima!') elif data.min(axis=0).sum() == 0: print('Minima add up to zero!') else: print('Seems OK!') # - # In this way, # we have asked Python to do something different depending on the condition of our data. # Here we printed messages in all cases, # but we could also imagine not using the `else` catch-all # so that messages are only printed when something is wrong, # freeing us from having to manually examine every plot for features we've seen before. # ## Challenges # # ### How many paths? # # > Which of the following would be printed if you were to run this code? Why did you pick this answer? # > # > 1. A # > 2. B # > 3. C # > 4. B and C # > # > ~~~ {.python} # > if 4 > 5: # > print 'A' # > elif 4 == 5: # > print 'B' # > elif 4 < 5: # > print 'C' # > ~~~ # # ### What is truth? # # > `True` and `False` are special words in Python called `booleans` which represent true # and false statements. However, they aren't the only values in Python that are true and false. # > In fact, *any* value can be used in an `if` or `elif`. # > After reading and running the code below, # > explain what the rule is for which values are considered true and which are considered false. # > (Note that if the body of a conditional is a single statement, we can write it on the same line as the `if`.) 
if '': print('empty string is true') if 'word': print('word is true') if []: print('empty list is true') if [1, 2, 3]: print('non-empty list is true') if 0: print('zero is true') if 1: print('one is true') # ### Close enough # # > Write some conditions that print `True` if the variable `a` is within 10% of the variable `b` # > and `False` otherwise. # > Compare your implementation with your partner's: # > do you get the same answer for all possible pairs of numbers? # ### In-place operators # # Python (and most other languages in the C family) provides [in-place operators](reference.html#in-place-operator) # that work like this: x = 1 # original value x += 1 # add one to x, assigning result back to x x *= 3 # multiply x by 3 print(x) # Write some code that sums the positive and negative numbers in a list separately, # using in-place operators. # Do you think the result is more or less readable than writing the same without in-place operators? # ### Tuples and exchanges # # Explain what the overall effect of this code is: # + left = 'L' right = 'R' temp = left left = right right = temp # - # Compare it to: left, right = right, left # Do they always do the same thing? # Which do you find easier to read?
python/05-cond.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2

# + pycharm={"name": "#%%\n"}
# Development and evaluation splits of the track-level audio features.
data = pd.read_table('data/dev.tsv')
test = pd.read_table('data/eval.tsv')

# + pycharm={"name": "#%%\n"}
df = data.copy()
# Renamed from `eval`, which shadowed the `eval` builtin.
eval_df = test.copy()

# + pycharm={"name": "#%%\n"}
from scipy import stats

# The happy/sad split point is the mean valence; compute it once instead
# of re-computing it for every row inside the apply.
_mean_valence = df['valence'].mean()

def happy_sad(x):
    """Label a valence score as 'happy' (above the mean valence) or 'sad'."""
    return 'happy' if x > _mean_valence else 'sad'

# Derived features. Vectorized column arithmetic replaces the original
# per-row `.apply(lambda ...)` calls — same values, faster and clearer.
df['boringness'] = df['loudness'] + df['tempo'] + (df['energy'] * 100) + (df['danceability'] * 100)
df['valence_happy_sad'] = df['valence'].apply(happy_sad)
df['loudness_plus_60'] = df['loudness'] + 60
# Loudness is negative (dBFS-like), so negate before taking the square root.
df['loudness_pos'] = np.sqrt(-df['loudness'])
df['boringness_plus_60'] = df['boringness'] + 60
df['duration_ms_box_cox_trans'] = stats.boxcox(df['duration_ms'])[0]
df['acousticness_sqrt_trans'] = np.sqrt(df['acousticness'])
df['liveness_sqrt_trans'] = np.sqrt(df['liveness'])
df['popularity_sqrt_trans'] = np.sqrt(df['popularity'])
df['speechiness_sqrt_trans'] = np.sqrt(df['speechiness'])

# + pycharm={"name": "#%%\n"}
df = df.fillna(value=0)

# + pycharm={"name": "#%%\n"}
col = [
    'valence',
    'year',
    'acousticness',
    # 'artists',
    'danceability',
    'duration_ms',
    'energy',
    'explicit',
    # 'id',
    'instrumentalness',
    'key',
    'liveness',
    'loudness',
    'popularity',
    'speechiness',
    'tempo',
    'mode',
    'loudness_plus_60',
    'loudness_pos',
    'boringness',
    # 'valence_happy_sad',
    'boringness_plus_60',
    'duration_ms_box_cox_trans',
    'acousticness_sqrt_trans',
    'liveness_sqrt_trans',
    'popularity_sqrt_trans',
    'speechiness_sqrt_trans'
]
X = df[col]
# NOTE(review): the target 'mode' is also present in the feature list above,
# which makes it trivially selectable — confirm whether that is intended.
y = df['mode']

# + pycharm={"name": "#%%\n"}
X.describe().T

# + pycharm={"name": "#%%\n"}
# Renamed from `test`, which shadowed the eval DataFrame loaded above.
# NOTE(review): chi2 requires non-negative features; 'loudness' is typically
# negative — confirm this cell runs on the actual data.
selector = SelectKBest(score_func=chi2, k=4)
fit = selector.fit(X, y)
# Summarize scores
np.set_printoptions(precision=3)
print(fit.scores_)
features = fit.transform(X)

# + pycharm={"name": "#%%\n"}
print(features)

# + pycharm={"name": "#%%\n"}
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression

model = LogisticRegression()
# n_features_to_select is keyword-only in recent scikit-learn; the old
# positional form `RFE(model, 7)` is no longer accepted.
rfe = RFE(model, n_features_to_select=7)
fit = rfe.fit(X, y)
print("Num Features: %s" % (fit.n_features_))
print("Selected Features: %s" % (fit.support_))
print("Feature Ranking: %s" % (fit.ranking_))

# + pycharm={"name": "#%%\n"}
from sklearn.linear_model import Ridge

ridge = Ridge(alpha=1.0)
ridge.fit(X, y)

def pretty_print_coefs(coefs, names=None, sort=False):
    """Format model coefficients as a '<coef> * <name> + ...' string.

    coefs -- iterable of coefficients
    names -- feature names; defaults to X0, X1, ...
    sort  -- when True, order terms by descending absolute coefficient
    """
    if names is None:  # identity check, not `== None` (PEP 8)
        names = ["X%s" % x for x in range(len(coefs))]
    lst = zip(coefs, names)
    if sort:
        lst = sorted(lst, key=lambda x: -np.abs(x[0]))
    return " + ".join("%s * %s" % (round(coef, 3), name)
                      for coef, name in lst)

print("Ridge model:", pretty_print_coefs(ridge.coef_))
# -

# X0 X5 X9 X10 X12 X13 X18 X21
# valence energy liveness loudness speechiness tempo popularity_sqrt_trans

# + pycharm={"name": "#%%\n"}
X.columns
FeatureSelection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Segmented Regression # + import gc import os from typing import Dict, List, Tuple import dask.bag as db import numba import numpy as np import pandas as pd import seaborn as sns from dask.diagnostics import ProgressBar from matplotlib import pyplot as plt from scipy import stats from tqdm.notebook import tqdm # - # ## Read Trade Data # Trades are sorted by trade_id already(except BitMEX, which is sorted by timestamp) PER_TRADE_DATA_DIR = '/data/csv' # + BTC_PAIRS = [ ('Binance', 'Spot', 'BTC_USDT'), ('Binance', 'Swap', 'BTC_USDT'), ('BitMEX', 'Swap', 'BTC_USD'), ('Huobi', 'Spot', 'BTC_USDT'), ('Huobi', 'Swap', 'BTC_USD'), ('OKEx', 'Spot', 'BTC_USDT'), ('OKEx', 'Swap', 'BTC_USDT'), ('OKEx', 'Swap', 'BTC_USD'), ] ETH_PAIRS = [ ('Binance', 'Spot', 'ETH_USDT'), ('Binance', 'Swap', 'ETH_USDT'), ('BitMEX', 'Swap', 'ETH_USD'), ('Huobi', 'Spot', 'ETH_USDT'), ('Huobi', 'Swap', 'ETH_USD'), ('OKEx', 'Spot', 'ETH_USDT'), ('OKEx', 'Swap', 'ETH_USDT'), ('OKEx', 'Swap', 'ETH_USD'), ] # - def get_csv_file(exchange: str, market_type: str, pair: str)->str: assert market_type == 'Spot' or market_type == 'Swap' return os.path.join(PER_TRADE_DATA_DIR, f'{exchange}.{market_type}.{pair}.csv') get_csv_file(*BTC_PAIRS[0]) get_csv_file(*ETH_PAIRS[-1]) def read_csv(trade_csv_file: str)->pd.DataFrame: df = pd.read_csv(trade_csv_file, engine='c', dtype={'exchange': 'category', 'marketType': 'category', 'pair': 'category', 'timestamp': 'int64', 'price': 'float64', 'quantity': 'float64', 'side': 'bool', 'trade_id': 'string'}, usecols=['timestamp', 'price', 'quantity']) return df okex_swap_eth_usd = read_csv(get_csv_file(*ETH_PAIRS[-1])) okex_swap_eth_usd.head() # ## Ordinary Least Square # see https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html <EMAIL>() 
def ols(X: np.ndarray, y: np.ndarray)->np.ndarray:
    """Ordinary least squares via np.linalg.lstsq.

    Fits y = m*X + b after shifting both series to start at their first
    element (keeps the intercept small when X holds epoch-millisecond
    timestamps) and returns the fitted values back on the original scale.

    Degenerate inputs with fewer than two distinct X values are returned
    unchanged, since no line can be fitted there.
    """
    if np.unique(X).size < 2:
        return y
    y_first = y[0]
    # Shift to the origin for numerical conditioning; undone via y_first below.
    X = X - X[0]
    y = y - y[0]
    A = np.vstack((X, np.ones(len(X)))).T
    m, b = np.linalg.lstsq(A, y, rcond=None)[0]
    y_hat = m * X + b + y_first
    return y_hat

ols(np.array([0, 1, 2, 3]), np.array([-1, 0.2, 0.9, 2.1]))

# see https://devarea.com/linear-regression-with-numpy/
@numba.njit(fastmath=True, parallel=True)
def ols_1d(X: np.ndarray, y: np.ndarray)->np.ndarray:
    """Closed-form simple linear regression (numba-compiled).

    Same contract as `ols`, but uses the analytic slope/intercept formulas
    so it can be JIT-compiled; this is the version used in the per-bar hot
    loop in segmented_linear_regression below.
    """
    if np.unique(X).size < 2:
        return y
    y_first = y[0]
    X = X - X[0]
    y = y - y[0]
    # Analytic OLS: m = cov(X, y) / var(X); b from the normal equations.
    m = (len(X) * np.sum(X*y) - np.sum(X) * np.sum(y)) / (len(X)*np.sum(X*X) - np.sum(X) * np.sum(X))
    b = (np.sum(y) - m *np.sum(X)) / len(X)
    y_hat = m * X + b + y_first
    return y_hat

ols_1d(np.array([0, 1, 2, 3]), np.array([-1, 0.2, 0.9, 2.1]))

# +
# from https://machinelearningmastery.com/probabilistic-model-selection-measures/
# calculate aic for regression
def calculate_aic(n, mse, num_params):
    """Akaike information criterion for a regression with `num_params` parameters."""
    aic = n * np.log(mse) + 2 * num_params
    return aic

# calculate bic for regression
def calculate_bic(n, mse, num_params):
    """Bayesian information criterion for a regression with `num_params` parameters."""
    bic = n * np.log(mse) + num_params * np.log(n)
    return bic
# -

def calc_stats(Y: np.ndarray, Y_hat: np.ndarray)->Dict:
    """Goodness-of-fit summary (R2, MSE, MAE, AIC, BIC) of Y_hat against Y."""
    assert Y.shape == Y_hat.shape
    n = Y.shape[0]
    squared_error = np.sum(np.power(Y - Y_hat, 2))
    variance = np.sum(np.power(Y- np.mean(Y), 2))
    r_square = (variance-squared_error)/variance
    mse = squared_error / n
    mae = np.sum(np.abs(Y - Y_hat)) / n
    return {
        'R2': r_square,
        'MSE': mse,
        'MAE': mae,
        # num_params=1 here; see the AIC/BIC references at the end of the notebook.
        'AIC': calculate_aic(n, mse, 1),
        'BIC': calculate_bic(n, mse, 1),
    }

# ## Segmented Linear Regression

def segmented_linear_regression(csv_file: str, bar_type: str, bar_size)->Dict:
    """Fit one OLS line per bar and report the overall fit statistics.

    csv_file -- per-trade CSV path produced by get_csv_file
    bar_type -- 'TimeBar' | 'TickBar' | 'VolumeBar' | 'DollarBar'
    bar_size -- bar width in the bar type's own unit (ms, trades, base
                quantity, or quote volume)

    Returns a dict of exchange/market/pair metadata merged with the
    statistics from `calc_stats` over the concatenated per-bar fits.
    """
    df = read_csv(csv_file)
    # Assign every trade to a bar index according to the chosen bar type.
    if bar_type == 'TimeBar':
        df['bar_index'] = df['timestamp'] // bar_size
    elif bar_type == 'TickBar':
        df['bar_index'] = (df.index // bar_size).to_series().reset_index(drop=True)
    elif bar_type == 'VolumeBar':
        df['bar_index'] = df['quantity'].astype('float64').cumsum().floordiv(bar_size).astype('uint32')
    elif bar_type == 'DollarBar':
        df['bar_index'] = (df['quantity'] * df['price']).astype('float64').cumsum().floordiv(bar_size).astype('uint32')
    # NOTE(review): an unrecognized bar_type silently falls through and fails
    # later with a KeyError on 'bar_index' — confirm callers only pass the
    # four types above.
    df = df[['bar_index','timestamp', 'price']] # remove quantity column
    # One row per bar, holding the arrays of timestamps/prices in that bar.
    grouped = df.groupby('bar_index').agg(list)
    grouped['timestamp'] = grouped['timestamp'].apply(np.array)
    grouped['price'] = grouped['price'].apply(np.array)
    # Fit each bar independently, then stitch the fitted values back together.
    predicted = grouped.apply(lambda row: ols_1d(row['timestamp'], row['price']), axis=1)
    Y_hat = np.concatenate(predicted.values)
    stats = calc_stats(df['price'].values, Y_hat)
    # Free the large intermediates explicitly before the next task runs.
    del Y_hat
    del predicted
    del grouped
    del df
    gc.collect()
    exchange, market_type, pair, _ = os.path.basename(csv_file).split('.')
    result = {
        'exchange': exchange,
        'market_type': market_type,
        'pair': pair,
        'bar_type': bar_type,
        'bar_size': bar_size,
    }
    result.update(stats)
    return result

segmented_linear_regression(get_csv_file(*ETH_PAIRS[-1]), 'TimeBar', 60000)

# ## Compare different bars

# +
# Candidate bar sizes per base asset, chosen to yield comparable bar counts.
TIME_BAR_SIZES = {
    'BTC': [4000, 8000, 10000],
    'ETH': [4000, 8000, 10000],
}

TICK_BAR_SIZES = {
    'BTC': [16, 32, 64, 128],
    'ETH': [8, 16, 32, 64],
}

VOLUME_BAR_SIZES = {
    'BTC': [1, 2, 4, 8, 16, 32],
    'ETH': [16, 32, 64, 128, 256, 512],
}

DOLLAR_BAR_SIZES = {
    'BTC': [10000, 20000, 40000, 80000, 160000, 320000],
    'ETH': [4000, 8000, 16000, 32000, 64000, 128000],
}
# -

def gen_tasks(exchange_market_pairs: List[Tuple[str, str, str]], bar_type: str, bar_sizes: List[int])->None:
    """Cross every (exchange, market, pair) CSV with every bar size into task tuples."""
    # NOTE(review): the annotated return type is None but the function actually
    # returns a list of (csv_file, bar_type, bar_size) tuples.
    csv_files = [get_csv_file(*exchange_market_pair) for exchange_market_pair in exchange_market_pairs]
    tasks = [(csv_file, bar_type, bar_size) for csv_file in csv_files for bar_size in bar_sizes]
    return tasks

def batch(base: str)->pd.DataFrame:
    """Run segmented_linear_regression for all pairs of `base` over the configured bar sizes.

    Only TimeBar tasks are active; the other bar types and the sequential
    tqdm fallback are commented out, presumably for runtime.
    """
    exchange_market_pairs = BTC_PAIRS if base == 'BTC' else ETH_PAIRS
    tasks = gen_tasks(exchange_market_pairs, 'TimeBar', TIME_BAR_SIZES[base])
    #tasks.extend(gen_tasks(exchange_market_pairs, 'TickBar', TICK_BAR_SIZES[base]))
    #tasks.extend(gen_tasks(exchange_market_pairs, 'VolumeBar', VOLUME_BAR_SIZES[base]))
    #tasks.extend(gen_tasks(exchange_market_pairs, 'DollarBar', DOLLAR_BAR_SIZES[base]))
    #lst = []
    # Fan the tasks out with dask; each task reads and processes its own CSV.
    with ProgressBar():
        lst = db.from_sequence(tasks).map(lambda t: segmented_linear_regression(t[0], t[1], t[2])).compute()
    return pd.DataFrame(lst)
    #for t in tqdm(tasks):
        #lst.append(segmented_linear_regression(t[0], t[1], t[2]))

df_btc = batch('BTC')
df_btc

df_eth = batch('ETH')
df_eth

# ## References
#
# * [Probabilistic Model Selection with AIC, BIC, and MDL](https://machinelearningmastery.com/probabilistic-model-selection-measures/)
# * [AIC/BIC for a segmented regression model?](https://stats.stackexchange.com/questions/337852/aic-bic-for-a-segmented-regression-model)
# * [Linear Regression With Numpy - Developers Area](https://devarea.com/linear-regression-with-numpy/)
# * [What's the relationship between mean squared error and likelihood? - Quora](https://www.quora.com/Whats-the-relationship-between-mean-squared-error-and-likelihood)
# * [numpy.linalg.lstsq - NumPy](https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html)
# * [scipy.stats.linregress - SciPy](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html)
# * [Ordinary Least Squares - statsmodels](https://www.statsmodels.org/dev/examples/notebooks/generated/ols.html)
samples/crypto-notebooks-master/analysis/segmented-regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # United States - Crime Rates - 1960 - 2014

# ### Introduction:
#
# This time you will work with a real crime-rates dataset.
#
# Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries

# + jupyter={"outputs_hidden": false}
import pandas as pd
# -

# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv).

df=pd.read_csv('https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv')
df.head()

# ### Step 3. Assign it to a variable called crime.
# NOTE(review): the variable is actually named `df` throughout, not `crime`.

# ### Step 4. What is the type of the columns?

# + jupyter={"outputs_hidden": false}
df.info()
# -

# ##### Have you noticed that the type of Year is int64. But pandas has a different type to work with Time Series. Let's see it now.
#
# ### Step 5. Convert the type of the column Year to datetime64
# ### [ ] pd.to_datetime(df.column, format, which dtypes??????

# + jupyter={"outputs_hidden": false}
# Parse the integer year as a datetime (format '%Y' = 4-digit year).
df.Year = pd.to_datetime(df.Year, format='%Y')
df.head()
# -

# ### Step 6. Set the Year column as the index of the dataframe

# + jupyter={"outputs_hidden": false}
df=df.set_index('Year', drop= True)
df.head()
# -

# ### Step 7. Delete the Total column

# + jupyter={"outputs_hidden": false}
df=df.drop(columns= 'Total')
df.head(10)
# alternatively we could use "del df['Total']"
# -

# + [markdown] tags=[]
# ### Step 8. Group the year by decades and sum the values
#
# #### Pay attention to the Population column number, summing this column is a mistake
# -

# ### [ ] how to group by quarters, half-years...????

# '10AS' = 10-year bins anchored at the start of the decade (year-start frequency).
dfsel=df.resample('10AS').sum()
dfsel

# ### [ ] explain the resample of the Population column.

# Population is a stock, not a flow: summing it across a decade is meaningless,
# so overwrite the summed column with the decade's maximum population instead.
dfsel['Population']= df['Population'].resample('10AS').max()
dfsel.head()

# ### Step 9. What is the most dangerous decade to live in the US?

# ### [ ] shouldn't the Population column be dropped so it doesn't distort the results?

# + jupyter={"outputs_hidden": false}
# idxmax per column: the decade (index label) with the highest value of each crime.
dfsel.idxmax()
# -
04_Apply/[ ] US_Crime_Rates/[ ] US Crime Rates.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/AltamarMx/perceptron_cneer2021/blob/main/notebooks/Dia_02_en_Taller.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="NCICDLd6OVsO" import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import matplotlib.tri as mtri # + [markdown] id="xFEr9oJyOzMv" # # Al finalizar el taller uds sabrán: # * Gradient descent # * Learning rate # * Activation function # * Epochs # * Cost/Loss function # * TensorFlow # + id="rhyIM7UBOtUd" def problema(x,w,b): return np.dot(x,w)+b def sigmoide(x): return 1/ ( 1 + np.exp(-x)) def costo(y,Y): return (y - Y)**2 def dCdW(y,Y): return 2*(y-Y)*(y)*(1-y) def punto(x,w,b,Y): y = sigmoide(problema(x,w,b)) C = costo(y,Y) return C # + id="chb2jH96Bqu_" epochs = 20 x = [5,2] w = [0,-1] LR = 1 Y = 0.71 b = 0 P_h = [] for epoch in range(epochs): y = sigmoide(problema(x,w,b)) C = costo(y,Y) P_h.append([w[0],w[1],C]) correcion = dCdW(y,Y)*LR w = w - correcion P_h = np.array(P_h) # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="ZwsxITywCb83" outputId="b301bab5-5d97-476e-9ca3-a90080e385b7" plt.plot(P_h[:,2]) # + colab={"base_uri": "https://localhost:8080/", "height": 466} id="GowPZbLNCrhF" outputId="e4260041-7319-4168-fa5a-b7a58bb126b1" L = 50 w1 = np.linspace(-2,2,L) w2 = np.linspace(-2,0,L) x1 = 5 x2 = 2 S = [] for W1 in w1: for W2 in w2: P = punto([x1,x2],[W1,W2],b=0,Y=0.71) S.append([W1,W2,P]) S = np.array(S) fig = plt.figure(figsize=(16,8)) ax = fig.add_subplot(1, 2, 1, projection='3d') ax.set_xlabel('w1') ax.set_ylabel('w2') ax.set_zlabel('C') tri = mtri.Triangulation(S[:,0], S[:,1]) 
ax.plot_trisurf(S[:,0],S[:,1],S[:,2],triangles=tri.triangles,alpha=0.7) puntos = ax.scatter(P_h[:,0],P_h[:,1],P_h[:,2],c=P_h[:,2],cmap="jet",s=50) fig.colorbar(puntos, shrink=0.5, aspect=5) ax.view_init(60,40) plt.show() # + id="fy7t-FX-DbVv" def resuelve_grafique(x,w,b,Y,epochs,LR): # epochs = 20 # x = [5,2] # w = [0,-1] # LR = 1 # Y = 0.71 # b = 0 P_h = [] for epoch in range(epochs): y = sigmoide(problema(x,w,b)) C = costo(y,Y) P_h.append([w[0],w[1],C]) correcion = dCdW(y,Y)*LR w = w - correcion print("w =",w) print("C =",C) print("y =",y) P_h = np.array(P_h) L = 50 w1 = np.linspace(-2,2,L) w2 = np.linspace(-2,0,L) x1 = x[0] x2 = x[1] S = [] for W1 in w1: for W2 in w2: P = punto([x1,x2],[W1,W2],b=0,Y=0.71) S.append([W1,W2,P]) S = np.array(S) fig = plt.figure(figsize=(16,8)) ax = fig.add_subplot(1, 2, 1, projection='3d') ax.set_xlabel('w1') ax.set_ylabel('w2') ax.set_zlabel('C') tri = mtri.Triangulation(S[:,0], S[:,1]) ax.plot_trisurf(S[:,0],S[:,1],S[:,2],triangles=tri.triangles,alpha=0.7) puntos = ax.scatter(P_h[:,0],P_h[:,1],P_h[:,2],c=P_h[:,2],cmap="jet",s=50) fig.colorbar(puntos, shrink=0.5, aspect=5) ax.view_init(0,100) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 518} id="tALWSHoyDgxW" outputId="c563cbf1-d6e8-432d-a0a6-bc3a9634917b" resuelve_grafique(x=[5,2],w=[-1,-0.5],b=-1,Y=0.71,epochs=200,LR=0.) 
# + colab={"base_uri": "https://localhost:8080/", "height": 518} id="BjDHKh2CHyz9" outputId="4ee62f57-be1f-414b-afaa-5d3e71456ba4" resuelve_grafique(x=[5,2],w=[-1,0],b=0,Y=0.71,epochs=20,LR=0.1) # + colab={"base_uri": "https://localhost:8080/", "height": 518} id="bKDHxvveLc85" outputId="297c1e09-1ac2-4ba5-d4f8-adb41415d9c8" resuelve_grafique(x=[5,2],w=[-1.1,0],b=0,Y=0.71,epochs=200,LR=1) # + id="kgetLKNOLfUh" import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers # + id="A3gT1P-PPZPn" x_train = np.array([[5,2]]) y_train = np.array([[0.71]]) # + id="RTbfMW2iPt6-" model = keras.Sequential() model.add(keras.Input(shape=2)) model.add(layers.Dense(1,activation="sigmoid")) model.compile(loss="MSE",optimizer=keras.optimizers.SGD(lr=.2)) modelo = model.fit(x_train,y_train,epochs=40,verbose=0) # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="Oi03tI1DP4DP" outputId="520da570-3acd-4898-d430-942df2f95096" plt.plot(modelo.history["loss"]) # + colab={"base_uri": "https://localhost:8080/"} id="mM0i6gTiRFC8" outputId="38e00816-d5e3-47d0-f597-88fbeed7e3a4" X = np.array([[5,2]]) model.predict(X) model.weights # + colab={"base_uri": "https://localhost:8080/", "height": 518} id="KZTjsEPQRQZK" outputId="fbc9a8df-f154-4c8c-8c1c-1f98fc346d7e" resuelve_grafique(x=[5,2],w=[-1.1,0],b=0,Y=0.71,epochs=1000000,LR=.2) # + id="wjXS-bfpRZx1"
notebooks/Dia_02_en_Taller.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import h2o h2o.init() url="http://h2o-public-test-data.S3.amazonaws.com/smalldata/iris/iris_wheader.csv" iris=h2o.import_file(url) train,test=iris.split_frame([0.8]) train.summary() train.nrows test.nrows from h2o.estimators.random_forest import H2ORandomForestEstimator mRF=H2ORandomForestEstimator() mRF.train(["sepal_len","sepal_wid","petal_len","petal_wid"],"class",train) mRF p=mRF.predict(test) p mRF.model_performance(test) help(h2o.estimators.random_forest.H2ORandomForestEstimator) h2o.cluster().shutdown()
3 Random Forest in H2O.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] tags=[] # # Preprocessing of individual surveys # ## Introduction # # Included here: # * [Eurobarometer 34.0 (Oct-Nov 1990)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/standard-special-eb/study-overview/eurobarometer-340-za-1960-oct-nov-1990) # * [Eurobarometer 41.0 (Mar-May 1994)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/standard-special-eb/study-overview/eurobarometer-410-za-2490-mar-may-1994) # * [Eurobarometer 44.0 (Oct-Nov 1995)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/standard-special-eb/study-overview/eurobarometer-440-za-2689-oct-nov-1995) # * [Eurobarometer 50.0 (Oct-Nov 1998)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/standard-special-eb/study-overview/eurobarometer-500-za-3085-oct-nov-1998) # * [Eurobarometer 52.0 (Oct-Nov 1999)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/standard-special-eb/study-overview/eurobarometer-520-za-3204-oct-nov-1999) # * [Eurobarometer 54LAN (Dec 2000)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/standard-special-eb/study-overview/eurobarometer-54lan-za-3389-dec-2000) # * [Eurobarometer 55.1 (April-May 2001)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/standard-special-eb/study-overview/eurobarometer-551-za-3507-apr-may-2001) # * [Eurobarometer 63.4 (May-Jun 2005)](https://search.gesis.org/research_data/ZA4411) # * [Eurobarometer 64.3 (Nov- Dec 2005)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/standard-special-eb/study-overview/eurobarometer-643-za-4415-nov-dec-2005) # * [Eurobarometer 77.1 (2012)](https://dbk.gesis.org/dbksearch/SDesc2.asp?ll=10&notabs=1&af=&nf=&search=&search2=&db=E&no=5597) # # Not 
included: # * [Eurobarometer 28 (Oct-Nov 1987)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/standard-special-eb/study-overview/eurobarometer-28-za-1713-oct-nov-1987) -> mother tongue not polled # * [Eurobarometer 28.1 (Oct-Nov 1987)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/standard-special-eb/study-overview/eurobarometer-281-za-2041-oct-nov-1987) -> only respondents aged 15-24 # * [Eurobarometer 47.2OVR (Apr-June 1997)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/standard-special-eb/study-overview/eurobarometer-472ovr-za-2938-apr-jun-1997) -> doesn't ask about mother tongue, youth oversample # * [Candidate Countries EB 2003.1YouthOVR (March-May 2003)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/candidate-countries-eb/study-profiles/cc-eb-20031youth) -> youth oversample # # # TODO? # * [Candidate Countries EB 2001.1 (October 2001)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/candidate-countries-eb/study-profiles/cc-eb-20011) # * [Candidate Countries EB 2002.2 (Sep-Oct 2002)](https://www.gesis.org/en/eurobarometer-data-service/survey-series/candidate-countries-eb/study-profiles/cc-eb-20022) # - # ### Selected questions for L1 and L2 # All selected surveys contain the following data: # * Origin country of sample. # * L1 data. Languages provided as answers to the following questions: # 1. What is you mother tongue? (LM) # 2. What language do you speak at home? (LH) (only somewhat reflects mother tongue) # * L2 data. Languages provided as answers to the following questions: # 1. What is your second/third/fourth/etc. language? Which languages have you learned, except your mothertongue? (LL) # 3. Which languages do you speak well enough to hold a conversation, except your mothertongue? (LS) # # Answers could be provided both in single and multiple choice. 
# ### Encoding
# * Multiple choice, `n` options: `n` consecutive columns (mostly) with key `v{question number i}` corresponding to one language each, e.g. `v123` might indicate the answers for French. Answers are indicated (mostly) with `(not) mentioned`. Order and possible selection of languages vary between surveys and must be provided separately (see below).
# * Single: Single column with key `v{question number}` Language as string or missing. `DK` if the question could not be answered.
#
#
# ### Reproducibility / Input
# * `data/raw_data`:
#     * Survey microdata (see links) in `.sav` format. Prefix filenames with `EB{ survey number }_`.
#     * Newline separated language choices `langs_{survey number}.txt` used for multiple choice questions.

# ## Setup

# + language="html"
# <style>
# table {float:left}
# </style>

# +
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt

# _ = !pip install pyreadstat

# +
basedir = '../../data/eurobarometer/'
in_dir = basedir + "raw_data/"
step1_dir = basedir + "step1/"

os.makedirs(step1_dir, exist_ok=True)

# + [markdown] tags=[]
# ## Util

# +
def open_survey(survey):
    """Load the SPSS microdata of one survey as a pandas DataFrame.

    survey - base file name (without extension) inside `in_dir`
    """
    path = f'{in_dir}{survey}.sav'
    df = pd.read_spss(path)
    return df


def get_langs(filename):
    """Load the newline-separated list of language choices used for the
    multiple-choice questions of one survey.

    filename - name of the language list file inside `in_dir`
    """
    # BUG FIX: the `filename` argument was ignored and a hard-coded path was
    # opened instead; each survey must load its own language list.
    with open(f'{in_dir}{filename}') as fp:
        ls = [l.title() for l in fp.read().split('\n')]
    print(" ".join(ls))
    return ls


def plot_valuecounts(L1, L2, idx):
    """Plot barplots - how many respondents gave 1, 2, etc. languages as
    their first/second languages.

    L1  - Dataframe indicating whether a language was mentioned as L1 or not, using 0 and 1
    L2  - Dataframe indicating whether a language was mentioned as L2 or not, using 0 and 1
    idx - Rows of L1 to include
          (NOTE(review): idx=None would fail in `L1.loc[idx]`; all current
          callers pass a valid index)
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
    l1c = L1.loc[idx].sum(axis=1).value_counts()
    l2c = L2.sum(axis=1).value_counts()
    ax1.bar(l1c.index, l1c.values)
    ax2.bar(l2c.index, l2c.values)


def delete_duplicates_from_L2(L1, L2):
    """Set languages to 0 for L2 if they were mentioned as L1, as the
    respondent's mother tongue should not be counted as foreign language.

    L1 - Dataframe indicating whether a language was mentioned as L1 or not, using 0 and 1
    L2 - Dataframe indicating whether a language was mentioned as L2 or not, using 0 and 1
    """
    # languages present in both L1_* and L2_* columns
    l_intersec = set([c.split('_')[1] for c in L1.columns]).intersection(
        [c.split('_')[1] for c in L2.columns])
    for l in l_intersec:
        # 'Other' may legitimately refer to different languages in L1 and L2
        if l == 'Other':
            continue
        and_idx = L2['L2_' + l] * L1['L1_' + l]
        and_idx = and_idx.astype(bool)
        if and_idx.sum() > 0:
            print(l, 'mentioned both for L1 and L2:', and_idx.sum())
            L2.loc[and_idx, 'L2_' + l] = 0
    return L2


def concat_and_save(nat, L1, L2, year, survey, idx=None):
    """Concat arguments into one dataframe containing nationality, indicated
    L1 and L2, and year of the survey and save it in csv format. Only
    consider the rows/respondents indicated by idx. Plot distribution of
    number of L1/L2 given.

    nat  - Series indicating nationality of respondents
    L1   - Dataframe indicating whether a language was mentioned as L1 or not, using 0 and 1
    L2   - Dataframe indicating whether a language was mentioned as L2 or not, using 0 and 1
    year - Year the survey was held in
    idx  - Rows of L1 to include
    """
    L2 = delete_duplicates_from_L2(L1, L2)
    res = pd.concat([nat, L1, L2], axis=1)
    # clip to 0/1 in case a language was counted for several L2 questions
    lcols = list(L1.columns) + list(L2.columns)
    res.loc[:, lcols] = res.loc[:, lcols].clip(0, 1)
    res = res.drop('L2_nan', axis=1, errors='ignore')
    # res['Year'] = year
    if idx is not None:
        res = res.loc[idx, :].reset_index(drop=True)
    res.to_csv(f'{step1_dir}{survey}.csv', index=False)
    plot_valuecounts(L1, L2, idx)
    return res
# -


def rename(df, frm, to, ls, new_prefix, old_prefix='v'):
    """Clean up question data provided in multiple choice format/wide form,
    i.e. `n` columns named `{old prefix}{i}` with `i` in `[frm, to]`. Map
    values to 1 and 0 indicating mentioned or not mentioned respectively.
    Rename columns to be easily readable and mergeable, e.g.
    `{new prefix}_{language name}`. The language name is decoded from the
    column index according to the list of choices of languages (`ls`).

    [frm, to]  - range of column numbers belonging to the question
    ls         - list of languages corresponding to the columns indicated by frm, to
    new_prefix - prefix for the new column names
    old_prefix - prefix for the column names
    """
    keys = [f'{old_prefix}{i}' for i in range(frm, to + 1)]
    if len(keys) != len(ls):
        print('Warning: Languages might not match columns')
    d = df[keys].astype(object)\
        .fillna(0)\
        .apply(lambda x: x.str.lower())\
        .replace('mentioned', 1)\
        .replace({'not mentioned': 0})\
        .fillna(0)\
        .astype(int)
    new_keys = [f'{new_prefix}_{l}' for l in ls]
    d = d.rename(dict(zip(keys, new_keys)), axis=1)
    # "don't know" / "none" answers are not languages
    d = d.drop([f'{new_prefix}_Dk', f'{new_prefix}_None'], axis=1, errors='ignore')
    return d


def piv(df, nq, new_prefix="L2", old_prefix="v", ls=None):
    """Pivot answers to single choice questions from one column containing
    the language string to `n` columns with 0/1 encoding (for compatibility
    with multiple choice questions).

    nq         - column number for the question
    new_prefix - prefix for the new column names
    old_prefix - prefix for the column names
    ls         - List of languages which should appear as columns. Will be
                 inferred from column values if not given.
    """
    key = f'{old_prefix}{nq}'
    # NOTE: modifies df[key] in place (title-cases the answers)
    df[key] = df[key].str.title()
    dk = df[key].fillna('None').astype(str).to_frame().replace(
        ['No Second Language', 'No Third Language', 'Dk'], 'None')
    d = new_prefix + '_' + dk
    d['cnt'] = 1
    d = d.pivot(columns=key).fillna(0).astype(int)
    d.columns = d.columns.droplevel()
    if ls:
        # add all-zero columns for languages nobody mentioned
        cols = set([c.split('_')[1] for c in d.columns])
        diff = set(ls) - set(cols)
        d[[f'{new_prefix}_{l}' for l in diff]] = 0
    d = d.drop([f'{new_prefix}_Dk', f'{new_prefix}_None'], axis=1, errors='ignore')
    return d


# ## Clean up dfs
#
# * LM - What is your mother tongue?
# * LH - What language do you speak at home?
# * LL - What is your second/third/fourth/etc. language? Which languages have you learned, except your mother tongue?
# * LS - Which languages do you speak well enough to hold a conversation, except your mother tongue?
# ### EB 77.1
# | Proficiency | Question Type | Columns |
# | --- | --- | --- |
# | L1 | LM | d48a_1 - d48a_39 |
# | L1 | LH | - |
# | L2 | LL | d48b, d48c, d48d |
# | L2 | LS | - |

survey = 'EB771_2012_ZA5597_v3-0-0'
year = 2012
df = open_survey(survey)

# +
ls = get_langs('languages_771.txt')

L1 = rename(df, 1, 39, ls, "L1", old_prefix='d48a_')
# keep only respondents who gave at least one mother tongue
rows = L1[L1.sum(axis=1) > 0].index

# Multiple questions for L2 - What is your first, second etc. language?
cols = [f"L2_{l}" for l in ls if l != 'None']
L2 = pd.DataFrame(index=L1.index, columns=cols).fillna(0)
for nq in ['b', 'c', 'd']:
    d = piv(df, nq, old_prefix='d48', ls=ls)
    L2 = L2 + d

nat = df['isocntry']
res = concat_and_save(nat, L1, L2, year, survey, idx=rows)
res.groupby('isocntry').sum().head()
# -

# ### EB 64.3
# | Proficiency | Question Type | Columns |
# | --- | --- | --- |
# | L1 | LM | v264 - v303 |
# | L1 | LH | - |
# | L2 | LL | - |
# | L2 | LS | v304, v305, v306 |

survey = 'EB643_2005_ZA4415_v1-0-1'
year = 2005
df = open_survey(survey)

# +
ls = get_langs('languages_643.txt')

# harmonise the label with the other surveys
df = df.replace('Irish/Gael', 'Irish / Gaelic')

L1 = rename(df, 264, 303, ls, "L1")
rows = L1[L1.sum(axis=1) > 0].index

cols = [f"L2_{l}" for l in ls if l != 'None']
L2 = pd.DataFrame(index=L1.index, columns=cols).fillna(0)
for nq in range(304, 307):
    d = piv(df, nq, ls=ls)
    L2 = L2 + d

nat = df['v7'].rename('isocntry', axis=1)
res = concat_and_save(nat, L1, L2, year, survey, idx=rows)
res.groupby('isocntry').sum().head()
# -

# ### EB 63.4
# | Proficiency | Question Type | Columns |
# | --- | --- | --- |
# | L1 | LM | v442 - v476 |
# | L1 | LH | - |
# | L2 | LL | - |
# | L2 | LS | v477, v478, v479, v480-v514 |

survey = 'EB634_2005_ZA4411_v1-1-0'
year = 2005
df = open_survey(survey)

# +
ls = get_langs('languages_634.txt')

L1 = rename(df, 442, 476, ls, "L1")
rows = L1[L1.sum(axis=1) > 0].index

L2 = rename(df, 480, 514, ls, "L2")
for nq in range(477, 480):
    d = piv(df, nq, ls=ls)
    L2 = L2 + d

nat = df['v7'].rename('isocntry', axis=1)
res = concat_and_save(nat, L1, L2, year, survey, idx=rows)
res.groupby('isocntry').sum().head()
# -

# ### EB 55.1
# | Proficiency | Question Type | Columns |
# | --- | --- | --- |
# | L1 | LM | v38 |
# | L1 | LH | - |
# | L2 | LL | - |
# | L2 | LS | v39 - v53 |

# +
survey = 'EB551_2001_ZA3507_v1-0-1'
year = 2001
df = open_survey(survey)

ls = get_langs('languages_551.txt')

L2 = rename(df, 39, 53, ls, 'L2')
L1 = piv(df, 38, "L1", ls=ls)
rows = L1[L1.sum(axis=1) > 0].index

nat = df['isocntry']
res = concat_and_save(nat, L1, L2, year, survey, idx=rows)
res.groupby('isocntry').sum().head()
# -

# #### EB 54LAN
#
# LL here: What is your first, second, third, fourth foreign language?
#
# | Proficiency | Question Type | Columns |
# | --- | --- | --- |
# | L1 | LM | v24 - v42 |
# | L1 | LH | - |
# | L2 | LL | v43 - v62, v63 - v82, v83 - v102, v103 - v122 |
# | L2 | LS | - |

# +
survey = 'EB54LAN_2000_ZA3389_v1-0-1'
year = 2000
df = open_survey(survey)

ls = get_langs('languages_54LAN.txt')

L1 = rename(df, 24, 42, ls, 'L1')
rows = L1[L1.sum(axis=1) > 0].index

# combine the answers for the first/second/third/fourth foreign language
L2 = rename(df, 43, 62, ls, 'L2')
for frm, to in [(63, 82), (83, 102), (103, 122)]:
    L2 = L2 + rename(df, frm, to, ls, 'L2')

nat = df['isocntry']
res = concat_and_save(nat, L1, L2, year, survey, idx=rows)
res.groupby('isocntry').sum().head()
# -

# ### EB 52.0
#
# | Proficiency | Question Type | Columns |
# | --- | --- | --- |
# | L1 | LM | v38 |
# | L1 | LH | - |
# | L2 | LL | - |
# | L2 | LS | v49 - v63 |

# +
survey = 'EB520_1999_ZA3204_v1-0-1'
year = 1999
df = open_survey(survey)

df = df.replace('Other (SPECIFY)', 'Other')

ls = get_langs('languages_520.txt')

L1 = piv(df, 38, "L1", ls=ls)
rows = L1[L1.sum(axis=1) > 0].index

# BUG FIX: the LS question spans v49 - v63 (15 language columns, see the
# table above and the analogous EB 55.1 block); the original
# `rename(df, 49, 53, ...)` read only 5 of them and triggered the
# languages/columns mismatch warning.
# TODO(review): confirm the column range against the ZA3204 codebook.
L2 = rename(df, 49, 63, ls, 'L2')

nat = df['isocntry']
res = concat_and_save(nat, L1, L2, year, survey, idx=rows)
res.groupby('isocntry').sum().head()

# + [markdown] tags=[]
# ### EB 50.0
#
# | Proficiency | Question Type | Columns |
# | --- | --- | --- |
# | L1 | LM | v38 |
# | L1 | LH | - |
# | L2 | LL | - |
# | L2 | LS | v39 - v55 |

# +
survey = 'EB500_1998_ZA3085_v1-1-0'
year = 1998
df = open_survey(survey)

ls = get_langs('languages_500.txt')

L2 = rename(df, 39, 55, ls, 'L2')
L1 = piv(df, 38, 'L1', ls=ls)
rows = L1[L1.sum(axis=1) > 0].index

nat = df['isocntry']
res = concat_and_save(nat, L1, L2, year, survey, idx=rows)
res.groupby('isocntry').sum().head()
# -

# ### EB 44.0
#
# | Proficiency | Question Type | Columns |
# | --- | --- | --- |
# | L1 | LM | v264 - v280 |
# | L1 | LH | - |
# | L2 | LL | - |
# | L2 | LS | v281 - v297 |

# +
survey = 'EB440_1995_ZA2689_v1-0-1'
year = 1995
df = open_survey(survey)

ls = get_langs('languages_440.txt')

L1 = rename(df, 264, 280, ls, 'L1')
rows = L1[L1.sum(axis=1) > 0].index

L2 = rename(df, 281, 297, ls, 'L2')

nat = df['isocntry']
res = concat_and_save(nat, L1, L2, year, survey, idx=rows)
res.groupby('isocntry').sum().head()
# -

# ### EB 41.0
#
# | Proficiency | Question Type | Columns |
# | --- | --- | --- |
# | L1 | LM | v209 |
# | L1 | LH | (v234 - v245) |
# | L2 | LL | v210 - v221 |
# | L2 | LS | v222 - v233 |

survey = 'EB410_1994_ZA2490_v1-1-0'
year = 1994
df = open_survey(survey)

ls = get_langs('languages_410.txt')

# +
L1 = piv(df, 209, 'L1', ls=ls)
rows = L1[L1.sum(axis=1) > 0].index

# combine the LL and LS answers into one L2 indicator
L2 = rename(df, 210, 221, ls, 'L2')
L2 = L2 + rename(df, 222, 233, ls, 'L2')

nat = df['isocntry']
res = concat_and_save(nat, L1, L2, year, survey, idx=rows)
res.groupby('isocntry').sum().head()
# -

# ### EB 34.0
#
# | Proficiency | Question Type | Columns |
# | --- | --- | --- |
# | L1 | LM | v181 - Country specific, data not available |
# | L1 | LH | v206 - v217 |
# | L2 | LL | v182 - v193 |
# | L2 | LS | v194 - v205 |

# +
survey = 'EB340_1990_ZA1960_v1-0-1'
year = 1990
df = open_survey(survey)

ls = get_langs('languages_340.txt')

L1 = rename(df, 206, 217, ls, 'L1')
rows = L1[L1.sum(axis=1) > 0].index

# combine the LL and LS answers into one L2 indicator
L2 = rename(df, 182, 193, ls, 'L2')
L2 = L2 + rename(df, 194, 205, ls, 'L2')

nat = df['isocntry']
res = concat_and_save(nat, L1, L2, year, survey, idx=rows)
res.groupby('isocntry').sum().head()
# -

# #### EB 28
#
# | Proficiency | Question Type | Columns |
# | --- | --- | --- |
# | L1 | LM | - |
# | L1 | LH | v97 - v106 |
# | L2 | LL | v77 - v86 |
# | L2 | LS | v87 - v95 |

survey = 'EB28_1987_ZA1713_v1-1-0'
year = 1987
df = open_survey(survey)

ls = get_langs('languages_28.txt')

# +
L1 = rename(df, 97, 106, ls, 'L1')
rows = L1[L1.sum(axis=1) > 0].index

# combine the LL and LS answers into one L2 indicator
# NOTE(review): the table above lists LS as v87 - v95 while the code reads
# v87 - v96 (10 columns, matching the LL and LH blocks) - confirm against
# the ZA1713 codebook.
L2 = rename(df, 77, 86, ls, 'L2')
L2 = L2 + rename(df, 87, 96, ls, 'L2')
# BUG FIX: removed a stray duplicate `L2 = rename(df, 87, 96, ls, 'L2')`
# which immediately overwrote the combined LL+LS indicator with the LS
# answers only.

nat = df['isocntry']
res = concat_and_save(nat, L1, L2, year, survey, idx=rows)
res.groupby('isocntry').sum().head()
notebooks/Evelyn/eurobarometer_preprocessing_step1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Regular investing timed by a moving average (均线定投)

# +
import pandas as pd
from datetime import datetime
import trdb2py
import numpy as np

# render charts interactively rather than as static images
isStaticImg = False

width = 960
height = 768

pd.options.display.max_columns = None
pd.options.display.max_rows = None

trdb2cfg = trdb2py.loadConfig('./trdb2.yaml')

# +
# the specific fund / index to trade (jqdata CSI 300 daily bars)
asset = 'jqdata.000300_XSHG|1d'
# baselineasset = 'jrj.510310'

# asset = 'jrj.110011'
# baselineasset = 'jqdata.000300_XSHG|1d'

# start timestamp; 0 means from the earliest available data
tsStart = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))

# end timestamp; -1 means up to now
tsEnd = -1
tsEnd = int(trdb2py.str2timestamp('2020-12-31', '%Y-%m-%d'))

# initial pool of money
paramsinit = trdb2py.trading2_pb2.InitParams(
    money=10000,
)

# buy params: invest all available money (i.e. returns compound)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
    perHandMoney=1,
)

# buy params: invest only half of the available money per buy
paramsbuy2 = trdb2py.trading2_pb2.BuyParams(
    perHandMoney=0.5,
)

# sell params: sell the whole position
paramssell = trdb2py.trading2_pb2.SellParams(
    perVolume=1,
)

# automatic investment plan: invest 10000 on the 1st day of each month
paramsaip = trdb2py.trading2_pb2.AIPParams(
    money=10000,
    type=trdb2py.trading2_pb2.AIPTT_MONTHDAY,
    day=1,
)

# take-profit params (only close the profitable part)
paramstakeprofit = trdb2py.trading2_pb2.TakeProfitParams(
    perVolume=1,
    isOnlyProfit=True,
    # isFinish=True,
)

# take-profit params (close the whole position)
paramstakeprofit1 = trdb2py.trading2_pb2.TakeProfitParams(
    perVolume=1,
    # isOnlyProfit=True,
    # isFinish=True,
)

# sell params: close the position after holding for 7 days
paramssell7 = trdb2py.trading2_pb2.SellParams(
    # perVolume=1,
    keepTime=7 * 24 * 60 * 60,
)

# weekday candidates (1 = Monday ... 5 = Friday) and their chart labels
# (labels are Chinese for Monday..Friday; kept verbatim, they are runtime strings)
lststart = [1, 2, 3, 4, 5]
lsttitle = ['周一', '周二', '周三', '周四', '周五']
# -

def calcweekday2val2(wday, offday):
    # Map a weekday plus an offset in trading days to the offset in calendar
    # days, stretching over the weekend where needed.
    # wday   - weekday (1 = Monday ... 5 = Friday)
    # offday - offset in trading days
    # NOTE(review): mapping inferred from the returned constants below -
    # confirm against trdb2py's expectations.
    if offday == 1:
        if wday == 5:
            return 3
    if offday == 2:
        if wday >= 4:
            return 4
    if offday == 3:
        if wday >= 3:
            return 5
    if offday == 4:
        if wday >= 2:
            return 6
    return offday

# +
asset = 'jrj.110011'
# asset = 'jqdata.000036_XSHG|1d'
# asset = 'jqdata.000032_XSHG|1d'
asset = 'jqdata.000300_XSHG|1d'

# baseline
s0 = trdb2py.trading2_pb2.Strategy(
    name="normal",
    asset=trdb2py.str2asset(asset),
)

buy0 = trdb2py.trading2_pb2.CtrlCondition(
    name='buyandhold',
)

paramsbuy = trdb2py.trading2_pb2.BuyParams(
    perHandMoney=1,
)

paramsinit = trdb2py.trading2_pb2.InitParams(
    money=10000,
)

s0.buy.extend([buy0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsInit.CopyFrom(paramsinit)

p0 = trdb2py.trading2_pb2.SimTradingParams(
    assets=[trdb2py.str2asset(asset)],
    startTs=tsStart,
    endTs=tsEnd,
    strategies=[s0],
    title='沪深300',
)

# buy-and-hold baseline PNL for comparison
pnlBaseline = trdb2py.simTrading(trdb2cfg, p0)

trdb2py.showPNL(pnlBaseline, toImg=isStaticImg, width=width, height=height)

# +
# sweep the SMA window length i from 2 to 180:
# buy on the 1st of the month only when price is above the i-day SMA,
# sell when price crosses below the i-day SMA
lstparams = []

for i in range(2, 181):
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    # wait until enough bars exist to compute the i-day SMA
    buy1 = trdb2py.trading2_pb2.CtrlCondition(
        name='waittostart',
        vals=[i],
    )
    buy2 = trdb2py.trading2_pb2.CtrlCondition(
        name='indicatorsp',
        operators=['up'],
        strVals=['ta-sma.{}'.format(i)],
    )
    sell0 = trdb2py.trading2_pb2.CtrlCondition(
        name='indicatorsp',
        operators=['downcross'],
        strVals=['ta-sma.{}'.format(i)],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    # paramsaip = trdb2py.trading2_pb2.AIPParams(
    #     money=10000,
    #     type=trdb2py.trading2_pb2.AIPTT_WEEKDAY,
    #     day=1,
    # )
    s0.buy.extend([buy0, buy1, buy2])
    s0.sell.extend([sell0])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    # s0.paramsInit.CopyFrom(paramsinit)
    s0.paramsAIP.CopyFrom(paramsaip)
    lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='{}定投'.format(i),
    ))

# run all simulations, dropping runs whose total return exceeds 1.5x filter
lstaippnl = trdb2py.simTradings(trdb2cfg, lstparams, ignoreTotalReturn=1.5)

trdb2py.showPNLs(lstaippnl + [pnlBaseline], toImg=isStaticImg, width=width, height=height)
# -

# We can see that no matter which day of the month we buy on, the final
# results do not differ much.

# +
dfpnl1b = trdb2py.buildPNLReport(lstaippnl + [pnlBaseline])

dfpnl1b[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility', 'variance']].sort_values(by='totalReturns', ascending=False)
# -
home/trdb2/aip003.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: SageMath 8.6
#     language: ''
#     name: sagemath
# ---

# ## Theoretic Bounds
#
# Set up the functions as described in the paper

# +
def FR(r, l):
    """2r(2r-1)^(l-1) for l > 0, else 1 (count of freely reduced words of length l)."""
    if l == 0:
        return 1
    return 2*r*(2*r-1)**(l-1)


def CR(r, l):
    """(2r-1)^l + 1 + (r-1)(1+(-1)^l) for l > 0, else 1 (count of cyclically reduced words)."""
    if l == 0:
        return 1
    return (2*r-1)**l + 1 + (r-1)*(1+(-1)**l)


def CR2(r, l1, l2):
    """Sum of CR(r, i) over i in [l1, l2]."""
    value = 0
    for i in range(l1, l2+1):
        value = value + CR(r, i)
    return value


def nc1TightBound(lam, r, l, m):
    """Bound on cancellations within a single relator of length l (see paper).

    `m` is unused here; it is kept so all bound functions share a signature.
    """
    value = 2*l*(l - 2*ceil(lam*l) - 2) * FR(r, ceil(lam*l)) * (2*r-1)**(l - 2*ceil(lam*l))
    for k in range(1, ceil(lam*l)+1):
        value = value + l * CR(r, k) * (2*r-1)**(l - ceil(lam*l) - k)
    return value


def nc2TightBound(lam, r, l1, l2, m):
    """Bound on cancellations between two distinct relators with lengths in [l1, l2]."""
    value = 0
    for i in range(l1, l2+1):
        # BUG FIX: the inner sum originally ran over range(l2, l2+1), i.e.
        # only j == l2.  The double sum is over BOTH relator lengths in
        # [l1, l2] (lowerBound normalises it by CR2(r, l1, l2)**2), so j
        # must range over [l1, l2] exactly like i.  Every call in this
        # notebook uses l1 == l2, for which both versions agree, so no
        # existing output changes.
        for j in range(l1, l2+1):
            value = value + 2*i*j \
                * FR(r, ceil(lam*min(i, j))) \
                * (2*r-1)**(i+j - 2*ceil(lam*min(i, j)))
    return value


def lowerBound(lam, r, l1, l2, m):
    """Lower bound on the probability of the C'(lam) small-cancellation condition."""
    value = 1
    for i in range(l1, l2+1):
        value = value - m * nc1TightBound(lam, r, i, m) / CR2(r, l1, l2)
    value = value - m * (m-1) * nc2TightBound(lam, r, l1, l2, m) / (CR2(r, l1, l2)**2)
    return value


def knownZeroProp(lam, r, l, m):
    """True when FR(r, ceil(lam*l)) < 2*m*l, i.e. the probability is known to be zero."""
    return bool(FR(r, ceil(lam*l)) < 2*m*l)


def upperBound_l1(lam, l):
    """Number of whole ceil(lam*l)-sized pieces fitting into length l."""
    return floor(l/ceil(lam*l))


def upperBound_l2(lam, l):
    """Remainder of l after removing upperBound_l1 pieces of size ceil(lam*l)."""
    return l - upperBound_l1(lam, l)*ceil(lam*l)


def upperBound_omega(i, lam, r, l, m):
    return CR(r, l) - 4*(i-1)*l*(r-1)*(2*r-1)**(l-ceil(lam*l)-1)


def upperBound_alpha(i, k, lam, r, l, m):
    val = FR(r, ceil(lam*l)) - 2*(i-1)*l
    if k == 1:
        return val
    return val - 2*((k-2)*ceil(lam*l) + upperBound_l2(lam, l) + 1)


def upperBound_beta(lam, r, l, m):
    return FR(r, upperBound_l2(lam, l))


def upperBound(lam, r, l, m):
    """Upper bound on the C'(lam) probability (uses Sage's min_symbolic)."""
    value = 1
    for i in range(1, m+1):
        value2 = upperBound_beta(lam, r, l, m)
        for k in range(1, upperBound_l1(lam, l)+1):
            value2 = value2 * min_symbolic((2*r-1)**ceil(lam*l), upperBound_alpha(i, k, lam, r, l, m))
        value = value * min_symbolic(upperBound_omega(i, lam, r, l, m), value2) / CR(r, l)
    return value
def knownSmallCancelation(p, lam, r, l, m):
    """True when the rough estimate guarantees probability at least p of small cancellation."""
    return ((1-p) * (2*r-1)**(1+lam*l) - 8 * r * m**2 * l**2 >= 0)


# also, a rough estimate of the probability
def lowerBoundRough(lam, r, l, m):
    return ((2*r-1)**(1+lam*l) - 8 * r * m**2 * l**2) / ((2*r-1)**(1+lam*l))
# -

# ## Read the Experimental Data
#
# Read files ``../data/*/data_r-[r]_m-[m]_l1-[l1]_l2-[l2].csv``

# #### Supporting functions
#
# The following supporting functions are used to store and retrieve the values which have been read from the data files.

# nested dict: full_data_set[r][m][l1][l2] -> [count, total]
full_data_set = {}


def experimental_data(r, m, l1, l2):
    """Return the stored datum for (r, m, l1, l2), or None if no entry exists."""
    node = full_data_set
    for key in (int(r), int(m), int(l1), int(l2)):
        if key not in node:
            return None
        node = node[key]
    return node


def experimental_data_add(r, m, l1, l2, datum):
    """Store `datum` under (r, m, l1, l2); add element-wise if an entry already exists."""
    leaf = full_data_set.setdefault(int(r), {}) \
                        .setdefault(int(m), {}) \
                        .setdefault(int(l1), {})
    if int(l2) not in leaf:
        leaf[int(l2)] = datum
    else:
        leaf[int(l2)] = [sum(pair) for pair in zip(leaf[int(l2)], datum)]


# #### Read the files

# +
import re
import subprocess
import os

for run_dir in os.listdir('../data/'):
    for fname in os.listdir('../data/' + run_dir):
        full_path = os.path.join('../data/' + run_dir, fname)
        match = re.search("^data_r-([0-9]*)_m-([0-9]*)_l1-([0-9]*)_l2-([0-9]*)\.csv$", fname)
        if not match:
            continue
        r = Integer(int(match.group(int(1))))
        m = Integer(int(match.group(int(2))))
        l1 = Integer(int(match.group(int(3))))
        l2 = Integer(int(match.group(int(4))))
        # the last line of each csv holds the running totals
        # NOTE(review): under Python 3 check_output returns bytes; this
        # split(",") relies on the Python 2 based Sage 8.6 - confirm.
        line = subprocess.check_output(['tail', '-1', full_path])
        new_data = [int(v) for v in line.split(",")[:int(2)]]
        experimental_data_add(r, m, l1, l2, new_data)
# -

# #### Final function
#
# we use the following function `get_prob` to obtain a sage object representing
# the experimental probability of obtaining small cancelation

def get_prob(r, m, l1, l2):
    """Experimental probability of small cancellation: 0 when theoretically
    known to be zero, None when no data was read, otherwise a Sage rational."""
    if all(knownZeroProp(1/6, r, l, m) for l in range(int(l1), int(l2)+1)):
        return 0
    data = experimental_data(r, m, l1, l2)
    if not data:
        return None
    return Integer(data[int(0)])/Integer(data[int(1)])


# ## Colour Schemes Used in Plots
#
# The following colour scheme is based on a technical note from SRON (<NAME>, 2012)

# +
import numpy as np
import matplotlib

PaulTol_rainbow = matplotlib.colors.ListedColormap([ \
    [ \
        np.clip( ( 0.472 - 0.567*x + 4.05*x**2 ) / ( 1 + 8.72*x - 19.17*x**2 + 14.1*x**3 ) ,0.0,1.0), \
        np.clip( 0.108932 - 1.22635*x + 27.284*x**2 - 98.577*x**3 + 163.3*x**4 - 131.395*x**5 + 40.634*x**6 ,0.0,1.0), \
        np.clip( 1 / ( 1.97 + 3.54*x - 68.5*x**2 + 243*x**3 - 297*x**4 + 125*x**5 ) ,0.0,1.0) \
    ] \
    for x in [ float(n)/float(256-1) for n in range(int(256)) ] \
])

PaulTol_recomended = matplotlib.colors.ListedColormap([ \
    [ \
        np.clip( 1.0 - 0.392*(1.0 + math.erf( (x-0.869)/0.255) ) ,0.0,1.0), \
        np.clip( 1.021 - 0.456*( 1.0 + math.erf( (x-0.527)/0.376 ) ) ,0.0,1.0), \
        np.clip( 1.0 - 0.493*( 1.0 + math.erf( (x-0.272)/0.309 ) ) ,0.0,1.0) \
    ] \
    for x in [ float(n)/float(256-1) for n in range(int(256)) ] \
])

colmap = PaulTol_rainbow  # the colour scheme to use throughout
# -

# ## Contour Plots of Theoretic Bounds

# ###### necessary includes and settings

# +
import matplotlib
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np

# the following ensures that text in the graphs is correctly displayed
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# -

# ### Part 1: r vs. m
# #### Generate the data to produce the contour plot

# +
def _to_grid(vals, X, Y):
    """Reshape a flat row-major sample list (outer loop over X, inner loop
    over Y) into the (len(Y), len(X)) array expected by pcolormesh."""
    return np.array(vals).reshape((len(X), len(Y))).transpose()


def plot_bound_heatmap(X, Y, Z, xlabel, ylabel, xticks, yticks, outname):
    """Draw one probability heat map (colour range fixed to [0, 1]) with a
    horizontal colour bar on top and save it as a rasterized PDF.

    Factored out of nine previously duplicated plotting cells; the figure
    produced for each data set is unchanged.
    """
    fig, ax = plt.subplots(figsize=(float(5), float(4)))
    pcol = plt.pcolormesh(X, Y, Z, cmap=colmap, vmin=0.0, vmax=1.0, rasterized=True)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.xticks([int(i) for i in xticks])
    plt.yticks([int(i) for i in yticks])
    ax_divider = make_axes_locatable(ax)
    cax = ax_divider.append_axes("top", size="7%", pad="7%")
    cb = plt.colorbar(pcol, cax=cax, orientation="horizontal", label="probability")
    cax.xaxis.set_ticks_position("top")
    cb.ax.xaxis.set_label_position("top")
    cb.solids.set_rasterized(True)
    plt.savefig(outname, bbox_inches="tight", pad_inches=int(3), dpi=200)


length = 20

X_rm = range(6,20+1)    # range of r to use
Y_rm = range(2,100+1)   # range of m to use

Z_rm_lower = []
Z_rm_upper = []
Z_rm_exper = []
for r in X_rm:
    for m in Y_rm:
        # PERF: append instead of the original quadratic `Z = Z + [x]`
        Z_rm_lower.append(lowerBound(1/6,r,length,length,m).n(digits=8))
        Z_rm_upper.append(upperBound(1/6,r,length,m).n(digits=8))
        Z_rm_exper.append(get_prob(r,m,length,length))

Z_rm_lower = _to_grid(Z_rm_lower, X_rm, Y_rm)
Z_rm_upper = _to_grid(Z_rm_upper, X_rm, Y_rm)
Z_rm_exper = _to_grid(Z_rm_exper, X_rm, Y_rm)

X_rm = np.array(X_rm)
Y_rm = np.array(Y_rm)
# -

# #### Plot the data

# ###### lower bound

# +
plot_bound_heatmap(X_rm, Y_rm, Z_rm_lower,
                   "number of generators $r$", "number of relators $m$",
                   [6,8,10,12,14,16,18,20], [2,20,40,60,80,100],
                   "../output/rmLower.pdf")
# -

# ###### upper bound

# +
plot_bound_heatmap(X_rm, Y_rm, Z_rm_upper,
                   "number of generators $r$", "number of relators $m$",
                   [6,8,10,12,14,16,18,20], [2,20,40,60,80,100],
                   "../output/rmUpper.pdf")
# -

# ###### experimental bound

# +
plot_bound_heatmap(X_rm, Y_rm, Z_rm_exper,
                   "number of generators $r$", "number of relators $m$",
                   [6,8,10,12,14,16,18,20], [2,20,40,60,80,100],
                   "../output/rmExper.pdf")
# -

# ---
#
# ### Part 2: length vs. m

# #### Generate the data to produce the contour plot

# +
r = 20

X_lm = range(6,30+1)   # range of l to use
Y_lm = range(2,40+1)   # range of m to use

Z_lm_lower = []
Z_lm_upper = []
Z_lm_exper = []
for length in X_lm:
    for m in Y_lm:
        Z_lm_lower.append(lowerBound(1/6,r,length,length,m).n(digits=8))
        Z_lm_upper.append(upperBound(1/6,r,length,m).n(digits=8))
        Z_lm_exper.append(get_prob(r,m,length,length))

Z_lm_lower = _to_grid(Z_lm_lower, X_lm, Y_lm)
Z_lm_upper = _to_grid(Z_lm_upper, X_lm, Y_lm)
Z_lm_exper = _to_grid(Z_lm_exper, X_lm, Y_lm)

X_lm = np.array(X_lm)
Y_lm = np.array(Y_lm)
# -

# #### Plot the data

# ##### lower bound

# +
plot_bound_heatmap(X_lm, Y_lm, Z_lm_lower,
                   "length $\ell$", "number of relators $m$",
                   [6,10,15,20,25,30], [2,10,20,30,40],
                   "../output/lmLower.pdf")
# -

# ##### upper bound

# +
plot_bound_heatmap(X_lm, Y_lm, Z_lm_upper,
                   "length $\ell$", "number of relators $m$",
                   [6,10,15,20,25,30], [2,10,20,30,40],
                   "../output/lmUpper.pdf")
# -

# ##### experimental bound

# +
plot_bound_heatmap(X_lm, Y_lm, Z_lm_exper,
                   "length $\ell$", "number of relators $m$",
                   [6,10,15,20,25,30], [2,10,20,30,40],
                   "../output/lmExper.pdf")
# -

# ---
#
# ### Part 3: r vs. length

# #### Generate the data to produce the contour plot

# +
m = 10

X_lr = range(2,40+1)   # range of l to use
Y_lr = range(6,50+1)   # range of r to use

Z_lr_lower = []
Z_lr_upper = []
Z_lr_exper = []
for length in X_lr:
    for r in Y_lr:
        Z_lr_lower.append(lowerBound(1/6,r,length,length,m).n(digits=8))
        Z_lr_upper.append(upperBound(1/6,r,length,m).n(digits=8))
        Z_lr_exper.append(get_prob(r,m,length,length))

Z_lr_lower = _to_grid(Z_lr_lower, X_lr, Y_lr)
Z_lr_upper = _to_grid(Z_lr_upper, X_lr, Y_lr)
Z_lr_exper = _to_grid(Z_lr_exper, X_lr, Y_lr)

X_lr = np.array(X_lr)
Y_lr = np.array(Y_lr)
# -

# #### Plot the data

# ##### lower bound

# +
plot_bound_heatmap(X_lr, Y_lr, Z_lr_lower,
                   "length $\ell$", "number of generators $r$",
                   [5,10,15,20,25,30,35,40], [10,15,20,25,30,35,40,45,50],
                   "../output/lrLower.pdf")
# -

# ##### upper bound

# +
plot_bound_heatmap(X_lr, Y_lr, Z_lr_upper,
                   "length $\ell$", "number of generators $r$",
                   [5,10,15,20,25,30,35,40], [10,15,20,25,30,35,40,45,50],
                   "../output/lrUpper.pdf")
# -

# ##### experimental bound

# +
plot_bound_heatmap(X_lr, Y_lr, Z_lr_exper,
                   "length $\ell$", "number of generators $r$",
                   [5,10,15,20,25,30,35,40], [10,15,20,25,30,35,40,45,50],
                   "../output/lrExper.pdf")
# -
working/Generate Summary Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.3 64-bit (''base'': conda)' # language: python # name: python37364bitbasecondac2861e416ca946f48ef8d65a1d2373a7 # --- # # chapter 6. decision trees # ## setup # + # Python ≥3.5 is required import sys assert sys.version_info >= (3, 5) # Scikit-Learn ≥0.20 is required import sklearn assert sklearn.__version__ >= "0.20" # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) # To plot pretty figures # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # Where to save the figures PROJECT_ROOT_DIR = "." CHAPTER_ID = "decision_trees" IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID) print('image path:', IMAGES_PATH) os.makedirs(IMAGES_PATH, exist_ok=True) def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300): path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension) print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format=fig_extension, dpi=resolution) # - # ## training and visualizing a decision tree # + from sklearn.datasets import load_iris from sklearn.tree import DecisionTreeClassifier iris = load_iris() X = iris.data[:, 2:] # petal length and width y = iris.target tree_clf = DecisionTreeClassifier(max_depth=2, random_state=42) tree_clf.fit(X, y) # + from graphviz import Source from sklearn import tree out_file_full_path = os.path.join(IMAGES_PATH, 'iris_tree.dot') tree.export_graphviz( tree_clf, out_file=out_file_full_path, feature_names=iris.feature_names[2:], class_names=iris.target_names, rounded=True, filled=True ) Source.from_file(out_file_full_path) # -
.ipynb_checkpoints/ch6_decision_trees-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### <NAME> # # &nbsp; # # This model draws inspiration from Stanford CS229 Autumn 2016 Problem Set 4 Problem 2. The Platt-Burges Model is used in NIPS for paper review calibration. Unfortunately, not much information about this model is disclosed to the general public. You have to settle for the limited description either on Neil Lawrence's personal blog or in Hong Ge's paper. Please note that these two authors solve the latent variable problem via regularized least squares. This script solves the latent variable problem via the Expectation-Maximization (EM) algorithm, exactly the same procedure as in CS229 Problem Set 4. # # Assuming $P$ papers are submitted to the conference and $R$ reviewers in the committee mark the score of these papers, each paper will be given $R$ different scores by all the reviewers. Therefore, the score of a paper given by a reviewer, denoted as $x$, can be decomposed into the linear combination of three components – the underlying intrinsic value $y$, the reviewer bias $z$ and some random disturbance $\epsilon$. $x$, $y$ and $z$ independently follow different Gaussian distributions. 
#
# $$ y^{(pr)} \sim \mathcal{N} (\mu_p,\sigma_p^2)$$
#
# $$ z^{(pr)} \sim \mathcal{N} (\nu_r,\tau_r^2)$$
#
# $$ x^{(pr)}|y^{(pr)},z^{(pr)} \sim \mathcal{N} (y^{(pr)}+z^{(pr)},\sigma^2)$$
#
# E-Step
#
# ![alt text](./preview/e-step.JPG)
#
# M-Step
#
# ![alt text](./preview/m-step.JPG)
#
# &nbsp;
#
# For RLS, please check the below
#
# https://github.com/je-suis-tm/machine-learning/blob/master/Wisdom%20of%20Crowds%20project/regularized%20least%20squares.ipynb
#
# Reference to the original paper
#
# https://github.com/je-suis-tm/machine-learning/blob/master/Wisdom%20of%20Crowds%20project/Regularized%20Least%20Squares%20to%20Remove%20Reviewer%20Bias.pdf
#
# Reference to Hong Ge's paper
#
# http://mlg.eng.cam.ac.uk/hong/unpublished/nips-review-model.pdf
#
# <NAME>'s personal blog
#
# https://inverseprobability.com/2014/08/02/reviewer-calibration-for-nips
#
# <NAME>'s jupyter notebook
#
# https://github.com/lawrennd/conference
#
# Others' jupyter notebook
#
# https://github.com/leonidk/reviewers
#
# For discrete case, please check Dawid-Skene Model
#
# https://github.com/je-suis-tm/machine-learning/blob/master/Wisdom%20of%20Crowds%20project/dawid%20skene.ipynb

import matplotlib.pyplot as plt
import os
# NOTE(review): hard-coded local working directory — the CSV reads below
# depend on it; adjust before running elsewhere.
os.chdir('K:/ecole/github/televerser/wisdom of crowds')
import numpy as np
import pandas as pd
import seaborn as sns

#raise error when zero is encountered in logarithm
np.seterr(divide='raise')

# ### Functions

#equations 1,2 in the problem set
def e_step(miu_p,sigma_p,nu_r,tau_r,X,sigma,):
    """E-step: posterior means/variances of the latent intrinsic value y
    and reviewer bias z given the observed scores X.

    All arguments are matrices broadcast to X's shape (rows = reviewers/
    banks, columns = reviewed items — consistent with how the ETL section
    below indexes nu_r by bank; confirm against the data files).
    Returns (miu_pr_y, miu_pr_z, sigma_pr_yy, sigma_pr_zz).
    """
    # posterior mean of y: prior mean plus the residual weighted by the
    # share of variance attributed to the item
    miu_pr_y=miu_p+np.multiply(
        np.divide(sigma_p,
                  (sigma+sigma_p+tau_r)),(X-miu_p-nu_r))
    # posterior mean of z: same residual, weighted by the reviewer share
    miu_pr_z=nu_r+np.multiply(np.divide(
        tau_r,(sigma+sigma_p+tau_r)),(X-miu_p-nu_r))
    # posterior variances of y and z
    sigma_pr_yy=np.divide(
        np.multiply((tau_r+sigma),sigma_p),
        (sigma+sigma_p+tau_r))
    sigma_pr_zz=np.divide(
        np.multiply((sigma_p+sigma),tau_r),
        (sigma+sigma_p+tau_r))
    return miu_pr_y,miu_pr_z,sigma_pr_yy,sigma_pr_zz

#equations 3,4,5,6 in the problem set
def m_step(miu_pr_y,miu_pr_z,
           sigma_pr_yy,sigma_pr_zz):
    """M-step: re-estimate the prior parameters from the E-step posteriors.

    Each parameter is averaged over the appropriate axis and then
    np.repeat-ed back to full matrix shape so the next E-step can use
    plain elementwise arithmetic.
    Returns (miu_p, sigma_p, nu_r, tau_r) as full-shape matrices.
    """
    # per-item prior mean: average posterior y over reviewers (axis 0)
    miu_p=np.repeat(
        miu_pr_y.mean(axis=0),miu_pr_y.shape[0],axis=0)
    # per-reviewer prior mean: average posterior z over items (axis 1)
    nu_r=np.repeat(miu_pr_z.mean(axis=1),
                   miu_pr_z.shape[1],axis=1)
    # E[(y - miu_p)^2] averaged over reviewers, then over items
    sigma_p_temp=(sigma_pr_yy+np.square(miu_pr_y)- \
                  2*np.multiply(miu_pr_y,miu_p)+ \
                  np.square(miu_p)).mean(axis=0)
    sigma_p=np.repeat(sigma_p_temp.mean(axis=0),
                      miu_p.shape[0],axis=0)
    # E[(z - nu_r)^2] averaged over items, then over reviewers
    tau_r_temp=(sigma_pr_zz+np.square(miu_pr_z)- \
                2*np.multiply(miu_pr_z,nu_r)+ \
                np.square(nu_r)).mean(axis=1)
    tau_r=np.repeat(tau_r_temp.mean(axis=1),
                    miu_p.shape[1],axis=1)
    return miu_p,sigma_p,nu_r,tau_r

#compute lower bound to determine the convergence
def get_lower_bound(miu_pr_y,miu_pr_z,
                    sigma_pr_yy,sigma_pr_zz,
                    miu_p,sigma_p,nu_r,tau_r):
    """Scalar convergence criterion summed over all (reviewer, item) cells.

    NOTE(review): this drops additive constants of the true ELBO, so it is
    only meaningful for monitoring relative change between iterations.
    """
    # second moments of y and z around the current prior means
    var_y=sigma_pr_yy+np.square(miu_pr_y)- \
        2*np.multiply(miu_pr_y,miu_p)+np.square(miu_p)
    var_z=sigma_pr_zz+np.square(miu_pr_z)- \
        2*np.multiply(miu_pr_z,nu_r)+np.square(nu_r)
    # log of the Gaussian normalizers minus the quadratic terms;
    # np.seterr above turns a zero inside the log into an exception
    logpdf=np.log(
        np.divide(
            1,np.multiply(
                sigma_p,tau_r)))-np.divide(
        var_y,sigma_p)/2-np.divide(var_z,tau_r)/2
    lower_bound=logpdf.sum()
    return lower_bound

#platt burges model solved in em algorithm
def platt_burges(X,tolerance=0.001,num_of_itr=200,
                 diagnosis=True):
    """Fit the Platt-Burges model to score matrix X via EM.

    Parameters
    ----------
    X : matrix of observed scores (rows = reviewers/banks,
        columns = reviewed items — TODO confirm against the data files)
    tolerance : relative change of the lower bound below which EM stops
    num_of_itr : maximum number of EM iterations
    diagnosis : print the iteration count on convergence

    Returns
    -------
    (miu_p, sigma_p, nu_r, tau_r): per-item intrinsic mean/variance and
    per-reviewer bias mean/variance, as 1-D slices of the full matrices.
    """

    #initialize intrinsic value
    miu_p=np.repeat(X.mean(axis=0),X.shape[0],axis=0)
    sigma_p=np.repeat(X.var(axis=0),X.shape[0],axis=0)

    #initialize banks bias level
    nu_r=np.repeat(X.mean(axis=1),X.shape[1],axis=1)
    tau_r=np.repeat(X.var(axis=1),X.shape[1],axis=1)

    #observed data
    # observation noise variance: a constant, the pooled variance of X
    sigma=np.empty(X.shape)
    sigma.fill(X.var())

    #initialize others
    lower_bound_old=None
    lower_bound=None
    counter=0

    while counter<num_of_itr:

        #e step
        miu_pr_y,miu_pr_z,sigma_pr_yy,sigma_pr_zz=e_step(
            miu_p,sigma_p,nu_r,tau_r,X,sigma,)

        #m step
        miu_p,sigma_p,nu_r,tau_r=m_step(
            miu_pr_y,miu_pr_z,sigma_pr_yy,sigma_pr_zz)

        counter+=1

        #use lower bound to determine if converged
        lower_bound_old=lower_bound
        lower_bound=get_lower_bound(miu_pr_y,miu_pr_z,
                                    sigma_pr_yy,
                                    sigma_pr_zz,
                                    miu_p,sigma_p,
                                    nu_r,tau_r)
        # NOTE(review): truthiness check also skips a previous bound of
        # exactly 0.0 — presumably meant "is not None"; verify intent
        if lower_bound_old and \
        np.abs(lower_bound/lower_bound_old-1)<tolerance:
            if diagnosis:
                print(f'{counter} iterations to reach convergence\n')
            # row 0 / column 0 carry the per-item and per-reviewer values
            # (every row/column is a repeat of the same vector)
            return miu_p[0],sigma_p[0],nu_r[:,0],tau_r[:,0]

    print('Not converged. Consider increase number of iterations or tolerance')
    return miu_p[0],sigma_p[0],nu_r[:,0],tau_r[:,0]

#create ridgeline plot
#the input data should be 2 dimensional
#one column for variable name
#the other for value
def create_ridgeline(data,values,variables,
                     title,xlabel,cmap):
    """Draw overlapping per-variable KDE plots (a ridgeline/joyplot).

    data: long-form DataFrame; `values`/`variables` are its column names.
    `title`, `xlabel`, `cmap` control the figure decoration. Shows the
    figure as a side effect; returns None.
    """

    #crucial!
    #avoid bottom axis overlap
    sns.set_theme(style="white",
                  rc={"axes.facecolor":(0, 0, 0, 0)})

    #initialize the size
    g=sns.FacetGrid(data,row=variables,hue=variables,
                    aspect=10,height=.9,palette=cmap)

    #draw density
    g.map(sns.kdeplot,values,
          bw_adjust=.5,clip_on=False,
          fill=True,alpha=1,linewidth=1.5)

    #draw density boundary
    g.map(sns.kdeplot,values,clip_on=False,
          color="w",lw=2,bw_adjust=.5)

    #draw x axis
    g.map(plt.axhline,y=0,lw=2,clip_on=False)

    #define and use a simple function
    #to label the plot in axes coordinates
    def label(x,color,label):
        # writes the facet's own label to the left of its axes
        ax=plt.gca()
        ax.text(-.2,.2,label.replace(' ','\n'),
                fontweight="bold",color=color,
                ha="left",va="center",
                transform=ax.transAxes)

    #draw labels
    g.map(label,values)

    #set the subplots to overlap
    #think of it as tight layout
    g.fig.subplots_adjust(hspace=-.25)

    #remove axes details that don't play well with overlap
    g.set_titles("")
    g.set(yticks=[])
    g.set_xlabels(xlabel)
    g.despine(bottom=True,left=True)
    g.fig.suptitle(title,fontweight="bold")
    plt.show()

# ### ETL

# +
#read data
y0matrix2019=pd.read_csv('y0matrix2019.csv')
y1matrix2020=pd.read_csv('y1matrix2020.csv')
monthly=pd.read_csv('monthly.csv')
annual=pd.read_csv('annual.csv')

# +
#set index
y0matrix2019.set_index('Source Name',inplace=True)
y1matrix2020.set_index('Source Name',inplace=True)
monthly.set_index('Date',inplace=True)
monthly.index=pd.to_datetime(monthly.index)
monthly.columns=y0matrix2019.columns
annual=annual.pivot(index='Date',
                    columns='Name',values='Value')
annual.index=pd.to_datetime(annual.index)
annual.columns=y0matrix2019.columns
# -

#normalize forecast by pct return
# forecasts divided by the 2019-08-31 spot row, minus 1 → percent returns
y0_mat_nor=np.mat(
    np.divide(y0matrix2019,
              monthly['2019-08-31':'2019-08-31'])-1)
y1_mat_nor=np.mat(
    np.divide(y1matrix2020,
              monthly['2019-08-31':'2019-08-31'])-1)

#compute unit volatility
unit_volatility=monthly.std()/monthly.mean()

# ### Run Model and Forecast

y0_params=platt_burges(y0_mat_nor)
miu_p,sigma_p,nu_r,tau_r=y0_params

# +
#revert to price forecast
y0_forecast=(miu_p+1)*monthly['2019-08-31':'2019-08-31']

#compute model error
pb_erreur_y0=abs(np.divide(y0_forecast,
                           annual['2019':'2019'])-1)

#compute simple average error
avg_erreur_y0=abs(np.divide(
    y0matrix2019.mean(axis=0).tolist(),
    annual['2019':'2019'])-1)

#compute spot price error
spot_erreur_y0=abs(np.divide(
    monthly['2019-08':'2019-08'],
    annual['2019':'2019'])-1)

# +
#create dict
erreur_by_model_y0=list(
    pb_erreur_y0.to_dict(orient='index').values())[0]
erreur_by_cons_y0=list(
    avg_erreur_y0.to_dict(orient='index').values())[0]
erreur_by_spot_y0=list(
    spot_erreur_y0.to_dict(orient='index').values())[0]

#sort by spot error
erreur_by_spot_y0=dict(
    sorted(erreur_by_spot_y0.items(),key=lambda x:x[1]))
# -

y1_params=platt_burges(y1_mat_nor)
miu_p,sigma_p,nu_r,tau_r=y1_params

# +
#revert to price forecast
y1_forecast=(miu_p+1)*monthly['2019-08-31':'2019-08-31']

#compute model error
pb_erreur_y1=abs(np.divide(y1_forecast,
                           annual['2020':'2020'])-1)

#compute simple average error
avg_erreur_y1=abs(np.divide(
    y1matrix2020.mean(axis=0).tolist(),
    annual['2020':'2020'])-1)

#compute spot price error
spot_erreur_y1=abs(
    np.divide(monthly['2019-08':'2019-08'],
              annual['2020':'2020'])-1)

# +
#create dict
erreur_by_model_y1=list(
    pb_erreur_y1.to_dict(orient='index').values())[0]
erreur_by_cons_y1=list(
    avg_erreur_y1.to_dict(orient='index').values())[0]
erreur_by_spot_y1=list(
    spot_erreur_y1.to_dict(orient='index').values())[0]

#sort by spot error
erreur_by_spot_y1=dict(
    sorted(erreur_by_spot_y1.items(),key=lambda x:x[1]))
# -

# ### Bar Chart Viz

# +
#viz
# per-commodity group layout: three bars (consensus/model/spot) per group
section_width=7
bar_width=4
intra_width=2
ticks=[]

ax=plt.figure(figsize=(10,5)).add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)

for ind,val in enumerate(erreur_by_spot_y0):
    x1=(ind+1)*section_width+(3*ind)*bar_width+ \
        (2*ind)*intra_width
    x2=x1+bar_width+intra_width
    x3=x2+bar_width+intra_width
    bar1=plt.bar(x1,erreur_by_cons_y0[val],
                 color='#DBCEB0',width=bar_width)
    bar2=plt.bar(x2,erreur_by_model_y0[val],
                 color='#d8cfc5',width=bar_width)
    bar3=plt.bar(x3,erreur_by_spot_y0[val],
                 color='#eda2a0',width=bar_width)
    ticks.append(x2)

line,=plt.plot(ticks,
               unit_volatility[erreur_by_spot_y0.keys()],
               c='#100917')
plt.xticks(ticks,
           [i.replace(
               ' ','\n') for i in erreur_by_spot_y0.keys()],
           fontsize=8)
plt.legend((line,bar1,bar2,bar3),
           ('Unit Volatility',
            'Consensus Error',
            'Model Error',
            'Spot Error'),loc=0)
plt.xlabel('Commodities')
plt.ylabel('Percentage')
plt.title('Y+0 Forecast Error by Commodities')
plt.show()

# +
#viz
# same layout as above, for the Y+1 horizon
section_width=7
bar_width=4
intra_width=2
ticks=[]

ax=plt.figure(figsize=(10,5)).add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)

for ind,val in enumerate(erreur_by_spot_y1):
    x1=(ind+1)*section_width+(3*ind)*bar_width+ \
        (2*ind)*intra_width
    x2=x1+bar_width+intra_width
    x3=x2+bar_width+intra_width
    bar1=plt.bar(x1,erreur_by_cons_y1[val],
                 color='#bab81e',width=bar_width)
    bar2=plt.bar(x2,erreur_by_model_y1[val],
                 color='#a2aac5',width=bar_width)
    bar3=plt.bar(x3,erreur_by_spot_y1[val],
                 color='#B2B2B2',width=bar_width)
    ticks.append(x2)

line,=plt.plot(ticks,
               unit_volatility[erreur_by_spot_y1.keys()],
               c='#eb5c27')
plt.xticks(ticks,
           [i.replace(
               ' ','\n') for i in erreur_by_spot_y1.keys()],
           fontsize=8)
plt.legend((line,bar1,bar2,bar3),
           ('Unit Volatility',
            'Consensus Error',
            'Model Error',
            'Spot Error'),loc=0)
plt.xlabel('Commodities')
plt.ylabel('Percentage')
plt.title('Y+1 Forecast Error by Commodities')
plt.show()
# -

# ### Distribution Chart Viz

# +
#unpack
_,_,nu_r,tau_r=y0_params

#sort by mean of banks bias
y0_gaussian={}
for ind,val in enumerate(y0matrix2019.index):
    # (bias mean, bias std, that bank's raw normalized forecasts)
    y0_gaussian[val]=(nu_r[ind].item(),
                      tau_r[ind].item()**0.5,
                      y0_mat_nor[ind].ravel().tolist()[0])
y0_gaussian=dict(sorted(y0_gaussian.items(),
                        key=lambda x:x[1][0]))

#create dataframe
ridgeline=pd.DataFrame()
ridgeline['value']=[
    j for i in y0_gaussian for j in y0_gaussian[i][2]]
ridgeline['banks']=[
    j+f' µ={round(y0_gaussian[i][0],6)} σ={round(y0_gaussian[i][1],6)}'
    for i in y0_gaussian for j in [i]*len(y0_gaussian[i][2])]

# +
cmap="gist_heat"
values='value'
variables='banks'
title='Investment Bank Y+0 Forecast Bias'
xlabel="Forecast Return (%)"

create_ridgeline(ridgeline,values,variables,
                 title,xlabel,cmap)

# +
#unpack
_,_,nu_r,tau_r=y1_params

#sort by mean of banks bias
y1_gaussian={}
for ind,val in enumerate(y1matrix2020.index):
    y1_gaussian[val]=(nu_r[ind].item(),
                      tau_r[ind].item()**0.5,
                      y1_mat_nor[ind].ravel().tolist()[0])
y1_gaussian=dict(sorted(y1_gaussian.items(),
                        key=lambda x:x[1][0]))

#create dataframe
ridgeline=pd.DataFrame()
ridgeline['value']=[
    j for i in y1_gaussian for j in y1_gaussian[i][2]]
ridgeline['banks']=[
    j+f' µ={round(y1_gaussian[i][0],6)} σ={round(y1_gaussian[i][1],6)}'
    for i in y1_gaussian for j in [i]*len(y1_gaussian[i][2])]

# +
cmap="gist_earth"
values='value'
variables='banks'
title='Investment Bank Y+1 Forecast Bias'
xlabel="Forecast Return (%)"

create_ridgeline(ridgeline,values,variables,
                 title,xlabel,cmap)
# -
Wisdom of Crowds project/platt burges.ipynb