code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %load_ext autoreload # %autoreload 2 import datetime print(datetime.datetime.now()) from pygentoolbox import Tools from pygentoolbox import ReadSubtractionByGenomeMappingColorsProportion as readsubtract #dir(pygentoolbox.Tools) # %matplotlib inline import matplotlib.pyplot as plt # - import os print(os.getcwd()) # + totalreads = 4554180 # ran wc -l *.fastq (=18216720)on the cleaned reads in the fastp folder (number of cleaned reads from insterts 15-35) fileextension='.fastq.gz' directory = '/media/sf_LinuxShare/Projects/Theresa/Hisat2/FlagHA_Pt08/Inserts53Later' outplotname = 'Ptiwi08RIP_53Later_Inserts_Stacked_Barplot_Colors_Proportion.pdf' startseqlength = 15 cleanreads=False blastdatabase = '/media/sf_LinuxShare/Humans/Genome/Seqs/GRCh38_top_level.fa' aligndatabaselist=['/media/sf_LinuxShare/Ciliates/Genomes/Hisat2_Indexes/Kpne_CompleteGenome',\ '/media/sf_LinuxShare/Ciliates/Genomes/Hisat2_Indexes/Vector_FlagHA_Dcl5',\ '/media/sf_LinuxShare/Ciliates/Genomes/Hisat2_Indexes/Pt_mtGenome',\ '/media/sf_LinuxShare/Ciliates/Genomes/Hisat2_Indexes/Pt_51_Mac',\ '/media/sf_LinuxShare/Ciliates/Genomes/Hisat2_Indexes/Pt_51_MacAndIES',\ '/media/sf_LinuxShare/Ciliates/Genomes/Hisat2_Indexes/Pt_51_Mic2'] # '/media/sf_LinuxShare/Ciliates/Genomes/Hisat2_Indexes/OESv1',\ colors = ['black', \ 'darkviolet', \ 'orange', \ 'forestgreen', \ 'red', \ 'dodgerblue', \ 'grey'] # this is length of align databases + 1 because we need a color for unmapped # Vector, color=‘darkviolet’ # MAC, color=‘forestgreen’ # IES, color=‘red’ # OES, color=‘dodgerblue’ # Mitochondrial, color=‘orange’ # Klebsiella, color=‘black’ # Unmapped, color=‘grey’ readsubtract.main(totalreads, fileextension, directory, outplotname, startseqlength, cleanreads, blastdatabase, aligndatabaselist, 
colors) # -
notebooks/Therese/ReadSubtractionByGenomeMapping_Ptiwi08RIPsRNA_53Later_ColorsProportion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Tests whether a data sample has a Gaussian distribution. # Example of the Shapiro-Wilk Normality Test from scipy.stats import shapiro data = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869,-3.817] stat, p = shapiro(data) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably Gaussian') else: print('Probably not Gaussian') import warnings warnings.filterwarnings('ignore') from matplotlib import pyplot as plt import seaborn as sns sns.distplot(data) # Example of the D'Agostino's K^2 Normality Test from scipy.stats import normaltest data = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869] stat, p = normaltest(data) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably Gaussian') else: print('Probably not Gaussian') # Example of the Anderson-Darling Normality Test from scipy.stats import anderson data = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869] result = anderson(data) print('stat = %.3f' % (result.statistic)) for i in range(len(result.critical_values)): sl, cv = result.significance_level[i], result.critical_values[i] if result.statistic < cv: print('Probably Gaussian at the %.1f%% level' % (sl)) else: print('Probably not Gaussian at the %.1f%% level' % (sl)) # Example of the Pearson's Correlation test #Tests whether two samples have a linear relationship. 
from scipy.stats import pearsonr data1 = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869] data2 = [0.353, 3.517, 0.125, -7.545, -0.555, -1.536, 3.350, -1.578, -3.537, -1.579] stat, p = pearsonr(data1, data2) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably independent') else: print('Probably dependent') plt.scatter(data1,data2) from scipy.stats import spearmanr #Tests whether two samples have a monotonic relationship. data1 = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869] data2 = [0.353, 3.517, 0.125, -7.545, -0.555, -1.536, 3.350, -1.578, -3.537, -1.579] stat, p = spearmanr(data1, data2) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably independent') else: print('Probably dependent') # Example of the Kendall's Rank Correlation Test #Tests whether two samples have a monotonic relationship. from scipy.stats import kendalltau data1 = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869] data2 = [0.353, 3.517, 0.125, -7.545, -0.555, -1.536, 3.350, -1.578, -3.537, -1.579] stat, p = kendalltau(data1, data2) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably independent') else: print('Probably dependent') # Example of the Chi-Squared Test #Tests whether two categorical variables are related or independent. from scipy.stats import chi2_contingency table = [[10, 20, 30],[6, 9, 17]] stat, p, dof, expected = chi2_contingency(table) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably independent') else: print('Probably dependent') # ### Parametric Statistical Hypothesis Tests¶ # Example of the Student's t-test #Tests whether the means of two independent samples are significantly different. 
from scipy.stats import ttest_ind data1 = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869] data2 = [1.142, -0.432, -0.938, -0.729, -0.846, -0.157, 0.500, 1.183, -1.075, -0.169] stat, p = ttest_ind(data1, data2) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably the same distribution') else: print('Probably different distributions') print(shapiro(data1)) print(shapiro(data2)) sns.distplot(data1) sns.distplot(data2) # Example of the Paired Student's t-test #Tests whether the means of two paired samples are significantly different. from scipy.stats import ttest_rel data1 = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869] data2 = [1.142, -0.432, -0.938, -0.729, -0.846, -0.157, 0.500, 1.183, -1.075, -0.169] stat, p = ttest_rel(data1, data2) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably the same distribution') else: print('Probably different distributions') sns.distplot(data1) sns.distplot(data2) # Example of the Analysis of Variance Test #Tests whether the means of two or more independent samples are significantly different. from scipy.stats import f_oneway data1 = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869] data2 = [1.142, -0.432, -0.938, -0.729, -0.846, -0.157, 0.500, 1.183, -1.075, -0.169] data3 = [-0.208, 0.696, 0.928, -1.148, -0.213, 0.229, 0.137, 0.269, -0.870, -1.204] stat, p = f_oneway(data1, data2, data3) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably the same distribution') else: print('Probably different distributions') sns.distplot(data1) sns.distplot(data2) sns.distplot(data3) # ### Nonparametric Statistical Hypothesis Tests¶ # Example of the Mann-Whitney U Test #Tests whether the distributions of two independent samples are equal or not. 
from scipy.stats import mannwhitneyu data1 = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869] data2 = [1.142, -0.432, -0.938, -0.729, -0.846, -0.157, 0.500, 1.183, -1.075, -0.169] stat, p = mannwhitneyu(data1, data2) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably the same distribution') else: print('Probably different distributions') print(shapiro(data1)) print(shapiro(data2)) #Compare with parametric t_test from scipy.stats import ttest_ind ttest_ind(data1,data2) sns.distplot(data1) sns.distplot(data2) # Example of the Wilcoxon Signed-Rank Test #Tests whether the distributions of two paired samples are equal or not. from scipy.stats import wilcoxon data1 = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869] data2 = [1.142, -0.432, -0.938, -0.729, -0.846, -0.157, 0.500, 1.183, -1.075, -0.169] stat, p = wilcoxon(data1, data2) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably the same distribution') else: print('Probably different distributions') sns.distplot(data1) sns.distplot(data2) from scipy.stats import kruskal #Tests whether the distributions of two or more independent samples are equal or not. data1 = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869] data2 = [1.142, -0.432, -0.938, -0.729, -0.846, -0.157, 0.500, 1.183, -1.075, -0.169] data3 = [-0.208, 0.696, 0.928, -1.148, -0.213, 0.229, 0.137, 0.269, -0.870, -1.204] stat, p = kruskal(data1, data2,data3) print('stat=%.3f, p=%.3f' % (stat, p)) if p > 0.05: print('Probably the same distribution') else: print('Probably different distributions') sns.distplot(data1) sns.distplot(data2) sns.distplot(data3)
3_Statistical_Tests.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Spyder) # language: python3 # name: python3 # --- # # Lumped Elements Circuits # In this notebook, we construct various network from basic lumped elements (resistor, capacitor, inductor), with the 'classic' and the `Circuit` approach. Generally the `Circuit` approach is more verbose than the 'classic' way for building a circuit. However, as the circuit complexity increases, in particular when components are connected in parallel, the `Circuit` approach is interesting as it increases the readability of the code. Moreover, `Circuit` object can be plotted using its `plot_graph()` method, which is usefull to rapidly control if the circuit is built as expected. import numpy as np # for np.allclose() to check that S-param are similars import skrf as rf rf.stylely() # ## LC Series Circuit # In this section we reproduce a simple equivalent model of a capacitor $C$, as illustrated by the figure below: # # ![](designer_capacitor_simple.png) # # reference LC circuit made in Designer LC_designer = rf.Network('designer_capacitor_30_80MHz_simple.s2p') # scikit-rf: manually connecting networks line = rf.media.DefinedGammaZ0(frequency=LC_designer.frequency, z0=50) LC_manual = line.inductor(24e-9) ** line.capacitor(70e-12) # + # scikit-rf: using Circuit builder port1 = rf.Circuit.Port(frequency=LC_designer.frequency, name='port1', z0=50) port2 = rf.Circuit.Port(frequency=LC_designer.frequency, name='port2', z0=50) line = rf.media.DefinedGammaZ0(frequency=LC_designer.frequency, z0=50) cap = line.capacitor(70e-12, name='cap') ind = line.inductor(24e-9, name='ind') connections = [ [(port1, 0), (cap, 0)], [(cap, 1), (ind, 0)], [(ind, 1), (port2, 0)] ] circuit = rf.Circuit(connections) LC_from_circuit = circuit.network # - # testing the equivalence of the results print(np.allclose(LC_designer.s, LC_manual.s)) 
print(np.allclose(LC_designer.s, LC_from_circuit.s)) circuit.plot_graph(network_labels=True, edge_labels=True, port_labels=True) # ## A More Advanced Equivalent Model # In this section we reproduce an equivalent model of a capacitor $C$, as illustrated by the figure below: # # ![](designer_capacitor_adv.png) # Reference results from ANSYS Designer LCC_designer = rf.Network('designer_capacitor_30_80MHz_adv.s2p') # scikit-rf: usual way, but this time this is more tedious to deal with connection and port number freq = LCC_designer.frequency line = rf.media.DefinedGammaZ0(frequency=freq, z0=50) elements1 = line.resistor(1e-2) ** line.inductor(24e-9) ** line.capacitor(70e-12) elements2 = line.resistor(20e6) T_in = line.tee() T_out = line.tee() ntw = rf.connect(T_in, 1, elements1, 0) ntw = rf.connect(ntw, 2, elements2, 0) ntw = rf.connect(ntw, 1, T_out, 1) ntw = rf.innerconnect(ntw, 1, 2) LCC_manual = ntw ** line.shunt_capacitor(50e-12) # + # scikit-rf: using Circuit builder freq = LCC_designer.frequency port1 = rf.Circuit.Port(frequency=freq, name='port1', z0=50) port2 = rf.Circuit.Port(frequency=freq, name='port2', z0=50) line = rf.media.DefinedGammaZ0(frequency=freq, z0=50) cap = line.capacitor(70e-12, name='cap') ind = line.inductor(24e-9, name='ind') res_series = line.resistor(1e-2, name='res_series') res_parallel = line.resistor(20e6, name='res_parallel') cap_shunt = line.capacitor(50e-12, name='cap_shunt') ground = rf.Circuit.Ground(frequency=freq, name='ground', z0=50) connections = [ [(port1, 0), (res_series, 0), (res_parallel, 0)], [(res_series, 1), (cap, 0)], [(cap, 1), (ind, 0)], [(ind, 1), (cap_shunt, 0), (res_parallel, 1), (port2, 0)], [(cap_shunt, 1), (ground, 0)], ] circuit = rf.Circuit(connections) LCC_from_circuit = circuit.network # - # testing the equivalence of the results print(np.allclose(LCC_designer.s, LCC_manual.s)) print(np.allclose(LCC_designer.s, LCC_from_circuit.s)) circuit.plot_graph(network_labels=True, edge_labels=True, port_labels=True) 
# ## Pass band filter # Below we construct a pass-band filter, from an example given in [Microwaves101](https://www.microwaves101.com/encyclopedias/lumped-element-filter-calculator): # ![](designer_bandpass_filter_450_550MHz.png) # # Reference result calculated from Designer passband_designer = rf.Network('designer_bandpass_filter_450_550MHz.s2p') # scikit-rf: freq = passband_designer.frequency passband_manual = line.shunt_capacitor(25.406e-12) ** line.shunt_inductor(4.154e-9) ** \ line.capacitor(2.419e-12) ** line.inductor(43.636e-9) ** \ line.shunt_capacitor(25.406e-12) ** line.shunt_inductor(4.154e-9) # + # scikit-rf: the filter with the Circuit builder freq = passband_designer.frequency line = rf.media.DefinedGammaZ0(frequency=freq) C1 = line.capacitor(25.406e-12, name='C1') C2 = line.capacitor(2.419e-12, name='C2') C3 = line.capacitor(25.406e-12, name='C3') L1 = line.inductor(4.154e-9, name='L1') L2 = line.inductor(43.636e-9, name='L2') L3 = line.inductor(4.154e-9, name='L3') port1 = rf.Circuit.Port(frequency=freq, name='port1', z0=50) port2 = rf.Circuit.Port(frequency=freq, name='port2', z0=50) ground1 = rf.Circuit.Ground(frequency=freq, name='ground1', z0=50) ground2 = rf.Circuit.Ground(frequency=freq, name='ground2', z0=50) ground3 = rf.Circuit.Ground(frequency=freq, name='ground3', z0=50) ground4 = rf.Circuit.Ground(frequency=freq, name='ground4', z0=50) connections = [ [(port1, 0), (C1, 0), (L1, 0), (C2, 0)], [(C2, 1), (L2, 0)], [(L2, 1), (C3, 0), (L3, 0), (port2, 0)], # grounding must be done on ground ntw having different names [(C1, 1), (ground1, 0)], [(C3, 1), (ground2, 0)], [(L1, 1), (ground3, 0)], [(L3, 1), (ground4, 0)], ] circuit = rf.Circuit(connections) passband_circuit = circuit.network passband_circuit.name = 'Pass-band circuit' # - passband_circuit.plot_s_db(m=0, n=0, lw=2) passband_circuit.plot_s_db(m=1, n=0, lw=2) passband_designer.plot_s_db(m=0, n=0, lw=2, ls='-.') passband_designer.plot_s_db(m=1, n=0, lw=2, ls='-.') 
circuit.plot_graph(network_labels=True, port_labels=True, edge_labels=True)
doc/source/examples/circuit/Lumped Element Circuits.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Using RSView # # This notebook walks through how to use the main scripts for the ``RSView`` project by Jilliane, Kate, and Katie. # # This program is writen to mainly be run on the command line, so most scripts are run as if from the command line (using '!'). # ## Download sequencing data # # Example usage of `seq_download.py` to download respiratory syncytial virus (RSV) G protein sequences and metadata from GenBank. # # Note: This notebook requires that ``rsview`` has been installed. Additionally, if one would like to run particular scripts, ``rsview`` can be imported and specific scripts can be called like any other python module. The code below is for running our package as you would on the command line, which relies on `argparse` arguments for user input. # To get a sense of what `seq_download.py` does, let's first look at the help. # ! seq_download.py --help # There are lots of optional arguments which are mostly required by `Bio.Entrez` for GenBank downloads. # ### Downloading a small set of data # # Next, let's go through downloading data using `seq_download.py`. All optional arguments will remain with their default values except those we need to change to select a random start location and set the download size to 500 sequences. # # The mapping and plotting examples will use the full dataset, but since downloading takes a while, this example just downloads a small portion. # # This example is still for downloading RSV G sequences. Theoretically this workflow could be adapted to other viruses, but the subtype and genotype calling that `seq_download.py` and `genotype.py` do are quite specific to RSV G. # + import random random.seed(1) start = random.choice(range(0, 15000)) print(start) print(start + 500) # - # ! 
seq_download.py --email '<EMAIL>' --query 'human respiratory syncytial virus G' --outdir './demodata' --firstseq 2201 --filesize 500 --maxseqs 2701 # + import pandas as pd demodownload = pd.read_csv('demodata/RSVG_gb_metadata_2201-2701.csv', index_col=0) demodownload[490:] # - # I am only including the last 10 sequences in the dataframe for ease of visualization. # # As seen by the table above, even after rather extensive parsing, the output of `seq_download.py` is still quite raw. # # Furthermore, while most (~90%) of sequences have subtypes, many of them are missing genotypes. Only about one third of the sequences in GenBank have genotypes already assigned. # # To clean the data further and assign additional genotypes, we must next run `genotype.py`. # ## Genotyping and cleaning data # # The `genotype.py` module can assign sequences a genotype if sequences of that genotype have already been annotated in the downloade dsequences. # # This module relies on `.fasta` formatting and `mafft` to assign genotypes. It goes through a progressive alignment and then assigns genotypes based on how similar the alignment output for a sequence lacking a genotype is to the alignment output for 'reference' genotype sequences. The 'reference' genotype sequences are determined based on the longest sequence already annotated for a given genotype. # # Due to the reliance on already genotyped sequences, `genotype.py` requires that there are at least some genotyped and non-genotyped full-length RSV G sequences. # # This is a very rough way to assign genotypes and is not without error. To control somewhat for error, we do require that new genotypes are only assigned to sequences with known subtypes and that the genotype assigned matches the provided subtype. There could be subtype errors in the database, but those should also be rare. # # Furthermore, this method of genotype assignment relies quite heavily on previous genotype sampling and calling, so novel genotypes will not be found. 
Additionally, I have not tested this, but it is probable that genotypes that are more prevalent in the previously genotyped sequences are assigned more frequently. # # Despite these (and other) caveats, the `genotype.py` script is able to genotype about another third of sequences. An example for the demo sequences is shown below along with the help info. # ! genotype.py --help # ! genotype.py --inprefix './demodata/RSVG_gb_metadata' --seqsdir './demodata/seqs' --outdir './demodata/' # + demogenotyped = pd.read_csv('demodata/RSVG_all_genotyped.csv', index_col=0) demogenotyped[490:] # - # As can be seen above, this genotyping scheme, assigned the BA and ON1 genotypes to those two sequences from Argentina. Despite not being very rigorous or scientifically validated, this program seems to work adequately for our purposes. # # It is important to note that due to possible inaccuracies in genotype calling and a rather limited number of sequences genotyped, it is difficult to draw many genotype-level conclusions about these GenBank sequences. # # Importantly, the simple fact that sequences tend to be deposited into GenBank in bursts when big sequencing experiments are carried out rather than regularly as a part of surveillance limits the conclusions we can draw from this data. While this project would benefit greatly from additional data, this project using the currently available GenBank data is a good starting point for looking at the global distribution of RSV sequences. Below, we go through how to visualize this data using our package, `rsview`. # # map_rsv.py # map_rsv.py can be used to plot the global distribution of all available RSV sequences according to the collection location of each sequence. map_rsv.py looks in rsview/data/ for the output files of seq_download.py. If these files are not present, an error message is thrown notifying the user to run seq_download.py before map_rsv.py. 
# # map_rsv.py will aggregate all downloaded sequences by their collection date and location (at country resolution) and display these numbers on a global map. The user must specify whether map_rsv.py should display viral sequences grouped by genotype or subtype. Other, optional arguments, allow the user to specify a specific temporal range to display and whether or not genotypes should be further grouped into clades. # #### For help determining specifying arguments, use "-h" # ! map_rsv.py -h # #### To map available RSV sequences grouped by subtype (A vs. B) from the data in the directory `data` # ! map_rsv.py subtype ../rsview/data # #### To map available RSV sequences by genotype group. Note: this is equivalent to specifying the optional argument `--genotype-level collapse` # ! map_rsv.py genotype ../rsview/data # #### To map available RSV sequences by genotype # ! map_rsv.py genotype ../rsview/data --genotype-level all # #### To map available RSV sequences that were collected during a specific time range. Note: the default range is [1990,2018] # # ! map_rsv.py subtype ../rsview/data --years [1960,2000] # #### To map available RSV sequences with no filter on the collection time # # ! map_rsv.py genotype ../rsview/data --years all # # plot_rsv.py # plot_rsv.py can be used to generate graphs for the viewing and analysis of the dataset on child death rates from acute respiratory infection, which we are using as a proxy for RSV disease severity. # #### For help determining which arguments to use to plot the appropriate health data, use "-h" # ! plot_rsv.py -h # #### To plot health data for reach country, averaged from 2000-2016: # ! plot_rsv.py all rufive9 ../rsview/data # #### Use the optional argument "--highlight_country" to emphasize the country of interest in a full plot # ! plot_rsv.py all rufive9 ../rsview/data --highlight_country='Kenya' # #### To plot health data by year for a single country, use level='country' and the optional "--country" argument # ! 
plot_rsv.py country rufive9 ../rsview/data --country='Kenya' # #### If you don't specify a country, it will plot global results over time # ! plot_rsv.py country rufive9 ../rsview/data # # # # # plot_correlation.py # plot_correlation.py can be used to calculate the prevalence of different RSV subtypes in the RSV sequence dataset and plot that against the dataset on child death rates from acute respiratory infection. This can be used to check for correlation between subtype prevalence or switching between subtypes and the severity of the disease. # #### Use plot_correlation.py with level='all' to generate a scatterplot of a health metric vs the ratio of subtype A to subtype B in each country # ! plot_correlation.py all rufive9 ../rsview/data # #### To break the data for each country into per-year subtype data (colored by year), use level='year' # ! plot_correlation.py year rufive9 ../rsview/data
examples/rsview_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Day 11 # # * Computational Physics (PHYS 202) # * Cal Poly, Spring 2015 # * <NAME> # # # ## In class # * Go over midterm # - Discuss solutions # - Grade distribution # # * Coding tips and help # - Difference between `return` and `print`. # - Transforming data between different container types. # - How to count things. # - Comments on performance. # # * Fetch today's material: # # nbgrader fetch phys202-2015 day11 # nbgrader fetch phys202-2015 assignment08 # ## Coding tips and help # ### Different between `return` and `print` # A function that has no `return` statement will always return `None`: def f(x): print(x**2) a = f(2) print(a) # If you want a function to do anything useful, you have to return something: def g(x): return x**2 b = g(2) print(b) # ### Transforming container types # String can be turned into lists and tuples: import random alpha = 'abcdefghijklmnopqrstuvwxyz' l = [random.choice(alpha) for i in range(10)] l s = ''.join(l) s for c in s: print(c) [c.upper() for c in s] list(s) tuple(s) digits = str(1001023039) list(digits) [int(d) for d in digits] # ### How to count things def random_string(n): return ''.join([random.choice(alpha) for i in range(n)]) random_string(100) def count0(seq): counts = {} for s in seq: counts[s] = seq.count(s) return counts rs = random_string(10) count0(rs), rs # %timeit count0(random_string(10000)) def count1(seq): counts = {} for s in seq: if s in counts: counts[s] += 1 else: counts[s] = 1 return counts rs = random_string(10) count1(rs), rs # %timeit count1(random_string(10000)) # + from collections import defaultdict def count2(seq): counts = defaultdict(int) for s in seq: counts[s] += 1 return counts # - rs = random_string(10) count2(rs), rs # %timeit count2(random_string(10000)) from collections 
import Counter def count3(seq): return dict(Counter(seq)) rs = random_string(10) count3(rs), rs # %timeit count3(random_string(10000)) # ## Out of class # * Complete `assignment08` by Wednesday (Day 12).
days/day11/Day11.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Загрузка зависимостей import numpy import pandas import matplotlib.pyplot from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler # Загрузка и анализ набора данных raw_dataset = pandas.read_csv('machine.data.csv', header=None) # Убедиться в правильности пути к файлу! raw_dataset.head(10) # Вывод первых 10 строк # Размер набора данных print(raw_dataset.shape) # Создаем набор данных, в котором будут храниться обработанные данные dataset = pandas.DataFrame() # Столбец №0 data = raw_dataset[0] new_data = pandas.get_dummies(data) print(new_data) dataset = new_data print(dataset) # Столбец №2 data = raw_dataset[2] print(data) matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() matplotlib.pyplot.hist(numpy.log(data), bins = 50) matplotlib.pyplot.show() data = numpy.log(data) print(numpy.min(data)) print(numpy.max(data)) scaler = MinMaxScaler() data = numpy.array(data).reshape(-1,1) data = scaler.fit_transform(data) data = data.flatten() print(numpy.min(data)) print(numpy.max(data)) matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() dataset['2'] = data print(dataset) # + # Обработка данных в столбце №3 (MMIN: minimum main memory in kilobytes (integer)) # Загружаем данные data = raw_dataset[3] # Анализируем распределение, используя гистограмму. Параметр bins отвечает за число столбцов в гистрограмме. matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # + # Наблюдаем проблему №4 - выброс в районе 32000. Применяем отсечение с разрешенным интервалом от 0 до 16000. data = numpy.clip(data, 0, 16000) # Результат matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # + # Наблюдаем проблему №3 - очень неравномерное распределение. 
Попробуем применить к данным логарифм и извлечение квадратного корня. matplotlib.pyplot.hist(numpy.log(data), bins = 50) matplotlib.pyplot.show() matplotlib.pyplot.hist(data ** 0.5, bins = 50) matplotlib.pyplot.show() # - # Логарифм даёт более равномерно распределенные данные, используем его data = numpy.log(data) # Теперь данные имеют следующую область значений print(numpy.min(data)) print(numpy.max(data)) # Приводим значения к интервалу (0, 1), считая, что они ближе к равномерному распределению scaler = MinMaxScaler() data = numpy.array(data).reshape(-1,1) data = scaler.fit_transform(data) # Результат matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # Можем взглянуть на сами данные matplotlib.pyplot.plot(data) matplotlib.pyplot.show() # Проверяем и убеждаемся, что в процессе трансформации данные получили "лишнее" измерение print(data.ndim) # Конвертируем в одномерный массив data = data.flatten() # Сохраняем в итоговом наборе данных dataset['MMIN'] = data # Столбец №4 data = raw_dataset[4] print(data) matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # + # Наблюдаем проблему №4 - выброс в районе 32000. Применяем отсечение с разрешенным интервалом от 0 до 16000. 
data = numpy.clip(data, 0, 32000) # Результат matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # + matplotlib.pyplot.hist(numpy.log(data), bins = 50) matplotlib.pyplot.show() matplotlib.pyplot.hist(data ** 0.5, bins = 50) matplotlib.pyplot.show() # - data = data ** 0.5 sscaler = MinMaxScaler() data = numpy.array(data).reshape(-1,1) data = scaler.fit_transform(data) data = data.flatten() matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() dataset['4'] = data print(dataset) # Столбец №5 data = raw_dataset[5] print(data) matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # + data = numpy.clip(data, 0, 170) # Результат matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # - matplotlib.pyplot.hist(data ** 0.5, bins = 50) matplotlib.pyplot.show() data = data ** 0.5 sscaler = MinMaxScaler() data = numpy.array(data).reshape(-1,1) data = scaler.fit_transform(data) data = data.flatten() matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() dataset['5'] = data print(dataset) # Столбец №6 data = raw_dataset[6] print(data) matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # + data = numpy.clip(data, 1, 32) # Результат matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # - data = numpy.log(data) sscaler = MinMaxScaler() data = numpy.array(data).reshape(-1,1) data = scaler.fit_transform(data) data = data.flatten() matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() dataset['6'] = data print(dataset) # + # Обработка данных в столбце №7 (CHMAX: maximum channels in units (integer)) # Загружаем данные data = raw_dataset[7] # Анализируем распределение, используя гистограмму. Параметр bins отвечает за число столбцов в гистрограмме. matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # + # Наблюдаем проблему №4 - выбросы значений в интервале (100, 175). Применяем отсечение с разрешенным интервалом от 0 до 70. 
data = numpy.clip(data, 0, 70) # Результат matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # + # Наблюдаем проблему №3 - очень неравномерное распределение. Применять логарифм нельзя, т.к. среди значений есть нули. # Применим извлечение квадратного корня. matplotlib.pyplot.hist(data ** 0.5, bins = 50) matplotlib.pyplot.show() # - data = data ** 0.5 # Теперь данные имеют следующую область значений print(numpy.min(data)) print(numpy.max(data)) # Приводим значения к интервалу (0, 1), считая, что они ближе к равномерному распределению scaler = MinMaxScaler() data = numpy.array(data).reshape(-1,1) data = scaler.fit_transform(data) # Результат matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # Можем взглянуть на сами данные matplotlib.pyplot.plot(data) matplotlib.pyplot.show() # Проверяем и убеждаемся, что в процессе трансформации данные получили "лишнее" измерение print(data.ndim) # Конвертируем в одномерный массив data = data.flatten() # Сохраняем в итоговом наборе данных dataset['CHMAX'] = data print(dataset) # Столбец №8 data = raw_dataset[8] print(data) matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # + data = numpy.clip(data, 0, 600) # Результат matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() # + matplotlib.pyplot.hist(numpy.log(data), bins = 50) matplotlib.pyplot.show() matplotlib.pyplot.hist(data ** 0.5, bins = 50) matplotlib.pyplot.show() # - data = numpy.log(data) sscaler = MinMaxScaler() data = numpy.array(data).reshape(-1,1) data = scaler.fit_transform(data) data = data.flatten() matplotlib.pyplot.hist(data, bins = 50) matplotlib.pyplot.show() dataset['8'] = data print(dataset) dataset.to_csv('prepared_data.csv')
data_preparation_all.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Homework 2: Control Structures # Please complete this homework assignment in code cells in the iPython notebook. Include comments in your code when necessary. Please rename the notebook as SIS ID_HW02.ipynb (your student ID number) and save the notebook once you have executed it as a PDF (note, that when saving as PDF you don't want to use the option with latex because it crashes, but rather the one to save it directly as a PDF). # # **The homework should be submitted on bCourses under the Assignments tab (both the .ipynb and .pdf files). Please label it by your student ID number (SIS ID)** # ## Problem 1: Binomial Coefficients # # [Adapted from Newman, Exercise 2.11] The binomial coefficient $n \choose k$ is an integer equal to # # $$ {n \choose k} = \frac{n!}{k!(n-k)!} = \frac{n \times (n-1) \times (n-2) \times \cdots \times (n-k + 1)}{1 \times 2 \times \cdots \times k} $$ # # when $k \geq 1$, or ${n \choose 0} = 1$ when $k=0$. (The special case $k=0$ can be included in the general definition by using the conventional definition $0! \equiv 1$.) # # 1. Write a function `factorial(n)` that takes an integer $n$ and returns $n!$ as an integer. It should yield $1$ when $n=0$. You may assume that the argument will also be an integer greater than or equal to 0. # # 1. Using the form of the binomial coefficient given above, write a function `binomial(n,k)` that calculates the binomial coefficient for given $n$ and $k$. Make sure your function returns the answer in the form of an integer (not a float) and gives the correct value of 1 for the case where $k=0$. (Hint: Use your `factorial` function from Part 1.) # # 1. 
Using your `binomial` function, write a function `pascals_triangle(N)` to print out the first $N$ lines of "Pascal's triangle" (starting with the $0$th line). The $n$th line of Pascal's triangle contains $n+1$ numbers, which are the coefficients $n \choose 0$, $n \choose 1$, and so on up to $n \choose n$. Thus the first few lines are # 1 # 1 1 # 1 2 1 # 1 3 3 1 # 1 4 6 4 1 # This would be the result of `pascals_triangle(5)`. Print the first 10 rows of Pascal's triangle. # # 1. The probability that an unbiased coin, tossed $n$ times, will come up heads $k$ times is ${n \choose k} / 2^n$. (Or instead of coins, perhaps you'd prefer to think of spins measured in a [Stern-Gerlach experiment](https://en.wikipedia.org/wiki/Stern%E2%80%93Gerlach_experiment).) # - Write a function `heads_exactly(n,k)` to calculate the probability that a coin tossed $n$ times comes up heads exactly $k$ times. # - Write a function `heads_atleast(n,k)` to calculate the probability that a coin tossed $n$ times comes up heads $k$ or more times. # - Print the probabilities (to four decimal places) that a coin tossed 100 times comes up heads exactly 70 times, and at least 70 times. You should print corresponding statements with the numbers so it is clear what they each mean. # # 1. Along with the printed statements from Part 4, have your code generate and display two labelled plots for `heads_exactly(n,k)` and `heads_atleast(n,k)` with $n=100$. You should have values of $k$ on the $x$-axis, and probabilities on the $y$-axis. (Note that $k$ only takes integer values from 0 to $n$, inclusive. Your plots can be connected curves or have discrete markers for each point; either is fine.) # # #### Output # # To summarize, your program should output the following things: # # 1. The first 10 rows of Pascal's triangle # 1. 
# The probabilities (to four decimal places) that a coin tossed 100 times comes up heads exactly 70 times, and at least 70 times, with corresponding statements so it is clear what each number signifies.
# 1. Two labeled plots for `heads_exactly(n,k)` and `heads_atleast(n,k)` with $n=100$, representing probability distributions for 100 coin flips.
#
# #### Reminder
#
# Remember to write informative doc strings, comment your code, and use descriptive function and variable names so others (and future you) can understand what you're doing!
#

# +
'''The numpy library has a lot of useful functions and we always use matplotlib
for plotting, so it's generally a good idea to import them at the beginning.'''
import numpy as np
import matplotlib.pyplot as plt


def factorial(n):
    """Returns the factorial of n (yielding 1 when n = 0)"""
    return_value = 1
    # Try using a for loop to update the return_value and calculate n!
    return return_value


# -

def binomial(n, k):
    """Returns the binomial coefficient n choose k"""
    # Use a conditional statement to return 1 in the case k = 0
    return  # Use factorial(n) to calculate the binomial coefficient


def pascals_triangle(N):
    """Prints out the first N rows of Pascal's triangle (rows 0 through N-1)"""
    # A "double for loop" has been set up below;
    # Python goes through the entire inner loop during each pass through the outer loop.
    # NOTE: this previously looped over range(0, N + 1), which prints N + 1 rows;
    # the assignment (and the pascals_triangle(5) example) asks for exactly N rows.
    for row in range(0, N):  # outer loop; each pass corresponds to one row of the triangle
        for k in range(0, row + 1):  # inner loop; each pass corresponds to a number on the row
            # Code here is part of each inner loop iteration (i.e. print a binomial coefficient)
            pass  # placeholder so the skeleton is valid Python -- replace with your code
        # Code here is part of the outer loop
        pass  # placeholder so the skeleton is valid Python -- replace with your code
    # This function doesn't need to return anything


# +
def heads_exactly(n, k):
    """Returns the probability of getting k heads if you flip a coin n times"""
    return  # Use binomial(n,k) to calculate the probability


# -

def heads_atleast(n, k):
    """Returns the probability of getting at least k heads if you flip a coin n times"""
    total_prob = 0
    # Use a for loop and heads_exactly(n,k) to update total_prob
    return total_prob


# +
# Now use your defined functions to produce the desired outputs

# For the plots, the np.arange() function is useful for creating a numpy array of integers
k_values = np.arange(1, 101)  # integers from 1 to 100 (lower bound is inclusive; upper bound is exclusive)
# -

# ## Problem 2: Semi-Empirical Mass Formula
#
# [Adapted from Newman, Exercise 2.10] In nuclear physics, the semi-empirical mass formula is a formula for calculating the approximate nuclear binding energy $B$ of an atomic nucleus with atomic number $Z$ and mass number $A$:
#
# $$ B = a_V A - a_S A^{2/3} - a_C \frac{Z^2}{A^{1/3}} - a_A \frac{(A-2Z)^2}{A} + \delta\frac{a_P}{A^{1/2}}, $$
#
# where, in units of millions of electron volts (MeV), the constants are $a_V = 14.64$, $a_S = 14.08$, $a_C = 0.64$, $a_A = 21.07$, $a_P=11.54$, and
#
# $$ \delta = \begin{cases}
# 0 & \text{if } A \text{ is odd,}\\
# +1 & \text{if } A \text{ and } Z \text{ are both even,} \\
# -1 & \text{if } A \text{ is even and } Z \text{ is odd.}
# \end{cases} $$
# The values above are taken from <NAME> <i>et al.</i>, NUCL SCI TECH <b>31</b>, 9 (2020); https://doi.org/10.1007/s41365-019-0718-8
#
#
# 1. Write a function `binding_energy(A, Z)` that takes as its input the values of $A$ and $Z$, and returns the binding energy for the corresponding atom. Check your function by computing the binding energy of an atom with $A = 58$ and $Z = 28$. (Hint: The correct answer is around 490 MeV.)
#
# 1. 
Write a function `binding_energy_per_nucleon(A, Z)` which returns not the total binding energy $B$, but the binding energy per nucleon, which is $B/A$. # # 1. Write a function `max_binding_energy_per_nucleon(Z)` which takes as input just a single value of the atomic number $Z$ and then goes through all values of $A$ from $A = Z$ to $A = 3Z$, to find the one that has the largest binding energy per nucleon. This is the most stable nucleus with the given atomic number. Have your function return the value of $A$ for this most stable nucleus and the value of the binding energy per nucleon. # # 1. Finally, use the functions you've written to write a program which runs through all values of $Z$ from 1 to 100 and prints out the most stable value of $A$ for each one. At what value of $Z$ does the maximum binding energy per nucleon occur? (The true answer, in real life, is $Z = 28$, which is nickel. You should find that the semi-empirical mass formula gets the answer roughly right, but not exactly.) # # #### Output # # Your final output should look like # # Z = 1 : most stable A is 2 # Z = 2 : most stable A is 4 # . # . # . # Z = 10 : most stable A is 20 # Z = 11 : most stable A is 23 # . # . # . # Z = 100 : most stable A is 210 # The most stable Z is ____ # with binding energy per nucleon ____ # # With the ...'s and ____'s replaced with your results. The binding energy per nucleon in the last line should have three decimal places. # # For maximum readability, you should include the extra whitespace around the $Z =$ numbers so everything lines up, as shown. (To remember the `print` formatting syntax to do this, see Table 1.1 in the Ayars text.) # # #### Reminder # # Remember to write informative doc strings, comment your code, and use descriptive function and variable names so others (and future you) can understand what you're doing! 
# 

# +
import numpy as np


def binding_energy(A, Z):
    """Returns the nuclear binding energy in MeV of an atomic nucleus with atomic number Z and mass number A"""
    # Coefficients of the semi-empirical mass formula, in MeV.
    aV = 14.64  # volume term
    aS = 14.08  # surface term
    aC = 0.64   # Coulomb term
    # Asymmetry-term coefficient, multiplying (A - 2Z)^2 / A in the formula.
    # This was previously also assigned to the name aS, silently overwriting
    # the surface coefficient above; it is a distinct constant and needs its
    # own name.
    aA = 21.07
    aP = 11.54  # pairing term
    # Use conditional statements (if, elif, else) to declare the variable delta with the appropriate value
    return  # Use the above formula for B, the binding energy


# Now check your function by calculating the requested binding energy
# -

def binding_energy_per_nucleon(A, Z):
    """Returns the nuclear binding energy per nucleon in MeV of an atomic nucleus with atomic number Z and mass number A"""
    return  # Use binding_energy(A, Z) and the number of nucleons


def max_binding_energy_per_nucleon(Z):
    """For atomic nucleus with atomic number Z, returns that mass number A that yields that maximum binding energy per nucleon, as well as that resultant maximum binding energy per nucleon in MeV"""
    # We can make our default return value A = Z and the corresponding binding energy
    max_A = Z
    max_binding_energy_per_nucleon = binding_energy_per_nucleon(Z, Z)
    # Use a for loop to go from A = Z to A = 3*Z, and update the return variables if a new maximum is found
    # A conditional statement within the loop is useful for comparing max_binding_energy_per_nucleon to a potential new maximum
    return max_A, max_binding_energy_per_nucleon


# +
# Now use a for loop and the function max_binding_energy_per_nucleon(Z) to print the final output
# -

# ## Problem 3: Particle in a Box
#
# [Adapted from Ayars, Problem 3-1] The energy levels for a quantum particle in a three-dimensional rectangular box of dimensions $\{L_1, L_2, \text{ and } L_3\}$ are given by
#
# $$ E_{n_1, n_2, n_3} = \frac{\hbar^2 \pi^2}{2m} \left[ \frac{n_1^2}{L_1^2} + \frac{n_2^2}{L_2^2} + \frac{n_3^2}{L_3^2} \right] $$
#
# where the $n$'s are integers greater than or equal to one. 
Your goal is to write a program that will calculate, and list in order of increasing energy, the values of the $n$'s for the 10 lowest *different* energy levels, given a box for which $L_2 = 2L_1$ and $L_3 = 4L_1$. # # Your program should include two user-defined functions that you may find helpful in accomplishing your goal: # # 1. A function `energy(n1, n2, n3)` that takes integer values $n_1$, $n_2$, and $n_3$, and computes the corresponding energy level in units of $\hbar^2 \pi^2/2 m L_1^2$. # # 1. A function `lowest_unique_K(K, List)` which takes a positive integer $K$ and a list of real numbers `List`, and returns an ordered (ascending) list of the lowest $K$ unique numbers in the list `List`. For instance, `lowest_unique_K(3, [-0.5, 3, 3, 2, 6, 7, 7])` would return `[-0.5, 2, 3]`. The function should not modify the original list `List`. # - As with most programming puzzles, there are several ways to write this function. Depending on how you do it, you may or may not find it helpful to Google how to "sort" lists, or how to "del" or "pop" items out of lists. # # You may also wish to make other user-defined functions depending on how you go about solving the problem. In fact, if you find some clever way to solve the problem that doesn't use `lowest_unique_K`, that is fine too! (You still need to write `lowest_unique_K`, though.) But whatever you do, be sure to comment your code clearly! # # #### Output # # Your final output should look like this (though with different numbers, and not necessarily the same number of lines): # # energy, n1, n2, n3 # (0.4375, 1, 1, 1) # (0.625, 1, 2, 1) # (0.8125, 2, 1, 1) # (0.9375, 1, 3, 1) # (1.0, 2, 2, 1) # (1.1875, 1, 1, 2) # (1.3125, 2, 3, 1) # (1.375, 1, 2, 2) # (1.375, 1, 4, 1) # (1.4375, 3, 1, 1) # (1.5625, 2, 1, 2) # # Notice how there are only 10 unique energies listed, but more than 10 lines. Each line could also have brackets instead of parentheses if you prefer, like this: `[0.4375, 1, 1, 1]`. 
# 
# #### Reminder
#
# Remember to write informative doc strings, comment your code, and use descriptive function and variable names so others (and future you) can understand what you're doing!
#
# #### Just for fun
#
# If you'd like, write a function `print_table(list_of_lists)` that takes a list of lists (or a list of tuples) and prints them in a nicely aligned table. Feel free to Google to get ideas on how to do this. Try to get your function to produce something like
#
#     energy  n1  n2  n3
#     0.4375  1   1   1
#     0.625   1   2   1
#     0.8125  2   1   1
#     0.9375  1   3   1
#     1.0     2   2   1
#     1.1875  1   1   2
#     1.3125  2   3   1
#     1.375   1   2   2
#     1.375   1   4   1
#     1.4375  3   1   1
#     1.5625  2   1   2

def energy(n1, n2, n3):
    """Returns the n-dependent coefficient of the particle-in-a-3D-box
    energy level for quantum numbers n1, n2, and n3. The box's lengths
    along dimensions 1, 2, and 3 go as L, 2*L, 4*L"""
    return  # Use the formula given above


# +
import copy


def lowest_unique_K(K, List):
    """Takes a positive integer K and a list of real numbers List, and
    returns an ordered (ascending) list of the lowest K unique numbers
    in the list List"""
    # Placeholder result: a list of K zeros. (You may prefer to begin
    # with an empty list instead and append values as you find them.)
    result = [0] * K
    # Work on a copy so the caller's List is never modified.
    working_copy = copy.copy(List)
    # There are many valid ways to write this function. Sketch the
    # logical steps first, then translate them into code -- and if
    # turning the steps into code is difficult, feel free to ask for help.
    return result


# +
# Now create a list of energies for different values of n1, n2, n3 (taking each from 1 to 10 should be sufficient)
# Remember to keep track of the corresponding n1, n2, n3 values for each energy, since we need to print them
# Then use lowest_unique_K(10, List) on this list of energies to find the first 10
# Finally, print these 10 energies and their corresponding n1, n2, n3 values
# You may find a dictionary helpful for keeping track of the association between energy values and n values
Week03/Homework02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="66sm4IOTRcfL" # # **Setup Model, Data Loader, Optimizer and Loss** # + colab={"base_uri": "https://localhost:8080/", "height": 81, "referenced_widgets": ["f51a82a7f1594a46b37eba1541d7bd74", "14e0c07ccea246259eb6b934c88f5a73", "df3ef6fd6fbd48c08827a297d75dedbd", "044427a950024640b8c5e8f5182e3ebe", "6860ceffc4c54063b48485637cffe3d5", "deae8cb9a49a4ff3afc5dbe3bf5fba97", "99a604dc5af14b658da25e0fa2146793", "70a50a3ba01b4774ba243cbd37a23071"]} id="8sSrGn3Co72s" outputId="91c66831-966d-41d4-8520-e4cfcf3cc10b" from models import networks as n import torch.nn.functional as F import numpy as np import matplotlib.pyplot as plt import torch import torch.nn as nn import torch.optim as opt from util import losses as loss from util import loaders as load from util import training as train from util import helpers as helper from models import networks as n ############################################################################ # Network ############################################################################ res_net = n.CustomResnet(51) res_net.cuda() ############################################################################ # Loss ############################################################################ mse_crit = loss.LogCoshLoss() point_crit = loss.PointLoss().cuda() ############################################################################ # Optimizer ############################################################################ lr = 1e-3 lr_array = np.array([lr/100,lr/10,lr]) lr_groups = res_net.lr_groups() opt_params = [{"params":lr_groups[0].parameters(),"lr":lr_array[0]}, {"params":lr_groups[1].parameters(),"lr":lr_array[1]}, {"params":lr_groups[2].parameters(),"lr":lr_array[2]}] adam_opt = opt.Adam(opt_params,betas=(0.9, 0.999), 
weight_decay= .0001) ############################################################################ # Data Generators ############################################################################ transform = load.NormDenorm([.5, .5, .5], [.5, .5, .5]) train_data= load.LandMarkGenerator( transform, output_res=128, size = 10000, rand_rot = .7) test_data = load.LandMarkGenerator( transform, output_res=128, size = 300, rand_rot = .7) batch_size = 8 mtrain = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=8, shuffle=True, drop_last=True) mtest = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=8, shuffle=False, drop_last=True) ############################################################################ # Train Dictionary ############################################################################ train_dict = { "train_loader":mtrain, "test_loader":mtest, "opt":adam_opt, "net":res_net, "mse_crit":mse_crit, "point_crit":point_crit } output_name = "test_" # + [markdown] id="nsG3PAQ4RY_Q" # # **View Data Sample** # + colab={"base_uri": "https://localhost:8080/", "height": 545} id="-44-AanuAbvc" outputId="ae6f2adb-3145-4096-e075-a3d82a563bde" % matplotlib inline helper.view_dataset(train_data,transform) # + [markdown] id="j_gCsptRRzTb" # # **Train Round One** # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="nBeb9R1MRx31" outputId="125e1f81-7da8-44cd-a594-944d73c1a3a9" lr = 1e-3 lr_array = np.array([lr/5,lr/5,lr]) for i in range(1,7): print (f'Run #{i}') epoch_count = i lr_list = train.set_lr_sched(epoch_count,mtrain.__len__()//batch_size,2.0) helper.lr_preview(lr_list) freeze = (i-1)*2 if i == 4: freeze = 100 train.one_run(train_dict,freeze,lr_list,lr_array) # + colab={"base_uri": "https://localhost:8080/", "height": 630} id="N8kTcgX3R8hm" outputId="2b6535cd-a886-4c31-d318-3510475e0e90" helper.view_predictions(res_net,train_data) helper.save_jit('output_run',1,res_net) # + [markdown] id="mM565CxgS9MQ" # # 
**Train Round Two** # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="SWqWcFPaSinz" outputId="fb3bbfe0-0c1f-4c36-adb1-393e2c87efbf" lr = 1e-4 lr_array = np.array([lr/5,lr/3,lr]) for i in range(1,7): print (f'Run #{i}') epoch_count = i lr_list = train.set_lr_sched(epoch_count,mtrain.__len__()//batch_size,2.0) helper.lr_preview(lr_list) freeze = 100 train.one_run(train_dict,freeze,lr_list,lr_array) # + colab={"base_uri": "https://localhost:8080/", "height": 630} id="CLctQAxwTBax" outputId="8e3f4deb-c284-419a-d647-904a96d1001f" helper.view_predictions(res_net,train_data) helper.save_jit('output_run',2,res_net) # + [markdown] id="rqOXTZBNTBnq" # # **Train Round Three** # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="nxfdsc6KTL1B" outputId="c21b72bc-d355-46bb-c145-8e98fb4d9e5a" lr = 1e-5 lr_array = np.array([lr/5,lr/3,lr]) for i in range(1,7): print (f'Run #{i}') epoch_count = i lr_list = train.set_lr_sched(epoch_count,mtrain.__len__()//batch_size,2.0) helper.lr_preview(lr_list) freeze = 100 train.one_run(train_dict,freeze,lr_list,lr_array) # + colab={"base_uri": "https://localhost:8080/", "height": 630} id="RnIjl5pCTL-s" outputId="5cfba4b7-d242-40ac-d04e-a8cc35b14e58" helper.view_predictions(res_net,train_data) helper.save_jit('output_run',3,res_net) # + id="IWhNaDouM3Z0"
train_network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: html # language: python # name: html # --- # ## Bayesian Optimization with Scikit-Optimize # # In this notebook, we will use **Bayesian Optimization** with Gaussian Processes using the open source Python package [Scikit-Optimize](https://scikit-optimize.github.io/stable/index.html). # # We will do the search manually, defining the objective function (hyperparameter response function ) and using the [Gaussian Process minimizer class from Scikit-optimize]( # https://scikit-optimize.github.io/stable/modules/generated/skopt.gp_minimize.html#skopt.gp_minimize). # # # ### Procedure # # To tune the hyper-parameters of our model we need to: # # - define a model # - decide which parameters to optimize # - define the objective function we want to minimize. # # ### NOTE # # Scikit-Optimize will always **minimize** the objective function, so if we want to maximize a function, for example the roc-auc, we need to **negate** the metric. Thus, instead of maximizing the roc-auc, we minimize the -roc-auc. 
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import cross_val_score, train_test_split

from skopt import gp_minimize  # Bayesian Opt with GP

# for the analysis
from skopt.plots import (
    plot_convergence,
    plot_evaluations,
    plot_objective,
)

from skopt.space import Real, Integer, Categorical
from skopt.utils import use_named_args

# +
# load dataset
breast_cancer_X, breast_cancer_y = load_breast_cancer(return_X_y=True)

X = pd.DataFrame(breast_cancer_X)
y = pd.Series(breast_cancer_y).map({0:1, 1:0})

X.head()

# +
# the target:
# percentage of benign (0) and malign tumors (1)
y.value_counts() / len(y)

# +
# split dataset into a train and test set
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0)

X_train.shape, X_test.shape
# -

# ## Define the Hyperparameter Space
#
# Scikit-optimize provides an utility function to create the range of values to examine for each hyperparameters. More details in [skopt.Space](https://scikit-optimize.github.io/stable/modules/generated/skopt.Space.html)

# +
# determine the hyperparameter space
param_grid = [
    Integer(10, 120, name="n_estimators"),
    Integer(1, 5, name="max_depth"),
    Real(0.0001, 0.1, prior='log-uniform', name='learning_rate'),
    Real(0.001, 0.999, prior='log-uniform', name="min_samples_split"),
    Categorical(['deviance', 'exponential'], name="loss"),
]

# Scikit-optimize parameter grid is a list
type(param_grid)
# -

# ## Define the model

# +
# set up the gradient boosting classifier
gbm = GradientBoostingClassifier(random_state=0)
# -

# ## Define the objective function
#
# This is the hyperparameter response space, the function we want to minimize.

# +
# We design a function to maximize the accuracy, of a GBM,
# with cross-validation

# the decorator allows our objective function to receive the parameters as
# keyword arguments. This is a requirement for scikit-optimize.
@use_named_args(param_grid)
def objective(**params):

    # model with new parameters
    gbm.set_params(**params)

    # optimization function (hyperparam response function)
    value = np.mean(
        cross_val_score(
            gbm,
            X_train,
            y_train,
            cv=3,
            n_jobs=-4,
            scoring='accuracy')
    )

    # negate because we need to minimize
    return -value
# -

# ## Bayesian Optimization with Gaussian Process

# +
# gp_minimize performs by default GP Optimization
# using a Matern kernel
gp_ = gp_minimize(
    objective,            # the objective function to minimize
    param_grid,           # the hyperparameter space
    n_initial_points=10,  # the number of points to evaluate f(x) to start of
    acq_func='EI',        # the acquisition function
    n_calls=30,           # the number of subsequent evaluations of f(x)
    random_state=0,
)

# +
# function value at the minimum.
# note that it is the negative of the accuracy
"Best score=%.4f" % gp_.fun
# -

# The Bayesian optimization got a better accuracy in less iterations than the random search (previous notebook).

# NOTE: gp_.x holds the best parameters in param_grid order:
# n_estimators, max_depth, learning_rate, min_samples_split, loss.
# The labels below previously listed min_samples_split before learning_rate,
# so the two printed values were swapped.
print("""Best parameters:
=========================
- n_estimators = %d
- max-depth = %d
- learning_rate = %.3f
- min_samples_split = %.3f
- loss = %s""" % (gp_.x[0], gp_.x[1], gp_.x[2], gp_.x[3], gp_.x[4]))

# ## Evaluate convergence of the search
#
# [plot_convergence](https://scikit-optimize.github.io/stable/modules/generated/skopt.plots.plot_convergence.html#skopt.plots.plot_convergence)

plot_convergence(gp_)

# Compared to random search (previous notebook), smaller values for the negative of the accuracy are found faster by Bayesian Optimization.
#
# ## Partially dependency plots
#
# [plot_objective](https://scikit-optimize.github.io/stable/modules/generated/skopt.plots.plot_objective.html#skopt.plots.plot_objective)

# Keep the dimension names in the same order as param_grid: gp_.x_iters
# stores each evaluated point in that order, so the positional column
# labelling of the results DataFrame below depends on this ordering.
# (plot_objective / plot_evaluations match dimensions by name, so they are
# unaffected.)
dim_names = ['n_estimators', 'max_depth', 'learning_rate', 'min_samples_split', 'loss']

plot_objective(result=gp_, plot_dims=dim_names)
plt.show()

# In the diagonal we see the objective function respect to each hyperparameter. Below the diagonal, we see the objective function in colour code, respect to 2 hyperparameters.
#
# ## Evaluation order
#
# [plot_evaluations](https://scikit-optimize.github.io/stable/modules/generated/skopt.plots.plot_evaluations.html)

plot_evaluations(result=gp_, plot_dims=dim_names)
plt.show()

# Because the search is guided, the hyperparameters are not sampled at random, thus the colours in the 2-D diagrams tend to converge to one corner of the plot, and the histograms are skewed rather than uniform.
#
# ## The search class

# +
# all together in one dataframe, so we can investigate further
tmp = pd.concat([
    pd.DataFrame(gp_.x_iters),
    pd.Series(gp_.func_vals),
], axis=1)

# x_iters columns come out in param_grid order; dim_names now matches it,
# so each column gets the correct hyperparameter name.
tmp.columns = dim_names + ['accuracy']

tmp.sort_values(by='accuracy', ascending=True, inplace=True)

tmp.head()
# -
Section-10-Scikit-Optimize/03-Bayesian-Optimization-GP-Manual.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Graph databases # # ## <NAME> # + [markdown] slideshow={"slide_type": "slide"} # # Graphs # # * ordered pair G = (V, E) comprising a set V of vertices (nodes, points) together with a set E of edges (arcs, lines), which are 2-element subsets of V # * Thanks Euler, thanks... # # <div align="center"><img src="https://github.com/jakubwilkowski/lunch_GraphDB/blob/master/img/bridges.png?raw=true"/></div> # # [source](https://en.wikipedia.org/wiki/Seven_Bridges_of_K%C3%B6nigsberg) # + [markdown] heading_collapsed=true slideshow={"slide_type": "slide"} # # Graph algorithms # # * Shortest path # * Minimum spanning tree # * Strongly connected components # * and many, many more # # + [markdown] slideshow={"slide_type": "slide"} # # Graph databases # # * There are no isolated pieces of information, but rich, connected domains all around us. # * a graph database stores connections as first class citizens # # + [markdown] slideshow={"slide_type": "slide"} # # Top players # # <div align="center"><img src="https://github.com/jakubwilkowski/lunch_GraphDB/blob/master/img/graphdb_popularity.png?raw=true"></div> # # [source](https://db-engines.com/en/ranking/graph+dbms) # + [markdown] slideshow={"slide_type": "slide"} # # Wait, but what's wrong with traditional relational databases? # + [markdown] slideshow={"slide_type": "fragment"} # * RDBMS are great for storing data that is consistent, well structured. Otherwise... 
# * RDBMS do not like recursive queries # * RDBMS are not that good for discovering patterns # * RDBMS have a lot of overhead with many-to-many relations # + [markdown] slideshow={"slide_type": "slide"} # # Relationships are important in life # # <div align="center"><iframe src="https://giphy.com/embed/R8RscTZV7Iy7m" width="480" height="362" frameBorder="0" class="giphy-embed" allowFullScreen></iframe></div> # # + [markdown] slideshow={"slide_type": "subslide"} # # RDBMS way # # <div align="center"><img src="https://github.com/jakubwilkowski/lunch_GraphDB/blob/master/img/m2m_rdbms.png?raw=true"></div> # # [source](https://neo4j.com/developer/graph-db-vs-rdbms/#_relational_databases) # + [markdown] slideshow={"slide_type": "subslide"} # # And graph database way # # <div align="center"><img src="https://github.com/jakubwilkowski/lunch_GraphDB/blob/master/img/m2m_graph.png?raw=true"></div> # # [source](https://neo4j.com/developer/graph-db-vs-rdbms/#_relational_databases) # + [markdown] slideshow={"slide_type": "slide"} # # Property graph data model # # <div align="center"><img src="https://github.com/jakubwilkowski/lunch_GraphDB/blob/master/img/property_graph.png?raw=true"></div> # + [markdown] slideshow={"slide_type": "slide"} # # Query language(s): # # * **Cypher** # * Gremlin # * SPARQL # # + [markdown] slideshow={"slide_type": "slide"} # # Typical use cases # # 1. Fraud detection # 1. Recommendation engines # 1. Social networks # 1. Master data management # 1. Network, IT Operations, Security # # + [markdown] slideshow={"slide_type": "slide"} # # Data modeling # # 1. Identify the nodes # 2. Assign labels to nodes (zero, one or more) # 3. Find relations between nodes # 4. Assign properties to nodes and relations (year_born, review, date_read, ...) # # # > Two _people_, **John** and **Sally**, \_are friends\_. Both John and Sally \_have read\_ the _book_, **Graph Databases**. 
# # # [source](https://neo4j.com/developer/guide-data-modeling/) # + [markdown] slideshow={"slide_type": "slide"} # <div align="center"><img style="height: 200px;" src="https://github.com/jakubwilkowski/lunch_GraphDB/blob/master/img/neo4j_logo.png?raw=true"></div> # # * ACID compliant # * written in Java # * v1 in 2010 (now 3.3.3) # * Community/Enterprise/Government # * own bolt protocol on port 7474 # # # + [markdown] slideshow={"slide_type": "slide"} # # Who uses Neo4j? # # * Walmart # * Airbnb # * Microsoft # * IBM # * ebay # * NASA # * Orange # * CISCO # * LinkedIn China # * TomTom # # # [source](https://neo4j.com/customers/) # + [markdown] slideshow={"slide_type": "slide"} # # Cypher # + [markdown] slideshow={"slide_type": "fragment"} # * declarative graph query language # * created by Neo Technology # * opened up in October 2015 # * ASCII like # + [markdown] slideshow={"slide_type": "fragment"} # ``` # ( ) <- node # -- <- relation # --> <- directed relation # -[:LABEL]- <- labeled relation # ()-->() <- two nodes and one direction # # ``` # + [markdown] slideshow={"slide_type": "subslide"} # <div align="center"><img src="https://github.com/jakubwilkowski/lunch_GraphDB/blob/master/img/cypher.png?raw=true"></div> # # [source](https://neo4j.com/developer/cypher-query-language/) # + [markdown] slideshow={"slide_type": "subslide"} # ## Query examples # `:play movie-graph` # + [markdown] slideshow={"slide_type": "slide"} # # Examples: Cypher vs SQL # # # + [markdown] slideshow={"slide_type": "fragment"} # <div align="center"><img src="https://github.com/jakubwilkowski/lunch_GraphDB/blob/master/img/schema.png?raw=true"></div> # + [markdown] slideshow={"slide_type": "subslide"} # ## Q1: List all <NAME> movies... 
# # ### Cypher: # ```SQL # MATCH (tom:Person {name: "<NAME>"})-[:ACTED_IN]->(tomHanksMovies) # RETURN tom,tomHanksMovies # ``` # # ### SQL # ```SQL # select p.name, m.title # from person p # inner join movieperson mp # on mp.person_id = p.id # inner join movie m # on m.id = mp.movie_id # where p.name = '<NAME>' and mp.relation_type = 'acted_in'; # # ``` # # + [markdown] slideshow={"slide_type": "subslide"} # ## Q2: <NAME>' co-actors... # ### Cypher: # ```SQL # MATCH (tom:Person {name:"<NAME>"})-[:ACTED_IN]->(m)<-[:ACTED_IN]-(coActors) # RETURN distinct coActors.name # ``` # # ### SQL # ```SQL # with tom_hanks_movies as ( # select m.id # from person p # inner join movieperson mp # on mp.person_id = p.id # inner join movie m # on m.id = mp.movie_id # where p.name = '<NAME>' and mp.relation_type = 'acted_in' # ) # # select distinct p.name # from person p # inner join movieperson mp # on mp.person_id = p.id # where mp.movie_id in (select id from tom_hanks_movies) # and mp.relation_type = 'acted_in' # and p.name != '<NAME>'; # # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ## Q3: How people are related to "Cloud Atlas"... # ### Cypher: # ```SQL # MATCH (people:Person)-[relatedTo]-(:Movie {title: "Cloud Atlas"}) # RETURN people.name, Type(relatedTo), relatedTo # ``` # # ### SQL # # ```SQL # select p.name, mp.relation_type # from person p # inner join movieperson mp # on mp.person_id = p.id # inner join movie m # on m.id = mp.movie_id # where m.title = 'Cloud Atlas'; # ``` # # ### Note: # I chose a naive schema, what if I decided to create separate table for each relation, ie. moviedirector, movieactor, movieproducer, etc? # -> SELECT ... FROM moviedirector UNION SELECT ... FROM movieactor UNION SELECT ... 
FROM movieproducer; # + [markdown] slideshow={"slide_type": "subslide"} # ## Q4: Movies and actors up to 4 "hops" away from <NAME> # ### Cypher: # ```SQL # MATCH (bacon:Person {name:"<NAME>"})-[*1..4]-(hollywood) # RETURN DISTINCT hollywood # ``` # # ### SQL # ```SQL # with recursive bacon(pid, mid, lvl) as ( # select distinct p.id, m.id, 1 # from person p # inner join movieperson mp # on mp.person_id = p.id # inner join movie m # on m.id = mp.movie_id # where p.name = '<NAME>' # UNION ALL # select distinct mp.person_id, mp.movie_id, b.lvl + 1 # from movieperson mp # inner join bacon b # on (b.mid = mp.movie_id OR mp.person_id = b.pid) # where (b.lvl < 4) # ) # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ## Q4: continued # # ```SQL # select distinct p.name # from bacon b # inner join person p # on p.id = b.pid # UNION ALL # select distinct m.title # from bacon b # inner join movie m # on m.id = b.mid; # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ## Q5: Bacon path, the shortest path of any relationships to Meg Ryan # ### Cypher: # ```SQL # MATCH (bacon:Person {name:"<NAME>"})-[*]-(meg:Person {name:"<NAME>"}) # RETURN bacon, meg; # ``` # # ### SQL # ```SQL # with recursive bacon(pid, mid, lvl) as ( # select distinct p.id, m.id, 1 # from person p # inner join movieperson mp # on mp.person_id = p.id # inner join movie m # on m.id = mp.movie_id # where p.name = '<NAME>' and mp.relation_type = 'acted_in' # UNION ALL # select distinct mp.person_id, mp.movie_id, b.lvl + 1 # from movieperson mp # inner join bacon b # on (b.mid = mp.movie_id OR mp.person_id = b.pid) # where mp.relation_type = 'acted_in' and (b.lvl < 10) # ) # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ## Q5: continued # # ```SQL # select min(b.lvl) # from bacon b # inner join person p # on p.id = b.pid # where p.name = '<NAME>'; # ``` # + [markdown] slideshow={"slide_type": "slide"} # # Neo4j & python # + [markdown] slideshow={"slide_type": "slide"} # # py2neo # # 
# [docs](http://py2neo.org/2.0/)

# + [markdown] slideshow={"slide_type": "subslide"}
# ## Quick setup
#
# ```
# docker run --publish=7474:7474 \
#            --publish=7687:7687 \
#            --volume=$HOME/neo4j/data:/data \
#            --env=NEO4J_AUTH=none \
#            neo4j
# ```

# + slideshow={"slide_type": "fragment"}
from py2neo import Graph, Relationship, NodeSelector

# Connect to the local Neo4j instance over HTTP (see docker command above).
graph = Graph("http://localhost:7474/")

# + [markdown] slideshow={"slide_type": "subslide"}
# ## Some examples

# + [markdown] slideshow={"slide_type": "subslide"}
# ## Select <NAME>
# -

selector = NodeSelector(graph)
selected = selector.select("Person", name="<NAME>")
list(selected)

# + [markdown] slideshow={"slide_type": "subslide"}
# ## Some 'raw' queries
# -

graph.data("MATCH (a:Person)-[]-(m:Movie) RETURN a.name, m.title LIMIT 10")

# + [markdown] slideshow={"slide_type": "subslide"}
# ## Creation & transactions!

# +
from py2neo import Node, Relationship
from py2neo.database import Schema, Transaction

# Create everything inside a single explicit transaction. The original
# version called graph.create(), which autocommits each entity on its own
# and left the transaction opened by graph.begin() empty, so tx.commit()
# committed nothing. Using tx.create() batches the work so the commit is
# atomic, as the slide title promises.
tx = graph.begin()
andrzej = Node("Person", name="<NAME>", born=1952)
kiepscy = Node("Movie", title="Świat wg kiepskich")
pitbull = Node("Movie", title="Pitbull. nowe porządki")
listy = Node("Movie", title="Listy do M. 3")
tx.create(andrzej)
tx.create(kiepscy)
tx.create(pitbull)
tx.create(listy)
tx.create(Relationship(andrzej, "ACTED_IN", kiepscy))
tx.create(Relationship(andrzej, "ACTED_IN", pitbull))
tx.create(Relationship(andrzej, "ACTED_IN", listy))
tx.commit()

# + [markdown] slideshow={"slide_type": "subslide"}
# ## More accurate dates

# +
from py2neo.ext.calendar import GregorianCalendar

calendar = GregorianCalendar(graph)
birth = Relationship(andrzej, "BORN", calendar.date(1952, 3, 15).day)
graph.create(birth)

# + [markdown] slideshow={"slide_type": "subslide"}
# ## Indexing

# +
from py2neo import watch

# Echo HTTP traffic so the effect of the index on query plans is visible.
watch("httpstream")
graph.schema.create_index('Person', 'name')
# graph.schema.drop_index('Person', 'name')
# -

graph.schema.create_index('Person', 'name')

# + [markdown] slideshow={"slide_type": "slide"}
# # ipython-cypher magic
#
# https://github.com/versae/ipython-cypher
# -

# %reload_ext cypher

# %cypher MATCH (a {name: '<NAME>'})-[:ACTED_IN]-(b) RETURN b.title as TomHanksMovies limit 6;

# + [markdown] slideshow={"slide_type": "slide"}
# # Thank you
#
# <div align="center"><iframe align="center" src="https://giphy.com/embed/l3V0sNZ0NGomeurCM" width="480" height="268" frameBorder="0" class="giphy-embed" allowFullScreen></iframe></div>

# + [markdown] slideshow={"slide_type": "slide"}
#
# # Questions?
GraphDB.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Load dependencies
import pandas as pd
import numpy as np
from scipy.stats import gmean
import matplotlib.pyplot as plt
# %matplotlib inline
import sys
sys.path.insert(0, '../../../statistics_helper')
from CI_helper import *

# # Estimating the number of phages in subseafloor sediments
# To estimate the total number of phages in subseafloor sediments, we rely on two recent papers which measured the ratio between the number of prokaryotes in subseafloor sediments and the number of phage-like particles ([Engelhardt et al.](http://dx.doi.org/10.1038/ismej.2013.245) and [Middelboe et al.](https://doi.org/10.3354/ame01485)). We extracted the data from figure 3 of Engelhardt et al. and from figure 2 of Middelboe et al.:

# +
# Load data extracted from Engelhardt et al.
engelhardt = pd.read_excel('marine_deep_subsurface_phage_data.xlsx', skiprows=1)

# Load data extracted from Middelboe et al.
middelboe = pd.read_excel('marine_deep_subsurface_phage_data.xlsx', 'Middelboe', skiprows=1, index_col=0)

# Plot the measurements from both studies on a log-log scale
plt.loglog(engelhardt['Cells concentration [cells cm^-3]'],
           engelhardt['Phage concentration [virions cm^-3]'],
           '.', label='Engelhardt data')
plt.loglog(middelboe['Prokaryote abundance [cm^-3]'],
           middelboe['Viral abundance [cm^-3]'],
           '.', label='Middelboe data')

# Plot the power-law fit Engelhardt et al. used for the data
cell_conc = engelhardt['Cells concentration [cells cm^-3]']
fit_xdata = np.logspace(np.log10(cell_conc.min()), np.log10(cell_conc.max()), 100)
plt.loglog(fit_xdata, 271.8 * fit_xdata**0.768, label='Engelhardt et al. fit')
plt.xlabel(r'Cell concentration [cells cm$^{-3}$]')
plt.ylabel(r'Phage-like particle concentration [particles cm$^{-3}$]')
plt.legend()
# -

# As the data from the two studies seem to correspond well to the same fit used in Engelhardt et al., we combined the data from the two studies and calculated the geometric mean of the ratios between phage-like particles and prokaryotes across measurements:

# Merge the per-sample phage-to-prokaryote ratios from both studies
merged_data = pd.concat([
    engelhardt['Phage concentration [virions cm^-3]'] / engelhardt['Cells concentration [cells cm^-3]'],
    middelboe['Viral abundance [cm^-3]'] / middelboe['Prokaryote abundance [cm^-3]'],
])
geo_mean_ratio = gmean(merged_data)
print('Our best estimate for the ratio between the concentration of phage-like particles and cells in subseafloor sediments is ≈%.0f.' %geo_mean_ratio)

# To calculate the total number of phages in subseafloor sediments, we multiply the ratio of phage-like particles to prokaryotes by our estimate for the total number of prokaryotes in subseafloor sediments.

# +
prokaryote_estimate = pd.read_excel('../../../bacteria_archaea/marine_deep_subsurface/marine_deep_subsurface_prok_biomass_estimate.xlsx')
best_estimate = prokaryote_estimate.loc[0]['Value'] * geo_mean_ratio
print('Our best estimate for the total number of phages in subseafloor sediments is ≈%.0e' %best_estimate)

# Record the estimate in the shared results spreadsheet
old_results = pd.read_excel('../phage_num_estimate.xlsx')
result = old_results.copy()
result.loc[1] = pd.Series({
    'Parameter': 'Total number of phages in the marine deep subsurface',
    'Value': best_estimate,
    'Units': 'Number of individuals',
    'Uncertainty': np.nan
})
result.to_excel('../phage_num_estimate.xlsx', index=False)
viruses/phage_num/marine_deep_subsurface/marine_deep_subusrface_phage_num.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/krsmith/DS-Sprint-01-Dealing-With-Data/blob/master/module3-basicdatavisualizations/LS_DS_113_Plotting_Playground.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="IG1v46jnGkax" colab_type="code" colab={} # https://matplotlib.org/gallery/lines_bars_and_markers/barh.html#sphx-glr-gallery-lines-bars-and-markers-barh-py import matplotlib.pyplot as plt import numpy as np # Fixing random state for reproducibility np.random.seed(19680801) plt.rcdefaults() fig, ax = plt.subplots() # Example data people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim') y_pos = np.arange(len(people)) performance = 3 + 10 * np.random.rand(len(people)) error = np.random.rand(len(people)) ax.barh(y_pos, performance, xerr=error, align='center', color='green', ecolor='black') ax.set_yticks(y_pos) ax.set_yticklabels(people) ax.invert_yaxis() # labels read top-to-bottom ax.set_xlabel('Performance') ax.set_title('How fast do you want to go today?') plt.show() # + id="DWcnKAt4H9PT" colab_type="code" colab={} # Adapted to piechart # https://matplotlib.org/gallery/pie_and_polar_charts/pie_features.html#sphx-glr-gallery-pie-and-polar-charts-pie-features-py import matplotlib.pyplot as plt import numpy as np # Fixing random state for reproducibility np.random.seed(19680801) plt.rcdefaults() fig, ax = plt.subplots() # Example data people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim') performance = 3 + 10 * np.random.rand(len(people)) error = np.random.rand(len(people)) ax.pie(performance, labels=people) ax.set_title('How fast do you want to go today?') plt.show() # + id="Y26IktTfIZmO" colab_type="code" colab={} # 
https://matplotlib.org/gallery/lines_bars_and_markers/scatter_demo2.html#sphx-glr-gallery-lines-bars-and-markers-scatter-demo2-py import numpy as np import matplotlib.pyplot as plt import matplotlib.cbook as cbook # Load a numpy record array from yahoo csv data with fields date, open, close, # volume, adj_close from the mpl-data/example directory. The record array # stores the date as an np.datetime64 with a day unit ('D') in the date column. with cbook.get_sample_data('goog.npz') as datafile: price_data = np.load(datafile)['price_data'].view(np.recarray) price_data = price_data[-250:] # get the most recent 250 trading days delta1 = np.diff(price_data.adj_close) / price_data.adj_close[:-1] # Marker size in units of points^2 volume = (15 * price_data.volume[:-2] / price_data.volume[0])**2 close = 0.003 * price_data.close[:-2] / 0.003 * price_data.open[:-2] fig, ax = plt.subplots() ax.scatter(delta1[:-1], delta1[1:], c=close, s=volume, alpha=0.5) ax.set_xlabel(r'$\Delta_i$', fontsize=15) ax.set_ylabel(r'$\Delta_{i+1}$', fontsize=15) ax.set_title('Volume and percent change') ax.grid(True) fig.tight_layout() plt.show() # + id="DaEiVQD2K0T1" colab_type="code" colab={} # https://matplotlib.org/gallery/mplot3d/scatter3d.html#sphx-glr-gallery-mplot3d-scatter3d-py # This import registers the 3D projection, but is otherwise unused. from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import import matplotlib.pyplot as plt import numpy as np # Fixing random state for reproducibility np.random.seed(19680801) def randrange(n, vmin, vmax): ''' Helper function to make an array of random numbers having shape (n, ) with each number distributed Uniform(vmin, vmax). ''' return (vmax - vmin)*np.random.rand(n) + vmin fig = plt.figure() ax = fig.add_subplot(111, projection='3d') n = 100 # For each set of style and range settings, plot n random points in the box # defined by x in [23, 32], y in [0, 100], z in [zlow, zhigh]. 
for c, m, zlow, zhigh in [('r', 'o', -50, -25), ('b', '^', -30, -5)]: xs = randrange(n, 23, 32) ys = randrange(n, 0, 100) zs = randrange(n, zlow, zhigh) ax.scatter(xs, ys, zs, c=c, marker=m) ax.set_xlabel('X Label') ax.set_ylabel('Y Label') ax.set_zlabel('Z Label') plt.show() # + [markdown] id="kuVcR9RwK2Xd" colab_type="text" # # Plot Assignment # # Draw at least 3 plots with the data you loaded yesterday. # + id="02mt_9taK6FT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="138d55b1-d79c-43b0-f7cf-d068c8570d78" import pandas as pd b_cancer = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data' ''' Attribute Information: 1. Sample code number id number 2. Clump Thickness 1 - 10 3. Uniformity of Cell Size 1 - 10 4. Uniformity of Cell Shape 1 - 10 5. Marginal Adhesion 1 - 10 6. Single Epithelial Cell Size 1 - 10 7. Bare Nuclei 1 - 10 8. Bland Chromatin 1 - 10 9. Normal Nucleoli 1 - 10 10. Mitoses 1 - 10 11. Class: 2 for benign, 4 for malignant ''' col_names = ['code_number', 'clump_thickness', 'cell_size_uniformity', 'cell_shape_uniformity', 'marginal_adhesion', 'single_ep', 'bare_nuclei', 'bland_chromatin', 'normal_nucleoli', 'mitoses', 'class'] bc_data = pd.read_csv(b_cancer, header=None, names=col_names) bc_data.head() # + id="oubhWIPs8dib" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ff0e52b3-ee5b-481c-a900-314599ff4e33" # Need to clean up the missing values which are showing as '?' 
in the data bc_data = pd.read_csv(b_cancer, header=None, names=col_names, na_values=['?']) bc_data.isna().sum().sum() # + id="SLlljNca9I99" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8d61671f-d42e-4eb9-e0d3-b84ede661383" import numpy as np bc_data = pd.read_csv(b_cancer, header=None, names=col_names) bc_data.replace('?', np.nan, inplace=True) bc_data.isna().sum().sum() # + id="EHx4w6kOLoA0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 472} outputId="b68a2fa2-fcfd-4fdd-cca9-c76f0ae91dc8" import matplotlib.pyplot as plt plt.scatter(bc_data['cell_shape_uniformity'], bc_data['bare_nuclei']) plt.title('Breast Cancer - Cell Shape Uniformity by Bare Nuclei') plt.ylabel("Bare Nuclei") plt.xlabel("Cell Shape Uniformity"); # + [markdown] id="OIFw-LDe-Htv" colab_type="text" # ## Scatterplot Summary # # ####"What insight do your plots give you in the data?" # # There doesn't look to be much correlation between the two attributes of these breast cancer clumps. Both attributes tend to be lower, but both have outliers. # # ####"What was most challenging about making plots?" # # This scatterplot was pretty straight forward. 
# + id="MoULGhjENcH3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="c38a3037-a698-4d99-f25c-0441af917731" # Boxplot with pandas bc_data.boxplot(column=['cell_shape_uniformity'], by=['bare_nuclei']) # + id="JRu1RfOwZzAa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 457} outputId="bdc0aadf-cabe-4425-df69-7447c03d13ee" # Boxplot with Matplotlib grouped_bc = bc_data['cell_shape_uniformity'].groupby(bc_data['bare_nuclei']) bp_data = [group for name, group in grouped_bc] fig, ax1 = plt.subplots(figsize=(10,6)) # Sizing the graph itself bp = plt.boxplot(bp_data, sym='k.', showfliers=True) # showfliers will plot the outliers ax1.set_title('Breast Cancer - Cell Shape Uniformity by Bare Nuclei') ax1.set_xlabel('Cell Shape Uniformity') ax1.set_ylabel('Bare Nuclei') plt.show() # + [markdown] id="lbYkQDcdIw4h" colab_type="text" # ## Boxplot Summary # # ####"What insight do your plots give you in the data?" # # As Cell Shape Uniformity rises, so does Bare Nuclei. # # ####"What was most challenging about making plots?" # # As we saw in the lecture, making the boxplot with the grouping was more complex than the scatterplot creation. 
# + id="yh-fwEwxWZTC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 430} outputId="9c1c6fb7-143c-4eaf-9d49-a4be77cce3c6" mapping = {2: 'Benign', 4: 'Malignant'} bc_data_rep = bc_data.replace({'class': mapping}) bc_data_rep.convert_objects() bc_data_rep.head(10) # + id="N0YKsrS-Yf4O" colab_type="code" colab={} # + id="gjwfFGR_LPvr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 472} outputId="f5757879-7e01-4ea7-9378-91667d1ee36d" # Bar plot with matplotlib fig, ax = plt.subplots() ax.plot(bc_data_rep['cell_shape_uniformity'], bc_data_rep['class'], label='Cell Shape Uniformity') ax.plot(bc_data_rep['bare_nuclei'], bc_data_rep['class'], label='Bare Nuclei') ax.legend() plt.title('Breast Cancer - Cell Shape Uniformity & Bare Nuclei by Class') plt.ylabel("Class") plt.xlabel("Level"); plt.show() # + [markdown] id="qjzreNUVUEnK" colab_type="text" # ## Plot Summary # # ####"What insight do your plots give you in the data?" # # I added in Class to this plot and compared Cell Shape Uniformity by Class to Bare Nuclei by Class (Class being whether the clump tested as malignant (4) or benign (2)). Each of the two attributes show that at any level, they could be malignant, but when they are lower, the chances are greater that they will be benign. # # ####"What was most challenging about making plots?" # # # + id="dVfXV-tENlvY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 338} outputId="cb03dddc-809d-4d57-dc9c-414d9c7331e1" fig, axs = plt.subplots(1, 2, figsize=(9, 3), sharey=True) axs[0].bar(bc_data['class'], bc_data['cell_shape_uniformity']) axs[1].bar(bc_data['class'], bc_data['bare_nuclei']) fig.suptitle('Categorical Plotting') # + id="l96bzGoGTijC" colab_type="code" colab={}
module3-basicdatavisualizations/LS_DS_113_Plotting_Playground.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: CoCo Projects
#     language: python
#     name: coco-projects
# ---

# +
import numpy as np
import pandas as pd
import xarray as xr
import pyproj
import pygmt
import pooch
import boule as bl
import verde as vd
import harmonica as hm
# -

import matplotlib.pyplot as plt


def scatter_plot_gravity(easting, northing, values, title):
    """Scatter plot of a gravity-derived field in projected coordinates.

    Parameters: ``easting``/``northing`` are projected coordinates in
    meters, ``values`` is the quantity to color the points by (mGal),
    and ``title`` is the figure title. The original notebook called this
    function in its final cell without ever defining it, which raised a
    NameError; it is defined here, mirroring the earlier inline scatter
    plot of the raw gravity data.
    """
    plt.figure(figsize=(12, 12))
    plt.scatter(easting, northing, c=values, s=0.1)
    plt.gca().set_aspect("equal")
    plt.colorbar(label="mGal")
    plt.title(title)
    plt.show()


# ## Download data

# Download and cache the CSV version (no metadata) of the Australia ground gravity data
fname = pooch.retrieve(
    url="doi:10.6084/m9.figshare.13643837/australia-ground-gravity.csv",
    known_hash="md5:d47fef200d92c682dc8b63fe31b80364",
    progressbar=True,
)

# Load the data with pandas
data = pd.read_csv(fname)
data

# DOI from the Zenodo "fatiando" community
doi = "10.5281/zenodo.5167357"

# +
# Download the Earth topography grid
topography = f"doi:{doi}/earth-topography-10arcmin.nc"
topography_hash = "md5:c43b61322e03669c4313ba3d9a58028d"
path_topography = pooch.retrieve(
    topography, known_hash=topography_hash, progressbar=True
)
path_topography
# -

# Load the topography grid with xarray
topography = xr.load_dataarray(path_topography)
topography

fig = plt.figure(figsize=(12, 12))
plt.scatter(data.longitude, data.latitude, c=data.gravity, s=0.1)
plt.gca().set_aspect("equal")
plt.colorbar()
plt.show()

# +
# Alice Springs region in Australia (roughly 133E, 23S)
region = (128, 136, -26, -20)
coordinates = (data.longitude, data.latitude)
inside = vd.inside(coordinates, region)
data = data[inside]
data
# -

# Pad the region so the terrain model extends beyond the data
region_pad = vd.pad_region(region, pad=5)
region_pad

topography = topography.sel(
    longitude=slice(region_pad[0], region_pad[1]),
    latitude=slice(region_pad[2], region_pad[3]),
)
topography

# Mercator projection, using the mean latitude as the true-scale latitude
projection = pyproj.Proj(proj="merc", lat_ts=data.latitude.mean())
easting, northing = projection(data.longitude.values, data.latitude.values)
data = data.assign(easting_m=easting, northing_m=northing)
data

# Project the topography grid into the same Mercator coordinates
topography_proj = vd.project_grid(topography, projection, method="nearest")
topography_proj

# ## Gravity disturbance

# Define the reference ellipsoid
ellipsoid = bl.WGS84
ellipsoid

# Normal gravity of the ellipsoid at each observation point
normal_gravity = ellipsoid.normal_gravity(data.latitude, data.height)
normal_gravity

# +
# Disturbance = observed gravity minus normal gravity
gravity_disturbance = data.gravity - normal_gravity
data = data.assign(gravity_disturbance_mgal=gravity_disturbance)
data

# +
# Terrain density: rock above sea level, water minus rock below
# (values in kg/m3) — TODO confirm the water density choice of 1040
density = np.where(topography_proj > 0, 2670, 1040 - 2670)
topography_model = hm.prism_layer(
    coordinates=(topography_proj.easting, topography_proj.northing),
    surface=topography_proj,
    reference=0,
    properties={"density": density},
)
topography_model

# +
# Gravitational effect of the terrain model at the observation points
coordinates = (data.easting_m, data.northing_m, data.height)
terrain_effect = topography_model.prism_layer.gravity(coordinates, field="g_z")
terrain_effect
# -

data = data.assign(terrain_effect_mgal=terrain_effect)
data

# +
# Bouguer disturbance = gravity disturbance minus terrain effect
gravity_bouguer = data.gravity_disturbance_mgal - data.terrain_effect_mgal
data = data.assign(gravity_bouguer_mgal=gravity_bouguer)
data
# -

scatter_plot_gravity(
    data.easting_m,
    data.northing_m,
    data.gravity_bouguer_mgal,
    title="Bouguer gravity disturbance",
)
australia_disturbance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Daniela's PyTorch # language: python # name: dhuppenk_pytorch # --- # # Practical Machine Learning with SDSS Data # # In this tutorial, we are going to use SDSS data to get some hands-on experience with machine learning. In the first exercise, we're going to train a classifier to distinguish stars from galaxies. This is an important problem, because many scientific questions depends on a clean data sets, and stars can be hard to distinguish from far-away galaxies. We don't want our study of galaxy evolution contaminated by stars in our own galaxy! # # As a note in advance: none of the results you'll get out of this are science-worthy. This tutorial is meant to give you a first idea for how to set up your own machine learning model. But the first, and most important lesson, is this: **don't blindly trust your ML results.** # As with any other science project, reporting or using results from a machine learning classifier or regressor requires careful understanding of the biases and caveats, assumptions and limitations that come with the data and algorithms chosen. Because the data sets you'll be using come straight out of the SDSS catalogue, you can expect there to be funny effects (both subtle and not) that may mess up any classification you'd want to do, and in a real-world setting, this would include understanding the limitations of the instrument and the data processing, before drawing any scientific conclusions from your procedure. # # With that out of the way, let's have some fun with machine learning! In this tutorial, we will use python and a library called `scikit-learn` to do our machine learning, `pandas` to deal with data structures, and `matplotlib` and `seaborn` to do our plotting. 
# + # make plots interactive and import plotting functionality # %matplotlib notebook import matplotlib.pyplot as plt # pretty plotting import seaborn as sns # my standard styles for plots sns.set_style("whitegrid") sns.set_context("talk") # Always need numpy import numpy as np # data array operations import pandas as pd # - # ### Load the Data # # Our first task is loading the data. For this exercise, we'll work with `sdss_dataset1.csv`. Your task is to find the correct file in this folder and load the data into a `pandas.DataFrame` (if you've never worked with pandas, take a look at the `read_csv` function): data = # add your code here # The `head` method on your loaded `DataFrame` gives you a quick overview of what's in your data. # # **Exercise**: What columns do you recognize? Which ones are new to you? Which columns do you think will be su # Some quick lingo: In machine learning, the things we are trying to learn are often called **labels**, and the quantities we can use to learn them are **features**. For example, in some of the data sets, you're going to try and separate stars and galaxies by their magnitudes and colours. Here, for each **sample** in your data set, you have a bunch of magnitude and colour measurements, your features, and you're trying to predict whether that sample is a galaxy or a star, its label. For the photometric redshift estimation case, you similarly have magnitudes and colours as features, and you're trying to predict the redshifts (your labels). This is called **supervised learning**. # # Note that in this case, we always need examples where we *know* the ground truth: we need to know the class really well, or we need to know the redshift beyond a reasonable doubt (in our case here e.g. through precise spectroscopic measurements). This is often not the case in astronomy (or, indeed, science): we often don't know exactly what our labels should be. In these cases, **unsupervised learning** can be really helpful. 
Some of you have data sets without labels. You'll be playing around with clustering algorithms. # ## Machine Learning With Messy Data # # Let's start with something I told you *not* to do in the earlier class: we're just going to build a classifier and see how it does, without knowing too much of what's in the data. # # Normally, you wouldn't *start* by doing a classification, but for most of your data sets, there are some points we're going to make throughout this tutorial, so having a classification without knowing much about the data serves as a useful baseline. In general, though, running an ML algorithm comes at the end of *many* important steps, which is part of the point of this entire tutorial. # # ### Splitting the Data into a Training and Test Set # # Our first task is to split the data into a training and a test data set. Pick the first 51% of the data and designate this as our training data set, and the remaining 49% of the DataFrame will be our test data set. This assigns a way higher fraction of data to the test data set than one normally would, but bear with me here. This entire example is a little contrived in the service of being instructional. :) ntraining = # number of training examples ntest = # number of test examples # Note: the thing you're classifying on should *not* be part of the array you use to classify, so remove the `class` column from the training arrays (but make sure you store it in a separate array first! (Hint: the pandas `drop` method comes in handy here) # # **Advice**: It may be tempting to store the column with the label `class` in a variable `class`. Please don't! `class` is one of python's protected variable names, because it's used to set up a class. Similarly, you should not name a variable `def`: python might let you do it, but then you can never ever make a function again, because you used the keyword Python uses to create a function (i.e. when you type `def myfunction(...): ...` into a variable! 
There are a few of those to look out for, other examples include `list` and `lambda`. You will not believe how often I've named a list, well `list`, and could then no longer make any lists! Don't make my mistakes! :) classes = # store class labels here # **Exercise**: Now let's set up a simple classifier. For this exercise, we're going to use a *Logistic Regression* classifier, as you might have encountered during the earlier lecture. # # For details, you can take a look at the scikit-learn documentation for [logistic regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html). For now, use the default parameters, then train the algorithm on the training data and explore how well it does using the test data (take a look at the `score` method of the Logistic Regression classifier). Do you think it produces good results? # # **Hint**: Basically all algorithms implemented in scikit-learn have the same interface. Basically all of them have a `fit` method that will fit your data, a `predict` method that will predict the classes/values of new samples, and a `score` method that tells you something about how good your algorithm is at making predictions. Some algorithms also have a `transform` and a `fit_transform` method, which allows you to transform your features (e.g. dimensionality reduction algorithms like Principal Component Analysis). Many machine learning libraries outside of scikit-learn have adopted the same structure, which is super helpful when using these libraries for algorithms that are not implemented in scikit-learn. Learning how to do a workflow in scikit-learn is well worth the investment. # So the accuracy on the training is very low, but it seems like the model does a *perfect* job on the test data set and manages to classify *every single* training example correctly (at least in my version). Pretty cool, right? # # Well, no. 
In general, a model *cannot* do better on examples that it has never seen before than on samples it *has* seen before. A test (or validation) score that is perfect (or at least significantly better than the training score) should make us suspicious. That the optimizer complained at us doesn't help, either. # # **Exercise**: Take five minutes and thing about why this might happen. Take a look at the training and the test data. Are there any notable differences? # # **Hint**: You might want to look at the class probabilities that the Logistic Regression classifier returns. You can also plot distributions of samples and labels for the two different datasets. # So it looks like the model only returns probabilities of 0.5 for all of our examples! This is obviously not great; it suggests that the model knows *nothing* about what the samples in our test data are. Whether you get a prediction for a STAR or a GALAXY depends *entirely* on what the programmers of scikit-learn decided what the algorithm should return in case that the probabilities are 0.5. We might just as well have gotten back a score of 0 if they'd made a different choice. # # Obviously, this is bad: we *thought* that the algorithm had learned something useful, but it hadn't. But why? # # Well, when I generated the data, I did a silly thing: # * I downloaded 50,000 stars and 50,000 galaxies from SDSS # * I concatenated both together and stored them in a file # * Then we took the first 51,000 examples from that file and called it training data, and called the rest test data. # # What does that do? Display the number of samples labelled "STAR" and those labelled "GALAXY" in both the training and test data set to find out. :) # Hint: There are many ways you could do this, but casting the training and test labels into a `pandas.Series` object and using the `value_counts()` function gives you a pretty straightforward way of doing this. 
# # You can do this by hand, of course, but the scikit-learn function `train_test_split` is very useful here. Let's use that instead: # **Exercise**: Let's train our logistic regression classifier again and see how it goes this time: # That looks a lot better, but still not great. This, in part, is because we left out a bunch of crucial steps. Remember how I said **look at the data first**? Yeah, let's do a better job of this and start from the beginning: # # # # # Machine Learning: From Start To Finish # # I set a really bad example above by (1) asking you to run the algorithm on the data before we'd even looked at it, and (2) using the test data set before we were ready. The idea was to give you an idea to explore some of the things that can go wrong with your machine learning if you're not careful. We're now going to do better, and run through a typical machine learning workflow from start to finish. # # # ## Figuring out your goal: Asking why! # # The first step in *any* research project, but certainly in any machine learning project, is to define your goal. What are you going to do with your results? Are you trying to learn something about physics with your data set? Are you just trying to separate out good signals from bad ones? # # Unfortunately, many physics questions don't necessarily easily translate to a machine learning problem (this is often true for statistics as well). Going from "I want to know what dark matter is" to "run a random forest on SDSS photometric measurements" is really hard, and requires multiple iterations of reducing your *physics* question down to one that ML or statistics (or a combination of the two) can answer. It's well worth spending significant time at this stage, because this process will give you crucial insights about every step of your analysis procedure, from which columns in your data might be useful, to which dimensionality reduction algorithms might work, to the type of appropriate ML algorithm to use. 
# # One important question I've already implicitly answered for you above: Do you have ground-truth labels in your training data? That is, do you have data for which you are reasonably sure that the labels assigned to each sample are correct? Another question I've already implicitly answered: Do you have a **classification** or a **regression** problem? In a classification problem, you have categorical labels (e.g. "star", "galaxy", ...) that you try to assign to new samples. In a regression context, the variable you try to assign is continuous (e.g. redshift). # # Here are some additional questions you might want to think about at the very start, but also keep in mind throughout your analysis: # * Do you only care about the *predictions*, or do you also care about the *parameters*? That is, are you trying to learn something about the structure of the problem itself (e.g. physics), or do you just care that the predictions are right? # * How well does your training data match the samples for which you don't know the labels? Are they from the same survey/instrument? Are there significant differences between the data you can train on, and the data you want to classify? # * What biases do you already know of in your training data? Is your survey flux-limited? Did the team making the catalogue only care about a particular funny type of Cataclysmic Variable and leave out another you might be interested in? Is there a part of feature space that's just not covered? # * What physical knowledge do you have about your measurements? How can that physics knowledge guide you in selecting or constructing good features? # # # ## Feature Selection and Engineering # # One of the crucial parts of machine learning, and the part that you will likely spend most of the time on, is selecting and engineering features for training on. Features are, essentially, meaningful summaries of your data that are ideally designed such that they make classification and regression easy. 
In terms of the problems you're considering today, notice that the magnitudes we've extracted from the SDSS catalogue are not actual data. They're measurements derived from the *images* that the telescope took. # # There are machine learning algorithms that can take raw data, e.g. the pixel values in an image. The most popular type used especially in image recognition in recent years is the Convolutional Neural Network (CNN), which takes raw data and essentially internally learns what good representations of the data are. # # CNNs have been enormously successful for a whole number of tasks, but it's worth pointing out that they're not always the ideal solution. They're big and very, very expensive to train (some of them can take weeks even on supercomputers!). If you have knowledge about the structure of your data and your problem, then it may be more efficient and reliable to use that knowledge to extract meaningful features. For example, we know that photons hit a CCD following a point spread function (PSF). We might not know that PSF very well, but if we do, there's no point making a neural network *learn* that a PSF exists; we can just extract magnitudes and work with them directly. So for any ML problem, it's worth thinking about what you know, and what you don't know. In cases where you *don't* know your PSF very well (or any number of things that may affect your measurements), it might be worth having your model learn that structure, but just take note that that's not universally true. # # Let's now turn to the data you extracted. Earlier, you've taken a look at columns and might already have some ideas about which columns are useful for the star/galaxy classification project. # # In any data set, you might have columns that are more useful than others! Let's find the ones we want to use for classification! # # **Exercise**: Print the columns in your features table. What columns are there in your table? Are all of them useful for solving your problem? 
Discuss with your team which ones might be useful, and which ones might not be! Reminder: you can look up what some of those columns mean [here](http://skyserver.sdss.org/dr14/en/help/browser/browser.aspx#&&history=description+SpecPhoto+V). # # **Exercise**: drop all columns that you don't think are useful. # # # **Note**: For this exercise, I've made you columns that contain the colours as potentially useful classification features, but these might not exist in data sets that you encounter in research contexts. # # **Hint**: Feature engineering might include *combining* measurements into new features based on your domain knowledge of the problem you're trying to solve. # # **Note**: for now, let's leave the `class` column in the data, which will help us with visualization later: # # ### Visualizing Feature Spaces # # One of the most useful things you can do to start with is visualize your data! There are, of course, many different ways to visualize high-dimensional data. For example, you can make a histogram of the distributions for each feature (for classification problems, colour-coded by class works well), or you can make scatter plots or heatmaps of one feature against one another. You can also do both at the same time in what's called a *corner plot* or a *pair plot*. In python, the package [`seaborn`](https://seaborn.pydata.org/index.html) has a lot of nice visualizations pre-built to cut down the time you need to deal with nitty-gritty details of making complicated plots work. # # **Exercise**: Discuss and try out ways to visualize your features. Take a look at the [`pairplot`](https://seaborn.pydata.org/examples/scatterplot_matrix.html) function in seaborn. In particular, it can be useful to set `hue="class"` to automatically plot separate distributions for `STAR` and `GALAXY` samples. For the classification problems, look for features that separate stars and galaxies well. 
For the regression, try to gauge by looking at the features how complex a model you'll likely need. # # **Note**: a pairplot for *all* of the features will be pretty big: pick a subset you think might be informative with respect to classification, and then do a pairplot of those. It's also helpful to start by just plotting the first 1000 samples in your DataFrame if `pairplot` runs very slowly, and to set `diag_kind = "kde"` # # **Hint**: This is a good time to scramble the order of the data, so let's do that first: data = data.sample(frac=1, replace=False).reset_index(drop=True) # You should see that there are values around -10000 for the colour and some of the magnitudes. This is because SDSS uses `-10000` as their placeholder for bad measurements, equivalent to the `NaN` ("not a number") you might see in other datasets. # # Let's remove all samples that have any -10000 value (there are ways to deal with missing data in machine learning, but we'll not learn about those today): # ### Looking for Weird Things # # One of the key tasks during this stage of your analysis is to *sanity-check* your data. Are there weird things? Instrumental artifacts? Things that don't look right? This is where you explore your data and try to find (and explain) these things, and potentially remove them before training. # # Our star/galaxy data looks reasonably ok at this point, there are no longer any magnitudes that are wildly different from what we'd expect from either stars or galaxies in an optical survey like SDSS. # # # ### Separate Out the Classes Again + Setting up training/testing splits # # At this point, we should remove the 'class' column from the data again and set up our train/test splits. # # From now on, we'll *only* explore things on the training data, and ignore the test data until the very end. 
# # Another important thing to think about is **stratification**: if your data set is very imbalanced, randomized splitting into training and test sets can lead to all examples of the smaller class ending up in one of the two sets, but not in both. Stratified splitting uses the class labels to ensure that examples of all classes end up in the training and test sets. # # classes = data_clean["class"] X_train, X_test, l_train, l_test = train_test_split(data_clean, classes, train_size=0.75, shuffle=True, stratify=None) X_train = X_train.drop(["class"], axis=1) X_test = X_test.drop(["class"], axis=1) # ### Dimensionality Reduction # # Another way to try and make sense of high-dimensional feature spaces is to perform *dimensionality reduction*. There are a lot of different ways to reduce the dimensionality of your features, and some are more useful than others, depending on the structure of your data and your problem. One idea with dimensionality reduction is to find the combination of features that gives you the most useful information in terms of your regression or classification problem. Some other times, all you want is a handy visualization in 2D, since humans in general aren't very good at thinking in higher-dimensional spaces. # # [Principal Component Analysis (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis) is one of the most straightforward ways of reducing dimensionality (see also [here](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition) for alternative methods implemented in scikit-learn). One useful thing to try might be to project your features into a smaller space and see whether you still capture the information relevant in order to perform a good classification or regression. # # **Exercise**: Use PCA to project your features into 2 dimensions, and compare with the full data space. Do you think most of the information in your features is captured by the 2D representation? 
# # **Hint**: The `fit_transform` method will come in handy here. # Another way to visualize high-dimensional data is called *t-distributed stochastic neighbour embedding*, or, for short, t-SNE. [The paper](https://lvdmaaten.github.io/publications/papers/JMLR_2008.pdf) is surprisingly readable for a computer science paper, and the method is generally pretty good at visualizing complex feature spaces (and you can spend some entertaining minutes letting your brain find fun things in the patterns it produces). # # One thing you have to be aware of, however, is that t-SNE *does not generalize to new examples*. Wheras methods like PCA can be trained on some data, and then the trained model applied to new samples, this is *not true* for t-SNE. So this is a method that's useful for visualization, but it doesn't necessarily produce features you want to use in your classification. Note that t-SNE is generally quite a slow algorithm, so running this on very large data sets might leave you waiting for a while! # # **Exercise**: Let's try it out! Visualize your feature space in 2 dimensions using t-SNE (hint: this is also implemented in scikit-learn). # ### Scaling Features # # Sometimes, your features vary wildly in order of magnitude. You may have photometric magnitudes that all lie between 13 and 20, but maybe you have a feature that's between 1 and 1,000,000, so that your different dimensions have vastly different scales. Some algorithms (e.g. random forests) can deal with that pretty well, but others can't. It's worth knowing enough about the algorithms you use whether they will deal with this kind of issue or not. If the method can't, one common solution is to re-scale the features such that they all have a mean of zero and a variance of one. # # Scikit-learn has a few ways of scaling features and other potentially useful steps for pre-processing data. Take a look at [this tutorial](http://scikit-learn.org/stable/modules/preprocessing.html). 
# # **Exercise**: Do you think scaling will help in your problem? Try scaling your features and re-running the PCA and t-SNE algorithms. Did your results change? # ## Picking a Machine Learning Algorithm to Start With # # Which machine learning algorithm you choose for your problem depends, as we've discussed above, strongly on the type of problem you're trying to solve and the structure of the data. But even once you've decided whether you have a regression or classification problem, and whether it's an unsupervised or supervised one, there remain many algorithms to choose from! # # For classification, as you learned earlier this week, there are two different types of algorithms to keep in mind: *generative* algorithms and *discriminative* algorithms. Generative algorithms are named this way because they can *generate* data. A popular example is Gaussian Mixture Models (GMMs). Discriminative models draw more or less complex functions in a multi-dimensional space (the number of dimensions corresponds to your number of features). Broadly, in the regression case, these methods try to find the best function to draw *through* the data points in order to model them. In the classification case, these methods try to find a surface that *separates* the different classes from one another (this is also called a decision boundary). # # One big question is how *interpretable* the model ought to be. Simpler algorithms are often easier to understand and the results more straightforward to interpret than from, say, a random forest. A general good suggestion is to start with the simplest model you think you can get away with, and only move to more complex models if your problem demands them. # # Neural networks have been hugely successful in solving complex machine learning problems, in part because they can *learn* features rather than require the user to hand-craft them. While they do very well in many circumstances, be aware that this isn't *always* the case! 
These networks work really well (especially for image recognition) when you have no idea what your features ought to be, or there are things in your data that you can't model very easily (or that might be very expensive to model). However, if you understand the underlying structures in your data really well, then extracting physically meaningful features related to your problem can make your algorithm outperform those that need to learn these structures from scratch, or be faster to run. # # There is another side to this, though: if you *don't* know structures in your data, then neural networks can be very powerful *emulators* of the process you're trying to model. This has been used, for example, for modelling detectors, where running physical simulations of these detectors would be hugely expensive. # # **Exercise**: You've earlier tried out logistic regression as an algorithm. It's time to revisit this choice. Do you think it was a reasonable one? Given your data explorations earlier, do you think you could get away with fitting a linear model (i.e. a method linear in the parameters)? Or is your function complex enough that you'll need to fit a more complicated function? Note that for many problems, there isn't a single right answer. [This slightly tongue-in-cheek flow chart](http://scikit-learn.org/stable/tutorial/machine_learning_map/index.html) might give you a starting point for which methods to explore (Note that in my science projects, I very often end up in the field labelled "tough luck"). # # Some fun methods to try: # * Logistic Regression (linear) # * Support Vector Machines (linear) # * Decision Trees + Random Forests # # # ### Hyperparameters # # Before we go into actually applying a machine learning algorithm to our data, there is one more thing we need to think about: the parameters! Here's where it gets a little confusing, though, so bear with me. All machine learning algorithms have parameters. 
For example, a neural network has *weights* for each of the connections between network nodes. However, basically all algorithms also have *hyperparameters*. For the K-Nearest Neighbour algorithm, one hyperparameter is the number of neighbours to use in order to determine the model value. For a random forest, one hyperparameter is the number of trees. # # **Exercise**: Take a look at the hyperparameters for the algorithm you picked above. Which do you think are particularly useful? Are there any that don't make sense to you at all? # ### K-Fold Cross Validation # # Okay, so we've established that basically all machine learning methods have *hyperparameters* we somehow need to deal with. # # What to do with all those hyperparameters? One way to deal with them is *model selection*. Each different set of hyperparameters defines a different model, which you can compare. To compare, you'll need to *score* your model in some way, i.e. determine how good it is. What "good" means depends on your problem at hand. Above, you used the `score` method for the K-Nearest Neighbour method, which by default uses *accuracy*, i.e. the fraction of correctly identified samples in your data set. We've also seen above that accuracy isn't always the best option, depending on what you're trying to find out. # # Another key piece of information is that there's a reasonably high *variance* in whatever score you compute. That is, a random slice into training and test data will produce different scores than a second random slice. One way to deal with this is called cross-validation. There are several kinds, but the most common one we'll discuss here is called **K-Fold Cross Validation**. Under this scheme, you split your training data set (*after* you've already set aside a test set!) into $K$ different slices (say, $K=10$). In the first instance, you train on nine of these slices, and test on the tenth. In the second instance, you train on slices 1-8 and 10 and test on 9. 
You continue this process until each of your slices has been used to test the performance. To formally distinguish these slices from your test set, these are usually called **validation** sets, because you use them to *validate* your model, and choose hyperparameters. # # **Note**: For problems with imbalanced class distributions, there's also a useful scikit-learn class called `StratifiedKFold`. Using this to set up your k-fold cross validation will ensure that all the subsets of data created by the k-fold cross validation always contain a proportional fraction of all classes. Otherwise, especially with classes that are rare in your data set, you might end up with training or validation sets that contain *no* examples of a class you're interested in. # # **Exercise**: Check the hyperparameters for the algorithm you intend to use. Normally, you should try to learn enough about your algorithm in order to make some educated guesses for what those hyperparameters should be based on your problem, your data and the algorithm itself. In this case, however, we are going to use cross-validation in order to figure out which ones you should use. So pick a parameter you think might be important to your model's performance, and run cross-validation for multiple different values of this. Print or plot the resulting scores. # # **Hint**: In `scikit-learn`, you can use e.g. [`cross_val_score`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html#sklearn.model_selection.cross_val_score) in order to evaluate models using cross-validation. You can learn more about evaluating the estimator performance with cross validation [here](https://scikit-learn.org/stable/modules/cross_validation.html). # So if you run a 5-fold cross-validation, this function returns 5 accuracy scores, one for each validation subset. This means that the computer split the data set into 5 subsets. 
It then trained on the first four, and calculated a validation score on the last. Then it did the same again, but left out the second-to-last subset to use as a validation set, and so on, until every subset had been used once as a validation set. You can use more or fewer folds within the cross validation, but it's useful to remember: fewer folds of cross-validation means that you will be more uncertain about your scores (it's harder to see what the variance in scores is), but with more folds, there are fewer training examples per fold, which might degrade the performance. # # # **Exercise**: Write a loop that calculates cross-validated scores for a list of different values of `C`: # # **Hint**: You'll want to explore values of `C` over multiple orders of magnitude. That's usually better done on a logarithmic scale. # ## Exploring the Results # # Now is the time to explore the results: where did the algorithm do well? Are there parts of parameter space that the algorithm systematically gets wrong? # # Here, you'll want to split your training data into a single training and single validation set using `train_test_split`, instead of doing cross-validation, so that you can explore the performance on a single validation set in more detail. # # Things to explore: # * plots of different features coloured by whether the algorithm successfully classified samples or not --> are there parts of parameter space where it does badly? # * make a [confusion matrix](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html) (this is especially useful on problems with more than two classes :) ) # * We've not talked a lot about scoring functions in this tutorial, but in reality, you might find that accuracy, AKA "how many samples were classified correctly" can be a bit of a limiting performance estimator in real circumstances. 
Take a look at [this tutorial](https://scikit-learn.org/stable/modules/model_evaluation.html) on scoring functions for more information. # # Based on what you learn during this exploration, you might want to # 1. do some more hyper-parameter optimization # 2. explore a different set of features (maybe a subset, or some scientifically meaningful combination of features) # 3. try a different algorithm # 4. Celebrate your success. :) # # ### Share Your Results! # # You can play around with algorithms, hyperparameters, subsets or combinations of features. When you're done, add your results to the [SOSTAT 2021 ML Exercise Leaderboard](https://board.net/p/sostat2021-ml-leaderboard). Who will get the highest accuracy?
tutorials/daniela_machine_learning/SOSTAT2021_ML_Tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import scipy.constants as co import matplotlib.pyplot as plt #import solver2 #from bolos import parser, grid #from bolos import parser, grid, solver2 from bolosKhai import parser, grid, solver2 np.seterr(divide='ignore', invalid='ignore') # Create an energy grid for Boltzmann Solver # This energy grid has unit in eV gr = grid.QuadraticGrid(0, 20, 200) bsolver = solver2.BoltzmannSolver(gr) # Import data file, which contains the cross section data. with open('Cross Section.dat') as fp: processes = parser.parse(fp) processes = bsolver.load_collisions(processes) bsolver.target['CH4'].density = 0.5 bsolver.target['Ar'].density = 0.5 ################################################## # INPUT bsolver.OmegaN = 0.10000E-11 # Omega / N bsolver.kT = 400 * co.k / co.eV # Gas - Temperature 400 K # GUESS by Maxwell distribution function. 
# Here we are starting with # with an electron temperature of 6 eV f0 = bsolver.maxwell(6.0) mean_max = bsolver.mean_energy(f0) def EEDF_AC(EN, f0): bsolver.grid = gr bsolver.EN = EN * solver2.TOWNSEND # After change any parameter we must initial the solver bsolver.init() f1 = bsolver.converge(f0, maxn=200, rtol=1e-4) mean1 = bsolver.mean_energy(f1) print('E/N = %.0f Td' % EN) print('Mean Energy 1 = %.4f eV' % (mean1)) # Get new grid newgrid = grid.QuadraticGrid(0, 10 * mean1, 200) bsolver.grid = newgrid bsolver.init() # Interpolate the previous EEDF over new grid f2 = bsolver.grid.interpolate(f1, gr) mean2 = bsolver.mean_energy(f2) # Find final EEDF f3 = bsolver.converge(f2, maxn=200, rtol=1e-5) mean3 = bsolver.mean_energy(f3) print('Mean Energy Inter-EEDF = %.4f eV' % mean2) print('Mean Energy Final-EEDF = %.4f eV \n' % mean3) grid_EEDF = bsolver.cenergy return f3, grid_EEDF, mean3 # Range of Electric field / Number of electron - E0/N # E = E0 * exp (i * Omega * t) EN = np.linspace(100,2000,21) rate1 = np.zeros_like(EN) mean1 = np.zeros_like(EN) plt.figure() for i, ien in enumerate(EN): EEDF, gr_EEDF, mean1[i] = EEDF_AC(ien, f0) # We calculate the rate of reation : CH4 -> CH4(V13)(0.361eV) rate1[i] = bsolver.rate(EEDF, "CH4 -> CH4(V13)(0.361eV)") ##################################################### ### INPUT DATA BOLSIG+ input_file = 'CH4(V13)(0361eV)Bolsig.dat' data = np.loadtxt(input_file) x1 = data[:,0] y1 = data[:,1] ########################################### plt.plot(mean1, rate1, 'bo-' ,label = 'Bolos') plt.plot(x1,y1,'rx-', label = 'Bolsig+') plt.xlabel('Mean Energy (eV)') plt.ylabel('Rate coefficient k (m$^\mathdefault{3}$/s)') plt.title("CH4 -> CH4(V13)(0.361eV)") plt.legend() plt.show()
.ipynb_checkpoints/Untitled2-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # **Announcement** # Unless you have any specific topic you would like to cover, the rest of the lessons (this + 2 more) will be dedicated to this silly BotGame # - today: **object oriented programming** and definition of the problem # - next time: finishing the implementation of game and go over what **you will** need to develop for the final lesson # - last lesson: the **big fight**!! we'll be running a lot of simulations where each bot has a logic that each of you developed!! Winner gets *immortal glory*! # # Object Oriented Programming # > Object-oriented programming (OOP) is a programming paradigm based on the concept of "objects", which can contain data and code: data in the form of fields (often known as attributes or properties), and code, in the form of procedures (often known as methods). [Wikipedia](https://en.wikipedia.org/wiki/Object-oriented_programming) # # This is a powerful but slightly advanced topic, for more information I strongly suggest to look into either of these references: # - [py4e](https://www.py4e.com/lessons/Objects) 4 youtube videos describing **how to** use classes and corresponding written chapter # - [realpython](https://realpython.com/python3-object-oriented-programming/) article with code snippets # # In this lesson we'll get an overview and then focus on an example to show **why** it can be useful. 
# First, some nomenclature:
# - **class**: think of it as the "cookie-cutter"/"blueprint"/"recipe" that generates *instances* ("cookie"/"building"/"food")
# - **instance** (or **object**): this is the materialisation of the class and will be the one used in the program
# - **method**: is a function that is attached to the class (and the instances that it creates), it must take at least one argument (conventionally named `self`) which is a reference to the instance.
# - **attribute**: it's a variable that is attached to the class (and the instances that it creates), it can be called using the `.` notation.
#
# One way to think of the use of a class is to think of it as a sub program, where everything within it only cares about what's in the boundary. In this analogy `methods` are functions and `attributes` are variables.
#
# There are some built-in *magic* methods (aka *dunder* methods) which get called when specific things happen, e.g.`__init__` gets called at initialisation of an object.

# +
class Dog:  # the class: our "cookie-cutter"
    # class attribute: shared by every Dog instance
    scientific_species = 'Canis lupus'

    def __init__(self, name):  # magic method, runs when an instance is created
        # instance attribute: each Dog carries its own name
        self.name = name

    def bark(self):  # a regular method
        # reach the instance attribute through `self`
        print(f"{self.name}: woof")


mydog = Dog(name='fido')  # an instance: an actual "cookie"
print(mydog.scientific_species)
mydog.bark()

print("="*20)

yourdog = Dog('dido')  # a second, independent instance
print(yourdog.scientific_species)
yourdog.bark()
# -

# # The Bot Race Game
# We want to build a simple bot game. 
# All bots are racing to get to square 10 as fast as possible but there are a few rules:
# - Each turn a bot can take either of the following actions:
#     - `walk` (move 1 square in the direction it is facing),
#     - or `sabotage` (making all the opponents at the same square turn in the opposite direction for one turn)
# - The order of bot actions is randomised each round
# - at the beginning of each round, all bots are turned to face forward
# - bots cannot go further backwards than the starting position

class Bot:
    position = 0   # square at which the bot is located
    direction = 1  # can be 1 (facing forward) or -1 (facing backwards)

    def walk(self):
        self.position += self.direction  # walk in the current direction

    def sabotage(self, bots):
        pass  # TODO: we'll implement this later


# +
mybot = Bot()
print(mybot.position)
mybot.walk()
print(mybot.position)
mybot.walk()
print(mybot.position)
print(mybot)
# -

# ## magic methods
# Magic methods allow you to overwrite default behaviour of the class, for example:
# - \_\_init\_\_: gets called when the object/instance is initialised
# - \_\_repr\_\_: gets called when a representation of the object is required, e.g.: `print(my_obj)`
#
# Let's add a specific name for our bot at initialisation and a nice representation for it

class Bot:
    position = 0
    direction = 1

    def __init__(self, name):
        self.name = name  # adding a name attribute at initialisation to identify the bot

    def __repr__(self):
        return f"{self.name}Bot"  # returning a custom string representation

    def walk(self):
        from_position = self.position
        self.position = max(0, self.position+self.direction)  # making sure the position is never < 0
        to_position = self.position
        return from_position, to_position  # returning the from and to position so we can display them if we want to

    def sabotage(self, bots):
        pass


# +
mybot = Bot('Bit')
from_pos, to_pos = mybot.walk()
print(f"{mybot} moved from {from_pos} to {to_pos}")

from_pos, to_pos = mybot.walk()
print(f"{mybot} moved from {from_pos} to {to_pos}")
# -

# ## listen all y'all it's a sabotage
# Let's implement the sabotage action on the Bot

class Bot:
    position = 0
    direction = 1

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return f"{self.name}Bot"

    def walk(self):
        from_position = self.position
        self.position = max(0, self.position+self.direction)
        to_position = self.position
        return from_position, to_position

    def sabotage(self, bots):  # this method will need the bots as an input so it can check and sabotage the ones that are in the same position
        sabotaged_bots = []  # keeping a list of all the bots we are sabotaging
        for bot in bots:  # going through every bot
            if bot.position == self.position and bot != self:  # if it is in the same position and not myself..
                bot.direction = -1  # sabotage it (turn it facing backwards)
                sabotaged_bots.append(bot)  # add it to the bots I've sabotaged
        return sabotaged_bots  # return the list of sabotaged bots


evilbot = Bot('Evil')
walkingbot = Bot('Walking')
all_bots = [evilbot, walkingbot]
all_bots

# +
print("\n === FIRST ROUND: both walking ===")

from_pos, to_pos = walkingbot.walk()
print(f"{walkingbot} moved from {from_pos} to {to_pos}")

from_pos, to_pos = evilbot.walk()
print(f"{evilbot} moved from {from_pos} to {to_pos}")

# +
print("\n === SECOND ROUND: evil stikes ===")

sabotaged = evilbot.sabotage(all_bots)
print(f"{evilbot} sabotaged {sabotaged}")

from_pos, to_pos = walkingbot.walk()
print(f"{walkingbot} moved from {from_pos} to {to_pos}")
# -

# # Strategy Hook
# let's add the `play` method which is what will implement the strategy the bot will use, for now let's set it as just pick randomly between `move` and `sabotage`

# +
import random

class Bot:
    position = 0
    direction = 1

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return f"{self.name}Bot"

    def walk(self):
        from_position = self.position
        self.position = max(0, self.position+self.direction)
        to_position = self.position
        return from_position, to_position

    def sabotage(self, bots):
        sabotaged_bots = []
        for bot in bots:
            if bot.position == self.position and bot != self:
                bot.direction = -1
                sabotaged_bots.append(bot)
        return sabotaged_bots

    def play(self):  # when it's my turn to act...
        return random.choice(['walk', 'sabotage'])  # ...randomly pick between 'walk' and 'sabotage'
# -

bot = Bot("botty")
print(bot.play())
print(bot.play())
print(bot.play())
print(bot.play())

# # Game object
# now let's create a game class that will allow to handle the game, first let's allow the game to show a board

class Game:
    n_squares = 10  # defining size of the board

    def __init__(self, bots):  # the game will need to be initialised with a list of competitor bots
        self.bots = bots  # storing the list as an instance attribute so we can access it as `<game instance>.bots`

    def show_board(self):
        print("=" * 30)
        board = {i: [] for i in range(self.n_squares+1)}  # creating a dictionary of empty lists with numbers 0-10 as keys
        for bot in self.bots:
            # BUGFIX: append the bot to the list for its square; the previous
            # assignment (board[bot.position] = bot) replaced the list, so
            # bots sharing a square overwrote one another on the display.
            board[bot.position].append(bot)
        for square, bots_in_square in board.items():  # visualising the current positions
            print(f"{square}: {bots_in_square}")


# + tags=[]
lonley_bot = Bot("Mr.Lonley")
competitors = [lonley_bot]
game = Game(competitors)
game.show_board()

# + tags=[]
lonley_bot.walk()
lonley_bot.walk()
game.show_board()
# -

# # \~\~\~ Question Break!! 
\~\~\~ # ![question_break](https://raw.githubusercontent.com/gabrielecalvo/Language4Water/master/assets/xx_question_break.jpg) # ## Allowing Game to play rounds # We'll need to: # - setup an attribute `winner` which we'll use to check if race is over # - we'll then implement the `play_round` method which will: # - randomise the order of call for each bot to act # - let each bot play in that random order using another method called `_play_bot` (just to keep function smaller) # - reset the direction of all bots at the end of each round so they are all facing forward # - print out the current board status # # # The `_play_bot` method will take a single bot as input (the one that is playing at the moment) and need to: # - call the `play` method of the acting bot # - based on the action the acting bot is taking it will: # - call the corresponding bot method # - pass the necessary input # - print out a line describing what the action and its effects were (just to better understand what's going on) # - if the bot has reach the end, we'll declare it the winner, which make the `play_round` method will stop the race # # ***NOTE***: starting a method name with `_` is just a convention to signal that the method should not be called directly but should only be called by another method withing the class. 
# +
import random


class Game:
    """Race controller: shows the board and plays rounds until someone wins."""

    n_squares = 10
    winner = None  # attribute that will be filled by the winner, we can use it to check if race is over

    def __init__(self, bots):
        self.bots = bots

    def show_board(self):
        print("=" * 30)
        board = {i: [] for i in range(self.n_squares + 1)}
        for bot in self.bots:
            board[bot.position].append(bot)
        for square, bots_in_square in board.items():
            print(f"{square}: {bots_in_square}")

    def play_round(self):
        if self.winner is None:           # if winner has not been declared yet, play a round
            random.shuffle(self.bots)     # randomise order of bots
            print(self.bots)              # print out the random order
            for bot in self.bots:         # for every bot in the random order
                self._play_bot(bot)       # have the bot play their move
                if self.winner:           # if the bot reaches the end and wins there is no need to continue
                    break

            # reset directions after every bot has had their turn
            # BUGFIX: was `for bot in bots:`, which silently relied on a
            # same-named module-level list instead of this game's own bots.
            for bot in self.bots:
                bot.direction = 1

        if self.winner:  # if a winner was declared, show it
            print(f"========== Race Over, WINNER: {self.winner} ========== ")
        self.show_board()  # show the current status of the board

    def _play_bot(self, bot):
        action_str = bot.play()  # action_str will be the string "walk" or "sabotage"
        if action_str == "walk":
            pos_from, pos_to = bot.walk()
            print(f"{bot} walked from {pos_from} to {pos_to}")
        elif action_str == "sabotage":
            sabotaged_bots = bot.sabotage(self.bots)
            print(f"{bot} sabotaged {sabotaged_bots}")

        if bot.position >= self.n_squares:
            self.winner = bot
# -

bots = [Bot("Albatros"), Bot("Buffalo"), Bot("Cow")]
game = Game(bots)
game.show_board()

game.play_round()

# # \~\~\~ Question Break!! \~\~\~
# ![question_break](https://raw.githubusercontent.com/gabrielecalvo/Language4Water/master/assets/xx_question_break.jpg)

# ## too chatty?
# let's add a `verbose` attribute to the game which will skip the print statements if set to `False`.
# This way it is easier to switch between debugging (finding mistakes) and just running to get the result


class Game:
    """Race controller with an optional `verbose` switch for the prints."""

    n_squares = 10
    winner = None

    def __init__(self, bots, verbose=False):  # adding `verbose` which will be False by default
        self.bots = bots
        self.verbose = verbose  # storing it as an instance attribute

    def show_board(self):
        print("=" * 30)
        # BUGFIX: was `range(10)`; square 10 (the finish line) was missing from the
        # board, so show_board raised KeyError as soon as a bot won the race.
        board = {i: [] for i in range(self.n_squares + 1)}
        for bot in self.bots:
            board[bot.position].append(bot)
        for square, bots_in_square in board.items():
            print(f"{square}: {bots_in_square}")

    def play_round(self):
        if self.winner is None:
            random.shuffle(self.bots)
            if self.verbose:
                print(self.bots)  # only show the order if verbose=True
            for bot in self.bots:
                self._play_bot(bot)
                if self.winner:
                    break

            # BUGFIX: was `for bot in bots:` (a module-level global); reset the
            # directions of *this* game's bots.
            for bot in self.bots:
                bot.direction = 1

        if self.verbose:  # only show the board if verbose=True
            if self.winner:
                print(f"========== Race Over, WINNER: {self.winner} ========== ")
            self.show_board()

    def _play_bot(self, bot):
        # NOTE(review): this version already passes the positions to `play`; it
        # expects the updated Bot class (defined further down in the notebook)
        # whose `play` accepts that argument.
        bot_position_dictionary = {b: b.position for b in self.bots}
        action_str = bot.play(bot_position_dictionary)
        if action_str == "walk":
            pos_from, pos_to = bot.walk()
            if self.verbose:  # only print the move if verbose=True
                print(f"{str(bot):<15} walked from {pos_from} to {pos_to}")
        elif action_str == "sabotage":
            sabotaged_bots = bot.sabotage(self.bots)
            if self.verbose:  # only print the move if verbose=True
                print(f"{str(bot):<15} sabotaged {sabotaged_bots}")

        if bot.position >= self.n_squares:
            self.winner = bot


# ***Note***: you might have noticed also the print lines in the `_play_bot` method now look like:
# ```python
# print(f"{str(bot):<15} ...")
# ```
#
# this is just so that we are aligning the names to the left of a 15 character string leaving the rest empty.
#
# ```
# so instead of ...
# Mybot walked from 0 to 1
# Myotherbot walked from 0 to 1
#
#
# ...
# we'll get
# Mybot           walked from 0 to 1
# Myotherbot      walked from 0 to 1
# ```

# ## Final Touches
# Let's add a last couple of things:
# - add a `_set_starting_positions` which will ensure all bots' position are 0 at the start of the Game (in the `__init__`)
# - let's allow the bots to be smarter by passing all bots' position to the `.play` method so they can use that information to decide the move

# + tags=[]
import random


class Game:
    """Final version of the race controller."""

    n_squares = 10
    winner = None

    def __init__(self, bots, verbose=False):
        self.bots = bots
        self.verbose = verbose
        self._set_starting_positions()  # calling the method that will reset all positions

    def _set_starting_positions(self):
        # go through every bot in the competition and set its position back to 0
        for b in self.bots:
            b.position = 0

    def show_board(self):
        print("=" * 30)
        board = {i: [] for i in range(self.n_squares + 1)}
        for bot in self.bots:
            board[bot.position].append(bot)
        for square, bots_in_square in board.items():
            print(f"{square}: {bots_in_square}")

    def play_round(self):
        if self.winner is None:
            random.shuffle(self.bots)
            if self.verbose:
                print(self.bots)
            for bot in self.bots:
                self._play_bot(bot)
                if self.winner:
                    break

            # BUGFIX: was `for bot in bots:`, which silently used the global list
            for bot in self.bots:
                bot.direction = 1

        if self.verbose:
            if self.winner:
                print(f"========== Race Over, WINNER: {self.winner} ========== ")
            self.show_board()

    def _play_bot(self, bot):
        bot_position_dictionary = {b: b.position for b in self.bots}  # creating a dictionary of bot positions
        action_str = bot.play(bot_position_dictionary)  # passing it to `play` to allow smarter decisions
        if action_str == "walk":
            pos_from, pos_to = bot.walk()
            if self.verbose:
                print(f"{str(bot):<15} walked from {pos_from} to {pos_to}")
        elif action_str == "sabotage":
            sabotaged_bots = bot.sabotage(self.bots)
            if self.verbose:
                print(f"{str(bot):<15} sabotaged {sabotaged_bots}")

        if bot.position >= self.n_squares:
            self.winner = bot


class Bot:
    """Final bot: can optionally follow a pre-recorded list of moves."""

    position = 0
    direction = 1

    def __init__(self, name, strategy=None):
        # BUGFIX: the strategies cell below calls `Bot(name, strategy_list)`, but the
        # original __init__ only accepted `name` and raised TypeError. `strategy`
        # defaults to None, so existing `Bot(name)` callers are unaffected.
        self.name = name
        self.strategy = strategy

    def __repr__(self):
        return f"{self.name}Bot"

    def walk(self):
        from_position = self.position
        self.position = max(0, self.position + self.direction)
        to_position = self.position
        return from_position, to_position

    def sabotage(self, bots):
        sabotaged_bots = []
        for bot in bots:
            if bot.position == self.position and bot != self:
                bot.direction = -1
                sabotaged_bots.append(bot)
        return sabotaged_bots

    def play(self, bot_positions):
        # added `bot_positions` as input so strategies can use it
        if self.strategy:
            return self.strategy.pop(0)  # follow the pre-recorded moves while any remain
        return random.choice(['walk', 'sabotage'])
# -

# # It's your Turn!!!
# let's start simple, each of you send me a list of 'sabotage' and 'walk', let's have it with 12 elements

# +
example_strategy = ['sabotage', 'sabotage', 'walk', 'sabotage', 'sabotage', 'walk',
                    'sabotage', 'sabotage', 'walk', 'sabotage', 'sabotage', 'walk']

duckduckgo_bot = Bot("DuckDuckGo")

def duckduckgo_play(*a, **kw):
    # NOTE(review): defined but never attached to duckduckgo_bot -- confirm intent
    return example_strategy.pop(0)

# +
strategies = {
    'Alena': ['walk', 'walk', 'sabotage', 'walk', 'walk', 'sabotage', 'walk', 'walk', 'walk', 'walk', 'walk', 'walk']*2,
    'Aubrey': ['walk', 'walk', 'walk', 'sabotage', 'walk', 'walk', 'walk', 'sabotage', 'walk', 'walk', 'walk', 'sabotage']*2,
    'Iacopo': ['walk', 'sabotage', 'walk', 'walk', 'walk', 'walk', 'sabotage', 'sabotage', 'walk', 'walk', 'walk', 'walk']*2,
    'Robert': ['walk', 'sabotage', 'sabotage', 'sabotage', 'sabotage', 'walk', 'walk', 'walk', 'walk', 'sabotage', 'sabotage', 'walk']*2,
}

bots = []
for name, strategy_list in strategies.items():
    b = Bot(name, strategy_list)
    print(name, strategy_list.count("walk"))
    bots.append(b)

game = Game(bots, verbose=True)

# + tags=[]
game.play_round()
# -

# ## Defining a barely-smart Bot Strategy
# We'll be using what is called `Class Inheritance`


class BarelySmartBot(Bot):  # inherits all methods and attributes from the Bot class
    def play(self, bot_positions):
        # re-defining (overwriting) the `play` method to implement the new strategy:
        # sabotage whenever any bot stands on square 5 or 6.
        # BUGFIX: the original tested `range(5, 7) in bot_positions.values()`, which
        # looks for the range *object* among the values and is never true.
        if any(pos in range(5, 7) for pos in bot_positions.values()):
            return "sabotage"
        else:
            return "walk"


# and let's play!!!
bots = [Bot('Dummy1'), Bot('Dummy2'), Bot('Dummy3')]#, BarelySmartBot("BarelySmart")]
game = Game(bots, verbose=True)
game.play_round()

# # The Grand Prix

# +
# # !pip install tqdm ipywidgets
# # !jupyter nbextension enable --py widgetsnbextension

# + tags=[]
from tqdm.auto import tqdm
import pandas as pd

def grand_prix(n=1000):
    # NOTE(review): reads the module-level `bots` list; the same bot objects are
    # re-used for every race (Game resets their positions in __init__).
    winnings = {b: 0 for b in bots}
    for _ in tqdm(range(n)):
        game = Game(bots, verbose=False)
        while game.winner is None:
            game.play_round()
        winnings[game.winner] += 1
    return winnings

winnings = grand_prix(n=10_000)

podium = pd.Series(winnings).sort_values(ascending=False)
podium.plot.bar(grid=True, figsize=(25,6), rot=0)

# +
cursed_squares = [5, 6, 7]

def strategy(bot_positions):
    is_there = False
    for name, pos in bot_positions.items():
        if name != "aubrey":
            if pos in cursed_squares:
                is_there = True
                break
    # BUGFIX: the original tested the undefined name `value` here (NameError);
    # the flag computed above is clearly what was meant.
    if is_there:
        return "sabotage"
    else:
        return "walk"
# -

bot_positions = {"Bot1": 1, "Bot2": 3, "Bot3": 5}
bot_positions.values()

# BUGFIX: a stray bare `value` expression stood here and raised NameError;
# scratch-cell leftover, removed.

a = [5, 7]
b = [1, 3, 5]

# +
is_there = False
for i in a:
    if i in b:
        is_there = True
        break
is_there
# -

[i for i in a if i in b]
2020-21_semester2/10_Object_Oriented_Programming.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:qiskit] # language: python # name: conda-env-qiskit-py # --- # Основано на: # # - [Inversion over mean](https://gitlab.com/qworld/bronze-qiskit/-/blob/master/quantum-with-qiskit/Q84_Inversion_About_the_Mean.ipynb) QBronze. # - [Grover search](https://gitlab.com/qworld/bronze-qiskit/-/blob/master/quantum-with-qiskit/Q92_Grovers_Search_Implementation.ipynb) QBronze. # - [Wiki](https://en.wikipedia.org/wiki/Grover%27s_algorithm) # - [Qiskit tutorial](https://qiskit.org/textbook/ch-algorithms/grover.html) # # Диффузор Гровера :) # # У этого метода есть несколько названий. Он известен как "итерация Гровера" (Grover iteration), "диффузор Гровера" (Grover diffusor), "усиление комплексной амплитуды" (Amplitude Amplification, AA) или **"обращение относительно среднего"** (inversion/reflection over/about mean). Именно последнее название выражает математическую суть метода поиска помеченных объектов в неупорядоченном массиве. Его мы и разберём. # # Этот метод базируется на следующих идеях: # 1. Что произойдёт со средним значением массива из одинаковых чисел, если мы отразим относительно него все элементы массива? # 2. Насколько изменятся значения элементов массива, если среди них все, кроме одного, одинаковые? $[1, 1, 1, 1, 1, 1, 1, 1, 1, -1] \rightarrow ?$ Насколько сильно изменится отличающееся значение? # 3. Как будет меняться ответ на второй вопрос, если отличающихся элементов будет становиться больше? # # Попробуйте на бумаге выполнить следующие действия со массивом `[1, 1, 1, 1, 1]`. # # 1. Измените знак 4-го элемента. # 2. Обратите все значение относительно среднего. # 3. Измените знак 4-го элемента. # 4. Обратите все значение относительно среднего. # # То, что вы только что поделали, называется алгоритмом Гровера! Осталось научиться реализовывать его на квантовом компьютере. 
# ## Demonstration
# Let's look at a demonstration of these ideas. To do that:
#
# 1. Implement the missing functions (done below).
# 2. Change N to understand how it affects the result.

# +
import random
import numpy as np
import matplotlib.pyplot as plt

# 50 elements in the array, 100 iterations
W, T = 50, 100

# here we accumulate the result of the changes:
# the i-th column of our "canvas" reflects the state of the number array
# after the i-th application of our method.
# at iteration zero all values are simply equal
canvas = np.ones((W, T)) / (W ** .5)

# this array stores which elements are marked:
# 0 where the element is unremarkable,
# 1 where it is "interesting" and we want to find it
markers = np.zeros((W,))

# number of marked elements
N = 1

# mark N randomly chosen elements
for i in range(N):
    markers[random.randrange(canvas.shape[0])] = 1

print("Массив markers:", markers)

# +
def invert(markers, vec):
    """Flip the sign of every element of *vec* whose marker is 1 (the oracle step)."""
    res = vec.copy()
    # BUGFIX: the original stub returned the copy unchanged, which made the whole
    # demo a no-op; flip the sign wherever the marker array holds a 1.
    res[markers == 1] *= -1
    return res


def reflect_over_mean(vec):
    """Reflect every element of *vec* about the array's mean value (the diffusion step)."""
    res = vec.copy()
    # BUGFIX: implemented the reflection v' = 2*mean(v) - v (was an empty stub).
    res = 2 * res.mean() - res
    return res
# -

# [solution](./Q81R_Inversion_Over_Mean_Solution.ipynb)

# Run 100 iterations and fill our "canvas" with the array states after each one.

for i in range(1, T):
    inv = invert(markers, canvas[:, i-1])
    ref = reflect_over_mean(inv)
    canvas[:, i] = ref

# And now let's draw the picture.
# + # зададим координатную сетку для 3D-графика _xx, _yy = np.meshgrid(np.arange(canvas.shape[1]), np.arange(canvas.shape[0])) # разделим её на x-ы и y-ки в отдельные массивы x, y = _xx.ravel(), _yy.ravel() # нарисуем в 3D heights = [canvas[j, i] for i, j in zip(x, y)] bottom = np.zeros_like(heights) fig = plt.figure(figsize=(20, 20)) ax1 = fig.add_subplot(121, projection='3d') ax1.set_xlabel("итерации") ax1.set_ylabel("индексы массива") ax1.set_zlabel("значения") ax1.bar3d(x, y, 0, 1, 1, heights, shade=True, alpha=.4, color='orange') plt.show() # и в 2D plt.figure(figsize=(15, 3)) plt.imshow(canvas, cmap='gray') plt.xlabel("итерации") plt.ylabel("индексы массива") plt.show() # - # ## Матричная форма операции обращения относительно среднего # # Итак, мы рассмотрели две важных функции, из которых, по сути, и состоит алгоритм Гровера. Нам осталось только найти их реализацию на квантовом копьютере. Здесь мы остановимся именно на отражении относительно среднего: # # Давайте выведем, как можно записать это отражение в виде умножения вектора (массива) на матрицу. # # Когда вы реализовывали функции, вы должны были заметить, что новое значение $i$-го элемента массива равно: # # $v'_i = 2\bar{v} - v_i = 2\bar{v} - 1*v_i$. # # Для всего массива (вектора) $v$ целиком мы можем записать это как: # # $v' = 2\bar{v}\mathbb{1} - v = 2*\mathbb{1}\bar{v} - Iv$. # # Кстати, что это за матрицы $\mathbb{1}, I$? 
# # Давайте раскроем среднее значение и тоже запишем его в виде матричного произведения: # # $\bar{v} = \frac{1}{N}\times\sum_i{v_i} = \sum_i\frac{1}{N} # \times{v_i}= (\frac{1}{N}\mathbb{1}^T)\times v$ # # А теперь подставим это в формулу выше: # # $v' = 2\frac{1}{N}\mathbb{1}(\mathbb{1}^Tv) - Iv = $ # # $ = \frac{2}{N}(\mathbb{1}\mathbb{1}^T)v - Iv = $ # # $ = (\frac{2}{N}\mathbb{E} - I)v$ # # Итак, мы получили матрицу $(\frac{2}{N}\mathbb{E} - I)$, не зависящую от значений переменных, на которую можно умножить любой вектор и получить тот же результат, что и в функции `reflect_over_mean(vec)`. # # ## Как нам собрать такую матрицу из квантовых вентилей? # + # матрица, которую мы хотим получить M = 2 * (1 / 4) * np.ones((4, 4)) - np.eye(4) # некоторые известные нам квантовые операторы # <NAME> H = np.array([[1, 1], [1, -1]]) * (.5 ** .5) # отрицание X = np.array([[0, 1], [1, 0]]) # контролируемое изменение фазы CZ = np.eye((4)) CZ[-1, -1] = -1 print("[H⊗H][X⊗X](CZ)[X⊗X][H⊗H]") U = np.kron(H, H) @ np.kron(X, X) @ CZ @ np.kron(X, X) @ np.kron(H, H) print(U) U = -U print("-[H⊗H][X⊗X](CZ)[X⊗X][H⊗H]") print(U) assert np.allclose(M, U) # - # Повторим это в виде контура, и проверим, что матрицы совпадают. # + from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, Aer, execute from qiskit.visualization import plot_histogram import matplotlib.pyplot as plt qc = QuantumCircuit(2) ################################################## # Ваш код здесь. Не обращайте внимание на минус :) ################################################## print(qc.draw()) job = execute(qc, Aer.get_backend('unitary_simulator')) MX = job.result().get_unitary().real assert np.allclose(-MX, M) # - # [решение](./Q81R_Inversion_Over_Mean_Solution.ipynb)
materials/third_day/Q81R_Inversion_Over_Mean.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Recurrent Neural Networks for Sequences # ## Sentiment Analysis with the IMDb Dataset # ## <NAME> # --- # --- # # ## Introduction # # Recurrent Neural Networks (RNN) are a class of artificial neural networks that are used with sequential data. RNNs can use their internal memory to process variable length sequences of inputs which makes them good for connected handwriting or speech recognition. They process sequences of data to perform binary classification which predicts whether a given review's sentiment is positive or negative. The *recurrent* aspect comes from the loops within the neural network that cause the output of one layer to become the input to the the same layer in the next point in time. As we will be using the IMDb movie reviews dataset that is built-in to the `Keras` package, our time step will be the next word in the sequence of words. # # These aforementioned loops allow the RNN to learn and remember the relationships of the data in the given sequence. For instance, # # + Negative Sentiment # # - `The movie is not good.` # + Displays a negative sentiment. # # + Positive Sentiment # # - `The movie is good.` # + Displays a positive sentiment, but not as positive as: # # - `The movie is excellent!` # # Although the initial statement shows `good` which is positive, because it has `not` before it the sentiment is deemed negative. # With this example in mind, the RNN takes into account the relationships between earlier and later parts of a sequence. Since, there can be instances where there can be many words to consider instead of an adjacent sentiment like in our prior example. 
To account for this, we will use a Long Short-Term Memory (LSTM) layer to make the neural network *recurrent* and optimizes so that it can handle learning for sequences that contain many key words with more arbitrary words between them. # # ### Applications # # RNNs can be used for multiple tasks, such as: # # + predictive text input: displaying possible next words while typing, # + sentiment analysis # + inter-language translation, and # + automated closed captioning in video # # ### How does it works? # # ![Basic RNN Structure](basic_rnn.png) # # As the image above depicts, basic RNNs are a network of neuron-like nodes that are organized into successive layers. Each node have a one-way (directed) connection to all of the other nodes in the next successive layer with each node having a time-varying real-valued activation. Each connection has a modifiable real-valued weight while nodes are either input nodes, output nodes, or hidden nodes. The recursive neural network applies the same set of weights recursively by traversing the structure in topological order. # # For the purpose of the RNN we will build today, we will use a long short-term memory (LSTM) layer which avoids the vanishing gradient problem. This means that the LSTM prevents back-propagated errors from vanishing which results in the ability for errors to flow backwards through however many virtual layers unfolded in space. In short, the LSTM can learn tasks that require memories of events that happened many discrete time steps earlier. 
# # The LSTM cell is depicted below: # # ![Long Short-Term Memory Diagram](lstm.png) # # --- # --- # # ## Data Manipulation # ### Import Packages # # To build the network we will need to import the following packages: # # + `tensorflow.keras` # -`datasets`: to access the IMDb data set # - `utils`: to access the function `plot_model` # + Allows us to store the visualization of our RNN model after `Keras` assigns the layer names in the image # - `models`: to access the `Sequential` model # + Executes our layers sequentially--output of one layer becomes the input for the next. # - `layers`: to access the functions `Embedding`, `LSTM`, and `Dense` # + Allows us to add our Embedding, LSTM, and Dense layers. # # + `IPython.display` # - `Image`: to show the image in the notebook # # --- from tensorflow.keras.datasets import imdb from tensorflow.keras.preprocessing.sequence import pad_sequences from sklearn.model_selection import train_test_split from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, LSTM, Embedding from tensorflow.keras.utils import plot_model from IPython.display import Image # %pprint #To turn pretty printing off for display consolidation # --- # ### Gather the Data # # For the purposes of this notebook, as stated previously, we will be using the `IMDb` dataset from `tensorflow.keras.datasets` which contains 25,000 training samples and 25,000 testing samples labeled with its positive (1) or negative (0) sentiment. It contains over 88,000 unique words in the dataset. Due to the limits of our CPU, we will only be focusing on the top 10,000 most frequently occurring words as training will take longer with the more data you load. 
# +
word_number = 10000

(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=word_number)

print("Shape of X_train: ", X_train.shape)
print("Shape of y_train: ", y_train.shape)
print("Shape of X_test: ", X_test.shape)  # BUGFIX: label said "X_train"
print("Shape of y_test: ", y_test.shape)  # BUGFIX: label said "X_train"

X_train[1]
# -

# As we can see that the `IMDb` data is numerically encoded, in order to see the original text,
# we need to know the word to which each number corresponds to.

# +
word_toIndex = imdb.get_word_index()

print("Index for great: ", word_toIndex['great'])
print("Index for good: ", word_toIndex['good'])
print("Classification: ", y_train[1])

index_toWord = {index: word for (word, index) in word_toIndex.items()}
[index_toWord[i] for i in range(1, 51)]
# -

# The word with the ranking `1` is the most frequently occurring word, `2` the second, and so on.
# As such, the dictionary values begin with `1` in each encoded review has its ranking values are
# offset by 3 because the package `Keras` reserves the values 0, 1, and 2 for *padding*, *tokens
# denoting the start of a text sequence for learning*, and *unknown words (words that were not
# loaded due to adding the* `num_words` *argument in* `load_data`, respectively. In the following
# code cell, we will account for this offset when transforming the frequency ratings into words.

' '.join([index_toWord.get(i-3, '?') for i in X_train[1]])

# ---
# ### Prepare the Data
#
# As `Keras` requires all samples to have the same dimensions, we will restrict every review to the
# same number of words. As such, some of the reviews will need to be padded, while others will need
# to be truncated. To do this, we will use the `pad_sequences` function to reshape `X_train`'s row
# samples using a `maxlen=225` which returns a 2-D array.
#
# Once this is completed, we will split the data into training and testing data sets, and confirm
# that split by checking the shapes of our output for `X_test` and `X_train`.
# +
words_per_review = 225

X_train = pad_sequences(X_train, maxlen=words_per_review)
X_test = pad_sequences(X_test, maxlen=words_per_review)

print("Shape of X_train: ", X_train.shape)
print("Shape of X_test: ", X_test.shape)  # BUGFIX: label said "X_train"
# -

# BUGFIX: the original unpacked `X_test, X_train, y_test, y_train = ...`, i.e. in the
# wrong order: train_test_split returns the (train, test) pairs first, so the 75%
# portion landed in the *test* variables and the model trained on the 25% slice.
# NOTE(review): this still re-splits only the original test set, discarding the
# 25,000-sample training split loaded earlier -- confirm this is intentional.
X_train, X_test, y_train, y_test = train_test_split(X_test, y_test, random_state=111, test_size=0.25)

# ---
# ## Build the Network
#
# We will use the `Sequential()` model to add the layers to our network adjacently. These layers
# will be added using functions from the `tensorflow.keras.layers` module.
#
# As our data set is so large, we will reduce its dimensionality by utilizing an embedding layer
# which will encode each word into a more compact dense-vector representation. This allows the
# word's context to be captured in the embedding. To embed the layer we will use the `Embedding()`
# function which takes the following information:
#
# + `input_dim`: number of unique words
#
# + `output_dim`: size of each word embedding
#
# + `input_length=words_per_review`: number of words in each input sample
#
# Once the embedding layer has been added, we will add two additional layers which will be a Long
# Short-Term Memory (LSTM) layers using the `LSTM()` function. The arguments to this layer are:
#
# + `units`: number of neurons in the layer
#
# + `dropout`: percentage of neurons to randomly disable when processing the layer's input and output.
#
#     - This reduces the over-fitting of the model.
#
# + `recurrent_dropout`: percentage of neurons to randomly disable when the layer's output is fed
#   back into the layer again to allow the network to learn from previously seen information.
#
# + `return_sequences`: to direct the output of the initial LSTM layer as input into the second LSTM layer.
# # Lastly, we will add a Dense Output layer using the `Dense()` function which takes in the following arguments: # # + `units`: number of neurons in the layer # # + `activation`: the function used to activate # # We will be using the `sigmoid` function as our activation function which is preferential for binary classification because it will reduce the arbitrary values to a range of $0.0-1.0$, producing a probability. The following code cell will create our neural network that we just described. # + # Create the Neural Network RNN = Sequential() # Add the Embedding Layer RNN.add(Embedding(input_dim=word_number, output_dim=128, input_length=words_per_review)) # Add the LSTM Layer RNN.add(LSTM(units=128, dropout=0.25, recurrent_dropout=0.25, return_sequences=True)) RNN.add(LSTM(units=128, dropout=0.2, recurrent_dropout=0.2)) # Add The Dense Layer RNN.add(Dense(units=1, activation='sigmoid')) # - # --- # ## Compile Network # # Now it is time to complete the model, so that we can train and evaluate it. # We will be using the `adam` optimizer to adjust the weights throughout the network, the `binary_crossentropy`method for the loss function, and `accuracy` metrics to check the percentage of successful predictions. RNN.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) # ### Summarize the Network RNN.summary() # ### Model Visualization # + plot_model(RNN, to_file='RNN.png', show_shapes=True, show_layer_names=True) Image(filename='RNN.png') # - # --- # ### Training & Evaluation # # For the sake of time and the fact that we have a Embedding layer, two LSTM layers, and a Dense layer; this notebook will only run two epochs (iterations). 
RNN.fit(X_train, y_train, epochs=3, batch_size=32, validation_data=(X_test, y_test)) loss, accuracy = RNN.evaluate(X_test, y_test) print("\n-----------------------------------------------") print(f'Loss Percentage: {loss:.10%}') print(f'Accuracy Percentage: {accuracy:.10%}') print("-----------------------------------------------") # --- # ## Conclusion # # <font size="3">Although the accuracy of our model seems low as it is not above 90%, our small neural network model did reasonably well considering it only has four layers and is being computed on a CPU. </font> # # ### For more information regarding Recurrent Neural Networks, please see: # <br> # <font size="3"> # # + [Recurrent Neural Network on Wiki](https://en.wikipedia.org/wiki/Recurrent_neural_network) # <br> # # + [Keras LSTM Tutorial](https://adventuresinmachinelearning.com/keras-lstm-tutorial/) # <br> # # + [Understanding LSTM and Its Diagrams](https://medium.com/mlreview/understanding-lstm-and-its-diagrams-37e2f46f1714) # <br> # # + [Animate RNN, LSTM, and GRU](https://towardsdatascience.com/animated-rnn-lstm-and-gru-ef124d06cf45) # <br> # # + [Fundamentals of Deep Learning: Intro to LSTM](https://www.analyticsvidhya.com/blog/2017/12/fundamentals-of-deep-learning-introduction-to-lstm/) # # </font> # # --- # ---
RNN/RNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## 2.4.4 & 2.4.5 Re and Beautiful Soup

# ### Re functions

import re

# BUGFIX: regex patterns are now raw strings; '\W', '\w' and '\d' are not valid
# string escapes and raise a SyntaxWarning on recent Python versions (the byte
# values of the patterns are unchanged).
pattern = r' \W+'
re_pattern = re.compile(pattern)

pattern = r' \W+'
re_pattern = re.compile(pattern)

re.search(r"(\w+)", "wow, it is awesome")

re.split(r"(\w+)", "wow, it is world of word")

re.sub(r"\d", "number", "7 candy")

# ### Using Beautiful Soup
# - additionally install html5lib with `pip install html5lib`

from bs4 import BeautifulSoup

# +
string = '<body> 이 글은 Beautiful soup 라이브러리를 사용하는 방법에 대한 글입니다. <br> </br> 라이브러리를 사용하면 쉽게 HTML 태그를 제거할 수 있습니다.</body>'

string = BeautifulSoup(string, "html5lib").get_text()  # keep only the text, with the HTML tags stripped
print(string)  # inspect the text
2.NLP_PREP/.ipynb_checkpoints/2.4.4&5.re_and_beautiful_soup-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
class Student:
    """A student with public attributes."""

    passingPercentage = 40  # class-level pass mark shared by all students

    def __init__(self, name, age=15, percentage=80):
        self.name = name
        self.age = age
        self.percentage = percentage

    def student_details(self):
        print("Name: ", self.name)
        print("Age: ", self.age)
        print("Percentage: ", self.percentage)

    def isPassed(self):
        if self.percentage > Student.passingPercentage:
            print("Student is passed")
        else:
            print('Student is failed')

    @staticmethod
    def welcome():
        print("Welcome to the school")

    @staticmethod
    def isage(age):
        return age > 16


s = Student('Ansh')
print(s.name)
s.student_details()

s.name = 'Asayhem'
s.student_details()

# here you can see I can access (and reassign) the name variable outside the
# class -- that is what a *public* attribute means. Not always a good habit,
# so let's make it private (accessible only inside the class) by changing our
# code a little bit.

# + pycharm={"name": "#%%\n"}
class StudentBetter:
    """Same student, but the name is stored as a name-mangled 'private' attribute."""

    passingPercentage = 40

    def __init__(self, name, age=15, percentage=80):
        self.__name = name  # double underscore -> mangled to _StudentBetter__name
        self.age = age
        self.percentage = percentage

    def student_details(self):
        print("Name: ", self.__name)
        print("Age: ", self.age)
        print("Percentage: ", self.percentage)

    def isPassed(self):
        # NOTE(review): compares against Student.passingPercentage rather than
        # StudentBetter.passingPercentage -- the values match, but it probably
        # should reference its own class.
        if self.percentage > Student.passingPercentage:
            print("Student is passed")
        else:
            print('Student is failed')

    @staticmethod
    def welcome():
        print("Welcome to the school")

    @staticmethod
    def isage(age):
        return age > 16


# I added '__' before the variable, so now it cannot be reached by its plain
# name from outside the class.
s1 = StudentBetter('Ansh')
s1.name = 'Asayhem'    # does NOT touch the private field: it just creates a new public `name` attribute
s1.__name = 'Asayhem'  # likewise creates a literal `__name` attribute (mangling only applies inside the class body)
s1.student_details()

# BUGFIX: the original ran `print(StudentBetter.__name)` at module level, which
# raises AttributeError and crashed the notebook; wrapped so the failure is
# demonstrated without crashing.
try:
    print(StudentBetter.__name)
except AttributeError as err:
    print("Cannot read __name from outside the class:", err)

# you can see we did not get 'Asayhem' in Name -- the private field was untouched

# + pycharm={"name": "#%%\n"}
# Still, there is a concept in Python known as Name Mangling
# where you can still access the private variable outside the class.
# 'DONT USE THIS ANYWHERE IN YOUR CODE UNLESS IT'S EMERGENCY'

s1._StudentBetter__name = 'Asayhem'
s1.student_details()
05. OOPS Part-1/9.Public and Private Modifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # <font color=blue size=25em>PyAwake Tutorial</font>
# <hr style="border-top-width: 4px; border-top-color: #FFFF00;">
#
# Accessing and Extracting the AWAKE CSV compressed database as well as
# visualizing the extracted datasets
# <br>
# https://gitlab.cern.ch/AWAKE/pyawake

# Author : <NAME>, <NAME>

# ## <font color=red>Searching Modules</font>

from pyawake.Searching import searching

# ### Master Search Method
# <br/>
# This master search method is used to search the AWAKE CSV database in a given
# timestamp range and load datasets in the form of dicts for further visualization.
# <br/>
# <b>Input Parameter</b> • Dataset Keywords separated by space<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Comment Keywords separated by space<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Starting Timestamp<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Ending Timestamp<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Dataset Cuts<br/>
# <b>User Responses</b><br/>
# Enter which dataset to load (0,limit)&emsp;&emsp;Input the dataset with the image (eg : 1)<br/>
# Enter Which file to load (0,limit)&emsp;&emsp;Accepted : Single Value, CSV Value, Range Value (eg : 0-50)<br/>
# <b>Output</b><br/>
# Array of datasets(dicts)

# Cuts restrict results to events whose proton-bunch intensity lies in (1.0, 100.0).
dataset_cuts = "/AwakeEventData/TT41.BCTF.412340/Acquisition/totalIntensityPreferred < 100.0,/AwakeEventData/TT41.BCTF.412340/Acquisition/totalIntensityPreferred > 1.0"
# Interactive: prompts the user to pick the dataset and file range to load.
dataset_image = searching.search("bovwa 04 raw", "", "2018-05-12 00:00:00", "2018-05-12 23:59:59", dataset_cuts)
# Example dataset/range that matches the query above:
#/AwakeEventData/BOVWA.04TT41.CAM4/ExtractionImage/imageRawData 0-1218

# ### Load specific columns
# <br/>
# This master search method is used to search the AWAKE CSV database in a given
# timestamp range and load datasets in the form of 1-d arrays with
# dataset-dependent attributes, for use in further manual analysis.
# <br/>
# <b>Input Parameter</b> • Dataset Keywords separated by space<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Comment Keywords separated by space<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Starting Timestamp<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Ending Timestamp<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Dataset Cuts<br/>
# <b>User Responses</b><br/>
# Enter which dataset to load (0,limit)&emsp;&emsp;CSV Value of datasets (eg : 0,2,4,6) <br/>
# Enter Which file to load (0,limit)&emsp;&emsp;&emsp;&emsp;Accepted : Single Value, CSV Value, Range Value<br/>
# <b>Output</b><br/>
# 2-D Array in the format : [Datetime, Datasets values, HDF Filename]

# Empty cuts string: no filtering applied.
dataset_col = searching.load_column("xmpp streak", "", "2018-05-12 00:00:00", "2018-05-12 23:59:59", "")

# ## <font color=red>Visualizing Modules</font>

from pyawake.Visualizing import visualizing

# ### Display Movie Method
# <br/>
# This method will automatically display a movie using awake_analysis_tool features.
# <br/>
# <b>Input Parameter</b> Array of datasets in dicts

visualizing.displayMovie(dataset_image[190:215])

# ### Visualize Multiple Images
# <br/>
# Visualization of multiple images in sub-plots (fewer than 10 images for faster results)
# <br/>
# <b>Input Parameter</b> • Dataset Keywords separated by space<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Comment Keywords separated by space<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Starting Timestamp<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Ending Timestamp<br/>
# &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;• Dataset Cuts<br/>
# <b>User Responses</b><br/>
# Enter which dataset to load (0,limit)&emsp;&emsp;CSV Value of datasets (eg : 0,2,4,6) <br/>
# Enter Which file to load (0,limit)&emsp;&emsp;&emsp;&emsp;Accepted : Single Value, CSV Value, Range Value<br/>
# <b>Output</b><br/>
# 2-D Array in the format : [Datetime, Datasets values, HDF Filename]

visualizing.visualize_multiple_image(dataset_image[190:215])

# <img src="https://developers.google.com/open-source/gsoc/resources/downloads/GSoC-logo-horizontal-200.png" alt="GSoC logo">
examples/ExampleNotebookpyAwake.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Basic classification in TensorFlow/Keras
# Based on:
# https://www.tensorflow.org/tutorials/keras/basic_classification
# and
# https://elitedatascience.com/keras-tutorial-deep-learning-in-python

# ## 0. Install TensorFlow and Keras
# TensorFlow is an open source machine learning framework created by Google.
# Keras is a high-level API to build and train models in TensorFlow used in this tutorial.

# ### 0.1. Install TensorFlow and Keras with Anaconda3:
# pip install tensorflow
# pip install keras

# ### 0.2. TensorFlow and Python3 compatibility issues
# (As of November, 2018) Anaconda3 is supplied with Python 3.7, unfortunately Tensorflow
# is not ready yet for that, and TensorFlow installation would fail.
# You should downgrade to Python 3.5 and then reinstall TensorFlow+Keras:
#
# conda install python=3.5.0
#
# pip install tensorflow
# pip install keras

# ## 1. Prepare the workspace and obtain+preprocess MNIST data

# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Flatten, Dense, Dropout, Convolution2D, MaxPooling2D
# NOTE(review): mixes standalone `keras` with `tensorflow.keras`; works on the
# versions this tutorial targets but breaks on Keras >= 2.4 -- confirm versions.
from keras.utils import np_utils

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt

print("TensorFlow version:", tf.__version__)
print("Keras version:", keras.__version__)

# MNIST dataset: 70,000 images of decimal digits (0..9) in format 28x28:
# ![image.png](attachment:image.png)

# Import MNIST dataset split into 60,000 for training and 10,000 for testing
dset = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = dset.load_data()

# Insight into imported data
num = 0
plt.imshow(train_images[num])
print(train_images[num])
print(train_images[num].shape)
print(train_labels[num])
print(train_labels)

# Categorize labels (not mandatory for our first simple network)
train_labels_cat = np_utils.to_categorical(train_labels, 10)
test_labels_cat = np_utils.to_categorical(test_labels, 10)
print(train_labels[num], train_labels_cat[num])

# Preprocess image data: keep an unscaled copy for plotting, then scale to [0, 1]
test_images_orig = test_images
train_images = train_images / 255
test_images = test_images / 255
print(train_images[num])

# ## 2. Create, train and run model

# ### 2.1. Create simple model
model = keras.Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dense(10, activation=tf.nn.softmax))

# ### 2.2. Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# ### 2.3. Train model
model.fit(train_images, train_labels_cat, epochs=5)

# ## 3. Test model
test_loss, test_acc = model.evaluate(test_images, test_labels_cat)

# Print accuracy
print('Test accuracy:', test_acc)

# Calculate prediction for test data
predictions = model.predict(test_images)

# Check what is predicted
num = 1
plt.imshow(test_images[num])
print(predictions[num])
print(np.argmax(predictions[num]))

# ## 4. Visualize results

# ### 4.1. Define auxiliary functions

# +
def plot_image(i, predictions_array, true_label, img):
    # Draw test image i, labelled with predicted class, confidence and true
    # class; blue caption when correct, red when wrong.
    # Relies on the module-level `class_names` list defined below.
    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                         100*np.max(predictions_array),
                                         class_names[true_label]), color=color)

def plot_value_array(i, predictions_array, true_label):
    # Bar chart of the 10 class probabilities for sample i: predicted class
    # in red, true class in blue (blue overwrites red when they coincide).
    predictions_array, true_label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')
# -

# ### 4.2. Plot prediction

# class names just for visualization
class_names = ['0-zero', '1-one', '2-two', '3-three', '4-four',
               '5-five', '6-six', '7-seven', '8-eight', '9-nine']

# visualization of prediction of one image
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images_orig)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)

# ### 4.3. More visualization

# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions, test_labels, test_images_orig)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions, test_labels)

# ## 5. Import, preprocess and train FASHION-MNIST
# Fashion-MNIST is similar to MNIST but of clothings:
# ![image.png](attachment:image.png)

# Load data
dset = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = dset.load_data()
num = 0
plt.imshow(train_images[num])

# Preprocess data (same pipeline as MNIST above)
train_labels_cat = np_utils.to_categorical(train_labels, 10)
test_labels_cat = np_utils.to_categorical(test_labels, 10)
test_images_orig = test_images
train_images = train_images / 255
test_images = test_images / 255

# Create and compile model (same architecture as the MNIST model)
model = keras.Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dense(10, activation=tf.nn.softmax))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train model and obtain predictions
model.fit(train_images, train_labels_cat, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels_cat)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)

# Plot one prediction
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
i = 1
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images_orig)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)

# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions, test_labels, test_images_orig)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions, test_labels)

# ## 6. Convolutional neural network
# Convolutional neural networks are better suited for image recognition

# Load data; conv layers need an explicit channel axis, hence the reshape to
# (N, 28, 28, 1)
dset = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = dset.load_data()
test_images_orig = test_images
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1)
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1)
train_labels_cat = np_utils.to_categorical(train_labels, 10)
test_labels_cat = np_utils.to_categorical(test_labels, 10)
train_images = train_images / 255
test_images = test_images / 255

# +
# Create and train model, and obtain predictions
model = keras.Sequential()
model.add(Convolution2D(32, (3, 3), activation='relu', input_shape=(28,28, 1)))
model.add(Convolution2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(train_images, train_labels_cat, batch_size=32, epochs=1, verbose=1)
test_loss, test_acc = model.evaluate(test_images, test_labels_cat)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)
# -

# Plot one prediction
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
i = 1
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images_orig)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions, test_labels, test_images_orig)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions, test_labels)
Keras_TensorFlow_Image_Recognition/keras_image_recognition_classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: default:Python # language: python # name: conda-env-default-py # --- # # 7장. XGBoost로 외계 행성 찾기 # *아래 링크를 통해 이 노트북을 주피터 노트북 뷰어(nbviewer.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.* # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://nbviewer.org/github/rickiepark/handson-gb/blob/main/Chapter07/Discovering_Exoplanets.ipynb"><img src="https://jupyter.org/assets/share.png" width="60" />주피터 노트북 뷰어로 보기</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/rickiepark/handson-gb/blob/main/Chapter07/Discovering_Exoplanets.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> # </td> # </table> # + # 노트북이 코랩에서 실행 중인지 체크합니다. import sys if 'google.colab' in sys.modules: # !pip install -q --upgrade xgboost # !wget -q https://raw.githubusercontent.com/rickiepark/handson-gb/main/Chapter07/exoplanets.csv.zip # !unzip -o exoplanets.csv.zip # + # 경고 끄기 import warnings warnings.filterwarnings('ignore') import xgboost as xgb xgb.set_config(verbosity=0) # - # ## 외계 행성 찾기 # # ### 외계 행성 데이터셋 import pandas as pd df = pd.read_csv('exoplanets.csv', nrows=400) df.head() df['LABEL'].value_counts() # ### 그래프 시각화 # + import matplotlib.pyplot as plt import numpy as np import seaborn as sns sns.set() X = df.iloc[:,1:] y = df.iloc[:,0] - 1 def light_plot(index): y_vals = X.iloc[index] x_vals = np.arange(len(y_vals)) plt.figure(figsize=(15,8)) plt.xlabel('Number of Observations') plt.ylabel('Light Flux') plt.title('Light Plot ' + str(index), size=15) plt.plot(x_vals, y_vals) plt.show() # - light_plot(0) light_plot(37) light_plot(1) # ### 데이터 준비 df.info() # 누락된 값의 개수를 셉니다. df.isnull().sum().sum() # ### 초기 XGBClassifier # + # XGBRegressor를 임포트합니다. 
from xgboost import XGBClassifier # accuracy_score를 임포트합니다. from sklearn.metrics import accuracy_score # train_test_split를 임포트합니다. from sklearn.model_selection import train_test_split # 데이터를 훈련 세트와 테스트 세트로 나눕니다. X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2) # + # XGBClassifier를 초기화합니다. model = XGBClassifier(booster='gbtree') # 훈련 세트로 모델을 훈련합니다. model.fit(X_train, y_train) # 테스트 세트에 대한 예측을 만듭니다. y_pred = model.predict(X_test) score = accuracy_score(y_pred, y_test) print('점수: ' + str(score)) # - # ## 오차 행렬 분석하기 # # ### confusion_matrix from sklearn.metrics import confusion_matrix confusion_matrix(y_test, y_pred) # ### classification_report from sklearn.metrics import classification_report print(classification_report(y_test, y_pred)) # ### 다른 측정 방법 # # #### recall_score from sklearn.metrics import recall_score recall_score(y_test, y_pred) # ## 불균형 데이터 리샘플링 # # ### 언더샘플링 # # #### xgb_clf 함수 def xgb_clf(model, nrows): df = pd.read_csv('exoplanets.csv', nrows=nrows) # 데이터를 X와 y로 나눕니다. X = df.iloc[:,1:] y = df.iloc[:,0] - 1 # 데이터를 훈련 세트와 테스트 세트로 나눕니다. X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2) # 훈련 세트에서 모델을 훈련합니다. model.fit(X_train, y_train) # 테스트 세트에 대한 예측을 만듭니다. y_pred = model.predict(X_test) score = recall_score(y_test, y_pred) print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) return score # #### 언더샘플링 테스트 xgb_clf(XGBClassifier(), nrows=800) xgb_clf(XGBClassifier(), nrows=200) xgb_clf(XGBClassifier(), nrows=74) # ### 오버샘플링 df_train = pd.merge(y_train, X_train, left_index=True, right_index=True) # + newdf = pd.DataFrame(np.repeat(df_train[df_train['LABEL']==1].values, 9,axis=0)) newdf.columns = df_train.columns df_train_resample = pd.concat([df_train, newdf]) df_train_resample['LABEL'].value_counts() # - X_train_resample = df_train_resample.iloc[:,1:] y_train_resample = df_train_resample.iloc[:,0] # + # XGBClassifier를 초기화합니다. model = XGBClassifier() # 훈련 세트로 모델을 훈련합니다. 
model.fit(X_train_resample, y_train_resample)

# Make predictions on the test set.
y_pred = model.predict(X_test)
score = recall_score(y_test, y_pred)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print(score)
# -

# ## Tuning the XGBClassifier
#
# ### Adjusting class weights
#
# #### The replace method

# +
# Remap the labels so the positive class (exoplanet star) is 1.
df['LABEL'] = df['LABEL'].replace(1, 0)
df['LABEL'] = df['LABEL'].replace(2, 1)

df['LABEL'].value_counts()
# -

# #### scale_pos_weight

# +
# Split the data into X and y.
X = df.iloc[:,1:]
y = df.iloc[:,0]

# Split the data into a training set and a test set.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2)

# +
# scale_pos_weight=10 counterbalances the roughly 10:1 class imbalance.
model = XGBClassifier(scale_pos_weight=10)

model.fit(X_train, y_train)

# Make predictions on the test set.
y_pred = model.predict(X_test)

score = recall_score(y_test, y_pred)

print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print(score)
# -

# ### Tuning the XGBClassifier
#
# #### Baseline model

from sklearn.model_selection import GridSearchCV, RandomizedSearchCV,StratifiedKFold, cross_val_score

# +
kfold = StratifiedKFold(n_splits=2, shuffle=True, random_state=2)

model = XGBClassifier(scale_pos_weight=10)

# Compute cross-validation scores (recall, because catching every
# exoplanet matters more than avoiding false positives).
scores = cross_val_score(model, X, y, cv=kfold, scoring='recall')

# Print the recall scores.
print('재현율: ', scores)

# Print the mean recall.
print('재현율 평균: ', scores.mean())
# -

# #### grid_search

def grid_search(params, random=False, X=X, y=y, model=XGBClassifier(scale_pos_weight=10, random_state=2)):
    """Run a grid (or randomized) hyperparameter search on `model`,
    scored by recall with the module-level `kfold` splitter, and print
    the best parameters and best score.

    NOTE: the defaults X=X, y=y are bound to the 400-row globals at
    definition time; pass X/y explicitly for other datasets.
    """
    xgb = model
    if random:
        grid = RandomizedSearchCV(xgb, params, cv=kfold, n_jobs=-1, random_state=2, scoring='recall')
    else:
        # Initialize the grid search object.
        grid = GridSearchCV(xgb, params, cv=kfold, n_jobs=-1, scoring='recall')
    # Fit on X and y.
    grid.fit(X, y)
    # Extract and print the best parameters.
    best_params = grid.best_params_
    print("최상의 매개변수:", best_params)
    # Extract and print the best score.
    best_score = grid.best_score_
    print("최상의 점수: {:.5f}".format(best_score))

grid_search(params={'n_estimators':[50, 200, 400, 800]})

grid_search(params={'learning_rate':[0.01, 0.05, 0.2, 0.3]})

grid_search(params={'max_depth':[1, 2, 4, 8]})

grid_search(params={'subsample':[0.3, 0.5, 0.7, 0.9]})

grid_search(params={'gamma':[0.05, 0.1, 0.5, 1]})

grid_search(params={'learning_rate':[0.001, 0.01, 0.03], 'max_depth':[1, 2], 'gamma':[0.025, 0.05, 0.5]})

grid_search(params={'max_delta_step':[1, 3, 5, 7]})

grid_search(params={'subsample':[0.3, 0.5, 0.7, 0.9, 1],
                    'colsample_bylevel':[0.3, 0.5, 0.7, 0.9, 1],
                    'colsample_bynode':[0.3, 0.5, 0.7, 0.9, 1],
                    'colsample_bytree':[0.3, 0.5, 0.7, 0.9, 1]}, random=True)

# #### A balanced subset

X_short = X.iloc[:74, :]
y_short = y.iloc[:74]

grid_search(params={'max_depth':[1, 2, 3],
                    'colsample_bynode':[0.5, 0.75, 1]},
            X=X_short, y=y_short,
            model=XGBClassifier(random_state=2))

# #### Tuning on the full dataset

# +
df_all = pd.read_csv('exoplanets.csv')
df_all['LABEL'] = df_all['LABEL'].replace(1, 0)
df_all['LABEL'] = df_all['LABEL'].replace(2, 1)
X_all = df_all.iloc[:,1:]
y_all = df_all.iloc[:,0]
# -

df_all['LABEL'].value_counts()

# Ratio of negative (5050) to positive (37) samples.
weight = int(5050/37)

# +
model = XGBClassifier(scale_pos_weight=weight)

# Compute cross-validation scores.
scores = cross_val_score(model, X_all, y_all, cv=kfold, scoring='recall')

# Print the recall scores.
print('재현율:', scores)

# Print the mean recall.
print('재현율 평균:', scores.mean())
# -

grid_search(params={'learning_rate':[0.001, 0.01]}, X=X_all, y=y_all, model=XGBClassifier(scale_pos_weight=weight))

grid_search(params={'max_depth':[1, 2],'learning_rate':[0.001]}, X=X_all, y=y_all, model=XGBClassifier(scale_pos_weight=weight))

# ### Consolidating the results

def final_model(X, y, model):
    """Fit `model` on (X, y), then evaluate it on the full dataset
    (module-level X_all / y_all), printing recall, the confusion matrix
    and a classification report."""
    model.fit(X, y)
    y_pred = model.predict(X_all)
    score = recall_score(y_all, y_pred)
    print(score)
    print(confusion_matrix(y_all, y_pred))
    print(classification_report(y_all, y_pred))

# #### 74 samples
# FIX: the parameter was misspelled `colsample_by_node`, which XGBoost does
# not recognize, so the intended column subsampling was silently not applied.
final_model(X_short, y_short, XGBClassifier(max_depth=2, colsample_bynode=0.5, random_state=2))

# #### 400 samples
final_model(X, y, XGBClassifier(max_depth=2, colsample_bynode=0.5, scale_pos_weight=10, random_state=2))

# #### 5,050 samples

# + tags=[]
final_model(X_all, y_all, XGBClassifier(max_depth=2, colsample_bynode=0.5, scale_pos_weight=weight, random_state=2))
Chapter07/Discovering_Exoplanets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="zBKwtXYvHElN" outputId="8df89332-1627-4259-b442-3e12ca8ac689" # cd /content/drive/MyDrive/Fun With MNIST/MNIST-2 (Binary Label Classification) # + id="U9q9MR0sHau-" import tensorflow as tf import numpy as np import pandas as pd import cv2 from google.colab.patches import cv2_imshow import matplotlib.pyplot as plt from tensorflow.keras.utils import to_categorical from sklearn.model_selection import train_test_split from sklearn.preprocessing import MultiLabelBinarizer from tensorflow.keras.utils import plot_model from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Input, Flatten, Dropout, Add, Activation from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam, SGD # + id="DRg2KyaV36oO" def create_dataset(dataset_size): mnist = tf.keras.datasets.mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() x = np.concatenate((x_train, x_test), axis = 0) y = np.concatenate((y_train, y_test)) print('Shape of the dataset after concatinating:') print(x.shape, y.shape) x_new = [] y_new = [] for _ in range(dataset_size): indices = np.random.randint(0, 70000, size = 2) new_image = np.concatenate((x[indices[0]], x[indices[1]]), axis = 1) x_new.append(new_image) ans1, ans2 = y[indices[0]], y[indices[1]] # new_y = [1 if z == ans1 or z == ans2 else 0 for z in range(10)] y_new.append((ans1, ans2)) return x_new, y_new # + colab={"base_uri": "https://localhost:8080/"} id="8D_RzwD_4yH9" outputId="1f33bfc8-d72c-4931-c63a-da5408309551" dataset_size = 50000 x, y = create_dataset(dataset_size) # + colab={"base_uri": "https://localhost:8080/", "height": 233} id="Ibpx-6yBN1Ef" outputId="88c72e13-f0b1-4a74-d657-5ddd6391f820" 
random = np.random.randint(dataset_size) plt.imshow(x[random], cmap = 'gray') print(y[random]) # + id="YXnly3MLwYqJ" def ml_split(x, y): ml = MultiLabelBinarizer() y = ml.fit_transform(y) X_train, X_valid, y_train, y_valid = train_test_split(x, y, test_size=0.20, random_state=42) X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size = 0.2, random_state = 42) return X_train, y_train, X_valid, y_valid, X_test, y_test # + id="oL_yr-1sw_Zb" X_train, y_train, X_valid, y_valid, X_test, y_test = ml_split(x, y) # + id="ELRquq7jOsIs" def print_shapes(): print('Shapes of dataset:') print('Training dataset:') print(X_train.shape, y_train.shape) print('\nValidation dataset:') print(X_valid.shape, y_valid.shape) print('\nTesting dataset:') print(X_test.shape, y_test.shape) # + id="5U0XL5Rz7GNH" def format_input(features, labels): features = tf.convert_to_tensor(features) features = tf.expand_dims(features, axis = -1) labels = tf.convert_to_tensor(labels) return features, labels # + colab={"base_uri": "https://localhost:8080/"} id="53Ayt3Gq9C6L" outputId="7bd13f11-4f77-4167-8ceb-86cd94b16db1" X_train, y_train = format_input(X_train, y_train) X_valid, y_valid = format_input(X_valid, y_valid) X_test, y_test = format_input(X_test, y_test) print_shapes() # + id="rkr--_mpHI0J" def create_generator(bath_size, X_train, y_train, X_valid, y_valid, X_test, y_test): train_datagen = ImageDataGenerator(rescale = 1.0/255.0, dtype = 'float') valid_datagen = ImageDataGenerator(rescale = 1.0/255., dtype = 'float') train_generator = train_datagen.flow(X_train, y_train, batch_size=batch_size, shuffle = True, seed = 42) valid_generator = valid_datagen.flow(X_valid, y_valid, batch_size=batch_size, seed = 42) test_generator = valid_datagen.flow(X_test, y_test, batch_size=batch_size, seed = 42) return train_generator, valid_generator, test_generator # + id="uvkvAT2hCsr0" batch_size = 256 train_generator, valid_generator, test_generator = create_generator(batch_size, X_train, 
y_train, X_valid, y_valid, X_test, y_test)

# + id="IQyYBDwcl83H"
# define cnn model
def define_model(shape=(28, 56, 1), num_classes=10):
    # VGG-style stack of Conv blocks followed by dense layers; the final
    # sigmoid layer outputs 10 independent per-digit probabilities
    # (multi-label, hence sigmoid rather than softmax).
    model = tf.keras.models.Sequential()
    model.add(Conv2D(32, (3, 3), kernel_initializer='he_uniform', padding='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), kernel_initializer='he_uniform', padding='same'))
    model.add(MaxPooling2D((2, 2)))
    # model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), kernel_initializer='he_uniform', padding='same'))
    model.add(Conv2D(64, (3, 3), kernel_initializer='he_uniform', padding='same'))
    model.add(MaxPooling2D((2, 2)))
    # model.add(Dropout(0.2))
    model.add(Conv2D(128, (3, 3), kernel_initializer='he_uniform', padding='same'))
    model.add(Conv2D(128, (3, 3), kernel_initializer='he_uniform', padding='same'))
    model.add(MaxPooling2D((2, 2)))
    # model.add(Dropout(0.2))
    model.add(Conv2D(256, (3, 3), kernel_initializer='he_uniform', padding='same'))
    model.add(Conv2D(256, (3, 3), kernel_initializer='he_uniform', padding='same'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(512, kernel_initializer='he_uniform'))
    model.add(Dense(256, kernel_initializer='he_uniform'))
    model.add(Dense(128, kernel_initializer='he_uniform'))
    model.add(Dense(64, kernel_initializer='he_uniform'))
    # model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='sigmoid'))
    return model

model = define_model()

# + id="ukXsFfX8WjCy" colab={"base_uri": "https://localhost:8080/"} outputId="5673c9d3-0128-45b0-fa84-43548cc033a9"
model.summary()

# + id="bs61hu5f_bjT" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="333bfac3-56c3-4118-b647-b8b71e837426"
plot_model(model, show_shapes = True)

# + id="IlbrgCbXRkZR"
class myCallback(tf.keras.callbacks.Callback):
    # Stops training once training accuracy exceeds 75%.
    # NOTE(review): `logs={}` is a mutable default argument -- harmless here
    # because logs is only read, but worth replacing with `logs=None`.
    def on_epoch_end(self, epoch, logs={}):
        if(logs.get('accuracy')>0.75):
            print("\nReached 75% accuracy so cancelling training!")
            self.model.stop_training = True

# Early stopping on training accuracy; `es` is defined but not passed to
# fit() below (only the 75%-accuracy callback is active).
es = tf.keras.callbacks.EarlyStopping(monitor = 'accuracy', patience = 20, verbose = 1, restore_best_weights=True, min_delta = 0.02)
callbacks = myCallback()

# + id="HdlSnMOrl172"
# Very small learning rate; binary cross-entropy matches the multi-label
# sigmoid output of define_model().
opt = tf.keras.optimizers.Adam(0.000001)
model.compile(loss = 'binary_crossentropy', optimizer = opt, metrics = ['accuracy'])

# + id="0WOvOXxsjRRN" colab={"base_uri": "https://localhost:8080/"} outputId="8e53a273-c607-464b-80a0-9fc1abbeb789"
epochs = 100
hist = model.fit(train_generator, epochs = epochs, validation_data = valid_generator, callbacks=[callbacks])

# + id="kxjIpLRIEkJp"
# Alternative: class-weighted training (kept for reference, not used).
# outputLabels = np.unique(y_train)
# from sklearn.utils import compute_class_weight
# classWeight = compute_class_weight('balanced', outputLabels, y_train)
# classWeight = dict(enumerate(classWeight))
# model.fit(train_generator, epochs = epochs, validation_data = (valid_generator), class_weight=classWeight, callbacks=[callbacks, es])

# + id="ktQtCfNYp-NB"
# Extract the training history for plotting; note `epochs` is rebound from
# the int above to a range for the x-axis.
train_acc = hist.history['accuracy']
train_loss = hist.history['loss']
valid_acc = hist.history['val_accuracy']
valid_loss = hist.history['val_loss']
epochs = range(len(train_acc))

# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="0cvXJuH1qsm-" outputId="e3b9bb6f-da49-489b-ee0c-26c48060caa3"
plt.plot(epochs, train_acc, 'r', label = 'Train Accuracy')
plt.plot(epochs, valid_acc, 'b', label = 'Validation Accuracy')
plt.legend()
plt.title('Accuracy')

# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="x5Q_lbyzram2" outputId="c73c9747-e5af-4d97-9c34-2087cfc9c964"
plt.plot(epochs, train_loss, 'r', label = 'Train Loss')
plt.plot(epochs, valid_loss, 'b', label = 'Validation Loss')
plt.legend()
plt.title('Loss')

# + id="z5b7-VcxR_Ao" colab={"base_uri": "https://localhost:8080/"} outputId="8d5454ca-93db-4a2d-a814-d9c8a8d6ead0"
# Final held-out evaluation.
loss, accuracy = model.evaluate(test_generator, batch_size = batch_size)
print(loss)
print(int(accuracy * 100), '%')

# + id="tKdWCx6_Jja3"
model.save('mnist-2-2.h5')
MNIST-2/V1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="sVtvH58nb_Hp"
# # Word2Vec for Text Classification
#
# In this short notebook, we will see an example of how to use a pre-trained
# Word2vec model for doing feature extraction and performing text classification.
#
# We will use the sentiment labelled sentences dataset from UCI repository
# http://archive.ics.uci.edu/ml/datasets/Sentiment+Labelled+Sentences
#
# The dataset consists of 1500 positive, and 1500 negative sentiment sentences
# from Amazon, Yelp, IMDB. The three separate data files are combined into one
# below with a `cat` shell command once the archive has been downloaded.
#
# Let us get started!
# -

# ## Setup

# ### Imports

# +
# basic imports
import os, subprocess
from time import time

# pre-processing imports
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from string import punctuation

# imports related to modeling
import numpy as np
# FIX: pandas was used throughout the notebook (pd.read_csv, pd.Series)
# but never imported, so the data-loading cell raised NameError.
import pandas as pd
from gensim.models import Word2Vec, KeyedVectors
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
# -

# ### Loading Data & Word2Vec Model

# +
import zipfile

# Download training data (skipped when the files already exist locally).
DATA_PATH = "Data"
TRAIN_ZIP_URL = "http://archive.ics.uci.edu/ml/machine-learning-databases/00331/sentiment%20labelled%20sentences.zip"
TRAIN_ZIP_PATH = os.path.join(DATA_PATH, TRAIN_ZIP_URL.split('/')[-1].replace("%20"," "))
TRAIN_FOLDER_PATH = TRAIN_ZIP_PATH.replace('.zip','')
TRAIN_DATA_PATH = os.path.join(TRAIN_FOLDER_PATH, 'sentiment_sentences.txt')

if not os.path.exists(TRAIN_ZIP_PATH):
    process = subprocess.run('curl "%s" --output "%s"'%(TRAIN_ZIP_URL, TRAIN_ZIP_PATH), shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)

if not os.path.exists(TRAIN_FOLDER_PATH):
    with zipfile.ZipFile(TRAIN_ZIP_PATH, 'r') as zip_ref:
        zip_ref.extractall(DATA_PATH)

# Concatenate the three per-site files into one combined training file.
if not os.path.exists(TRAIN_DATA_PATH):
    subprocess.run('cd "%s" && cat amazon_cells_labelled.txt imdb_labelled.txt yelp_labelled.txt > sentiment_sentences.txt'%TRAIN_FOLDER_PATH, shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
# -

# Download the pre-trained Google News Word2Vec model (~1.6 GB).
WORD2VEC_URL = "https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz"
WORD2VEC_PATH = os.path.join(DATA_PATH, WORD2VEC_URL.split('/')[-1])
if not os.path.exists(WORD2VEC_PATH):
    process = subprocess.run('curl "%s" --output "%s"'%(WORD2VEC_URL, WORD2VEC_PATH), shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)

# + colab={} colab_type="code" id="COUGXAxcb_H5" outputId="f1b6d8ad-e22b-4126-d2ea-862697c4158b"
# Load W2V model. This will take some time.
w2v_model = KeyedVectors.load_word2vec_format(WORD2VEC_PATH, binary=True)

# Read text data and labels (tab-separated: sentence \t 0/1 label).
data_df = pd.read_csv(TRAIN_DATA_PATH,sep='\t',header=None).rename(columns={0:'text',1:'label'})
# -

# ## EDA

data_df.head()

data_df.label.value_counts()

# + colab={} colab_type="code" id="m-WjFyC6b_IE" outputId="5df9e11b-6f8e-42b8-e198-6fe343293cc3"
# Inspect the word2vec model.
# NOTE(review): `.vocab` exists only in gensim < 4.0; on gensim >= 4.0 use
# `w2v_model.key_to_index` instead -- confirm the installed version.
word2vec_vocab = w2v_model.vocab.keys()
word2vec_vocab_lower = [item.lower() for item in word2vec_vocab]
print(len(word2vec_vocab))
# -

# ## Preprocessing
#
# - Convert tokens to lowercase
# - Remove stopwords
# - Remove numbers and punctuations

# + colab={} colab_type="code" id="MFOGaDTwb_Ig" outputId="7603e297-9167-43ec-c7da-46d82dc850ad"
# preprocess the text.
# English stopwords, kept in a set for O(1) membership tests in keep_token.
mystopwords = set(stopwords.words("english"))

def preprocess_corpus(text):
    """Tokenize *text*, drop stopwords / digits / punctuation, lowercase the rest.

    Returns a list of cleaned tokens for a single sentence.
    NOTE(review): tokens are lowercased AFTER the stopword test, so capitalized
    stopwords (e.g. "The") slip through — confirm whether that is intended.
    """
    #Nested function that converts token to lowercase and removes stopwords & digits from a list of tokens
    tokens = word_tokenize(text)
    keep_token = lambda token: token not in mystopwords and not token.isdigit() and token not in punctuation
    return [token.lower() for token in tokens if keep_token(token)]

data_df['tokens'] = data_df['text'].apply(preprocess_corpus)
data_df.head()
# -

# ## Training
# ### Vectorizing Documents
# - Get mean of all token vectors in a document to get document vector

# + colab={} colab_type="code" id="fXRiGtY1b_Iq" outputId="2d57a96f-8da8-4285-ca1e-2c617578b9e1"
# Creating a feature vector by averaging all embeddings for all sentences
WORD2VEC_DIMENSION = 300  # dimensionality of the GoogleNews vectors

def get_doc_vec(tokens):
    """Return the mean word vector of the in-vocabulary *tokens*.

    Falls back to an all-zeros vector when no token is in the model,
    so every document still yields a fixed-length feature vector.
    """
    token_vecs = np.array([w2v_model[token] for token in tokens if token in w2v_model])
    doc_vec = token_vecs.mean(axis=0) if len(token_vecs)>0 else np.zeros(WORD2VEC_DIMENSION)
    return doc_vec

data_df['text_vec'] = data_df['tokens'].apply(get_doc_vec)
# -

# ### Training the Classifier

# + colab={} colab_type="code" id="mr9IaQppb_Ix" outputId="<PASSWORD>"
#Take any classifier (LogisticRegression here, and train/test it like before.
classifier = LogisticRegression(random_state=1234)
# NOTE(review): train_test_split has no random_state, so the split (and the
# reported accuracy) differs between runs — confirm if reproducibility matters.
train_data, test_data, train_cats, test_cats = train_test_split(data_df['text_vec'].apply(lambda x:x.tolist()), data_df['label'])
classifier.fit(train_data.to_list(), train_cats)
# -

# ## Evaluation

# pred: probability that *text* is positive (class-1 column of predict_proba).
pred = lambda text : classifier.predict_proba([get_doc_vec(preprocess_corpus(text))])[0,1]
# pred = lambda text : classifier.predict([get_doc_vec(preprocess_corpus(text))])
print("Accuracy: ", classifier.score(test_data.to_list(), test_cats))
preds = classifier.predict(test_data.to_list())
print(classification_report(test_cats, preds))

# + [markdown] colab_type="text" id="k7wjLB8rb_JB"
# With little efforts we got 84% accuracy. Thats a great starting model to have!!
# -

pred('Enjoyed the show. Will try again!')
pred('Service was unsatisfactory!')

# ### Analysing postive and negative tokens

# Score every distinct token from the corpus with the classifier, then list
# the 20 most positive and 20 most negative tokens.
all_tokens = pd.Series(list(set(sum(data_df['tokens'].to_list(),[]))))
all_token_scores = all_tokens.apply(pred)
positive_tokens = all_token_scores.set_axis(all_tokens).nlargest(20).index.to_list()
negative_tokens = all_token_scores.set_axis(all_tokens).nsmallest(20).index.to_list()
print ("Positive tokens (top 20 from corpus): \n%s"%', '.join(positive_tokens))
print ("\nNegative tokens sample (top 20 from corpus): \n%s"%', '.join(negative_tokens))
Ch4/03_Word2Vec_Example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.3 64-bit (''tf2'': conda)'
#     language: python
#     name: python38364bittf2condab6220203f80e4d5598e709edc4d26640
# ---

# %reload_ext autoreload
# %autoreload 2

# Train a DCGAN on MNIST digits resized to 64x64.
import sys
import os
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir))
sys.path.append(BASE_DIR)

import os
import time
import imgaug.augmenters as iaa
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from models.dcgan import DCGAN

# Pin to GPU 1 and cap its memory at 6 GB.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
cpus = tf.config.experimental.list_physical_devices(device_type='CPU')
tf.config.experimental.set_virtual_device_configuration(
    gpus[0],
    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*6)])


# FIX: show_batch was originally defined AFTER its first two calls below,
# which raises NameError when this script is run top to bottom. Moved here.
def show_batch(images):
    """Display up to 36 single-channel images in a 6x6 grid.

    Expects images normalized to [-1, 1]; rescales to [0, 255] for display.
    NOTE(review): the two calls on raw uint8 MNIST below predate the
    normalization step, so their display scaling is off — confirm intent.
    """
    fig = plt.figure(figsize=(6, 6))
    for i in range(images.shape[0]):
        plt.subplot(6, 6, i+1)
        image = images[i, :, :, 0] * 127.5 + 127.5
        plt.imshow(image, cmap='gray')
        plt.axis('off')
    plt.show()


(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
print(train_images.shape)
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1)
aug = iaa.Resize({"height": 64, "width": 64})
show_batch(train_images[:36])
show_batch(aug(images=train_images[:36]))

# (Removed a duplicate reshape here — train_images is already (N, 28, 28, 1).)
train_images = aug(images=train_images)
train_images = (train_images.astype(np.float32) - 127.5) / 127.5  # normalize pixels to [-1, 1]

BUFFER_SIZE = 60000
BATCH_SIZE = 256
noise_dim = 100
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)

dcgan = DCGAN(image_shape=(64, 64, 1))

# Sanity-check one real batch.
images = next(iter(train_dataset))
show_batch(images[: 36])

# + 
# Fixed latent seed so generator progress is comparable across epochs.
num_examples_to_generate = 36
seed = tf.random.normal([num_examples_to_generate, noise_dim])
# -

def show_generator(generator, seed):
    """Render the generator's current output for the fixed *seed* batch."""
    predictions = generator(seed, training=False)
    show_batch(predictions)

show_generator(dcgan.generator, seed)

# Train for 100 epochs, visualizing the fixed-seed samples after each epoch.
for _ in range(100):
    for image_batch in train_dataset:
        dcgan.train_step(image_batch, num_iter_disc=1, num_iter_gen=1)
    show_generator(dcgan.generator, seed)
experiment/dcgan.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="0qtAzmipHZMh"
# # Find 3 Disks
# Given an image with a background texture, and three overlaid texured disks, predict the centerpoint of the labeled "most concpicuous disk."
#
# (**Note:** on February 16, 2022 I saved a version of this to `graveyard/Find_3_Disks_before_generators.ipynb`)

# + id="oROsyXIQrzSM" colab={"base_uri": "https://localhost:8080/"} outputId="afc58da0-f5d8-4bbb-ce09-aee98853a18d"
import gc
import PIL
import math
import time
import random
import numpy as np
from os import listdir
from os.path import join
import os.path
from tqdm.auto import tqdm
from sklearn.model_selection import train_test_split

# %tensorflow_version 2.x
import tensorflow as tf
print('TensorFlow version:', tf.__version__)

# Import DiskFind utilities for PredatorEye.
import sys
sys.path.append('/content/drive/My Drive/PredatorEye/shared_code/')
import DiskFind as df

# df.set_global_random_seed(20220108)
df.set_global_random_seed(20220220)

model_save_directory = '/content/drive/My Drive/PredatorEye/saved_models/'

# + [markdown] id="tMa0B_-6IIFC"
# # Define uniform texture dataset

# + id="pdjCCGblIGdW"
def make_uniform_dataset(dataset_size, image_size, image_depth):
    """Generate *dataset_size* synthetic examples of square uniform-color
    images, each containing three disks; return (images, labels) lists.

    (Removed an unused local `dataset_shape` and stale commented-out
    remnants of the earlier camelCase implementation.)
    """
    images = []
    labels = []
    for i in tqdm(range(dataset_size)):
        (image, label) = generate_uniform_example(image_size, image_depth)
        images.append(image)
        labels.append(label)
    return (images, labels)


def generate_uniform_example(image_size, image_depth):
    """Return one synthetic example: a pixel tensor plus its (x, y) label.

    The image has a random uniform background color and three non-overlapping
    soft-edged disks whose colors fade progressively toward the background;
    the label is the normalized centerpoint of the most conspicuous disk.
    """
    bg_color = np.random.random_sample(image_depth)
    fg_color = np.random.random_sample(image_depth)
    disk_radius = image_size * df.relative_disk_radius()
    image_shape = (image_size, image_size, image_depth)
    image = np.full(image_shape, bg_color, dtype=np.float32)

    # Find 3 non-overlapping disk positions inside image.
    def random_center():
        s = image_size - (2 * disk_radius)
        return (np.random.random_sample(2) * s) + disk_radius

    centers = [random_center()]
    min_dist = 3 * disk_radius
    while len(centers) < 3:
        # Rejection-sample until the candidate is far enough from all others.
        c = random_center()
        all_ok = True
        for o in centers:
            if (df.dist2d(c, o) < min_dist):
                all_ok = False
        if (all_ok):
            centers.append(c)

    # Draw soft-eged disk with given centerpoint and color.
    def draw_disk(center, color):
        cx = int(center[0])
        cy = int(center[1])
        dr = int(disk_radius)
        for x in range(cx - dr, cx + dr + 1):
            for y in range(cy - dr, cy + dr + 1):
                d = math.sqrt(math.pow(x - cx, 2) + math.pow(y - cy, 2))
                if (d <= dr):
                    blend = df.spot_utility((x, y), center, dr * 0.85, dr)
                    # NOTE(review): x indexes the first (row) axis here, so
                    # drawing is effectively transposed relative to (x, y);
                    # the label swap below compensates — see author's TODO.
                    image[x, y, :] = df.interpolate(blend, bg_color, color)

    # Draw 3 soft-edged disks, with colors progressively more like background.
    draw_disk(centers[0], fg_color)
    draw_disk(centers[1], df.interpolate(0.33, fg_color, bg_color))
    draw_disk(centers[2], df.interpolate(0.66, fg_color, bg_color))

    label = centers[0] / image_size
    # TODO When I visualized the labels, they appeared xy flipped
    # trying "unflipping them"
    label = np.array((label[1], label[0]))
    return (image, label)

# + [markdown] id="SLmzNXRO6J5O"
# # Reader for complex dataset

# + colab={"base_uri": "https://localhost:8080/"} id="aFDGwHRQ5MGJ" outputId="cddbe73b-d6b6-439c-b549-4ab8b944e7ad"
# Define complex texture (photo plus synthetic) dataset
# 20220130 this structure (copied from above) is wrong.
# We want to read the dir in the main function.
# the "per filename" function could later be expanded to do amplification
# if so, should return lists, which would need to be concatenated in caller.

def make_complex_dataset(directory_pathname = '/content/drive/My Drive/PredatorEye/f3d_training_set/',
                         amplify_2x = True):
    """Read the precomputed "complex texture" training set from disk.

    Returns (images, labels) lists. When *amplify_2x* is true, the dataset is
    doubled by appending one augmented variant of each image (see
    modify_complex_example). Labels are normalized (x, y) disk centers parsed
    from the filenames by DiskFind.
    """
    directory_contents = listdir(directory_pathname)
    images = []
    labels = []
    for filename in tqdm(directory_contents):
        image_pathname = os.path.join(directory_pathname, filename)
        (image, label) = make_complex_example(image_pathname)
        images.append(image)
        labels.append(label)
        if amplify_2x:
            (image, label) = modify_complex_example(image, label)
            images.append(image)
            labels.append(label)
    return (images, labels)


def make_complex_example(image_pathname):
    """Read one image file and pre-process it for use in training the model.

    Returns (pixel_tensor, normalized_center_xy); raises AssertionError when
    the file is not in the expected format.
    """
    pixels = df.read_image_file_as_pixel_tensor(image_pathname)
    # Check for expected format.
    assert df.check_pixel_tensor(pixels), 'wrong file format: ' + image_pathname
    # Parse disk center position from file name.
    filename = os.path.basename(image_pathname)
    center_position = df.fcd_normalized_xy(filename, pixels)
    return (pixels, center_position)


def modify_complex_example(image, label):
    """Make a modified copy of one training example (pixel tensor, xy label).

    Augmentation applied: invert brightness, then rotate by a random multiple
    (1-3) of 90 degrees. FIX: the original also computed a horizontally
    mirrored copy of the pixels (`scaled_pixels = np.flip(image, axis=1)`)
    but never used it — that dead assignment is removed here. The label's
    flip/rotate/flip sequence is kept exactly as the author tuned it
    (their "counter-sensible experiment", 20220202), so behavior is unchanged.
    """
    def center_rot90(cp):
        return (cp[1], 0.5 - (cp[0] - 0.5))

    def center_flip(cp):
        return (0.5 - (cp[0] - 0.5), cp[1])

    # invert brighness of image:
    image = 1 - image
    label = center_flip(label)
    # Rotate one of: 1/4, 1/2, or 3/4
    n = random.randrange(1, 4)
    for i in range(n):
        image = np.rot90(image, k=1, axes=(1, 0))
        label = center_rot90(label)
    # counter-sensible experiment -- 11:38 20220202
    label = center_flip(label)
    image = image.astype(np.float32)
    return (image, label)


# Module-level copies of the label transforms, used by the checks below.
def center_rot90(cp):
    return (cp[1], 0.5 - (cp[0] - 0.5))

def center_flip(cp):
    return (0.5 - (cp[0] - 0.5), cp[1])

print('center_flip((0.2, 0.1)) =', center_flip((0.2, 0.1)))
print('center_flip([0.2, 0.1]) =', center_flip([0.2, 0.1]))
print()
print('center_rot90((0.2, 0.1)) =', center_rot90((0.2, 0.1)))
print('center_rot90([0.2, 0.1]) =', center_rot90([0.2, 0.1]))
print()
print('center_rot90((0.8, 0.4)) =', center_rot90((0.8, 0.4)))
print('center_rot90((0.4, 0.2)) =', center_rot90((0.4, 0.2)))
print('center_rot90((0.2, 0.6)) =', center_rot90((0.2, 0.6)))
print('center_rot90((0.6, 0.8)) =', center_rot90((0.6, 0.8)))

np.array((1,2))

# + [markdown] id="QPEykI8aTmOZ"
# # New experiment with generators.
# has exit at end of this

# + id="SC0pFYKVT43b"
# Real-time data-augmentation generator built on tf.keras.utils.Sequence.
#
# (Removed: large blocks of commented-out exploratory code — the "manual
# batches" prototype `runtime_augmentation_test` / `very_temp_train_util`
# and a first draft of `__getitem__` — which buried the live code below.)
#
# see example code:
# API doc: tf.keras.utils.Sequence
# https://www.tensorflow.org/api_docs/python/tf/keras/utils/Sequence
# Write your own Custom Data Generator for TensorFlow Keras
# https://medium.com/analytics-vidhya/write-your-own-custom-data-generator-for-tensorflow-keras-1252b64e41c3

class Find3DisksGenerator(tf.keras.utils.Sequence):
    """Batch generator: each batch is half base examples and half
    freshly-augmented copies of that same half batch, so the effective
    dataset is twice the size of the base data."""

    # Construct generator given arrays of base images and labels, and batch size
    def __init__(self, base_images, base_labels, batch_size):
        self.base_images = base_images
        self.base_labels = base_labels
        self.batch_size = batch_size

    # TODO 20220219 for now assume generator is twice the size of base data.
    def __len__(self):
        return math.ceil(2 * len(self.base_images) / self.batch_size)

    # TODO 20220219 for now assume generator is twice the size of base data.
    # TODO note this is surely not the most efficient/pythonic way to do this.
    def __getitem__(self, index):
        # Take the next "half batch" chunk from base arrays.
        hbs = int(self.batch_size / 2)
        start = index * hbs
        end = (index + 1) * hbs
        batch_images = self.base_images[start : end]
        batch_labels = self.base_labels[start : end]
        # Glue two copies of each array together, doubling the size.
        batch_images = np.concatenate((batch_images, batch_images), axis=0)
        batch_labels = np.concatenate((batch_labels, batch_labels), axis=0)
        # Now for the first "hbs" of each, make a modified augmentation.
        for i in range(hbs):
            image = batch_images[i]
            label = batch_labels[i]
            image, label = modify_complex_example(image, label)
            batch_images[i] = image
            batch_labels[i] = label
        return np.array(batch_images), np.array(batch_labels)


# 20220219 note that the pathname from which data is read is not passed in here.
# It is buried down inside make_complex_dataset(). Should it be an arg?
def f3d_read_and_split_base_dataset():
    """Read the non-amplified base dataset and split it 80/20 into
    (train_images, train_labels, test_images, test_labels) float32 arrays."""
    # Read base dataset, return list of images and labels.
    base_images, base_labels = make_complex_dataset(amplify_2x=False)
    # Split both 80% / 20%.
    train_fraction = 0.8
    train_count = int(len(base_images) * train_fraction)
    train_images = base_images[: train_count]
    test_images = base_images[train_count : ]
    print('len(train_images) =', len(train_images))
    print('len(test_images) =', len(test_images))
    print('train_images[0].shape =', train_images[0].shape)
    train_labels = base_labels[: train_count]
    test_labels = base_labels[train_count : ]
    print('len(test_labels) =', len(test_labels))
    train_images = np.array(train_images, dtype=np.float32)
    train_labels = np.array(train_labels, dtype=np.float32)
    test_images = np.array(test_images, dtype=np.float32)
    test_labels = np.array(test_labels, dtype=np.float32)
    return train_images, train_labels, test_images, test_labels


# 20220219 copied from DiskFind.py
def f3d_train_augmented_model(model, epochs, batch_size, plot_title):
    """Train *model* on the augmented generator, validate on the held-out
    split, plot accuracy/loss, and return the Keras History object."""
    (train_images, train_labels,
     test_images, test_labels) = f3d_read_and_split_base_dataset()
    training_data_generator = Find3DisksGenerator(train_images,
                                                  train_labels,
                                                  batch_size)
    # NOTE(review): passing batch_size alongside a Sequence is redundant
    # (the Sequence controls batching) and some TF versions reject it —
    # confirm against the TF version in use.
    history = model.fit(training_data_generator,
                        validation_data = (test_images, test_labels),
                        epochs=epochs,
                        batch_size=batch_size)
    print()
    df.plot_accuracy_and_loss(history, plot_title)
    return history


# debug tests:
epochs = 100
batch_size = 128
model = df.make_disk_finder_model(np.zeros((1, 128, 128, 3)))
f3d_train_augmented_model(model, epochs, batch_size, 'test')

# TODO 20220217 stop here rather than proceed on with rest of notebook.
# Deliberate halt: everything below is the pre-generator training flow,
# kept for reference but not meant to run (see TODO above).
assert False

# + [markdown] id="HlgYZGvt4zbw"
# # Generate dataset

# + id="NgE_VxpN49U5"
df.reset_random_seeds()
start_time = time.time()
# (dataset_images,
#  dataset_labels) = make_uniform_dataset(dataset_size = 5000,
#                                         image_size = 128,
#                                         image_depth = 3) # RGB
(dataset_images, dataset_labels) = make_complex_dataset()
elapsed_seconds = int(time.time() - start_time)
print('Elapsed time: ' + str(elapsed_seconds) +
      ' seconds (' + str(int(elapsed_seconds / 60)) +' minutes).')

# + [markdown] id="P2RaIfYLThfZ"
# # Split dataset

# + id="3bwzS-uJOuc-"
print('Total dataset size =', len(dataset_images))
# 80/20 train/test split of the freshly generated dataset.
(images_train, images_test,
 labels_train, labels_test) = train_test_split(dataset_images,
                                               dataset_labels,
                                               test_size=0.2)
# 20220202 oops, I was keeping these around, think I ought to free them.
dataset_images = []
dataset_labels = []
# Convert from Python lists to np arrays.
images_train = np.array(images_train, dtype=np.float32)
labels_train = np.array(labels_train, dtype=np.float32)
images_test = np.array(images_test, dtype=np.float32)
labels_test = np.array(labels_test, dtype=np.float32)
print('images_train.shape[0] =', images_train.shape[0])
print('labels_train.shape[0] =', labels_train.shape[0])
print('images_test.shape[0] =', images_test.shape[0])
print('labels_test.shape[0] =', labels_test.shape[0])

# + [markdown] id="tXSQ_2SG3he0"
# # Visualize some labels

# + id="td99jO4135uy"
df.reset_random_seeds()
df.visualize_dataset(images = images_test, labels=labels_test)

# + [markdown] id="9Iot7WB8KhGg"
# # Build and train model

# + id="2ppYkZ-grG2E"
# Run a model.
df.reset_random_seeds()
start_time = time.time()
fcd_model_timestamp = df.timestamp_string()
(model, history) = ([], []) # To release memory when rerunning in notebook.
gc.collect()
model = df.make_disk_finder_model(images_train)
history = df.run_model(model,
                       images_train,
                       labels_train,
                       images_test,
                       labels_test,
                       epochs=100,
                       batch_size= 128,
                       plot_title='F3D')
elapsed_seconds = int(time.time() - start_time)
print('Elapsed time: ' + str(elapsed_seconds) +
      ' seconds (' + str(int(elapsed_seconds / 60)) +' minutes).')
# model.save(model_save_directory + fcd_model_timestamp)
# Save the trained model under a timestamped name for later comparison.
model_save_path = (model_save_directory +
                   fcd_model_timestamp +
                   '_Find_3_Disks_complex')
model.save(model_save_path)
print('Saved trained model to', model_save_path)

# + id="7GQ_6teTVVSA"
# Inspect the training history object returned by run_model.
print(type(history))
print(type(history.history['loss']))
print(len(history.history['loss']))
print(history.history['loss'])

# + [markdown] id="e6C1NXAPARMu"
# # Visualize some predictions

# + id="KJHBxJv_AVXX"
df.reset_random_seeds()
# df.visualize_dataset(images=images_test, labels=labels_test, model=model)
df.visualize_dataset(images=images_test, labels=labels_test, model=model, count=20)
graveyard/Find_3_Disks_20220220.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] papermill={"duration": 0.066412, "end_time": "2021-06-22T14:36:08.514783", "exception": false, "start_time": "2021-06-22T14:36:08.448371", "status": "completed"} tags=[] # # **Importing Libraries and Dataset** # + papermill={"duration": 0.926924, "end_time": "2021-06-22T14:36:09.505988", "exception": false, "start_time": "2021-06-22T14:36:08.579064", "status": "completed"} tags=[] # Importing Libraries import pandas as pd import numpy as np import seaborn as sns # import matplotlib as plt import matplotlib.pyplot as plt from matplotlib import patches as mpatches import matplotlib.cm as cm # Reading Dataset supermarket_ram = pd.read_csv("../data/data.csv") supermarket_ram.head() sns.set_style("white") # sns.set(font_scale=1) # + [markdown] papermill={"duration": 0.064101, "end_time": "2021-06-22T14:36:09.634834", "exception": false, "start_time": "2021-06-22T14:36:09.570733", "status": "completed"} tags=[] # * **Invoice:** Represents the order ID. # * **Branch:** Branch of the supermarket. # * **City:** City of the supermarket. (**Note:** Branch and city represent the same information.) # * **Customer Type:** Type of customer (member or non-member). # * **Product line:** Type of the product category. # * **Unit Price:** Price of single unit. # * **Quantity:** Quantity of ordered product. # * **Tax 5%:** Tax on order. # * **Total:** Total value of order. # * **Date:** Order date. # * **Time:** Time of day for order. # * **Payment:** Mode of payment. # * **cogs:** Cost of order. # * **gross margin percentage:** Profit margin of the order. # * **gross income:** Total profit of the order. # * **Rating:** Customer rating received. 
# # + [markdown] papermill={"duration": 0.066138, "end_time": "2021-06-22T14:36:09.765425", "exception": false, "start_time": "2021-06-22T14:36:09.699287", "status": "completed"} tags=[] # # **Data Description & Structure** # + [markdown] papermill={"duration": 0.066399, "end_time": "2021-06-22T14:36:09.896751", "exception": false, "start_time": "2021-06-22T14:36:09.830352", "status": "completed"} tags=[] # #### **A. Column-Wise Structure of Dataframe** # + papermill={"duration": 0.098385, "end_time": "2021-06-22T14:36:10.060913", "exception": false, "start_time": "2021-06-22T14:36:09.962528", "status": "completed"} tags=[] supermarket_ram.info() # + [markdown] papermill={"duration": 0.06454, "end_time": "2021-06-22T14:36:10.191751", "exception": false, "start_time": "2021-06-22T14:36:10.127211", "status": "completed"} tags=[] # #### **B. Column-Wise Statistical Description of Dataframe** # + papermill={"duration": 0.117321, "end_time": "2021-06-22T14:36:10.373720", "exception": false, "start_time": "2021-06-22T14:36:10.256399", "status": "completed"} tags=[] supermarket_ram.describe() # + [markdown] papermill={"duration": 0.066336, "end_time": "2021-06-22T14:36:10.505737", "exception": false, "start_time": "2021-06-22T14:36:10.439401", "status": "completed"} tags=[] # #### **C. 
Looking for any NaNs** # + papermill={"duration": 0.084942, "end_time": "2021-06-22T14:36:10.657916", "exception": false, "start_time": "2021-06-22T14:36:10.572974", "status": "completed"} tags=[] nan_df = supermarket_ram[supermarket_ram.isna().any(axis=1)] nan_df.head(10) # + [markdown] papermill={"duration": 0.076728, "end_time": "2021-06-22T14:36:10.810502", "exception": false, "start_time": "2021-06-22T14:36:10.733774", "status": "completed"} tags=[] # # **Question #1: In which month most of the sales occur?** # + [markdown] papermill={"duration": 0.065571, "end_time": "2021-06-22T14:36:10.948635", "exception": false, "start_time": "2021-06-22T14:36:10.883064", "status": "completed"} tags=[] # ## **1.1 Month-Wise Sales of Supermarket** # + [markdown] papermill={"duration": 0.066068, "end_time": "2021-06-22T14:36:11.080817", "exception": false, "start_time": "2021-06-22T14:36:11.014749", "status": "completed"} tags=[] # #### **A. Adding Month Column** # + papermill={"duration": 0.098931, "end_time": "2021-06-22T14:36:11.245837", "exception": false, "start_time": "2021-06-22T14:36:11.146906", "status": "completed"} tags=[] supermarket_ram['Month'] = supermarket_ram['Date'].str[0:1] supermarket_ram.head() # + [markdown] papermill={"duration": 0.066499, "end_time": "2021-06-22T14:36:11.385744", "exception": false, "start_time": "2021-06-22T14:36:11.319245", "status": "completed"} tags=[] # #### **B. Converting the "Month" Column to Integer (from string)** # + papermill={"duration": 0.115743, "end_time": "2021-06-22T14:36:11.567832", "exception": false, "start_time": "2021-06-22T14:36:11.452089", "status": "completed"} tags=[] supermarket_ram['Month'] = supermarket_ram['Month'].astype(int) supermarket_ram.describe() # + [markdown] papermill={"duration": 0.066711, "end_time": "2021-06-22T14:36:11.708049", "exception": false, "start_time": "2021-06-22T14:36:11.641338", "status": "completed"} tags=[] # #### **C. 
Plotting Month-Wise Sales** # + papermill={"duration": 0.270707, "end_time": "2021-06-22T14:36:12.045853", "exception": false, "start_time": "2021-06-22T14:36:11.775146", "status": "completed"} tags=[] sales = supermarket_ram.groupby('Month').sum() fig, ax = plt.subplots() months = ['January', 'February', 'March'] bars = ax.bar(months, sales['Total'], width = 0.3, color = '#2940d3') # color = '#2940d3' # bars[0].set_color('#ffc947') # bars[1].set_color('#2940d3') # bars[2].set_color('#ff96ad') ax.set_title('Month-Wise Supermarket Sales', fontsize = 14) ax.set_xticks(months) ax.tick_params(axis='both', labelsize=12) ax.set_ylabel('Total Sales', fontsize = 14) # Setting plot margins plt.subplots_adjust(left=0.01, bottom=0.1, right=1.3, top=1.5, wspace=0.9, hspace=0.9) plt.savefig("../visualizations/01-monthly-sales.png") plt.show() # + [markdown] papermill={"duration": 0.067848, "end_time": "2021-06-22T14:36:12.182997", "exception": false, "start_time": "2021-06-22T14:36:12.115149", "status": "completed"} tags=[] # #### **# Result:** # * From January, supermarket sales have slightly decreased. # * February receives least sales. This can be because February is the shortest month. # * Month-wise change in sales not very significant. # + [markdown] papermill={"duration": 0.068132, "end_time": "2021-06-22T14:36:12.319245", "exception": false, "start_time": "2021-06-22T14:36:12.251113", "status": "completed"} tags=[] # ## **1.2 Month-Wise Sales of Different Branches** # + [markdown] papermill={"duration": 0.067746, "end_time": "2021-06-22T14:36:12.455123", "exception": false, "start_time": "2021-06-22T14:36:12.387377", "status": "completed"} tags=[] # #### **A. 
Finding the Supermarket Braches (City-Wise)** # + papermill={"duration": 0.081772, "end_time": "2021-06-22T14:36:12.606045", "exception": false, "start_time": "2021-06-22T14:36:12.524273", "status": "completed"} tags=[] supermarket_cities = [City for City, df in supermarket_ram.groupby('City')] supermarket_cities # + [markdown] papermill={"duration": 0.067723, "end_time": "2021-06-22T14:36:12.743219", "exception": false, "start_time": "2021-06-22T14:36:12.675496", "status": "completed"} tags=[] # #### **B. Filtering the Cities and Forming their Individual Data Frames** # + papermill={"duration": 0.0928, "end_time": "2021-06-22T14:36:12.905269", "exception": false, "start_time": "2021-06-22T14:36:12.812469", "status": "completed"} tags=[] # forming mandalay dataframe mandalay_filter = (supermarket_ram['City'] == 'Mandalay') mandalay_df = supermarket_ram[mandalay_filter].copy() # Taking a copy of df to avoid warnings----Python shell cannot recognize wheather mandalay_df is view or copy mandalay_month_df = mandalay_df.groupby('Month').sum() mandalay_month_df.head() # + papermill={"duration": 0.09294, "end_time": "2021-06-22T14:36:13.067135", "exception": false, "start_time": "2021-06-22T14:36:12.974195", "status": "completed"} tags=[] # forming napy dataframe napy_filter = (supermarket_ram['City'] == 'Naypyitaw') napy_df = supermarket_ram[napy_filter].copy() napy_month_df = napy_df.groupby('Month').sum() napy_month_df.head() # + papermill={"duration": 0.091773, "end_time": "2021-06-22T14:36:13.232352", "exception": false, "start_time": "2021-06-22T14:36:13.140579", "status": "completed"} tags=[] # forming yan dataframe yan_filter = (supermarket_ram['City'] == 'Yangon') yan_df = supermarket_ram[yan_filter].copy() yan_month_df = yan_df.groupby('Month').sum() yan_month_df.head() # + [markdown] papermill={"duration": 0.068722, "end_time": "2021-06-22T14:36:13.370139", "exception": false, "start_time": "2021-06-22T14:36:13.301417", "status": "completed"} tags=[] # #### 
# **C. Plotting Sales for Different Branches**

# + papermill={"duration": 0.288853, "end_time": "2021-06-22T14:36:13.728385", "exception": false, "start_time": "2021-06-22T14:36:13.439532", "status": "completed"} tags=[]
# Forming bar plot: three side-by-side bars per month, one per branch,
# offset by the bar width so they do not overlap.
x = np.arange(3)
month = ['January', 'February', 'March']
plt.bar(x-0.25, mandalay_month_df['Total'], width = 0.25, color = '#ffc947')
plt.bar(x, napy_month_df['Total'], width = 0.25, color = '#2940d3')
plt.bar(x+0.25, yan_month_df['Total'], width = 0.25, color = '#ff96ad')

# Setting labels, title and legend
plt.xticks(x, month, size = 14)
plt.title('Comparison of Month-Wise Sales of Different Branches', fontsize=14)
plt.ylabel('Total Sales', fontsize=14)
plt.legend(["Mandalay", "Naypyitaw", "Yangon"], loc='upper right', bbox_to_anchor=(1.145,1), fontsize=13)  # Naypyitaw', 'Yangon

# Setting plot margins
plt.subplots_adjust(left=0.01, bottom=0.1, right=1.3, top=1.5, wspace=0.9, hspace=0.9)
plt.savefig("../visualizations/02-monthly-sales.png")
plt.show()

# + [markdown] papermill={"duration": 0.070446, "end_time": "2021-06-22T14:36:13.870283", "exception": false, "start_time": "2021-06-22T14:36:13.799837", "status": "completed"} tags=[]
# #### **# Result:**
# * Sales of the branches are similar.
# * From January, the sales have slightly decreased for each branch.
# * Mandalay has the most consistent sales.

# + [markdown] papermill={"duration": 0.070626, "end_time": "2021-06-22T14:36:14.011648", "exception": false, "start_time": "2021-06-22T14:36:13.941022", "status": "completed"} tags=[]
# # **Question #2: How is the overall performance of different branches?**

# + [markdown] papermill={"duration": 0.070721, "end_time": "2021-06-22T14:36:14.153179", "exception": false, "start_time": "2021-06-22T14:36:14.082458", "status": "completed"} tags=[]
# #### **A.
# Obtaining Total Sales & Total Profit of Different Branches**

# + papermill={"duration": 0.083713, "end_time": "2021-06-22T14:36:14.307268", "exception": false, "start_time": "2021-06-22T14:36:14.223555", "status": "completed"} tags=[]
# Per-branch totals: 'Total' = revenue, 'gross income' = profit.
mandalay_sales = mandalay_df["Total"].sum()
mandalay_profit = mandalay_df['gross income'].sum()
napy_sales = napy_df["Total"].sum()
napy_profit = napy_df['gross income'].sum()
yan_sales = yan_df["Total"].sum()
yan_profit = yan_df['gross income'].sum()

# Collected in the same order as city_names for the pie charts below.
city_sales = [mandalay_sales, napy_sales, yan_sales]
city_profit = [mandalay_profit, napy_profit, yan_profit]
city_names = ["Mandalay", "Naypyitaw", "Yangon"]
print(city_sales)
print(city_profit)

# + [markdown] papermill={"duration": 0.071012, "end_time": "2021-06-22T14:36:14.451495", "exception": false, "start_time": "2021-06-22T14:36:14.380483", "status": "completed"} tags=[]
# #### **B. Obtaining Number of Members for each Supermarket Branch**

# + papermill={"duration": 0.086812, "end_time": "2021-06-22T14:36:14.611002", "exception": false, "start_time": "2021-06-22T14:36:14.524190", "status": "completed"} tags=[]
# Count 'Member' rows per branch (row count == member transaction count).
mandalay_filter = (mandalay_df['Customer type'] == 'Member')
mandalay_member_df = mandalay_df[mandalay_filter].copy()
mandalay_members = len(mandalay_member_df['Customer type'])

napy_filter = (napy_df['Customer type'] == 'Member')
napy_member_df = napy_df[napy_filter].copy()
napy_members = len(napy_member_df['Customer type'])

yan_filter = (yan_df['Customer type'] == 'Member')
yan_member_df = yan_df[yan_filter].copy()
yan_members = len(yan_member_df['Customer type'])

city_members = [mandalay_members, napy_members, yan_members]
city_members

# + [markdown] papermill={"duration": 0.071071, "end_time": "2021-06-22T14:36:14.753361", "exception": false, "start_time": "2021-06-22T14:36:14.682290", "status": "completed"} tags=[]
# #### **C.
# Plotting the Performance Metrics for all Supermarket Branches**

# + papermill={"duration": 0.427205, "end_time": "2021-06-22T14:36:15.252128", "exception": false, "start_time": "2021-06-22T14:36:14.824923", "status": "completed"} tags=[]
# Three pie charts side by side: branch share of sales, profit and members.
fig, axs = plt.subplots(1, 3, figsize=(17, 17))

axs[0].pie(city_sales, colors = ['#ffc947', '#2940d3', '#ff96ad'], radius=1.2, autopct = "%0.1f%%")
axs[0].set_title('Total Sales Comparison of Different Cities', fontsize=14)
axs[0].legend(city_names, loc = 'upper right', bbox_to_anchor=(1.25,1), fontsize=12)
axs[0].set_aspect('equal')

axs[1].pie(city_profit, colors = ['#ffc947', '#2940d3', '#ff96ad'], radius=1.2, autopct = "%0.1f%%")
axs[1].set_title('Total Profit Comparison of Different Cities', fontsize=14)
axs[1].legend(city_names, loc = 'upper right', bbox_to_anchor=(1.25,1), fontsize=12)
axs[1].set_aspect('equal')

axs[2].pie(city_members, colors = ['#ffc947', '#2940d3', '#ff96ad'], radius=1.2, autopct = "%0.1f%%")
axs[2].set_title('Members Comparison of Different Cities', fontsize=14)
axs[2].legend(city_names, loc = 'upper right', bbox_to_anchor=(1.25,1), fontsize=12)
axs[2].set_aspect('equal')

plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.4)
plt.savefig("../visualizations/03-region-comparison.png")
plt.show()

# + [markdown] papermill={"duration": 0.072633, "end_time": "2021-06-22T14:36:15.398044", "exception": false, "start_time": "2021-06-22T14:36:15.325411", "status": "completed"} tags=[]
# #### **# Result:**
# * Sales and profit of Naypyitaw are slightly larger.
# * All the branches have almost the same number of members.

# + [markdown] papermill={"duration": 0.073834, "end_time": "2021-06-22T14:36:15.545033", "exception": false, "start_time": "2021-06-22T14:36:15.471199", "status": "completed"} tags=[]
# # **Question #3: What were the most sold items?
# What is the most profitable product line?**

# + [markdown] papermill={"duration": 0.072505, "end_time": "2021-06-22T14:36:15.690783", "exception": false, "start_time": "2021-06-22T14:36:15.618278", "status": "completed"} tags=[]
# #### **A. Grouping Dataframe According to the Product Line**

# + papermill={"duration": 0.117517, "end_time": "2021-06-22T14:36:15.881311", "exception": false, "start_time": "2021-06-22T14:36:15.763794", "status": "completed"} tags=[]
# Grouping dataframe: sum per product line, then replace the per-unit columns
# with proper per-group MEANS ("Correcting corresponding columns").
#
# FIX(review): the previous corrections were wrong:
#   * Unit price was sum('Unit price') / sum('Quantity') — dividing a sum of
#     per-unit prices by a unit count mixes units; the average list price of
#     a product line is the group mean of 'Unit price'.
#   * gross margin percentage was divided by sum('Quantity'), scaling the
#     percentage by basket size instead of averaging it.
#   * Rating was rescaled by n_groups / n_rows, which equals the group mean
#     only when every product line has exactly the same number of rows.
grouped = supermarket_ram.groupby('Product line')
sales = grouped.sum()
sales['Unit price'] = grouped['Unit price'].mean()  # mean list price per line
sales['gross margin percentage'] = grouped['gross margin percentage'].mean()
sales['Rating'] = grouped['Rating'].mean()  # mean customer rating per line
sales.head(10)

# + [markdown] papermill={"duration": 0.073177, "end_time": "2021-06-22T14:36:16.028310", "exception": false, "start_time": "2021-06-22T14:36:15.955133", "status": "completed"} tags=[]
# #### **B.
# Plotting the Sales of Different Product Lines and Representing Corresponding Prices and Profits**

# + papermill={"duration": 0.677246, "end_time": "2021-06-22T14:36:16.779083", "exception": false, "start_time": "2021-06-22T14:36:16.101837", "status": "completed"} tags=[]
# Declaring figure parameters
# `products` follows groupby key order, which matches the index order of `sales`.
products = [Product for Product, df in supermarket_ram.groupby('Product line')]
fig, ax1 = plt.subplots(figsize=(10, 6))

# Plotting Bar Chart (total sales per product line)
ax1 = sns.barplot(x=products, y=sales['Total'], data = sales, palette='crest')
ax1.set_title('Sales of Different Products', fontsize = 16)
ax1.set_ylabel('Total Sales', fontsize=16)
ax1.tick_params(axis='x', rotation = 30)
ax1.set_xticklabels(products, fontsize=12)
ax1.tick_params(axis='y')

# Plotting Line Charts on twin y-axes (ticks hidden: the lines are used only
# for relative comparison, not for reading absolute values).
ax2 = ax1.twinx()
ax2 = sns.lineplot(x = products, y= sales['Unit price'], data=sales, color = 'r' ,lw=1.5, marker='o')  # '#ffc947', '#2940d3'
ax2.set(ylabel=None)
ax2.set(yticklabels=[])
# ax2.tick_params(left=False)
ax2.set_yticks([])

ax3 = ax1.twinx()
ax3 = sns.lineplot(x = products, y= sales['gross margin percentage'], data=sales, color = 'black', lw=1.5, marker='o')
ax3.set(yticklabels=[])
ax3.set(ylabel=None)
ax3.set_yticks([])

# Proxy artists for the two line series, since seaborn lines carry no labels here.
price = mpatches.Patch(color='r')
profit = mpatches.Patch(color='black')

# Defining Legend
name = ['Unit Price', 'Profit(%)']
ax1.legend(labels=name, handles=[price, profit], loc="upper right", bbox_to_anchor=(1.14, 1), fontsize=13)
plt.savefig("../visualizations/04-product-sales-vs-price.png")
plt.show()

# + [markdown] papermill={"duration": 0.075313, "end_time": "2021-06-22T14:36:16.930657", "exception": false, "start_time": "2021-06-22T14:36:16.855344", "status": "completed"} tags=[]
# #### **# Result:**
# * The sales of different products are similar.
# * Profit is approximately proportional to the price.
# * Profit earned from fashion accessories and food and beverages is significantly larger.
# + [markdown] papermill={"duration": 0.075716, "end_time": "2021-06-22T14:36:17.083017", "exception": false, "start_time": "2021-06-22T14:36:17.007301", "status": "completed"} tags=[]
# # **Question #4: How are the ratings of different product lines? Is there a relation of ratings with prices?**

# + [markdown] papermill={"duration": 0.076599, "end_time": "2021-06-22T14:36:17.235668", "exception": false, "start_time": "2021-06-22T14:36:17.159069", "status": "completed"} tags=[]
# #### A. Plotting Ratings and Corresponding Prices (a section pending)

# + papermill={"duration": 0.509936, "end_time": "2021-06-22T14:36:17.821307", "exception": false, "start_time": "2021-06-22T14:36:17.311371", "status": "completed"} tags=[]
# Bar chart of mean rating per product line, with mean unit price overlaid
# on a twin axis to eyeball any price/rating relation.
fig, ax1 = plt.subplots(figsize=(10, 6))
ax1 = sns.barplot(x=products, y=sales['Rating'], data = sales, palette='crest')
ax1.set_title('Rating of Different Products', fontsize = 16)
# FIX(review): the bars show 'Rating', but this axis was mislabelled 'Unit Price'.
ax1.set_ylabel('Rating', fontsize=16)
ax1.set_xticklabels(products, fontsize=12)
ax1.tick_params(axis='x', rotation = 30)
ax1.tick_params(axis='y')

# Unit price line on the right-hand axis.
ax3 = ax1.twinx()
ax3.set_ylabel('Unit price', fontsize = 16)
ax3 = sns.lineplot(x = products, y= 'Unit price', data=sales, color = 'r', marker='o', lw=1.8)
ax3.tick_params(axis='y')

plt.savefig("../visualizations/05-product-sales-vs-rating.png")
plt.show()

# + [markdown] papermill={"duration": 0.076852, "end_time": "2021-06-22T14:36:17.975777", "exception": false, "start_time": "2021-06-22T14:36:17.898925", "status": "completed"} tags=[]
# #### **# Result:** Products with smaller prices tend to get slightly lower ratings.
# + [markdown] papermill={"duration": 0.077189, "end_time": "2021-06-22T14:36:18.130350", "exception": false, "start_time": "2021-06-22T14:36:18.053161", "status": "completed"} tags=[]
# # **# Question #5: Who are the leading buyers?**

# + [markdown] papermill={"duration": 0.078657, "end_time": "2021-06-22T14:36:18.286916", "exception": false, "start_time": "2021-06-22T14:36:18.208259", "status": "completed"} tags=[]
# #### **A. Obtaining Sales of Members and Non-Members**

# + papermill={"duration": 0.102522, "end_time": "2021-06-22T14:36:18.467432", "exception": false, "start_time": "2021-06-22T14:36:18.364910", "status": "completed"} tags=[]
# Total sales per customer type.
customer_type_df = supermarket_ram.groupby('Customer type').sum()
# FIX(review): this previously read the 'Unit price' column, i.e. summed list
# prices, not sales — inconsistent with the section title and with the gender
# cell below, which uses 'Total'. Also index by label rather than position so
# a change in group order cannot silently swap Member and Normal.
member_sales = customer_type_df.loc['Member', 'Total']
normal_sales = customer_type_df.loc['Normal', 'Total']
member_normal_sales = [member_sales, normal_sales]
member_normal_legend = ['Member', 'Normal']
customer_type_df

# + [markdown] papermill={"duration": 0.077865, "end_time": "2021-06-22T14:36:18.624736", "exception": false, "start_time": "2021-06-22T14:36:18.546871", "status": "completed"} tags=[]
# #### **B. Obtaining Sales of Males and Females**

# + papermill={"duration": 0.107088, "end_time": "2021-06-22T14:36:18.810353", "exception": false, "start_time": "2021-06-22T14:36:18.703265", "status": "completed"} tags=[]
# Total sales per gender, again indexed by label for robustness.
customer_gender_df = supermarket_ram.groupby('Gender').sum()
female_sales = customer_gender_df.loc['Female', 'Total']
male_sales = customer_gender_df.loc['Male', 'Total']
female_male_sales = [female_sales, male_sales]
female_male_legend = ['Females', 'Males']
customer_gender_df.head()

# + [markdown] papermill={"duration": 0.078725, "end_time": "2021-06-22T14:36:18.967834", "exception": false, "start_time": "2021-06-22T14:36:18.889109", "status": "completed"} tags=[]
# #### **C.
# Plotting the Sales of Respective Customer Segments**

# + papermill={"duration": 0.30775, "end_time": "2021-06-22T14:36:19.354068", "exception": false, "start_time": "2021-06-22T14:36:19.046318", "status": "completed"} tags=[]
# Two pies: sales split by membership, and sales split by gender.
fig, axs = plt.subplots(1, 2, figsize=(13, 13))

axs[0].pie(member_normal_sales, colors = ['#ffc947', '#2940d3'], radius=1.2, autopct = "%0.1f%%")
axs[0].set_title('Sales Comparison of Members and Normal Customers')
axs[0].legend(member_normal_legend, loc = 'upper right')
axs[0].set_aspect('equal')

axs[1].pie(female_male_sales, colors = ['#ffc947', '#2940d3'], radius=1.2, autopct = "%0.1f%%")
axs[1].set_title('Sales Comparison of Males and Females')
axs[1].legend(female_male_legend, loc = 'upper right')
axs[1].set_aspect('equal')

plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.4)
plt.savefig("../visualizations/06-segment-wise-sales-.png")
plt.show()

# + [markdown] papermill={"duration": 0.079593, "end_time": "2021-06-22T14:36:19.513970", "exception": false, "start_time": "2021-06-22T14:36:19.434377", "status": "completed"} tags=[]
# #### # **Result:**
# * The sales from members and non-members are almost the same.
# * The sales from female customers are slightly more than from male customers.

# + [markdown] papermill={"duration": 0.079822, "end_time": "2021-06-22T14:36:19.675053", "exception": false, "start_time": "2021-06-22T14:36:19.595231", "status": "completed"} tags=[]
# # **Question #6: Which days of the week have most sales?**

# + [markdown] papermill={"duration": 0.080209, "end_time": "2021-06-22T14:36:19.835176", "exception": false, "start_time": "2021-06-22T14:36:19.754967", "status": "completed"} tags=[]
# #### **A.
# Adding Day of the Week Column**

# + papermill={"duration": 0.128821, "end_time": "2021-06-22T14:36:20.044545", "exception": false, "start_time": "2021-06-22T14:36:19.915724", "status": "completed"} tags=[]
# Parse 'Date' in place and derive the weekday name into a new 'Day' column.
# NOTE(review): pd.to_datetime without an explicit format= infers the date
# format — confirm the CSV's date layout to avoid day/month ambiguity.
supermarket_ram['Date'] = pd.to_datetime(supermarket_ram['Date'])
supermarket_ram['Day'] = supermarket_ram['Date'].dt.day_name()  # "day_name()" is the new command instead of "day_name"
supermarket_ram.head()

# + [markdown] papermill={"duration": 0.07958, "end_time": "2021-06-22T14:36:20.205461", "exception": false, "start_time": "2021-06-22T14:36:20.125881", "status": "completed"} tags=[]
# #### **B. Summing the Total Sales According to the Day of the Week**

# + papermill={"duration": 0.109281, "end_time": "2021-06-22T14:36:20.396249", "exception": false, "start_time": "2021-06-22T14:36:20.286968", "status": "completed"} tags=[]
# Sum per weekday, then reindex so the rows follow calendar order
# instead of the groupby's alphabetical order.
daywise_df = supermarket_ram.groupby('Day').sum()
daywise_df = daywise_df.reindex(['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])
daywise_df.head(10)

# + [markdown] papermill={"duration": 0.081832, "end_time": "2021-06-22T14:36:20.559243", "exception": false, "start_time": "2021-06-22T14:36:20.477411", "status": "completed"} tags=[]
# #### **C.
# Plotting the Sales**

# + papermill={"duration": 0.307479, "end_time": "2021-06-22T14:36:20.948494", "exception": false, "start_time": "2021-06-22T14:36:20.641015", "status": "completed"} tags=[]
# Bar chart of total sales per weekday, in calendar order.
fig, ax1 = plt.subplots(figsize=(10, 6))
daywise_day = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
ax1 = sns.barplot(x=daywise_day, y=daywise_df['Total'], data = daywise_df, palette='crest')
ax1.set_title('Day-Wise Sales of Supermarket', size=16)
ax1.tick_params(axis='x')
ax1.tick_params(axis='y')
ax1.set_ylabel('Total Sales', fontsize=14)
ax1.set_xticklabels(daywise_day, fontsize=14)
plt.savefig("../visualizations/07-daywise-sales.png")
plt.show()

# + [markdown] papermill={"duration": 0.081724, "end_time": "2021-06-22T14:36:21.112823", "exception": false, "start_time": "2021-06-22T14:36:21.031099", "status": "completed"} tags=[]
# #### **# Result:**
# 1. Tuesday and Saturday have the larger sales in the week.
# 2. Monday has the least sales in the week.

# + [markdown] papermill={"duration": 0.081466, "end_time": "2021-06-22T14:36:21.276070", "exception": false, "start_time": "2021-06-22T14:36:21.194604", "status": "completed"} tags=[]
# # **Question #7: How do different customers prefer visiting the supermarket?**

# + [markdown] papermill={"duration": 0.081581, "end_time": "2021-06-22T14:36:21.441900", "exception": false, "start_time": "2021-06-22T14:36:21.360319", "status": "completed"} tags=[]
# ### **1.1. Sales Analysis Based on Membership**

# + [markdown] papermill={"duration": 0.082749, "end_time": "2021-06-22T14:36:21.606568", "exception": false, "start_time": "2021-06-22T14:36:21.523819", "status": "completed"} tags=[]
# #### **A.
# Filtering and Forming the Data Frame for Members**

# + papermill={"duration": 0.135895, "end_time": "2021-06-22T14:36:21.824582", "exception": false, "start_time": "2021-06-22T14:36:21.688687", "status": "completed"} tags=[]
# Members only, summed per weekday and reindexed into calendar order.
member_filter = (supermarket_ram['Customer type'] == 'Member')
member_df = supermarket_ram[member_filter]
member_daywise_df = member_df.groupby('Day').sum()
member_daywise_df = member_daywise_df.reindex(['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])
member_daywise_df.head(7)

# + [markdown] papermill={"duration": 0.082182, "end_time": "2021-06-22T14:36:21.991165", "exception": false, "start_time": "2021-06-22T14:36:21.908983", "status": "completed"} tags=[]
# #### **B. Filtering and forming the dataframe for Non-Members (Normal Customers)**

# + papermill={"duration": 0.112421, "end_time": "2021-06-22T14:36:22.186136", "exception": false, "start_time": "2021-06-22T14:36:22.073715", "status": "completed"} tags=[]
# Same aggregation for non-members ('Normal' customer type).
nonmember_filter = (supermarket_ram['Customer type'] == 'Normal')
nonmember_df = supermarket_ram[nonmember_filter]
nonmember_daywise_df = nonmember_df.groupby('Day').sum()
nonmember_daywise_df = nonmember_daywise_df.reindex(['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])
nonmember_daywise_df.head(7)

# + [markdown] papermill={"duration": 0.083888, "end_time": "2021-06-22T14:36:22.360366", "exception": false, "start_time": "2021-06-22T14:36:22.276478", "status": "completed"} tags=[]
# #### **C.
# Plotting Day-Wise Sales Comparison of Members and Non-Members**

# + papermill={"duration": 0.335006, "end_time": "2021-06-22T14:36:22.778388", "exception": false, "start_time": "2021-06-22T14:36:22.443382", "status": "completed"} tags=[]
# Paired bars per weekday: non-members (left, yellow) vs members (right, blue).
day = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
x = np.arange(7)
plt.bar(x-0.2, nonmember_daywise_df['Total'], width = 0.4, color = '#ffc947')
plt.bar(x+0.2, member_daywise_df['Total'], width = 0.4, color = '#2940d3')
plt.xticks(x, day, fontsize = 12)
plt.xticks(fontsize = 12)
plt.title('Sales Comparison of Members and Non-Members', fontsize = 14)
plt.ylabel('Total Sales', fontsize = 14)
plt.legend(["Non-Members", "Members"], loc = 'upper right', bbox_to_anchor=(1.2,1), fontsize=13)
plt.subplots_adjust(left=0.01, bottom=0.1, right=1.3, top=1.2, wspace=0.9, hspace=0.9)
plt.savefig("../visualizations/08-daywise-members-sale.png")
plt.show()

# + [markdown] papermill={"duration": 0.084078, "end_time": "2021-06-22T14:36:22.946900", "exception": false, "start_time": "2021-06-22T14:36:22.862822", "status": "completed"} tags=[]
# #### **# Result:**
# * On Tuesday and Sunday, member purchases are larger. On these days, the supermarket should focus on the members' segment.
# * On Thursday, non-member purchases are larger. On this day, the supermarket has a better chance to increase memberships.

# + [markdown] papermill={"duration": 0.084731, "end_time": "2021-06-22T14:36:23.117220", "exception": false, "start_time": "2021-06-22T14:36:23.032489", "status": "completed"} tags=[]
# ### **1.2 Sales Analysis Based on Gender**

# + [markdown] papermill={"duration": 0.084436, "end_time": "2021-06-22T14:36:23.286801", "exception": false, "start_time": "2021-06-22T14:36:23.202365", "status": "completed"} tags=[]
# #### **A.
# Filtering Female Customers and Forming their Dataframe**

# + papermill={"duration": 0.11493, "end_time": "2021-06-22T14:36:23.487519", "exception": false, "start_time": "2021-06-22T14:36:23.372589", "status": "completed"} tags=[]
# Female-customer subset of the transactions.
female_filter = (supermarket_ram['Gender'] == 'Female')
female_df = supermarket_ram[female_filter]
female_df.head()

# + [markdown] papermill={"duration": 0.084948, "end_time": "2021-06-22T14:36:23.657711", "exception": false, "start_time": "2021-06-22T14:36:23.572763", "status": "completed"} tags=[]
# #### **B. Filtering Male Customers and Forming their Dataframe**

# + papermill={"duration": 0.116278, "end_time": "2021-06-22T14:36:23.859454", "exception": false, "start_time": "2021-06-22T14:36:23.743176", "status": "completed"} tags=[]
# Male-customer subset of the transactions.
male_filter = (supermarket_ram['Gender'] == 'Male')
male_df = supermarket_ram[male_filter]
male_df.head()

# + [markdown] papermill={"duration": 0.085468, "end_time": "2021-06-22T14:36:24.030882", "exception": false, "start_time": "2021-06-22T14:36:23.945414", "status": "completed"} tags=[]
# #### **C. Representing Female Dataframe on Weekday Basis (Day-Wise)**

# + papermill={"duration": 0.113361, "end_time": "2021-06-22T14:36:24.230246", "exception": false, "start_time": "2021-06-22T14:36:24.116885", "status": "completed"} tags=[]
# Female sales summed per weekday, in calendar order.
female_daywise_df = female_df.groupby('Day').sum()
female_daywise_df = female_daywise_df.reindex(['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])
female_daywise_df.head(10)

# + [markdown] papermill={"duration": 0.137361, "end_time": "2021-06-22T14:36:24.461190", "exception": false, "start_time": "2021-06-22T14:36:24.323829", "status": "completed"} tags=[]
# #### **D.
# Representing Male Dataframe on Weekday Basis (Day-Wise)**

# + papermill={"duration": 0.114568, "end_time": "2021-06-22T14:36:24.661955", "exception": false, "start_time": "2021-06-22T14:36:24.547387", "status": "completed"} tags=[]
# Male sales summed per weekday, in calendar order.
male_daywise_df = male_df.groupby('Day').sum()
male_daywise_df = male_daywise_df.reindex(['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])
male_daywise_df.head(10)

# + [markdown] papermill={"duration": 0.086533, "end_time": "2021-06-22T14:36:24.842363", "exception": false, "start_time": "2021-06-22T14:36:24.755830", "status": "completed"} tags=[]
# #### **E. Plotting Male and Female Sales (Day-Wise)**

# + papermill={"duration": 0.354574, "end_time": "2021-06-22T14:36:25.283692", "exception": false, "start_time": "2021-06-22T14:36:24.929118", "status": "completed"} tags=[]
# Paired bars per weekday: females (left, yellow) vs males (right, blue).
# Relies on `day` defined in the members/non-members plotting cell above.
x = np.arange(7)
plt.bar(x-0.2, female_daywise_df['Total'], width = 0.4, color = '#ffc947')
plt.bar(x+0.2, male_daywise_df['Total'], width = 0.4, color = '#2940d3')
# NOTE(review): `size` and `fontsize` are aliases — newer matplotlib rejects
# passing both; confirm the pinned matplotlib version.
plt.xticks(x, day, size = 10, fontsize=13)
plt.yticks(size = 10, fontsize=12)
plt.title('Comparison of Day-Wise Sales of Males and Females', fontsize = 14)
plt.ylabel('Total Sales', fontsize =14)
plt.legend(["Females", "Males"], fontsize = 13)
plt.subplots_adjust(left=0.01, bottom=0.1, right=1.3, top=1.2, wspace=0.9, hspace=0.9)
plt.savefig("../visualizations/09-daywise-gender-sales.png")
plt.show()

# + [markdown] papermill={"duration": 0.087534, "end_time": "2021-06-22T14:36:25.459852", "exception": false, "start_time": "2021-06-22T14:36:25.372318", "status": "completed"} tags=[]
# #### **# Result:**
# * Sales from males and females are similar. Families seem to do most of the purchases.
# * Exceptionally, on Tuesday the number of female customers is significantly larger. Therefore, on Tuesday the supermarket should focus more on the ladies' segment.
# * Wednesday and Thursday have more male customers. Therefore, on these days the supermarket should focus on the men's segment.
# + [markdown] papermill={"duration": 0.088834, "end_time": "2021-06-22T14:36:25.636337", "exception": false, "start_time": "2021-06-22T14:36:25.547503", "status": "completed"} tags=[] # # **Question #8: Which customer segment prefer membership?** # + [markdown] papermill={"duration": 0.08806, "end_time": "2021-06-22T14:36:25.812196", "exception": false, "start_time": "2021-06-22T14:36:25.724136", "status": "completed"} tags=[] # ### **1.1 Analysing Memberships Among Customer Genders** # + [markdown] papermill={"duration": 0.088225, "end_time": "2021-06-22T14:36:25.988327", "exception": false, "start_time": "2021-06-22T14:36:25.900102", "status": "completed"} tags=[] # #### **A. Forming Member and Non-Member Dataframes for Female Customers** # + papermill={"duration": 0.100236, "end_time": "2021-06-22T14:36:26.177501", "exception": false, "start_time": "2021-06-22T14:36:26.077265", "status": "completed"} tags=[] female_member_filter = (female_df['Customer type'] == 'Member') female_member_df = female_df[female_member_filter] total_female_members = len(female_member_df) female_nonmember_filter = (female_df['Customer type'] == 'Normal') female_nonmember_df = female_df[female_nonmember_filter] total_female_nonmembers = len(female_nonmember_df) # + [markdown] papermill={"duration": 0.089462, "end_time": "2021-06-22T14:36:26.360930", "exception": false, "start_time": "2021-06-22T14:36:26.271468", "status": "completed"} tags=[] # #### **B. 
# Forming Member and Non-Member Dataframes for Male Customers**

# + papermill={"duration": 0.098219, "end_time": "2021-06-22T14:36:26.550033", "exception": false, "start_time": "2021-06-22T14:36:26.451814", "status": "completed"} tags=[]
# Counts of member vs normal transactions among male customers.
male_member_filter = (male_df['Customer type'] == 'Member')
male_member_df = male_df[male_member_filter]
total_male_members = len(male_member_df)

male_nonmember_filter = (male_df['Customer type'] == 'Normal')
male_nonmember_df = male_df[male_nonmember_filter]
total_male_nonmembers = len(male_nonmember_df)

# + [markdown] papermill={"duration": 0.087774, "end_time": "2021-06-22T14:36:26.727286", "exception": false, "start_time": "2021-06-22T14:36:26.639512", "status": "completed"} tags=[]
# #### **C. Plotting Membership and Non-Membership Count for Males and Females**

# + papermill={"duration": 0.266728, "end_time": "2021-06-22T14:36:27.082574", "exception": false, "start_time": "2021-06-22T14:36:26.815846", "status": "completed"} tags=[]
# Paired bars: female (yellow) vs male (blue) counts for each category.
x = np.arange(2)
categories = ['Members', 'Non-Members']
female_count = [total_female_members, total_female_nonmembers]
male_count = [total_male_members, total_male_nonmembers]
plt.bar(x-0.1, female_count, width = 0.2, color = '#ffc947')
plt.bar(x+0.1, male_count, width = 0.2, color = '#2940d3')
# NOTE(review): `size` and `fontsize` are aliases — newer matplotlib rejects
# passing both; confirm the pinned matplotlib version.
plt.xticks(x, categories, size = 10, fontsize=14)
plt.yticks(size = 10, fontsize=14)
plt.title('Comparison of Memberships Among Males and Females', fontsize = 14)
plt.ylabel('Total Customers', fontsize =14)
plt.legend(["Females", "Males"], fontsize = 12, loc="upper right", bbox_to_anchor=(1.15,1))
plt.subplots_adjust(left=0.01, bottom=0.1, right=1, top=1.2, wspace=0.9, hspace=0.9)
plt.savefig("../visualizations/10-category-wise-total-sales.png")
plt.show()

# + [markdown] papermill={"duration": 0.088262, "end_time": "2021-06-22T14:36:27.260108", "exception": false, "start_time": "2021-06-22T14:36:27.171846", "status": "completed"} tags=[]
# #### **# Result:**
# * There is a slight difference between female and
# male memberships.
# * The difference between male and female non-members is also small.
# * Overall, female customers prefer memberships slightly more.

# + [markdown] papermill={"duration": 0.088124, "end_time": "2021-06-22T14:36:27.436958", "exception": false, "start_time": "2021-06-22T14:36:27.348834", "status": "completed"} tags=[]
# # **Question #9: Which products are more purchased by different customer segments?**

# + papermill={"duration": 0.118323, "end_time": "2021-06-22T14:36:27.643910", "exception": false, "start_time": "2021-06-22T14:36:27.525587", "status": "completed"} tags=[]
# Quick look at the frame (now including the derived 'Day' column).
supermarket_ram.head()

# + [markdown] papermill={"duration": 0.088505, "end_time": "2021-06-22T14:36:27.822258", "exception": false, "start_time": "2021-06-22T14:36:27.733753", "status": "completed"} tags=[]
# #### **A. Forming Product Wise Dataframes**

# + papermill={"duration": 0.158805, "end_time": "2021-06-22T14:36:28.070826", "exception": false, "start_time": "2021-06-22T14:36:27.912021", "status": "completed"} tags=[]
# 1.
Electronics Accessories electronic_filter = (supermarket_ram['Product line'] == 'Electronic accessories') product_electronic_df = supermarket_ram[electronic_filter] female_filter = (product_electronic_df['Gender'] == 'Female') product_electronic_female_df = product_electronic_df[female_filter] total_female_electronic_sales = product_electronic_female_df['Total'].sum() print('total_female_electronic_sales: {}'.format(total_female_electronic_sales)) male_filter = (product_electronic_df['Gender'] == 'Male') product_electronic_male_df = product_electronic_df[male_filter] total_male_electronic_sales = product_electronic_male_df['Total'].sum() print('total_male_electronic_sales: {}'.format(total_male_electronic_sales)) member_filter = (product_electronic_df['Customer type'] == 'Member') product_electronic_member_df = product_electronic_df[member_filter] total_member_electronic_sales = product_electronic_member_df['Total'].sum() print('total_member_electronic_sales: {}'.format(total_member_electronic_sales)) nonmember_filter = (product_electronic_df['Customer type'] == 'Normal') product_electronic_nonmember_df = product_electronic_df[nonmember_filter] total_nonmember_electronic_sales = product_electronic_nonmember_df['Total'].sum() print('total_nonmember_electronic_sales: {}'.format(total_nonmember_electronic_sales)) # 2. 
Fashion Accessories fashion_filter = (supermarket_ram['Product line'] == 'Fashion accessories') product_fashion_df = supermarket_ram[fashion_filter] female_filter = (product_fashion_df['Gender'] == 'Female') product_fashion_female_df = product_fashion_df[female_filter] total_female_fashion_sales = product_fashion_female_df['Total'].sum() print('\ntotal_female_fashion_sales: {}'.format(total_female_fashion_sales)) male_filter = (product_fashion_df['Gender'] == 'Male') product_fashion_male_df = product_fashion_df[male_filter] total_male_fashion_sales = product_fashion_male_df['Total'].sum() print('total_male_fashion_sales: {}'.format(total_male_fashion_sales)) member_filter = (product_fashion_df['Customer type'] == 'Member') product_fashion_member_df = product_fashion_df[member_filter] total_member_fashion_sales = product_fashion_member_df['Total'].sum() print('total_member_fashion_sales: {}'.format(total_member_fashion_sales)) nonmember_filter = (product_fashion_df['Customer type'] == 'Normal') product_fashion_nonmember_df = product_fashion_df[nonmember_filter] total_nonmember_fashion_sales = product_fashion_nonmember_df['Total'].sum() print('total_nonmember_fashion_sales: {}'.format(total_nonmember_fashion_sales)) # 3. 
Food and beverages food_filter = (supermarket_ram['Product line'] == 'Food and beverages') product_food_df = supermarket_ram[food_filter] female_filter = (product_food_df['Gender'] == 'Female') product_food_female_df = product_food_df[female_filter] total_female_food_sales = product_food_female_df['Total'].sum() print('\ntotal_female_food_sales: {}'.format(total_female_food_sales)) male_filter = (product_food_df['Gender'] == 'Male') product_food_male_df = product_food_df[male_filter] total_male_food_sales = product_food_male_df['Total'].sum() print('total_male_food_sales: {}'.format(total_male_food_sales)) member_filter = (product_food_df['Customer type'] == 'Member') product_food_member_df = product_food_df[member_filter] total_member_food_sales = product_food_member_df['Total'].sum() print('total_member_food_sales: {}'.format(total_member_food_sales)) nonmember_filter = (product_food_df['Customer type'] == 'Normal') product_food_nonmember_df = product_food_df[nonmember_filter] total_nonmember_food_sales = product_food_nonmember_df['Total'].sum() print('total_nonmember_food_sales: {}'.format(total_nonmember_food_sales)) # 4. 
Health and beauty health_filter = (supermarket_ram['Product line'] == 'Health and beauty') product_health_df = supermarket_ram[health_filter] female_filter = (product_health_df['Gender'] == 'Female') product_health_female_df = product_health_df[female_filter] total_female_health_sales = product_health_female_df['Total'].sum() print('\ntotal_female_health_sales: {}'.format(total_female_health_sales)) male_filter = (product_health_df['Gender'] == 'Male') product_health_male_df = product_health_df[male_filter] total_male_health_sales = product_health_male_df['Total'].sum() print('total_male_health_sales: {}'.format(total_male_health_sales)) member_filter = (product_health_df['Customer type'] == 'Member') product_health_member_df = product_health_df[member_filter] total_member_health_sales = product_health_member_df['Total'].sum() print('total_member_health_sales: {}'.format(total_member_health_sales)) nonmember_filter = (product_health_df['Customer type'] == 'Normal') product_health_nonmember_df = product_health_df[nonmember_filter] total_nonmember_health_sales = product_health_nonmember_df['Total'].sum() print('total_nonmember_health_sales: {}'.format(total_nonmember_health_sales)) # 5. 
# Home and lifestyle
# Same segmentation as above for the "Home and lifestyle" product line.
home_filter = supermarket_ram['Product line'] == 'Home and lifestyle'
product_home_df = supermarket_ram.loc[home_filter]

female_filter = product_home_df['Gender'] == 'Female'
product_home_female_df = product_home_df.loc[female_filter]
total_female_home_sales = product_home_female_df['Total'].sum()
print('\ntotal_female_home_sales: {}'.format(total_female_home_sales))

male_filter = product_home_df['Gender'] == 'Male'
product_home_male_df = product_home_df.loc[male_filter]
total_male_home_sales = product_home_male_df['Total'].sum()
print('total_male_home_sales: {}'.format(total_male_home_sales))

member_filter = product_home_df['Customer type'] == 'Member'
product_home_member_df = product_home_df.loc[member_filter]
total_member_home_sales = product_home_member_df['Total'].sum()
print('total_member_home_sales: {}'.format(total_member_home_sales))

# "Normal" is the dataset's label for non-member customers.
nonmember_filter = product_home_df['Customer type'] == 'Normal'
product_home_nonmember_df = product_home_df.loc[nonmember_filter]
total_nonmember_home_sales = product_home_nonmember_df['Total'].sum()
print('total_nonmember_home_sales: {}'.format(total_nonmember_home_sales))

# 6.
# Sports and travel
# Same segmentation as above for the "Sports and travel" product line.
sports_filter = supermarket_ram['Product line'] == 'Sports and travel'
product_sports_df = supermarket_ram.loc[sports_filter]

female_filter = product_sports_df['Gender'] == 'Female'
product_sports_female_df = product_sports_df.loc[female_filter]
total_female_sports_sales = product_sports_female_df['Total'].sum()
print('\ntotal_female_sports_sales: {}'.format(total_female_sports_sales))

male_filter = product_sports_df['Gender'] == 'Male'
product_sports_male_df = product_sports_df.loc[male_filter]
total_male_sports_sales = product_sports_male_df['Total'].sum()
print('total_male_sports_sales: {}'.format(total_male_sports_sales))

member_filter = product_sports_df['Customer type'] == 'Member'
product_sports_member_df = product_sports_df.loc[member_filter]
total_member_sports_sales = product_sports_member_df['Total'].sum()
print('total_member_sports_sales: {}'.format(total_member_sports_sales))

nonmember_filter = product_sports_df['Customer type'] == 'Normal'
product_sports_nonmember_df = product_sports_df.loc[nonmember_filter]
total_nonmember_sports_sales = product_sports_nonmember_df['Total'].sum()
print('total_nonmember_sports_sales: {}'.format(total_nonmember_sports_sales))

# +
# Grouped bar charts: per-product-line sales split by gender (left) and by
# membership (right).
x = np.arange(6)

female_product_sales = [total_female_electronic_sales, total_female_fashion_sales,
                        total_female_food_sales, total_female_health_sales,
                        total_female_home_sales, total_female_sports_sales]
male_product_sales = [total_male_electronic_sales, total_male_fashion_sales,
                      total_male_food_sales, total_male_health_sales,
                      total_male_home_sales, total_male_sports_sales]
member_product_sales = [total_member_electronic_sales, total_member_fashion_sales,
                        total_member_food_sales, total_member_health_sales,
                        total_member_home_sales, total_member_sports_sales]
nonmember_product_sales = [total_nonmember_electronic_sales, total_nonmember_fashion_sales,
                           total_nonmember_food_sales, total_nonmember_health_sales,
                           total_nonmember_home_sales, total_nonmember_sports_sales]

products = ['Electronic \n accessories', 'Fashion \n accessories',
            'Food and \n beverages', 'Health and \n beauty',
            'Home and \n lifestyle', 'Sports and \n travel']

fig, ax = plt.subplots(1, 2, figsize=(16.5, 6.5))

# Subplot (1,1): females vs. males.
ax[0].bar(x - 0.2, female_product_sales, width=0.4, color='#ffc947')
ax[0].bar(x + 0.2, male_product_sales, width=0.4, color='#2940d3')
ax[0].set_xticks(x)
ax[0].set_xticklabels(products, fontsize=13, rotation='0')
ax[0].set_title('Product Wise Sales of Females and Males', fontsize=14)
ax[0].set_ylabel('Total Sales', fontsize=14)
ax[0].legend(["Females", "Males"], fontsize=12)

# Subplot (1,2): members vs. non-members.
ax[1].bar(x - 0.2, member_product_sales, width=0.4, color='#ffc947')
ax[1].bar(x + 0.2, nonmember_product_sales, width=0.4, color='#2940d3')
ax[1].set_xticks(x)
ax[1].set_xticklabels(products, fontsize=13, rotation='0')
ax[1].set_title('Product Wise Sales of Members and Non-Members', fontsize=14)
ax[1].set_ylabel('Total Sales', fontsize=14)
ax[1].legend(["Members", "Non-Members"], fontsize=12)

fig.tight_layout()
plt.savefig("../visualizations/11-product-wise-categorical-sales.png")
plt.show()

# + [markdown]
# #### **# Result (Based on Gender):**
# * "Electronic Accessories" and "Sports and Travel" items are almost equally purchased by males and females.
# * Female customers contribute a larger amount of sales from "Fashion Accessories", "Food and Beverages" and "Home and Lifestyle".
# * Male customers contribute a larger amount of sales from "Health and Beauty".
# + [markdown]
# #### **# Result (Based on Membership):**
# * "Fashion Accessories", "Health and Beauty", "Home and Lifestyle" and "Sports and Travel" items are almost equally purchased by Members and Non-Members.
# * Non-Members contribute a larger amount of sales from "Electronic Accessories".
# * Members contribute a larger amount of sales from "Food and Beverages".

# + [markdown]
# # **Question #10: What is the preferred payment for different customers?**

# + [markdown]
# #### **A. Finding Different Types of Payments (Not Given in the Dataset)**

# +
# Distinct payment methods, ascending. ``sorted(unique())`` yields the same
# sorted key order as iterating ``groupby('Payment')`` did, without
# materialising a throwaway group DataFrame per key.
supermarket_payment = sorted(supermarket_ram['Payment'].unique())
supermarket_payment

# + [markdown]
# #### **B.
# Forming Dataset for Members and Non-Members**

# +
# Per-payment-method totals. ``numeric_only=True`` restricts the aggregation
# to the numeric columns; without it, pandas >= 2.0 raises a TypeError when
# the groupby sum reaches the object (string) columns of the frame.
member_payment_df = member_df.groupby('Payment').sum(numeric_only=True)
member_payment = member_payment_df['Total']

nonmember_payment_df = nonmember_df.groupby('Payment').sum(numeric_only=True)
nonmember_payment = nonmember_payment_df['Total']

payment_legends = ['Cash', 'Credit card', 'Ewallet']

# + [markdown]
# #### **C. Forming Dataset for Females and Males**

# +
female_payment_df = female_df.groupby('Payment').sum(numeric_only=True)
female_payment = female_payment_df['Total']

male_payment_df = male_df.groupby('Payment').sum(numeric_only=True)
male_payment = male_payment_df['Total']

female_payment_df

# + [markdown]
# #### **D.
# Plotting Payment Preferences for Different Customer Segments**

# +
# One grouped bar chart per customer split (membership, gender), sharing the
# same layout; the per-subplot differences are data-driven below.
x = np.arange(3)
fig, ax = plt.subplots(1, 2, figsize=(14.5, 5.5))

subplot_specs = [
    (member_payment, nonmember_payment,
     'Comparison of Payment of Members and Non-Members',
     ["Members", 'Non-Members'], (1.24, 1)),
    (female_payment, male_payment,
     'Comparison of Payment of Males and Females',
     ["Females", 'Males'], (1.15, 1)),
]

for axis, (left_series, right_series, title, legend_labels, anchor) in zip(ax, subplot_specs):
    axis.bar(x - 0.2, left_series, width=0.4, color='#ffc947')
    axis.bar(x + 0.2, right_series, width=0.4, color='#2940d3')
    axis.set_xticks(x)
    axis.set_xticklabels(supermarket_payment, fontsize=12)
    axis.set_title(title, fontsize=14)
    axis.set_ylabel('Total Sales', fontsize=14)
    axis.legend(legend_labels, fontsize=12, loc="upper right",
                bbox_to_anchor=anchor)

fig.tight_layout()
plt.savefig("../visualizations/12-payment-method-preferences.png")
plt.show()

# + [markdown]
# #### **# Result (Based on Memberships):**
# * All payment methods are significantly used.
# * Cash payment methods are almost equally used among members and non-members.
# * Members use more credit card payments.
# * Non-members use slightly larger e-wallet payments.

# + [markdown]
# #### **# Result (Based on Gender):**
# * All payment methods are significantly used.
# * Cash and credit card payment methods are used more by females. # * Ewallet payment is used almost equally by both genders.
notebooks/eda-for-supermarket.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Visualize a Decision Tree with pydot

# +
from sklearn.datasets import load_iris

iris = load_iris()
X = iris.data
y = iris.target

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y)

from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier()  # Instantiate tree class
dtc.fit(X_train, y_train)

# +
import numpy as np
from sklearn import tree
# `sklearn.externals.six` was removed in scikit-learn 0.23; the standard
# library StringIO is a drop-in replacement here.
from io import StringIO
import pydot
from IPython.display import Image


def render_tree(classifier):
    """Export *classifier* to Graphviz dot format and return a PNG Image."""
    dot_iris = StringIO()
    tree.export_graphviz(classifier, out_file=dot_iris,
                         feature_names=iris.feature_names)
    # pydot >= 1.2 returns a *list* of graphs; unpack the single graph.
    (graph,) = pydot.graph_from_dot_data(dot_iris.getvalue())
    return Image(graph.create_png())


render_tree(dtc)

# +
# Same tree, but split on information gain (entropy) instead of Gini impurity.
dtc = DecisionTreeClassifier(criterion='entropy')
dtc.fit(X_train, y_train)
render_tree(dtc)
Chapter09/Visualize a Decision Tree with pydot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="1UCMn7s7F1Np" # # Reference # # This example is taken from the book [DL with Python](https://www.manning.com/books/deep-learning-with-python) by <NAME>. It explains how to create CNN classifier from scratch and how to improve its performances # # All the notebooks from the book are available for free on [Github](https://github.com/fchollet/deep-learning-with-python-notebooks) # # If you like to run the example locally follow the instructions provided on [Keras website](https://keras.io/#installation) # # --- # + id="1ZeAAi2-F1Nq" outputId="f22fd89c-68df-466d-beef-d1bfee7e2b77" colab={"base_uri": "https://localhost:8080/", "height": 35} import keras keras.__version__ # + [markdown] id="V0KZR1HuF1Nt" # # 5.2 - Using convnets with small datasets # # This notebook contains the code sample found in Chapter 5, Section 2 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments. # # ## Training a convnet from scratch on a small dataset # # Having to train an image classification model using only very little data is a common situation, which you likely encounter yourself in # practice if you ever do computer vision in a professional context. # # Having "few" samples can mean anywhere from a few hundreds to a few tens of thousands of images. As a practical example, we will focus on # classifying images as "dogs" or "cats", in a dataset containing 4000 pictures of cats and dogs (2000 cats, 2000 dogs). We will use 2000 # pictures for training, 1000 for validation, and finally 1000 for testing. 
# # In this section, we will review one basic strategy to tackle this problem: training a new model from scratch on what little data we have. We # will start by naively training a small convnet on our 2000 training samples, without any regularization, to set a baseline for what can be # achieved. This will get us to a classification accuracy of 71%. At that point, our main issue will be overfitting. Then we will introduce # *data augmentation*, a powerful technique for mitigating overfitting in computer vision. By leveraging data augmentation, we will improve # our network to reach an accuracy of 82%. # # In the next section, we will review two more essential techniques for applying deep learning to small datasets: *doing feature extraction # with a pre-trained network* (this will get us to an accuracy of 90% to 93%), and *fine-tuning a pre-trained network* (this will get us to # our final accuracy of 95%). Together, these three strategies -- training a small model from scratch, doing feature extracting using a # pre-trained model, and fine-tuning a pre-trained model -- will constitute your future toolbox for tackling the problem of doing computer # vision with small datasets. # + [markdown] id="xRKLALljF1Nt" # ## The relevance of deep learning for small-data problems # # You will sometimes hear that deep learning only works when lots of data is available. This is in part a valid point: one fundamental # characteristic of deep learning is that it is able to find interesting features in the training data on its own, without any need for manual # feature engineering, and this can only be achieved when lots of training examples are available. This is especially true for problems where # the input samples are very high-dimensional, like images. # # However, what constitutes "lots" of samples is relative -- relative to the size and depth of the network you are trying to train, for # starters. 
It isn't possible to train a convnet to solve a complex problem with just a few tens of samples, but a few hundreds can # potentially suffice if the model is small and well-regularized and if the task is simple. # Because convnets learn local, translation-invariant features, they are very # data-efficient on perceptual problems. Training a convnet from scratch on a very small image dataset will still yield reasonable results # despite a relative lack of data, without the need for any custom feature engineering. You will see this in action in this section. # # But what's more, deep learning models are by nature highly repurposable: you can take, say, an image classification or speech-to-text model # trained on a large-scale dataset then reuse it on a significantly different problem with only minor changes. Specifically, in the case of # computer vision, many pre-trained models (usually trained on the ImageNet dataset) are now publicly available for download and can be used # to bootstrap powerful vision models out of very little data. That's what we will do in the next section. # # For now, let's get started by getting our hands on the data. # + [markdown] id="RjqNFQ8TF1Nu" # ## Downloading the data # # The cats vs. dogs dataset that we will use isn't packaged with Keras. It was made available by Kaggle.com as part of a computer vision # competition in late 2013, back when convnets weren't quite mainstream. You can download the original dataset at: # `https://www.kaggle.com/c/dogs-vs-cats/data` (you will need to create a Kaggle account if you don't already have one -- don't worry, the # process is painless). # # The pictures are medium-resolution color JPEGs. They look like this: # # ![cats_vs_dogs_samples](https://s3.amazonaws.com/book.keras.io/img/ch5/cats_vs_dogs_samples.jpg) # + [markdown] id="2AaZAbYVF1Nv" # Unsurprisingly, the cats vs. dogs Kaggle competition in 2013 was won by entrants who used convnets. The best entries could achieve up to # 95% accuracy. 
# In our own example, we will get fairly close to this accuracy (in the next
# section), even though we will be training our models on less than 10% of the
# data that was available to the competitors.
# The original dataset contains 25,000 images of dogs and cats (12,500 from
# each class) and is 543MB large (compressed). After downloading and
# uncompressing it, we create a new dataset containing three subsets: a
# training set with 1000 samples of each class, a validation set with 500
# samples of each class, and a test set with 500 samples of each class.

# + [markdown]
# ## Run those cells If you're running the code on Colab

# +
# !gdown https://drive.google.com/uc?id=1UT_LQ4xi2wBW6YraHtF9mVX-g_WRNRtt

# +
# !unzip dogs-vs-cats.zip -d dogs-vs-cats
# !unzip dogs-vs-cats/test1.zip -d dogs-vs-cats
# !unzip dogs-vs-cats/train.zip -d dogs-vs-cats

# +
import os, shutil

# The path to the directory where the original dataset was uncompressed
original_dataset_dir = 'dogs-vs-cats/train'

# The directory where we will store our smaller dataset
base_dir = 'dogs-vs-cats-small'


def _make_split_dirs(split_name):
    """Create <base_dir>/<split>/{cats,dogs} and return (split, cats, dogs) paths."""
    split_dir = os.path.join(base_dir, split_name)
    cats_dir = os.path.join(split_dir, 'cats')
    dogs_dir = os.path.join(split_dir, 'dogs')
    for d in (split_dir, cats_dir, dogs_dir):
        os.makedirs(d, exist_ok=True)
    return split_dir, cats_dir, dogs_dir


def _copy_images(prefix, start, stop, dst_dir):
    """Copy <prefix>.<i>.jpg for i in [start, stop) from the original dataset."""
    for i in range(start, stop):
        fname = '{}.{}.jpg'.format(prefix, i)
        shutil.copyfile(os.path.join(original_dataset_dir, fname),
                        os.path.join(dst_dir, fname))


# Directories for our training, validation and test splits.
train_dir, train_cats_dir, train_dogs_dir = _make_split_dirs('train')
validation_dir, validation_cats_dir, validation_dogs_dir = _make_split_dirs('validation')
test_dir, test_cats_dir, test_dogs_dir = _make_split_dirs('test')

# Per class: images 0-999 -> train, 1000-1499 -> validation, 1500-1999 -> test.
_copy_images('cat', 0, 1000, train_cats_dir)
_copy_images('cat', 1000, 1500, validation_cats_dir)
_copy_images('cat', 1500, 2000, test_cats_dir)
_copy_images('dog', 0, 1000, train_dogs_dir)
_copy_images('dog', 1000, 1500, validation_dogs_dir)
_copy_images('dog', 1500, 2000, test_dogs_dir)

# We also create a dir to save the models
models_dir = 'models'
os.makedirs(models_dir, exist_ok=True)

# +
print('total training cat images:', len(os.listdir(train_cats_dir)))

# +
print('total training dog images:', len(os.listdir(train_dogs_dir)))

# +
print('total validation cat images:', len(os.listdir(validation_cats_dir)))

# +
print('total validation dog images:', len(os.listdir(validation_dogs_dir)))

# +
print('total test cat images:', len(os.listdir(test_cats_dir)))

# +
print('total test dog images:', len(os.listdir(test_dogs_dir)))

# + [markdown]
# So we have indeed 2000 training images, and then 1000 validation images and
# 1000 test images. In each split, there is the same number of samples from
# each class: this is a balanced binary classification problem, which means
# that classification accuracy will be an appropriate measure of success.

# + [markdown]
# ## Building our network
#
# We've already built a small convnet for MNIST in the previous example, so you
# should be familiar with them. We will reuse the same general structure: our
# convnet will be a stack of alternated `Conv2D` (with `relu` activation) and
# `MaxPooling2D` layers.
# # However, since we are dealing with bigger images and a more complex problem, we will make our network accordingly larger: it will have one # more `Conv2D` + `MaxPooling2D` stage. This serves both to augment the capacity of the network, and to further reduce the size of the # feature maps, so that they aren't overly large when we reach the `Flatten` layer. Here, since we start from inputs of size 150x150 (a # somewhat arbitrary choice), we end up with feature maps of size 7x7 right before the `Flatten` layer. # # Note that the depth of the feature maps is progressively increasing in the network (from 32 to 128), while the size of the feature maps is # decreasing (from 148x148 to 7x7). This is a pattern that you will see in almost all convnets. # # Since we are attacking a binary classification problem, we are ending the network with a single unit (a `Dense` layer of size 1) and a # `sigmoid` activation. This unit will encode the probability that the network is looking at one class or the other. 
# +
from keras import layers
from keras import models

# Alternated Conv2D/MaxPooling2D stages; feature-map depth grows (32 -> 128)
# while the spatial size shrinks (148x148 -> 7x7) before the Flatten layer.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
# Single sigmoid unit: encodes the probability of the positive class.
model.add(layers.Dense(1, activation='sigmoid'))

# + [markdown]
# Let's take a look at how the dimensions of the feature maps change with every
# successive layer:

# +
model.summary()

# + [markdown]
# For our compilation step, we'll go with the `RMSprop` optimizer as usual.
# Since we ended our network with a single sigmoid unit, we will use binary
# crossentropy as our loss.

# +
from keras import optimizers

# `learning_rate` replaces the long-deprecated `lr` keyword (removed in
# recent Keras releases); the value itself is unchanged.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(learning_rate=1e-4),
              metrics=['acc'])

# + [markdown]
# ## Data preprocessing
#
# Data should be formatted into appropriately pre-processed floating point
# tensors before being fed into our network. Currently, our data sits on a
# drive as JPEG files, so the steps for getting it into our network are roughly:
#
# * Read the picture files.
# * Decode the JPEG content to RGB grids of pixels.
# * Convert these into floating point tensors.
# * Rescale the pixel values (between 0 and 255) to the [0, 1] interval
#   (neural networks prefer to deal with small input values).
# It may seem a bit daunting, but thankfully Keras has utilities to take care
# of these steps automatically. The `keras.preprocessing.image` module
# contains the `ImageDataGenerator` class, which quickly sets up Python
# generators that turn image files on disk into batches of pre-processed
# tensors. This is what we will use here.

# +
from keras.preprocessing.image import ImageDataGenerator

# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

# Both directory iterators share the same resize, batch and label settings.
flow_kwargs = dict(
    target_size=(150, 150),  # all images resized to 150x150
    batch_size=20,
    class_mode='binary',     # binary labels, matching binary_crossentropy
)
train_generator = train_datagen.flow_from_directory(train_dir, **flow_kwargs)
validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                        **flow_kwargs)

# + [markdown]
# Let's take a look at the output of one of these generators: it yields
# batches of 150x150 RGB images (shape `(20, 150, 150, 3)`) and binary labels
# (shape `(20,)`); 20 is the batch size. The generator yields these batches
# indefinitely, looping endlessly over the images in the target folder, so we
# must `break` the iteration at some point.

# +
for data_batch, labels_batch in train_generator:
    print('data batch shape:', data_batch.shape)
    print('labels batch shape:', labels_batch.shape)
    break

# + [markdown]
# Let's fit our model to the data using the generator.
We do it using the `fit_generator` method, the equivalent of `fit` for data generators # like ours. It expects as first argument a Python generator that will yield batches of inputs and targets indefinitely, like ours does. # Because the data is being generated endlessly, the generator needs to know example how many samples to draw from the generator before # declaring an epoch over. This is the role of the `steps_per_epoch` argument: after having drawn `steps_per_epoch` batches from the # generator, i.e. after having run for `steps_per_epoch` gradient descent steps, the fitting process will go to the next epoch. In our case, # batches are 20-sample large, so it will take 100 batches until we see our target of 2000 samples. # # When using `fit_generator`, one may pass a `validation_data` argument, much like with the `fit` method. Importantly, this argument is # allowed to be a data generator itself, but it could be a tuple of Numpy arrays as well. If you pass a generator as `validation_data`, then # this generator is expected to yield batches of validation data endlessly, and thus you should also specify the `validation_steps` argument, # which tells the process how many batches to draw from the validation generator for evaluation. 
# +
# `Model.fit` accepts Python generators directly; the old `fit_generator`
# wrapper is deprecated (and removed in recent Keras/TensorFlow releases).
# All arguments are unchanged: 100 steps x 20-sample batches = 2000
# training images per epoch; 50 validation steps x 20 = the 1000
# validation images.
history = model.fit(
      train_generator,
      steps_per_epoch=100,
      epochs=5,
      validation_data=validation_generator,
      validation_steps=50)

# + [markdown]
# It is good practice to always save your models after training:

# +
model.save(os.path.join(models_dir, 'cats_and_dogs_small_1.h5'))

# + [markdown]
# Let's plot the loss and accuracy of the model over the training and
# validation data during training:

# +
import matplotlib.pyplot as plt

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()

# + [markdown]
# These plots are characteristic of overfitting. Our training accuracy
# increases over time toward 100%, while our validation accuracy stalls at
# 70-72%; the validation loss reaches its minimum after only a few epochs and
# then stalls, while the training loss keeps decreasing toward 0.
#
# Because we only have relatively few training samples (2000), overfitting is
# our number one concern. You already know techniques that can help mitigate
# it, such as dropout and weight decay (L2 regularization).
# We are now going to introduce a new one, specific to computer vision, and
# used almost universally when processing images with deep learning models:
# *data augmentation*.

# + [markdown]
# ## Using data augmentation
#
# > Good example here as well: https://machinelearningmastery.com/how-to-configure-image-data-augmentation-when-training-deep-learning-neural-networks/
#
# Overfitting is caused by having too few samples to learn from. Data
# augmentation generates more training data from the existing samples by
# applying random, believable-looking transformations, so that at training
# time the model never sees the exact same picture twice — exposing it to more
# aspects of the data and helping it generalize better.

# +
datagen = ImageDataGenerator(
    rotation_range=40,       # random rotation, in degrees
    width_shift_range=0.5,   # random horizontal shift (fraction of width)
    height_shift_range=0.2,  # random vertical shift (fraction of height)
    shear_range=0.2,         # random shearing transformations
    zoom_range=0.5,          # random zoom inside pictures
    horizontal_flip=True,    # random horizontal flips
    fill_mode='nearest')     # how newly created pixels are filled in

# + [markdown]
# These are just a few of the options available (for more, see the Keras
# documentation). Quickly going over what we just wrote:
#
# * `rotation_range` is a value in degrees (0-180), a range within which to
#   randomly rotate pictures.
# * `width_shift` and `height_shift` are ranges (as a fraction of total width
#   or height) within which to randomly translate pictures vertically or
#   horizontally.
# * `shear_range` is for randomly applying shearing transformations.
# * `zoom_range` is for randomly zooming inside pictures.
# * `horizontal_flip` is for randomly flipping half of the images horizontally -- relevant when there are no assumptions of horizontal # asymmetry (e.g. real-world pictures). # * `fill_mode` is the strategy used for filling in newly created pixels, which can appear after a rotation or a width/height shift. # # Let's take a look at our augmented images: # + id="ZV94lhifF1Ob" outputId="02892fc8-240e-4a8f-89b0-26b9d7f7a773" colab={"base_uri": "https://localhost:8080/", "height": 1000} # This is module with image preprocessing utilities from keras.preprocessing import image fnames = [os.path.join(train_cats_dir, fname) for fname in os.listdir(train_cats_dir)] # We pick one image to "augment" img_path = fnames[3] # Read the image and resize it img = image.load_img(img_path, target_size=(150, 150)) # Convert it to a Numpy array with shape (150, 150, 3) x = image.img_to_array(img) # Reshape it to (1, 150, 150, 3) x = x.reshape((1,) + x.shape) # The .flow() command below generates batches of randomly transformed images. # It will loop indefinitely, so we need to `break` the loop at some point! i = 0 for batch in datagen.flow(x, batch_size=1): plt.figure(i) imgplot = plt.imshow(image.array_to_img(batch[0])) i += 1 if i % 4 == 0: break plt.show() # + [markdown] id="8N8gVGBmF1Od" # If we train a new network using this data augmentation configuration, our network will never see twice the same input. However, the inputs # that it sees are still heavily intercorrelated, since they come from a small number of original images -- we cannot produce new information, # we can only remix existing information. As such, this might not be quite enough to completely get rid of overfitting. 
To further fight # overfitting, we will also add a Dropout layer to our model, right before the densely-connected classifier: # + id="1qYqI8ZJF1Od" model = models.Sequential() model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(128, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(128, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Flatten()) model.add(layers.Dropout(0.5)) model.add(layers.Dense(512, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc']) # + [markdown] id="DAbA-6WLF1Of" # Let's train our network using data augmentation and dropout: # + id="BdyqSQjQF1Of" outputId="ebf63301-c599-4d35-f41f-b2546393d56f" colab={"base_uri": "https://localhost:8080/", "height": 1000} train_datagen = ImageDataGenerator( rescale=1./255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True,) # Note that the validation data should not be augmented! test_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_directory( # This is the target directory train_dir, # All images will be resized to 150x150 target_size=(150, 150), batch_size=32, # Since we use binary_crossentropy loss, we need binary labels class_mode='binary') validation_generator = test_datagen.flow_from_directory( validation_dir, target_size=(150, 150), batch_size=32, class_mode='binary') history = model.fit( train_generator, steps_per_epoch=2000//32-1, epochs=100, validation_data=validation_generator, validation_steps=50) # + [markdown] id="AvK0LXlTF1Oh" # Let's save our model -- we will be using it in the section on convnet visualization. 
__(You should also download them)__ # + id="CF1EbkBTF1Oh" model.save(os.path.join(models_dir, 'cats_and_dogs_small_2.h5')) # + [markdown] id="et73l4yMRlk-" # We can also save the directory with the small dataset and download it for later # + id="lONdMdzPWWTJ" outputId="dbbc69db-a0ba-4292-809f-f44000aa4bfe" colab={"base_uri": "https://localhost:8080/", "height": 34} from google.colab import drive drive.mount('/content/drive') # + id="Fh_aGHJORSbg" # !zip -r dogs-vs-cats-small.zip dogs-vs-cats-small # !cp dogs-vs-cats-small.zip /content/drive/My\ Drive/Datasets\ and\ Models/ # + [markdown] id="NSiqZ2Q8F1Oj" # Let's plot our results again: # + id="WngBeSQsF1Oj" outputId="9bb39aae-090a-4866-e0c5-77da229b7424" colab={"base_uri": "https://localhost:8080/", "height": 545} acc = history.history['acc'] # val_acc = history.history['val_acc'] loss = history.history['loss'] # val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'bo', label='Training acc') # plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') # plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() # + [markdown] id="qmMSqiHyF1Ol" # Thanks to data augmentation and dropout, we are no longer overfitting: the training curves are rather closely tracking the validation # curves. We are now able to reach an accuracy of 82%, a 15% relative improvement over the non-regularized model. # # By leveraging regularization techniques even further and by tuning the network's parameters (such as the number of filters per convolution # layer, or the number of layers in the network), we may be able to get an even better accuracy, likely up to 86-87%. However, it would prove # very difficult to go any higher just by training our own convnet from scratch, simply because we have so little data to work with. 
As a # next step to improve our accuracy on this problem, we will have to leverage a pre-trained model, which will be the focus of the next two # sections. # + id="JkyLzS45F1Ol" outputId="e8b55cd9-b730-4d94-d15f-1f8c2a41c60f" colab={"base_uri": "https://localhost:8080/", "height": 34} from keras.preprocessing import image import numpy as np img = image.load_img('/content/dogs-vs-cats-small/test/dogs/dog.1555.jpg', target_size=model.input_shape[1:3]) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) predictions = model.predict(x) print(predictions[0][0]) # + id="OYn7NOsKF1On"
samples/06-using-convnets-with-small-datasets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # `Pandas` + `Matplotlib` Library Tutorial import numpy as np import matplotlib.pyplot as plt import pandas as pd # ## Plotting Real World Examples # ### Line Graph gas = pd.read_csv('gas_prices.csv') print(type(gas)) gas # We can see individual columns using [column_name] print(gas['Year']) gas['USA'] gas['Australia'] # + # we can now plot different countries' gas price using a line plot plt.figure(figsize=(10, 7)) plt.plot(gas['Year'], gas['USA'], '.-', label='USA') plt.plot(gas['Year'], gas['Australia'], '.-', label='Australia') plt.plot(gas['Year'], gas['France'], '.-', label='France') plt.plot(gas['Year'], gas['Germany'], '.-', label='Germany') plt.xticks(gas['Year'][::2]) plt.legend() plt.xlabel('Year') plt.ylabel('Price (USD)') plt.title('Gas Price For Different Countries (USD)') plt.show() # - # ## Fifa Data fifa = pd.read_csv('../../data/fifa_data.csv') fifa.head(5) # ### Distribution of Players By Skill # skills of players overall fifa['Overall'] # creating the bins for histogram bins = [x for x in range(0, 101, 10)] bins # + plt.figure(figsize=(10, 7)) plt.hist(fifa['Overall'], bins, rwidth=0.8) plt.xlabel('Skill Level') plt.ylabel('Number Of Players') plt.title('Player Skills FIFA 2018') plt.show() # - # ## Pie Chart # We will find the foot preferences of all the players fifa['Preferred Foot'] left_foot_users = (fifa['Preferred Foot'] == 'Left').sum() left_foot_users right_foot_users = (fifa['Preferred Foot'] == 'Right').sum() right_foot_users plt.figure(figsize=(10, 7)) label = ['Left Foot', 'Right Foot'] plt.pie([left_foot_users, right_foot_users], labels=label, autopct='%.2f%%') plt.title('FIFA Players Foot Preference') plt.show() # ## Weight of Professional Soccer Players fifa['Weight'] fifa['Weight'] = [int(x.strip('lbs')) if 
type(x) == str else x for x in fifa['Weight']] fifa['Weight'] light = (fifa['Weight'] < 125).sum() light light_medium = ((fifa['Weight'] < 150) & (fifa['Weight'] > 125)).sum() light_medium medium = ((fifa.Weight >= 150) & (fifa.Weight < 175)).sum() medium medium_heavy = ((fifa.Weight >= 175) & (fifa.Weight < 200)).sum() medium_heavy medium_heavy = ((fifa.Weight >= 175) & (fifa.Weight < 200)).sum() medium_heavy heavy = (fifa.Weight >= 200).sum() heavy weights = [light, light_medium, medium, medium_heavy, heavy] labels = ['Under 125', '125-150', '150-175', '175-200', 'over 200'] plt.figure(figsize=(10, 10)) plt.pie(weights, labels=labels, pctdistance=0.8, autopct='%.2f%%') plt.title('Weight of Professional Soccer Players (lbs)') plt.show() ## Weight Distributions By Frequnecy weights = fifa['Weight'][:] weights barcelona = fifa.loc[fifa.Club == "FC Barcelona"]['Overall'] madrid = fifa.loc[fifa.Club == "Real Madrid"]['Overall'] revs = fifa.loc[fifa.Club == "New England Revolution"]['Overall'] barcelona madrid revs # + plt.figure(figsize=(10, 10)) bp = plt.boxplot([barcelona, madrid, revs], labels=['FC Barcelona','Real Madrid','NE Revolution'], patch_artist=True, medianprops={'linewidth': 2}) plt.title('Professional Soccer Team Comparison') plt.ylabel('FIFA Overall Rating') for box in bp['boxes']: # change outline color box.set(color='#4286f4', linewidth=2) # change fill color box.set(facecolor = '#e0e0e0' ) # change hatch box.set(hatch = '#') # / * - (none) plt.show()
notebooks/matplotlib/pandas-matplot-lib-tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="WvAT84bA3fc1" colab_type="code" outputId="11f293fa-ee46-4d50-a7a7-1333a98a6fd9" executionInfo={"status": "ok", "timestamp": 1583515424919, "user_tz": -60, "elapsed": 18956, "user": {"displayName": "<NAME>\u0142czy\u0144ska", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi3jDMt1EbfFEpVNRSF0tL54DkmkDTbVq8efj82aQ=s64", "userId": "10382511807738739434"}} colab={"base_uri": "https://localhost:8080/", "height": 457} # !pip install --upgrade tables # !pip install eli5 # !pip install xgboost # !pip install hyperopt # + id="yNbXCQcn3zZh" colab_type="code" colab={} import pandas as pd import numpy as np import xgboost as xgb from sklearn.metrics import mean_absolute_error as mae from sklearn.model_selection import cross_val_score from hyperopt import hp, fmin, tpe, STATUS_OK import eli5 from eli5.sklearn import PermutationImportance # + id="gfdAbmtJ4xCO" colab_type="code" outputId="367ec240-7d03-466b-d50a-6954db3c72a9" executionInfo={"status": "ok", "timestamp": 1583515665910, "user_tz": -60, "elapsed": 4247, "user": {"displayName": "<NAME>\u0142czy\u0144ska", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi3jDMt1EbfFEpVNRSF0tL54DkmkDTbVq8efj82aQ=s64", "userId": "10382511807738739434"}} colab={"base_uri": "https://localhost:8080/", "height": 33} # cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car" # + id="2iS_mCMT43P5" colab_type="code" outputId="fbef017a-faab-452f-f4b8-3373b304ba69" executionInfo={"status": "ok", "timestamp": 1583515687110, "user_tz": -60, "elapsed": 6118, "user": {"displayName": "<NAME>\u0142czy\u0144ska", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi3jDMt1EbfFEpVNRSF0tL54DkmkDTbVq8efj82aQ=s64", "userId": "10382511807738739434"}} colab={"base_uri": "https://localhost:8080/", "height": 33} df 
= pd.read_hdf('data/car_h5') df.shape # + [markdown] id="SIAvfym25EBe" colab_type="text" # ##Feature Engineering # + id="NJFnpYt74796" colab_type="code" colab={} SUFFIX_CAT='_cat' for feat in df.columns: if isinstance(df[feat][0], list): continue factorized_values = df[feat].factorize()[0] if SUFFIX_CAT in feat: df[feat] = factorized_values else: df[feat + SUFFIX_CAT] = factorized_values # + id="tIqEuJm65QQZ" colab_type="code" outputId="6fd11a1d-4b19-405b-e121-6fd383f9df43" executionInfo={"status": "error", "timestamp": 1583517194191, "user_tz": -60, "elapsed": 1078, "user": {"displayName": "<NAME>142czy\u0144ska", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi3jDMt1EbfFEpVNRSF0tL54DkmkDTbVq8efj82aQ=s64", "userId": "10382511807738739434"}} colab={"base_uri": "https://localhost:8080/", "height": 360} df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x)) df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]) ) df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int( str(x).split('cm')[0].replace(' ', '')) ) # + id="n-eRq78M6Io2" colab_type="code" outputId="c04b3ac0-7cc8-4c46-9f58-1a395b7135bd" executionInfo={"status": "error", "timestamp": 1583518640272, "user_tz": -60, "elapsed": 1082, "user": {"displayName": "<NAME>\u0142czy\u0144ska", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi3jDMt1EbfFEpVNRSF0tL54DkmkDTbVq8efj82aQ=s64", "userId": "10382511807738739434"}} colab={"base_uri": "https://localhost:8080/", "height": 130} df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x)) df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split(' ')[0]) df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ', '')) ) # + id="rgDyy0pL_H8a" 
colab_type="code" colab={} def run_model(model, feats): X = df[feats].values y = df['price_value'].values scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error') return np.mean(scores), np.std(scores) # + id="n2WNNd_WB5j6" colab_type="code" outputId="1b04a489-3177-4e71-fe11-f4a0fb9a03e8" executionInfo={"status": "ok", "timestamp": 1583519686337, "user_tz": -60, "elapsed": 13635, "user": {"displayName": "<NAME>\u0142czy\u0144ska", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi3jDMt1EbfFEpVNRSF0tL54DkmkDTbVq8efj82aQ=s64", "userId": "10382511807738739434"}} colab={"base_uri": "https://localhost:8080/", "height": 33} feats = ['param_napęd_cat','param_rok-produkcji','param_stan_cat','param_skrzynia-biegów_cat','param_faktura-vat_cat','param_moc','param_marka-pojazdu_cat','feature_kamera-cofania_cat','param_typ_cat','param_pojemność-skokowa','seller_name_cat','feature_wspomaganie-kierownicy_cat','param_model-pojazdu_cat','param_wersja_cat','param_kod-silnika_cat','feature_system-start-stop_cat','feature_asystent-pasa-ruchu_cat','feature_czujniki-parkowania-przednie_cat','feature_łopatki-zmiany-biegów_cat','feature_regulowane-zawieszenie_cat'] xgb_params = { 'max_depth': 5, 'n_estimators': 50, 'learning_rate': 0.1, 'objective': 'reg:squarederror', 'seed': 0 } run_model (xgb.XGBRegressor(**xgb_params), feats ) # + [markdown] id="ZXxPcV9RUQ1r" colab_type="text" # ## Hyperopt # + id="KiYBI1c3Cy_L" colab_type="code" outputId="546f1a7b-9b63-4fcf-a2e4-eb8f42ced3ff" executionInfo={"status": "ok", "timestamp": 1583522337308, "user_tz": -60, "elapsed": 1314916, "user": {"displayName": "<NAME>\u0142czy\u0144ska", "photoUrl": "<KEY>", "userId": "10382511807738739434"}} colab={"base_uri": "https://localhost:8080/", "height": 917} def obj_func(params): print ("Training with params: ") print(params) mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats) return {'loss': np.abs(mean_mae), 'status': STATUS_OK} #space xgb_reg_params= { 
'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)), 'max_depth': hp.choice('max_depth', np.arange(5, 16, 1, dtype=int)), 'subsample': hp.quniform ('subsample', 0.5 , 1, 0.5), 'colsample_bytree': hp.quniform ('colsample_bytree', 0.5 , 1, 0.5), 'objective': 'reg:squarederror', 'n_estimators': 100, 'seed': 0, } ## run best = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals=25) best # + id="fq3mbuaYLgeX" colab_type="code" colab={}
day5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/14vaishaligiri/atom/blob/master/Copy_of_AI%26ML_Project.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="uxaPzB7wgSQ5" colab_type="code" colab={} #import the pandas libraries #visualization of data libraeies import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns import plotly.express as px import plotly.graph_objects as go import folium from folium import plugins plt.rcParams['figure.figsize'] = 10,12 import warnings warnings.filterwarnings('ignore') # + [markdown] id="rrdS7QCxroL6" colab_type="text" # #Analysing the present condition in India : How it started in India? # "The first **COVID-19** case was reported on 30th January 2020 when a student arrived **Kerala** from Wuhan. Just in next 2 days, Kerela reported 2 more cases. For almost a month, no new cases were reported in India, however, on 2nd March 2020, five new cases of corona virus were reported in Kerala again and since then the cases have been rising affecting **25** states, till now *(Bihar and Manipur being the most recent)*. Here is a brief timeline of the cases in India. # #Recent COVID-19 updates in India": # In India, from Jan 30 to 3:53pm CEST, 28 June 2020, there have been 528,859 confirmed cases of COVID-19 with 16,095 deaths. 
# Sikkim on Saturday reported its first +ve COVID-19 case\n", # "- With over 6,500 fresh cases, the Covid in India rose to 1,25,101 on Saturday morning, with 3,720 fatalities\n", # "- West Bengal asks Railways not to send migrant trains to State till May 26 in view of Cyclone Amphan\n", # "- 196 new COVID 19 positive cases were reported in Karnataka on Saturday\n", # "- Complete lockdown in Bengaluru on Sunday. \n", # " - Bruhat Bengaluru Mahanagara Palike (BBMP) Commissioner B.H. <NAME> said the conditions and restrictions on Sunday will be similar to that under coronavirus lockdown # # #How is AI-ML useful in fighting the COVID-19 pandemic? # 1.Medical resource optimization # 2.Ensuring demand planning stability # 3.Contact tracing # 4.Situational awareness and critical response analysis # # #1.1 Scraping the datasets from the [official Govt. website] # #(https://www.mohfw.gov.in) # # # # + id="YvTYjFf3vy3_" colab_type="code" colab={} #for date and time opeations from datetime import datetime # for file and folder operations import os # for regular expression opeations import re # for listing files in a folder import glob # for getting web contents import requests # for scraping web contents from bs4 import BeautifulSoup # + id="DLRkVmR-0wFM" colab_type="code" colab={} # get data # link at which web data recides link = 'https://www.mohfw.gov.in/' # get web data req = requests.get(link) # parse web data soup = BeautifulSoup(req.content,"html.parser") # + id="IgodYGvH1eef" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="046989ca-bad7-49c0-bc1b-21d874e42295" # find the table # our target table is the last table in the page # get the table head # table head may contain the column names, titles, subtitles thead = soup.find_all('thead')[-1] # print(thead) # get all the rows in table head # it usually have only one row, which has the column names head = thead.find_all('tr') # print(head) # get the table tbody # it contains the 
contents tbody = soup.find_all('tbody')[-1] # print(tbody) # get all the rows in table body # each row is each state's entry body = tbody.find_all('tr') # print(body)" # get the table contents # container for header rows / column title head_rows = [] # container for table body / contents body_rows = [] # loop through the head and append each row to head for tr in head: td = tr.find_all(['th', 'td']) row = [i.text for i in td] head_rows.append(row) # print(head_rows) # loop through the body and append each row to body for tr in body: td = tr.find_all(['th', 'td']) row = [i.text for i in td] body_rows.append(row) # print(head_rows) # save contents in a dataframe # skip last 3 rows, it contains unwanted info # head_rows contains column title df_bs = pd.DataFrame(body_rows[:len(body_rows)-6], columns=head_rows[0]) # Drop 'S. No.' column df_bs.drop('S. No.', axis=1, inplace=True) # there are 36 states+UT in India df_bs.head(36) # + id="KBZTNQPE6Mr4" colab_type="code" colab={} # date-time information # ===================== #saving a copy of the dataframe df_India = df_bs.copy() # today's date now = datetime.now() # format date to month-day-year df_India['Date'] = now.strftime("%m/%d/%Y") # add 'Date' column to dataframe df_India['Date'] = pd.to_datetime(df_India['Date'], format='%m/%d/%Y') # df_India.head(36) # + id="IMuCEV5l4YIP" colab_type="code" colab={} # remove extra characters from 'Name of State/UT' column df_India['Name of State / UT'] = df_India['Name of State / UT'].str.replace('#', '') df_India['Deaths**'] = df_India['Deaths**'].str.replace('#', '') # + id="T-7us2VC-s8w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f69e9bb9-4180-44c0-b3fb-f767da31658b" # latitude and longitude information #latitude of the states lat = {'Delhi':28.7041, 'Haryana':29.0588, 'Kerala':10.8505, 'Rajasthan':27.0238, 'Telengana':18.1124, 'Uttar Pradesh':26.8467, 'Ladakh':34.2996, 'Tamil Nadu':11.1271, 'Jammu and Kashmir':33.7782, 
'Punjab':31.1471, 'Karnataka':15.3173, 'Maharashtra':19.7515, 'Andhra Pradesh':15.9129, 'Odisha':20.9517, 'Uttarakhand':30.0668, 'West Bengal':22.9868, 'Puducherry': 11.9416, 'Chandigarh': 30.7333, 'Chhattisgarh':21.2787, 'Gujarat': 22.2587, 'Himachal Pradesh': 31.1048, 'Madhya Pradesh': 22.9734, 'Bihar': 25.0961, 'Manipur':24.6637, 'Mizoram':23.1645, 'Goa': 15.2993, 'Andaman and Nicobar Islands': 11.7401, 'Assam' : 26.2006, 'Jharkhand': 23.6102, 'Arunachal Pradesh': 28.2180, 'Tripura': 23.9408, 'Nagaland': 26.1584, 'Meghalaya' : 25.4670, 'D<NAME>i' : 20.1809, 'Sikkim': 27.5330} #longitude of the states long = {'Delhi':77.1025, 'Haryana':76.0856, 'Kerala':76.2711, 'Rajasthan':74.2179, 'Telengana':79.0193, 'Uttar Pradesh':80.9462, 'Ladakh':78.2932, 'Tamil Nadu':78.6569, 'Jammu and Kashmir':76.5762, 'Punjab':75.3412, 'Karnataka':75.7139, 'Maharashtra':75.7139, 'Andhra Pradesh':79.7400, 'Odisha':85.0985, 'Uttarakhand':79.0193, 'West Bengal':87.8550, 'Puducherry': 79.8083, 'Chandigarh': 76.7794, 'Chhattisgarh':81.8661, 'Gujarat': 71.1924, 'Himachal Pradesh': 77.1734, 'Madhya Pradesh': 78.6569, 'Bihar': 85.3131, 'Manipur':93.9063, 'Mizoram':92.9376, 'Goa': 74.1240, 'Andaman and Nicobar Islands': 92.6586, 'Assam' : 92.9376, 'Jharkhand': 85.2799, 'Arunachal Pradesh': 94.7278, 'Tripura': 91.9882, 'Nagaland': 94.5624, 'Meghalaya' : 91.3662, '<NAME>' : 73.0169, 'Sikkim': 88.5122} # add latitude column based on 'Name of State / UT' column df_India['Latitude'] = df_India['Name of State / UT'].map(lat) # add longitude column based on 'Name of State / UT' column df_India['Longitude'] = df_India['Name of State / UT'].map(long) df_India.head(36) # + id="eTZY8j0fBMEz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="031578f8-5950-462a-a034-43f1fc7238bc" # rename columns df_India = df_India.rename(columns={'Cured/Discharged/Migrated' :'Cured/Discharged', 'Total Confirmed cases *': 'Confirmed', 'Total Confirmed cases ': 'Confirmed', 'Total 
Confirmed cases* ': 'Confirmed'}) df_India = df_India.rename(columns={'Cured/Discharged':'Cured'}) df_India = df_India.rename(columns={'Name of State / UT':'State/UnionTerritory'}) df_India = df_India.rename(columns={'Name of State / UT':'State/UnionTerritory'}) df_India = df_India.rename(columns=lambda x: re.sub('Total Confirmed cases' , 'Total Confirmed cases',x)) df_India = df_India.rename(columns={'Deaths ( more than 70% cases due to comorbidities )':'Deaths','Deaths**':'Deaths'}) # unique state names df_India['State/UnionTerritory'].unique() # + id="lXMWiEDZClpT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="2bd01c59-673f-4d89-b380-69f9c65e8d3e" # number of missing values df_India.isna().sum() # + id="6pDXOkRPCzjM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="a6214f41-15f0-42ae-c0c4-705e4c898140" # number of unique values df_India.nunique() # + id="s3Nz2Q236y1G" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="898536c0-30c2-4daa-e7dd-ae7536da2259" df_India.head(36) # + id="RRiAyJCoGQH_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="5ba22d15-8d0b-42fd-975a-49cd1b924179" # complete data info df_India.info() # + id="62-vc5vs7XfJ" colab_type="code" colab={} # fix datatype df_India['Date'] = pd.to_datetime(df_India['Date']) # + id="d2XBLufH7cxw" colab_type="code" colab={} # rename state/UT names df_India['State/UnionTerritory'].replace('Chattisgarh', 'Chhattisgarh', inplace=True) df_India['State/UnionTerritory'].replace('Pondicherry', 'Puducherry', inplace=True) # + id="v2VXu9l97sNi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f00246ec-4fe9-4d74-cbf0-17b085544104" df_India.head(36) # + id="_Hrm73uE7u4A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 609} outputId="0f2d5a6f-f6ca-4850-f93b-52bcabeeb5c5" #Learn how to read a 
.csv file by creating a dataframe using pandas # Reading the datasets df= pd.read_csv('/content/covid_19_india.csv') df_india = df.copy() df # + id="gZxR2Mi480dx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5b9a2f51-78f8-4e96-9649-c34e1e66454c" #COVID19 Cases in India total_cases = df['Confirmed'].sum() print('Total number of confirmed COVID 2019 cases across India till date (23rd May, 2020):', total_cases) # + id="tR9XSK6O8-Jf" colab_type="code" colab={} #Learn how to highlight your dataframe df_temp = df.drop(['Latitude', 'Longitude', 'Date', 'index', 'level_0'], axis=1) #Removing Date, Latitude and Longitude and other extra columns df_temp.style.background_gradient(cmap='Reds') # + id="NI9maSMc-o9d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="33714411-072a-4a28-d2c7-04675165c736" today = now.strftime("%Y_%m_%d") total_cured = df['Cured'].sum() print("Total people who were cured as of "+today+" are: ", total_cured) total_cases = df['Confirmed'].sum() print("Total people who were detected COVID+ve as of "+today+" are: ", total_cases) total_death = df['Deaths'].sum() print("Total people who died due to COVID19 as of "+today+" are: ",total_death) total_active = total_cases-total_cured-total_death print("Total active COVID19 cases as of "+today+" are: ",total_active) # + id="dUxAFw-N-d7G" colab_type="code" colab={} #Total Active is the Total cases - (Number of death + Cured) df['Total Active'] = df['Confirmed'] - (df['Deaths'] + df1['Cured']) total_active = df['Total Active'].sum() print('Total number of active COVID 2019 cases across India:', total_active) Tot_Cases = df.groupby('State/UnionTerritory')['Total Active'].sum().sort_values(ascending=False).to_frame() Tot_Cases.style.background_gradient(cmap='Reds') # + id="55ruU-4kDWuk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 838} outputId="f34363dd-afb5-4dd5-8b60-ce15e6204d4d" import numpy as np 
state_cases = df_india.groupby('State/UnionTerritory')['Confirmed','Deaths','Cured'].max().reset_index() #state_cases = state_cases.astype({'Deaths': 'int'}) state_cases['Active'] = state_cases['Confirmed'] - (state_cases['Deaths']+state_cases['Cured']) state_cases["Death Rate (per 100)"] = np.round(100*state_cases["Deaths"]/state_cases["Confirmed"],2) state_cases["Cure Rate (per 100)"] = np.round(100*state_cases["Cured"]/state_cases["Confirmed"],2) state_cases.sort_values('Confirmed', ascending= False).fillna(0).style.background_gradient(cmap='Blues',subset=["Confirmed"])\ .background_gradient(cmap='Blues',subset=["Deaths"])\ .background_gradient(cmap='Blues',subset=["Cured"])\ .background_gradient(cmap='Blues',subset=["Active"])\ .background_gradient(cmap='Blues',subset=["Death Rate (per 100)"])\ .background_gradient(cmap='Blues',subset=["Cure Rate (per 100)"]) # + id="py_H6yQEDjW4" colab_type="code" colab={} #Finding more detail COVID Insights in India age_details = pd.read_csv('/content/AgeGroupDetails_i0e5l.csv') india_covid_19 = pd.read_csv('/content/covid_19_india.csv') hospital_beds = pd.read_csv('/content/HospitalBedsIndia.csv') individual_details = pd.read_csv('/content/IndividualDetails.csv') ICMR_details = pd.read_csv('/content/ICMRTestingDetails.csv') ICMR_labs = pd.read_csv('/content/ICMRTestingLabs.csv') state_testing = pd.read_csv('/content/StatewiseTestingDetails.csv') population = pd.read_csv('/content/population_india_census2011.csv') # + id="b3S-tPV8EvAw" colab_type="code" colab={} india_covid_19['Date'] = pd.to_datetime(india_covid_19['Date'],dayfirst = True) state_testing['Date'] = pd.to_datetime(state_testing['Date']) ICMR_details['DateTime'] = pd.to_datetime(ICMR_details['DateTime'],dayfirst = True) ICMR_details = ICMR_details.dropna(subset=['TotalSamplesTested', 'TotalPositiveCases']) # + id="g74PajspETQq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 646} outputId="fa3bc357-d127-43f3-8935-53521e588f52" labels = 
list(age_details['AgeGroup']) sizes = list(age_details['TotalCases']) explode = [] for i in labels: explode.append(0.05) plt.figure(figsize= (15,10)) plt.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=9, explode =explode) centre_circle = plt.Circle((0,0),0.70,fc='white') fig = plt.gcf() fig.gca().add_artist(centre_circle) plt.title('India - Age Group wise Distribution',fontsize = 20) plt.axis('equal') plt.tight_layout() # + id="f71BZ8sFGWR_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 336} outputId="cc2d6b24-8c51-4b50-a212-43f60d6073bc" #Total Samples Tested import matplotlib.dates as mdates ICMR_details['Percent_positive'] = round((ICMR_details['TotalPositiveCases']/ICMR_details['TotalSamplesTested'])*100,1) fig, ax1 = plt.subplots(figsize= (15,5)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%b')) ax1.set_ylabel('Positive Cases (% of Total Samples Tested)') ax1.bar(ICMR_details['DateTime'] , ICMR_details['Percent_positive'], color="red",label = 'Percentage of Positive Cases') ax1.text(ICMR_details['DateTime'][0],4, 'Total Samples Tested as of Apr 23rd = 541789', style='italic',fontsize= 10, bbox={'facecolor': 'white' ,'alpha': 0.5, 'pad': 5}) ax2 = ax1.twinx() ax2.xaxis.set_major_formatter(mdates.DateFormatter('%d-%b')) ax2.set_ylabel('Num Samples Tested') ax2.fill_between(ICMR_details['DateTime'],ICMR_details['TotalSamplesTested'],color = 'black',alpha = 0.5,label = 'Samples Tested'); plt.legend(loc="upper left") plt.title('Total Samples Tested') plt.show() # + id="A9-YlfcUGb5b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0a856cfd-6862-4ec3-eaf6-3e0c64568532" import json # get response from the web page response = requests.get('https://api.covid19india.org/state_test_data.json') # get contents from the response content = response.content # parse the json file parsed = json.loads(content) # keys parsed.keys() # + id="iNR5Zj_KHCxe" colab_type="code" colab={"base_uri": 
"https://localhost:8080/", "height": 309} outputId="3dab4516-06b2-41d3-8e02-7e458511df07" # save data in a dataframe tested = pd.DataFrame(parsed['states_tested_data']) # first few rows tested.tail() # + id="EamR6Y0CHZ1W" colab_type="code" colab={} # fix datatype tested['updatedon'] = pd.to_datetime(tested['updatedon']) # + id="K5O-o95UHl6t" colab_type="code" colab={} # save file as a scv file tested.to_csv('updated_tests_latest_state_level.csv', index=False) # + id="h77xo7byHt50" colab_type="code" colab={} state_test_cases = tested.groupby(['updatedon','state'])['totaltested','populationncp2019projection','testpositivityrate', 'testsperpositivecase', 'testsperthousand','totalpeoplecurrentlyinquarantine'].max().reset_index() # + id="T05mR9h0Hxd-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f46e19a3-e4c6-48f5-c1c2-852dae8628a9" state_test_cases.head(36) # + id="ET_oiUo1H6BM" colab_type="code" colab={} state_test_cases = tested.groupby('state')['totaltested','populationncp2019projection','testpositivityrate', 'testsperpositivecase', 'testsperthousand','totalpeoplecurrentlyinquarantine'].max() state_test_cases['testpositivityrate'] = state_test_cases['testpositivityrate'].str.replace('%', '') # + id="UbTr6siLH-wD" colab_type="code" colab={} state_test_cases = state_test_cases.apply(pd.to_numeric) # + id="VRmPFii6ICeD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="3fadb3b1-f155-4d1c-cdd6-4ee0845d95ec" state_test_cases.nunique() # + id="QQ4ZBOM7IKyk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="9b8d3de8-805c-4867-ca1c-4d0b272346d8" state_test_cases.sort_values('totaltested', ascending= False).style.background_gradient(cmap='Blues',subset=["totaltested"])\ .background_gradient(cmap='Blues',subset=["populationncp2019projection"])\ .background_gradient(cmap='Blues',subset=["testpositivityrate"])\ 
.background_gradient(cmap='Blues',subset=["testsperpositivecase"])\ .background_gradient(cmap='Blues',subset=["testsperthousand"])\ .background_gradient(cmap='Blues',subset=["totalpeoplecurrentlyinquarantine"]) # + id="KQLyNwboJB79" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f3efb294-582d-4982-b939-e741e86f9a2e" hospital_beds.head(36) # + id="auN1nNhbJWmi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 679} outputId="36017d11-d047-4136-8f9e-095a6f4e0004" #Exploring Statewise Testing Insights state_test = pd.pivot_table(state_testing, values=['TotalSamples','Negative','Positive'], index='State', aggfunc='max') state_names = list(state_test.index) state_test['State'] = state_names plt.figure(figsize=(25,20)) sns.set_color_codes("pastel") sns.barplot(x="TotalSamples", y= state_names, data=state_test,label="Total Samples", color = '#7370db') sns.barplot(x='Negative', y=state_names, data=state_test,label='Negative', color= '#af8887') sns.barplot(x='Positive', y=state_names, data=state_test,label='Positive', color='#6ff79d') plt.title('Testing statewise insight',fontsize = 20) plt.legend(ncol=2, loc="lower right", frameon=True); # + id="I3Torag4JglI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 536} outputId="636aaacb-5411-481a-b84a-97b8b3082ffa" #Number of ICMR Testing Centres in each state values = list(ICMR_labs['state'].value_counts()) names = list(ICMR_labs['state'].value_counts().index) plt.figure(figsize=(15,10)) sns.set_color_codes("pastel") plt.title('ICMR Testing Centers in each State', fontsize = 20) sns.barplot(x= values, y= names,color = '#ff2345'); # + [markdown] id="09mt7Xy6KI5d" colab_type="text" # #Let's Start with the predictions # # # Prophet is open source software released by Facebook’s Core Data Science team. It is available for download on CRAN and PyPI. 
#
# We use Prophet, a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well.
#
# Why Prophet?
# Accurate and fast: Prophet is used in many applications across Facebook for producing reliable forecasts for planning and goal setting. Facebook finds it to perform better than any other approach in the majority of cases. It fits models in Stan so that you get forecasts in just a few seconds.
#
# Fully automatic: Get a reasonable forecast on messy data with no manual effort. Prophet is robust to outliers, missing data, and dramatic changes in your time series.
#
# Tunable forecasts: The Prophet procedure includes many possibilities for users to tweak and adjust forecasts. You can use human-interpretable parameters to improve your forecast by adding your domain knowledge.
#
# Available in R or Python: Facebook has implemented the Prophet procedure in R and Python. Both of them share the same underlying Stan code for fitting. You can use whatever language you're comfortable with to get forecasts.
# + id="1bVgKT15J7Kg" colab_type="code" colab={} train = pd.read_csv('/content/train.csv') test = pd.read_csv('/content/test.csv') train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) # + id="ML7X_boSKivS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 292} outputId="1a309e91-4fcb-447e-aeea-39b6370646cd" # !pip install Prophet # + id="Rpsyk5z1L6oR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bae63798-3deb-45f3-fb0a-2b150b390a86" # get response from the web page response = requests.get('https://api.covid19india.org/state_test_data.json') # get contents from the response content = response.content # parse the json file parsed = json.loads(content) # keys parsed.keys() # + id="i2vvvrVpL9eU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="55107209-a5d9-4b17-8482-1a6afcee2eee" # get response from the web page response = requests.get('https://api.covid19india.org/state_test_data.json') # get contents from the response content = response.content # parse the json file parsed = json.loads(content) # keys parsed.keys() # + id="Uxsi2R7bMHTj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 779} outputId="6a646f56-a791-4e64-c795-f6a0a5aa5577" # save data in a dataframe th = pd.DataFrame(parsed['states_tested_data']) # first few rows th # + id="Bsjvuo_TMMDV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="e077f90e-3acf-423f-d30b-073874c687d8" th.columns # + id="9FvAdEFFMRC4" colab_type="code" colab={} # save to csv` th.to_csv('tests_latest_state_level.csv', index=False) # + id="uLNfUOZIMVjD" colab_type="code" colab={} # to get web contents import requests # to parse json contents import json # to parse csv files import csv
Copy_of_AI&ML_Project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_pytorch_latest_p36 # language: python # name: conda_pytorch_latest_p36 # --- # # Develop training and inference scripts for Script Mode # # ## Overview # In this notebook, we will learn how to develop training and inference scripts using HuggingFace framework. We will leverage SageMaker pre-build containers for HuggingFace (with PyTorch backend). # # We chose to solve a typical NLP task - text classification. We will use `20 Newsgroups` dataset which assembles ~ 20,000 newsgroup documents across 20 different newsgroups (categories). # # By the end of this notebook you will learn how to: # - prepare text corpus for training and inference using Amazon SageMaker; # - develop training script to run in pre-build HugginFace container; # - configure and schedule training job; # - develop inference code; # - configure and deploy real-time inference endpoint; # - test SageMaker endpoint. # # Please note, that this notebook was tested on SageMaker Notebook instance with latest PyTorch dependencies installed (conda environment `conda_pytorch_latest_p36`). If you are using different environment, please make sure to install following Python dependencies via PIP or Conda installers: # - `scikit-learn`. # - `sagemaker`. # <br><br> # # # ### Selecting Model Architecture # Our study task is to train model which can categorize newsgroup article based on its content into one of categories. # # There are number of model architecture which can address this task. Existing State-of-the-art (SOTA) models are usually based on Transformer architecture. Autoregressive models like BERT and its various derivatives are suitable for this task. We will use concept known as `Transfer learning` where pre-trained model on one task is used for a new task with minimal modifications. 
#
# As a baseline model we will use the model architecture known as `DistilBERT`, which provides high accuracy on a wide variety of tasks and is considerably smaller than other models (for instance, the original BERT model). To adapt the model for the classification task, we need to add a classification layer which will be trained during our training to recognize articles.
#
# ![title](static/finetuning.png)
#
# `HuggingFace Transformers` simplifies model selection and modification for fine-tuning:
# - provides a rich model zoo with a number of pre-trained models and tokenizers.
# - has a simple model API to modify the baseline model for fine-tuning on a specific task.
# - implements inference pipelines, combining data preprocessing and actual inference together.
#
# ### Selecting SageMaker Training Containers
#
# Amazon SageMaker supports the HuggingFace Transformers framework for inference and training. Hence, we won't need to develop any custom container. Instead we will use the `Script Mode` feature to provide our custom training and inference scripts and execute them in pre-built containers. In this example we will develop intuition for how to develop these scripts.
#
# ## Preparing Dataset
# First off, we need to acquire the `20 Newsgroups` dataset. For this, we can use a `sklearn` module utility. To shorten the training cycle, let's choose 6 newsgroup categories (the original dataset contains 20). The datasets will be loaded into memory.
# + from sklearn.datasets import fetch_20newsgroups # We select 6 out of 20 diverse newsgroups categories = [ "comp.windows.x", "rec.autos", "sci.electronics", "misc.forsale", "talk.politics.misc", "alt.atheism" ] train_dataset = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42 ) test_dataset = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=42 ) n=6 # arbitrary sample index print(f"Number of training samples: {len(train_dataset['data'])}") print(f"Number of test samples: {len(test_dataset['data'])}") print(f"\n=========== Sample article for category {train_dataset['target'][n]} ============== \n") print(f"{train_dataset['data'][n]}") # - # Now, we need to save selected datasets into files and upload resulting files to Amazon S3 storage. # SageMaker will download them to training container at training time. # + import csv for file in ['train_dataset.csv', 'test_dataset.csv']: with open(file, 'w') as f: w = csv.DictWriter(f, ['data', 'category_id']) w.writeheader() for i in range(len(train_dataset["data"])): w.writerow({"data":train_dataset["data"][i], "category_id":train_dataset["target"][i]}) # - # `sagemaker.Session()` object provides a set of utilizities to manage interaction with Sagemaker and AWS services in general. Let's use it to upload our data files in dedicated S3 bucket. # + import sagemaker session = sagemaker.Session() train_dataset_uri=session.upload_data("train_dataset.csv", key_prefix="newsgroups") test_dataset_uri=session.upload_data("test_dataset.csv", key_prefix="newsgroups") print(f"Datasets are available in following locations: {train_dataset_uri} and {test_dataset_uri}") # - # ## Developing training script # # When running SageMaker training job we need to provide training script. Additionally, we may provide any other dependencies. We can also install or modify Python packages installed in pre-built containers via `requirements.txt` file. 
# # In this sample, we will use fairly new feature of HuggingFace framework to fine-tune multicategorical classifiers using Trainer API. Let's make sure that training container has newer HuggingFace Transformer library installed. For this, we create `requirements.txt` and specify minimal compatible version. We will provide this file to our SageMaker training job later. # !pygmentize 1_sources/requirements.txt # Next, we need to actually develop training script. See it's content prepared script `1_sources/trian.py` below. Here are several highlights of this script: # * At training time, SageMaker starts training by calling `user_training_script --arg1 value1 --arg2 value2 ...`. Here, arg1..N are hyperparameters provided by users as part of training job configuration. To correctly kick off training process in our script we need to include `a main guard` into our script (see line #100) # - To correctly capture hyperparameters, training script need to be able to parse command line arguments. We use Python argpars library to do it (see code snippet #104-#112) # * `train()` method is resposible for running end-to-end training job. It includes following components: # - calling `_get_tokenized_dataset` to load and tokenize dataset using pretrained DistilBERT tokenizer from HuggingFace library; # - loading and configuring DistilBERT model from HuggingFace model Zoo. 
Please note that we update default config for classification task to adjust for our chosen number of categories (line #80); # - configure HuggingFace Trainer and start training process (lines #86-#93); # - once training is done, we save trained model (line #97) # # # # SageMaker Training toolkit setups up several environmental variables which comes handy when writing your training script: # - `"SM_CHANNEL_TRAIN"` and `"SM_CHANNEL_TEST"` are locations where data files are download before training begins; # - `"SM_OUTPUT_DIR"` is a directory for any output artifacts, SageMaker will upload this directory to S3 whether training job succeeds or failes; # - `"SM_MODEL_DIR"`is a directory to store resulting model artifacts, SageMaker will also upload the model to S3. # ! pygmentize -O linenos=1 1_sources/train.py # ## Running training job # # Once we have training script and dependencies ready, we can proceed and schedule training job via SageMaker Python SDK. # # We start with import of HuggingFace Estimator object and getting IAM execution role for our training job. # + from sagemaker.huggingface.estimator import HuggingFace from sagemaker import get_execution_role role=get_execution_role() # - # Next, we need to define our hyperparameters of our model and training process. These variables will be passed to our script at training time. 
hyperparameters = { "epochs":1, # 2 params below may need to updated if non-GPU instances is used for training "per-device-train-batch-size":16, "per-device-eval-batch-size":64, "warmup-steps":100, "logging-steps":100, "weight-decay":0.01 } # + estimator = HuggingFace( py_version="py36", entry_point="train.py", source_dir="1_sources", pytorch_version="1.7.1", transformers_version="4.6.1", hyperparameters=hyperparameters, instance_type="ml.p2.xlarge", instance_count=1, role=role ) estimator.fit({ "train":train_dataset_uri, "test":test_dataset_uri }) # - # ## Developing Inference Code # # Now that we have trained model, let's deploy it as SageMaker real-time endpoint. Similar to training job, we will use SageMaker pre-build HuggingFace container and will only provide our inference script. The inference requests will be handled by [Multi-Model Server](https://github.com/awslabs/multi-model-server) which exposes HTTP endpoint. # # When using pre-build inference containers, SageMaker automatically recognizes our inference script. According to SageMaker convention, inference script has to contain following methods: # - `model_fn(model_dir)` (lines #16-#45) is executed at container start time to load model in the memory. This method takes model directory as an input argument. You can use `model_fn()` to initiatilize other components of your inference pipeline, such as tokenizer in our case. Note, that HuggingFace Transformers has a convenient Pipeline API which allows to combine data pre-processing (in our case, text tokenization) and actual inference in a single object. Hence, instead of loaded model, we return inference pipeline (line #45). # - `transform_fn(inference_pipeline, data, content_type, accept_type)` is responsible for running actual inference (line #). Since we are communicating with end-client via HTTP, we also need to do payload deserialization and response serialization. 
In our sample example we expect JSON payload and return back JSON payload, however, this can be extended to any other formats based on the requirements (e.g. CSV, Protobuf). # # # Sometimes combining deserialization, inference, and serialization in a single method can be inconvenient. Alternatively, SageMaker supports more granular API: # - `input_fn(request_body, request_content_type)` runs deserialization. # - `predict_fn(deser_input, model)` performs predictions. # - `output_fn(prediction, response_content_type)` run serialization of predictions. # # Note, that `transform_fn()` and `input_fn(); predict_fn(); output_fn()` are mutually exclusive. # # ! pygmentize -O linenos=1 1_sources/inference.py # ## Deploying Inference Endpoint # # Now we are ready to deploy and test our Newsgroup Classification endpoint. We can use method `estimator.create_model()` to configure our model deployment parameters, specifically: # - define inference script and other dependencies which will be uploaded by SageMaker to endpoint; # - identify inference container. If you provide `transformers_version`, `pytorch_version` and `py_version` parameters, SageMaker will automatically find appropriate pre-built inference container (if it exists). Alternatively, you can provide `image_uri` to directly specify container image you wish to use. # # + from sagemaker.huggingface.estimator import HuggingFaceModel model = estimator.create_model(role=role, entry_point="inference.py", source_dir="1_sources", py_version="py36", transformers_version="4.6.1", pytorch_version="1.7.1" ) # - # Next, we define parameters of our endpoint such as number and type of instances behind it. Remember, SageMaker supports horizontal scaling of your inference endpoints! `model.deploy()` method starts inference deployment (which usually takes several minutes) and returns `Predictor` object to run inference requests. 
predictor = model.deploy( initial_instance_count=1, instance_type="ml.m5.xlarge" ) # Now that endpoint is deployed, let's test it out! Note that we don't expect stellar performance, since model is likely undertrained because we only trained for single epoch to shorten training cycle. However, we expect that model will get most predictions right. # # + import random for i in range(10): sample_id = random.randint(0, len(test_dataset['data'])) prediction = predictor.predict([test_dataset['data'][sample_id]]) print(f"Sample index: {sample_id}; predicted newsgroup: {prediction[0]['label']}; actual newsgroup: {test_dataset['target'][sample_id]}") # - # ## Summary # # In this notebook, you learned how to train and deploy custom HuggingFace model using **SageMaker Script mode**. Script mode provide a lot of flexibility for developers when it comes to development of training and inference scripts (as long as it's inline SageMaker conventions which we discussed in this notebook). You can also modify container runtime via `requirements.txt` if you need to install additional Python packages or upload custom code dependencies. # # However, there are scenarios when you need to have more control over your runtime environments. SageMaker allows you to extend pre-built containers or BYO containers. In next notebooks of this chapter we will learn when you need to consider modifying your containers and how to do it.
chapter2/1_Using_SageMaker_Script_Mode.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/someshsingh22/TeamEleven/blob/main/ClassificationReport.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="neVzLujPgDwK" import json import numpy as np import pandas as pd import sklearn from sklearn.preprocessing import LabelEncoder,OneHotEncoder from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.preprocessing import PolynomialFeatures from sklearn.model_selection import cross_val_score from sklearn.tree import DecisionTreeRegressor from sklearn.linear_model import LassoCV from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import accuracy_score import datetime import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import classification_report # + id="CKjG5ARZgOV-" outputId="bbe2cf8c-242e-46f6-c795-04f1ad6fd5bc" colab={"base_uri": "https://localhost:8080/"} # !pip install --upgrade tables # + id="9dqIVK46iVFr" np.random.seed(60) # + [markdown] id="wglRbEPkpUW6" # #Classification reports using the Decision Tree Regressor # + id="JPGK_6z8qX3q" # Access data store data_store = pd.HDFStore('flipped_data.h5') # Retrieve data using key flipped_df = data_store['flip_df'] data_store.close() # + id="Ewz-ngcyqX3z" outputId="2bfe20a2-4159-4c8d-f101-408907de2aba" colab={"base_uri": "https://localhost:8080/", "height": 309} flipped_df.head() # + id="jR2PmU5YqX39" outputId="08847684-9cfd-4421-e685-a1b868439674" colab={"base_uri": "https://localhost:8080/"} len(flipped_df) # + id="s7_xU9PNqQh5" enc = OneHotEncoder(handle_unknown='ignore') # + id="N8l-WAEiqQid" enc_df = 
pd.DataFrame(enc.fit_transform(flipped_df[['Winner_team','TOSS']]).toarray()) # + id="VhVXiwSrqQim" outputId="81a1cd85-8708-4cb9-925d-738fd712570d" colab={"base_uri": "https://localhost:8080/", "height": 309} flipped_df = flipped_df.join(enc_df) flipped_df.head() # + id="WCDT7XwBqQiw" flipped_df.rename(columns = {0: 'Win1',1: 'Win2',2: 'Toss1',3: 'Toss2'}, inplace = True) # + id="zzoYNLHqqQi4" labelencoder = LabelEncoder() flipped_df['Team_1Enc'] = labelencoder.fit_transform(flipped_df['Team_1']) flipped_df['Team_2Enc'] = labelencoder.fit_transform(flipped_df['Team_2']) # + id="lO-_ub5WqQi_" outputId="9b281144-2ecc-4e48-9e20-3f62b80b987c" colab={"base_uri": "https://localhost:8080/", "height": 309} flipped_df.head() # + id="RnMzG0m-rQHa" team_group = flipped_df.groupby(["Team_1", "Team_2"]).size().reset_index(name="Matches") # + id="lBBQ-_Ecsv32" outputId="5984ad77-3b19-49eb-be8d-e8bfbeda5be3" colab={"base_uri": "https://localhost:8080/", "height": 202} team_group.head() # + id="yDfSo64MtA6I" teams_list = team_group.values.tolist() # + id="roIf3A1FuaDv" outputId="d1f2a810-00fc-46e0-be9d-1653015f312e" colab={"base_uri": "https://localhost:8080/"} print(teams_list) # + [markdown] id="HFUcBWVJwVuJ" # ##Classification Report on Team vs Team # + id="osD_ywYFwj7d" def print_model_scores(model, data, y, predictors, target): ''' A generic function to generate the performance report of the model in question on the data passed to it Args: model: ML Model to be checked data: data on which the model needs to pe trained y: data containing the target variables predictors: independent feature variable target: target variable ''' model.fit(data[predictors].copy(), y[target].copy()) predictions = model.predict(data[predictors].copy()) rms = sklearn.metrics.mean_squared_error(predictions,y[target])**0.5 #print('RMS : %s' % '{0:.2%}'.format(rms)) r2 = sklearn.metrics.r2_score(predictions,y[target]) #print('R2 : %s' % '{0:.2%}'.format(r2)) return np.asarray(predictions) # + 
id="hG12XUD_wj7r" def winner_prediction_scores(model, data, y, predictors,winner): ''' A generic function to predict the winner for the model in question Args: model: ML Model to be checked data: data on which the model needs to be trained y: data containing the target variables predictors: independent feature variable winner: winning team ''' pred1 = print_model_scores(model, X_train, y_train,predictor_var, ['Score_1']) pred2 = print_model_scores(model, X_train, y_train,predictor_var, ['Score_2']) pred = pred1 - pred2 for i in range(len(pred)): if ((pred[i])>0): pred[i] = int(1) else: pred[i] = int(2) pred = pred.ravel() pred = pred.astype(np.int32) winner = np.asarray(winner).astype(np.int32) try: print("based on Scores: ") print(classification_report(winner, pred)) except: print(pred) print(pred.dtype) print(np.asarray(winner)) print(np.asarray(winner).dtype) # + id="Nztr730bxXnT" def winner_prediction_nrr(model, data, y, predictors,winner): pred = print_model_scores(model, X_train, y_train,predictor_var, ['NRR_team1']) for i in range(len(pred)): if ((pred[i])>0): pred[i] = int(1) elif (pred[i]==0): pred[i] = int(0) else: pred[i] = int(2) pred = pred.astype(np.int32) winner = np.asarray(winner).astype(np.int32) print("based on NRR: ") print(classification_report(winner, pred)) # + [markdown] id="ZPuHj7wwxvxk" # ###Model1 - Toss + GroundCode # + id="_xFHYHsMq8UB" outputId="ae32eb05-24ae-4581-eb96-d444bac892ba" colab={"base_uri": "https://localhost:8080/"} for x,y,_ in teams_list: df = flipped_df.loc[(flipped_df['Team_1'].isin([x])) & (flipped_df['Team_2'].isin([y]))].copy() X_data = df[['Date','Team_1Enc','Team_2Enc','Venue','GroundCode','TOSS','Toss1','Toss2']].copy() y_data = df[['Winner_team','Win1','Win2','Score_1','Score_2','NRR_team1','NRR_team2']].copy() if (len(X_data)==1): continue X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size = 1/5) print("Classification report for Teams",x,"vs",y) winner = y_train['Winner_team'].copy() 
predictor_var = ['Team_1Enc','Team_2Enc', 'GroundCode','TOSS'] model = DecisionTreeRegressor() winner_prediction_scores(model, X_train, y_train,predictor_var, winner) winner_prediction_nrr(model, X_train, y_train,predictor_var, winner) print('****************************************************') # + [markdown] id="seamE2VgQJmj" # ###Model2 - Toss + GroundCode + Venue # + id="KAnGRUyyQI7h" outputId="7a47dd96-877e-4361-dd87-d4bfd8475a83" colab={"base_uri": "https://localhost:8080/"} for x,y,_ in teams_list: df = flipped_df.loc[(flipped_df['Team_1'].isin([x])) & (flipped_df['Team_2'].isin([y]))].copy() X_data = df[['Date','Team_1Enc','Team_2Enc','Venue','GroundCode','TOSS','Toss1','Toss2']].copy() y_data = df[['Winner_team','Win1','Win2','Score_1','Score_2','NRR_team1','NRR_team2']].copy() if (len(X_data)==1): continue X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size = 1/5) print("Classification report for Teams",x,"vs",y) winner = y_train['Winner_team'].copy() predictor_var = ['Team_1Enc','Team_2Enc', 'GroundCode','TOSS','Venue'] model = DecisionTreeRegressor() winner_prediction_scores(model, X_train, y_train,predictor_var, winner) winner_prediction_nrr(model, X_train, y_train,predictor_var, winner) print('****************************************************') # + [markdown] id="v-UVClp5R04j" # ##Classification Report on Team vs All # + id="EZliP0oCWmNo" team_list = flipped_df.groupby(["Team_1"]).size().reset_index(name="Matches") # + id="CGK-37XuWne5" outputId="31a21749-d3ec-4309-b1e4-1495253672dc" colab={"base_uri": "https://localhost:8080/", "height": 202} team_list.head() # + id="wlBZIH1GXJll" teams_list = team_list.values.tolist() # + id="jt_XXyo4XLVI" outputId="b408e838-4947-416d-86c9-7a235f524491" colab={"base_uri": "https://localhost:8080/"} print(teams_list) # + id="zifqkJBER04m" def print_model_scores(model, data, y, predictors, target): ''' A generic function to generate the performance report of the model in question on the 
data passed to it Args: model: ML Model to be checked data: data on which the model needs to pe trained y: data containing the target variables predictors: independent feature variable target: target variable ''' model.fit(data[predictors].copy(), y[target].copy()) predictions = model.predict(data[predictors].copy()) rms = sklearn.metrics.mean_squared_error(predictions,y[target])**0.5 #print('RMS : %s' % '{0:.2%}'.format(rms)) r2 = sklearn.metrics.r2_score(predictions,y[target]) #print('R2 : %s' % '{0:.2%}'.format(r2)) return np.asarray(predictions) # + id="r8e9fAj5R04w" def winner_prediction_scores(model, data, y, predictors,winner): ''' A generic function to predict the winner for the model in question Args: model: ML Model to be checked data: data on which the model needs to be trained y: data containing the target variables predictors: independent feature variable winner: winning team ''' pred1 = print_model_scores(model, X_train, y_train,predictor_var, ['Score_1']) pred2 = print_model_scores(model, X_train, y_train,predictor_var, ['Score_2']) pred = pred1 - pred2 for i in range(len(pred)): if ((pred[i])>0): pred[i] = int(1) else: pred[i] = int(2) pred = pred.ravel() pred = pred.astype(np.int32) winner = np.asarray(winner).astype(np.int32) try: print("based on Scores: ") print(classification_report(winner, pred)) except: print(pred) print(pred.dtype) print(np.asarray(winner)) print(np.asarray(winner).dtype) # + id="0KIGygz6R044" def winner_prediction_nrr(model, data, y, predictors,winner): pred = print_model_scores(model, X_train, y_train,predictor_var, ['NRR_team1']) for i in range(len(pred)): if ((pred[i])>0): pred[i] = int(1) elif (pred[i]==0): pred[i] = int(0) else: pred[i] = int(2) pred = pred.astype(np.int32) winner = np.asarray(winner).astype(np.int32) print("based on NRR: ") print(classification_report(winner, pred)) # + [markdown] id="1ueXkNezR05D" # ###Model1 - Toss + GroundCode # + id="j5ez1oL2R05F" outputId="717cc950-1ed0-440c-a440-69e7fbfdda49" 
colab={"base_uri": "https://localhost:8080/"} for x,_ in teams_list: df = flipped_df.loc[(flipped_df['Team_1'].isin([x]))].copy() X_data = df[['Date','Team_1Enc','Team_2Enc','Venue','GroundCode','TOSS','Toss1','Toss2']].copy() y_data = df[['Winner_team','Win1','Win2','Score_1','Score_2','NRR_team1','NRR_team2']].copy() if (len(X_data)==1): continue X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size = 1/5) print("Classification report for Teams",x,"vs",y) winner = y_train['Winner_team'].copy() predictor_var = ['Team_1Enc','Team_2Enc', 'GroundCode','TOSS'] model = DecisionTreeRegressor() winner_prediction_scores(model, X_train, y_train,predictor_var, winner) winner_prediction_nrr(model, X_train, y_train,predictor_var, winner) print('****************************************************') # + [markdown] id="8eC8opvPR05O" # ###Model2 - Toss + GroundCode + Venue # + id="XRTLIKV1R05Q" outputId="55a359ea-a292-45a2-b139-21a5ffafe20a" colab={"base_uri": "https://localhost:8080/"} for x,_ in teams_list: df = flipped_df.loc[(flipped_df['Team_1'].isin([x]))].copy() X_data = df[['Date','Team_1Enc','Team_2Enc','Venue','GroundCode','TOSS','Toss1','Toss2']].copy() y_data = df[['Winner_team','Win1','Win2','Score_1','Score_2','NRR_team1','NRR_team2']].copy() if (len(X_data)==1): continue X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size = 1/5) print("Classification report for Teams",x,"vs",y) winner = y_train['Winner_team'].copy() predictor_var = ['Team_1Enc','Team_2Enc', 'GroundCode','TOSS','Venue'] model = DecisionTreeRegressor() winner_prediction_scores(model, X_train, y_train,predictor_var, winner) winner_prediction_nrr(model, X_train, y_train,predictor_var, winner) print('****************************************************')
analysis/ClassificationReport.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="VFOm3ahQ-3Mj" colab_type="code" outputId="68749248-f8fa-4154-ae87-81ad46aed2ee" executionInfo={"status": "ok", "timestamp": 1588801380250, "user_tz": 240, "elapsed": 44103, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4QNksupzJ_tvoHXK0P_PWwExCE1HBSRW-Afh7=s64", "userId": "08926911206443480647"}} colab={"base_uri": "https://localhost:8080/", "height": 122} from google.colab import drive drive.mount("/content/drive") # + id="IhCDRHcs_L5P" colab_type="code" colab={} import os os.chdir("/content/drive/Shared drives/CARD/projects/iNDI/line_prioritization/projects_lirong") # + id="DiFUWR13F9hv" colab_type="code" outputId="d5faede9-1012-4c3f-83af-952fc8a7ac1b" executionInfo={"status": "ok", "timestamp": 1588801502879, "user_tz": 240, "elapsed": 1654, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4QNksupzJ_tvoHXK0P_PWwExCE1HBSRW-Afh7=s64", "userId": "08926911206443480647"}} colab={"base_uri": "https://localhost:8080/", "height": 68} # %ls Florian_data/data/filtered_cellranger_matrix/ # + id="0hgHUk7k8waj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="22ab9b10-46e8-4f2e-d553-ff1cc5021fc7" executionInfo={"status": "ok", "timestamp": 1588807856030, "user_tz": 240, "elapsed": 279123, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4QNksupzJ_tvoHXK0P_PWwExCE1HBSRW-Afh7=s64", "userId": "08926911206443480647"}} language="bash" # cd Florian_data/ # start=$(date +%s) # #ls # python3 scanpy_QC_basic_florian_cortical_dopamin_3_and_4.py # #cat scanpy_QC_basic_florian_cortical_dopamin_3_and_4.py # end=$(date +%s) # difference=$(($end - $start)) # echo $difference
run_scanpy_py_florian.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploratory notebook: load a PdCoO2 crystal structure and work out how to
# cut a 2-D slab of unit cells along a chosen Miller direction for image
# simulation.

import json
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib as mpl
import numpy as np
import scipy as sp
from skimage import transform as skt
import os
import time
import multiprocessing as mp
import skimage.filters as skf
from scipy import ndimage as scnd
import hdf5storage as h5
from matplotlib_scalebar.scalebar import ScaleBar, SI_LENGTH_RECIPROCAL
import math
import ase.build as asb
import ase
import numba
import sys
from ncempy import io as emio
import pyfftw.interfaces as pfi
import pymcr
import stemtool as st
# %matplotlib inline
plt.rcParams['image.cmap'] = 'magma'
plt.rcParams.update({'font.size': 25})
plt.rcParams.update({'font.weight': 'bold'})
import warnings
warnings.filterwarnings('ignore')

# Load the CIF and pull out the cell vectors, Cartesian atom positions and
# atomic numbers.
pco = ase.io.read('/Users/7dm/Desktop/PdCoO2 APL/PdCoO2.cif')
pco.__dict__
cell_dim = pco._cellobj[0:3]
cell_pos = pco.positions
atoms = pco.numbers


def find_uc_pos(atom_pos, cell_dim):
    """Convert Cartesian atom positions to fractional unit-cell positions.

    Parameters
    ----------
    atom_pos : (N, 3) ndarray
        Cartesian atomic positions.
    cell_dim : (3, 3) ndarray
        Unit-cell vectors, one per row.

    Returns
    -------
    (N, 3) ndarray of fractional coordinates. Fractions whose reciprocal is
    within 1e-3 of an integer are snapped to the exact rational value, and
    coordinates equal to 1 are wrapped to 0.
    """
    uc_pos = np.zeros_like(atom_pos)
    # numba.prange behaves like plain range here because the function is not
    # @numba.njit-decorated; kept so a jit can be enabled later.
    for ii in numba.prange(len(uc_pos)):
        for jj in range(len(cell_dim)):
            cc = atom_pos[ii, :] / cell_dim[jj, :]
            cc[cc < 0] += 1        # wrap negative fractions into [0, 1)
            cc[cc == np.inf] = 0   # division by a zero cell component
            # (a no-op statement `cc[cc > 0.001]` -- a mask evaluated but
            # never assigned -- was removed here)
            uc_pos[ii, jj] = cc[jj]
    # Snap near-rational fractions: round 1/x to the nearest integer when it
    # is within 1e-3, then invert back.
    uc_nonzero = uc_pos != 0
    uc_inv = 1 / uc_pos[uc_nonzero]
    near_int = np.abs(uc_inv - np.round(uc_inv)) < 0.001
    uc_inv[near_int] = np.round(uc_inv[near_int])
    uc_pos[uc_nonzero] = 1 / uc_inv
    uc_pos[uc_pos == 1] = 0
    return uc_pos


st.tic()
uc_vals = find_uc_pos(cell_pos, cell_dim)
st.toc()
uc_vals


def vec_angles(vec1, vec2):
    """Return the angle between vec1 and vec2 in degrees.

    Computed from the dot product; results within 1e-3 of an integer number
    of degrees are rounded to that integer.
    """
    # Renamed from the misleading `cross_vec`: this is a dot product.
    dot_product = np.sum(np.multiply(vec1, vec2))
    cos_ang = dot_product / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
    vec_ang = np.arccos(cos_ang)
    vec_ang_d = vec_ang * (180 / np.pi)
    if np.abs(vec_ang_d - np.round(vec_ang_d)) < 0.001:
        vec_ang_d = np.round(vec_ang_d)
    return vec_ang_d


# Cartesian direction of the (1,-1,0) Miller vector, normalised to the
# chosen slice spacing.
vec_vals = (1, -1, 0)
vec_dir = np.matmul(np.transpose(cell_dim), np.asarray(vec_vals))
slice_spacing = 0.5
slice_vec = vec_dir / (np.linalg.norm(vec_dir) / slice_spacing)
slice_vec

# Angle between every (shifted) atom position and the slicing vector.
dir_angs = np.zeros(len(cell_pos))
for ii in range(len(dir_angs)):
    dir_angs[ii] = vec_angles(cell_pos[ii, :] + np.asarray((1, 1, 1)), slice_vec)
dir_angs

cell_lengths = (np.sum((cell_dim ** 2), axis=1)) ** 0.5
cell_lengths
miller_dir = np.asarray((1, 2, 0))


# +
def miller_inverse(miller):
    """Elementwise reciprocal of a Miller index vector, mapping 0 -> 0."""
    # np.float was removed in NumPy 1.24; use the builtin float dtype.
    miller_inv = np.empty_like(miller, dtype=float)
    miller_inv[miller == 0] = 0
    miller_inv[miller != 0] = 1 / miller[miller != 0]
    return miller_inv


def get_number_cells(miller_dir, length, cell_lengths):
    """Number of unit cells per axis needed to span `length` (angstroms,
    presumably -- same units as cell_lengths) along miller_dir."""
    miller_vec_ang = miller_inverse(miller_dir) * cell_lengths
    return np.ceil(miller_inverse(miller_dir / (length / np.linalg.norm(miller_vec_ang))))
# -

no_cells = get_number_cells(miller_dir, 200, cell_lengths)
no_cells
miller_dir, cell_lengths

# Lattice of candidate unit cells, plus the line along miller_dir for reference.
yy, xx = np.meshgrid(np.arange(0, int(no_cells[1]), 1), np.arange(0, int(no_cells[0]), 1))
yy = yy.ravel()
xx = xx.ravel()
vm = 2 + np.amax(no_cells)
xp = np.arange(vm)
yp = xp * (miller_dir[0] / miller_dir[1])
plt.figure(figsize=(15, 15))
plt.scatter(xx, yy)
plt.plot(xp, yp, 'r')
plt.xlim(-2, vm)
plt.ylim(-2, vm)

# Same grid in angstrom units.
yy_ang, xx_ang = np.meshgrid(np.arange(0, int(cell_lengths[1] * no_cells[1]), 1),
                             np.arange(0, int(cell_lengths[0] * no_cells[0]), 1))
yy_ang = np.ravel(yy_ang)
xx_ang = np.ravel(xx_ang)

np.cross(np.asarray((0, 0, 1)), miller_inverse(miller_dir))
miller_axis = miller_inverse(miller_dir) / np.linalg.norm(miller_inverse(miller_dir))
miller_test = (miller_axis == np.max(miller_axis))
coord_axes = np.arange(len(miller_test))
if (miller_dir[miller_test] < miller_axis[miller_test]):
    second_axis = (np.roll(miller_test, 1)).astype(int)
else:
    # NOTE(review): this branch was left unfinished in the original notebook
    # ("calc_axes = " with no right-hand side); placeholder keeps the script
    # syntactically valid -- confirm the intended computation.
    calc_axes = None

# NOTE(review): miller_vec_ang was previously only defined inside
# get_number_cells, so this cell raised NameError at module level; recompute
# it here the same way the helper does.
miller_vec_ang = miller_inverse(miller_dir) * cell_lengths
# Perpendicular distance of every angstrom-grid point from the miller line.
point_distances = np.abs((miller_vec_ang[1] * yy_ang) - (miller_vec_ang[0] * xx_ang)) / (((miller_vec_ang[1] ** 2) + (miller_vec_ang[0] ** 2)) ** 0.5)
np.amax(point_distances)

max_dist = 122
# Extend the lattice by max_dist on every side so the clipped slab is complete.
yy_new, xx_new = np.meshgrid(np.arange(0 - np.ceil(max_dist), int(no_cells[1]) + np.ceil(max_dist), 1),
                             np.arange(0 - np.ceil(max_dist), int(no_cells[0]) + np.ceil(max_dist), 1))
yy_new = yy_new.ravel()
xx_new = xx_new.ravel()
vm_new = 2 + np.amax((np.amax(yy_new), np.max(xx_new)))
plt.figure(figsize=(15, 15))
plt.scatter(xx_new, yy_new, c='b')
plt.scatter(xx, yy, c='y')
plt.plot(xp, yp, 'r')
plt.xlim(-vm_new, vm_new)
plt.ylim(-vm_new, vm_new)

# First pass: keep lattice points within max_dist of the line along miller_dir.
dists = np.abs((miller_dir[1] * yy_new) - (miller_dir[0] * xx_new)) / (((miller_dir[1] ** 2) + (miller_dir[0] ** 2)) ** 0.5)
xx_firstpass = xx_new[dists < max_dist]
yy_firstpass = yy_new[dists < max_dist]
# Second/third pass: clip the strip between two planes perpendicular to the
# line, anchored at (0, 0) and at (40, 81).
dist_angles = np.abs(np.arctan2((yy_firstpass - 0), (xx_firstpass - 0)) - np.arctan2(1, 2))
xx_secondpass = xx_firstpass[dist_angles < (np.pi / 2)]
yy_secondpass = yy_firstpass[dist_angles < (np.pi / 2)]
dist_angles2 = np.abs(np.arctan2((yy_secondpass - 81), (xx_secondpass - 40)) - np.arctan2(1, 2))
xx_thirdpass = xx_secondpass[dist_angles2 > (np.pi / 2)]
yy_thirdpass = yy_secondpass[dist_angles2 > (np.pi / 2)]

plt.figure(figsize=(15, 15))
plt.scatter(xx_thirdpass, yy_thirdpass, c='b')
plt.scatter(xx, yy, c='y')
plt.plot(xp, yp, 'r')
plt.xlim(-vm_new, vm_new)
plt.ylim(-vm_new, vm_new)

np.arctan2(2, 1)


def slabbing_2D(miller_dir, no_cells, max_hdist):
    """Return an (M, 2) array of (y, x) lattice points forming a 2-D slab.

    The slab is the set of integer lattice points within max_hdist of the
    line through the origin along miller_dir, clipped between two planes
    perpendicular to that line (consolidates the exploratory cells above).

    Parameters
    ----------
    miller_dir : length-3 Miller direction; only components [0] and [1] are used.
    no_cells   : number of unit cells along each axis.
    max_hdist  : maximum perpendicular distance (in cells) from the line.
    """
    yy, xx = np.meshgrid(np.arange(0, int(no_cells[1]), 1), np.arange(0, int(no_cells[0]), 1))
    yy = yy.ravel()
    xx = xx.ravel()
    xp = np.arange(np.amax((np.amax(yy), np.max(xx))))
    yp = xp * (miller_dir[0] / miller_dir[1])
    yy_new, xx_new = np.meshgrid(np.arange(0 - np.ceil(max_hdist), int(no_cells[1]) + np.ceil(max_hdist), 1),
                                 np.arange(0 - np.ceil(max_hdist), int(no_cells[0]) + np.ceil(max_hdist), 1))
    yy_new = yy_new.ravel()
    xx_new = xx_new.ravel()
    dists = np.abs((miller_dir[1] * yy_new) - (miller_dir[0] * xx_new)) / (((miller_dir[1] ** 2) + (miller_dir[0] ** 2)) ** 0.5)
    xx_firstpass = xx_new[dists < max_hdist]
    yy_firstpass = yy_new[dists < max_hdist]
    dist_angles = np.abs(np.arctan2((yy_firstpass - 0), (xx_firstpass - 0)) - np.arctan2(miller_dir[0], miller_dir[1]))
    xx_secondpass = xx_firstpass[dist_angles < (np.pi / 2)]
    yy_secondpass = yy_firstpass[dist_angles < (np.pi / 2)]
    # NOTE(review): the clipping-plane anchor (40, 81) is hard-coded from the
    # exploratory cells above -- confirm before reusing with other no_cells.
    dist_angles2 = np.abs(np.arctan2((yy_secondpass - 81), (xx_secondpass - 40)) - np.arctan2(miller_dir[0], miller_dir[1]))
    xx_thirdpass = xx_secondpass[dist_angles2 > (np.pi / 2)]
    yy_thirdpass = yy_secondpass[dist_angles2 > (np.pi / 2)]
    vals = np.asarray((yy_thirdpass, xx_thirdpass))
    return vals.transpose()
docs/examples/ImageSimulation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#default_exp nodes
# -

# %load_ext autoreload
# %autoreload 2

# +
#hide
#import sys
#sys.path.append("..")
# -

# # Nodes
#
# > A node is a single control unit representing a feedback control loop.

# ## Overview

# A node comprises four functions, reference, perceptual, comparator and output. Executing the node will run each of the functions in the order indicated above and return the output value.
#
# The functions can actually be a collection of functions, each executed in the order they are added. This allows a chain of functions in case pre-processing is required, or post-processing in the case of the output.

# +
#export
import enum
import uuid
from pct.putils import UniqueNamer
from pct.putils import FunctionsList
from pct.putils import dynamic_module_import
from pct.functions import FunctionFactory
from pct.functions import ControlUnitFunctions
import json
from pct.functions import *
# -

#export
# Index positions of the four control-unit functions.
class ControlUnitIndices(enum.IntEnum):
    PER_INDEX = 0
    OUT_INDEX = 1
    REF_INDEX = 2
    ACT_INDEX = 3

#export
class PCTNode():
    "A single PCT controller."
    # NOTE: nx and plt used by the graph/draw methods below presumably come in
    # via the star import from pct.functions -- verify.

    def __init__(self, reference=None, perception=None, comparator=None, output=None, default=True,
                 name="pctnode", history=False, build_links=False, mode=0, namespace=None, **pargs):
        """Create a node; with default=True any function not supplied is built
        by FunctionFactory according to `mode` (see table below)."""
        # mode
        # 0 - per:var, ref:con, com:sub, out:prop
        # 1 - per:ws, ref:ws, com:sub, out:prop
        # 2 - per:ws, ref:con, com:sub, out:prop
        # 3 - per:ws, ref:ws, com:sub, out:ws
        # 4 - per:ws, ref:con, com:sub, out:ws
        # 5 - per:ws, ref:con, com:sub, out:smws
        # 6 - per:ws, ref:ws, com:sub, out:smws
        if namespace ==None:
            namespace = uuid.uuid1()
        self.namespace=namespace
        self.links_built = False
        self.history = None
        if history:
            self.history = PCTNodeData()
        # Register this node under a namespace-unique name in the global list.
        self.name = UniqueNamer.getInstance().get_name(self.namespace, name)
        FunctionsList.getInstance().add_function(self.namespace, self)
        if default:
            if perception==None:
                perception = FunctionFactory.createFunctionWithNamespace(PCTNode.get_function_type(mode, ControlUnitFunctions.PERCEPTION), namespace=namespace)
            self.perceptionCollection = [perception]
            if reference==None:
                reference = FunctionFactory.createFunctionWithNamespace(PCTNode.get_function_type(mode, ControlUnitFunctions.REFERENCE), namespace=namespace)
            self.referenceCollection = [reference]
            if comparator==None:
                comparator = FunctionFactory.createFunctionWithNamespace(PCTNode.get_function_type(mode, ControlUnitFunctions.COMPARATOR), namespace=namespace)
            self.comparatorCollection = [comparator]
            if output==None:
                output = FunctionFactory.createFunctionWithNamespace(PCTNode.get_function_type(mode, ControlUnitFunctions.OUTPUT), namespace=namespace)
            self.outputCollection = [output]
        if build_links:
            self.build_links()

    def __call__(self, verbose=False):
        """Run one control iteration: reference, perception, comparator then
        output collections in order; return the last output's value."""
        if not self.links_built:
            self.build_links()
        for referenceFunction in self.referenceCollection:
            referenceFunction(verbose)
        for perceptionFunction in self.perceptionCollection:
            perceptionFunction(verbose)
        for comparatorFunction in self.comparatorCollection:
            comparatorFunction(verbose)
        for outputFunction in self.outputCollection:
            outputFunction(verbose)
        self.output = self.outputCollection[-1].get_value()
        if verbose:
            print()
        if not self.history == None:
            self.history.add_data(self)
        return self.output

    def set_name(self, name):
        self.name=name

    def get_name(self):
        return self.name

    def get_function_from_collection(self, collection, position=-1):
        """Look up a function by ControlUnitFunctions enum value."""
        if collection == ControlUnitFunctions.REFERENCE:
            func = self.referenceCollection[position]
            return func
        if collection == ControlUnitFunctions.PERCEPTION:
            func = self.perceptionCollection[position]
            return func
        if collection == ControlUnitFunctions.COMPARATOR:
            func = self.comparatorCollection[position]
            return func
        if collection == ControlUnitFunctions.OUTPUT:
            func = self.outputCollection[position]
            return func
        # NOTE(review): if no case matched, `func` is unbound here and this
        # raises UnboundLocalError rather than returning None.
        return func

    def reset_checklinks(self, val=True):
        # Propagate the checklinks flag to every function in the node.
        for func in self.referenceCollection:
            func.reset_checklinks(val)
        for func in self.perceptionCollection:
            func.reset_checklinks(val)
        for func in self.comparatorCollection:
            func.reset_checklinks(val)
        for func in self.outputCollection:
            func.reset_checklinks(val)

    def get_function(self, collection, position=-1):
        """String-keyed variant of get_function_from_collection
        ("reference"/"perception"/"comparator"/"output")."""
        if collection == "reference":
            func = self.referenceCollection[position]
        if collection == "perception":
            func = self.perceptionCollection[position]
        if collection == "comparator":
            func = self.comparatorCollection[position]
        if collection == "output":
            func = self.outputCollection[position]
        return func

    def get_perception_value(self, position=-1):
        return self.perceptionCollection[position].get_value()

    def set_perception_value(self, value, position=-1):
        self.perceptionCollection[position].set_value(value)

    def add_link(self, collection, link):
        # Incoming links attach to the FIRST reference/perception function but
        # to the LAST comparator/output function.
        if collection == "reference":
            self.referenceCollection[0].add_link(link)
        if collection == "perception":
            self.perceptionCollection[0].add_link(link)
        if collection == "comparator":
            self.comparatorCollection[-1].add_link(link)
        if collection == "output":
            self.outputCollection[-1].add_link(link)

    def build_links(self):
        """Chain each collection internally, then wire reference+perception
        into the comparator and the comparator into the output."""
        if len(self.referenceCollection)>0:
            link = self.referenceCollection[0]
            for i in range (1, len(self.referenceCollection)):
                self.referenceCollection[i].add_link(link)
                link = self.referenceCollection[i]
        if len(self.perceptionCollection)>0:
            link = self.perceptionCollection[0]
            for i in range (1, len(self.perceptionCollection)):
                self.perceptionCollection[i].add_link(link)
                link = self.perceptionCollection[i]
        self.comparatorCollection[0].add_link(self.referenceCollection[-1])
        self.comparatorCollection[0].add_link(self.perceptionCollection[-1])
        if len(self.comparatorCollection)>1:
            link = self.comparatorCollection[1]
            for i in range (1, len(self.comparatorCollection)):
                self.comparatorCollection[i].add_link(link)
                link = self.comparatorCollection[i]
        self.outputCollection[0].add_link(self.comparatorCollection[-1])
        if len(self.outputCollection)>0:
            link = self.outputCollection[0]
            for i in range (1, len(self.outputCollection)):
                self.outputCollection[i].add_link(link)
                link = self.outputCollection[i]
        self.links_built = True

    def run(self, steps=None, verbose=False):
        """Run the node `steps` times and return the last output.
        NOTE(review): steps=None raises TypeError (range(None)), and steps=0
        leaves `out` unbound -- callers must pass steps >= 1."""
        for i in range(steps):
            out = self(verbose)
        return out

    def set_output(self, value):
        self.outputCollection[-1].set_value(value)

    def get_output_function(self):
        return self.outputCollection[-1]

    def set_function_name(self, collection, name, position=-1):
        if collection == "reference":
            self.referenceCollection[position].set_name(name)
        if collection == "perception":
            self.perceptionCollection[position].set_name(name)
        if collection == "comparator":
            self.comparatorCollection[position].set_name(name)
        if collection == "output":
            self.outputCollection[position].set_name(name)

    def replace_function(self, collection, function, position=-1):
        if collection == "reference":
            self.referenceCollection[position] = function
        if collection == "perception":
            self.perceptionCollection[position] = function
        if collection == "comparator":
            self.comparatorCollection[position] = function
        if collection == "output":
            self.outputCollection[position] = function

    def insert_function(self, collection, function, position=-1):
        # NOTE(review): identical body to replace_function -- it REPLACES the
        # entry at `position` rather than inserting; confirm intent.
        if collection == "reference":
            self.referenceCollection[position] = function
        if collection == "perception":
            self.perceptionCollection[position] = function
        if collection == "comparator":
            self.comparatorCollection[position] = function
        if collection == "output":
            self.outputCollection[position] = function

    def summary(self, build=True):
        """Print a one-screen summary of all four function collections."""
        if build:
            if not self.links_built:
                self.build_links()
        print(self.name, type(self).__name__)
        print("----------------------------")
        print("REF:", end=" ")
        for referenceFunction in self.referenceCollection:
            referenceFunction.summary()
        print("PER:", end=" ")
        for perceptionFunction in self.perceptionCollection:
            perceptionFunction.summary()
        print("COM:", end=" ")
        for comparatorFunction in self.comparatorCollection:
            comparatorFunction.summary()
        print("OUT:", end=" ")
        for outputFunction in self.outputCollection:
            outputFunction.summary()
        print("----------------------------")

    # NOTE(review): mutable default argument `layout={...}` is shared across
    # calls (harmless while it is only read, but fragile).
    def graph(self, layer=0, layout={'r':2,'c':1,'p':2, 'o':0}):
        graph = nx.DiGraph()
        self.set_graph_data(graph, layer=layer, layout=layout)
        return graph

    def validate(self, num_lower_perceptions=None, num_higher_outputs=None):
        if num_higher_outputs is not None:
            for func in self.referenceCollection:
                func.validate(num_higher_outputs)
        for func in self.perceptionCollection:
            func.validate(num_lower_perceptions)

    def clear_values(self):
        # Zero every function's value without touching the link structure.
        for referenceFunction in self.referenceCollection:
            referenceFunction.value = 0
        for comparatorFunction in self.comparatorCollection:
            comparatorFunction.value = 0
        for perceptionFunction in self.perceptionCollection:
            perceptionFunction.value = 0
        for outputFunction in self.outputCollection:
            outputFunction.value = 0

    def change_namespace(self, namespace):
        for referenceFunction in self.referenceCollection:
            referenceFunction.change_namespace(namespace)
        for comparatorFunction in self.comparatorCollection:
            comparatorFunction.change_namespace(namespace)
        for perceptionFunction in self.perceptionCollection:
            perceptionFunction.change_namespace(namespace)
        for outputFunction in self.outputCollection:
            outputFunction.change_namespace(namespace)

    # NOTE(review): set_graph_data_node is defined TWICE in this class; this
    # first definition is shadowed by the simpler one further down.
    def set_graph_data_node(self, graph, layer=0):
        graph.add_node(self.name, layer=layer)
        for referenceFunction in self.referenceCollection:
            referenceFunction.set_graph_data(graph, layer+2)
        for perceptionFunction in self.perceptionCollection:
            perceptionFunction.set_graph_data(graph, layer+2)

    def set_graph_data(self, graph, layer=0, layout={'r':2,'c':1,'p':2, 'o':0}):
        # layout maps each collection to a layer offset for multipartite drawing.
        for referenceFunction in self.referenceCollection:
            referenceFunction.set_graph_data(graph, layer+layout['r'])
        for comparatorFunction in self.comparatorCollection:
            comparatorFunction.set_graph_data(graph, layer+layout['c'])
        for perceptionFunction in self.perceptionCollection:
            perceptionFunction.set_graph_data(graph, layer+layout['p'])
        for outputFunction in self.outputCollection:
            outputFunction.set_graph_data(graph, layer+layout['o'])

    def get_edge_labels(self, labels):
        for func in self.referenceCollection:
            func.get_weights_labels(labels)
        for func in self.comparatorCollection:
            func.get_weights_labels(labels)
        for func in self.perceptionCollection:
            func.get_weights_labels(labels)
        for func in self.outputCollection:
            func.get_weights_labels(labels)

    def get_node_list(self, node_list):
        # Map every contained function name back to this node's name.
        for func in self.referenceCollection:
            node_list[func.get_name()] = self.name
        for func in self.comparatorCollection:
            node_list[func.get_name()] = self.name
        for func in self.perceptionCollection:
            node_list[func.get_name()] = self.name
        for func in self.outputCollection:
            node_list[func.get_name()] = self.name

    def get_parameters_list(self):
        """Return [reference, perception, output] parameter lists (the
        comparator is deliberately excluded)."""
        ref_list = []
        for func in self.referenceCollection:
            ref_list.append(func.get_parameters_list())
        per_list = []
        for func in self.perceptionCollection:
            per_list.append(func.get_parameters_list())
        out_list = []
        for func in self.outputCollection:
            out_list.append(func.get_parameters_list())
        node_list = [ref_list, per_list, out_list]
        return node_list

    def change_link_name(self, old_name, new_name):
        # Rewrite any link that points at old_name across all collections.
        for func in self.referenceCollection:
            func.links = [new_name if i==old_name else i for i in func.links ]
        for func in self.comparatorCollection:
            func.links = [new_name if i==old_name else i for i in func.links ]
        for func in self.perceptionCollection:
            func.links = [new_name if i==old_name else i for i in func.links ]
        for func in self.outputCollection:
            func.links = [new_name if i==old_name else i for i in func.links ]

    # NOTE(review): duplicate definition -- this shadows the fuller
    # set_graph_data_node defined above.
    def set_graph_data_node(self, graph, layer=0):
        graph.add_node(self.name, layer=layer)

    def graph_node(self, layer=0):
        graph = nx.DiGraph()
        self.set_graph_data_node(graph, layer=layer)
        return graph

    def draw_node(self, with_labels=True, font_size=12, font_weight='bold', node_color='red',
                  node_size=500, arrowsize=25, align='horizontal', file=None, figsize=(5,5), move={}):
        # `file` and `move` are currently unused.
        graph = self.graph_node()
        pos = nx.multipartite_layout(graph, subset_key="layer", align=align)
        plt.figure(figsize=figsize)
        nx.draw(graph, pos=pos, with_labels=with_labels, font_size=font_size, font_weight=font_weight,
                node_color=node_color, node_size=node_size, arrowsize=arrowsize)

    def draw(self, with_labels=True, font_size=12, font_weight='bold', node_color='red',
             node_size=500, arrowsize=25, align='horizontal', file=None, figsize=(5,5), move={}):
        # `file` and `move` are currently unused.
        graph = self.graph()
        pos = nx.multipartite_layout(graph, subset_key="layer", align=align)
        plt.figure(figsize=figsize)
        nx.draw(graph, pos=pos, with_labels=with_labels, font_size=font_size, font_weight=font_weight,
                node_color=node_color, node_size=node_size, arrowsize=arrowsize)

    def get_config(self):
        """Return a JSON-serialisable dict describing this node and its four
        function collections (inverse of from_config)."""
        config = {"type": type(self).__name__, "name": self.name}
        coll_name = 'refcoll'
        collection = self.referenceCollection
        config[coll_name] = self.get_collection_config(coll_name, collection)
        coll_name = 'percoll'
        collection = self.perceptionCollection
        config[coll_name] = self.get_collection_config(coll_name, collection)
        coll_name = 'comcoll'
        collection = self.comparatorCollection
        config[coll_name] = self.get_collection_config(coll_name, collection)
        coll_name = 'outcoll'
        collection = self.outputCollection
        config[coll_name] = self.get_collection_config(coll_name, collection)
        return config

    def get_collection_config(self, coll_name, collection):
        # Keys are stringified positions: {"0": cfg, "1": cfg, ...}.
        coll = {}
        ctr=0
        for func in collection:
            coll[str(ctr)] = func.get_config()
            ctr+=1
        return coll

    def save(self, file=None, indent=4):
        """Write this node's config as JSON to `file`."""
        jsondict = json.dumps(self.get_config(), indent=indent)
        f = open(file, "w")
        f.write(jsondict)
        f.close()

    @classmethod
    def load(cls, file):
        """Recreate a node from a JSON file written by save()."""
        with open(file) as f:
            config = json.load(f)
        return cls.from_config(config)

    @classmethod
    def from_config(cls, config, namespace=None):
        """Build a PCTNode from a config dict (see get_config for the shape)."""
        node = PCTNode(default=False, name=config['name'], namespace=namespace)
        namespace= node.namespace
        node.referenceCollection = []
        collection = node.referenceCollection
        coll_dict = config['refcoll']
        PCTNode.collection_from_config(collection, coll_dict, namespace)
        node.perceptionCollection = []
        collection = node.perceptionCollection
        coll_dict = config['percoll']
        PCTNode.collection_from_config(collection, coll_dict, namespace)
        node.comparatorCollection = []
        collection = node.comparatorCollection
        coll_dict = config['comcoll']
        PCTNode.collection_from_config(collection, coll_dict, namespace)
        node.outputCollection = []
        collection = node.outputCollection
        coll_dict = config['outcoll']
        PCTNode.collection_from_config(collection, coll_dict, namespace)
        # Links are assumed to be encoded in the configs, so skip build_links.
        node.links_built = True
        return node

    @classmethod
    def collection_from_config(node, collection, coll_dict, namespace):
        #print("collection_from_config", coll_dict)
        for fndict_label in coll_dict:
            #print("fndict_label",fndict_label)
            fndict = coll_dict[fndict_label]
            #print(fndict)
            fnname = fndict.pop('type')
            #print(fndict)
            #func = eval(fnname).from_config(fndict, namespace)
            func = FunctionFactory.createFunctionFromConfig(fnname, namespace, fndict)
            collection.append(func)

    @classmethod
    def get_function_type(cls, mode, function):
        """Map (mode, function-role) to the factory type name used to build
        the default function for that role. NOTE: `type` shadows the builtin."""
        type = 'WeightedSum'
        if function == ControlUnitFunctions.PERCEPTION:
            if mode == 0 :
                type = 'Variable'
        if function == ControlUnitFunctions.REFERENCE:
            if mode == 0 or mode == 2 or mode == 4 or mode == 5 :
                type = 'Constant'
        if function == ControlUnitFunctions.COMPARATOR:
            type = 'Subtract'
        if function == ControlUnitFunctions.OUTPUT:
            if mode < 3:
                type = 'Proportional'
            elif mode == 5 or mode == 6 :
                type = 'SmoothWeightedSum'
        return type

# +
#node = PCTNode()
#node.summary()
# -

#export
class PCTNodeData():
    "Data collected for a PCTNode"
    def __init__(self, name="pctnodedata"):
        # `name` is currently unused. Per-collection map: function name -> list of values.
        self.data = { "refcoll":{}, "percoll":{}, "comcoll":{}, "outcoll":{}}

    def add_data(self, node):
        """Record the current value of every function in `node`."""
        ctr = 0
        self.add_collection( node.referenceCollection, "refcoll")
        self.add_collection( node.perceptionCollection, "percoll")
        self.add_collection( node.comparatorCollection, "comcoll")
        self.add_collection( node.outputCollection, "outcoll")

    def add_collection(self, collection, collname):
        for func in collection:
            if self.data[collname].get(func.get_name()) == None:
                dlist=[]
                cdict={func.get_name():dlist}
                # NOTE(review): this REPLACES the whole collection dict, so any
                # previously recorded functions in `collname` are dropped when a
                # new function name first appears -- confirm intent.
                self.data[collname]=cdict
            else:
                dlist = self.data[collname][func.get_name()]
            dlist.append(func.get_value())

# ## Creating a Node
#
# A node can be created simply.

node = PCTNode()
node.summary()

# That creates a node with default functions. Those are, a constant of 1 for the reference, a variable, with initial value 0, for the perception and a proportional function for the output, with a gain of 10.

# A node can also be created by providing a name, and setting the history to True. The latter means that the values of all the functions are recorded during execution, which is useful for plotting the data later, as can be seen below.

dynamic_module_import( 'pct.functions', 'Constant')
reference = Constant(1)
namespace=reference.namespace
node = PCTNode(name="mypctnode", history=True, reference = reference, output=Proportional(10, namespace=namespace), namespace=namespace)
node.summary()

# Another way of creating a node is by first declaring the functions you want and passing them into the constructor.
# Build a node from explicitly constructed functions; the namer is cleared so
# the chosen function names are guaranteed to be available.
UniqueNamer.getInstance().clear()
r = Variable(0, name="velocity_reference")
p = Constant(10, name="constant_perception")
o = Integration(10, 100, name="integrator")
integratingnode = PCTNode(reference=r, perception=p, output=o, name="integratingnode", history=True)

# Yet another way to create a node is from a text configuration.

# NOTE(review): `namespace` here is the one left over from the earlier
# "mypctnode" cell -- this cell depends on running the notebook in order.
config_node = PCTNode.from_config({
    'name': 'mypctnode',
    'refcoll': {'0': {'type': 'Proportional', 'name': 'proportional', 'value': 0, 'links': {}, 'gain': 10}},
    'percoll': {'0': {'type': 'Variable', 'name': 'velocity', 'value': 0.2, 'links': {}}},
    'comcoll': {'0': {'type': 'Subtract', 'name': 'subtract', 'value': 1, 'links': {0: 'constant', 1: 'velocity'}}},
    'outcoll': {'0': {'type': 'Proportional', 'name': 'proportional', 'value': 10, 'links': {0: 'subtract'}, 'gain': 10}}},
    namespace=namespace)

# ## Viewing Nodes
#
# The details of a node can be viewed in a number of ways, which is useful for checking the configuration. The summary method prints to the screen. The get_config method returns a string in a JSON format.

integratingnode.summary()

#print(integratingnode.get_config())
# Regression check: the config round-trips exactly as constructed above.
assert integratingnode.get_config() == {'type': 'PCTNode', 'name': 'integratingnode',
    'refcoll': {'0': {'type': 'Variable', 'name': 'velocity_reference', 'value': 0, 'links': {}}},
    'percoll': {'0': {'type': 'Constant', 'name': 'constant_perception', 'value': 10, 'links': {}}},
    'comcoll': {'0': {'type': 'Subtract', 'name': 'subtract', 'value': 0, 'links': {0: 'velocity_reference', 1: 'constant_perception'}}},
    'outcoll': {'0': {'type': 'Integration', 'name': 'integrator', 'value': 0, 'links': {0: 'subtract'}, 'gain': 10, 'slow': 100}}}

integratingnode.get_config()

# A node can also be viewed graphically as a network of connected nodes.

# Drawing is gated to Windows here, presumably for environment reasons -- confirm.
import os
if os.name=='nt':
    integratingnode.draw(node_size=2000, figsize=(8,4))

# ## Running a Node
#
# For the purposes of this example we first create a function which is a very basic model of the physical environment. It defines how the world behaves when we pass it the output of the control system.

# +
def velocity_model(velocity, force , mass):
    # Toy physics: one Euler step of dv = F/m (time step of 1).
    velocity = velocity + force / mass
    return velocity

mass = 50
force = 0
# -

# In the following cell we start with a velocity of zero. The node is run once (second line), the output of which is the force to apply in the world velocity_model. That returns the updated velocity which we pass back into the node to be used in the next iteration of the loop.

velocity=0
force = node()
velocity = velocity_model(velocity, force, mass)
node.set_perception_value(velocity)
print(force)
assert force == 10

# The node can be run in a loop as shown below. With verbose set to True the output of each loop will be printed to the screen.

# +
pctnode = PCTNode(history=True)
pctnode.set_function_name("perception", "velocity")
pctnode.set_function_name("reference", "reference")

# Closed loop: node output -> environment model -> node perception.
for i in range(40):
    print(i, end=" ")
    force = pctnode(verbose=True)
    vel = velocity_model(pctnode.get_perception_value(), force, mass)
    pctnode.set_perception_value(vel)
# -

# ## Save and Load

# Save a node to file.

import json
integratingnode.save("inode.json")

# Create a node from file.

nnode = PCTNode.load("inode.json")
print(nnode.get_config())

# ## Plotting the Data
#
# As the history of the variable pctnode was set to True the data is available for analysis. It can be plotted with python libraries such as matplotlib or plotly. Here is an example with the latter.
#
# The graph shows the changing perception values as it is controlled to match the reference value.

# ```python
# import plotly.graph_objects as go
# fig = go.Figure(layout_title_text="Velocity Goal")
# fig.add_trace(go.Scatter(y=pctnode.history.data['refcoll']['reference'], name="ref"))
# fig.add_trace(go.Scatter(y=pctnode.history.data['percoll']['velocity'], name="perc"))
# ```

# This following code is only for the purposes of displaying image of the graph generated by the above code.

from IPython.display import Image
Image(url='http://www.perceptualrobots.com/wp-content/uploads/2020/08/pct_node_plot.png')

# +
# #%nbdev_hide
#from nbdev import *
#notebook2script()
# -
nbs/.ipynb_checkpoints/03_nodes-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Expectations of test functions # # The expected value of a function $\phi(X): \mathcal{X} \rightarrow \mathcal{R}$ is defined as # # $$ # E[\phi(X)] = \int \phi(X) p(X) dx # $$ # # * Data distribution: $p(X)$ # * Test function: $\phi(X)$ # # Intuitively, this is the average value that the function $\phi$ take when given random inputs $X$ with a distribution of $p(X)$. # # Some test functions are special # # Mean # # $\phi(X) = X$ # # $$ # E[X] = \int p(X) X dx = \int X \mu(dx) # $$ # # # # Variance # # $\phi(X) = (X - E[X])^2$ # # $$ # Var[X] = E[(X - E[X])^2] = \int p(X) (X - E[X])^2 dx # $$ # # # Covariance # # Data distribution: $p(X, Y)$ # # $$ # \phi = (X-E[X])(Y - E[Y]) # $$ # # $$ # Cov[X,Y] = E[(X-E[X])(Y - E[Y])] # $$ # # # # Correlation Coefficient # # $$ # \rho(X,Y) = \frac{Cov[X,Y]}{\sqrt{Var[X]Var[Y]}} # $$ # # $$ # -1 \leq \rho\leq 1 # $$ # # Emprical distributions # # Suppose we are given a dataset $X = \{x_1, x_2, \dots, x_N\}$ # # $$ # \tilde{p}(x) = \frac{1}{N}\sum_{i=1}^N \delta(x - x_i) # $$ # # ## Emprical bivariate distribution # # Dataset of pairs $X = \{(x_1,y_1), (x_2,y_2), \dots, (x_N, y_N)\}$ # # $$ # \tilde{p}(x, y) = \frac{1}{N}\sum_{i=1}^N \delta(x - x_i)\delta(y - y_i) # $$ # # # Sample average and sample variance # # Compute expectations with respect to the emprical distribution # # $$ # E[x] = \int x \tilde{p}(x) dx = \int x \frac{1}{N}\sum_{i=1}^N \delta(x - x_i) dx = \frac{1}{N}\sum_{i=1}^N x_i \equiv s_1/N # $$ # # $$ # Var[x] = \int (x-E[x])^2 \tilde{p}(x) dx = E[x^2] - m^2 = \frac{1}{N}\sum_{i=1}^N x^2_i - \frac{1}{N^2}s_1^2 \equiv # \frac{1}{N}s_2 - \frac{1}{N^2}s_1^2 # $$ # # Here, $s_1 = \sum_{i=1}^N x_i$ and $s_2 = \sum_{i=1}^N x_i^2$ are known as the first and second (sample) moments, respectively. 
# # # Generative models # # <img src='pharma2.jpg' width=200 align='left' caption='Image: www.pharmamanufacturing.com'> # A generative model is a computational procedure with random inputs that describes how to simulate a dataset $X$. The model defines a joint distribution of the variables of the dataset and possibly additional hidden (unobserved) variables and parameters $H$ to aid the data generation mechanism, denoted as $p(X, H)$. # # A new terminology for a generative model is a _probabilistic program_. # # Given a generative model and a dataset, the posterior distribution over the hidden variables can be computed via Bayesian inference $P(H|X)$. The hidden variables and parameters provide explanations for the observed data. # ## Generative Model Example # # \begin{eqnarray} # w & \sim & \mathcal{U}(0,1) \\ # u & = & \cos(2\pi w) # \end{eqnarray} # + # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np N = 50 u = np.cos(2*np.pi*np.random.rand(N)) plt.figure(figsize=(6,2)) plt.plot(u, np.zeros_like(u), 'o') plt.show() # + N = 500 u = np.cos(2*np.pi*np.random.rand(N)) plt.figure(figsize=(6,2)) plt.hist(u, bins=30) plt.show() # - # ## Generative Model Example # # \begin{eqnarray} # w & \sim & \mathcal{U}(0,1) \\ # u & = & \cos(2\pi w) \\ # e & \sim & \mathcal{N}\left(0, (\sigma u)^2 \left(\begin{array}{cc} 1 & 0\\ 0 & 1\\\end{array}\right) \right) \\ # x & \sim & \left(\begin{array}{c} \theta_1 \\ \theta_2 \end{array} \right)u + e # \end{eqnarray} # + # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np N = 100 sigma = 0.8 theta = np.mat([3,-1]).T u = np.cos(2*np.pi*np.random.rand(1,N)) X = theta*u X = X + sigma*u*np.random.randn(X.shape[0],X.shape[1]) plt.figure(figsize=(6,6)) plt.plot(X[0,:],X[1,:],'k.') plt.show() # + import seaborn as sns import pandas as pd sns.set(color_codes=True) plt.figure(figsize=(5,5)) df = pd.DataFrame(X.T, columns=['x','y']) sns.jointplot(x="x", 
y="y", data=df); plt.show() # - # ## Generative Model Example # # \begin{eqnarray} # w & \sim & \mathcal{U}(w; 0,2\pi) \\ # \epsilon & \sim & \mathcal{N}(\epsilon; 0, I) \\ # u & = & \left(\begin{array}{c} \mu_1 \\ \mu_2 \end{array}\right) + \left(\begin{array}{cc} s_1 & 0 \\ 0& s_2 \end{array}\right) \left(\begin{array}{c} \cos(w) \\ \sin(w) \end{array}\right) + \left(\begin{array}{cc} \sigma_1 & 0 \\ 0& \sigma_2 \end{array}\right) \left(\begin{array}{c} \epsilon_1 \\ \epsilon_2 \end{array}\right) # \end{eqnarray} # + # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np N = 100 sigma_1 = 0.1 sigma_2 = 0.0 mu_1 = 5 mu_2 = 5 s_1 = 1 s_2 = 3 w = 2*np.pi*np.random.rand(1,N) u1 = mu_1 + s_1*np.cos(w) + sigma_1*np.random.randn(1,N) u2 = mu_2 + s_2*np.sin(w) + sigma_2*np.random.randn(1,N) plt.figure(figsize=(6,6)) plt.plot(u1, u2,'k.') plt.axis('equal') plt.show() for i in range(N): print('%3.3f %3.3f' % (u1[0,i],u2[0,i] )) # - # ## Generative Model Example # # \begin{eqnarray} # w & \sim & \mathcal{U}(0,1) \\ # u & = & 2 w - 1 \\ # x|u & \sim & \mathcal{N}\left(x; u^2, r \right) # \end{eqnarray} # # + # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np N = 100 r = 0.01 u = 2*np.random.randn(1,N)-1 x = u**2 + np.sqrt(r)*np.random.randn(1,N) plt.figure(figsize=(6,6)) plt.plot(u,x,'k.') plt.xlabel('u') plt.ylabel('x') plt.show() # - # ## Generative Model Example (Principal Components Analysis) # # $h \in \mathbb{R}^{D_h}$, $x \in \mathbb{R}^{D_x}$, $A \in \mathbb{R}^{{D_x}\times {D_h}}$, $r\in \mathbb{R}^+$ # # \begin{eqnarray} # h & \sim & {\mathcal N}(h; 0, I) \\ # x|h & \sim & {\mathcal N}(x; A h, rI) # \end{eqnarray} # # + # %matplotlib inline from IPython.display import display, Math, Latex import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from notes_utilities import pnorm_ball_points from notes_utilities import mat2latex import pandas as pd import 
seaborn as sns # Number of points N = 30 # Parameters A = np.mat('[3;-1]') r = 0.1 Dh = 1 Dx = 2 h = np.random.randn(Dh, N) y = A*h + np.sqrt(r)*np.random.randn(Dx, N) #sns.jointplot(x=y[0,:], y=y[1,:]); plt.figure(figsize=(5,5)) plt.scatter(y[0,:],y[1,:]) plt.xlabel('y_0') plt.ylabel('y_1') plt.show() # - # ## Example # # Generate a data set as follows # # \begin{eqnarray} # x & \sim & {\mathcal N}(x; 0, 1) \\ # y|x & \sim & {\mathcal N}(a x, R) # \end{eqnarray} # # How is this model related to the PCA? # + # %matplotlib inline from IPython.display import display, Math, Latex import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from notes_utilities import pnorm_ball_points from notes_utilities import mat2latex import pandas as pd #import seaborn as sns #sns.set(color_codes=True) # Number of points N = 10 # Parameters a = -0.8 R = 0.1 x = np.random.randn(N) y = a*x + np.sqrt(R)*np.random.randn(N) sns.jointplot(x=x, y=y); # - # We can work out the joint distribution as: # # \begin{eqnarray} # \left(\begin{array}{c} x \\ y \end{array}\right) \sim # \mathcal{N}\left( \left(\begin{array}{c} 0 \\ 0 \end{array}\right) , # \left(\begin{array}{cc} 1 & a\\ a & a^2 + R \end{array}\right) # \right) # \end{eqnarray} # # + # %matplotlib inline from IPython.display import display, Math, Latex import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from notes_utilities import pnorm_ball_points from notes_utilities import mat2latex import pandas as pd #import seaborn as sns #sns.set(color_codes=True) # Number of points N = 10 # Parameters a = -0.8 R = 0.1 # Theoretical Covariance Cov = np.mat([[1,a],[a, a**2+R]]) x = np.random.randn(N) y = a*x + np.sqrt(R)*np.random.randn(N) np.set_printoptions(precision=4) X = np.c_[x,y].T N = X.shape[1] print('True Covariance') display(Math(r'\mu='+mat2latex(np.mat('[0;0]')))) display(Math(r'\Sigma='+mat2latex(Cov))) print('The ML Estimates from Data') mean_est = np.mean(X,axis=1,keepdims=True) cov_est 
= np.cov(X,bias=True) display(Math(r'\bar{m}='+mat2latex(mean_est))) display(Math(r'\bar{S}='+mat2latex(cov_est))) print('The estimate when we assume that we know the true mean') cov2_est = X.dot(X.T)/N display(Math(r'\bar{\Sigma}='+mat2latex(cov2_est))) plt.figure(figsize=(8,8)) plt.plot(x, y, '.') ax = plt.gca() ax.axis('equal') ax.set_xlabel('x') ax.set_ylabel('y') # True mean and Covariance dx,dy = pnorm_ball_points(3*np.linalg.cholesky(Cov)) ln = plt.Line2D(dx,dy, color='r') ln.set_label('True') ax.add_line(ln) ln = plt.Line2D([0],[0], color='r', marker='o') ax.add_line(ln) dx,dy = pnorm_ball_points(3*np.linalg.cholesky(Cov), mu=mean_est) ln = plt.Line2D(dx,dy, color='b') ln.set_label('ML Estimate') ax.add_line(ln) ln = plt.Line2D(mean_est[0],mean_est[1], color='b', marker='o') ax.add_line(ln) # Estimate conditioned on knowing the true mean dx,dy = pnorm_ball_points(3*np.linalg.cholesky(cov2_est)) ln = plt.Line2D(dx,dy, color='g') ln.set_label('Conditioned on true mean') ax.add_line(ln) ln = plt.Line2D([0],[0], color='g', marker='o') ax.add_line(ln) Lim = 6 ax.set_ylim([-Lim,Lim]) ax.set_xlim([-Lim,Lim]) ax.legend() plt.title('Covariance Matrix Estimates') plt.show() # - # ## Frequentist approach to statistics # # * Assume there is a true parameter that we don't know. 
For example the covariance $\Sigma$ # * Construct an estimator (=a function that spits out a parameter value given data) # $$ # \bar{\Sigma} = X^\top X/N # $$ # * (Conceptually) sample new random dataset from the same distribution for $i=1\dots K$ # $$ # X^{(i)} \sim p(X) # $$ # * Study the distribution of the estimator -- the output of the estimator is random as input data is random # $$ # \bar{\Sigma}^{(i)} = {X^{(i)}}^\top X^{(i)}/N # $$ # # + EPOCH = 20 fig = plt.figure(figsize=(6,6)) ax = fig.gca() Lim = 6 ax.set_ylim([-Lim,Lim]) ax.set_xlim([-Lim,Lim]) for i in range(EPOCH): x = np.random.randn(N) y = a*x + np.sqrt(R)*np.random.randn(N) X = np.c_[x,y].T cov2_est = X.dot(X.T)/N dx,dy = pnorm_ball_points(3*np.linalg.cholesky(cov2_est)) ln = plt.Line2D(dx,dy, color='g') ax.add_line(ln) dx,dy = pnorm_ball_points(3*np.linalg.cholesky(Cov)) ln = plt.Line2D(dx,dy, color='r', linewidth=3) ax.add_line(ln) plt.show() # - # Every green ellipse corresponds to an estimated covariance $\Sigma^{(i)}$ from each new dataset $X^{(i)}$ sampled from the data distribution. The picture suggests that the true covariance could be somehow obtained as the average ellipse. # # An estimator is called unbiased, if the true parameter is exactly the expected value of the estimator. Otherwise, the estimator is called biased. # # The variance of the estimator is the amount of fluctuation around the mean. Ideally, we wish it to be small, in fact zero. However, obtaining a zero variance turns out to be impossible when the bias is zero. The variance is always greater or equal to a positive quantity called the Cramer-Rao bound. # # ## Bootstrap # # In practice, we have only a single dataset, so we need to approximate the data distribution $p(X)$. The effect of sampling new datasets can be done by sampling data points with replacement. This procedure is known as the bootstrap. 
# # Below, we use a dataset of $M+N$ # + EPOCH = 100 M = N x = np.random.randn(N+M) y = a*x + np.sqrt(R)*np.random.randn(N+M) fig = plt.figure(figsize=(6,6)) ax = fig.gca() Lim = 6 ax.set_ylim([-Lim,Lim]) ax.set_xlim([-Lim,Lim]) for i in range(EPOCH): idx = np.random.permutation(N+M) X = np.c_[x[idx[0:N]],y[idx[0:N]]].T cov2_est = X.dot(X.T)/N dx,dy = pnorm_ball_points(3*np.linalg.cholesky(cov2_est)) ln = plt.Line2D(dx,dy, color='g') ax.add_line(ln) dx,dy = pnorm_ball_points(3*np.linalg.cholesky(Cov)) ln = plt.Line2D(dx,dy, color='r', linewidth=3) ax.add_line(ln) plt.show() # - # ## Bayesian approach to statistics # # - Assume there is only one dataset $X$ -- namely only the one that we have observed # - Postulate a prior for the parameter $p(\Sigma)$ # - Compute the posterior $p(\Sigma|X)$ # + EPOCH = 20 fig = plt.figure(figsize=(6,6)) ax = fig.gca() Lim = 6 ax.set_ylim([-Lim,Lim]) ax.set_xlim([-Lim,Lim]) x = np.random.randn(N) y = a*x + np.sqrt(R)*np.random.randn(N) X = np.c_[x,y].T cov2_est = X.dot(X.T)/N W = np.linalg.cholesky(cov2_est) plt.plot(x,y,'.') for i in range(EPOCH): U = W.dot(np.random.randn(2,N)) S = U.dot(U.T)/N dx,dy = pnorm_ball_points(3*np.linalg.cholesky(S)) ln = plt.Line2D(dx,dy, color='k') ax.add_line(ln) dx,dy = pnorm_ball_points(3*np.linalg.cholesky(Cov)) ln = plt.Line2D(dx,dy, color='r', linewidth=3) ax.add_line(ln) plt.show() # + from notes_utilities import mat2latex print(mat2latex(np.mat([[1,0],[0,1]])))
MeanVarianceCorrelation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # first analysis
#
# if you think that there should be more things to analyze deeper or if anything isn't clear just let me know. if you also found something useful that is not listed here, add it
#

import numpy as np
import os
import pandas as pd
from scipy.sparse import *
from tqdm import tqdm

# Load the playlist table; only pid / num_tracks / duration_ms are used below.
pl = pd.read_csv("../../../dataset/playlists.csv", sep='\t')
pl.head()

pl2 = pl[['pid', 'num_tracks', 'duration_ms']]

# NOTE: .values replaces the deprecated DataFrame.as_matrix()
# (removed in pandas 1.0); it returns the same ndarray.
pl_np = np.squeeze(pl2.values)

import plotly.plotly as py
import matplotlib.pyplot as plt
import seaborn as sns
# import matplotlib and allow it to plot inline
# %matplotlib inline

# seaborn can generate several warnings, we ignore them
import warnings
warnings.filterwarnings("ignore")
sns.set(style="white", color_codes=True)
sns.set_context(rc={"font.family": 'sans', "font.size": 20, "axes.titlesize": 4, "axes.labelsize": 24})

# +
# num_playlists[i]      -> how many playlists have exactly i tracks
# duration_playlists[i] -> duration_ms of every playlist with exactly i tracks
num_playlists = [0] * 251
duration_playlists = [None] * 251
for i in range(251):
    num_playlists[i] = len(pl2.loc[pl2['num_tracks'] == i])
    duration_playlists[i] = pl2.loc[pl2['num_tracks'] == i]['duration_ms'].values.copy()
    if num_playlists[i] != len(duration_playlists[i]):
        print("error")
duration_playlists
# -

# Per-track duration statistics in ms.
# BUGFIX: the original referenced the undefined name `durate_playlists`
# (NameError — the variable is `duration_playlists`) and divided by i == 0
# on the first iteration; per-track duration is undefined for 0 tracks, so
# append NaN there to keep the lists aligned with the track count index.
var1 = list()
mean1 = list()
std1 = list()
for i in range(len(num_playlists)):
    if i == 0:
        var1.append(np.nan)
        mean1.append(np.nan)
        std1.append(np.nan)
        continue
    var1.append(np.var(duration_playlists[i] / i))
    mean1.append(np.mean(duration_playlists[i] / i))
    std1.append(np.std(duration_playlists[i] / i))

# Same statistics expressed in minutes per track.
var2 = list()
mean2 = list()
std2 = list()
duration_in_minutes = duration_playlists.copy()
for i in range(len(num_playlists)):
    if i == 0:
        var2.append(np.nan)
        mean2.append(np.nan)
        std2.append(np.nan)
        continue
    duration_in_minutes[i] = duration_playlists[i] / 1000 / 60 / i
    var2.append(np.var(duration_in_minutes[i]))
    mean2.append(np.mean(duration_in_minutes[i]))
    std2.append(np.std(duration_in_minutes[i]))

# # graphs of duration
# ### mean / variance / standard deviation
#

plt.figure(dpi=130)
plt.plot(mean2)
plt.ylabel('mean dur in minutes')
plt.show()

plt.figure(dpi=130)
plt.plot(var2)
plt.ylabel('var dur (mins)')
plt.show()

np.argmax(var1[5:251])

var1[211]

plt.figure(dpi=130)
plt.plot(std2)
plt.ylabel('std')
plt.show()

# seems like there are a lot of jazz lovers with 211 songs in their playlists. we might check if those are strange playlists. i tried a little but it seemed there isn't anything strange.
#
# ### check the playlists with 211 elements

# Per-track duration (minutes) of every playlist with exactly 211 tracks.
durations_211 = sorted(np.array(pl2.loc[pl2['num_tracks'] == 211]['duration_ms']) / 211 / 60 / 1000)
plt.hist(durations_211)

# BUGFIX: this cell compares playlists with 99 tracks, so normalize by 99
# (the original divided by 211, which scaled the whole histogram down) and
# name the variable after the track count it actually selects.
durations_99 = sorted(np.array(pl2.loc[pl2['num_tracks'] == 99]['duration_ms']) / 99 / 60 / 1000)
plt.hist(durations_99)

pl3 = pl[['pid', 'num_tracks', 'duration_ms']]
pl3.head()

# +
# Build a (pid, per-track-minutes) table for the 211-track playlists,
# sorted by per-track duration.
pl3.loc[pl3['num_tracks'] == 211].sort_values('duration_ms')
pid_d = pl3.loc[pl3['num_tracks'] == 211].duration_ms
pid = pl3.loc[pl3['num_tracks'] == 211].pid
pid_dur = pid_d.apply(lambda x: x / 211 / 1000 / 60)
long_211_pls = pd.DataFrame([pid, pid_dur]).T.sort_values('duration_ms')
long_211_pls.head()
# -

long_211_pls.describe()
personal/Lele/analysis notebooks/analysis_durations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Step 8: Use model to perform inference # # Use example data stored on disk to perform inference with your model by sending REST requests to TensorFlow Serving. # + """A client for serving the chicago_taxi workshop example locally.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import base64 import json import os import subprocess import tempfile import requests import tensorflow as tf import tfx_utils from tfx.utils import io_utils from tensorflow_metadata.proto.v0 import schema_pb2 from tensorflow_transform import coders as tft_coders from tensorflow_transform.tf_metadata import dataset_metadata from tensorflow_transform.tf_metadata import dataset_schema from tensorflow_transform.tf_metadata import schema_utils from google.protobuf import text_format from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import from tfx.examples.chicago_taxi.trainer import taxi _MODEL_NAME = 'taxi' _INFERENCE_TIMEOUT_SECONDS = 5.0 _PIPELINE_NAME = 'taxi_solution' _LABEL_KEY = 'tips' # - # The data that we will use to send requests to our model is stored on disk in [csv](https://en.wikipedia.org/wiki/Comma-separated_values) format; we will convert these examples to [TensorFlow Example](https://www.tensorflow.org/api_docs/python/tf/train/Example) to send to our model being served by TensorFlow Serving. # # Construct the following two utility functions: # # * `_make_proto_coder` which creates a coder that will decode a single row from the CSV data file and output a tf.transform encoded dict. # * `_make_csv_coder` which creates a coder that will encode a tf.transform encoded dict object into a TF Example. 
# +
def _get_raw_feature_spec(schema):
  """Return the raw feature spec dict for a given `Schema` proto."""
  return schema_utils.schema_as_feature_spec(schema).feature_spec


def _make_proto_coder(schema):
  """Return a coder that encodes a tf.transform dict as a TF Example proto."""
  raw_feature_spec = _get_raw_feature_spec(schema)
  raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
  return tft_coders.ExampleProtoCoder(raw_schema)


def _make_csv_coder(schema, column_names):
  """Return a coder that decodes one CSV row into a tf.transform dict."""
  raw_feature_spec = _get_raw_feature_spec(schema)
  parsing_schema = dataset_schema.from_feature_spec(raw_feature_spec)
  return tft_coders.CsvCoder(column_names, parsing_schema)
# -

# Implement routine to read examples from a CSV file and for each example, send an inference request containing a base-64 encoding of the serialized TF Example.

def do_inference(server_addr, model_name, examples_file, num_examples, schema):
  """Sends requests to the model and prints the results.

  Args:
    server_addr: network address of model server in "host:port" format
    model_name: name of the model as understood by the model server
    examples_file: path to csv file containing examples, with the first line
      assumed to have the column headers
    num_examples: number of requests to send to the server
    schema: a Schema describing the input data; NOTE it is mutated in place
      to drop the label feature

  Returns:
    The JSON-decoded prediction response from the model server
  """
  # The label must not be sent to the server: drop it from the schema.
  filtered_features = [
      feature for feature in schema.feature if feature.name != _LABEL_KEY
  ]
  del schema.feature[:]
  schema.feature.extend(filtered_features)

  column_names = io_utils.load_csv_column_names(examples_file)
  csv_coder = _make_csv_coder(schema, column_names)
  proto_coder = _make_proto_coder(schema)

  # BUGFIX: use a context manager so the file handle is always closed
  # (the original opened the file and never closed it).
  serialized_examples = []
  with open(examples_file, 'r') as input_file:
    input_file.readline()  # skip header line
    for _ in range(num_examples):
      one_line = input_file.readline()
      if not one_line:
        print('End of example file reached')
        break
      one_example = csv_coder.decode(one_line)
      serialized_example = proto_coder.encode(one_example)
      serialized_examples.append(serialized_example)

  parsed_server_addr = server_addr.split(':')

  host = parsed_server_addr[0]
  port = parsed_server_addr[1]

  json_examples = []

  for serialized_example in serialized_examples:
    # The encoding follows the guidelines in:
    # https://www.tensorflow.org/tfx/serving/api_rest
    example_bytes = base64.b64encode(serialized_example).decode('utf-8')
    predict_request = '{ "b64": "%s" }' % example_bytes
    json_examples.append(predict_request)

  json_request = '{ "instances": [' + ','.join(map(str, json_examples)) + ']}'

  server_url = 'http://' + host + ':' + port + '/v1/models/' + model_name + ':predict'
  response = requests.post(
      server_url, data=json_request, timeout=_INFERENCE_TIMEOUT_SECONDS)
  response.raise_for_status()

  prediction = response.json()
  print(json.dumps(prediction, indent=4))
  # BUGFIX: the docstring documented a return value but the function
  # returned None; return the parsed response so callers can use it.
  return prediction

# Open the metadata store, obtain the URI for the schema of your model, as inferred by TFDV, fetch the schema file and parse it into a `Schema` object.

def _make_schema(pipeline_name):
  """Reads and constructs schema object for provided pipeline.

  Args:
    pipeline_name: The name of the pipeline for which TFX Metadata Store has
      Schema.

  Returns:
    An instance of Schema.

  Raises:
    ValueError: if more or fewer than one schema was found for the pipeline.
  """
  db_path = os.path.join(os.environ['HOME'], 'airflow/tfx/metadata/',
                         pipeline_name, 'metadata.db')
  store = tfx_utils.TFXReadonlyMetadataStore.from_sqlite_db(db_path)
  schemas = store.get_artifacts_of_type_df(tfx_utils.TFXArtifactTypes.SCHEMA)
  # BUGFIX: `assert` is stripped under `python -O`; raise explicitly as the
  # docstring promises.
  if len(schemas.URI) != 1:
    raise ValueError('Expected exactly one schema for pipeline %r, found %d'
                     % (pipeline_name, len(schemas.URI)))
  schema_uri = schemas.URI.iloc[0] + 'schema.pbtxt'
  schema = schema_pb2.Schema()
  contents = file_io.read_file_to_string(schema_uri)
  text_format.Parse(contents, schema)
  return schema

# Use the utilities that we have defined to send a batch of inference requests to the model being served by TensorFlow Serving listening on the host's network interface.
# # Note: **If running on a Mac** change the host to `docker.for.mac.localhost` below # + host = 'localhost' # If running on a Windows or Linux host # host = 'docker.for.mac.localhost' # If running on a MacOS host do_inference(server_addr='{}:8501'.format(host), model_name=_MODEL_NAME, examples_file='/root/airflow/data/taxi_data/data.csv', num_examples=3, schema=_make_schema(_PIPELINE_NAME))
tfx_airflow/notebooks/step8.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6.9 64-bit # metadata: # interpreter: # hash: 4cd7ab41f5fca4b9b44701077e38c5ffd31fe66a6cab21e0214b68d958d0e462 # name: Python 3.6.9 64-bit # --- # # This is a notebook implementing the multilingual BERT for NER classification on the DaNE dataset # + tags=[] # Loading packages ## Standard packages import os import math import pandas as pd import numpy as np ## pyTorch import torch import torch.nn.functional as F from torch import nn from torch.optim import Adam from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler ## Transformers from transformers import BertTokenizer, AutoTokenizer from transformers import BertForTokenClassification, AutoModelForTokenClassification, AdamW from transformers import get_linear_schedule_with_warmup ## Other ML utils from sklearn.metrics import confusion_matrix, f1_score, classification_report, accuracy_score from sklearn.model_selection import train_test_split from keras.preprocessing.sequence import pad_sequences from tqdm import tqdm,trange from danlp.datasets import DDT # - os.chdir("/bachelor_project") os.getcwd() # ## Loading and inspecting data # + # Load the DaNE data train, val, test = DDT().load_as_simple_ner(predefined_splits=True) # Split sentences and labels tr_sentences, tr_labels = train val_sentences, val_labels = val test_sentences, test_labels = test # + def is_misc(ent: str): if len(ent) < 4: return False return ent[-4:] == 'MISC' def remove_miscs(se: list): return [ [entity if not is_misc(entity) else 'O' for entity in entities] for entities in se ] # - # Replace MISC with O for fair comparisons tr_labels = remove_miscs(tr_labels) val_labels = remove_miscs(val_labels) test_labels = remove_miscs(test_labels) #Sanity checking number of words print(len(tr_labels)) print(len(val_labels)) 
print(len(test_labels)) # The ELECTRA model requires input data to be in a specific format. One requirement is to have special tokens that marks the beginning ([CLS]) and the separation/end of sentences ([SEP]). These tokens are added to the list of label values below. Furthermore, the label [PAD] is added to indicate padded tokens after padding the sentences later in the process. # + tags=[] # Adding labels to fine-tune the BERT tag_values = ['O', 'B-PER', 'I-PER', 'B-LOC', 'I-LOC', 'B-ORG', 'I-ORG'] #list(set(labels for list in tr_labels for labels in list)) tag_values.append("[PAD]") tag_values.append("[CLS]") tag_values.append("[SEP]") print(tag_values) #Creating tag to index and index to tags variables tag2idx = {t: i for i, t in enumerate(tag_values)} idx2tag = {i: t for t, i in tag2idx.items()} print(tag2idx) print(idx2tag) # - # ## Making the training data using the vocabulary from multilingual BERT # Checking whether GPU is available device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() torch.cuda.get_device_name(0) # BERT provides its own tokenizer which is imported below. The tokenizer is created with a Wordpiece model and it creates a vocabulary of whole words, subwords and individual characters. 
# + tags=[] # load tokenizer, with manual file address or pretrained address from the Transformers library tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-uncased", do_lower_case = True, strip_accents = False) # - def tokenize_and_preserve_labels(sentence, text_labels): tokenized_sentence = [] labels = [] for word, label in zip(sentence, text_labels): # Tokenize the word and count # of subwords the word is broken into tokenized_word = tokenizer.tokenize(word) n_subwords = len(tokenized_word) # Add the tokenized word to the final tokenized word list tokenized_sentence.extend(tokenized_word) # Add the same label to the new list of labels `n_subwords` times labels.extend([label] * n_subwords) return tokenized_sentence, labels # + tr_tokenized_texts_and_labels = [ tokenize_and_preserve_labels(s, l) for s, l in zip(tr_sentences, tr_labels) ] val_tokenized_texts_and_labels = [ tokenize_and_preserve_labels(s, l) for s, l in zip(val_sentences, val_labels) ] test_tokenized_texts_and_labels = [ tokenize_and_preserve_labels(s, l) for s, l in zip(test_sentences, test_labels) ] # - tr_tokenized_texts_and_labels[0] # + tags=[] tr_tokenized_texts = [["[CLS]"] + tr_token_label_pair[0] + ["[SEP]"] for tr_token_label_pair in tr_tokenized_texts_and_labels] tr_labels = [["[CLS]"] + tr_token_label_pair[1] + ["[SEP]"] for tr_token_label_pair in tr_tokenized_texts_and_labels] val_tokenized_texts = [["[CLS]"] + val_token_label_pair[0] + ["[SEP]"] for val_token_label_pair in val_tokenized_texts_and_labels] val_labels = [["[CLS]"] + val_token_label_pair[1] + ["[SEP]"] for val_token_label_pair in val_tokenized_texts_and_labels] test_tokenized_texts = [["[CLS]"] + test_token_label_pair[0] + ["[SEP]"] for test_token_label_pair in test_tokenized_texts_and_labels] test_labels = [["[CLS]"] + test_token_label_pair[1] + ["[SEP]"] for test_token_label_pair in test_tokenized_texts_and_labels] #Example of word-piece tokenizations: print(tr_tokenized_texts[0]) print(tr_labels[0]) # - 
# Note that number of words in all datasets are increased due to the word-piece tokenization. For the test dataset this means that it will have a higher number of words i.e. labels also during evaluation and comparison to the rule-based classification #number of words increased from 7,416 to 22,424 tmp=0 for labels in test_labels: tmp=tmp+len(labels) tmp # + tags=[] # Len of the sentence must be not bigger than the training model # See model's 'max_position_embeddings' = 512 MAX_LEN = len(max(tr_tokenized_texts, key = len)) print(MAX_LEN) bs = 32 # - # Indexing tokens in sentences # + tags=[] tr_input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in tr_tokenized_texts], maxlen=MAX_LEN, dtype="long", truncating="post", padding="post") val_input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in val_tokenized_texts], maxlen=MAX_LEN, dtype="long", truncating="post", padding="post") test_input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in test_tokenized_texts], maxlen=MAX_LEN, dtype="long", truncating="post", padding="post") #Example of indexing print(tr_input_ids[0]) # - # Indexing labels # + tags=[] tr_tags = pad_sequences([[tag2idx.get(l) for l in lab] for lab in tr_labels], maxlen=MAX_LEN, value=tag2idx["[PAD]"], padding="post", dtype="long", truncating="post") val_tags = pad_sequences([[tag2idx.get(l) for l in lab] for lab in val_labels], maxlen=MAX_LEN, value=tag2idx["[PAD]"], padding="post", dtype="long", truncating="post") test_tags = pad_sequences([[tag2idx.get(l) for l in lab] for lab in test_labels], maxlen=MAX_LEN, value=tag2idx["[PAD]"], padding="post", dtype="long", truncating="post") #Example of indexing print(tr_tags[10]) # - # Creating attention masks that indicates which elements in the sentence are tokens and which are padding elements. So here we create the mask to ignore the padded elements in the sequences. 
# + tags=[] tr_attention_masks = [[float(i != 0) for i in ii] for ii in tr_input_ids] val_attention_masks = [[float(i != 0) for i in ii] for ii in val_input_ids] test_attention_masks = [[float(i != 0) for i in ii] for ii in test_input_ids] #Example of attention masks print(tr_attention_masks[10]) # - # Pytorch requires converting datasets into torch tensors (multidimensional matrices). Inputs, tags and mask ID's for training and test data are converted to tensors and moved to the GPU by applying .to(device) # + tr_inputs = torch.tensor(tr_input_ids) val_inputs = torch.tensor(val_input_ids) test_inputs = torch.tensor(test_input_ids) tr_tags = torch.tensor(tr_tags) val_tags = torch.tensor(val_tags) test_tags = torch.tensor(test_tags) tr_masks = torch.tensor(tr_attention_masks) val_masks = torch.tensor(val_attention_masks) test_masks = torch.tensor(test_attention_masks) # - # Creating training and test tensor datasets and defining data loaders. Shuffling the training data with RandomSampler and at test time we just pass them sequentially with the SequentialSampler. # + train_data = TensorDataset(tr_inputs, tr_masks, tr_tags) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=bs) valid_data = TensorDataset(val_inputs, val_masks, val_tags) valid_sampler = SequentialSampler(valid_data) valid_dataloader = DataLoader(valid_data, sampler=valid_sampler, batch_size=bs) test_data = TensorDataset(test_inputs, test_masks, test_tags) test_sampler = SequentialSampler(test_data) test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=bs) # - # ## Train model # We load the pre-trained bert-base-cased model and provide the number of possible labels. # + tags=[] pt_model_dir = "bert-base-multilingual-uncased" # Will load config and weight with from_pretrained(). 
model = BertForTokenClassification.from_pretrained(pt_model_dir,
                num_labels=len(tag2idx),
                output_attentions = False,
                output_hidden_states = False)
# -

# Set model to GPU,if you are using GPU machine

model.cuda();

print(f'Number of trainable parameters: {model.num_parameters()}')

# Setting full finetuning to true because we have capacity to fine tune all layers / update all weights. Before we can start the fine-tuning process, we have to setup the optimizer and add the parameters it should update. A common choice is the AdamW optimizer. We also add some weight_decay as regularization to the main weight matrices. If you have limited resources, you can also try to just train the linear classifier on top of BERT and keep all other weights fixed. This will still give you a good performance.

# Vocabulary ids of the special tokens, used later to mask them out of
# the accuracy / loss bookkeeping.
pad_tok = tokenizer.vocab["[PAD]"]
sep_tok = tokenizer.vocab["[SEP]"]
cls_tok = tokenizer.vocab["[CLS]"]


def flat_accuracy(valid_tags, pred_tags):
    """
    Token-level accuracy: fraction of positions where the predicted tag
    equals the gold tag. Used as a flat metric while training the model.
    """
    return (np.array(valid_tags) == np.array(pred_tags)).mean()


def annot_confusion_matrix(valid_tags, pred_tags):
    """
    Create an annotated confusion matrix by adding label
    annotations and formatting to sklearn's `confusion_matrix`.
    """

    # BUGFIX: the original computed the matrix over a hard-coded list of six
    # entity labels but annotated rows/columns with `header` built from the
    # tags actually present, so the printed row names could disagree with the
    # matrix rows (wrong order and/or wrong count). Derive one label list and
    # use it for both the matrix and the annotations.
    header = sorted(set(valid_tags + pred_tags))

    # Calculate the actual confusion matrix; rows/columns ordered as `header`
    matrix = confusion_matrix(valid_tags, pred_tags, labels=header)

    # Final formatting touches for the string output
    mat_formatted = [header[i] + "\t" + str(row) for i, row in enumerate(matrix)]
    content = "\t" + " ".join(header) + "\n" + "\n".join(mat_formatted)

    return content


# +
FULL_FINETUNING = True

if FULL_FINETUNING:
    # Fine-tune every weight; biases and LayerNorm (gamma/beta) parameters
    # are conventionally excluded from weight decay.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay_rate': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay_rate': 0.0}
    ]
else:
    # Only train the token-classification head on top of the frozen encoder.
    param_optimizer = list(model.classifier.named_parameters())
    optimizer_grouped_parameters = [{"params": [p for n, p in param_optimizer]}]

optimizer = AdamW(
    optimizer_grouped_parameters,
    lr=3e-5  # The authors of BERT use 3e-5 as lr for BERT-base
)
# +
epochs = 4 # Train a maximum of 3-4 epochs. More will simply result in overfitting the training data.

max_grad_norm = 1.0

# Total number of training steps is number of batches * number of epochs.
total_steps = len(train_dataloader) * epochs

# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=0, num_training_steps=total_steps ) # + tags=[] epoch = 0 torch.manual_seed(1) np.random.seed(1) tr_loss_values, eval_loss_values = [], [] for _ in trange(epochs, desc="Epoch"): epoch += 1 # Training loop print("\nStarting training loop.") model.train() tr_loss, tr_accuracy = 0, 0 nb_tr_examples, nb_tr_steps = 0, 0 tr_preds, tr_labels = [], [] for step, batch in enumerate(train_dataloader): # Add batch to gpu batch = tuple(t.to(torch.int64).to(device) for t in batch) b_input_ids, b_input_mask, b_labels = batch # Forward pass outputs = model( b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels, ) loss, tr_logits = outputs[:2] # Backward pass loss.backward() # Compute train loss tr_loss += loss.item() nb_tr_examples += b_input_ids.size(0) nb_tr_steps += 1 # Subset out unwanted predictions on CLS/PAD/SEP tokens preds_mask = ( (b_input_ids != cls_tok) & (b_input_ids != pad_tok) & (b_input_ids != sep_tok) ) #preds_mask = preds_mask.detach().cpu().numpy() tr_logits = tr_logits.detach().cpu().numpy() tr_label_ids = torch.masked_select(b_labels, (preds_mask == 1)) tr_batch_preds = np.argmax(tr_logits[preds_mask.detach().cpu().numpy().squeeze()], axis=1) tr_batch_labels = tr_label_ids.to("cpu").numpy() tr_preds.extend(tr_batch_preds) tr_labels.extend(tr_batch_labels) # Compute training accuracy tmp_tr_accuracy = flat_accuracy(tr_batch_labels, tr_batch_preds) tr_accuracy += tmp_tr_accuracy # Gradient clipping torch.nn.utils.clip_grad_norm_( parameters=model.parameters(), max_norm=max_grad_norm ) # Update parameters optimizer.step() model.zero_grad() tr_loss = tr_loss / nb_tr_steps tr_loss_values.append(tr_loss) tr_accuracy = tr_accuracy / nb_tr_steps # Print training loss and accuracy per epoch print(f"Train loss: {tr_loss}") print(f"Train accuracy: {tr_accuracy}") # Validation loop print("Starting validation loop.") model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, 
nb_eval_examples = 0, 0 predictions, true_labels = [], [] for batch in valid_dataloader: batch = tuple(t.to(torch.int64).to(device) for t in batch) b_input_ids, b_input_mask, b_labels = batch with torch.no_grad(): outputs = model( b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels, ) tmp_eval_loss, logits = outputs[:2] # Subset out unwanted predictions on CLS/PAD/SEP tokens preds_mask = ( (b_input_ids != cls_tok) & (b_input_ids != pad_tok) & (b_input_ids != sep_tok) ) logits = logits.to("cpu").numpy() label_ids = torch.masked_select(b_labels, (preds_mask == 1)) val_batch_preds = np.argmax(logits[preds_mask.detach().cpu().numpy().squeeze()], axis=1) val_batch_labels = label_ids.to("cpu").numpy() predictions.extend(val_batch_preds) true_labels.extend(val_batch_labels) tmp_eval_accuracy = flat_accuracy(val_batch_labels, val_batch_preds) eval_loss += tmp_eval_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += b_input_ids.size(0) nb_eval_steps += 1 # Evaluate loss, acc, conf. matrix, and class. report on validation set pred_tags = [idx2tag[i] for i in predictions] valid_tags = [idx2tag[i] for i in true_labels] cl_report = classification_report(valid_tags, pred_tags) conf_mat = annot_confusion_matrix(valid_tags, pred_tags) eval_loss = eval_loss / nb_eval_steps eval_loss_values.append(eval_loss) eval_accuracy = eval_accuracy / nb_eval_steps f1score = f1_score(valid_tags, pred_tags, labels = ['B-PER', 'I-PER', 'B-LOC', 'I-LOC', 'B-ORG', 'I-ORG'], average="micro") # Report metrics print(f"Validation loss: {eval_loss}\n") print(f"Validation Accuracy: {eval_accuracy}\n") print(f"F1-Score: {f1score}\n") print(f"Classification Report:\n {cl_report}") print(f"Confusion Matrix:\n {conf_mat}") # + import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns # Use plot styling from seaborn. sns.set(style='darkgrid') # Increase the plot size and font size. 
sns.set(font_scale=1.5)
plt.rcParams["figure.figsize"] = (12,6)

# Plot the learning curve.
plt.plot(tr_loss_values, 'b-o', label="training loss")
plt.plot(eval_loss_values, 'r-o', label="validation loss")

# Label the plot.
plt.title("Learning curve")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
# -

# Directory where the fine-tuned model and tokenizer are persisted.
ner_model_path = 'research/daNLP/NER/models/multi_BERT_uncased'

# Make dir if it does not exist
if not os.path.exists(ner_model_path):
    os.makedirs(ner_model_path)

# +
# Saving the model and the tokenizer
model_to_save = model.module if hasattr(model, 'module') else model  # take care of distributed/parallel training
model_to_save.save_pretrained(ner_model_path)
tokenizer.save_pretrained(ner_model_path)
# -

# Loading the model to tokenize the test sentence
model = BertForTokenClassification.from_pretrained(ner_model_path)
tokenizer = BertTokenizer.from_pretrained(ner_model_path, do_lower_case = True, strip_accents = False)
model.cuda();

# + tags=[]
# Test-set evaluation: same structure as the validation loop above, but
# accumulating loss/accuracy/predictions over test_dataloader.
pad_tok = tokenizer.vocab["[PAD]"]
sep_tok = tokenizer.vocab["[SEP]"]
cls_tok = tokenizer.vocab["[CLS]"]
torch.manual_seed(1)
np.random.seed(1)
test_loss, test_accuracy = 0, 0
nb_test_steps, nb_test_examples = 0, 0
predictions, true_labels = [], []
tr_loss_values, test_loss_values = [], []
for batch in test_dataloader:
    batch = tuple(t.to(torch.int64).to(device) for t in batch)
    b_input_ids, b_input_mask, b_labels = batch
    with torch.no_grad():
        outputs = model(
            b_input_ids,
            token_type_ids=None,
            attention_mask=b_input_mask,
            labels=b_labels,
        )
        tmp_test_loss, logits = outputs[:2]
    # Subset out unwanted predictions on CLS/PAD/SEP tokens
    preds_mask = (
        (b_input_ids != cls_tok)
        & (b_input_ids != pad_tok)
        & (b_input_ids != sep_tok)
    )
    logits = logits.to("cpu").numpy()
    label_ids = torch.masked_select(b_labels, (preds_mask == 1))
    test_batch_preds = np.argmax(logits[preds_mask.detach().cpu().numpy().squeeze()], axis=1)
    test_batch_labels = label_ids.to("cpu").numpy()
    predictions.extend(test_batch_preds)
    true_labels.extend(test_batch_labels)
    tmp_test_accuracy = flat_accuracy(test_batch_labels, test_batch_preds)
    test_loss += tmp_test_loss.mean().item()
    test_accuracy += tmp_test_accuracy
    nb_test_examples += b_input_ids.size(0)
    nb_test_steps += 1

# Evaluate loss, acc, conf. matrix, and class. report on devset
pred_tags = [idx2tag[i] for i in predictions]
valid_tags = [idx2tag[i] for i in true_labels]
cl_report = classification_report(valid_tags, pred_tags, labels = ['B-LOC', 'B-PER', 'I-LOC', 'I-PER', 'B-ORG', 'I-ORG'])
conf_mat = annot_confusion_matrix(valid_tags, pred_tags)
test_loss = test_loss / nb_test_steps
test_loss_values.append(test_loss)
test_accuracy = test_accuracy / nb_test_steps
f1score_micro = f1_score(valid_tags, pred_tags, labels = ['B-LOC', 'B-PER', 'I-LOC', 'I-PER', 'B-ORG', 'I-ORG'], average="micro")
f1score_macro = f1_score(valid_tags, pred_tags, labels = ['B-LOC', 'B-PER', 'I-LOC', 'I-PER', 'B-ORG', 'I-ORG'], average="macro")

# Report metrics
print(f"Number of Epochs: {epochs}\n")
print(f"Test loss: {test_loss}\n")
print(f"Test Accuracy: {test_accuracy}\n")
print(f"F1-Score Micro: {f1score_micro}\n")
print(f"F1-Score Macro: {f1score_macro}\n")
print(f"Classification Report:\n {cl_report}")
print(f"Confusion Matrix:\n {conf_mat}")

# Append the same metrics to a TESTMETRICS log next to the saved model.
with open(f'{ner_model_path}/TESTMETRICS','a+') as f:
    f.write(f"Number of Epochs: {epochs}\n")
    f.write(f"Test loss: {test_loss}\n")
    f.write(f"Test Accuracy: {test_accuracy}\n")
    f.write(f"F1-Score Micro: {f1score_micro}\n")
    f.write(f"F1-Score Macro: {f1score_macro}\n")
    f.write(f"Classification Report:\n {cl_report}")
    f.write(f"Confusion Matrix:\n {conf_mat}")
# -

# # Testing the model's capabilities on specific tokens only

# +
#test["labels"][test["labels"].str.contains("PER")]="B-PER"
# Collapse the B-/I- distinction so PER and LOC are each scored as one class.
valid_tags = pd.Series(valid_tags)
valid_tags[valid_tags.str.contains("PER")] = "B-PER"
valid_tags[valid_tags.str.contains("LOC")] = "B-LOC"
valid_tags = valid_tags.tolist()

pred_tags = pd.Series(pred_tags)
pred_tags[pred_tags.str.contains("PER")] = "B-PER"
pred_tags[pred_tags.str.contains("LOC")] = "B-LOC"
pred_tags = pred_tags.tolist()

# + tags=[]
cl_report = classification_report(valid_tags, pred_tags, labels = ['B-LOC', 'B-PER'])
conf_mat = confusion_matrix(valid_tags, pred_tags)
f1score = f1_score(valid_tags, pred_tags, labels = ['B-LOC', 'B-PER'], average = "macro")

# Report metrics
print(f"F1-Score: {f1score}\n")
print(f"Classification Report:\n {cl_report}")
print(f"Confusion Matrix:\n {conf_mat}")
# -

test_sentence = "Indkøb af Melon 1 kg, 2 slags Karen Volf 200g, 2 poser Chili og Timian fra Santa Maria, Arla 1L. Vores referencer: <NAME>, <NAME>, <NAME> og <NAME>. Kontaktperson er <NAME>. Levering til Timianvej 12"

test_sentence2 = "<NAME> har bestilt 10 kasser Lego til levering på Hc. Andersensvej 13 A første Sal tv og han har købt det til sin datter chili som går med Åben ble fra Abena og hun elsker i øvrigt elsker at spise chili, så derfor har de 10 kg chili derhjemme, men hvad chili ikke ved er at hendes far har købt en hvid 3 hjulet cykel fra Toys R Us ved Toppen Nr. 3 Aarhus- helt specifikt er det en 3 hjulet nr 30 fra kataloget og han har husket Toppen beskyttelseshjelm, str 35 og Far Timian kan godt lide chiLi men han elsker at spise en Tivoli stang, derfor bestilte han 20 stk Toms tivoli stang så han kan dele med sin ven Sebsatian i stedet for at få Melon i Grøn Box."

# + tags=[]
tokenized_sentence = tokenizer.encode(test_sentence)
print(tokenized_sentence)
# -

input_ids = torch.tensor([tokenized_sentence]).cuda()

# +
with torch.no_grad():
    logits = model(input_ids)

logits = F.softmax(logits[0], dim = 2)
logits_label = torch.argmax(logits, dim = 2)
logits_label = logits_label.detach().cpu().numpy().tolist()[0]
# Confidence (softmax probability) of the argmax label for every subtoken.
logits_confidence = [values[label].item() for values, label in zip(logits[0], logits_label)]
len(logits_confidence)
# -

# join bpe split tokens
# NOTE(review): unlike the evaluation loops, [CLS]/[SEP] predictions are NOT
# stripped here — confirm that is intended before trusting these labels.
tokens = tokenizer.convert_ids_to_tokens(input_ids.to('cpu').numpy()[0])
new_tokens, new_labels, new_probs = [], [], []
for token, label_idx, probs in zip(tokens, logits_label, logits_confidence):
    if token.startswith("##"):
        # Continuation WordPiece: glue onto the previous token, keep its label/prob.
        new_tokens[-1] = new_tokens[-1] + token[2:]
    else:
        new_labels.append(tag_values[label_idx])
        new_tokens.append(token)
        new_probs.append(probs)

# + tags=[]
for token, label, prob in zip(new_tokens, new_labels, new_probs):
    print("{}\t{}\t{}".format(label, token, prob))
# -

dict_predictions = [{"Word":token,"Label":label,"Confidence":prob} for token, label, prob in zip(new_tokens, new_labels, new_probs)]
dict_predictions
notebooks/fine-tuning/daNLP/NER/daNLP_uncased_NER_multi_BERT.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Train a CNN document classifier on 20-newsgroups collapsed to 6 coarse
# categories; relies on the project-local config/dataloader/model packages.
# +
import config
from dataloader.loader import Loader
from preprocessing.utils import Preprocess, remove_empty_docs
from dataloader.embeddings import GloVe
from model.cnn_document_model import DocumentModel, TrainingParameters
from keras.callbacks import ModelCheckpoint, EarlyStopping
import numpy as np
from keras.utils import to_categorical
import keras.backend as K
from sklearn.manifold import TSNE
# -

# ## Load Data Sets for 20 News Group

# +
dataset = Loader.load_20newsgroup_data(subset='train')
corpus, labels = dataset.data, dataset.target
corpus, labels = remove_empty_docs(corpus, labels)

test_dataset = Loader.load_20newsgroup_data(subset='test')
test_corpus, test_labels = test_dataset.data, test_dataset.target
test_corpus, test_labels = remove_empty_docs(test_corpus, test_labels)
# -

# ## Mapping 20 Groups to 6 High level Categories

# +
# newsgroup name -> coarse category id (0=comp, 1=rec, 2=sci, 3=forsale,
# 4=politics, 5=religion).
six_groups = {
    'comp.graphics':0,'comp.os.ms-windows.misc':0,'comp.sys.ibm.pc.hardware':0,
    'comp.sys.mac.hardware':0, 'comp.windows.x':0,
    'rec.autos':1, 'rec.motorcycles':1, 'rec.sport.baseball':1, 'rec.sport.hockey':1,
    'sci.crypt':2, 'sci.electronics':2,'sci.med':2, 'sci.space':2,
    'misc.forsale':3,
    'talk.politics.misc':4, 'talk.politics.guns':4, 'talk.politics.mideast':4,
    'talk.religion.misc':5, 'alt.atheism':5, 'soc.religion.christian':5
}
# -

map_20_2_6 = [six_groups[dataset.target_names[i]] for i in range(20)]
labels = [six_groups[dataset.target_names[i]] for i in labels]
test_labels = [six_groups[dataset.target_names[i]] for i in test_labels]

# ## Pre-process Text to convert it to word index sequences

Preprocess.MIN_WD_COUNT=5
preprocessor = Preprocess(corpus=corpus)
corpus_to_seq = preprocessor.fit()

test_corpus_to_seq = preprocessor.transform(test_corpus)

# ## Initialize Embeddings

# 50-dimensional GloVe vectors for the fitted vocabulary.
glove=GloVe(50)
initial_embeddings = glove.get_embedding(preprocessor.word_index)

# ## Build Model

newsgrp_model = DocumentModel(vocab_size=preprocessor.get_vocab_size(),
                              sent_k_maxpool = 5,
                              sent_filters = 20,
                              word_kernel_size = 5,
                              word_index = preprocessor.word_index,
                              num_sentences=Preprocess.NUM_SENTENCES,
                              embedding_weights=initial_embeddings,
                              conv_activation = 'relu',
                              train_embedding = True,
                              learn_word_conv = True,
                              learn_sent_conv = True,
                              sent_dropout = 0.4,
                              hidden_dims=64,
                              input_dropout=0.2,
                              hidden_gaussian_noise_sd=0.5,
                              final_layer_kernel_regularizer=0.1,
                              num_hidden_layers=2,
                              num_units_final_layer=6)

# ## Save model parameters

# +
train_params = TrainingParameters('6_newsgrp_largeclass',
                                  model_file_path = config.MODEL_DIR+ '/20newsgroup/model_6_01.hdf5',
                                  model_hyper_parameters = config.MODEL_DIR+ '/20newsgroup/model_6_01.json',
                                  model_train_parameters = config.MODEL_DIR+ '/20newsgroup/model_6_01_meta.json',
                                  num_epochs=20,
                                  batch_size = 128,
                                  validation_split=.10,
                                  learning_rate=0.01)

train_params.save()
newsgrp_model._save_model(train_params.model_hyper_parameters)
# -

# ## Compile and run model

# +
newsgrp_model._model.compile(loss="categorical_crossentropy",
                             optimizer=train_params.optimizer,
                             metrics=["accuracy"])

# Keep only the best checkpoint (weights only); stop after 2 stagnant epochs.
checkpointer = ModelCheckpoint(filepath=train_params.model_file_path,
                               verbose=1,
                               save_best_only=True,
                               save_weights_only=True)

early_stop = EarlyStopping(patience=2)

# +
x_train = np.array(corpus_to_seq)
y_train = to_categorical(np.array(labels))

x_test = np.array(test_corpus_to_seq)
y_test = to_categorical(np.array(test_labels))

# +
#Set LR
K.set_value(newsgrp_model.get_classification_model().optimizer.lr, train_params.learning_rate)

newsgrp_model.get_classification_model().fit(x_train, y_train,
                                             batch_size=train_params.batch_size,
                                             epochs=train_params.num_epochs,
                                             verbose=2,
                                             validation_split=train_params.validation_split,
                                             callbacks=[checkpointer,early_stop])

newsgrp_model.get_classification_model().evaluate( x_test, y_test, verbose=2)

preds = newsgrp_model.get_classification_model().predict(x_test)
preds_test = np.argmax(preds, axis=1)
# -

# ## Evaluate Model Accuracy

from sklearn.metrics import classification_report,accuracy_score,confusion_matrix
print(classification_report(test_labels, preds_test))
print(confusion_matrix(test_labels, preds_test))
print(accuracy_score(test_labels, preds_test))

# ## Visualization: Document Embeddings with tsne - what the model learned

from utils import scatter_plot

# Project the learned document embeddings to 2-D for inspection.
doc_embeddings = newsgrp_model.get_document_model().predict(x_test)
print(doc_embeddings.shape)

doc_proj = TSNE(n_components=2, random_state=42, ).fit_transform(doc_embeddings)
f, ax, sc, txts = scatter_plot(doc_proj, np.array(test_labels))
f.savefig('nws_grp_embd.png')
notebooks/Ch07 - Text Document Categorization/20_newsgrp_cnn_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fine-tune VGG-19 on the 2-class hymenoptera dataset, then visualize
# Class Activation Maps (CAM) from the final 2-channel conv layer.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms, models
# -

model = models.vgg19(pretrained=True)

model

# Freeze all pretrained weights; only the layers replaced below will train.
for parameters in model.parameters():
    parameters.requires_grad = False

model.features[32] = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
model.features[34] = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
model.avgpool = nn.AdaptiveAvgPool2d(1)  # produces an (n_channels, 1, 1) output (here n_channels = 2)
model.classifier = nn.Linear(2, 2)

# +
#Create datasets and dataloaders
input_size=224
batch_size = 16

train_transforms = transforms.Compose([
    transforms.RandomResizedCrop(input_size),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(degrees=20),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

valid_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(input_size),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

dataset_train = datasets.ImageFolder(root='hymenoptera_data/train/', transform=train_transforms)
dataset_valid = datasets.ImageFolder(root='hymenoptera_data/val/', transform=valid_transforms)

train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=batch_size, shuffle=True)
# -

def train_batch_loss(model, loss_function, x, y, opt=None):
    """Run one batch; if an optimizer is given, also take a gradient step.

    Returns (loss_value, batch_size) so callers can compute a
    size-weighted average loss over an epoch.
    """
    loss = loss_function(model(x), y)
    if opt is not None:
        loss.backward()
        opt.step()
        opt.zero_grad()  # clear grads for the next batch
    return np.array(loss.item()), np.array(len(x))

def valid_batch_loss(model, loss_function, x, y):
    """Evaluate one batch; returns (loss_value, batch_size)."""
    output = model(x)
    loss = loss_function(output, y)
    return np.array(loss.item()), np.array(len(x))

# +
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr =0.0001)
epochs = 200
device = torch.device("cuda")
model.to(device)

for e in range(epochs):
    model.train()
    loss, number_of_data = zip(*[train_batch_loss(model, criterion, x.to(device), y.to(device), optimizer)
                                 for x, y in train_loader])
    # Size-weighted mean over the epoch.
    train_loss = np.sum(np.multiply(loss,number_of_data)) / np.sum(number_of_data)
    model.eval()
    with torch.no_grad():
        loss, number_of_data = zip(*[valid_batch_loss(model, criterion, x.to(device), y.to(device))
                                     for x, y in valid_loader])
        validation_loss = np.sum(np.multiply(loss, number_of_data)) / np.sum(number_of_data)
    print(f"[Epoch {e+1}/{epochs}]"
          f"Train loss:{train_loss:.6f}\t"
          f"Validation loss:{validation_loss:.6f}\t")
# -

save_path = r"D:\PYHTON\python3.7\DeepLearningProgram\Class_activation_mapping\VGG_fine_tuned_on_hymenoptera.pt"
torch.save(model.state_dict(), save_path)

# Rebuild the modified architecture and reload the fine-tuned weights.
save_path = r"D:\PYHTON\python3.7\DeepLearningProgram\Class_activation_mapping\VGG_fine_tuned_on_hymenoptera.pt"
state_dict = torch.load(save_path)
model = models.vgg19()
model.features[32] = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
model.features[34] = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
model.avgpool = nn.AdaptiveAvgPool2d(1)  # produces an (n_channels, 1, 1) output (here n_channels = 2)
model.classifier = nn.Linear(2, 2)
model.load_state_dict(state_dict)
model.eval()
model.cuda()

class get_feature_map:
    """Forward hook that captures a module's output feature map."""
    features = None
    def __init__(self, model):
        self.hook = model.register_forward_hook(self.hook_fn)
    def hook_fn(self, module, input, output):
        # Stores the most recent forward output of the hooked module.
        self.features = output
    def remove(self):
        self.hook.remove()

# +
#change batch size to 1 to grab one image at a time
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=1, shuffle=True, num_workers=1)
im, lab = next(iter(valid_loader))
im = im.type(torch.cuda.FloatTensor).to(device)
print(im.dtype)
print(im.shape)

model.to(device)
# features[-3] is the replaced 2-channel conv (features[34]); hook its output.
feature_map = get_feature_map(model.features[-3])
logits = model(im)
feature_map.remove()
feature_map = feature_map.features.cpu().detach().numpy().squeeze()
print(feature_map.shape)

weights = list(model.parameters())[-2].data.cpu().detach().numpy()
print(weights.shape) # (2, 2) (class number, channel number)

# CAM = classifier weights applied to the flattened feature map.
cam = weights @ feature_map.reshape(2, -1) # (2, 14X14)
print(cam.shape)
cam = cam.reshape(2, 14, 14).transpose(1, 2, 0)
cam = cv2.resize(cam, (224, 224))
print(cam.shape)

# NOTE(review): im is still normalized here, so imshow renders shifted colors.
im = im.cpu().numpy()
im = np.squeeze(im).transpose(1, 2, 0)
plt.figure()
plt.imshow(im)
plt.figure()
plt.imshow(im)
plt.imshow(cam[:,:,0], alpha=0.4, cmap="jet")
plt.figure()
plt.imshow(im)
plt.imshow(cam[:,:,1], alpha=0.4, cmap="jet")
# -
VGG-19_on_hymenoptera_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ML Application # # In this last homework, I would like you to practice the techniques you've learnt from earlier lectures and apply them to solve a real world application. # # Your first task is to pick a project topic. Pick an application that interests you, and explore how best to apply learning algorithms to solve it. I have some suggested projects below from Kaggle. However, if you've already had something in mind or if you're already working on a research or industry project that machine learning might apply to, then you may already have a great project idea. # # Once you figure out the topic/application. You need to formulate it into one where you can apply ML. You will ask yourself questions like the following: # - What machine learning techniques are you planning to apply or improve upon? # - Is it supervised learning or unsupervised learning? Is it a regression problem or a classification one? # - What experiments are you planning to run? # - How do you quantify success? # - How do you avoid overfitting and underfitting? # - How do you handle categorical variables? # - ... # # Here are some popular Kaggle challenges. You are welcomed to explore other datasets and projects on Kaggle that might interest you and use those as your final project. # - [Credit Card Fraud Detection](https://www.kaggle.com/mlg-ulb/creditcardfraud) # - [Product Recommendation](https://www.kaggle.com/c/santander-product-recommendation) # - [Predict movie rating and success](https://www.kaggle.com/tmdb/tmdb-movie-metadata/home) # - [Predict used var value](https://www.kaggle.com/ddmngml/trying-to-predict-used-car-value) # # To evaluate one's work, we will grade based on the following criterions: # - The technical quality of the work. 
# (i.e., Does the technical material make sense? Are the things tried reasonable? Are the ML algorithms used sound? Do the authors convey novel insight about the problem and/or algorithms?)
# - Significance. (Did the authors choose an interesting or a “real" problem to work on, or only a small “toy" problem? Is this work likely to be useful and/or have impact in real world?)
# - The novelty of the work. (Is this project applying a common technique to a well-studied problem, or is the problem or method relatively unexplored? Clearly identify the contribution of the work)
# - Clarity in writing. (Do the authors articulate and clearly identify the findings from the data?)

# +
# TODO
# Predict used car value
# -

import pandas as pd
import numpy as np
import seaborn as sns; sns.set()
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.model_selection import train_test_split
np.random.seed(0)

cars = pd.read_csv('../data/cars.csv')

# Analyze Data
cars.head()

print(f'Number of null values is {cars.isnull().sum().sum()}')

# Find Distribution of Numerical Columns
cars.describe()

cars.describe(include = 'object')

# Split data
# NOTE(review): 'Price' (the target) is included in X here — target leakage.
# This split is only used for the correlation heatmap; data is re-split
# without 'Price' before any model is fit.
y = cars['Price']
X = cars[['Mileage','Cylinder','Doors','Cruise','Sound','Leather', 'Price']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

# +
# Correlation between features
f, ax = plt.subplots(figsize=(12, 9))
corr_matrix = X_train.corr().abs()
sns.heatmap(corr_matrix, vmax=1, square=True)
plt.xticks(rotation=90)
plt.yticks(rotation=0)
plt.show();
# -

# Split data
y = cars['Price']
X = cars[['Mileage','Cylinder','Doors','Cruise','Sound','Leather']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

# Linear Regression Model
from sklearn.linear_model import LinearRegression
LR = LinearRegression()
LR.fit(X_train, y_train)
LR.score(X_train,y_train)

# +
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error

y_pred = LR.predict(X_test)
print("model Root Mean Squared Error: {}".format(round(np.sqrt(mean_squared_error(y_test, y_pred)))))
print("model Mean Absolute Error: {}".format(round(mean_absolute_error(y_test, y_pred))))

# +
# Decision Tree Regressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV

parameters = {
    'max_depth': [2, 4, 6, 8],
    'min_samples_split': [0.001, 0.01, 0.05, 0.1, 0.2],
}
dt = DecisionTreeRegressor()
grid = GridSearchCV(dt, parameters)
grid.fit(X_train, y_train)

# summarize the results of the grid search
print("The best score is {}".format(grid.best_score_))
print("The best hyper parameter setting is {}".format(grid.best_params_))

# +
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error

y_pred = grid.predict(X_test)
print("model Root Mean Squared Error: {}".format(round(np.sqrt(mean_squared_error(y_test, y_pred)))))
print("model Mean Absolute Error: {}".format(round(mean_absolute_error(y_test, y_pred))))

# +
# Random Forest Regressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV

parameters = {
    'max_depth': [2, 4, 6, 8],
    'min_samples_split': [0.001, 0.01, 0.05, 0.1, 0.2],
}
rf = RandomForestRegressor()
grid = GridSearchCV(rf, parameters)
grid.fit(X_train, y_train)

# summarize the results of the grid search
print("The best score is {}".format(grid.best_score_))
print("The best hyper parameter setting is {}".format(grid.best_params_))

# +
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error

y_pred = grid.predict(X_test)
print("model Root Mean Squared Error: {}".format(round(np.sqrt(mean_squared_error(y_test, y_pred)))))
print("model Mean Absolute Error: {}".format(round(mean_absolute_error(y_test, y_pred))))
# -

# Predictions
# Price a hypothetical car with the best random-forest model from above.
x = {'Mileage':10000,
     'Cylinder':4,
     'Doors':2,
     'Cruise':1,
     'Sound':1,
     'Leather':1}
new_car = pd.DataFrame.from_dict(data=x,orient='index').T
y_pred = grid.predict(new_car)
y_pred
module10/kevin/HW10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Annotate a GNPS molecular network with ClassyFire chemical classes
# (pyMolNetEnhancer workflow) and export a Cytoscape-ready graphML.

# Download GNPS data <br>
# Replace Job ID below with your GNPS job ID:

# ! curl -d "" 'https://gnps.ucsd.edu/ProteoSAFe/DownloadResult?task=b817262cb6114e7295fee4f73b22a3ad&view=download_cytoscape_data' -o GNPS_output_graphML.zip

# ! unzip -d GNPS_output_graphML/ GNPS_output_graphML.zip

# specify your NAP job ID:
nap_id = 'c4bb6b8be9e14bdebe87c6ef3abe11f6'

# load libraries
from pyMolNetEnhancer import *
import pandas as pd
import os
import csv
import json
from networkx import *

path = '/Applications/MarvinSuite/bin/' #path to MarvinSuite's molconvert used to convert SMILES to InchiKeys
os.environ['PATH'] += ':'+path

# Different GNPS workflow versions use different folder layouts; pick the
# cluster-summary and library-hit files from whichever layout is present.
if 'clusterinfo_summary' in os.listdir('GNPS_output_graphML/') and 'DB_result' in os.listdir('GNPS_output_graphML/'):
    netfile = 'GNPS_output_graphML/clusterinfo_summary/' + str(os.listdir('GNPS_output_graphML/clusterinfo_summary/')[0])
    gnpslibfile = 'GNPS_output_graphML/DB_result/'+ str(os.listdir('GNPS_output_graphML/DB_result/')[0])
elif 'clusterinfosummarygroup_attributes_withIDs_withcomponentID' in os.listdir('GNPS_output_graphML/'):
    netfile = 'GNPS_output_graphML/clusterinfosummarygroup_attributes_withIDs_withcomponentID/' + str(os.listdir('GNPS_output_graphML/clusterinfosummarygroup_attributes_withIDs_withcomponentID/')[0])
    gnpslibfile = 'GNPS_output_graphML/result_specnets_DB/'+ str(os.listdir('GNPS_output_graphML/result_specnets_DB/')[0])
else:
    netfile = 'GNPS_output_graphML/clusterinfosummary/' + str(os.listdir('GNPS_output_graphML/clusterinfosummary/')[0])
    gnpslibfile = 'GNPS_output_graphML/result_specnets_DB/'+ str(os.listdir('GNPS_output_graphML/result_specnets_DB/')[0])

# NAP node-attribute table, fetched straight from the ProteoSAFe server.
nap = pd.read_csv("http://proteomics2.ucsd.edu/ProteoSAFe/DownloadResultFile?task="+nap_id+"&block=main&file=final_out/node_attributes_table.tsv", sep = "\t")
gnpslib = pd.read_csv(gnpslibfile, sep='\t')

# add all chemical structural information output as dataframe items in list
matches = [gnpslib, nap]

out = unique_smiles(matches)

out['df'].to_csv("SMILES.csv", quoting=csv.QUOTE_NONE, escapechar='&')

# convert SMILES to InchiKeys
# ! molconvert inchikey:SAbs SMILES.csv{csv:strucSMILES} -o InchiKeys.txt

# +
ikeys = pd.read_csv("InchiKeys.txt", sep='\t',header = None)
out['df']["inchikey"] = ikeys
inchi_dic = make_inchidic(out)

ikeys.columns = ["InChIKey"]
ikeys.to_csv("InchiKeys.txt", quoting=csv.QUOTE_NONE, escapechar='&')
# -

# retrieve ClassyFire classifications
get_classifications("InchiKeys.txt")

# +
# get_classifications writes its results to all_json.json; load and flatten.
with open("all_json.json") as tweetfile:
    jsondic = json.loads(tweetfile.read())

df = make_classy_table(jsondic)
# 'class' clashes with the Python keyword downstream; rename defensively.
df = df.rename(columns = {'class':'CF_class','smiles':'SMILES'})

net = pd.read_csv(netfile, sep='\t')

final = molfam_classes(net,df,inchi_dic)
# -

final.head()

# write ClassyFire results per molecular family to file. To visualize chemical classes within the mass spectral molecular network, this file can be imported as table into Cytoscape.
final.to_csv("ClassyFireResults_Network.txt", sep = '\t', index = False)

# how many InChIKeys could not be classified? (percentage of total submitted InChIKeys)
len(set(list(ikeys.InChIKey)) - set(list(df.inchikey)))/len(set(list(ikeys.InChIKey)))

# which InChIKeys could not be classified?
set(list(ikeys.InChIKey)) - set(list(df.inchikey))

# create graphML file
# NOTE(review): `nx` is never bound here — `from networkx import *` does not
# create the `nx` alias, so nx.write_graphml will NameError unless an
# `import networkx as nx` exists elsewhere; confirm and fix the import.
if any("FEATURE" in s for s in os.listdir('GNPS_output_graphML/')):
    graphMLfile = 'GNPS_output_graphML/' + [x for x in os.listdir('GNPS_output_graphML/') if 'FEATURE' in x][0]
    graphML = read_graphml(graphMLfile)
    graphML_classy = make_classyfire_graphml(graphML,final)
    nx.write_graphml(graphML_classy, "ClassyFireResults_Network.graphml", infer_numeric_types = True)
elif any("METABOLOMICS" in s for s in os.listdir('GNPS_output_graphML/')):
    graphMLfile = 'GNPS_output_graphML/' + [x for x in os.listdir('GNPS_output_graphML/') if 'METABOLOMICS' in x][0]
    graphML = read_graphml(graphMLfile)
    graphML_classy = make_classyfire_graphml(graphML,final)
    nx.write_graphml(graphML_classy, "ClassyFireResults_Network.graphml", infer_numeric_types = True)
else:
    print('There is no graphML file for this GNPS molecular network job')
Example_notebooks/ChemicalClasses_2_Network_FeatureBased.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # (As of September 3rd, this is just for organization and not at all complete or accurate.) # # Week 0 # Thursday: # * Basic navigation in the Jupyter notebook. Code cells vs markdown cells. # * Importing external libraries. # * Some basic data structures: range, lists, tuples, sets, numpy arrays. What are some similarities and differences? # * Two basic NumPy commands: arange and linspace. # * Timing in Jupyter. # # Friday: # * Practice reading error messages and documentation. # * for loops and if statements. Importance of indentation. # * Iterators, Iterable via error messages # * NumPy arrays # * Slicing and indexing # # Week 1 # Topics: # * more data types: int, str, float, Boolean # * Introduce iterable, hashable, mutable, immutable via error messages and documentation. # * Documentation (the difference between extend and append, sorted, range, np.zeros, np.empty, difference between keyword arguments and positional arguments.) # * while loops # * Dictionaries # * Counters, dictionaries # * list comprehension # * Writing a function: square root, prime numbers, modular arithmetic # * Introduction to Jupyter Notebooks, lists and things similar to lists. # * Introduction to Jupyter Notebooks/Anaconda/Spyder/Terminal # * Comparison to Matlab and Mathematica # * for loops/while loops/if statements # * Lists # * Prime numbers # * Reading documentation: sorted, range, np.zeros, np.empty, difference between keyword arguments and positional arguments. 
# * Every built-in function in Python
# * Loading external libraries
# * Introduction to NumPy
# * Dictionaries
# * Counting in Python
# * Probability simulation
# * Image processing with Pillow and NumPy

# <p style="font-size:20px; color:blue; font-weight:bold">Question 3:</p>
#
# Complete the following code so that the function `my_positive_root(x)` returns a value of y such that $|y - \sqrt[3]{x}| \leq .001.$  You are not allowed to import any libraries.  You are only allowed to use integer exponents, like `a**3`, not `a**(1/3)`.  You should assume $x > 0$.
#
#     def my_positive_root(x):
#         a = 0
#         while ???:
#             a = a + .001
#         return ???
#
# <p style="font-size:20px; color:blue; font-weight:bold">Question 4:</p>
#
# Write a new function, `my_cube_root(x)`, that also works for negative values of x.  Use an `if` statement and your function from up above.  You should literally be typing `my_positive_root`; do not copy and paste your code.

def replace_elts(A):
    """Replace every entry of the 2-D array A that is < 10 with -2, in place.

    Parameters
    ----------
    A : numpy.ndarray
        Two-dimensional numeric array. Modified in place.

    Returns
    -------
    numpy.ndarray
        The same array object A (returned for convenience).
    """
    # Preserve the original 2-D contract: this unpack raises for other ranks.
    m, n = A.shape
    # Boolean-mask assignment replaces the Python double loop with one
    # vectorized C-level pass; behavior is identical for 2-D input.
    A[A < 10] = -2
    return A

# # Week 2
# Topics:
# * Introduction to pandas
# * Strings

# # Week 3 - Plotting
# Topics:
# * Matplotlib
# * Seaborn
# * Altair
# * Plotting from a pandas DataFrame

# # Week 4
# Topics:
# * Streamlit

# # Week 5
# Topics:
# * scikit-learn

# # Week 6
# Topics:
# * Regular expressions

# # Week 7
# (No Thursday class for Veteran's Day.)
#
# Topics:
# * Keras
# * Handwritten digit recognition

# # Week 8

# # Week 9
# (No Thursday or Friday class for Thanksgiving.)

# # Week 10

# # Finals Week
Drafts/Schedule.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="LmTRIfgW42c7" # # A Primer on Artificial Intelligence in Plant Digital Phenomics: Embarking on the Data to Insights Journey (*Exercises*) # + [markdown] id="XrceqdN94lkt" # This notebook is a supplement to the paper **A Primer on Artificial Intelligence in Plant Digital Phenomics: Embarking on the Data to Insights Journey** (submitted to *Trends in Plant Science, 2021*) by <NAME>, <NAME>, <NAME>, <NAME>. # Harfouche, <NAME>, and <NAME>. # # Read the accompanying paper [here](https://doi.org). # + [markdown] id="cOQ9Xxkf40AM" # Before attempting to solve the exercises found in this notebook, visit our Github repository and try to open and run the notebook provided by the tutorial. # # Here, the solution for each exercise can be found in a hidden code cell at its end. # # Interested users should try to solve the exercises with the help of the notebook provided by the tutorial before looking at the solution. # + [markdown] id="6vtVIEPE48DH" # **It is important to note that Colab deletes all unsaved data once the instance is recycled. Therefore, remember to download your results once you run the code.** # + [markdown] id="TaYNvHN37-GO" # #Exercise I: Dataset Preparation # + [markdown] id="igjrPSO68HY9" # The next code block defines a function that downloads files from Google Drive based on the file ID. # # Use this function to download the cassava dataset hosted on Google Drive at https://drive.google.com/file/d/13jwC684Sg1wWLhF7SjPIlsfJNuKqJ_IQ/view?usp=sharing. 
# + id="RSUhmoKj8BDc"
import requests

def download_file_from_google_drive(id, destination):
    """Download a (possibly large) Google Drive file to `destination`.

    Large files trigger a virus-scan interstitial; Google signals this via a
    `download_warning*` cookie whose value must be echoed back as `confirm=`.
    """
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()

    response = session.get(URL, params={'id': id}, stream=True)
    token = get_confirm_token(response)
    if token:
        # Re-request with the confirmation token to bypass the warning page.
        response = session.get(URL, params={'id': id, 'confirm': token}, stream=True)

    save_response_content(response, destination)

def get_confirm_token(response):
    """Return Google's download-confirmation cookie value, or None."""
    return next(
        (value for key, value in response.cookies.items()
         if key.startswith('download_warning')),
        None,
    )

def save_response_content(response, destination):
    """Stream the response body to `destination` in 32 KiB chunks."""
    CHUNK_SIZE = 32768
    with open(destination, "wb") as out_file:
        for chunk in response.iter_content(CHUNK_SIZE):
            if not chunk:  # skip keep-alive chunks
                continue
            out_file.write(chunk)

# + id="kddEfyi18rw5"
### WRITE YOUR CODE HERE ###

# + [markdown] id="U6iAHx5M8bKe"
# #Solution

# + id="vhQLvy8a4Rli"
file_id = '13jwC684Sg1wWLhF7SjPIlsfJNuKqJ_IQ'
destination = '/content/dataset.zip'
download_file_from_google_drive(file_id, destination)

# + [markdown] id="NzryC1Fj85KG"
# # Exercise II: Data Extraction

# + [markdown] id="jUtgOUQJ9DGO"
# Complete the code using the unzip command to extract the dataset to /content/dataset/
#

# + id="G8rhEWqD9K8h"
# !mkdir /content/dataset
# !apt-get install unzip
### Write YOUR CODE HERE ###

# + [markdown] id="-PDwslA_9OHA"
# #Solution

# + id="YS2l7bHa9Suq"
#unzip dataset
# !mkdir /content/dataset
# !apt-get install unzip
# !unzip /content/dataset.zip -d /content/dataset/
# !rm -R /content/dataset.zip #save some space

# + [markdown] id="hj5cb0LJ9bbp"
# #Exercise III: Descriptive Data Analysis

# + [markdown] id="ocsaesac94Bv"
# The next code block will count all images in every class in the training dataset and bar plot will be used to display the results. This helps us discover whether or not our training dataset is balanced over all classes.
# + id="GA8vgEQG93lX"
import numpy as np
import pandas as pd
import os
import shutil
import cv2
import matplotlib.pyplot as plt
import seaborn as sns

# Count images per class in the training split and bar-plot the counts to
# check for class imbalance.
train_dir = '/content/dataset/cdsv5/train/'
train_classes = [path for path in os.listdir(train_dir)]
train_imgs = dict([(ID, os.listdir(os.path.join(train_dir, ID))) for ID in train_classes])
train_classes_count = []
for trainClass in train_classes:
    train_classes_count.append(len(train_imgs[trainClass]))
plt.figure(figsize=(15, 10))
g = sns.barplot(x=train_classes, y=train_classes_count)
g.set_xticklabels(labels=train_classes, rotation=30, ha='right')

# + [markdown] id="kljflJAG-Iir"
# Use the code above to check the distribution of the augmented training dataset located under /content/dataset/cdsv5/train_aug

# + id="3U1AAtdh-QJs"
### WRITE YOUR CODE HERE ###

# + [markdown] id="LuMFlpnk-PI9"
# #Solution

# + id="fHVnQZ7W-YJG"
# Same per-class count, now for the augmented training split.
train_dir = '/content/dataset/cdsv5/train_aug/'
train_classes = [path for path in os.listdir(train_dir)]
train_imgs = dict([(ID, os.listdir(os.path.join(train_dir, ID))) for ID in train_classes])
train_classes_count = []
for trainClass in train_classes:
    train_classes_count.append(len(train_imgs[trainClass]))
plt.figure(figsize=(15, 10))
g = sns.barplot(x=train_classes, y=train_classes_count)
g.set_xticklabels(labels=train_classes, rotation=30, ha='right')

# + [markdown] id="DSeQtm2u-oF_"
# #Exercise IV: Cloning a GitHub Repository

# + [markdown] id="HoKnoE4T-s3y"
# Our implementation of the 'this looks like that' explainable by design AI algorithm is hosted on our GitHub repository at https://github.com/HarfoucheLab/A-Primer-on-AI-in-Plant-Digital-Phenomics.
#
# Clone this repository under /content to obtain the code so that you can use it later on to train a model.

# + id="QgtodbAa-9Wc"
### YOUR CODE HERE ###

# + [markdown] id="bPCA-oXl-9HW"
# #Solution

# + id="pcORqc_I_AMR"
# !git clone https://github.com/HarfoucheLab/A-Primer-on-AI-in-Plant-Digital-Phenomics.git

# + [markdown] id="7xxhp583_E-U"
# #Exercise V: Configuring 'This Looks Like That'

# + [markdown] id="84eoxWi_settings"
# As in any AI algorithm, 'this looks like that' requires some hyperparameters to be set before running it. To satisfy these requirements, we save all hyperparameters in a file called settings.py located under /content/A-Primer-on-AI-in-Plant-Digital-Phenomics/settings.py.
#
# The settings are defined in a string variable in the next code block.
#
# Complete the code that saves these settings to settings.py.

# + id="i4_-GbHx_hFi"
# Hyperparameters for the 'this looks like that' run, written verbatim to
# settings.py by the next cell.
settings = """base_architecture = 'densenet161'
img_size = 224
prototype_shape = (2000, 128, 1, 1)
num_classes = 5
prototype_activation_function = 'log'
add_on_layers_type = 'regular'
experiment_run = '001'
data_path = '/content/dataset/cdsv5/'
train_dir = data_path + 'train_aug/'
test_dir = data_path + 'val/'
train_push_dir = data_path + 'train/'
train_batch_size = 40 #80
test_batch_size = 40
train_push_batch_size = 64
num_workers=3
min_saving_accuracy=0.05
joint_optimizer_lrs = {'features': 1e-4, 'add_on_layers': 3e-3, 'prototype_vectors': 3e-3}
joint_lr_step_size = 5
warm_optimizer_lrs = {'add_on_layers': 3e-3, 'prototype_vectors': 3e-3}
last_layer_optimizer_lr = 1e-4
coefs = { 'crs_ent': 1, 'clst': 0.8, 'sep': -0.08, 'l1': 1e-4, }
num_train_epochs = 1000
num_warm_epochs = 5
push_start = 10
push_epochs = [i for i in range(num_train_epochs) if i % 10 == 0]
"""

# + id="y1zkkykg_roH"
text_file = open("/content/A-Primer-on-AI-in-Plant-Digital-Phenomics/py/settings.py", "w")
### YOUR CODE HERE ###
text_file.close()

# + [markdown] id="WZ_EcY37_v39"
# #Solution

# + id="VJe5XbCQ_w1r"
text_file = open("/content/A-Primer-on-AI-in-Plant-Digital-Phenomics/py/settings.py", "w")
n = text_file.write(settings)
text_file.close()

# + [markdown] id="U3DI7Yf8Axzu"
# #Exercise VI: Training 'This Looks Like That'

# + [markdown] id="VOcjZc1pA1xi"
# Now with the settings all set, run the code located in mainDistributed.py under /content/A-Primer-on-AI-in-Plant-Digital-Phenomics/py/ to start the training process.
#
# Hint: use 1 node, 1 gpu, and set nr to 0.

# + id="7Imu4CVvA0ck"
# %cd /content/A-Primer-on-AI-in-Plant-Digital-Phenomics/py/
### WRITE YOUR CODE HERE ###

# + [markdown] id="mZ35u5obBHMU"
# #Solution

# + id="so4ISLMzA-PW"
# %cd /content/A-Primer-on-AI-in-Plant-Digital-Phenomics/py/
# !python3 mainDistributed.py --nodes 1 --gpus 1 --nr 0

# + [markdown] id="9GjNMBqKBOBi"
# #Exercise VII: Using a Pretrained Model

# + [markdown] id="9czbDy6oBQWW"
# The next codeblock will download a pretrained model which will be extracted to /content/pretrained/ and a testing dataset which will be extracted to /content/dataset/cdsv5/test/.

# + id="8dOFUKILBP7p"
# %cd /content/
file_id = '12ugCaMfPdylDPPmfqzoOMWtB55k0L9tL'
destination = '/content/pretrained.zip'
download_file_from_google_drive(file_id, destination)
# NOTE(review): the next line is missing the '!' shell prefix, so this mkdir
# never runs as a command; unzip -d creates the directory anyway.
# mkdir /content/pretrained
# !unzip /content/pretrained.zip -d /content/pretrained/
# !rm -R /content/pretrained.zip

file_id = '1Ruy2At0G3oLlA1Gb9gz1-aMpcfJ6653B'
destination = '/content/dataset_test.zip'
download_file_from_google_drive(file_id, destination)
# !unzip /content/dataset_test.zip -d /content/dataset/cdsv5/
# !rm -R /content/dataset_test.zip

# + [markdown] id="78NMR3DKBi3l"
# Complete the code below to use the downloaded pretrained model and test dataset to test the model performance and generate the confusion matrix.
# # Hint: The python file for the the testing and confusion matrix generation is located under /content/A-Primer-on-AI-in-Plant-Digital-Phenomics/py/RunTestAndConfusionMatrix.py # + id="7ea2DAwwBsxU" # %cd /content/A-Primer-on-AI-in-Plant-Digital-Phenomics/py/ ### YOUR CODE HERE ### # + [markdown] id="HpLZKyKgB49I" # #Solution # + id="34pGjXN4B6Vj" # %cd /content/A-Primer-on-AI-in-Plant-Digital-Phenomics/py/ # !python3 RunTestAndConfusionMatrix.py # %cd /content/ # + [markdown] id="uTTN-uU9B811" # #Exercise VIII: Generating the Confusion Matrix # + [markdown] id="iqRLO2rxCDkK" # Testing the pretrained model should have generated a PNG file containing the confusion matrix located under /content/confusion_matrix.png. # # Complete the below code to display the confusion matrix. # + id="3KgxML6wB_Sa" import matplotlib.pyplot as plt import matplotlib.image as mpimg img = mpimg.imread('/content/confusion_matrix.png') ### YOUR CODE HERE ### plt.show() # + [markdown] id="ORyGBYarCRy5" # #Solution # + id="rvdJj1uZCS5e" import matplotlib.pyplot as plt import matplotlib.image as mpimg img = mpimg.imread('/content/confusion_matrix.png') plt.figure(figsize=(10,10)) plt.imshow(img) plt.show() # + [markdown] id="_nCADrPuCYDP" # # Exercise IX: Explaning Predictions # + [markdown] id="GNpNTaCrCbZ6" # Explanations to predictions are generated by running a local analysis on a specific image. To do so, the local_analysis.py python file located under /content/A-Primer-on-AI-in-Plant-Digital-Phenomics/py/ is used. # # + id="UbThzOEaCq3S" # !python3 /content/A-Primer-on-AI-in-Plant-Digital-Phenomics/py/local_analysis.py -modeldir /content/pretrained/ -model 240_12push0.8884.pth -imgdir /content/dataset/cdsv5/test/1/ -img 931787054.jpg -imgclass 1 # + [markdown] id="HsZqri0FCuVb" # Running the above codeblock should have generated many images under /content/dataset/cdsv5/test/1/pretrained/240_12push0.8884.pth/top-1_class_prototypes. 
# # Complete the python code below to display the best explanation generated for the prediction of the test image located under /content/dataset/cdsv5/test/1/931787054.jpg # + id="lUgObGgsDFeT" img = mpimg.imread('/content/dataset/cdsv5/test/1/pretrained/240_12push0.8884.pth/top-1_class_prototypes/prototype_activation_map_by_top-1_prototype.png') ### WRITE YOUR CODE HERE ### # + [markdown] id="TyBT5gRjDMxw" # #Solution # + id="Zj5S99mkDMS1" img = mpimg.imread('/content/dataset/cdsv5/test/1/pretrained/240_12push0.8884.pth/top-1_class_prototypes/prototype_activation_map_by_top-1_prototype.png') plt.figure(figsize=(10,10)) plt.imshow(img) plt.show() # + [markdown] id="l3Gi7EaSDMEQ" # #Exercise X: Displaying the Prototypes # + [markdown] id="zzqX3uK8DP-I" # Complete the next codeblock to display the best two prototypes used for the classification on the above explained image. # + id="INsIqmKFDZrQ" img = mpimg.imread('/content/dataset/cdsv5/test/1/pretrained/240_12push0.8884.pth/top-1_class_prototypes/top-1_activated_prototype.png') img2 = mpimg.imread('/content/dataset/cdsv5/test/1/pretrained/240_12push0.8884.pth/top-1_class_prototypes/top-2_activated_prototype.png') ### WRITE YOUR CODE HERE ### # + [markdown] id="a4ROngg1DjK3" # #Solution # + id="KR3arYTTDgGM" img = mpimg.imread('/content/dataset/cdsv5/test/1/pretrained/240_12push0.8884.pth/top-1_class_prototypes/top-1_activated_prototype.png') plt.figure(figsize=(3,3)) plt.imshow(img) plt.show() img = mpimg.imread('/content/dataset/cdsv5/test/1/pretrained/240_12push0.8884.pth/top-1_class_prototypes/top-2_activated_prototype.png') plt.figure(figsize=(3,3)) plt.imshow(img) plt.show() img = mpimg.imread('/content/dataset/cdsv5/test/1/pretrained/240_12push0.8884.pth/top-1_class_prototypes/top-17_activated_prototype.png') plt.figure(figsize=(3,3)) plt.imshow(img) plt.show()
Exercise_Novice.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Cleaning Up the Titanic Manifest dataset

# The inspiration for using this dataset came from the following Kaggle competition: https://www.kaggle.com/c/titanic
#
# The data in Kaggle requires a Kaggle user and entering the competition, but it is also publicly available at http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3.xls
#
# ## Importing Packages

import numpy as np
import pandas as pd

# ## Downloading Dataset

# +
import os
import subprocess
import requests
import tqdm.notebook as tqdm

original_dataset_file = '../../static/datasets/original/titanic3.csv'
dataset_file = '../../static/datasets/titanic_manifest.csv'

if not os.path.isfile(original_dataset_file):
    # NOTE(review): biostat.mc.vanderbilt.edu has been migrated in the past;
    # verify the URL still serves titanic3.csv before relying on this step.
    response = requests.get('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3.csv', stream=True)
    with open(original_dataset_file, 'wb') as fid:
        # Default to 0 when the server omits Content-Length so the download
        # still proceeds (tqdm simply cannot show a meaningful total).
        total_length = int(response.headers.get('content-length', 0))
        # BUG FIX: `tqdm.notebook` is a *module* and is not callable; the
        # progress-bar class is `tqdm.notebook.tqdm`.
        for chunk in tqdm.tqdm(response.iter_content(chunk_size=1024),
                               desc='Downloading',
                               total=total_length // 1024 + 1):
            if chunk:
                fid.write(chunk)
                fid.flush()
# -

# ## Loading the Dataset

full_dataset = pd.read_csv(original_dataset_file)

# ## Displaying the First 10 Rows of the Dataset

print('Dataset size: {}'.format(len(full_dataset)))
full_dataset.head(10)

# ## Cleaning Up the Data

# +
dataset = full_dataset.copy()  # Create a copy of the data

## Translate the passenger Sex column into a numeric value (0 for male and 1 for female)
def return_numeric_sex(sex):
    """Map 'male' -> 0, 'female' -> 1 and anything else (incl. NaN) -> NaN."""
    if sex == 'male':
        return 0
    elif sex == 'female':
        return 1
    else:
        return np.nan

dataset['numeric_sex'] = dataset['sex'].apply(return_numeric_sex)

## Filter the data: the >= 0 comparisons are False for NaN, so these three
## conditions also throw out rows with missing values.
dataset = dataset.query(
    'age >= 0 &'
    + 'pclass >= 0 &'
    + 'numeric_sex >= 0')
dataset = dataset.loc[dataset['embarked'].notna()]

## Remove rows with non-integer age (fractional ages are infant records)
dataset = dataset.loc[np.modf(dataset['age'].values)[0] < 1e-6]
# BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in NumPy 1.24;
# the builtin `int` is the documented replacement.
dataset['age'] = dataset['age'].astype(int)

print('Dataset size: {}'.format(len(dataset)))
dataset.head(10)
# -

# ## Save the Clean Dataset

dataset.to_csv(dataset_file, index=False)
content/datasets_generation/titanic_manifest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="Oerk1EtzsEqC" # + colab={"base_uri": "https://localhost:8080/", "height": 919} colab_type="code" id="ihCwnWvH-crR" outputId="406eb8d3-56e7-4f6b-ff57-7daedeb6af18" # !wget "http://www.philharmonia.co.uk/assets/audio/samples/trumpet/trumpet_A4_05_forte_normal.mp3" # !wget "http://www.philharmonia.co.uk/assets/audio/samples/flute/flute_A5_05_pianissimo_normal.mp3" # !wget "http://www.philharmonia.co.uk/assets/audio/samples/snare_drum/flute_A5_05_pianissimo_normal.mp3" # !wget "http://www.philharmonia.co.uk/assets/audio/samples/flute/flute_A6_05_pianissimo_normal.mp3" # + colab={"base_uri": "https://localhost:8080/", "height": 201} colab_type="code" id="6V-_hGH_Al6P" outputId="50bee201-a6a1-4d07-a8a8-e53bf7d708a0" import librosa import numpy as np import sklearn.decomposition import matplotlib.pyplot as plt y, sr = librosa.load("prueba_jaja.wav") from scipy import signal as sg #function to find the fundamental pitch frequency counting zeroes #From https://www.kaggle.com/asparago/simple-pitch-detector def find_fundamental(y, sr): signal = y #one should be careful in deciding if it is worth analysing the entire record or #just chunks of it, and excluding more noisy parts #signal=signal[:len(signal)/2] rate = sr #wf.getframerate() swidth = len(y) # wf.getsampwidth() #first of all we remove the horizontal offset signal = signal - np.mean(signal) #now we calculate the autocorrelation of the signal against itself but inverted in time #and we throw away negative lags corr = sg.fftconvolve(signal, signal[::-1], mode='full') corr = corr[int(len(corr)/2):] diff = np.diff(corr) n = [i for i in range(0,len(diff)) if diff[i]>0][0] peak = np.argmax(corr[n:]) + n return rate/peak def separate(y, sr): S1 = librosa.stft(y) S = 
np.abs(S1) print("antes") print(np.sum(S1)) print(np.sum(S)) # comps, acts = librosa.decompose.decompose(S, n_components=2, sort=True) T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=6) comps, acts = librosa.decompose.decompose(S, transformer=T, n_components=6, sort=True) print(comps.shape) print(acts.shape) # comps = comps.transpose() # comp0 = comps[1,:].reshape(1025, 1) # act0 = acts[:, 1].reshape(1, 6) # print(comp0.shape) # print(act0.shape) S_approx = comps.dot(acts) # res = librosa.amplitude_to_db(S_approx,ref=np.max) resultado = librosa.istft(S_approx) librosa.output.write_wav("test_resultado" + str(0), resultado, sr) actuscopu = acts.copy() acts[1, :] = 0 # acts[0, :] = 0 S_approx = comps.dot(acts) # res = librosa.amplitude_to_db(S_approx,ref=np.max) print("saprox") print(np.sum(S_approx)) resultado = librosa.istft(S_approx) librosa.output.write_wav("test_resultado_matando_1" + str(0), resultado, sr) # acts[1, :] = 0 actuscopu[0, :] = 0 S_approx = comps.dot(actuscopu) # res = librosa.amplitude_to_db(S_approx,ref=np.max) print("saprox") print(np.sum(S_approx)) resultado = librosa.istft(S_approx) librosa.output.write_wav("test_resultado_matando_0" + str(0), resultado, sr) # for n, algo in enumerate(comps): # n_fft = sr # D = np.abs(librosa.stft(algo[:n_fft], n_fft=n_fft, hop_length=n_fft+1)) # plt.plot(D) # plt.show() # librosa.output.write_wav("test_" + str(n), algo, sr) separate(y, sr) # plt.figure(figsize=(10,8)) # plt.subplot(3, 1, 1) # librosa.display.specshow(librosa.amplitude_to_db(S, # ref=np.max), # y_axis='log', x_axis='time') # plt.title('Input spectrogram') # plt.colorbar(format='%+2.0f dB') # plt.subplot(3, 2, 3) # librosa.display.specshow(librosa.amplitude_to_db(comps, # ref=np.max), # y_axis='log') # plt.colorbar(format='%+2.0f dB') # plt.title('Components') # plt.subplot(3, 2, 4) # librosa.display.specshow(acts, x_axis='time') # plt.ylabel('Components') # plt.title('Activations') # plt.colorbar() # plt.subplot(3, 1, 3) # 
S_approx = comps.dot(acts) # librosa.display.specshow(librosa.amplitude_to_db(S_approx, # ref=np.max), # y_axis='log', x_axis='time') # plt.colorbar(format='%+2.0f dB') # plt.title('Reconstructed spectrogram') # plt.tight_layout() # plt.show() print(find_fundamental(y, sr)) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="_aocq6zYA_HK" outputId="86b87d4c-739e-4448-dea3-97b67f325be6" from librosa import display import matplotlib.pyplot as plt separate(y, sr) # Tgram = librosa.feature.tempogram(y=y, sr=sr) # librosa.display.specshow(Tgram, x_axis='time', y_axis='tempo') # plt.colorbar() # plt.title('Tempogram') # plt.tight_layout() # plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 389} colab_type="code" id="NUD2wJyos-g4" outputId="a730107a-bec9-4dc0-ad80-9301ddc34ab5" from librosa import display import matplotlib.pyplot as plt import numpy as np timeArray = list(range(0, len(y))) timeArray = list(map(lambda x: x/sr, timeArray)) # dividido por sampling rate timeArray = list(map(lambda x: x * len(y), timeArray)) # multiplicado por la cantidad de samples para escalar a milisegundos tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1) f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr) h_range = [1, 2, 3, 4, 5] t_harmonics = librosa.core.interp_harmonics(tempi, f_tempo, h_range) # print(t_harmonics.shape) print(np.sum(t_harmonics[0])) print(np.sum(t_harmonics[1])) print(np.sum(t_harmonics[2])) print(np.sum(t_harmonics[3])) print(np.sum(t_harmonics[4])) residuo = np.sum(t_harmonics[0]) fundamental = np.sum(t_harmonics[1]) harmonics_sum = np.sum(t_harmonics[2]) + np.sum(t_harmonics[3]) + np.sum(t_harmonics[4]) # print(harmonics_sum) # print(fundamental) # print( (harmonics_sum * 100) / fundamental ) # print(t_harmonics[0]) plt.figure() librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr) plt.yticks(0.5 + np.arange(len(h_range)), ['{:.3g}'.format(_) for _ in h_range]) plt.ylabel('Harmonic') 
plt.xlabel('Tempo (BPM)') plt.tight_layout() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 407} colab_type="code" id="sbq3Uylr41o3" outputId="71fb48fd-ac8c-4d83-cfa8-050b0cc81d2f" from librosa import display import matplotlib.pyplot as plt import numpy as np timeArray = list(range(0, len(y))) timeArray = list(map(lambda x: x/sr, timeArray)) # dividido por sampling rate timeArray = list(map(lambda x: x * len(y), timeArray)) # multiplicado por la cantidad de samples para escalar a milisegundos S = np.abs(librosa.stft(y)) fft_freqs = librosa.fft_frequencies(sr=sr) h_range = [1, 2, 3, 4, 5] S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0) print(S_harm.shape) print(np.sum(t_harmonics[0])) print(np.sum(t_harmonics[1])) print(np.sum(t_harmonics[2])) print(np.sum(t_harmonics[3])) print(np.sum(t_harmonics[4])) plt.figure() for i, _sh in enumerate(S_harm, 1): plt.subplot(3, 2, i) librosa.display.specshow(librosa.amplitude_to_db(_sh, ref=S.max()), sr=sr, y_axis='log') plt.title('h={:.3g}'.format(h_range[i-1])) plt.yticks([]) plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="be8FdDzM_8bG" outputId="047754b6-3046-459c-d721-8a3150244860" # + colab={"base_uri": "https://localhost:8080/", "height": 219} colab_type="code" id="tqOTbk10Amay" outputId="6c52cb45-5ac6-456f-d0e9-ee03586648e4" pip install aubio # + colab={"base_uri": "https://localhost:8080/", "height": 193} colab_type="code" id="jksaJ3BAves2" outputId="2cfc5be0-3100-4600-e01e-a29cc7d52377"
Non-negative-matrices/Caracterizacion de Instrumentos/Notebooks/.ipynb_checkpoints/Prueba_separando_instr_con_no_negativ_matrix-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6.9 64-bit
#     language: python
#     name: python36964bitf63b11862b5b49fb80b3d6b9505d0be0
# ---

# Smoke test for the home-grown LinearRegression implementation: fit on the
# first 25 rows of ex1data2.txt, then inspect the score, predictions and theta.

import numpy as np
import pandas as pd

data = pd.read_csv('ex1data2.txt', header=None)
data.head()

# First column as the single feature, third column as the target.
x = data.iloc[:, 0:1]
y = data.iloc[:, 2]

# +
# Plain positional split (no shuffling): first 25 rows train, the rest test.
split = 25
xtrain, xtest = x[:split], x[split:]
ytrain, ytest = y[:split], y[split:]
# -

from model import LinearRegression as lr

model = lr.LinearRegression(xtrain, ytrain).fit()

sc = model.score()
sc

pred = model.predict(xtest)
pred[:5]

theta = model.theta_value()
theta
test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "notes"} # # Cruise Control: Frequency Domain Methods for Controller Design # + [markdown] slideshow={"slide_type": "notes"} # ## System model # # The transfer function model for the cruise control problem is given below. Please see the [Cruise Control: System Modeling](../CruiseControl/CruiseControl-SystemModeling.ipynb) page for the derivation. # # $$ # P(s) = \frac{V(s)}{U(s)} = \frac{1}{ms+b} \qquad [ \frac{m/s}{N} ] # $$ # + [markdown] slideshow={"slide_type": "notes"} # # # System parameters # # For this example, let's assume that the parameters of the system are # # ``` # (m) vehicle mass 1000 kg # (b) damping coefficient 50 N.s/m # (r) reference speed 10 m/s # (u) nominal control force 500 N # ``` # # and the block diagram of a typical unity feedback system is shown below. # # # # ![feedback_cruise.png](figures/feedback_cruise.png) # + [markdown] slideshow={"slide_type": "notes"} # ## Performance specifications # # - Rise time < 5 sec # - Overshoot < 10 # - Steady-state error < 2 # + [markdown] slideshow={"slide_type": "notes"} # ## Bode plot and open-loop response # # The first step in solving this problem using frequency response is to determine what open-loop transfer function to use. Just like for the Root-Locus design method, we will only use a proportional controller to solve the problem. The block diagram and the open-loop transfer function are shown below. # ![openloop_cruise.png](figures/openloop_cruise.png) # # # $$ # \frac{Y(s)}{E(s)} = \frac{K_p}{m s + b} \ # $$ # # In order to use a Bode plot, the open-loop response must be stable. Let $K_p$equal to 1 for now and see how the open-loop response looks like. 
# + import control import matplotlib.pyplot as plt import numpy as np import seaborn as sns # Generate Larger more readable plots sns.set( rc={ "axes.labelsize": 8, "axes.titlesize": 8, "figure.figsize": (4 * 1.618, 4), "figure.dpi": 200, } ) s = control.TransferFunction.s # - def pid(Kp=0, Ki=0, Kd=0): s = control.TransferFunction.s return Kp + Ki / s + Kd * s # + slideshow={"slide_type": "notes"} m = 1000 b = 50 u = 500 Kp = 1 P_cruise = 1 / (m * s + b) C = pid(Kp) sys = u * C * P_cruise T, yout = control.step_response(sys=sys) plt.plot(T, yout) plt.xlabel("Time (s)") plt.ylabel("Amplitude") plt.title("Step Response") plt.grid("on") # + [markdown] slideshow={"slide_type": "notes"} # As you can see, the open-loop system is stable; thus, we can go ahead and generate the Bode plot. Change the above m-file by deleting the |step| command and adding in the following command. # + slideshow={"slide_type": "notes"} mag, phase, omega = control.bode(C * P_cruise, plot=True, dB=True) mag_db = 20 * np.log10(mag) fig = plt.gcf() fig.suptitle("Bode Plot") fig.subplots_adjust(hspace=0) fig.axes[0].label_outer() # Zoom to data padding = 0.1 mag_range = np.max(mag_db) - np.min(mag_db) mag_max = np.max(mag_db) + padding * mag_range mag_min = np.min(mag_db) - padding * mag_range fig.axes[0].axis([omega[0], omega[-1], mag_min, mag_max]) # + [markdown] slideshow={"slide_type": "notes"} # ## Proportional controller # # Refer to the [Introduction: Frequency Domain Methods for Controller Design](../Introduction/Introduction-ControlFrequency.ipynb) page, and let's see what system characteristics we can determine from the above Bode plot. The steady-state error can be found from the following equation: # # $$ # \mathrm{ss \ error} = \frac{1}{1+M_{\omega \rightarrow 0}} \cdot 100\% \ # $$ # # For this system, the low frequency gain is -34dB = 0.02; therefore, the steady-state error should be 98%. We can confirm this by generating a closed-loop step response as follows. 
# + slideshow={"slide_type": "notes"} r = 10 sys_cl = control.feedback(C * P_cruise, 1) T, yout = control.step_response(sys=sys_cl) step_info = control.step_info(sys=sys_cl) plt.plot(T, yout) # Add steady state value as dashed line. plt.plot( [T[0], T[-1]], [step_info["SteadyStateValue"], step_info["SteadyStateValue"]], linestyle="dotted", ) ymax = step_info["SteadyStateValue"] * 1.1 plt.axis([T[0], T[-1], 0, ymax]) # Label axes. plt.xlabel("Time (s)") plt.ylabel("Amplitude") plt.title("Step Response") plt.grid("on") # + [markdown] slideshow={"slide_type": "notes"} # We need to increase the low frequency gain in order to improve the steady-state error. Specifically, the error needs to be < 2%; therefore, 1/(1+ $M_{w=0}$) < 0.02 $\Rightarrow$$M_{w=0}$ > 49 = 33.8 dB. Since, the low frequency gain is -34 dB and the steady-state error limit needs us to have a low frequency gain of 33.8 dB, to reach the desired steady-state error using proportional control only, requires a $K_p$> (34 dB + 33.8 dB) = 67.8 dB = 2455. Let's look at the Bode diagram of the compensated open-loop system. # + slideshow={"slide_type": "notes"} Kp = 2500 C = Kp control.bode(C * P_cruise, plot=True, dB=True) fig = plt.gcf() fig.suptitle("Bode Plot") fig.subplots_adjust(hspace=0) fig.axes[0].label_outer() # + [markdown] slideshow={"slide_type": "notes"} # As you can see from the Bode plot above, the low frequency magnitude is now, 34 dB. Now let's simulate the step response of the closed loop system with this gain. # + slideshow={"slide_type": "notes"} sys_cl = control.feedback(C * P_cruise, 1) # Plot Step Response T, yout = control.step_response(sys=r * sys_cl) step_info = control.step_info(sys=r * sys_cl) plt.plot(T, yout) # Add steady state value as dashed line. plt.plot( [T[0], T[-1]], [step_info["SteadyStateValue"], step_info["SteadyStateValue"]], linestyle="dotted", ) ymax = step_info["SteadyStateValue"] * 1.1 plt.axis([T[0], T[-1], 0, ymax]) # Label axes. 
plt.xlabel("Time (s)") plt.ylabel("Amplitude") plt.title("Step Response") plt.grid("on") # + [markdown] slideshow={"slide_type": "notes"} # The steady-state error meets the requirements; however, the rise time is much shorter than is needed and is unreasonable in this case since the car can not accelerate to 10 m/s in 2 sec. Therefore, we will try using a smaller proportional gain to reduce the control action required along with a lag compensator to reduce the steady-state error. # # ## Lag compensator # # If you take a look at the "Lag or Phase-Lag Compensator using Frequency Response" section of the Lead and Lag Compensator Design page, the lag compensator adds gain at the low frequencies while keeping the bandwidth frequency at the same place. This is actually what we need: Larger low frequency gain to reduce the steady-state error and keep the same bandwidth frequency to maintain the desired rise time. The transfer function of the lag controller is: # # $$ # C_{lag}(s) = \frac{s+z_0}{s+p_0} \ # $$ # # If you read the "Lag or Phase-Lag Compensator using Root-Locus" section in Lead and Lag Compensator Design page, the pole and the zero of a lag controller need to be placed close together. Also, it states that the steady-state error will be reduced by a factor of $z_0/p_0$. For these reasons, let $z_0$ equal 0.1 and $p_0$ equal 0.02. The proportional gain, $K_p$ = 1000 was chosen by trial-and-error. # + slideshow={"slide_type": "notes"} Kp = 1000 zo = 0.1 po = 0.02 C_lag = (s + zo) / (s + po) # control.bode(Kp * C_lag * P_cruise, plot=True, dB=True) fig = plt.gcf() fig.suptitle("Bode Plot") fig.subplots_adjust(hspace=0) fig.axes[0].label_outer() # + [markdown] slideshow={"slide_type": "notes"} # Let's confirm the performance by generating a closed-loop step response. 
# + sys_cl = control.feedback(Kp * C_lag * P_cruise, 1) T, yout = control.step_response(sys=r * sys_cl, T=np.arange(0, 20, 0.1)) step_info = control.step_info(sys=r * sys_cl, T=np.arange(0, 200, 1)) plt.plot(T, yout) # Add steady state value as dashed line. plt.plot( [T[0], T[-1]], [step_info["SteadyStateValue"], step_info["SteadyStateValue"]], linestyle="dotted", ) ymax = step_info["SteadyStateValue"] * 1.1 plt.axis([T[0], T[-1], 0, ymax]) # Label axes. plt.xlabel("Time (s)") plt.ylabel("Amplitude") plt.title("Step Response") plt.grid("on")
CruiseControl/CruiseControl_ControlFrequency.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Car price prediction model development

# +
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.pipeline import Pipeline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# '?' marks missing values in this dataset; parse it as NaN up front.
car_dataset = pd.read_csv('Automobile_data.csv', na_values='?')
# -

# <div class="alert alert-success alertdanger" style="margin-top: 20px">
# <h3> Functions for preprocessing, already explored in the EDA notebook in this repository. </h3>
#
# </div>
#

# +
def replace_by_mean(dataframe, column):
    """Impute NaNs in a numeric `column` with the column mean (in place)."""
    return dataframe[column].replace(np.nan, dataframe[column].mean(), inplace=True)


def replace_by_most_frequent(dataframe, column):
    """Impute NaNs in `column` with its most frequent value (in place)."""
    return dataframe[column].replace(np.nan, dataframe[column].mode()[0], inplace=True)


def replace_nulls(dataframe, columns):
    """Impute every listed column: mode for object dtype, mean for numeric."""
    for col in columns:
        if dataframe[col].dtype == 'object':
            replace_by_most_frequent(dataframe, col)
        else:
            replace_by_mean(dataframe, col)


def preprocess(data):
    """Fill every missing value of `data` in place and return it.

    (The unused `missing_dataset` snapshot from the original version was
    dropped; it was computed but never read.)
    """
    with_null_cols = list(data.columns[data.isna().any()])
    replace_nulls(data, with_null_cols)
    return data


# +
# Calling preprocess function to clean the data
cleaned_car = preprocess(car_dataset)
# -

# <div class="alert alert-success alertdanger" style="margin-top: 20px">
# <h3> Developing linear models. </h3>
#
# </div>
#

# ## 1. Simple Linear Regression

# +
# define function for simple linear regression
def simple_lm(X, y):
    """Fit ordinary least squares on (X, y) and return in-sample predictions."""
    lm = LinearRegression()
    lm.fit(X, y)
    return lm.predict(X)


pred_price = simple_lm(cleaned_car[['engine-size']], cleaned_car['price'])
# -

# ### Simple Linear regression Evaluation
# <p>
#     One way to evaluate the model is to visualize a <b>distribution plot</b>:
#     <ul>
#         <li> The <span style='color:red;'><b>red</b></span> plot represents the actual price.</li>
#         <li> The <span style='color:green;'><b>green</b></span> plot represents the predicted price.</li>
#     </ul>
#
# </p>

# +
# Distribution plot
plt.figure(figsize=(14, 8))
ax1 = sns.distplot(cleaned_car['price'], hist=False, color='r', label="Actual price")
sns.distplot(pred_price, hist=False, color='g', label="Predicted price", ax=ax1)
# -

# ## 2. Multiple variable Linear Regression

# +
# Defining pipeline for multiple linear regression
def multiple_linear_reg(X, y):
    """Scale -> degree-2 polynomial expansion -> ordinary least squares.

    Returns a tuple of (in-sample predictions, R^2 score on the training data).
    """
    in_steps = [('scale', StandardScaler()),
                ('polynomial', PolynomialFeatures(degree=2)),
                ('model', LinearRegression())]
    pipe = Pipeline(in_steps)
    pipe.fit(X, y)
    # BUG FIX: the original returned the bound method object `pipe.score`
    # instead of a score value; call it so callers receive the numeric R^2.
    return pipe.predict(X), pipe.score(X, y)
# -

numerical_data = cleaned_car.select_dtypes(exclude=['object'])
y = numerical_data.pop('price')

predicted_price, r_score = multiple_linear_reg(numerical_data, y)

# ### Multiple Linear regression Evaluation
# <p>
#     One way to evaluate the model is to visualize a <b>distribution plot</b>:
#     <ul>
#         <li> The <span style='color:red;'><b>red</b></span> plot represents the actual price.</li>
#         <li> The <span style='color:green;'><b>green</b></span> plot represents the predicted price.</li>
#     </ul>
#     The numeric attributes are good predictors for the price of a car.
# </p>

plt.figure(figsize=(14, 8))
sns.distplot(y, label='Actual Price', color='r', hist=False)
sns.distplot(predicted_price, label='Predicted Price', color='g', hist=False)

# <h3>Model Evaluation</h3>
#
# <p>The visual evaluation shows that the multiple regression is a better predictor than the simple one.</p>
Car price prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: env_melusine37 # language: python # name: env_melusine37 # --- import joblib import pandas as pd from tempfile import TemporaryDirectory, TemporaryFile # # NLP Tools tutorial # The **nlp_tools** subpackage offers classic NLP tools implemented as classes that will be used to preprocess an already cleaned text : # - a **Tokenizer class** : to split a sentence-like string into a list of sub-strings (tokens). # - a **Phraser class** : to transform common multi-word expressions into single elements (*new york* becomes *new_york*) # - an **Embedding class** : to represent of words in a lower dimensional vector space. # - a **Stemmer class**: to reduce words to their word stem. # ## Load data from melusine.data.data_loader import load_email_data df_emails = load_email_data(type="preprocessed") # ## The Tokenizer class # The Tokenizer class splits a sentence-like string into a list of sub-strings (tokens). # # The arguments of a Tokenizer object are just the input_columns and output_columns. from melusine.nlp_tools.tokenizer import Tokenizer tokenizer = Tokenizer (input_column='clean_body', output_column="tokens") # Use the **fit_transform** method on a dataframe to create a new ***tokens* column** df_emails = tokenizer.fit_transform(df_emails) print("Base text") print(df_emails.clean_body[1]) print("\nTokenized text") print(df_emails.tokens[1]) # #### Load / Save a tokenizer with TemporaryDirectory() as tmpdir: path = f"{tmpdir}/tokenizer.pkl" _ = joblib.dump(tokenizer, path, compress=True) tokenizer_reload = joblib.load(path) df_emails = tokenizer_reload.fit_transform(df_emails) print(df_emails.tokens[1]) # ## The Phraser class # The Phraser class transforms common multi-word expressions into single elements: for example *new york* becomes *new_york*. 
# The arguments of a Phraser object are: # - **input_column :** the name of the column of the dataframe that will be used as input for the training of the Phraser. # - **common_terms :** list of stopwords to be ignored. The default list is defined in the *conf.json* file. # - **threshold :** threshold to select collocations. # - **min_count :** minimum count of word to be selected as collocation. # + from melusine.nlp_tools.phraser import Phraser phraser = Phraser( input_column='tokens', output_column='phrased_tokens', threshold=5, min_count=2 ) # - # #### Training a phraser # The input dataframe must contain a column with a clean text : **a sentence-like string with only lowcase letters and no accents**. _ = phraser.fit(df_emails) df_emails = phraser.transform(df_emails) print(df_emails['phrased_tokens'].iloc[3]) # Expected result : You should see phrased tokens such as # "bulletin" + "salaire" = "bulletin_salaire" # #### Load/Save a phraser # + with TemporaryDirectory() as tmpdir: path = f"{tmpdir}/phraser.pkl" _ = joblib.dump(phraser, path, compress=True) phraser_reload = joblib.load(path) print(phraser_reload.transform(df_emails)['phrased_tokens'].iloc[3]) # - # ## The Embedding class # Word embeddings are abstract representations of words in a lower dimensional vector space. One of the advantages of word embeddings is thus to save computational cost. The Melusine Embedding class uses a **Word2Vec** model. The trained Embedding object will be used in the Models subpackage to train a Neural Network to classify emails. # The arguments of an Embedding object are : # - **input_column :** the name of the column used as an input for the training. # - **workers :** the number of cores used for computation. Default value, 40. # - **seed :** seed for the embedding model, # - **iter :** number of iterations for the training, # - **size :** dimension of the embeddings # - **window :** # - **min_count :** minimum number of occurences for a word to be taken into account. 
# + from melusine.nlp_tools.embedding import Embedding embedding = Embedding( tokens_column='tokens', size=300, workers=4, min_count=3 ) # - # #### Training embeddings embedding.train(df_emails) embedding.embedding.most_similar("vehicule") # Warning : The embedding is trained on a very small dataset so the results here are irrelevant # # The stemmer class # The Stemmer class reduces words (tokens) from a list to their word stem. # The arguments of a Stemmer object are: # - **input_column :** the name of the column of the dataframe that will be used as input for the reduction of the Stemmer. # - **output_column :** the name of the column of the dataframe that will be used as output for the reduction of the Stemmer. # - **language :** language used to stem words, default "french". # + from melusine.nlp_tools.stemmer import Stemmer stemmer = Stemmer() # - # The Stemmer doesn't need any training. You can apply it directly on your dataset. df_emails = stemmer.transform(df_emails) print(df_emails['stemmed_tokens'].iloc[3]) # # The lemmatizer class # The Lemmatizer class changes words from a string by the base or dictionary form of a word, which is known as the lemma. # # The arguments of a Lemmatizer object are: # # - **in_col :** the name of the column of the dataframe that will be used as input for lemmatization. # - **out_col :** the name of the column of the dataframe that will be used as output for lemmatization. # - **engine :** lemmatization engine to load. Choices are 'spacy' and 'Lefff'. # - **engine_conf:** Spacy model to load under the hood. For french, choices are 'fr_core_news_sm', 'fr_core_news_md', 'fr_core_news_lg'. 
# If you get the following error: # # <span style="color:red">MaxRetryError: HTTPSConnectionPool(host='github.com', port=443): Max retries exceeded with url: # /sammous/spacy-lefff-model/releases/latest/download/model.tar.gz (Caused by # NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7fc058df3fd0>: Failed to establish a new connection: # [Errno 110] Connection timed out'))</span> # # This may due to the use of proxy. # Please download locally the model: https://github.com/sammous/spacy-lefff-model/releases/latest/download/model.tar.gz # # To do so: # Go to the repository: <site-packages>/spacy_lefff/data/tagger # Download and unzip the model with # # >>wget https://github.com/sammous/spacy-lefff-model/releases/latest/download/model.tar.gz # # >>tar xvfz model.tar.gz from melusine.nlp_tools.lemmatizer import Lemmatizer spacy_lemma_sm = Lemmatizer('clean_body', 'lemma_spacy_sm', engine='spacy', engine_conf='fr_core_news_sm') lefff_lemma = Lemmatizer('clean_body', 'lemma_lefff', engine='Lefff', engine_conf='fr_core_news_sm') df_emails = spacy_lemma_sm.fit_transform(df_emails) print(df_emails['lemma_spacy_sm'].iloc[3]) df_emails = lefff_lemma.fit_transform(df_emails) print(df_emails['lemma_lefff'].iloc[3])
tutorial/tutorial04_nlp_tools.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=true editable=true
# # Crypto Price - Alexa Skills Kit Application
#
# Crypto Price is an Amazon Web Services (AWS) Lambda Function that pairs with the Alexa Skills Kit (ASK) to respond to an Amazon Echo user's request for the current price of any leading cryptocurrency (such as Bitcoin, Ethereum, Monero, Litecoin, etc.) in any world currency. By default the function will return the price in US Dollars, however, if a user specifies a currency it will return the price in that currency. Furthermore, if the user enables the app location permissions on their Alexa App it will return the price in the currency of their current country. Some typical questions and responses are as follows:
#
# ---
#
# > **User**: Alexa, tell me the price of Bitcoin from Crypto Price. *(location disabled by the user)*
# >
# > **Alexa**: The current price of Bitcoin is 1,487.91 US dollars.
#
# ---
#
# > **User**: Alexa, open Crypto Price and tell me the price of Ethereum. *(location enabled by the user and they live in the United Kingdom)*
# >
# > **Alexa**: The current price of Ethereum is 53.72 pounds.
#
# ---
#
# > **User**: Alexa, load Crypto Price.
# >
# > **Alexa**: Welcome to Crypto Price. Please ask a question like: What is the price of Bitcoin?
# >
# > **User**: Give me the price of Monero in yen.
# >
# > **Alexa**: The current price of Monero is 3,070.75 yen.
#
# ---
#
# > **User**: Alexa, get help from Crypto Price.
# >
# > **Alexa**: Crypto Price returns the price of the leading cryptocurrencies in any country's currency. You can ask questions like...
#
# ---
#
# ## Getting Started
#
# These instructions will get you a copy of the project up and running on your local machine for deployment and testing purposes.
Please checkout the [blogpost](docs/blogpost.md) for full deployment details and how to integrate the app with the Alexa Skill Kit. # # ### Prerequisites # # This app runs using Python 3.6.1. Please checkout [www.python.org](https://www.python.org) to install it on your own system. It is recommended to build the project in a contained virtual environment. This can be achieved with a combination of [Virtualenv](https://virtualenv.pypa.io/en/stable/) and the [Virtualenv Wrapper](https://virtualenvwrapper.readthedocs.io/en/latest/) which allows you to create and delete Virtualenvs easily. # # ### Installing # # The first step to installing the app is to clone the git repository: # # ```bash # $ git clone https://github.com/CraigLangford/Crypto-Price.git # ``` # # If you have virtualenv and virtualenvwrapper installed (See [Prerequisites](#prerequisites)), create your Python 3.6 environment. # # ```bash # $ mkvirtualenv --python=python3.6 cryptoprice # ``` # # You can set the root directory of the project as well so whenever you run `workon cryptoprice` you'll be in your virtualenv in your root folder immediately. # # ```bash # $ setvirtualenvproject # ``` # # You should now be in the root directory with Python 3.6.x as your Python version. # # ```bash # $ ls # cryptoprice.py data docs LICENSE README.md requirements.txt setup.py test_cryptoprice.py # $ python --version # Python 3.6.1 # ``` # # For API requests the project uses [requests](http://docs.python-requests.org/en/master/) and for testing it uses [Pytest](https://docs.pytest.org/en/latest/). To install these simply install via the requirements file. # # ```bash # $ pip install --requirement requirements.txt # ``` # # That's it! You're now set up to work locally. 
You can build some tests in test_cryptoprice.py to test locally (see [Running the Tests](#running-the-tests)) or checkout the steps from [Deployment](#deployment) to set up the project as an Amazon Lambda function and integrate it with your own Alexa Skills Kit App! # # ## Running the Tests # # Pytest is used for testing the application, and is included in the requirements.txt file. If you already followed the steps in [Installing](#installing) and you're good to go! Just run the following on the root directory of the project to run the tests for the project. *Note: You must have an internet connection as the app will gather the prices from [www.cryptocompare.com](www.cryptocompare.com)* # # ```bash # $ py.test # ``` # # ## Deployment # # To deploy the system as an Amazon Lambda function you must create a zip composed of the core cryptoprice.py file, the data directory and the third party requests module. This is handled by the setup.py script which can be run in the root directory as below. # # ```bash # $ python setup.py # ``` # # This will generate a cryptoprice.zip file which can be uploaded as your lambda function to AWS Lambda. For more details see the [AWS Lambda Documentation](http://docs.aws.amazon.com/lambda/latest/dg/lambda-python-how-to-create-deployment-package.html). # # ## Contributing # # ## Authors # # * **<NAME>** - *Initial Work* - https://github.com/CraigLangford # # Please feel free to contribute to be added to the project! # # ## License # # This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. # # ## Acknowledgments # # * Hats off to to [www.cryptocompare.com](https://www.cryptocompare.com) for their easy to use and extensive [API](https://www.cryptocompare.com/api)
docs/README.ipynb
#!/usr/bin/env python3 # # torchsynth examples # # We walk through basic functionality of `torchsynth` in this Jupyter notebook. # # Just note that all ipd.Audio play widgets normalize the audio. # # If you're in Colab, remember to set the runtime to GPU. # and get the latest torchsynth: # # ``` # # !pip install git+https://github.com/torchsynth/torchsynth.git # ``` # %load_ext autoreload # %autoreload 2 # %matplotlib inline # + def iscolab(): # pragma: no cover return "google.colab" in str(get_ipython()) def isnotebook(): # pragma: no cover try: if iscolab(): return True shell = get_ipython().__class__.__name__ if shell == "ZMQInteractiveShell": return True # Jupyter notebook or qtconsole elif shell == "TerminalInteractiveShell": return False # Terminal running IPython else: return False # Other type (?) except NameError: return False # Probably standard Python interprete print(f"isnotebook = {isnotebook()}") # + if isnotebook(): # pragma: no cover import IPython.display as ipd import librosa import librosa.display import matplotlib.pyplot as plt from IPython.core.display import display else: class IPD: def Audio(*args, **kwargs): pass def display(*args, **kwargs): pass ipd = IPD() import random import numpy as np import numpy.random import torch.fft import torch.tensor as tensor from torch import Tensor as T from torchsynth.config import SynthConfig from torchsynth.module import ( ADSR, VCA, ControlRateUpsample, MonophonicKeyboard, Noise, SineVCO, TorchFmVCO, ) from torchsynth.parameter import ModuleParameterRange # Determenistic seeds for replicable testing random.seed(0) numpy.random.seed(0) torch.manual_seed(0) # - # Run examples on GPU if available if torch.cuda.is_available(): device = "cuda" else: device = "cpu" def time_plot(signal, sample_rate=44100, show=True): if isnotebook(): # pragma: no cover t = np.linspace(0, len(signal) / sample_rate, len(signal), endpoint=False) plt.plot(t, signal) plt.xlabel("Time") plt.ylabel("Amplitude") if show: plt.show() 
def stft_plot(signal, sample_rate=44100): if isnotebook(): # pragma: no cover X = librosa.stft(signal) Xdb = librosa.amplitude_to_db(abs(X)) plt.figure(figsize=(5, 5)) librosa.display.specshow(Xdb, sr=sample_rate, x_axis="time", y_axis="log") plt.show() # ## Globals # We'll generate 2 sounds at once, 4 seconds each synthconfig = SynthConfig( batch_size=2, reproducible=False, sample_rate=44100, buffer_size_seconds=4.0 ) # For a few examples, we'll only generate one sound synthconfig1 = SynthConfig( batch_size=1, reproducible=False, sample_rate=44100, buffer_size_seconds=4.0 ) # And a short one sound synthconfig1short = SynthConfig( batch_size=1, reproducible=False, sample_rate=44100, buffer_size_seconds=0.1 ) # ## The Envelope # Our module is based on an ADSR envelope, standing for "attack, decay, sustain, # release," which is specified by four values: # # - a: the attack time, in seconds; the time it takes for the signal to ramp # from 0 to 1. # - d: the decay time, in seconds; the time to 'decay' from a peak of 1 to a # sustain level. # - s: the sustain level; a value between 0 and 1 that the envelope holds during # a sustained note (**not a time value**). # - r: the release time, in seconds; the time it takes the signal to decay from # the sustain value to 0. # # Envelopes are used to modulate a variety of signals; usually one of pitch, # amplitude, or filter cutoff frequency. In this notebook we will use the same # envelope to modulate several different audio parameters. # # ### A note about note-on, note-off behaviour # # By default, this envelope reacts as if it was triggered with midi, for example # playing a keyboard. Each midi event has a beginning and end: note-on, when you # press the key down; and note-off, when you release the key. `note_on_duration` # is the amount of time that the key is depressed. During the note-on, the # envelope moves through the attack and decay sections of the envelope. 
This # leads to musically-intuitive, but programatically-counterintuitive behaviour. # # Assume attack is 0.5 seconds, and decay is 0.5 seconds. If a note is held for # 0.75 seconds, the envelope won't traverse through the entire attack-and-decay # phase (specifically, it will execute the entire attack, and 0.25 seconds of # the decay). # # If this is confusing, don't worry about it. ADSR's do a lot of work behind the # scenes to make the playing experience feel natural. Alternately, you may # specify one-shot mode (see below), which is more typical of drum machines. # + # Synthesis parameters. a = tensor([0.1, 0.2]) d = tensor([0.1, 0.2]) s = tensor([0.75, 0.8]) r = tensor([0.5, 0.8]) alpha = tensor([3.0, 4.0]) note_on_duration = tensor([0.5, 1.5], device=device) # Envelope test adsr = ADSR( attack=a, decay=d, sustain=s, release=r, alpha=alpha, synthconfig=synthconfig, device=device, ) envelope = adsr(note_on_duration) time_plot(envelope.clone().detach().cpu().T, adsr.control_rate.item()) print(adsr) # - # Here's the l1 error between the two envelopes err = torch.mean(torch.abs(envelope[0, :] - envelope[1, :])) print("Error =", err) time_plot(torch.abs(envelope[0, :] - envelope[1, :]).detach().cpu().T) # ##### And here are the gradients # + # err.backward(retain_graph=True) # for p in adsr.torchparameters: # print(adsr.torchparameters[p].data.grad) # print(f"{p} grad1={adsr.torchparameters[p].data.grad} grad2={adsr.torchparameters[p].data.grad}") # - # **Generating Random Envelopes** # # If we don't set parameters for an ADSR, then the parameters will be random when # initialized. # Note that module parameters are optional. 
If they are not provided,
# they will be randomly initialized (like a typical neural network module)
adsr = ADSR(synthconfig, device=device)
envelope = adsr(note_on_duration)
print(envelope.shape)
time_plot(envelope.clone().detach().cpu().T)

# # We can also use an optimizer to match the parameters of the two ADSRs
# optimizer = torch.optim.Adam(list(adsr2.parameters()), lr=0.01)

# fig, ax = plt.subplots()
# time_plot(envelope.detach().cpu(), adsr.sample_rate, show=False)
# time_plot(envelope2.detach().cpu(), adsr.sample_rate, show=False)
# plt.show()

# for i in range(100):
#     optimizer.zero_grad()

#     envelope = adsr(note_on_duration)
#     envelope2 = adsr2(note_on_duration)
#     err = torch.mean(torch.abs(envelope - envelope2))

#     if i % 10 == 0:
#         ax.set_title(f"Optimization Step {i} - Error: {err.item()}")
#         ax.lines[0].set_ydata(envelope.detach().cpu())
#         ax.lines[1].set_ydata(envelope2.detach().cpu())
#         fig.canvas.draw()

#     err.backward()
#     optimizer.step()
# -

# ## Oscillators
#
# There are several types of oscillators and sound generators available. Oscillators
# that can be controlled by an external signal are called voltage-controlled
# oscillators (VCOs) in the analog world and we adopt a similar approach here;
# oscillators accept an input control signal and produce audio output. We have a
# simple sine oscillator: `SineVCO`, a square/saw oscillator: `SquareSawVCO`, and
# an FM oscillator: `TorchFmVCO`. There is also a white noise generator: `Noise`.
# + # %matplotlib inline # Set up a Keyboard module keyboard = MonophonicKeyboard( synthconfig, device, midi_f0=tensor([69.0, 50.0]), duration=note_on_duration ) # Reset envelope adsr = ADSR( attack=a, decay=d, sustain=s, release=r, alpha=alpha, synthconfig=synthconfig, device=device, ) # Trigger the keyboard, which returns a midi_f0 and note duration midi_f0, duration = keyboard() # Create an envelope -- modulation signals are computed at a lower # sampling rate and must be upsampled prior to feeding into audio # rate modules envelope = adsr(duration) upsample = ControlRateUpsample(synthconfig) envelope = upsample(envelope) # SineVCO test -- call to(device) instead of passing in device to constructor also works sine_vco = SineVCO( tuning=tensor([0.0, 0.0]), mod_depth=tensor([-12.0, 12.0]), synthconfig=synthconfig, ).to(device) sine_out = sine_vco(midi_f0, envelope) stft_plot(sine_out[0].detach().cpu().numpy()) ipd.display( ipd.Audio(sine_out[0].detach().cpu().numpy(), rate=sine_vco.sample_rate.item()) ) stft_plot(sine_out[1].detach().cpu().numpy()) ipd.display( ipd.Audio(sine_out[1].detach().cpu().numpy(), rate=sine_vco.sample_rate.item()) ) # We can use auraloss instead of raw waveform loss. This is just # to show that gradient computations occur err = torch.mean(torch.abs(sine_out[0] - sine_out[1])) print("Error =", err) time_plot(torch.abs(sine_out[0] - sine_out[1]).detach().cpu()) # + # err.backward(retain_graph=True) # for p in sine_vco.torchparameters: # print(f"{p} grad1={sine_vco.torchparameters[p].grad.item()} grad2={sine_vco2.torchparameters[p].grad.item()}") ## Both SineVCOs use the sample envelope # for p in adsr.torchparameters: # print(f"{p} grad={adsr.torchparameters[p].grad.item()}") # - # ### SquareSaw Oscillator # # Check this out, it's a square / saw oscillator. Use the shape parameter to # interpolate between a square wave (shape = 0) and a sawtooth wave (shape = 1). 
# + from torchsynth.module import SquareSawVCO keyboard = MonophonicKeyboard(synthconfig, device, midi_f0=tensor([30.0, 30.0])).to( device ) square_saw = SquareSawVCO( tuning=tensor([0.0, 0.0]), mod_depth=tensor([0.0, 0.0]), shape=tensor([0.0, 1.0]), synthconfig=synthconfig, device=device, ) env2 = torch.zeros([2, square_saw.buffer_size], device=device) square_saw_out = square_saw(keyboard.p("midi_f0"), env2) stft_plot(square_saw_out[0].cpu().detach().numpy()) ipd.display( ipd.Audio( square_saw_out[0].cpu().detach().numpy(), rate=square_saw.sample_rate.item() ) ) stft_plot(square_saw_out[1].cpu().detach().numpy()) ipd.display( ipd.Audio( square_saw_out[1].cpu().detach().numpy(), rate=square_saw.sample_rate.item() ) ) err = torch.mean(torch.abs(square_saw_out[0] - square_saw_out[1])) print(err) # err.backward(retain_graph=True) # for p in square_saw.torchparameters: # print(f"{p} grad1={square_saw.torchparameters[p][0].grad.item()} grad2={square_saw.torchparameters[p][1].grad.item()}") # ### VCA # # Notice that this sound is rather clicky. We'll add an envelope to the # amplitude to smooth it out. # + vca = VCA(synthconfig, device=device) test_output = vca(envelope, sine_out) time_plot(test_output[0].detach().cpu()) # - # ### FM Synthesis # # What about FM? You bet. Use the `TorchFmVCO` class. It accepts any audio input. # # Just a note that, as in classic FM synthesis, you're dealing with a complex architecture of modulators. Each 'operator ' has its own pitch envelope, and amplitude envelope. The 'amplitude' envelope of an operator is really the *modulation depth* of the oscillator it operates on. So in the example below, we're using an ADSR to shape the depth of the *operator*, and this affects the modulation depth of the resultant signal. # + # FmVCO test keyboard = MonophonicKeyboard( synthconfig, device=device, midi_f0=tensor([50.0, 50.0]) ).to(device) # Make steady-pitched sine (no pitch modulation). 
sine_operator = SineVCO(
    tuning=tensor([0.0, 0.0]),
    mod_depth=tensor([0.0, 5.0]),
    synthconfig=synthconfig,
    device=device,
)
operator_out = sine_operator(keyboard.p("midi_f0"), envelope)

# Shape the modulation depth.
operator_out = vca(envelope, operator_out)

# Feed into FM oscillator as modulator signal.
fm_vco = TorchFmVCO(
    tuning=tensor([0.0, 0.0]),
    mod_depth=tensor([2.0, 5.0]),
    synthconfig=synthconfig,
    device=device,
)
fm_out = fm_vco(keyboard.p("midi_f0"), operator_out)

stft_plot(fm_out[0].cpu().detach().numpy())
ipd.display(ipd.Audio(fm_out[0].cpu().detach().numpy(), rate=fm_vco.sample_rate.item()))
stft_plot(fm_out[1].cpu().detach().numpy())
ipd.display(ipd.Audio(fm_out[1].cpu().detach().numpy(), rate=fm_vco.sample_rate.item()))
# -

# ### Noise
#
# The noise generator creates white noise the same length as the SynthModule buffer length

# +
noise = Noise(synthconfig, seed=42, device=device)
out = noise()

stft_plot(out[0].detach().cpu().numpy())
ipd.Audio(out[0].detach().cpu().numpy(), rate=noise.sample_rate.item())
# -

# ## Audio Mixer

# +
from torchsynth.module import AudioMixer

env = torch.zeros((synthconfig.batch_size, synthconfig.buffer_size), device=device)

keyboard = MonophonicKeyboard(synthconfig, device=device)
sine = SineVCO(synthconfig, device=device)
square_saw = SquareSawVCO(synthconfig, device=device)
noise = Noise(synthconfig, seed=123, device=device)

midi_f0, note_on_duration = keyboard()
sine_out = sine(midi_f0, env)
sqr_out = square_saw(midi_f0, env)
noise_out = noise()

mixer = AudioMixer(synthconfig, 3, curves=[1.0, 1.0, 0.25]).to(device)
output = mixer(sine_out, sqr_out, noise_out)

# Fix: play the mixed signal ("output"), not "out", which at this point still
# holds the white noise generated by the seed=42 Noise cell above.
ipd.Audio(output[0].cpu().detach().numpy(), rate=mixer.sample_rate.item(), normalize=False)

# +
# Mixer params are set in dB
mixer.set_parameter("level0", tensor([0.25, 0.25], device=device))
mixer.set_parameter("level1", tensor([0.25, 0.25], device=device))
mixer.set_parameter("level2", tensor([0.125, 0.125], device=device))

out = mixer(sine_out, sqr_out, noise_out)
ipd.Audio(out[0].cpu().detach().numpy(), rate=mixer.sample_rate.item()) # - # ## Modulation # # Besides envelopes, LFOs can be used to modulate parameters # + from torchsynth.module import LFO, ModulationMixer adsr = ADSR(synthconfig=synthconfig, device=device) # Trigger the keyboard, which returns a midi_f0 and note duration midi_f0, duration = keyboard() envelope = adsr(duration) lfo = LFO(synthconfig, device=device) lfo.set_parameter("mod_depth", tensor([10.0, 0.0])) lfo.set_parameter("frequency", tensor([1.0, 1.0])) out = lfo(envelope) lfo2 = LFO(synthconfig, device=device) out2 = lfo2(envelope) print(out.shape) time_plot(out[0].detach().cpu().numpy(), sample_rate=lfo.control_rate.item()) time_plot(out2[0].detach().cpu().numpy(), sample_rate=lfo.control_rate.item()) # A modulation mixer can be used to mix a modulation sources together # and maintain a 0 to 1 amplitude range mixer = ModulationMixer(synthconfig=synthconfig, device=device, n_input=2, n_output=1) mods_mixed = mixer(out, out2) print(f"Mixed: LFO 1:{mixer.p('0->0')[0]:.2}, LFO 2: {mixer.p('1->0')[0]:.2}") time_plot(mods_mixed[0][0].detach().cpu().numpy(), sample_rate=lfo.control_rate.item()) # - # ## Voice Module # # Alternately, you can just use the Voice class that composes all these modules # together automatically. from torchsynth.synth import Voice voice1 = Voice(synthconfig=synthconfig1).to(device) voice1.set_parameters( { ("keyboard", "midi_f0"): tensor([69.0]), ("keyboard", "duration"): tensor([1.0]), ("vco_1", "tuning"): tensor([0.0]), ("vco_1", "mod_depth"): tensor([12.0]), } ) voice_out1 = voice1() stft_plot(voice_out1.cpu().view(-1).detach().numpy()) ipd.Audio(voice_out1.cpu().detach().numpy(), rate=voice1.sample_rate.item()) # Additionally, the Voice class can take two oscillators. 
# + voice2 = Voice(synthconfig=synthconfig1).to(device) voice2.set_parameters( { ("keyboard", "midi_f0"): tensor([40.0]), ("keyboard", "duration"): tensor([3.0]), ("vco_1", "tuning"): tensor([19.0]), ("vco_1", "mod_depth"): tensor([24.0]), ("vco_2", "tuning"): tensor([0.0]), ("vco_2", "mod_depth"): tensor([12.0]), ("vco_2", "shape"): tensor([1.0]), } ) voice_out2 = voice2() stft_plot(voice_out2.cpu().view(-1).detach().numpy()) ipd.Audio(voice_out2.cpu().detach().numpy(), rate=voice2.sample_rate.item()) # - # Test gradients on entire voice err = torch.mean(torch.abs(voice_out1 - voice_out2)) print(err) # ## Random synths # # Let's generate some random synths in batch synthconfig16 = SynthConfig( batch_size=16, reproducible=False, sample_rate=44100, buffer_size_seconds=4 ) voice = Voice(synthconfig=synthconfig16).to(device) voice_out = voice() for i in range(synthconfig16.batch_size): stft_plot(voice_out[i].cpu().view(-1).detach().numpy()) ipd.display( ipd.Audio(voice_out[i].cpu().detach().numpy(), rate=voice.sample_rate.item()) ) # Parameters can be set and frozen before randomization as well # + voice.unfreeze_all_parameters() voice.set_frozen_parameters( { ("keyboard", "midi_f0"): 42.0, ("keyboard", "duration"): 3.0, ("vco_1", "tuning"): 0.0, ("vco_2", "tuning"): 0.0, }, ) voice_out = voice() for i in range(synthconfig16.batch_size): stft_plot(voice_out[i].cpu().view(-1).detach().numpy()) ipd.display( ipd.Audio(voice_out[i].cpu().detach().numpy(), rate=voice.sample_rate.item()) ) # + # ### Parameters # All synth modules and synth classes have named parameters which can be quered # and updated. Let's look at the parameters for the Voice we just created. for n, p in voice1.named_parameters(): print(f"{n:40}") # Parameters are passed into SynthModules during creation with an initial value and a parameter range. The parameter range is a human readable range of values, for example MIDI note numbers from 1-127 for a VCO. 
These values are stored in a normalized range between 0 and 1. Parameters can be accessed and set using either ranges with specific methods. # # Parameters of individual modules can be accessed in several ways: # Get the full ModuleParameter object by name from the module print(voice1.vco_1.get_parameter("tuning")) # Access the value as a Tensor in the full value human range print(voice1.vco_1.p("tuning")) # Access the value as a float in the range from 0 to 1 print(voice1.vco_1.get_parameter_0to1("tuning")) # Parameters of individual modules can also be set using the human range or a normalized range between 0 and 1 # Set the vco pitch using the human range, which is MIDI note number voice1.vco_1.set_parameter("tuning", tensor([12.0])) print(voice1.vco_1.p("tuning")) # Set the vco pitch using a normalized range between 0 and 1 voice1.vco_1.set_parameter_0to1("tuning", tensor([0.5])) print(voice1.vco_1.p("tuning")) # #### Parameter Ranges # # Conversion between [0,1] range and a human range is handled by `ModuleParameterRange`. The conversion from [0,1] can be shaped by specifying a curve. Curve values less than 1 put more emphasis on lower values in the human range and curve values greater than 1 put more emphasis on larger values in the human range. A curve of 1 is a linear relationship between the two ranges. 
# + # ModuleParameterRange with scaling of a range from 0-127 param_range_exp = ModuleParameterRange(0.0, 127.0, curve=0.5) param_range_lin = ModuleParameterRange(0.0, 127.0, curve=1.0) param_range_log = ModuleParameterRange(0.0, 127.0, curve=2.0) # Linearly spaced values from 0.0 1.0 param_values = torch.linspace(0.0, 1.0, 100) if isnotebook(): fig, axes = plt.subplots(1, 3, figsize=(12, 3)) axes[0].plot(param_values, param_range_exp.from_0to1(param_values)) axes[0].set_title("Exponential Scaling") axes[1].plot(param_values, param_range_lin.from_0to1(param_values)) axes[1].set_title("Linear Scaling") axes[2].plot(param_values, param_range_log.from_0to1(param_values)) axes[2].set_title("Logarithmic Scaling") # + # ModuleParameterRange with symmetric scaling of a range from -127 to 127 param_range_exp = ModuleParameterRange(-127.0, 127.0, curve=0.5, symmetric=True) param_range_log = ModuleParameterRange(-127.0, 127.0, curve=2.0, symmetric=True) # Linearly spaced values from 0.0 1.0 param_values = torch.linspace(0.0, 1.0, 100) if isnotebook(): fig, axes = plt.subplots(1, 2, figsize=(8, 3)) axes[0].plot(param_values, param_range_exp.from_0to1(param_values)) axes[0].set_title("Exponential Scaling") axes[1].plot(param_values, param_range_log.from_0to1(param_values)) axes[1].set_title("Logarithmic Scaling") # - # ### Hyperparameters # # ParameterRanges are considered hyperparameters in torchsynth and can be viewed and modified through a Synth # View all hyperparameters voice1.hyperparameters # Set a specific hyperparameter voice1.set_hyperparameter(("keyboard", "midi_f0", "curve"), 0.1) print(voice1.hyperparameters[("keyboard", "midi_f0", "curve")]) # ### Nebulae # # Different hyperparameter settings cause the parameters of a synth to be sampled in a # different way when generating random synths. We call these different versions of the # same synth different nebula. 
For example, here is the Voice loaded with the Drum # nebula, which is more likely to produce sounds that are similar synth drum hits # during random sampling. synthconfig16 = SynthConfig( batch_size=16, reproducible=False, sample_rate=44100, buffer_size_seconds=4 ) voice = Voice(synthconfig=synthconfig16, nebula="drum").to(device) voice_out = voice() for i in range(synthconfig16.batch_size): stft_plot(voice_out[i].cpu().view(-1).detach().numpy()) ipd.display( ipd.Audio(voice_out[i].cpu().detach().numpy(), rate=voice.sample_rate.item()) )
examples/examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="https://github.com/OpenMined/design-assets/raw/master/logos/OM/horizontal-primary-light.png" alt="he-black-box" width="600"/> # # # # Homomorphic Encryption using Duet: Data Scientist # ## Tutorial 0: Basic operations # # # # Welcome! # In this tutorial, we will show you how to use Duet with homomorphic encryption, and some use cases. This notebook shows the Data Scientist view on the operations. # # # Homomorphic encryption is efficient in client-server scenarios, like the one depicted in the diagram below. We will see a few use cases in which a Data Scientist can operate on encrypted data. # # # <img src="https://blog.openmined.org/content/images/2020/04/OM---CKKS-Graphic-v.01@2x.png" align="center" style="display: block; margin: auto;"/> # ## Setup # # All modules are imported here, make sure everything is installed by running the cell below. # + import syft as sy import tenseal as ts sy.load_lib("tenseal") duet = sy.join_duet(loopback=True) # - # ### <img src="https://github.com/OpenMined/design-assets/raw/master/logos/OM/mark-primary-light.png" alt="he-black-box" width="100"/> Checkpoint 0 : Now STOP and run the Data Owner notebook until Checkpoint 1. # ### Check the Duet store # # Now we can see all three objects and get pointers to them. # # # __Note__: we cannot get these without permission. duet.store.pandas # ### Get pointers to the encrypted vectors. ctx_ptr = duet.store["context"] enc_v1_ptr = duet.store["enc_v1"] enc_v2_ptr = duet.store["enc_v2"] # ### Request permission to work on the encrypted vectors. 
ctx_ptr.request(reason="I would like to get the context") enc_v1_ptr.request(reason="I would like to get first vector") enc_v2_ptr.request(reason="I would like to get second vector") # Now we can see our requests are pending duet.requests.pandas # ### <img src="https://github.com/OpenMined/design-assets/raw/master/logos/OM/mark-primary-light.png" alt="he-black-box" width="100"/> Checkpoint 1 : Now STOP and run the Data Owner notebook until Checkpoint 2. # ### Get pointers to the encrypted vectors. # We can see that the requests have been answered duet.requests.pandas # + ctx = ctx_ptr.get(delete_obj=False) enc_v1 = enc_v1_ptr.get(delete_obj=False) enc_v2 = enc_v2_ptr.get(delete_obj=False) enc_v1.link_context(ctx) enc_v2.link_context(ctx) (enc_v1, enc_v2) # - # ### Compute different operations over the two vectors locally result_add = enc_v1 + enc_v2 result_iadd = enc_v1 + [10, 10, 10, 10, 10] result_sub = enc_v1 - enc_v2 result_mul = enc_v1 * enc_v2 result_pow = enc_v1 ** 3 result_neg = -enc_v1 result_poly = enc_v1.polyval([1,0,1,1]) # 1 + X^2 + X^3 result_add # ### Send the result back to the Data Owner result_add_ptr = result_add.send(duet, searchable=True, tags=["result_add"]) result_iadd_ptr = result_iadd.send(duet, searchable=True, tags=["result_iadd"]) result_sub_ptr = result_sub.send(duet, searchable=True, tags=["result_sub"]) result_mul_ptr = result_mul.send(duet, searchable=True, tags=["result_mul"]) result_pow_ptr = result_pow.send(duet, searchable=True, tags=["result_pow"]) result_neg_ptr = result_neg.send(duet, searchable=True, tags=["result_neg"]) result_poly_ptr = result_poly.send(duet, searchable=True, tags=["result_poly"]) print(duet.store) # ### <img src="https://github.com/OpenMined/design-assets/raw/master/logos/OM/mark-primary-light.png" alt="he-black-box" width="100"/> Checkpoint 2 : Now STOP and run the Data Owner notebook until the next checkpoint. # # Congratulations!!! - Time to Join the Community! 
# # Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways! # # ### Star PySyft and TenSEAL on GitHub # # The easiest way to help our community is just by starring the Repos! This helps raise awareness of the cool tools we're building. # # - [Star PySyft](https://github.com/OpenMined/PySyft) # - [Star TenSEAL](https://github.com/OpenMined/TenSEAL) # # ### Join our Slack! # # The best way to keep up to date on the latest advancements is to join our community! You can do so by filling out the form at [http://slack.openmined.org](http://slack.openmined.org). #lib_tenseal and #code_tenseal are the main channels for the TenSEAL project. # # ### Donate # # If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups! # # [OpenMined's Open Collective Page](https://opencollective.com/openmined)
examples/homomorphic-encryption/Tutorial_0_TenSEAL_Syft_Data_Scientist.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # ---

# # Matemática para a Administração - UFRJ (2016/2)
# # Segunda Prova - Turma A (GABARITO)

# **Questão 1.** (3 pontos) Determine $f'(x)$ das seguintes funções:
#
# (a) $f(x) = 3x^4 - x^3 + 2x^2 + x - 9$
#
# (b) $f(x) = (x^4 + 8)(x^2 - x - 2)$
#
# (c) $f(x) = \dfrac{x^2 - 5x + 7}{x^6 + 3}$
#
# (d) $f(x) = (6x^2 + x - 1)^{90}$
#
# (e) $f(x) = e^{2x + 1}$
#
# (f) $f(x) = \ln{(x^2 + 1)}$
#
# > #### **Solução**
#
# > (a) $f'(x) = 12x^3 - 3x^2 + 4x + 1$
#
# > (b) Pela regra do produto: $f'(x) = 4x^3(x^2 - x - 2) + (x^4 + 8)(2x - 1)$
#
# > (c) Pela regra do quociente: $f'(x) = \dfrac{(2x - 5)(x^6 + 3) - 6x^5(x^2 - 5x + 7)}{(x^6 + 3)^2}$
#
# > (d) Pela regra da cadeia: $f'(x) = 90(6x^2 + x - 1)^{89}(12x + 1)$
#
# > (e) $f'(x) = 2e^{2x + 1}$
#
# > (f) $f'(x) = \dfrac{2x}{x^2 + 1}$

# **Questão 2.** Determine as equações das retas tangentes às curvas, nos pontos de abscissas dadas:
#
# (a) (1.25) $f(x) = 1 - x + 2x^2;\,x = 0$
#
# (b) (1.25) $f(x) = e^{-\dfrac{1}{x}};\,x = -1$
#
# > #### **Solução**
#
# > (a) $f'(x) = -1 + 4x$, logo $f'(0) = -1$ e $f(0) = 1$. A reta tangente em $x = 0$ é $y = 1 - x$.
#
# > (b) $f'(x) = e^{-1/x}\cdot\dfrac{1}{x^2}$, logo $f'(-1) = e^{1}\cdot 1 = e$ e $f(-1) = e$. A reta tangente em $x = -1$ é $y = e + e(x + 1)$, ou seja, $y = e(x + 2)$.

# **Questão 3.** (2 pontos) Os executivos de uma importadora de arroz determinam que a demanda dos consumidores é aproximadamente igual a:
# $$A(x) = \dfrac{5000}{x^2}$$
# toneladas por semana, quando o preço for $x$ reais.
# Estima-se que daqui a $t$ semanas o preço do arroz será modelado por $x(t) = 0.02t^2 + 0.1t + 2$ reais por tonelada.
# Qual será a taxa de variação da demanda semanal daqui a 10 semanas?
#
# > #### **Solução**
# > Pela regra da cadeia, $\dfrac{dA}{dt} = A'(x)\cdot x'(t)$.
# > Em $t = 10$: $x(10) = 0.02(100) + 0.1(10) + 2 = 5$ reais e $x'(10) = 0.04(10) + 0.1 = 0.5$ real/semana.
# > Como $A'(x) = -\dfrac{10000}{x^3}$, temos $A'(5) = -\dfrac{10000}{125} = -80$.
# > Logo:
# > $\fbox{$\dfrac{dA}{dt} = (-80)(0.5) = -40$}$, ou seja, daqui a 10 semanas a demanda estará diminuindo à taxa de 40 toneladas por semana.

# **Questão 4** Usando derivação implícita, calcule $y'$:
#
# (a) (1.25) $2xy + y^4 = 5$
#
# (b) (1.25) $x^2y = 4e^{-x} + \ln{y}$
#
# > #### **Solução**
#
# > (a) Derivando implicitamente: $2y + 2xy' + 4y^3y' = 0$, logo $y' = \dfrac{-2y}{2x + 4y^3} = \dfrac{-y}{x + 2y^3}$.
#
# > (b) Derivando implicitamente: $2xy + x^2y' = -4e^{-x} + \dfrac{y'}{y}$. Agrupando os termos em $y'$: $y'\left(x^2 - \dfrac{1}{y}\right) = -4e^{-x} - 2xy$, logo $y' = \dfrac{-4e^{-x} - 2xy}{x^2 - \dfrac{1}{y}} = \dfrac{y\left(-4e^{-x} - 2xy\right)}{x^2y - 1}$.
MAT_ADM_P2A_2016-2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="o40y8HxDYE3G" colab_type="text" # # Time Series Anomaly Detection using LSTM Autoencoders with PyTorch in Python # + id="NyPqdFSzUW8F" colab_type="code" outputId="4a0b47bf-c222-4099-a124-65a037c024db" colab={"base_uri": "https://localhost:8080/", "height": 306} # !nvidia-smi # + id="_9tHU5two_TW" colab_type="code" colab={} # !pip install -qq arff2pandas # + id="XRYTtmMDtLuG" colab_type="code" colab={} # !pip install -q -U watermark # + id="-ugDZzk5bSju" colab_type="code" colab={} # !pip install -qq -U pandas # + id="NgNwOV2jtODd" colab_type="code" outputId="e637029b-b066-41d2-935c-a6b5b93bf8a1" colab={"base_uri": "https://localhost:8080/", "height": 143} # %reload_ext watermark # %watermark -v -p numpy,pandas,torch,arff2pandas # + id="3RY_N3gOmfDi" colab_type="code" colab={} import torch import copy import numpy as np import pandas as pd import seaborn as sns from pylab import rcParams import matplotlib.pyplot as plt from matplotlib import rc from sklearn.model_selection import train_test_split from torch import nn, optim import torch.nn.functional as F from arff2pandas import a2p # %matplotlib inline # %config InlineBackend.figure_format='retina' sns.set(style='whitegrid', palette='muted', font_scale=1.2) HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"] sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE)) rcParams['figure.figsize'] = 12, 8 RANDOM_SEED = 42 np.random.seed(RANDOM_SEED) torch.manual_seed(RANDOM_SEED) # + [markdown] id="RN0e4KktjE88" colab_type="text" # In this tutorial, you'll learn how to detect anomalies in Time Series data using an LSTM Autoencoder. You're going to use real-world ECG data from a single patient with heart disease to detect abnormal hearbeats. 
# # - [Read the tutorial](https://www.curiousily.com/posts/time-series-anomaly-detection-using-lstm-autoencoder-with-pytorch-in-python/) # - [Run the notebook in your browser (Google Colab)](https://colab.research.google.com/drive/1_J2MrBSvsJfOcVmYAN2-WSp36BtsFZCa) # - [Read the Getting Things Done with Pytorch book](https://github.com/curiousily/Getting-Things-Done-with-Pytorch) # # By the end of this tutorial, you'll learn how to: # # - Prepare a dataset for Anomaly Detection from Time Series Data # - Build an LSTM Autoencoder with PyTorch # - Train and evaluate your model # - Choose a threshold for anomaly detection # - Classify unseen examples as normal or anomaly # + [markdown] id="ZnW3JsIFW_Yw" colab_type="text" # ## Data # # # # The [dataset](http://timeseriesclassification.com/description.php?Dataset=ECG5000) contains 5,000 Time Series examples (obtained with ECG) with 140 timesteps. Each sequence corresponds to a single heartbeat from a single patient with congestive heart failure. # # > An electrocardiogram (ECG or EKG) is a test that checks how your heart is functioning by measuring the electrical activity of the heart. With each heart beat, an electrical impulse (or wave) travels through your heart. This wave causes the muscle to squeeze and pump blood from the heart. [Source](https://www.heartandstroke.ca/heart/tests/electrocardiogram) # # We have 5 types of hearbeats (classes): # # - Normal (N) # - R-on-T Premature Ventricular Contraction (R-on-T PVC) # - Premature Ventricular Contraction (PVC) # - Supra-ventricular Premature or Ectopic Beat (SP or EB) # - Unclassified Beat (UB). # # > Assuming a healthy heart and a typical rate of 70 to 75 beats per minute, each cardiac cycle, or heartbeat, takes about 0.8 seconds to complete the cycle. # Frequency: 60–100 per minute (Humans) # Duration: 0.6–1 second (Humans) [Source](https://en.wikipedia.org/wiki/Cardiac_cycle) # # The dataset is available on my Google Drive. 
# Let's get it:

# + id="dDlfeY2VAYdU" colab_type="code" outputId="4f4b9224-ff7a-4c3a-ec3b-9eed064e0d52" colab={"base_uri": "https://localhost:8080/", "height": 85}
# !gdown --id 16MIleqoIr1vYxlGk4GKnGmrsCPuWkkpT

# + id="L_gYlNi2AaOK" colab_type="code" colab={}
# !unzip -qq ECG5000.zip

# + id="DFWsBcdWjDkU" colab_type="code" colab={}
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# + [markdown] id="iVrX_m4CBAD6" colab_type="text"
# The data comes in multiple formats. We'll load the `arff` files into Pandas data frames:

# + id="sh_8XjtEBVYq" colab_type="code" colab={}
with open('ECG5000_TRAIN.arff') as f:
    train = a2p.load(f)

with open('ECG5000_TEST.arff') as f:
    test = a2p.load(f)

# + [markdown] id="UDtaZ2uTCG11" colab_type="text"
# We'll combine the training and test data into a single data frame. This will give us more data to train our Autoencoder. We'll also shuffle it:

# + id="tAyxwvbB5-9o" colab_type="code" outputId="7c69e8d9-7b80-436c-8dea-16109169fc83" colab={"base_uri": "https://localhost:8080/", "height": 35}
# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0;
# pd.concat is the supported way to stack the two frames.
df = pd.concat([train, test])
df = df.sample(frac=1.0)
df.shape

# + id="KpkkVdrLCSAs" colab_type="code" outputId="9dc73fde-a213-47f4-8b8b-f55fbf3c1dc1" colab={"base_uri": "https://localhost:8080/", "height": 253}
df.head()

# + [markdown] id="K_f7F-ipCZNH" colab_type="text"
# We have 5,000 examples. Each row represents a single heartbeat record.
Let's name the possible classes: # + id="9fcKHB5rcDcm" colab_type="code" colab={} CLASS_NORMAL = 1 class_names = ['Normal','R on T','PVC','SP','UB'] # + [markdown] id="TFWOyTF3CpgT" colab_type="text" # Next, we'll rename the last column to `target`, so its easier to reference it: # + id="6bfjzgJ7YEww" colab_type="code" colab={} new_columns = list(df.columns) new_columns[-1] = 'target' df.columns = new_columns # + [markdown] id="nlf5aVLnjRCz" colab_type="text" # ## Exploratory Data Analysis # # Let's check how many examples for each heartbeat class do we have: # + id="QBxuqD1cdd2y" colab_type="code" outputId="0746b248-5074-42b1-ce48-a69dce46322e" colab={"base_uri": "https://localhost:8080/", "height": 119} df.target.value_counts() # + [markdown] id="2EZVZYCsQKXV" colab_type="text" # Let's plot the results: # + id="wn4s427Sf4eb" colab_type="code" outputId="e2577373-8571-452d-b41c-6b8659ea15d3" colab={"base_uri": "https://localhost:8080/", "height": 506} ax = sns.countplot(df.target) ax.set_xticklabels(class_names); # + [markdown] id="e2m55fTyRNIf" colab_type="text" # The normal class, has by far, the most examples. This is great because we'll use it to train our model. 
# # Let's have a look at an averaged (smoothed out with one standard deviation on top and bottom of it) Time Series for each class: # + id="UVvG0vhiU-ju" colab_type="code" colab={} def plot_time_series_class(data, class_name, ax, n_steps=10): time_series_df = pd.DataFrame(data) smooth_path = time_series_df.rolling(n_steps).mean() path_deviation = 2 * time_series_df.rolling(n_steps).std() under_line = (smooth_path - path_deviation)[0] over_line = (smooth_path + path_deviation)[0] ax.plot(smooth_path, linewidth=2) ax.fill_between( path_deviation.index, under_line, over_line, alpha=.125 ) ax.set_title(class_name) # + id="xHaslHZ8JMSk" colab_type="code" outputId="b394badd-9919-4aa7-ee69-8b7f9174484f" colab={"base_uri": "https://localhost:8080/", "height": 596} classes = df.target.unique() fig, axs = plt.subplots( nrows=len(classes) // 3 + 1, ncols=3, sharey=True, figsize=(14, 8) ) for i, cls in enumerate(classes): ax = axs.flat[i] data = df[df.target == cls] \ .drop(labels='target', axis=1) \ .mean(axis=0) \ .to_numpy() plot_time_series_class(data, class_names[i], ax) fig.delaxes(axs.flat[-1]) fig.tight_layout(); # + [markdown] id="jDt33QoDTUIb" colab_type="text" # It is very good that the normal class has a distinctly different pattern than all other classes. Maybe our model will be able to detect anomalies? # + [markdown] id="gJRQl1p0Q57K" colab_type="text" # ## LSTM Autoencoder # # The [Autoencoder's](https://en.wikipedia.org/wiki/Autoencoder) job is to get some input data, pass it through the model, and obtain a reconstruction of the input. The reconstruction should match the input as much as possible. The trick is to use a small number of parameters, so your model learns a compressed representation of the data. # # In a sense, Autoencoders try to learn only the most important features (compressed version) of the data. Here, we'll have a look at how to feed Time Series data to an Autoencoder. 
We'll use a couple of LSTM layers (hence the LSTM Autoencoder) to capture the temporal dependencies of the data. # # To classify a sequence as normal or an anomaly, we'll pick a threshold above which a heartbeat is considered abnormal. # # ### Reconstruction Loss # # When training an Autoencoder, the objective is to reconstruct the input as best as possible. This is done by minimizing a loss function (just like in supervised learning). This function is known as *reconstruction loss*. Cross-entropy loss and Mean squared error are common examples. # + [markdown] id="eFZ3o2F8eUaW" colab_type="text" # ## Anomaly Detection in ECG Data # # We'll use normal heartbeats as training data for our model and record the *reconstruction loss*. But first, we need to prepare the data: # + [markdown] id="QGKKj6fgUV_a" colab_type="text" # ### Data Preprocessing # # Let's get all normal heartbeats and drop the target (class) column: # + id="NA0k8mTijyh-" colab_type="code" outputId="53f14ed3-c11a-4475-9cb4-fa928598a6fe" colab={"base_uri": "https://localhost:8080/", "height": 34} normal_df = df[df.target == str(CLASS_NORMAL)].drop(labels='target', axis=1) normal_df.shape # + [markdown] id="PRCZ7uviaI1Y" colab_type="text" # We'll merge all other classes and mark them as anomalies: # + id="xpdXIaDJstD3" colab_type="code" outputId="c65ca508-0bb1-4de4-90c6-9fbad1eda682" colab={"base_uri": "https://localhost:8080/", "height": 34} anomaly_df = df[df.target != str(CLASS_NORMAL)].drop(labels='target', axis=1) anomaly_df.shape # + [markdown] id="ILcJJwpda15z" colab_type="text" # We'll split the normal examples into train, validation and test sets: # + id="n7kJ7C3IFWIV" colab_type="code" colab={} train_df, val_df = train_test_split( normal_df, test_size=0.15, random_state=RANDOM_SEED ) val_df, test_df = train_test_split( val_df, test_size=0.33, random_state=RANDOM_SEED ) # + [markdown] id="nWasRQ1dcAp4" colab_type="text" # We need to convert our examples into tensors, so we can use them to train 
our Autoencoder. Let's write a helper function for that: # + id="h2kKiIIeBwKb" colab_type="code" colab={} def create_dataset(df): sequences = df.astype(np.float32).to_numpy().tolist() dataset = [torch.tensor(s).unsqueeze(1).float() for s in sequences] n_seq, seq_len, n_features = torch.stack(dataset).shape return dataset, seq_len, n_features # + [markdown] id="WT4BekX2g4L_" colab_type="text" # Each Time Series will be converted to a 2D Tensor in the shape *sequence length* x *number of features* (140x1 in our case). # # Let's create some datasets: # + id="Rb1UeUwbjmMD" colab_type="code" colab={} train_dataset, seq_len, n_features = create_dataset(train_df) val_dataset, _, _ = create_dataset(val_df) test_normal_dataset, _, _ = create_dataset(test_df) test_anomaly_dataset, _, _ = create_dataset(anomaly_df) # + [markdown] id="gLe1hXvRUSnA" colab_type="text" # ### LSTM Autoencoder # # ![Autoencoder](https://lilianweng.github.io/lil-log/assets/images/autoencoder-architecture.png) # *Sample Autoencoder Architecture [Image Source](https://lilianweng.github.io/lil-log/2018/08/12/from-autoencoder-to-beta-vae.html)* # + [markdown] id="SgZpEEzfqxNp" colab_type="text" # The general Autoencoder architecture consists of two components. An *Encoder* that compresses the input and a *Decoder* that tries to reconstruct it. # # We'll use the LSTM Autoencoder from this [GitHub repo](https://github.com/shobrook/sequitur) with some small tweaks. Our model's job is to reconstruct Time Series data. 
# Let's start with the *Encoder*:

# + id="X_f1WaTJhiXy" colab_type="code" colab={}
class Encoder(nn.Module):
    """Compress a (seq_len, n_features) sequence into an embedding vector.

    Two stacked LSTMs reduce the sequence; the final hidden state of the
    second LSTM is the compressed code.
    """

    def __init__(self, seq_len, n_features, embedding_dim=64):
        super().__init__()

        self.seq_len = seq_len
        self.n_features = n_features
        self.embedding_dim = embedding_dim
        # First LSTM widens to twice the embedding size before compressing.
        self.hidden_dim = 2 * embedding_dim

        self.rnn1 = nn.LSTM(
            input_size=n_features,
            hidden_size=self.hidden_dim,
            num_layers=1,
            batch_first=True
        )

        self.rnn2 = nn.LSTM(
            input_size=self.hidden_dim,
            hidden_size=embedding_dim,
            num_layers=1,
            batch_first=True
        )

    def forward(self, x):
        # The model processes one sequence at a time (batch size 1).
        batch = x.reshape((1, self.seq_len, self.n_features))
        out1, _ = self.rnn1(batch)
        _, (final_hidden, _) = self.rnn2(out1)
        # The last hidden state is the embedding.
        return final_hidden.reshape((self.n_features, self.embedding_dim))


# + [markdown] id="DysklqYmxTib" colab_type="text"
# The *Encoder* uses two LSTM layers to compress the Time Series data input.
#
# Next, we'll decode the compressed representation using a *Decoder*:

# + id="AdEft7l3hk6S" colab_type="code" colab={}
class Decoder(nn.Module):
    """Reconstruct a (seq_len, n_features) sequence from an embedding."""

    def __init__(self, seq_len, input_dim=64, n_features=1):
        super().__init__()

        self.seq_len = seq_len
        self.input_dim = input_dim
        self.hidden_dim = 2 * input_dim
        self.n_features = n_features

        self.rnn1 = nn.LSTM(
            input_size=input_dim,
            hidden_size=input_dim,
            num_layers=1,
            batch_first=True
        )

        self.rnn2 = nn.LSTM(
            input_size=input_dim,
            hidden_size=self.hidden_dim,
            num_layers=1,
            batch_first=True
        )

        self.output_layer = nn.Linear(self.hidden_dim, n_features)

    def forward(self, x):
        # Tile the embedding so every timestep receives it as input.
        tiled = x.repeat(self.seq_len, self.n_features)
        tiled = tiled.reshape((self.n_features, self.seq_len, self.input_dim))

        out1, _ = self.rnn1(tiled)
        out2, _ = self.rnn2(out1)

        # Map each timestep's hidden state back to feature space.
        flat = out2.reshape((self.seq_len, self.hidden_dim))
        return self.output_layer(flat)


# + [markdown] id="WUMb2NGZyTmi" colab_type="text"
# Our Decoder contains two LSTM layers and an output layer that gives the final reconstruction.
# # Time to wrap everything into an easy to use module: # + id="vgUChGd_A-Bv" colab_type="code" colab={} class RecurrentAutoencoder(nn.Module): def __init__(self, seq_len, n_features, embedding_dim=64): super(RecurrentAutoencoder, self).__init__() self.encoder = Encoder(seq_len, n_features, embedding_dim).to(device) self.decoder = Decoder(seq_len, embedding_dim, n_features).to(device) def forward(self, x): x = self.encoder(x) x = self.decoder(x) return x # + [markdown] id="zT6Cwq78sOrI" colab_type="text" # Our Autoencoder passes the input through the Encoder and Decoder. Let's create an instance of it: # + id="Mo0rvFqRBgnu" colab_type="code" colab={} model = RecurrentAutoencoder(seq_len, n_features, 128) model = model.to(device) # + [markdown] id="n1ENnubQdnJN" colab_type="text" # ## Training # # Let's write a helper function for our training process: # + id="ryEmRvl9DfEj" colab_type="code" colab={} def train_model(model, train_dataset, val_dataset, n_epochs): optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) criterion = nn.L1Loss(reduction='sum').to(device) history = dict(train=[], val=[]) best_model_wts = copy.deepcopy(model.state_dict()) best_loss = 10000.0 for epoch in range(1, n_epochs + 1): model = model.train() train_losses = [] for seq_true in train_dataset: optimizer.zero_grad() seq_true = seq_true.to(device) seq_pred = model(seq_true) loss = criterion(seq_pred, seq_true) loss.backward() optimizer.step() train_losses.append(loss.item()) val_losses = [] model = model.eval() with torch.no_grad(): for seq_true in val_dataset: seq_true = seq_true.to(device) seq_pred = model(seq_true) loss = criterion(seq_pred, seq_true) val_losses.append(loss.item()) train_loss = np.mean(train_losses) val_loss = np.mean(val_losses) history['train'].append(train_loss) history['val'].append(val_loss) if val_loss < best_loss: best_loss = val_loss best_model_wts = copy.deepcopy(model.state_dict()) print(f'Epoch {epoch}: train loss {train_loss} val loss {val_loss}') 
model.load_state_dict(best_model_wts) return model.eval(), history # + [markdown] id="7iWQrzV1ASpW" colab_type="text" # At each epoch, the training process feeds our model with all training examples and evaluates the performance on the validation set. Note that we're using a batch size of 1 (our model sees only 1 sequence at a time). We also record the training and validation set losses during the process. # # Note that we're minimizing the [L1Loss](https://pytorch.org/docs/stable/nn.html#l1loss), which measures the MAE (mean absolute error). Why? The reconstructions seem to be better than with MSE (mean squared error). # # We'll get the version of the model with the smallest validation error. Let's do some training: # + id="saamYyUsHdw0" colab_type="code" outputId="811c91c2-de6e-4215-f331-af9e33799c18" colab={"base_uri": "https://localhost:8080/", "height": 1000} model, history = train_model( model, train_dataset, val_dataset, n_epochs=150 ) # + id="uztvlmNkQSUE" colab_type="code" outputId="48c10c3b-55a8-4513-b263-3531fc6b51fd" colab={"base_uri": "https://localhost:8080/", "height": 523} ax = plt.figure().gca() ax.plot(history['train']) ax.plot(history['val']) plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['train', 'test']) plt.title('Loss over training epochs') plt.show(); # + [markdown] id="X2IGZFBEVJjq" colab_type="text" # Our model converged quite well. Seems like we might've needed a larger validation set to smoothen the results, but that'll do for now. 
# + [markdown] id="pmoaNSERn09J" colab_type="text" # ## Saving the model # # Let's store the model for later use: # + id="tLC_ClIpnv9H" colab_type="code" colab={} MODEL_PATH = 'model.pth' torch.save(model, MODEL_PATH) # + [markdown] id="E6q7H7-ea-Lb" colab_type="text" # Uncomment the next lines, if you want to download and load the pre-trained model: # + id="e4Hxo-Xftiej" colab_type="code" colab={} # # !gdown --id 1jEYx5wGsb7Ix8cZAw3l5p5pOwHs3_I9A # model = torch.load('model.pth') # model = model.to(device) # + [markdown] id="EwLujPFXT054" colab_type="text" # ## Choosing a threshold # # With our model at hand, we can have a look at the reconstruction error on the training set. Let's start by writing a helper function to get predictions from our model: # + id="AAhYZy9bRNLM" colab_type="code" colab={} def predict(model, dataset): predictions, losses = [], [] criterion = nn.L1Loss(reduction='sum').to(device) with torch.no_grad(): model = model.eval() for seq_true in dataset: seq_true = seq_true.to(device) seq_pred = model(seq_true) loss = criterion(seq_pred, seq_true) predictions.append(seq_pred.cpu().numpy().flatten()) losses.append(loss.item()) return predictions, losses # + [markdown] id="wVnNtIreDXf5" colab_type="text" # Our function goes through each example in the dataset and records the predictions and losses. 
Let's get the losses and have a look at them: # + id="pvn141SDS33P" colab_type="code" outputId="f6d9f42f-489e-4268-fc3e-368859fffa43" colab={"base_uri": "https://localhost:8080/", "height": 488} _, losses = predict(model, train_dataset) sns.distplot(losses, bins=50, kde=True); # + id="MjSCtDZ8_xGB" colab_type="code" colab={} THRESHOLD = 26 # + [markdown] id="mw2dm631T4a5" colab_type="text" # ## Evaluation # # Using the threshold, we can turn the problem into a simple binary classification task: # # - If the reconstruction loss for an example is below the threshold, we'll classify it as a *normal* heartbeat # - Alternatively, if the loss is higher than the threshold, we'll classify it as an anomaly # + [markdown] id="k94t9U3X7uVA" colab_type="text" # ### Normal hearbeats # # Let's check how well our model does on normal heartbeats. We'll use the normal heartbeats from the test set (our model haven't seen those): # + id="-z630B5v7Fid" colab_type="code" outputId="38289be7-f326-4759-d1e3-4be4783e3060" colab={"base_uri": "https://localhost:8080/", "height": 488} predictions, pred_losses = predict(model, test_normal_dataset) sns.distplot(pred_losses, bins=50, kde=True); # + [markdown] id="xCmzETkkLLvh" colab_type="text" # We'll count the correct predictions: # + id="BR-hcvUP7OBt" colab_type="code" outputId="9f7c7a5d-e130-483c-e3b9-526697bdf775" colab={"base_uri": "https://localhost:8080/", "height": 34} correct = sum(l <= THRESHOLD for l in pred_losses) print(f'Correct normal predictions: {correct}/{len(test_normal_dataset)}') # + [markdown] id="hYbH8iEz7wmh" colab_type="text" # ### Anomalies # + [markdown] id="hidyhcu6zC8-" colab_type="text" # We'll do the same with the anomaly examples, but their number is much higher. 
We'll get a subset that has the same size as the normal heartbeats: # + id="WJcg5DXWyiep" colab_type="code" colab={} anomaly_dataset = test_anomaly_dataset[:len(test_normal_dataset)] # + [markdown] id="gcBoplGU0hR8" colab_type="text" # Now we can take the predictions of our model for the subset of anomalies: # + id="tLCuS8oL7hG2" colab_type="code" outputId="5944d102-7682-4524-eb8e-4eb025fb8669" colab={"base_uri": "https://localhost:8080/", "height": 488} predictions, pred_losses = predict(model, anomaly_dataset) sns.distplot(pred_losses, bins=50, kde=True); # + [markdown] id="EkgGXs4E06so" colab_type="text" # Finally, we can count the number of examples above the threshold (considered as anomalies): # + id="_NEUmDoM8M6q" colab_type="code" outputId="389edfb6-3c88-4ec3-ed1e-63c386294ac1" colab={"base_uri": "https://localhost:8080/", "height": 34} correct = sum(l > THRESHOLD for l in pred_losses) print(f'Correct anomaly predictions: {correct}/{len(anomaly_dataset)}') # + [markdown] id="v0PfwvTZ23_s" colab_type="text" # We have very good results. In the real world, you can tweak the threshold depending on what kind of errors you want to tolerate. In this case, you might want to have more false positives (normal heartbeats considered as anomalies) than false negatives (anomalies considered as normal). # + [markdown] id="KrW0seHZ72Mu" colab_type="text" # #### Looking at Examples # # We can overlay the real and reconstructed Time Series values to see how close they are. 
We'll do it for some normal and anomaly cases: # + id="gBRWRk6WWdNC" colab_type="code" colab={} def plot_prediction(data, model, title, ax): predictions, pred_losses = predict(model, [data]) ax.plot(data, label='true') ax.plot(predictions[0], label='reconstructed') ax.set_title(f'{title} (loss: {np.around(pred_losses[0], 2)})') ax.legend() # + id="ZnN1K63BYomX" colab_type="code" outputId="4918d7d6-88a3-4549-a994-736fa1969176" colab={"base_uri": "https://localhost:8080/", "height": 596} fig, axs = plt.subplots( nrows=2, ncols=6, sharey=True, sharex=True, figsize=(22, 8) ) for i, data in enumerate(test_normal_dataset[:6]): plot_prediction(data, model, title='Normal', ax=axs[0, i]) for i, data in enumerate(test_anomaly_dataset[:6]): plot_prediction(data, model, title='Anomaly', ax=axs[1, i]) fig.tight_layout(); # + [markdown] id="dC1-69KD2lQ0" colab_type="text" # ## Summary # # In this tutorial, you learned how to create an LSTM Autoencoder with PyTorch and use it to detect heartbeat anomalies in ECG data. # # - [Read the tutorial](https://www.curiousily.com/posts/time-series-anomaly-detection-using-lstm-autoencoder-with-pytorch-in-python/) # - [Run the notebook in your browser (Google Colab)](https://colab.research.google.com/drive/1_J2MrBSvsJfOcVmYAN2-WSp36BtsFZCa) # - [Read the Getting Things Done with Pytorch book](https://github.com/curiousily/Getting-Things-Done-with-Pytorch) # # You learned how to: # # - Prepare a dataset for Anomaly Detection from Time Series Data # - Build an LSTM Autoencoder with PyTorch # - Train and evaluate your model # - Choose a threshold for anomaly detection # - Classify unseen examples as normal or anomaly # # While our Time Series data is univariate (we have only 1 feature), the code should work for multivariate datasets (multiple features) with little or no modification. Feel free to try it! 
# + [markdown] id="9icnPwTOW9RF" colab_type="text" # ## References # # - [Sequitur - Recurrent Autoencoder (RAE)](https://github.com/shobrook/sequitur) # - [Towards Never-Ending Learning from Time Series Streams](https://www.cs.ucr.edu/~eamonn/neverending.pdf) # - [LSTM Autoencoder for Anomaly Detection](https://towardsdatascience.com/lstm-autoencoder-for-anomaly-detection-e1f4f2ee7ccf)
06.time-series-anomaly-detection-ecg.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # ---

# + [markdown] id="LgY989YWW5fI"
# https://xavierbourretsicotte.github.io/LDA_QDA.html

# + id="0mgiGOleW3zz"
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits import mplot3d
from sklearn import linear_model, datasets
import seaborn as sns
import itertools
# %matplotlib inline
sns.set()
#plt.style.use('seaborn-white')

# + id="RVv_bzInXNra"
def multivariate_gaussian_pdf(X, MU, SIGMA):
    '''Returns the pdf of a multivariate gaussian distribution
    - X, MU are p x 1 vectors
    - SIGMA is a p x p matrix'''
    # Initialize and reshape
    X = X.reshape(-1, 1)
    MU = MU.reshape(-1, 1)
    p, _ = SIGMA.shape

    # Compute values
    SIGMA_inv = np.linalg.inv(SIGMA)
    denominator = np.sqrt((2 * np.pi)**p * np.linalg.det(SIGMA))
    exponent = -(1/2) * ((X - MU).T @ SIGMA_inv @ (X - MU))

    # Return result
    return float((1. / denominator) * np.exp(exponent))


def calculate_boundary(X, MU_k, MU_l, SIGMA, pi_k, pi_l):
    # Signed value of the LDA decision boundary between classes k and l
    # (zero exactly on the boundary).
    return (np.log(pi_k / pi_l)
            - 1/2 * (MU_k + MU_l).T @ np.linalg.inv(SIGMA) @ (MU_k - MU_l)
            + X.T @ np.linalg.inv(SIGMA) @ (MU_k - MU_l)).flatten()[0]


def LDA_score(X, MU_k, SIGMA, pi_k):
    # Returns the value of the linear discriminant score function for a given class "k" and
    # a given x value X
    return (np.log(pi_k)
            - 1/2 * (MU_k).T @ np.linalg.inv(SIGMA) @ (MU_k)
            + X.T @ np.linalg.inv(SIGMA) @ (MU_k)).flatten()[0]


def predict_LDA_class(X, MU_list, SIGMA, pi_list):
    # Returns the class for which the linear discriminant score function is largest.
    # BUG FIX: the original ignored the SIGMA parameter (it used the *global*
    # `sigma`) and always used pi_list[0] instead of each class's own prior.
    scores_list = []
    for p in range(len(MU_list)):
        score = LDA_score(X.reshape(-1, 1), MU_list[p].reshape(-1, 1), SIGMA, pi_list[p])
        scores_list.append(score)
    return np.argmax(scores_list)


def QDA_score(X, MU_k, SIGMA, pi_k):
    # Returns the value of the quadratic discriminant score function for a given class "k" and
    # a given x value X.
    # BUG FIX: the QDA score is log(pi_k) - 1/2 log|SIGMA| - 1/2 (X-MU)' SIGMA^-1 (X-MU);
    # the original computed -1/2 log|SIGMA^-1|, which flips the sign of the
    # log-determinant term (log|SIGMA^-1| = -log|SIGMA|).
    SIGMA_inv = np.linalg.inv(SIGMA)
    return (np.log(pi_k)
            - 1/2 * np.log(np.linalg.det(SIGMA))
            - 1/2 * (X - MU_k).T @ SIGMA_inv @ (X - MU_k)).flatten()[0]


def predict_QDA_class(X, MU_list, SIGMA_list, pi_list):
    # Returns the class for which the quadratic discriminant score function is largest
    scores_list = []
    for p in range(len(MU_list)):
        score = QDA_score(X.reshape(-1, 1), MU_list[p].reshape(-1, 1), SIGMA_list[p], pi_list[p])
        scores_list.append(score)
    return np.argmax(scores_list)


# + colab={"base_uri": "https://localhost:8080/", "height": 742} id="MptRSS0OXNuB" executionInfo={"status": "ok", "timestamp": 1625164713536, "user_tz": -180, "elapsed": 10518, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08851466340381164023"}} outputId="95f7033c-ca7f-45b6-b8e8-f424e9401a94"
iris = sns.load_dataset("iris")
sns.pairplot(iris, hue="species")

# + colab={"base_uri": "https://localhost:8080/", "height": 488} id="H-277Rv7XNw1" executionInfo={"status": "ok", "timestamp": 1625164717014, "user_tz": -180,
"elapsed": 1318, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08851466340381164023"}} outputId="be2bb26d-9a63-4111-faf5-f523e7d5de4a" iris = iris.rename(index = str, columns = {'sepal_length':'1_sepal_length','sepal_width':'2_sepal_width', 'petal_length':'3_petal_length', 'petal_width':'4_petal_width'}) sns.FacetGrid(iris, hue="species", size=6) .map(plt.scatter,"1_sepal_length", "2_sepal_width", ) .add_legend() plt.title('Scatter plot') df1 = iris[["1_sepal_length", "2_sepal_width",'species']] # + [markdown] id="7BvzZWJ2XdhZ" # Visualizing the gaussian estimations and the boundary lines # + colab={"base_uri": "https://localhost:8080/", "height": 777} id="Z0X3lvrqXNzf" executionInfo={"status": "ok", "timestamp": 1625164759500, "user_tz": -180, "elapsed": 6033, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08851466340381164023"}} outputId="3ff46327-9feb-4a2f-b3a0-32c6372c7a51" #Estimating the parameters mu_list = np.split(df1.groupby('species').mean().values,[1,2]) sigma = df1.cov().values pi_list = df1.iloc[:,2].value_counts().values / len(df1) # Our 2-dimensional distribution will be over variables X and Y N = 100 X = np.linspace(3, 8, N) Y = np.linspace(1.5, 5, N) X, Y = np.meshgrid(X, Y) #fig = plt.figure(figsize = (10,10)) #ax = fig.gca() color_list = ['Blues','Greens','Reds'] my_norm = colors.Normalize(vmin=-1.,vmax=1.) 
g = sns.FacetGrid(iris, hue="species", size=10, palette = 'colorblind') .map(plt.scatter,"1_sepal_length", "2_sepal_width", ) .add_legend() my_ax = g.ax for i,v in enumerate(itertools.combinations([0,1,2],2)): mu = mu_list[i] Sigma = sigma #Computing the cost function for each theta combination zz = np.array( [multivariate_gaussian_pdf( np.array([xx,yy]).reshape(-1,1), mu, Sigma) for xx, yy in zip(np.ravel(X), np.ravel(Y)) ] ) bb = np.array( [ calculate_boundary(np.array([xx,yy]).reshape(-1,1),mu_list[v[0]].reshape(-1,1),mu_list[v[1]].reshape(-1,1), sigma , .33,.33) for xx, yy in zip(np.ravel(X), np.ravel(Y)) ] ) #Reshaping the cost values Z = zz.reshape(X.shape) B = bb.reshape(X.shape) #Plot the result in 3D my_ax.contour( X, Y, Z, 3,cmap = color_list[i] , norm = my_norm, alpha = .3) my_ax.contour( X, Y, B , levels = [0] ,cmap = color_list[i] , norm = my_norm) # Adjust the limits, ticks and view angle my_ax.set_xlabel('X') my_ax.set_ylabel('Y') my_ax.set_title('LDA: gaussians of each class and boundary lines') plt.show() # + [markdown] id="Zu8yabFcXuZ_" # Visualizing the predicted classes based on the LDA score function # + colab={"base_uri": "https://localhost:8080/", "height": 777} id="z9772PduXN2I" executionInfo={"status": "ok", "timestamp": 1625164822448, "user_tz": -180, "elapsed": 9558, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08851466340381164023"}} outputId="9f7b1884-7a4c-460a-a58f-3d6baeb42eed" # Our 2-dimensional distribution will be over variables X and Y N = 200 X = np.linspace(3, 8, N) Y = np.linspace(1.5, 5, N) X, Y = np.meshgrid(X, Y) #Initialize seaborn facetplot g = sns.FacetGrid(iris, hue="species", size=10, palette = 'colorblind') .map(plt.scatter,"1_sepal_length", "2_sepal_width", ) .add_legend() my_ax = g.ax #Retrieving the faceplot axes #Computing the predicted class function for each value on the grid zz = np.array( [predict_LDA_class( np.array([xx,yy]).reshape(-1,1), mu_list, Sigma, pi_list) for xx, yy in 
zip(np.ravel(X), np.ravel(Y)) ] ) #Reshaping the predicted class into the meshgrid shape Z = zz.reshape(X.shape) #Plot the filled and boundary contours my_ax.contourf( X, Y, Z, 2, alpha = .1, colors = ('blue','green','red')) my_ax.contour( X, Y, Z, 2, alpha = 1, colors = ('blue','green','red')) # Addd axis and title my_ax.set_xlabel('X') my_ax.set_ylabel('Y') my_ax.set_title('LDA and boundaries') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 33} id="kWPRoBvUXN46" executionInfo={"status": "ok", "timestamp": 1625164831971, "user_tz": -180, "elapsed": 408, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08851466340381164023"}} outputId="873b0454-f0e1-48e6-c761-1dedcf9c0251" #Shape training data X_data = df1.iloc[:,0:2] y_labels = df1.iloc[:,2].replace({'setosa':0,'versicolor':1,'virginica':2}).copy() #Classify and compute accuracy accuracy y_pred = np.array( [predict_LDA_class( np.array([xx,yy]).reshape(-1,1), mu_list, Sigma, pi_list) for xx, yy in zip(np.ravel(X_data.values[:,0]), np.ravel(X_data.values[:,1])) ] ) display(np.mean(y_pred == y_labels)) # + id="q5gUaBCEXN70"
Chapter 4/Python/discriminant analysis/Iris LDA visualization from outside.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Install (some of the) required packages # !pip install ogb # !pip install POT # !pip install prettytable # !pip install tqdm # + # Import required modules import ogb; print('ogb version {}'.format(ogb.__version__)) # make sure the version is =>1.1.1. from ogb.graphproppred import PygGraphPropPredDataset from WEGL.WEGL import WEGL # + # Set the random seed random_seed = 55 # + # Load the dataset dataset = PygGraphPropPredDataset(name="ogbg-molhiv") print('# of graphs = {0}\n# of classes = {1}\n# of node features = {2}\n# of edge features = {3}'.\ format(len(dataset), dataset.num_classes, dataset.num_node_features, dataset.num_edge_features)) if isinstance(dataset, PygGraphPropPredDataset): # OGB datasets print('# of tasks = {}'.format(dataset.num_tasks)) # + # Specify the parameters # num_hidden_layers = range(3, 9) num_hidden_layers = [4] # node_embedding_sizes = [100, 300, 500] node_embedding_sizes = [300] # final_node_embeddings = ['concat', 'avg', 'final'] final_node_embeddings = ['final'] num_pca_components = 20 num_experiments = 10 classifiers = ['RF'] device = 'cpu' # + # Run the algorithm for final_node_embedding in final_node_embeddings: WEGL(dataset=dataset, num_hidden_layers=num_hidden_layers, node_embedding_sizes=node_embedding_sizes, final_node_embedding=final_node_embedding, num_pca_components=num_pca_components, num_experiments=num_experiments, classifiers=classifiers, random_seed=random_seed, device=device) # -
.ipynb_checkpoints/WEGL_RandomForest_ogbg_molhiv-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernel_info:
#     name: python2
#   kernelspec:
#     display_name: Python [conda env:py35]
#     language: python
#     name: conda-env-py35-py
# ---

# + [markdown] inputHidden=false outputHidden=false
# # Here is a nice Image Blending program written in C++
# -

# ## A fast easy image blender called "controlBlend"
# The script and compiling directions are below. It can be done without leaving the Jupyter Notebook<br />
# the top line ' %%writefile controlBlend.cpp ' will write the C++ file

# Executing/running the next line will compile ' controlBlend.cpp '

# !sudo locate opencv.pc

# !export PKG_CONFIG_PATH=/home/jack/anaconda2/envs/py35/lib/pkgconfig/opencv.pc g++ pkg-config --cflags --libs opencv controlBlend.cpp -o controlBlend

# + inputHidden=false outputHidden=false
# !g++ controlBlend.cpp -o controlBlend '/home/jack/anaconda2/envs/py35/lib/pkgconfig/opencv --cflags --libs'

# + inputHidden=false outputHidden=false
# !g++ controlBlend.cpp -o controlBlend `pkg-config opencv --cflags --libs`
# -

# When run, the next line shows the file ' image1.png ' in a pop-up window called <b>Image Viewer</b><br />
# Since it was copied to /usr/local/bin it is an executable from any directory. You may use<br />
# the notebook or a terminal window via the command line.
# + inputHidden=false outputHidden=false
# %%writefile controlBlend.cpp
#include <cv.h>
#include <highgui.h>

using namespace cv;

/// Global Variables
const int alpha_slider_max = 100;  // trackbar range: 0..100 maps to alpha 0.0..1.0
int alpha_slider;
double alpha;
double beta;

/// Matrices to store images
Mat src1;
Mat src2;
Mat dst;

/**
 * @function on_trackbar
 * @brief Callback for trackbar: recompute the weighted blend
 *        dst = alpha*src1 + (1-alpha)*src2 and redisplay it.
 */
void on_trackbar( int, void* )
{
 alpha = (double) alpha_slider/alpha_slider_max ;
 beta = ( 1.0 - alpha );

 addWeighted( src1, alpha, src2, beta, 0.0, dst);

 imshow( "Linear Blend", dst );
}

int main( int argc, char** argv )
{
 /// Read image ( same size, same type ) -- paths are hard-coded relative to the CWD
 src1 = imread("images/hicks01.jpg");
 src2 = imread("images/hicks02.jpg");

 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }

 /// Initialize values (slider at 0 means the blend starts as 100% src2)
 alpha_slider = 0;

 /// Create Windows
 namedWindow("Linear Blend", 1);

 /// Create Trackbars
 char TrackbarName[50];
 sprintf( TrackbarName, "Alpha x %d", alpha_slider_max );

 createTrackbar( TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar );

 /// Show some stuff -- render the initial blend once before any user interaction
 on_trackbar( alpha_slider, 0 );

 /// Wait until user press some key
 waitKey(0);
 return 0;
}
# -
Creating-C++-Image-blender-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CS 109A/STAT 121A/AC 209A/CSCI E-109A: Homework 5 # # Logistic Regression and PCA # # **Harvard University**<br/> # **Fall 2017**<br/> # **Instructors**: <NAME>, <NAME>, <NAME>, <NAME> # # --- # # ### INSTRUCTIONS # # - To submit your assignment follow the instructions given in canvas. # - Restart the kernel and run the whole notebook again before you submit. # - Do not include your name(s) in the notebook if you are submitting as a group. # - If you submit individually and you have worked with someone, please include the name of your [one] partner below. # # --- # Your partner's name (if you submit separately): # # Enrollment Status (109A, 121A, 209A, or E109A): 109A # Import libraries: import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt import statsmodels.api as sm from statsmodels.api import OLS from sklearn.decomposition import PCA from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LogisticRegressionCV from sklearn.utils import resample from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score # %matplotlib inline # ## Cancer Classification from Gene Expressions # # In this homework assignment, we will build a classification model to distinguish between two related classes of cancer, acute lymphoblastic leukemia (ALL) and acute myeloid leukemia (AML), using gene expression measurements. The data set is provided in the file `dataset_hw5.csv`. Each row in this file corresponds to a tumor tissue sample from a patient with one of the two forms of Leukemia. The first column contains the cancer type, with 0 indicating the ALL class and 1 indicating the AML class. 
Columns 2-7130 contain expression levels of 7129 genes recorded from each tissue sample.
#
# In the following parts, we will use logistic regression to build a classification model for this data set. We will also use principal components analysis (PCA) to visualize the data and to reduce its dimensions.
#
#
# ## Part (a): Data Exploration
#
# 1. First step is to split the observations into an approximate 50-50 train-test split. Below is some code to do this for you (we want to make sure everyone has the same splits).
#
# 2. Take a peek at your training set: you should notice the severe differences in the measurements from one gene to the next (some are negative, some hover around zero, and some are well into the thousands). To account for these differences in scale and variability, normalize each predictor to vary between 0 and 1.
#
# 3. Notice that the resulting training set contains more predictors than observations. Do you foresee a problem in fitting a classification model to such a data set?
#
# 4. A convenient tool to visualize the gene expression data is a heat map. Arrange the rows of the training set so that the 'AML' rows are grouped together and the 'ALL' rows are together. Generate a heat map of the data with expression values from the following genes:
# `D49818_at`, `M23161_at`, `hum_alu_at`, `AFFX-PheX-5_at`, `M15990_at`. By observing the heat map, comment on which of these genes are useful in discriminating between the two classes.
#
# 5. We can also visualize this data set in two dimensions using PCA. Find the top two principal components for the gene expression data. Generate a scatter plot using these principal components, highlighting the AML and ALL points in different colors. How well do the top two principal components discriminate between the two classes?
#
#

# train test split!
np.random.seed(9001)
df = pd.read_csv('dataset_hw5.csv')
# Random ~50/50 split using a boolean mask (fixed seed so everyone gets the same split).
msk = np.random.rand(len(df)) < 0.5
data_train = df[msk]
data_test = df[~msk]

# # In this section I standardize the training data first by doing scaler.fit(xtrain), I then transform both the train and the test with this to ensure I do not allow test information to leak into training and to make sure training rules are being applied to the test set. I opt for standardization because some gene expressions have outliers and these may be significant in later stages of the modeling (normalizing may scale down my predictors to very small numbers if I have large outliers)

# +
# scaling entire dataset/train and test
# NOTE(review): the text above says "standardize", but MinMaxScaler below
# min-max normalizes to [0,1] -- confirm which was intended (the later Q/A
# discusses both choices).
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# Fit the scaler on the training predictors only (all columns except the label).
newXtrain = pd.DataFrame.copy(data_train.loc[:,data_train.columns!='Cancer_type'])
scaler.fit(newXtrain)
Xtrain_scaled = pd.DataFrame(scaler.transform(newXtrain), columns=newXtrain.columns)
Xtrain_scaled=Xtrain_scaled.set_index(data_train.index)  # restore the original row index
Xtrain_scaled['Cancer_type'] = pd.DataFrame.copy(data_train['Cancer_type'])

# Apply the SAME fitted scaler to the test predictors (no refit -> no leakage).
newXtest = pd.DataFrame.copy(data_test.loc[:,data_test.columns!='Cancer_type'])
Xtest_scaled = pd.DataFrame(scaler.transform(newXtest), columns=newXtest.columns)
Xtest_scaled=Xtest_scaled.set_index(data_test.index)
Xtest_scaled['Cancer_type'] = pd.DataFrame.copy(data_test['Cancer_type'])
# -

# plot heatmap using seaborn (much easier)
import seaborn as sns
# Sort so the class-1 (ALL) rows are grouped at the top, class-0 (AML) below.
zz = Xtrain_scaled.sort_values(by='Cancer_type', ascending=0)
fig, ax = plt.subplots(figsize=(5,10))
ax = sns.heatmap(zz[['D49818_at','M23161_at', 'hum_alu_at', 'AFFX-PheX-5_at', 'M15990_at']],cmap ='viridis')
ax.set_yticklabels(reversed(zz[['Cancer_type']].values[:,0]))
plt.title('Gene Expression Heatmap against ALL Response')
plt.ylabel('Actual Response (ALL=1, AML =0)')

# PCA section: project the (scaled) predictors onto the top two components.
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
respca = pca.fit_transform(zz.drop('Cancer_type',axis =1))
respca = pd.DataFrame(respca, columns = ['pca1','pca2'])
respca['Cancer_type'] = zz[['Cancer_type']].values
sns.lmplot(x = 'pca1',y='pca2',data = respca,hue = 'Cancer_type',fit_reg = False,size = 10)

# Q
#
# - Notice that the resulting training set contains more predictors than observations. Do you foresee a problem in fitting a classification model to such a data set?
#
# A
#
# - Yes, high probability of over fitting our data (hence high likelihood of large variance e.g. low test score and high training score where score refers to (1-misclassification rate))
#
# Q
#
# - A convenient tool to visualize the gene expression data is a heat map. Arrange the rows of the training set so that the 'AML' rows are grouped together and the 'ALL' rows are together. Generate a heat map of the data with expression values from the following genes: D49818_at, M23161_at, hum_alu_at, AFFX-PheX-5_at, M15990_at. By observing the heat map, comment on which of these genes are useful in discriminating between the two classes.
#
# A
#
# - Out of interest, I ran the model using standardization and normalization and the only core difference between these models was the heatmap generated in this section. Therefore, it depends on the way you standardize/normalize data. When you $\textbf{normalize}$ the data 'M15990_at' and 'M23161_at' are dark almost everywhere so we do not think these two can be good predictors. The others don't seem to have clear demarcations but AFXX seems to be relatively good with darker regions with zeros and lighter with 1's.
# - When we $\textbf{standardize}$, M23161_at gives us lighter yellows for 0 and darker colors for 1's meaning this could be a good predictor.
# - Why this difference you ask? Well, we get this difference because when we scale with min max, we really are scaling by outliers in our data therefore, many of the points are being divided by these outliers. For the rest of the problem I have used standardization.
# # Q
#
# - How do top 2 PCA components discriminate between 2 classes
#
# A
#
# - Top 2 pca components can discriminate a decent number of types. For example, in the lower left half (so low pca1 and pca2 values) we tend to get green dots and hence ALL. In the region to the top right of this, we generally get most of the blue dots with some level of misclassification. Therefore, 2 pca components aren't bad at prediction. We will see in a later section that this is quite true.

# ## Part (b): Linear Regression vs. Logistic Regression
#
# Begin by analyzing the differences between using linear regression and logistic regression for classification. For this part, you shall work with a single gene predictor: `M23161_at`.
#
# 1. Fit a simple linear regression model to the training set using the single gene predictor `D29963_at`. We could interpret the scores predicted by the regression model for a patient as an estimate of the probability that the patient has the `ALL` type cancer (class 1). Is there a problem with this interpretation?
#
# 2. The fitted linear regression model can be converted to a classification model (i.e. a model that predicts one of two binary labels 0 or 1) by classifying patients with predicted score greater than 0.5 into the `ALL` type (class 1), and the others into the `AML` type (class 0). Evaluate the classification accuracy (1 - misclassification rate) of the obtained classification model on both the training and test sets.
#
# 3. Next, fit a simple logistic regression model to the training set. How does the training and test classification accuracy of this model compare with the linear regression model? Remember, you need to set the regularization parameter for sklearn's logistic regression function to be a very large value in order not to regularize (use 'C=100000').
#
# 4.
Plot the quantitative output from linear regression model and the probabilistic output from the logistic regression model (on the training set points) as a function of the gene predictor. Also, display the true binary response for the training set points in the same plot.
# Based on these plots, does one of the models appear better suited for binary classification than the other? Explain.
#
#

# part 1
# extract train and test response (as (n,1) arrays)
ytrain = Xtrain_scaled[['Cancer_type']].values
ytest = Xtest_scaled[['Cancer_type']].values

# fit a linear regression on the single gene D29963_at
linreg = LinearRegression(fit_intercept = True)
linreg.fit(Xtrain_scaled[['D29963_at']],ytrain)
ypred = linreg.predict(Xtrain_scaled[['D29963_at']])
df = pd.DataFrame(np.c_[ypred,ytrain], columns = ['predicted%of1','actual'])
df.T

# use values as %'s and cast to 1's and 0's with 0.5
# NOTE: ypred is mutated in place below, so it holds hard labels afterwards.
ypred = ypred[:,0]
ypred[ypred>0.5] = 1
ypred[ypred<=0.5] = 0
print('Train Classification accuracy (linear) =%s' %(1-np.sum(abs(ypred-ytrain[:,0]))/len(ypred)))
# Repeat the thresholding on the test predictions.
ypred = linreg.predict(Xtest_scaled[['D29963_at']])
ypred = ypred[:,0]
ypred[ypred>0.5] = 1
ypred[ypred<=0.5] = 0
print('Test Classification accuracy (linear) =%s' %(1-np.sum(abs(ypred-ytest[:,0]))/len(ypred)))

# fit logistic regression to gene expression D29963_at
# (C is huge so the fit is effectively unregularized, per the assignment)
logreg = LogisticRegression(fit_intercept = True, C = 100000)
logreg.fit(Xtrain_scaled[['D29963_at']],np.ravel(ytrain))
ypred = logreg.predict(Xtrain_scaled[['D29963_at']])
print('Train Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytrain[:,0]))/len(ypred)))
ypred = logreg.predict(Xtest_scaled[['D29963_at']])
print('Test Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytest[:,0]))/len(ypred)))

# plot probabilities of log and linear regression with True response
xvals = Xtrain_scaled[['D29963_at']]
ypredlin = linreg.predict(Xtrain_scaled[['D29963_at']])
ypredlog = logreg.predict_proba(Xtrain_scaled[['D29963_at']])  # column 1 = P(class 1)
ytrue = Xtrain_scaled[['Cancer_type']]
plt.figure(figsize=(10,10))
plt.scatter(xvals,ytrue,label = 'true', c = 'b',alpha = 0.3)
plt.scatter(xvals,ypredlin,label = 'lin',c = 'r',alpha = 0.5)
plt.scatter(xvals,ypredlog[:,1],label = 'log',c='g',alpha = 0.6)
plt.xlabel('D29963_at expression')
plt.ylabel('probability of being ALL')
plt.legend()

# Q
#
# - Fit a simple linear regression model to the training set using the single gene predictor D29963_at. We could interpret the scores predicted by regression model interpreted for a patient as an estimate of the probability that the patient has the ALL type cancer (class 1). Is there a problem with this interpretation?
#
# A
#
# - Our model is not restricted in any way to be between 0 and 1, we could have negative predictions or predictions greater than 1 which are obviously not probabilities so while this interpretation allows us to classify we may not generate probabilities. Furthermore, if we had more classification regions, then our predictions can definitely not be interpreted in this way since our response could be in any range (depending on values of response variables).
#
#
# Q
#
# - How does the training and test classification accuracy of this model compare with the linear regression model?
#
# A
#
# - We can see from the results above that both models generate the same train and test scores. At first instance this seems odd since we were introduced with the notion that logistic regression is a classifier (and should do better) but when we dig deeper we can see that because the gene expression values aren't highly spread (e.g low expression values don't lead to 0's while high expression values don't lead to 1's) these two methods are comparable. A great source explaining the similarities can be found here: https://statisticalhorizons.com/linear-vs-logistic . Essentially, linear regression can often times do just as well if the probabilities don't have much spread and hence the log odds are linear.
# # Q
#
# - Based on these plots, does one of the models appear better suited for binary classification than the other? Explain.
#
# A
#
# - In the center we can see there is similar performance but at the extrema, it depends so from this particular example neither model is 'better' at first glance. However, if the expression values were more spread out, logistic regression would be better since it can do better at boundaries if there is enough spread in expression. This goes back to the discussion listed in the answer to the previous question.

# ## Part (c): Multiple Logistic Regression
#
# 1. Next, fit a multiple logistic regression model with all the gene predictors from the data set. How does the classification accuracy of this model compare with the models fitted in Part (b) with a single gene (on both the training and test sets)?
#
# 2. "Use the `visualize_prob` from `HW5_functions.py` to visualize the probabilities predicted by the fitted multiple logistic regression model on both the training and test data sets. The function creates a visualization that places the data points on a vertical line based on the predicted probabilities, with the `ALL` and `AML` classes shown in different colors, and with the 0.5 threshold highlighted using a dotted horizontal line. Is there a difference in the spread of probabilities in the training and test plots? Are there data points for which the predicted probability is close to 0.5? If so, what can you say about these points?"
# + # fit to all predictors colz = Xtrain_scaled.columns[:-1] xtrain = Xtrain_scaled[colz] xtest = Xtest_scaled[colz] multilr = LogisticRegression(fit_intercept = True,C = 10**6) multilr.fit(xtrain,np.ravel(ytrain)) ypred = multilr.predict(xtrain) print('Train Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytrain[:,0]))/len(ypred))) ypred = multilr.predict(xtest) print('Test Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytest[:,0]))/len(ypred))) # + #-------- visualize_prob # A function to visualize the probabilities predicted by a Logistic Regression model # Input: # model (Logistic regression model) # x (n x d array of predictors in training data) # y (n x 1 array of response variable vals in training data: 0 or 1) # ax (an axis object to generate the plot) def visualize_prob(model, x, y, ax): # Use the model to predict probabilities for y_pred = model.predict_proba(x) # Separate the predictions on the label 1 and label 0 points ypos = y_pred[y==1] yneg = y_pred[y==0] # Count the number of label 1 and label 0 points npos = ypos.shape[0] nneg = yneg.shape[0] # Plot the probabilities on a vertical line at x = 0, # with the positive points in blue and negative points in red pos_handle = ax.plot(np.zeros((npos,1)), ypos[:,1], 'bo', label = 'ALL') neg_handle = ax.plot(np.zeros((nneg,1)), yneg[:,1], 'ro', label = 'AML') # Line to mark prob 0.5 ax.axhline(y = 0.5, color = 'k', linestyle = '--') # Add y-label and legend, do not display x-axis, set y-axis limit ax.set_ylabel('Probability of ALL class') ax.legend(loc = 'best') ax.get_xaxis().set_visible(False) ax.set_ylim([0,1]) # - fig,ax = plt.subplots(1,2,figsize=(14,7)) visualize_prob(multilr,xtrain,np.ravel(ytrain),ax[0]) ax[0].set_title('Training Classification') visualize_prob(multilr,xtest,np.ravel(ytest),ax[1]) ax[1].set_title('Test Classification') # Q # # - Next, fit a multiple logistic regression model with all the gene predictors from the data set. 
How does the classification accuracy of this model compare with the models fitted in Part (b) with a single gene (on both the training and test sets)?
#
# A
#
# - Classification accuracy on both training and test set improves with classification on the training set moving to 100% accuracy (indicative of some level of overfitting). The test score improves from 0.829 to 0.97 suggesting that the increase in feature space adds some more complexity and improves our test score.
#
# Q
#
# - "Use the visualize_prob from HW5_functions.py to visualize the probabilities predicted by the fitted multiple logistic regression model on both the training and test data sets. The function creates a visualization that places the data points on a vertical line based on the predicted probabilities, with the ALL and AML classes shown in different colors, and with the 0.5 threshold highlighted using a dotted horizontal line. Is there a difference in the spread of probabilities in the training and test plots? Are there data points for which the predicted probability is close to 0.5? If so, what can you say about these points?"
#
# A
#
# - There is a difference in the spread of probabilities of training and test. In the test we can see a wider range of probabilities since our model is over fitting and is not handling 'unseen' data as well as the training. We can also see some misclassification due to this as we can see some values which are truly ALL being predicted with probabilities less than 0.5.
#
# There aren't any points close to 0.5 using standardization but when I ran this with normalization I did find some points close to 0.5. In that case it seems like those points have an equal likelihood of being classed into AML or ALL even though they are distinctly one type. Therefore, we may want to consider how we could potentially change this probability if we cared about increased accuracy of ALL vs AML for example.
# ## Part (d): Analyzing Significance of Coefficients # # How many of the coefficients estimated by the multiple logistic regression in the previous problem are significantly different from zero at a *significance level of 95%*? # # Hint: To answer this question, use *bootstrapping* with 100 boostrap samples/iterations. # # # + coefsig =[] def sample(x, y, k): n = x.shape[0] # No. of training points # Choose random indices of size 'k' subset_ind = np.random.choice(np.arange(n), k) # Get predictors and reponses with the indices x_subset = x[subset_ind, :] y_subset = y[subset_ind] return (x_subset, y_subset) multilr = LogisticRegression(fit_intercept = True,C = 10**6) from random import randint for i in range(100): xx, yy = sample(xtrain.values,ytrain,32) multilr.fit(xx,np.ravel(yy)) coefsig.append(multilr.coef_) # + coefsig = np.array(coefsig) avgcoef = np.mean(coefsig,axis = 0)[0,:] stdcoef = np.std(coefsig,axis = 0)[0,:] z2 = avgcoef-2*stdcoef z1 = avgcoef+2*stdcoef print('Number of Statistically significant values:%s' %(np.shape(z2[z2>0])[0] + np.shape(z1[z1<0])[0])) # - # - In running the bootstrapping scheme we make no assumptions about our data, however it is possible that there is correlation between our data. So in many cases a t-test is not identical to a 95% confidence interval from bootstrapping. However, for the purpose of this problem we assume we can use this method. Therefore we bootstrap 100 times, take the mean and look at 2 standard deviations, if the value zero appears then the coefficient is not statistically significant. What we find is that 1100 coefficients are statistically significant at this confidence level. If I normalize the data in step 1, I find that number of significant coefficients is 1690. This already can be used to help us narrow down our feature space. 
# ## Part (e): Dimensionality Reduction using PCA # # A reasonable approach to reduce the dimensionality of the data is to use PCA and fit a logistic regression model on the first set of principal components contributing to 90% of the variance in the predictors. # # 1. How do the classification accuracy values on both the training and tests sets compare with the models fitted in Parts (c) and (d)? # # 2. Re-fit a logistic regression model using 5-fold cross-validation to choose the number of principal components, and comment on whether you get better test performance than the model fitted above (explain your observations). # # 3. Use the code provided in Part (c) to visualize the probabilities predicted by the fitted models on both the training and test sets. How does the spread of probabilities in these plots compare to those for the models in Part (c) and (d)? # pcavar = [] i = 1 while True: pca = PCA(n_components = i) pca.fit(xtrain) pcavar.append(pca.explained_variance_ratio_.sum()) if (pca.explained_variance_ratio_.sum()) >= 0.9: break i+=1 plt.figure(figsize=(10,8)) plt.plot(np.arange(1,25,1),pcavar) plt.title('#PCA components vs total variance') plt.ylabel('Variance') plt.xlabel('# of PCA components') # I choose 24 pca compoonents since it contributes to 91% of the variance # + multilr = LogisticRegression(fit_intercept = True, C = 10**6) pca = PCA(n_components = 24) pca.fit(xtrain) xpca = pca.transform(xtrain) xtst = pca.transform(xtest) multilr.fit(xpca,np.ravel(ytrain)) ypred = multilr.predict(xpca) print('Train Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytrain[:,0]))/len(ypred))) ypred = multilr.predict(xtst) print('Test Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytest[:,0]))/len(ypred))) # - fig,ax = plt.subplots(1,2,figsize=(14,7)) visualize_prob(multilr,xpca,np.ravel(ytrain),ax[0]) ax[0].set_title('Training Classification') visualize_prob(multilr,xtst,np.ravel(ytest),ax[1]) ax[1].set_title('Test Classification') 
# 5-fold CV over the number of leading principal components (1..23).
# Cs=[10**8] keeps the fit effectively unregularized, matching earlier cells.
lrcv = LogisticRegressionCV(Cs = [10**8],fit_intercept = True,cv = 5)
scores = []
stds = []
for i in range(23):
    lrcv.fit(xpca[:,0:i+1],np.ravel(ytrain))
    scores.append(lrcv.scores_)

# scores_[1] holds the per-fold accuracies for class label 1.
scores = np.array(scores)
lrcv_means = [np.mean(scores[i][1]) for i in range(23)]
stds = [np.std(scores[i][1]) for i in range(23)]
xx = np.arange(1,24,1)
plt.figure(figsize=(12,10))
plt.title('cross validation score vs # of PCA components')
plt.errorbar(xx,lrcv_means,yerr = stds,marker='o',linestyle=None)

# Component count with the best mean CV score (+1 converts index to count).
np.argmax(lrcv_means) +1

# 4 features yield the highest training accuracy

# +
# Refit on only the first 4 principal components.
multilr = LogisticRegression(fit_intercept = True, C = 10**6)
multilr.fit(xpca[:,0:4],np.ravel(ytrain))
ypred = multilr.predict(xpca[:,0:4])
print('Train Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytrain[:,0]))/len(ypred)))
ypred = multilr.predict(xtst[:,0:4])
print('Test Classification accuracy (logistic) =%s' %(1-np.sum(abs(ypred-ytest[:,0]))/len(ypred)))
# -

# Probability spread plots for the 4-component model.
fig,ax = plt.subplots(1,2,figsize=(14,7))
visualize_prob(multilr,xpca[:,0:4],np.ravel(ytrain),ax[0])
ax[0].set_title('Training Classification')
visualize_prob(multilr,xtst[:,0:4],np.ravel(ytest),ax[1])
ax[1].set_title('Test Classification')

# Q
#
# - How do the classification accuracy values on both the training and tests sets compare with the models fitted in Parts (c) and (d)?
#
# A
#
# - I think you meant parts (b) and (c) above therefore:
#     - Classification accuracy with 24 PCA components on training is equivalent to multiple logistic regression (= 1) which are both higher than linear and logistic regression with 1 feature (~ 0.7).
#     - Classification accuracy on test is lower than multiple logistic regression (~0.92 < 0.97). However, with only 24 features we can do roughly just as well as 7129! which is great if we can afford some level of misclassification and we care about faster results (assuming we have a lot of data to parse and computational complexity grows with the number of features).
Both these models (24PCA features and multiple logistic) do better than the single feature we had in part b. # # Q # # - Re-fit a logistic regression model using 5-fold cross-validation to choose the number of principal components, and comment on whether you get better test performance than the model fitted above (explain your observations). # # A # # - We carry out the above step because there may be an optimal subset of the 24 components that gives us a good cross validation score which could allow us to down select our features even more (since there may be overfitting with a train score of 1). We obtain a lower train score as expected since we are reducing over fitting by optimizing our model on the cross validation set. We obtain lower test performance than the 24 features with 4 principal components and this may be because we give up some complexity in the down selection process. Furthermore, because we have very few observations, our cross validation step shows us that there is a large variance in the cross validation score meaning that 4 features may not be the optimal subset since we could have just gotten lucky. Therefore, in this situation, having more data is very useful. This issue of few data points also means that while we can fit well to our training set, our test set may have many outliers which the model with 4 features is unable to account for (while the model with 24 is!). # # Q # # - Use the code provided in Part (c) to visualize the probabilities predicted by the fitted models on both the training and test sets. How does the spread of probabilities in these plots compare to those for the models in Part (c) and (d)? # # A # # - More spread in the training set with 4PCA components than with 24 since we have fewer features and the training accuracy is lower - we can thus see some misclassification with blue dots below the 0.5 line. 
In the test set we see a great degree of misclassification by 4 PCA components, with less spread indicated by 24 PCA components and multiple regression. What we can synthesize from this is that models which are more complex seem to predict with less spread in probabilities in that they are more 'decisive' so to speak. While fewer features predict with greater spread. # --- # # # APCOMP209a - Homework Question # Suppose we want to conduct PCA on the model matrix $X \in \Re^{n×p}$, where the columns have been suitably set to zero mean. In this question, we consider the squared reconstruction error: # # $$ \parallel XQ- XQ_m \parallel ^2 $$ # # for a suitable set of eigenvectors forming the matrix $Q_m$, as discussed below. Suppose that we conduct eigendecomposition of $X^T X$ and obtain eigenvalues $\lambda_1, \ldots , \lambda_p$ and principal components $Q$, i.e. # # $$ X^T X = Q \Lambda Q ^T $$ # # (1) Suppose that the matrix norm is simply the squared dot product, namely # # $$ \parallel A \parallel ^2 = A^T A $$ # # Then, express the reconstruction error as a sum of matrix products. # # (2) Simplify your result from (1) based on properties of the matrices $Q$. # # (3) Now let $Q_m$ be the matrix of the first $m < p$ eigenvectors, namely # # $$ Q_m = (q_1, \ldots, q_m, 0, \ldots, 0) \in \Re^{p \times p} $$ # # Thus, $X Q_m$ is the PCA projection of the data into the space spanned by the first $m$ principal components. Express the products $Q^T_m Q$ and $Q^T Q_m$, again using properties of the eigenbasis $q_1, \ldots, q_p$. # # (4) Use your results from (3) to finally fully simplify your expression from (2). # # (5) Note that the result you obtain should still be a matrix, i.e. this does not define a proper norm on the space of matrices (since the value should be a scalar). 
# Consequently, the true matrix norm is actually the trace of the
# above result, namely
#
# $$ \parallel A \parallel ^2 = {\rm trace} (A^T A) $$
# Use your result from (4) and this new definition to find a simple expression
# for the reconstruction error in terms of the eigenvalues.
#
# (6) Interpret your result from (5). In light of your results, does our procedure for PCA (selecting the $m$ largest eigenvalues) make sense? Why or why not?
cs109_hw5_submission.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .sh # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Bash # language: bash # name: bash # --- # + [markdown] slideshow={"slide_type": "slide"} # # Processors Alone # # ![](images/cpu.jpg) # # $$\Huge\color{blue}{\text{Registers}}$$ # # $$\Huge\color{red}{\text{Scheduler}}$$ # # $$\Huge\color{green}{\text{Functional Units}}$$ # + [markdown] slideshow={"slide_type": "subslide"} # ### When introducing OpenMP (which we will look at in more depth later in the class), it's typical to start with a simple example of how easy it makes it to parallelize your code: # # ```C # /* We added one pragma and it's parallel! */ # #pragma omp parallel for # for (i = 0; i < N; i++) { # A[i] = func (b[i], c[i]); # } # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ### In fact, parallelizing you code can be even simpler, you don't even have to change it. # # You simply have to change from this: # - cd $CSE6230_DIR/assignments/2-flops cc -g -c -std=c99 -o fma_loop_host.o fma_loop_host.c -O0 # + [markdown] slideshow={"slide_type": "fragment"} # To this: # - cc -g -c -std=c99 -o fma_loop_host_opt.o fma_loop_host.c -O3 # By asking the compiler to try its best to optimize my code, it is able to exploit parallelism within the CPU core, even for my serial program. # # --- # # Questions we'd like to answer today: # # - What kind of parallelism is available in a single core, and how much of it? # - How can I exploit it? # - Can all applications exploit it? # - How can I make the compiler do the work for me? # - How does the parallelism on a CPU core compare to the parallelism in a GPU? # # # Tools we will use today: # # - Code compilers, like `cc` above, focusing on their optimization options. 
# `cc` is typically an alias for a major compiler (you can typically run `man CC` or `CC --help` to get big lists of optimization and other options)
#   * GNU `gcc`
#   * LLVM `clang`
#   * NVIDIA `nvcc`
#   * Vendor specific (like Intel `icc`)
#   (focus on their optimization options)
# - Code decompilers and diagnostics (to see what the heck compilers are doing)
# - Hardware counters for things that happen in the processor

# **Note:** For many, many applications, optimal performance can't be achieved by optimizing just the processor's performance alone: we have to optimize its interactions with the memory system. That is why I'd like to finish the "Processors alone" module today.
#
# Luckily, optimizing the processor in isolation is something that compilers are quite good at.
#
# If you have to take away one key concept today, it is **Little's law**, which will often tell you how to structure your code to set the compiler up for success.

# + [markdown] slideshow={"slide_type": "slide"}
# ## Recall the end stage of compilation we discussed in the first lecture: machine code
#
# When I compiled `fma_loop_host.o` above, it created a file with those instructions. It's encoded in binary, so opening it up in a text editor won't tell us much, but I can still find out what those machine code instructions are by decompiling the binary into assembly language. The utility that lets me do that is called `objdump`.
#
# (We haven't talked about how CUDA code is different, but for now let's just mention that it comes with its own decompiler: `cuobjdump`)
#
# Here is the entirety of `fma_loop_host.c` from assignment 1:
# -

cat fma_loop_host.c | pygmentize

objdump -Sd fma_loop_host.o | pygmentize -l c-objdump

# (Note that, as illegible as this is, it would be much worse if we didn't have source code interspersed with instructions. You should always compile C code with `-g` and CUDA with ~~`-G`~~ `-lineinfo` [`-G` always turns off optimizations] for this reason and others.)
# # For our purposes, this assembly code has three types of instructions: # # - Instructions that take **registers** as inputs (those things that are addressed like `%rax` and `%rbp`) and # write their outputs over the locations of their inputs. Examples are floating point operations like `mulss` (multiply two single precision numbers together), integer operations like `addl` (add two 32-bit integers), and logical operations like `cmp` (determine if one integer is less than another and write the output to a special register). # # * In hardware, registers are data locations in a register file: the storage closest to the execution units. # Register space is quite dear, so to reflect that, most instruction sets have a limited number of registers (see # e.g. the wikipedia page for the [AVX512](https://en.wikipedia.org/wiki/AVX-512#Extended_registers) instruction # set.). When a thread has too many computations to keep track of, data that would otherwise be stored in a register is *spilled* to memory, which slows things down. I mention all of this just to say that one things compilers are trying to do is figure out how to squeeze your complex instructions into the limited scratchpad space provided by the registers. # # - Instructions that load and store data from memory like `mov`: we're not going to talk about open can of worms today. # # - Branching instructions that control the flow of instructions like `jl` (jump to a given code location based on the outcome of a comparison) # Again, for our simple purposes today, a **thread** is: # # - a stream of instructions, with # - a limited set of registers as a workspace for partial computations # ### How a thread is executed # # (This is a simplification of the [classic RISC pipeline](https://en.wikipedia.org/wiki/Classic_RISC_pipeline)) # # 1. An instruction like `add %rdx,%rax` is: # # 1. *fetched* from the instruction queue, # 2. *decoded* (it's operation and register inputs / outputs are identified), # 3. 
**executed** (the part we care about), and # 4. *written back* to registers # # 2. Move to the next instruction and repeat # ### Pipelining # # If a *cycle* is the smallest unit of time of a processor, and an instruction has multiple steps (each step takes a cycle), does that means that an instruction takes multiple cycles? # # Yes! Let's say $k$ cycles. # # Does that mean that a processor takes $kN$ cycles to complete $N$ instructions? # # No! Instructions are **Pipelined:** # # ![pseudorisc pipeline](./images/pipeline-1.jpg) # # The key thing to understand about pipelined operations: # # **The results of an operation can't be inputs to another operation until they exit # the pipeline.** # # Any cycle of a pipeline when there isn't a new input is a *bubble*. # # The *efficiency* (work / cycle) of your pipelined algorithm is the *fraction of non-bubble cycles*. # # **A fully efficient pipelined algorithm has at least $k$ concurrent independent operations at any point in time, where $k$ is the depth of the pipeline** # (I think that the way that many diagrams show pipelined instructions (time axis horizontal, data axis vertical, instructions labeled) is not helpful, because the "pipe" in the pipeline is always moving, and because the diagram gets larger in both dimensions as time goes on. I prefer to have *instructions* on the vertical axis and *data* labeled on the diagram, because that way the diagram only grows on one axis, and each column looks like a time slice of the pipeline) # # ![pipeline](./images/pipeline-2.jpg) # ### Pipelines and branching # # Even in our simple program, we saw that my nice clean breakdown of register-register instructions and memory instructions wasn't respected: some instructions like `mulss -0x24(%rbp),%xmm0` combine a memory access # (`-0x24(%rbp)` accesses a memory location stored in `%rbp`, offset by a certain amount). 
# # More complex instructions require more decoding: the pipeline of operations before **execute** is quite long on a modern CPU. # # ![long pipeline](./images/pipeline-3.jpg) # # That is why branching instructions are hard to combine with pipelined execution. We don't know which instruction # should go into the pipeline, it depends on the output of a computation like a comparison. The CPU could: # # - Hold up everything to wait until it is known which branch to take (always stall the pipeline, bad) # - Try to *predict* which branch will be taken an keep feeding the pipeline with that branch (bad when there is a *misprediction*) # # Branch prediction is a complicated, sophisticated thing on modern CPUs. In your programming, you should assume the following: # # - Computers are good are recognizing patterns: there is a branch in every loop of a for-loop, but if you keep looping back, it will eventually start predicting that is the branch to take, and a branch will be a neglible part of the execution time. For loops with known bounds can also be **unrolled** meaning copy-pasted the right number of times with no branching at all. # # ```C # for (int i = 0; i < N; i++) { /* if N = 10000000000, branch prediction will almost always be right */ # /* ... */ # } # ``` # # ```C # for (int i = 0; i < 8; i++) { # /* If the bound is known at compile time, the loop can be unrolled with no branching */ # /* ... */ # } # ``` # # ```C # /* You can give the compiler hints about how you want to break up a loop in to unrolled sections, # reducing the number of branches */ # #pragma unroll(8) # for (int i = 0; i < N; i++) { # /* ... 
*/ # } # ``` # # - If your branching has no patterns, then you should expect lots of branch misprediction: the instruction pipeline # has to be cleared out, leading to a stall in your code *proportional in length to the pipeline depth* (~10-20 cycles) # # - Branch misprediction is the kind of hardware event that can be counted by a performance counter like `perf` # cd $CSE6230_DIR/assignments/2-flops make run_fma_prof PERF="perf stat -v" # ### Executing instructions # # Like I said, the depth of the pipeline before and after execution really only affects us when there is branching. Let's talk about *execute*: # # - Different types of instructions are executed on different *functional units*: # # - *ALU*: arithmetic and logic unit # - *FPU*: floating point unit # - etc. # # See, e.g., the [Kaby Lake](https://en.wikichip.org/wiki/intel/microarchitectures/kaby_lake) diagram from Wikichip that we saw in the first lecture. This is what the cartoon at the top of the lecture is supposed to be a simplification of. # # ![Kaby Lake](./images/kabylake.png) # # ### Superscalarity # # There are multiple functional units in a processor. In the pipeline diagrams we've seen so far, there is only one `execute` instruction happening per cycle. That would mean that only one functional units is called on per cycle, leaving the others idle. Is that a waste of resources? # # It is, the diagrams are wrong! Modern CPUS are **superscalar:** there are multiple instruction pipelines that can happen at once. # # ![superscalar pipeline](./images/pipeline-super.jpg) # ### How can we exploit superscalarity? # # - Some combination of a smart **scheduler,** which is able to # 1. Look ahead several instructions, # 2. Identify *independent* operations, and # 3. Reorder for concurrent independence # # - And a smart **compiler**, which # 1. Knows the functional units that are available # 2. Knows the amount of register space available and the superscalar factor, and # 3. 
Tries to reorder and change which registers are used to solve the # optimal scheduling problem # # In almost all cases, the compiler is better than you are at this: don't try to out think it. # # If you think the compiler is getting it wrong: # # - Use the decompiler to see what it's doing # - Use *optimization reports* (like Intel `-qopt-report=5`) to ask the compiler to tell you what it's doing # # ### We can also exploit superscalarity with multiple threads # # When one thread has a pipeline stall, another can be issuing instructions. # # - If there is hardware support for multiple threads, that means they can both have their registers in the register file at the same time, and the scheduler can switch between them. If there is OS support for multiple threads, that means the OS switches which threads have their registers in the processor at a given time. We can talk more about this another day. # # ### Let's see how well the compiler optimizes a simple loop # # - **Note:** Most compilers have an options to compile with the specialized instruction set of the chip on which it is being compiled. On pace-ice, pass `-xHost` for compiling to your current chip with `icc`. cd $CSE6230_DIR/assignments/2-flops cat fma_loop_short.c | pygmentize module unload gcc module load intel/16.0 icc -g -c -std=c99 -xHost -o fma_loop_short.o fma_loop_short.c -O3 objdump -Sd fma_loop_short.o | pygmentize -l c-objdump # The compiler actually compiled several version of the loop, some optimized for different inputs. # # There are instructions that we didnt see before, like `vfmadd213ss`. Let's go to Intel's [intrinsics reference](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#!=undefined) to see what we can see. # What did we learn: # # - There are **vectorized** instructions, when a single instruction operates on multiple data at one time (SIMD). # - **Fused multiply add** is an instruction that counts as two flops at once! 
It is so fundamental to linear algebra that it deserves optimization. # - **Execution itself is pipelined**, with the pipeline depth depending on the instruction. # - Sometimes there are multiple functional units that can do the same instruction (2 FPUs on a modern Intel chip, for instance). # ### Putting it all together # # Because pipelined instructions have to be independent, how many independent FMAs do we need in order to issue one per cycle to each FPU on a core, thus achieving peak flops/cycle? # # An application of # # ## Little's Law # # $$\Huge L = \lambda W$$ # # - $L$: The concurrency, number of concurrent, independent operations that will fill the pipeline # - $\lambda$: the "width" of the data that can be entered into the pipeline in a single cycle # - $W$: the depth of the pipeline # # Let's do this for our setup: # # - $W$: the pipeline depth. Get this from the manufacturer for your chip (if you can). E.g. for an Intel Broadwell chip, the pipeline depth of `vfmadd*ps` instructions is 5. # - $\lambda$: the width of the data. Each vector FMA operation works on 8 sets of operands, and there are 2 functional units that can execute the command, so $\lambda = 8 * 2 = 16$. # - Therefore I need $5 * 16 = 80$ independent FMA operations to fill the pipeline. # - Another way to think of it: if my algorithm is composed mostly of FMAs, and it can be rewritten to be more concurrent, I expect to see a speedup up until about 80-way concurrency, and no benefit beyond that. If my algorithm has less that 80-way concurrency, there will be bubbles in the FMA pipeline. # ## So what is the peak flop/s per CPU core? 
# # Assuming that all floating point units (FPUs) can compute FMAs, and that each can be issued an FMA concurrently due to superscalarity, # # $$P_{\text{core}} = \#\text{FPUs} * \text{vector width (FMAs / FPU)} * 2 \text{ (flops / FMA)} * \text{throughput (1 / cycle)} * \text{clock rate (cycles / sec)}$$ # # So, putting in the numbers for my computer, # # $$P_{\text{core}} = 2 \text{ FPUs} * 8 \text{ FMAs / FPU} * 2 \text{ flops / FMA} * \text{1 / cycle} * 3.1 \text{ (Gigacycles / sec)} = 99.2\text{ Gigaflop/s.}$$ # # I have two cores, meaning $P_{\text{total}} = 198.4$ Gigaflop/s, what we calculated in the first lecture, hooray! # ## Comparing CPU cores and GPU streaming multiprocessors (SMs) # # Here is a link to Prof. Vuduc's Intro to GPUs and CUDA [slides](http://vuduc.org/cse6230/slides/cse6230-fa14--05-cuda.pdf). # # Relevant to today's discussion: # # - Slides 19-24 on the execution model # - Slides 48-51 on performance # Some key takeaways: # # - The CUDA programming model is Single Instruction Mutliple Thread: each thread has its own registers, but a shared instruction stream. # - One instruction is executed on a **warp** a group of 32 threads that mostly work in lock step # - Every instruction is vectorized, not just special instructions on the CPU. # - Mostly: any branch divergence between them is *serialized*, so in addition to misprediction, branching has another steep price on GPUs. # - Question: what are the depths of the pipelines on a Streaming Multiprocessor? # Here are relevant diagrams of NVIDIA streaming multiprocessors: # # ![SMs](./images/nvidia-kepler-vs-maxwell-sm.gif) # # - Every functional unit that is listed as a core handles integer and single precision floating point operations. 
# Double precision operations are handled by separate units, of which there are some in Kepler (1 for every 3 single precision), and few in Maxwell (1 for every 32 single precision)

# Here is a table from NVIDIA's white paper for the Pascal architecture:
#
# ![NVIDIA Table 1](./images/nvidia-table.png)
#
# Can you figure out how the double and single precision flop/s are computed?
#
# - They are counting FMAs
# - Each core is a non-vectorized FPU

# ## Exploiting The Concurrency In Your Code
#
# Here is a link to Prof. Vuduc's GPU Performance Tuning [slides](http://vuduc.org/cse6230/slides/cse6230-fa14--07-gpu-tuning-1.pdf)
#
# - Relevant to today (and to your second assignment) are slides 27-40

# ## Discussion: given all this pipelining, how can I predict the throughput of my kernel?
#
# The answer I gave in class was that we could find the critical path through the directed acyclic graph of computations. Something like this from [wikipedia](https://en.wikipedia.org/wiki/Directed_acyclic_graph#/media/File:Pert_chart_colored.svg), but with pipeline depth instead of months.
#
# ![dag](./images/Pert_chart_colored.svg)
#
# The experts in this type of thing say that this type of analysis is easier said than done: see Section 3.2 of this recent work from [Hoffman et al.](https://arxiv.org/pdf/1702.07554.pdf)
#
#
notes/processors/processors-alone.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# ## File I/O

# + [markdown] slideshow={"slide_type": "slide"}
# ### Reading from a file
#
# For convenience, we will call our file *pi.txt* and it will contain the digits of pi written in different lines.

# + [markdown] slideshow={"slide_type": "fragment"}
# To read the entire file, we can do this:

# + slideshow={"slide_type": "fragment"}
with open('pi.txt') as f: # Opens our file and we can refer to it using f
    contents = f.read() # Read everything in the file as a string using the read method
print(contents)

# + [markdown] slideshow={"slide_type": "slide"}
# The `open()` function needs one argument: the name of the file you want to open and returns an object representing the file. Python looks for this file in the directory where the program that’s currently being executed is stored.
#
# The keyword `with` closes the file once access to it is no longer needed. We can explicitly open and close files as follows (though it is not usually done and the `with` method is used).
# -

# Explicit open/close for illustration: with this style, forgetting f.close()
# (or an exception before it) leaks the file handle — prefer `with`.
f = open('pi.txt')
# Do stuff on f
f.close()

# + [markdown] slideshow={"slide_type": "slide"}
# It is not necessary that the text file is in the same directory, you can also provide absolute or relative *file paths*. For example
# -

# Raw string (r"...") so the backslashes in the Windows path are not
# interpreted as escape sequences.
path = r"C:\Users\shubh\Desktop\PyCk\Lecture 4\pi.txt"
with open(path) as f:
    print(f.read())

# + [markdown] slideshow={"slide_type": "slide"}
# It is often needed to read each line for a file, this can be done using a for loop

# + slideshow={"slide_type": "fragment"}
""" You'll notice that we get an extra blank line after each line.
This is because each line in the file ends with a \n character, and print adds its own \n, so to avoid this, as said before, add rstrip(), or use the end="" keyword arg """
with open('pi.txt') as f: # Open the file
    for line in f: # Loop over all the lines
        print(line.rstrip())
        # or
        # print(line, end="")

# + [markdown] slideshow={"slide_type": "slide"}
# To store the lines as a list, we can use the readlines() method

# + slideshow={"slide_type": "fragment"}
with open('pi.txt') as f:
    lines = f.readlines()
lines[:3]
# -

# Alternate, Ditch the \n character
with open('pi.txt') as f:
    lines = f.read().split("\n")
lines[:3]

# + [markdown] slideshow={"slide_type": "slide"}
# To store all the digits of π (at least in the file) together, we can append it to a variable. Consider the lines variable above, we can just do

# +
pi_string = ""
for line in lines:
    pi_string += line.strip()
pi_string

# + [markdown] slideshow={"slide_type": "slide"}
# ### Writing to an Empty File
#
# To write text to a file, you need to call `open()` with a second argument telling
# Python that you want to write to the file.

# + slideshow={"slide_type": "fragment"}
with open('my_file.txt', 'w') as f:
    f.write('Hello World\n')
    f.write('Good Night\n')

# +
# with open('my_file.txt', 'w') as f:
#     f.write('Adding another line\n')
# # This will overwrite our file and previous data will be lost

# + [markdown] slideshow={"slide_type": "fragment"}
# Note that we can optionally use 'r' to open the file in read-mode, but that's the default so we can omit it if we only want to read from the file.

# + [markdown] slideshow={"slide_type": "slide"}
# ### Appending to a File
#
# We can use the `'a'` value of the second argument to open the file in append mode, where Python doesn’t erase the contents of the file before returning the file object. If the file doesn’t exist yet, Python will create an empty file for you.
# + slideshow={"slide_type": "fragment"} with open('my_file.txt', 'a') as f: f.write('Oh I did not overwrite the file!\n') # + [markdown] slideshow={"slide_type": "slide"} # ## The Python `pickle` Module: Persisting Objects in Python # # You may sometimes need to share or transfer complex object hierarchies out of your session or save the internal state of your objects to a disk or database for later use. # # To accomplish this, you can use a process called **serialization**, which is fully supported by the python standard library thanks to the `pickle` module # + [markdown] slideshow={"slide_type": "slide"} # ### Serialization in Python # # The **serialization** process is a way to convert a data structure into a linear form that can be stored or transmitted over a network. # # In Python, serialization allows you to take a complex object structure and transform it into a stream of bytes that can be saved to a disk or sent over a network. # + [markdown] slideshow={"slide_type": "slide"} # Example class whose objects we'll learn to pickle: # + slideshow={"slide_type": "fragment"} class example_class: a_number = 35 a_string = "hey" a_list = [1, 2, 3] a_dict = {"first": "a", "second": 2, "third": [1, 2, 3]} a_tuple = (22, 23) my_object = example_class() # + [markdown] slideshow={"slide_type": "fragment"} # Importing the module: # + slideshow={"slide_type": "fragment"} import pickle # + [markdown] slideshow={"slide_type": "slide"} # # Methods inside the Python `pickle` module: # + [markdown] slideshow={"slide_type": "slide"} # ### `pickle.dumps()` and `pickle.loads()` # # These functions pickle an object to or load an object from a **string**. 
#
# Let's see how this works:

# + slideshow={"slide_type": "fragment"}
my_pickled_object = pickle.dumps(my_object) # Pickling the object
print(f"This is my pickled object:\n{my_pickled_object}\n")

# + slideshow={"slide_type": "fragment"}
my_unpickled_object = pickle.loads(my_pickled_object) # Unpickling the object
print(f"This is a_dict of the unpickled object:\n{my_unpickled_object.a_dict}\n")
print(f"This is a_list of the unpickled object:\n{my_unpickled_object.a_list}\n")

# + [markdown] slideshow={"slide_type": "slide"}
# ### `pickle.dump()` and `pickle.load()`
#
# These functions pickle an object to or load an object from a **file object**.\
#
# Let's see how this works:

# + slideshow={"slide_type": "fragment"}
# Write the pickled bytes to disk ('wb': pickle data is binary).
with open('my_pickled_object.pkl', 'wb') as f:
    pickle.dump(my_object, f) # Pickling the object

# + slideshow={"slide_type": "fragment"}
# Fix: read back the same file that was written above — the original opened
# 'my_pickled_object' (without the .pkl extension), which raises
# FileNotFoundError.
with open('my_pickled_object.pkl', 'rb') as f:
    my_unpickled_object = pickle.load(f) # Unpickling the object

print(f"This is a_dict of the unpickled object:\n{my_unpickled_object.a_dict}\n")
print(f"This is a_list of the unpickled object:\n{my_unpickled_object.a_list}\n")

# + [markdown] slideshow={"slide_type": "slide"}
# Most object types can be pickled but not all. Read more about pickling objects [here](https://realpython.com/python-pickle-module/).

# + [markdown] slideshow={"slide_type": "slide"}
# ## The Python `json` Module
#
# Since its inception, JSON has quickly become the de facto standard for information exchange.
#
# Read more about JSON and how it works [here](https://www.json.org/json-en.html).

# + [markdown] slideshow={"slide_type": "slide"}
# ### Differences between `json` and `pickle` serialization:
#
# - Most of the pickle module is written in C language and is specific to python only. JSON is derived from JavaScript, but it is not limited to JavaScript only (as the name suggests)
# - Pickle supports binary serialization format, whereas JSON is for simple text serialization format.
# - JSON is useful for common tasks and is limited to certain types of data. # Thus, JSON cannot serialize and de-serialize every python object. # But, pickle can serialize any arbitrary Python object like lists, tuples, and dictionaries. Even classes and methods can be serialized with pickle. # - Pickle's serialization process is faster than JSON # + [markdown] slideshow={"slide_type": "slide"} # Example dictionary object we will learn to json serialize: # + slideshow={"slide_type": "fragment"} data = { "user1": { "name": "<NAME>", "age": 21, "Place": "Mumbai", "Interests": ['Star Wars', 'Food', 'Tech'] }, "user2": { "name": "<NAME>", "age": 19, "Place": "Pune", "Interests": ["Cubing", "Piano", "Machine Learning"] } } # + [markdown] slideshow={"slide_type": "fragment"} # Importing the module: # + slideshow={"slide_type": "fragment"} import json # + [markdown] slideshow={"slide_type": "slide"} # # Methods inside the json module: # + [markdown] slideshow={"slide_type": "slide"} # ### `json.dumps()` and `json.loads()` # # These functions json serialize an object to or load an object from a **string**. # # Let's see how this works: # + slideshow={"slide_type": "fragment"} encoded_data = json.dumps(data) print(encoded_data) # + slideshow={"slide_type": "fragment"} decoded_data = json.loads(encoded_data) print(decoded_data["user1"]["name"]) # + [markdown] slideshow={"slide_type": "slide"} # ### `json.dump()` and `json.load()` # # These functions json serialize an object to or load an object from a **file object**. # # Let's see how this works: # + slideshow={"slide_type": "fragment"} with open('encoded_data.json', 'w') as f: json.dump(data, f) # Encoding the object # + slideshow={"slide_type": "fragment"} with open('encoded_data.json', 'r') as f: decoded_data = json.load(f) # Decoding the object print(decoded_data["user1"]["name"])
lecture4/fileIO_and_persisting_objects.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pickle
import sys

# Load every pickled batch of sentences from sentences.txt.  The file holds
# many consecutive pickle.dump() records, so keep calling pickle.load()
# until the stream is exhausted (EOFError).
sentences_load = []
cont = 0
with open("sentences.txt", "rb") as rfp:
    # Unpickling
    while True:
        try:
            # Fix: load unconditionally.  The original `if cont > 10:` guard
            # only delayed the first pickle.load() by eleven empty loop
            # iterations (nothing was consumed from the stream before then),
            # so it changed the printed counter but not the loaded data.
            sentences_load.extend(pickle.load(rfp))
            sys.stdout.write(str(cont) + ',')  # progress indicator
            cont += 1
        except EOFError:
            # Fix: removed the redundant rfp.close() here — the `with`
            # block already closes the file on exit.
            break

len(sentences_load)

# +
import gensim

# Train a 300-dimensional Word2Vec model with negative sampling.
# Fix: the original trained on `sentences_tokens`, which is never defined in
# this notebook (NameError at runtime); the sentences unpickled above are in
# `sentences_load`.
model = gensim.models.Word2Vec(sentences_load, size=300, workers=16, iter=10, negative=5)
# trim memory: discard training-only state and keep the unit-normalized
# vectors (the model cannot be trained further afterwards)
model.init_sims(replace=True)
# save model (native gensim format plus the interoperable word2vec binary)
model.save('evol_word2vec.mdl')
model.wv.save_word2vec_format('evol_word2vec.bin', binary=True)
# -

# Create a {token: vector} dict from the trained embeddings.
# NOTE(review): index2word/syn0 (and the size=/iter= keywords above) are the
# gensim 3.x API; gensim 4+ renamed them — confirm the pinned gensim version.
w2v = dict(zip(model.wv.index2word, model.wv.syn0))
print("Number of tokens in Word2Vec:", len(w2v.keys()))

# Sanity check: ten nearest neighbours of 'queda' by multiplicative cosine
# similarity.
model.most_similar_cosmul('queda', topn=10)
word2vec/4_w2v_save.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd df = pd.read_excel("opendata/API_NY.GDP.MKTP.CD_DS2_en_excel_v2_2445330.xls","Data",header=3) df df.info() df["Indicator Name"].unique() df["Indicator Code"].unique() len(df["Country Name"].unique()) df.describe() df.head() df.iloc[:,:2].head() pd.DataFrame(df.iloc[:,4:].stack(dropna=False),columns=df["Indicator Name"].unique()) indicator_name = df["Indicator Name"].unique()[0] #del df["Indicator Name"] #del df["Indicator Code"] countries = df.iloc[:,:2] gdps = df.iloc[:,4:].stack(dropna=False).reset_index().rename(columns={"level_0":"country_index","level_1":"year",0:indicator_name}) gdps gdps = pd.merge(countries,gdps,left_index=True,right_on="country_index") del gdps["country_index"] gdps gdps.info() gdps.describe() gdps.loc[gdps.year=="2019"].sort_values("GDP (current US$)",ascending=False).head(20) c_org = pd.read_excel("opendata/API_NY.GDP.MKTP.CD_DS2_en_excel_v2_2445330.xls","Metadata - Countries") c_org countries = c_org.dropna(subset=["Region","IncomeGroup"],how="any") countries pd.DataFrame(countries.Region.unique()) pd.DataFrame(countries.IncomeGroup.unique()) countries.groupby("IncomeGroup").count().sort_values("TableName",ascending=False) countries.pivot_table(index="Region",columns="IncomeGroup", values="TableName", aggfunc="count", fill_value=0)
look_nominal_gdp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Titanic Kaggle # ## Introducción # # Este reto es parte del Platzi Data Challenged, en el cual se presenta un problema que de debe resolver, además de una serie de preguntas que debes contestar con los datos aportados. # # En este caso se usaran los datos de la pagina https://www.kaggle.com/c/titanic/data , Y se deben responder las siguientes preguntas: # # - ¿Cuántas personas iban en el titanic? # - ¿Cuántos hombres y mujeres sobrevivieron? # - ¿Cuál fue el top 10 de edad que más sobrevieron y el top 10 de edad que no lo lograron? 4. ¿Cuántos cargos o títulos iban en el barco? Ejemplo: Capitanes, Mrs. Miss, etc. (Acá usarás expresiones regulares) # - ¿Cuánto es la sumatoria del valor de los tickets en USD (yep en USD)? # ## Importando Librerias y Datos # Se importan la librerías puestas en el archivo requirements.txt, en ella encontraremos: # - Numpy: Librería enfocada en operaciones numéricas y estadisticas # - Pandas: Libreria enfocada a la manipulación de datos como Dataframes # import pandas as pd import numpy as np import matplotlib.pyplot as plt # Se importan los datos usando la libreria de pandas, estas se nombran de una forma que puedan ser facilmente identificadas. df_gender = pd.read_csv('gender_submission.csv') df_train = pd.read_csv('train.csv') df_test = pd.read_csv('test.csv') df_gender df_train df_test # ### Uniendo los datos en un solo DataFrame # Como parte del reto, se pide que se unan los tres dataframe en uno solo. Esto tambien es parte de la rutina de un analista de datos, ya que para realizar los analisis es recomendado que los datos se encuentren en la misma tabla, para poder cruzar los datos de mejor forma. 
# #
# We use **merge** to join **df_test** and **df_gender** on the **PassengerId** column.

df_1_merge = pd.merge(df_test, df_gender, on = ['PassengerId'], how = 'inner')
df_1_merge

# #### Reordering columns
#
# Optional step: move the last column (Survived) to the front so df_merge and
# df_train share the same column order before concatenation.

cols = df_1_merge.columns.tolist()
col = cols[-1:] + cols[:-1]
col

df_merge = df_1_merge[col]

df_train = df_train[col]
df_train

# #### Using concat
#
# df_train and df_merge are simply stacked one under the other into a single frame.

df = pd.concat([df_train, df_merge], ignore_index = True)
df

# ## Analysis
# describe() gives a quick statistical summary of every numeric column.

df.describe()

df.columns

# ## Question 1
#
# Number of passengers on the Titanic

# How many passengers there were
ps = df['PassengerId'].count()
plt.bar('PassengerId', ps)  # fixed label typo ('PassagenderID')

# ## Question 2
#
# Men and women who survived

# +
# Survivors by gender
vivos = df.loc[df.Survived== 1]
vivos_graph = vivos.groupby(['Sex'])['Survived'].count().values
fig, ax = plt.subplots()  # `ax` was used below without ever being created
plt.pie(vivos_graph, labels = ['Female','Male'])
ax.set_title('Distribución de Mujeres y Hombre que sobrevivieron')
# -

vivos.groupby(['Sex'])['Survived'].count()

# ## Question 3
#
# Two options:
# - Round the ages of children younger than one year up to exactly 1 year.
# - Analyse without that change.

# +
# Treat children younger than 1 year as 1-year-olds
df.loc[df['Age'] < 1, 'Age'] = 1
df

# +
# This analysis counts infants as 1-year-olds
vivos_2 = df.loc[df.Survived== 1]
top_survived_r = vivos_2.groupby(['Age'], as_index=False)['Survived'].count().sort_values(
    by = ['Survived'],ascending=False).head(10)
top_survived_r
# -

# `labels` must exist before it is converted to strings
# (the original used it one cell before defining it)
labels = top_survived_r['Age']
label_list = labels.to_list()
l_str = [str(x) for x in label_list]
l_str

# +
sizes= top_survived_r['Survived']

fig, ax = plt.subplots()
langs = l_str
students = sizes
plt.bar(langs,students, color=['black', 'red', 'green', 'blue', 'cyan','yellow', 'orange', 'brown','pink', 'chartreuse'])
ax.set_title('Top Ten Sobrevivientes Según Edad')
plt.show()
# -

# Top 10 ages with the most survivors
top_survived= vivos.groupby(['Age'])['Survived'].count().sort_values(ascending=False).head(10)
top_survived

ten_suv = top_survived.to_frame()
ten_suv

# +
# Top 10 ages with the most deaths
no_s = df.loc[df.Survived== 0]
top_no_s = no_s.groupby(['Age'], as_index = False)['Survived'].count().sort_values(
    by = ['Survived'], ascending = False).head(10)

# Age labels as a list of strings
label = top_no_s['Age'].to_list()
l_str = [str(x) for x in label]

# Plot -- use the death counts (the original reused the survivors' `sizes`)
fig, ax = plt.subplots()
langs = l_str
students = top_no_s['Survived']
plt.bar(langs,students, color=['black', 'red', 'green', 'blue', 'cyan','yellow', 'orange', 'brown','pink', 'chartreuse'])
ax.set_title('Top Ten Muertes Según Edad')
plt.show()
# -

# ### Question 4

# Titles: the token between the comma and the period in each name
df['Title'] = df.Name.apply(lambda name: name.split(',')[1].split('.')[0].strip())

df.groupby( ['Title'], as_index = False)['PassengerId'].count().sort_values(
    by = ['PassengerId'] ,ascending=False)

# +
titles = df.groupby( ['Title'], as_index = False)['PassengerId'].count().sort_values(
    by = ['PassengerId'] ,ascending=False).head(4)

labels = titles['Title']
sizes = titles['PassengerId']

fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%',
        shadow=True, startangle=90)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.

plt.show()
# -

# ### Question 5

# Total fare
df['Fare'].sum()

# # -----------------------------------------------------------------------------

# # Kaggle Competition

import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import norm
# %matplotlib inline

gender = pd.read_csv('gender_submission.csv')
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')

train.info()

test.info()

train.head()

# percentage of missing values per column
100*train.isnull().sum()/train.shape[0]

# normalise by the *test* row count (the original divided by train.shape[0])
100*test.isnull().sum()/test.shape[0]

train[['Sex', 'Survived']].groupby(['Sex'], as_index= False).mean()

sns.barplot(x = "Sex", y="Survived", data=train)

# encode sex as a boolean (True = male)
train['Sex'] = train['Sex'] == 'male'
test['Sex'] = test['Sex'] == 'male'

train['FamilySize'] = train['SibSp'] + train['Parch'] + 1
train[['FamilySize', 'Survived']].groupby(['FamilySize']).sum()

# FamilySize for the test set, built from the *test* columns
# (the original mistakenly summed train's SibSp/Parch)
test['FamilySize'] = test['SibSp'] + test['Parch'] + 1

sns.barplot(x = "FamilySize", y = "Survived", data=train)

train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)

train[['Embarked','Survived']].groupby(['Embarked']).count()

# fill the two missing embarkation ports with the most common one
train['Embarked'] =train['Embarked'].fillna('S')

100*test.isnull().sum()/test.shape[0]

# Normalise the fare distribution
sns.distplot(train['Fare'], fit = norm)

train['Fare'] = np.log1p(train['Fare'])
sns.distplot(train['Fare'], fit = norm)

train['FareGroup'] = pd.qcut(train['Fare'], 4 , labels=['A','B','C','D'])
train[['FareGroup','Survived']].groupby(['FareGroup'], as_index = False).mean()

test['Fare'] = np.log1p(test['Fare'])
test['FareGroup'] = pd.qcut(test['Fare'], 4 , labels=['A','B','C','D'])

train.drop(['Fare'], axis=1, inplace=True)
test.drop(['Fare'], axis=1, inplace=True)

train.info()

# Cabin analysis: flag whether the passenger has a cabin number at all
train['InCabin'] = ~train['Cabin'].isnull()
test['InCabin'] = ~test['Cabin'].isnull()

# Age: fill NAs with -0.5 so they land in the 'Unknown' bin, then bucket
train["Age"] = train["Age"].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train["Age"], bins, labels = labels)

sns.barplot(x="AgeGroup", y="Survived", data=train)
plt.show()

# Same procedure for the test frame -- cut *test's* ages
# (the original cut train["Age"] into the test column)
test["Age"] = test["Age"].fillna(-0.5)
test['AgeGroup'] = pd.cut(test["Age"], bins, labels = labels)

# Drop the raw Age column; no empty cells left to fill
train.drop(['Age'], axis=1, inplace=True)
test.drop(['Age'], axis=1, inplace=True)

# +
# Regular expressions to pull each passenger's title out of the name
import re

def get_title(name):
    """Return the first word ending in '.' in `name`, or '' if none."""
    title_search = re.search(r' ([A-Za-z]+)\.', name)
    if title_search:
        return title_search.group(1)
    return ""

# Apply the function to both the train and the test set
train['Title'] = train['Name'].apply(get_title)
test['Title'] = test['Name'].apply(get_title)

# Check the results
pd.crosstab(train['Title'], train['Sex'])

# +
# Collapse rare / equivalent titles into canonical ones
train['Title'] = train['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
train['Title'] = train['Title'].replace('Mlle', 'Miss')
train['Title'] = train['Title'].replace('Ms', 'Miss')
train['Title'] = train['Title'].replace('Mme', 'Mrs')

# survival rate per title
train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
# -

# Same for the test frame
test['Title'] = test['Title'].replace(['Lady','Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
test['Title'] = test['Title'].replace('Mlle','Miss')
test['Title'] = test['Title'].replace('Ms','Miss')
test['Title'] = test['Title'].replace('Mme','Mrs')

sns.barplot(x="Title", y="Survived", data=train)
plt.show()

# +
# Every passenger now carries a title, so Name is redundant as a feature
train.drop(['Name'], axis=1, inplace = True)
test.drop(['Name'], axis=1, inplace = True)
# -

# Cabin is more than 70% missing, so drop it
train.drop(['Cabin'], axis=1, inplace = True)
test.drop(['Cabin'], axis=1, inplace = True)

train.head()

test.head()

train.drop('PassengerId', axis=1, inplace=True)

PassengerId = test['PassengerId']
X = train
Y = test.drop('PassengerId', axis=1).copy()
dataset = [X, Y]
col_norm = ['FamilySize','SibSp','Parch']

# ## Loading SkLearn

# +
# data mining
#from sklearn.impute import KNNImputer, MissingIndicator, SimpleImputer
from sklearn import impute
from sklearn.pipeline import make_pipeline, make_union
from sklearn import preprocessing
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler

# machine learning
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier

# +
# Standardise (zero mean, unit variance) the numeric columns of the train frame
features = dataset[0][col_norm]
scaler = StandardScaler().fit(features.values)
features = scaler.transform(features.values)
# -

dataset[0][col_norm] = features
dataset[0].head(10)

# Same scaling for the test frame
features = dataset[1][col_norm]
scaler = StandardScaler().fit(features.values)
features = scaler.transform(features.values)
dataset[1][col_norm] = features
dataset[1].head(10)

# One-hot encode the categorical columns
cols = ['Pclass', 'Embarked', 'FareGroup', 'AgeGroup', 'Title']
titanic_cat = dataset[0][cols]
titanic_cat = pd.concat([pd.get_dummies(titanic_cat[col], prefix = col) for col in titanic_cat], axis=1)
titanic_cat.head()

dataset[0] = pd.concat([dataset[0][dataset[0].columns[~dataset[0].columns.isin(cols)]], titanic_cat], axis=1)
dataset[0].head()

# NOTE(review): if a category value is absent from the test set, its dummy
# column will be missing here and train/test columns will not align -- verify.
titanic_cat = dataset[1][cols]
titanic_cat = pd.concat([pd.get_dummies(titanic_cat[col], prefix = col) for col in titanic_cat], axis=1)
titanic_cat.head()

dataset[1] = pd.concat([dataset[1][dataset[1].columns[~dataset[1].columns.isin(cols)]], titanic_cat], axis=1)
dataset[1].head()

dataset[0].columns.tolist()

dataset[0]

dataset[0].dtypes

dataset[0].shape[1]

# Split the train frame into independent variables (x_feat) and the target (y_feat)
x_feat = dataset[0].drop('Survived', axis=1).copy()
y_feat = dataset[0]['Survived']
x_feat

features = x_feat.columns.tolist()

# ## Loading sklearn models

# +
from sklearn.datasets import make_friedman1
from sklearn.feature_selection import RFE
from sklearn.svm import SVR

# RFE = Recursive Feature Elimination: fits the estimator repeatedly and
# recursively drops the worst-performing features.

# Number of features to select
n = 15

# Build the model via logistic regression
lr = LogisticRegression(n_jobs=4, verbose=2)
# keyword form: the second positional argument was removed in modern sklearn
rfe = RFE(lr, n_features_to_select=n, verbose=2)
rfe = rfe.fit(x_feat, y_feat.values.ravel())
# ravel() reads an nxm numpy array as nx1, like a reshape(-1,1)
# -

# +
# support_ masks the selected features
print(rfe.support_)

# ranking_ assigns 1 to the best features
print(rfe.ranking_)
# -

# zip pairs each feature name with its support flag and rank
z = zip(features, rfe.support_, rfe.ranking_)
list(z)

columnas_rank10 = []
for v, s, r in zip(features, rfe.support_, rfe.ranking_):
    if r >= 1 and r<=10:
        columnas_rank10.append(v)
columnas_rank10

# # Models

# +
from sklearn.model_selection import train_test_split

# Variables for the actual training
x_ = dataset[0][columnas_rank10]
y_ = dataset[0]['Survived']

# 80-20 train/test split; random_state fixes the shuffle so runs are reproducible
x_train, x_test, y_train, y_test = train_test_split(x_, y_, test_size = 0.2, random_state=42)
# -

# Model 1: decision tree
model1 = DecisionTreeClassifier().fit(x_train,y_train)
model1

model2 = LogisticRegression().fit(x_train,y_train)
model2

model = LinearSVC().fit(x_train,y_train)
model

# Model score
score = round(model.score(x_train, y_train) * 100, 2)
print(score)

# LinearSVC predictions on the held-out split
predict = model.predict(x_test)
predict

# +
# Classification report for the model
from sklearn.metrics import classification_report
print(classification_report(y_test,predict))

# +
# Plot the confusion matrix
from sklearn.metrics import confusion_matrix
matrix = confusion_matrix(y_test, predict)
sns.heatmap(matrix, annot = True, fmt ='d', cmap ='Blues', square = True)
plt.xlabel("predicted")
plt.ylabel("actual")
plt.show()
# -

# Validation predictions
# (the original called predict on an undefined `x_val`)
x_val_model = dataset[1][columnas_rank10]
predicted = model.predict(x_val_model)
predicted

# Create the submission file
submission = pd.DataFrame({
    'PassengerID': PassengerId,
    'Survived': predicted
})
submission.to_csv('submission.csv', index = False)

# ## Other models

# Model 1 (decision tree) score
dectree_score1 = round(model1.score(x_train, y_train) * 100, 2)
print(dectree_score1)

# Logistic-regression score
reglog_score1 = round(model2.score(x_train, y_train) * 100, 2)
print(reglog_score1)

# Decision-tree predictions
predict_dt1 = model1.predict(x_test)
predict_dt1

# Logistic-regression predictions
predict_rg1 = model2.predict(x_test)
predict_rg1

# +
# Decision-tree classification report
from sklearn.metrics import classification_report
print(classification_report(y_test,predict_dt1))

# +
# Logistic-regression classification report
from sklearn.metrics import classification_report
print(classification_report(y_test,predict_rg1))

# +
# Decision-tree confusion matrix
from sklearn.metrics import confusion_matrix
matrix = confusion_matrix(y_test, predict_dt1)
sns.heatmap(matrix, annot = True, fmt ='d', cmap ='Blues', square = True)
plt.xlabel("predicted")
plt.ylabel("actual")
plt.show()

# +
# Logistic-regression confusion matrix
from sklearn.metrics import confusion_matrix
matrix = confusion_matrix(y_test, predict_rg1)
sns.heatmap(matrix, annot = True, fmt ='d', cmap ='Blues', square = True)
plt.xlabel("predicted")
plt.ylabel("actual")
plt.show()
# -

# ## Validation

# Decision-tree predictions on the Kaggle test frame
x_val_dt = dataset[1][columnas_rank10]
predicted_dt = model1.predict(x_val_dt)
predicted_dt

# Logistic-regression predictions on the Kaggle test frame
x_val_rg = dataset[1][columnas_rank10]
predicted_rg = model2.predict(x_val_rg)
predicted_rg

len(predicted_dt)

len(predicted_rg)

# Decision-tree submission
submission = pd.DataFrame({
    'PassengerID': PassengerId,
    'Survived': predicted_dt
})
submission.to_csv('submission_DT.csv', index = False)

# Logistic-regression submission
submission = pd.DataFrame({
    'PassengerID': PassengerId,
    'Survived': predicted_rg
})
submission.to_csv('submission_RG.csv', index = False)

pd.read_csv('submission_DT.csv')

pd.read_csv('submission_RG.csv')

# !pip install --upgrade kaggle

# !chmod 600 ~/.kaggle/kaggle.json
Titanic.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Filter-processing functions

import numpy as np
import cv2
import matplotlib.pyplot as plt


# +
def conv2d(a,b):
    """
    calculate convolution (element-wise product followed by a sum)

    Args:
        a : array
        b : array, same shape as a
    Returns:
        scalar result of the convolution at one position
    """
    return np.multiply(a,b).sum()

def filter_process(img,K):
    """
    calculate convolution of img using filter K

    Borders (within half a kernel of the edge) are left as zero;
    values are clamped into the valid 8-bit range [0, 255].

    Args:
        img : 2-D grayscale image array
        K : square filter array with odd side length
    Returns:
        filtered image, same shape/dtype as img
    """
    result = np.zeros_like(img)
    h_img,w_img = img.shape  # get height and width of img
    h_k,w_k = K.shape  # get height and width of K
    # half-width of the kernel; the original used h_k % 2, which is only
    # coincidentally correct for 3x3 kernels (1) and wrong for 5x5 (1 != 2)
    adj_size = h_k//2

    # main process
    for y in range(adj_size,h_img-adj_size):
        for x in range(adj_size,w_img-adj_size):
            extract_array = img[y-adj_size:y+adj_size+1,x-adj_size:x+adj_size+1]
            tmp = conv2d(extract_array,K)
            # clamp into [0, 255] before storing into the uint8 result
            if tmp>255:
                result[y,x] = 255
            elif tmp<0:
                result[y,x] = 0
            else:
                result[y,x] = tmp
    return result
# -

def gen_grayscale(img):
    """Convert a BGR image (OpenCV channel order) to BT.709 grayscale."""
    # OpenCV stores channels as B, G, R. The original labeled channel 0
    # as green and channel 1 as blue, which swapped the luminance weights
    # of the green and blue channels.
    blue = img[:,:,0]
    green = img[:,:,1]
    red = img[:,:,2]
    gray = 0.2126*red + 0.7152*green + 0.0722*blue
    gray = gray.astype(np.uint8)
    return gray

img = cv2.imread("imori.jpg")
img = gen_grayscale(img)
print(img.shape)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()

# horizontal Prewitt-style differencing kernel
B = np.array([[-1,0,1],[-1,0,1],[-1,0,1]])
result = filter_process(img,B)
plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
plt.show()


# # Filter example (Gaussian filter)

def gausian_filter(x,y,s):
    """
    gausian filter value at position (x, y)

    Args:
        x,y : position
        s : standard deviation
    """
    # the original referenced an undefined name `sigma` (the parameter is `s`)
    return (1/(2*np.pi*(s**2))) *np.exp( -(x ** 2 + y ** 2) / (2 * (s ** 2)))

# +
s = 1.3
K = np.zeros((3,3))
for y in range(-1,2):
    for x in range(-1,2):
        K[y+1,x+1] = gausian_filter(x,y,s)
K
# -
Question_11_20/.ipynb_checkpoints/filter_process-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: TESS Environment # language: python # name: tess # --- # # Example TESS Catalog Queries # This notebook shows you two example panels for exploring catalog information on a TESS target. One does a simple cone-search of the Tess Input Catalog. The second overlays a search of the TIC onto a TESS image and identifies if there is 2-minute data on nearby stars. These use the `panel` module to allow for interactive filtering. # # These examples rely on tic_viz and panel to run. panel.extension() must be run in the notebook before calling the tic_viz functions. import tic_viz as viz import panel as pn pn.extension() # ## Query TIC # Simple cone sarch of the TESS Input Catalog. # The table is sorted by distance from the requested location on the sky. viz.query_tic() # ## Target Overview # The `target_overview` function returns several tabs of information based on the selected target of interest. Since it is retrieving a TESS image, it takes a few seconds to load. You must input the target name or coordinates when calling the function, you cannot change the location after the panel loads. This function loads the following interactive tables and visualizations: # * TIC parameters of nearby stars # * 2-minute data availability of nearest targets # * Available planet search data (DV files) for nearest targets # * Plot of TESS image with overlayed stars. # # Input: # * name: name of the star or coordinates # * radius_deg: size of the cone search and image (0.4 maximum) # * num_search: number of nearby stars to search for 2-minute data around viz.target_overview(name="TOI-1002.01", radius_deg = 0.10, num_search = 20)
code/TIC_Panel_Example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import heapq


class binarynode:
    """Node of the Huffman tree; `data` is a symbol, or None for internal nodes."""

    def __init__(self,data,freq):
        self.data=data
        self.freq=freq
        self.left=None
        self.right=None

    def __lt__(self,other):
        # heapq orders nodes by frequency
        return self.freq < other.freq

    def __eq__(self,other):
        return self.freq == other.freq


class huffmanCoding:
    """Huffman-compress text into a bytes object.

    The first output byte records how many padding zero-bits were appended
    so the bit stream could be split into whole bytes.
    """

    def __init__(self,path):
        self.path=path       # source file path (compress() currently uses a hard-coded demo string)
        self.__q=[]          # min-heap of binarynode, keyed on frequency
        self.curr={}         # symbol -> bit-string code

    def __formDict(self,text):
        # frequency table of the input symbols
        di={}
        for ele in text:
            di[ele]=di.get(ele,0)+1
        return di

    def __makeHeap(self,dic):
        # one leaf node per distinct symbol
        for ele in dic:
            f=dic[ele]
            node=binarynode(ele,f)
            heapq.heappush(self.__q,node)

    def __binaryT(self):
        # repeatedly merge the two least frequent nodes until one tree remains
        while len(self.__q)>1:
            n1=heapq.heappop(self.__q)
            n2=heapq.heappop(self.__q)
            fSum=n1.freq+n2.freq
            new=binarynode(None,fSum)
            new.left=n1
            new.right=n2
            heapq.heappush(self.__q,new)

    def __helper(self,root,cbit):
        if root is None:
            return
        if root.data is not None:
            # leaf: record the accumulated bit pattern for this symbol.
            # (The original tested `is None`, storing codes on internal
            # nodes and never on leaves, so encoding raised KeyError.)
            self.curr[root.data]=cbit
        self.__helper(root.left,cbit+"0")
        self.__helper(root.right,cbit+"1")

    def __bCodes(self):
        # walk the finished tree and fill self.curr
        root=heapq.heappop(self.__q)
        self.__helper(root,"")

    def __encodeText(self,text):
        # concatenate the per-symbol codes into one bit string
        encoded=""
        for ele in text:
            encoded+=self.curr[ele]
        return encoded

    def __padded(self,encoded_text):
        # pad with zeros up to a byte boundary and prepend the pad count
        amount=8-((len(encoded_text))%8)
        encoded_text+="0"*amount
        # the original built a set literal {"0:08b"} and called .format on it
        padded_info="{0:08b}".format(amount)
        final=padded_info+encoded_text
        return final

    def __makeArray(self,padded_text):
        # split the bit string into bytes
        arr=[]
        for j in range(0,len(padded_text),8):
            # the original sliced with an undefined `i` (the loop variable is j)
            byte=padded_text[j:j+8]
            arr.append(int(byte,2))
        return arr

    def compress(self):
        """Run the full pipeline and return the compressed bytes."""
        # reading text from files
        text="dshbwehfewhfhvhefwefuewfofdhyscbjkvbdehbehfbehv"
        dic=self.__formDict(text)
        self.__makeHeap(dic)
        self.__binaryT()
        self.__bCodes()
        encoded_text=self.__encodeText(text)
        padded_text=self.__padded(encoded_text)
        bytesArray=self.__makeArray(padded_text)
        final_bytes=bytes(bytesArray)
        return final_bytes
Huffman_Coding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Correlation stability
#
# This notebook is meant to present an example of correlation stability, based on the demonstration by <NAME> at https://www.nicebread.de/at-what-sample-size-do-correlations-stabilize/.
#
# I will use the [NHANES dataset](https://www.cdc.gov/nchs/nhanes/index.htm) via the [nhanes Python package](https://pypi.org/project/nhanes/); you can install this using:
#
# ``pip install nhanes``
#

# +
import nhanes
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns  # used for distplot/boxplot below; was missing, causing a NameError

from nhanes.load import load_NHANES_data, load_NHANES_metadata, open_dataset_page
import scipy.stats
from sklearn.preprocessing import quantile_transform
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import balanced_accuracy_score

# +
# get the data
data_df = load_NHANES_data(year='2017-2018')
metadata_df = load_NHANES_metadata(year='2017-2018')


def get_good_variables(data_df, thresh=5000):
    """Keep the float columns that have more than `thresh` non-missing values."""
    float_columns = [i for i in data_df.columns if data_df[i].dtype == np.float64]
    data_df_float = data_df[float_columns]
    sum_nona = data_df_float.notna().sum()
    data_df_float_thresh = data_df[sum_nona[sum_nona > thresh].index]
    return(data_df_float_thresh)


transform_data = False

if transform_data:
    data_df_orig = get_good_variables(data_df)
    data_df = pd.DataFrame(quantile_transform(data_df_orig),
                           index=data_df_orig.index,
                           columns=data_df_orig.columns)

data_df.shape
# -

# If you want to see all variables pairs that exceed some absolute correlation, set show_all_corrs to True

# +
cc=data_df.corr()

corr_threshold = 0.2
show_all_corrs = False

highcorr = np.where(cc.abs()>corr_threshold)
if show_all_corrs:
    for i in range(len(highcorr[0])):
        x, y = highcorr[0][i], highcorr[1][i]
        if cc.iloc[x,y] < 1:  # skip the diagonal
            print(cc.index[x], cc.columns[y], cc.iloc[x,y])
# -

# Plot a selected example - in this case, the correlation between BMI and HbA1c.

# +
xvar = 'BodyMassIndexKgm2'
yvar = 'Glycohemoglobin'

plt.scatter(data_df[xvar], data_df[yvar])

# create a new data frame with the selected variables
selected_df = data_df[[xvar, yvar]].dropna()
print(selected_df.shape)
corr = selected_df.corr().iloc[0, 1]
print(corr)
# -

# We will take 1000 samples of size 500 from the NHANES dataset. Then we create increasingly large subsamples from each sample, starting with 10 and increasing in steps of 10.

# +
# get corridor of stability
# (this cell had been turned into comments; the next cell depends on
#  `sample_sizes` and `corrs`, so it is restored as executable code)
step_size = 10
min_size = 10
max_size = 500

sample_sizes = np.arange(min_size, max_size, step_size)

corrs = pd.DataFrame(index = sample_sizes)

nruns = 1000
for run in range(nruns):
    sample_df = selected_df.sample(max_size)
    for sample_size in sample_sizes:
        corrs.loc[sample_size, run] = sample_df.iloc[:sample_size,:].corr().iloc[0,1]
# -

# Now we compute the 95% empirical confidence interval for each subsample size, and then plot each sample across all of its subsamples as a separate line. We also plot the +/- 0.1 "corridor of stability".

# +
ci = pd.DataFrame({'upper': np.zeros(len(sample_sizes)),
                   'lower': np.zeros(len(sample_sizes))},
                  index = sample_sizes)
for sample_size in sample_sizes:
    ci.loc[sample_size, 'upper'] = scipy.stats.scoreatpercentile(corrs.loc[sample_size,:], 97.5)
    ci.loc[sample_size, 'lower'] = scipy.stats.scoreatpercentile(corrs.loc[sample_size,:], 2.5)

_ = plt.plot(corrs, linewidth=0.1, alpha=0.3, color='k')
plt.xlabel('sample size')
plt.ylabel('Correlation coefficient')
plt.plot(ci['upper'], color='r', label='95% CI')
plt.plot(ci['lower'], color='r')
plt.hlines([corr - 0.1, corr + 0.1], xmin=0, xmax=500, linestyles='dashed', label='COS +/- 10%')
_ = plt.legend()
plt.savefig('correlation_stability.pdf')
# -

# Now let's do the same for a classification analysis using cross-validation. Here we will use a logistic regression model, in which we attempt to estimate lifetime smoking status from three variables: Age, Gender, and blood cadmium (which is strongly associated with smoking), r=0.38 in the full sample.

# +
selected_df = data_df[[
    'SmokedAtLeast100CigarettesInLife',
    'BloodCadmiumUgl',
    'AgeInYearsAtScreening',
    'Gender']].dropna()
selected_df['Gender'] = [1 if i == 'Female' else 0 for i in selected_df.Gender]

# cadmium is badly skewed, so we use an empirical quantile transform to normalize
selected_df['BloodCadmiumUgl'] = quantile_transform(
    selected_df['BloodCadmiumUgl'].values[:, np.newaxis],
    output_distribution='uniform')
print(selected_df.shape)
corr = selected_df.corr()
print(corr)
# -

# First, we run it on the full sample.

# +
# predict smoking status from age, gender, and blood cadmium level
# first run on full sample
X = selected_df[['BloodCadmiumUgl', 'AgeInYearsAtScreening', 'Gender']].values
y = selected_df[['SmokedAtLeast100CigarettesInLife']].values[:, 0]

# +
nruns = 1000
accuracy_fullsample = np.zeros(nruns)
test_size = 0.25
pipe = Pipeline([('scaler', StandardScaler()), ('lr', LogisticRegression())])

for run in range(nruns):
    X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=test_size)
    pipe.fit(X_train, y_train)
    pred_full = pipe.predict(X_test)
    accuracy_fullsample[run] = balanced_accuracy_score(y_test, pred_full)

# +
# Plot distribution across splits
sns.distplot(accuracy_fullsample)
plt.xlabel('Accuracy (full sample)')
# -

# Next, we compute it on subsamples from 20 to 1000 in steps of 20. This could take a few minutes...

# +
# compute crossvalidation across a range of sample sizes
sample_sizes = np.hstack((np.arange(20, 400, 20), np.arange(400, 4000, 100)))
nruns = 1000
accuracy = pd.DataFrame()
ctr = 0
for sample_size in sample_sizes:  # the enumerate() index used originally was never read
    for run in range(nruns):
        sample_df = selected_df.sample(sample_size)
        X = sample_df[['BloodCadmiumUgl', 'AgeInYearsAtScreening', 'Gender']].values
        y = sample_df[['SmokedAtLeast100CigarettesInLife']].values[:, 0]
        X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=test_size)
        pipe.fit(X_train, y_train)
        pred = pipe.predict(X_test)
        accuracy.loc[ctr, 'sample_size'] = int(sample_size)
        accuracy.loc[ctr, 'score'] = balanced_accuracy_score(y_test, pred)
        ctr += 1
# -

boxplot = sns.boxplot(x='sample_size', y='score', data=accuracy)
_ = boxplot.set_xticklabels(boxplot.get_xticklabels(),rotation=90)
for label in boxplot.xaxis.get_ticklabels()[1::2]:
    label.set_visible(False)
plt.hlines([np.mean(accuracy_fullsample)], xmin=0, xmax=len(sample_sizes),
           label='full_sample (n = %d)' % selected_df.shape[0])
plt.legend()
plt.tight_layout()
plt.savefig('accuracy_by_samplesize.pdf')
Correlations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Observing PCA on the handwritten-digits dataset
#
# [Goals]
# - Classify the digits dataset with PCA + logistic regression and observe how
#   accuracy changes with the number of components.
# - Unsupervised models are hard to judge from simple examples; the point of
#   this example is just to get a feel for the model's effect — don't worry
#   about understanding every line.
#
# [Key points]
# - Watch how the PCA explained-variance ratio and the classification accuracy
#   change as the number of components varies.

# +
# Load the packages.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import warnings
warnings.filterwarnings("ignore")

# +
# Define the PCA step followed by a logistic-regression classifier.
# NOTE(review): loss='log' was renamed to 'log_loss' in scikit-learn 1.1+;
# kept as-is for compatibility with the sklearn version this notebook targets.
logistic = SGDClassifier(loss='log', penalty='l2', max_iter=10000,
                         tol=1e-5, random_state=0)
pca = PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])

# Load the handwritten-digits dataset.
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# -

# Run GridSearchCV first to find the best parameters.
param_grid = {
    'pca__n_components': [4, 10, 20, 30, 40, 50, 64],
    'logistic__alpha': np.logspace(-4, 4, 5),
}
# The deprecated iid=False argument was removed here: GridSearchCV dropped the
# iid parameter in scikit-learn 0.24, and iid=False has been the (only)
# behaviour since 0.22, so the results are unchanged.
search = GridSearchCV(pipe, param_grid, cv=5, return_train_score=False)
search.fit(X_digits, y_digits)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)

# +
# Plot the PCA explained-variance ratio per number of components.
pca.fit(X_digits)
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
ax0.plot(pca.explained_variance_ratio_, linewidth=2)
ax0.set_ylabel('PCA explained variance')
ax0.axvline(search.best_estimator_.named_steps['pca'].n_components,
            linestyle=':', label='n_components chosen')
ax0.legend(prop=dict(size=12))

# Plot the best cross-validated accuracy per number of components.
results = pd.DataFrame(search.cv_results_)
components_col = 'param_pca__n_components'
best_clfs = results.groupby(components_col).apply(
    lambda g: g.nlargest(1, 'mean_test_score'))
best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score',
               legend=False, ax=ax1)
ax1.set_ylabel('Classification accuracy (val)')
ax1.set_xlabel('n_components')
plt.tight_layout()
plt.show()
# -

# # Observations
# * explained variance ratio: the explained variance decreases steadily with
#   additional components.
# * classification accuracy: it also improves with more components, but gains
#   beyond ~20 components are small.
D60_PCA 觀察_使用手寫辨識資料集/Day_060_PCA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# Lambda School Data Science
#
# *Unit 2, Sprint 1, Module 4*
#
# ---

# + [markdown]
# # Logistic Regression
#
# ## Assignment 🌯
#
# You'll use a [**dataset of 400+ burrito reviews**](https://srcole.github.io/100burritos/). How accurately can you predict whether a burrito is rated 'Great'?
#
# > We have developed a 10-dimensional system for rating the burritos in San Diego. ... Generate models for what makes a burrito great and investigate correlations in its dimensions.
#
# - [ x ] Do train/validate/test split. Train on reviews from 2016 & earlier. Validate on 2017. Test on 2018 & later.
# - [ x ] Begin with baselines for classification.
# - [ x ] Use scikit-learn for logistic regression.
# - [ ] Get your model's validation accuracy. (Multiple times if you try multiple iterations.)
# - [ ] Get your model's test accuracy. (One time, at the end.)
# - [ ] Commit your notebook to your fork of the GitHub repo.

# +
# %%capture
import sys

# Resolve the data location: remote raw files on Colab, local checkout otherwise.
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/'
    # !pip install category_encoders==2.*
else:
    DATA_PATH = '../data/'

# +
# Load data downloaded from https://srcole.github.io/100burritos/
import pandas as pd

df = pd.read_csv(DATA_PATH + 'burritos/burritos.csv')

# +
# Derive the binary classification target: a 'Great' burrito is one with an
# overall rating of 4 or higher on a 5-point scale. Unrated burritos are dropped.
df = df.dropna(subset=['overall'])
df['Great'] = df['overall'] >= 4

# +
# Collapse the free-text burrito names into five coarse categories.
df['Burrito'] = df['Burrito'].str.lower()

california = df['Burrito'].str.contains('california')
asada = df['Burrito'].str.contains('asada')
surf = df['Burrito'].str.contains('surf')
carnitas = df['Burrito'].str.contains('carnitas')

df.loc[california, 'Burrito'] = 'California'
df.loc[asada, 'Burrito'] = 'Asada'
df.loc[surf, 'Burrito'] = 'Surf & Turf'
df.loc[carnitas, 'Burrito'] = 'Carnitas'
df.loc[~california & ~asada & ~surf & ~carnitas, 'Burrito'] = 'Other'

# +
# Drop some high-cardinality categoricals.
df = df.drop(columns=['Notes', 'Location', 'Reviewer', 'Address', 'URL',
                      'Neighborhood'])

# +
# Drop columns that would "leak" the target.
df = df.drop(columns=['Rec', 'overall'])

# + [markdown]
# ## 1 done
# - [ ] Do train/validate/test split. Train on reviews from 2016 & earlier. Validate on 2017. Test on 2018 & later.

# + [markdown]
# ### pre-vis

# +
# Peek at one row to see the column names.
df.sample()

# +
# Check the dtypes so each column can be treated appropriately.
df.dtypes

# + [markdown]
# ### pre-vis over

# +
# Parse the date strings into datetimes so the frame can be filtered by date.
df['Date'] = pd.to_datetime(df['Date'])

# +
# Re-index the dataframe by the date.
# Import metrics up front: in the original cell order, accuracy_score was used
# one cell *before* its import, which raises NameError on a fresh top-to-bottom run.
from sklearn.metrics import accuracy_score, mean_absolute_error

df = df.set_index(df['Date'])
df = df.sort_index()

# +
# Confirm the re-index.
df.sample(5)

# +
# Train/validate/test split by date, per the assignment: train on 2016 & earlier,
# validate on 2017, test on 2018 & later. Label slices on a DatetimeIndex are
# inclusive; the original ranges ended at ...-12-30 (silently dropping any
# December 31st reviews), started training at 2016-01-01 (dropping pre-2016
# reviews despite "2016 & earlier"), and capped the test set at 2020.
train_2016 = df[:'2016-12-31']
validate_2017 = df['2017-01-01':'2017-12-31']
test_2018 = df['2018-01-01':]
train_2016.shape, validate_2017.shape, test_2018.shape

# + [markdown]
# ## 2 - done
# - [ ] Begin with baselines for classification.

# +
# Set the target and look at its class balance in the training years.
target = 'Great'
y_train = train_2016[target]
y_train.value_counts(normalize=True)

# +
# The majority class is the baseline prediction.
majority_class = y_train.mode()[0]

# +
# Baseline accuracy on the validation year.
y_val = validate_2017[target]
y_pred = [majority_class] * len(validate_2017)
accuracy_score(y_val, y_pred)

# +
# Baseline accuracy on the training years. The original referenced an
# undefined y_train_pred (NameError); build the constant prediction first.
y_train_pred = [majority_class] * len(y_train)
accuracy_score(y_train, y_train_pred)

# + [markdown]
# ## 3 - started
# - [ x ] Use scikit-learn for logistic regression.
# +
# 1. Import the estimator class.
from sklearn.linear_model import LinearRegression

# 2. Instantiate it.
linear_reg = LinearRegression()

# 3. Arrange the X feature matrices (the y target vectors already exist).
features = ['Volume', 'Tortilla', 'Temp', 'Meat', 'Fillings',
            'Meat:filling', 'Uniformity', 'Salsa', 'Synergy', 'Wrap']
X_train = train_2016[features]
X_val = validate_2017[features]

# Impute missing values.
from sklearn.impute import SimpleImputer

imputer = SimpleImputer()
X_train_imputed = imputer.fit_transform(X_train)
X_val_imputed = imputer.transform(X_val)

# 4. Fit the model.
linear_reg.fit(X_train_imputed, y_train)

# 5. Apply the model to new data — the predictions look like this:
linear_reg.predict(X_val_imputed)

# +
# Inspect the coefficient assigned to each feature.
pd.Series(linear_reg.coef_, features)

# +
df.sample(4)

# +
# Score one hand-made burrito profile with the linear model.
test_case = [[0.90, 3, 5, 2, 5, 5, 4, 4, 1, 5]]
linear_reg.predict(test_case)

# + [markdown]
# # 4.
# - [ ] Get your model's validation accuracy. (Multiple times if you try multiple iterations.)

# +
# Import the relevant LogisticRegression class.
from sklearn.linear_model import LogisticRegression

# Fit a plain logistic regression on the mean-imputed features and score it
# on the validation year.
log_reg = LogisticRegression(solver='lbfgs')
log_reg.fit(X_train_imputed, y_train)

y_pred = log_reg.predict(X_val_imputed)
print(f'Validation accuracy: {accuracy_score(y_val, y_pred)}')

# +
# Imports for the encoded / scaled pipeline below.
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegressionCV

# +
# One-hot encode the categorical columns.
encoder = ce.one_hot.OneHotEncoder(use_cat_names=True)
X_train_enc = encoder.fit_transform(X_train)
X_val_enc = encoder.transform(X_val)

# +
# Impute missing values (fit on train, apply to validation).
imputer = SimpleImputer()
X_train_imp = imputer.fit_transform(X_train_enc)
X_val_imp = imputer.transform(X_val_enc)

# +
# Standardize the features.
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train_imp)
X_val_sc = scaler.transform(X_val_imp)

# +
# Wrap the scaled arrays back into DataFrames so the column names survive.
X_train_sc = pd.DataFrame(X_train_sc, columns=X_train_enc.columns)
X_val_sc = pd.DataFrame(X_val_sc, columns=X_val_enc.columns)

# +
# Fit the cross-validated logistic regression and report its accuracy.
model = LogisticRegressionCV()
model.fit(X_train_sc, y_train)

y_pred = model.predict(X_val_sc)
print(f'Validation accuracy: {accuracy_score(y_val, y_pred)}')

# +
# Coefficients from the CV model, indexed by feature name.
coefs = model.coef_[0]
coefs = pd.Series(coefs, X_train_sc.columns)
coefs

# +
# Plot which features push a burrito towards "Great".
coefs.sort_values().plot.barh()

# +
# Run the test-period data through the same fitted transformers.
X_test = test_2018[features]
X_test_enc = encoder.transform(X_test)
X_test_imp = imputer.transform(X_test_enc)
X_test_sc = scaler.transform(X_test_imp)

# +
# Test-set predictions.
y_pred = model.predict(X_test_sc)

# + [markdown]
# ## Stretch Goals
#
# - [ ] Add your own stretch goal(s) !
# - [ ] Make exploratory visualizations.
# - [ ] Do one-hot encoding.
# - [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html).
# - [ ] Get and plot your coefficients.
# - [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
module4-logistic-regression/LS_DS_214_assignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# # If you are running on Google Colab, please install TensorFlow 2.0 by uncommenting below..
# try:
#   # %tensorflow_version only exists in Colab.
#   %tensorflow_version 2.x
# except Exception:
#   pass
# # If you are running on Google Colab, uncomment below to install the necessary dependencies
# # before beginning the exercise.
# print("Setting up colab environment")
# !pip install lz4
# !pip install gputil
# !pip uninstall -y -q pyarrow
# !pip install -q -U ray[tune]
# !pip install -q ray[debug]
# # A hack to force the runtime to restart, needed to include the above dependencies.
# print("Done installing! Restarting via forced crash (this is not an issue).")
# import os
# os._exit(0)

# +
from google.colab import drive
drive.mount('/content/drive')

# +
import os
os.chdir("/content/drive/MyDrive/semester_project_experiments/keras_reinforce_comm_reward")

# +
import pickle
import numpy as np

from logger import info_logger, results_logger
from envs.particle_rllib.environment import ParticleEnv
from reinforce_agent import ReinforceAgent

# +
import matplotlib.pyplot as plt
from IPython.display import clear_output
# %matplotlib inline
plt.rcParams['figure.figsize'] = 12, 8

# + [markdown]
# ## Helpers

# +
def create_env_fn():
    """Build a ParticleEnv configured from the module-level parameters."""
    return ParticleEnv(n_listeners=n_listeners,
                       n_landmarks=n_landmarks,
                       render_enable=render_enable)


class Results(dict):
    """Dict of named reward curves with smoothed live plotting and .npz persistence."""

    def __init__(self, *args, **kwargs):
        if 'filename' in kwargs:
            # Restore previously saved curves from an .npz file.
            data = np.load(kwargs['filename'])
            super().__init__(data)
        else:
            super().__init__(*args, **kwargs)
        self.new_key = None      # most recently updated curve, drawn on top
        self.plot_keys = None    # optional whitelist of curves to draw
        self.ylim = None         # optional fixed y-axis limits

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        self.new_key = key

    def plot(self, window):
        """Redraw all curves; the most recently updated one is drawn last (on top)."""
        clear_output(wait=True)
        for key in self:
            if self.plot_keys is not None and key not in self.plot_keys:
                continue
            if key == self.new_key:
                continue  # deferred below so it ends up on top
            self.plot_smooth(key, window)
        if self.new_key is not None:
            self.plot_smooth(self.new_key, window)
        plt.xlabel('Episode')
        plt.ylabel('Reward')
        plt.legend(loc='lower right')
        if self.ylim is not None:
            plt.ylim(self.ylim)
        plt.show()

    def plot_smooth(self, key, window):
        """Plot one curve smoothed by a moving average of the given window."""
        if len(self[key]) == 0:
            plt.plot([], [], label=key)
            return None
        y = np.convolve(self[key], np.ones((window,)) / window, mode='valid')
        x = np.linspace(window / 2, len(self[key]) - window / 2, len(y))
        plt.plot(x, y, label=key)

    def save(self, filename='results'):
        """Persist all curves to results/<filename>.npz, creating the folder if needed."""
        results_dir = 'results/'
        if not os.path.exists(results_dir):
            os.makedirs(results_dir)
        np.savez(results_dir + filename, **self)

# + [markdown]
# ## Parameters

# +
# pretraining parameters
pretraining_n_epochs = 10
pretraining = False

# training parameters
training_n_epochs = 100

# common parameters
n_episodes = 1000            # number of episodes in one epoch
n_steps = 25                 # number of steps in one episode
policy_learning_rate = 0.002
value_learning_rate = 0.01   # for updating the target network
gamma = 0.75                 # discount factor
n_layers = 3
n_neurons = 128

# environment config parameters
n_listeners = 1
n_landmarks = 12
render_enable = False

# convergence parameters
window_size = 5              # size of the sliding window
min_rel_delta_reward = 0.02  # minimum acceptable variation of the reward

# + [markdown]
# ## Initialize the environment and the agents

# +
env = create_env_fn()
# Per the environment implementation there is a separate action/observation
# space per agent: index 0 belongs to the manager, the rest to the workers.
# action_space[0] / observation_space[0] are allocated for the manager,
# while the other entries are allocated for the workers.
manager_action_space = env.action_space[0]
manager_observation_space = env.observation_space[0]
worker_action_space = env.action_space[1]
worker_observation_space = env.observation_space[1]

# Instantiate the manager agent.
manager = ReinforceAgent(name='manager',
                         n_obs=manager_observation_space.shape[0],
                         action_space=manager_action_space,
                         policy_learning_rate=policy_learning_rate,
                         value_learning_rate=value_learning_rate,
                         discount=gamma,
                         n_layers=n_layers,
                         n_neurons=n_neurons)
info_logger.info("Manager agent initialized")

# Instantiate the listener (worker) agent.
worker = ReinforceAgent(name='worker',
                        n_obs=worker_observation_space.shape[0],
                        action_space=worker_action_space,
                        policy_learning_rate=policy_learning_rate,
                        value_learning_rate=value_learning_rate,
                        discount=gamma,
                        n_layers=n_layers,
                        n_neurons=n_neurons)
info_logger.info("Worker agent initialized")


# +
def run_experiment(env, is_training, n_steps, n_episodes,
                   past_worker_rewards, past_manager_rewards):
    """Run n_episodes episodes of n_steps each.

    The worker is trained every episode; the manager only when is_training
    is True. Returns (manager_rewards, worker_rewards), one entry per episode.
    """
    manager_rewards = []
    worker_rewards = []
    probs_correct_goal = []
    for episode in range(1, n_episodes + 1):
        # Refresh the live reward plot every 10 episodes.
        if episode % 10 == 0:
            results['worker'] = np.array(train_worker_rewards + worker_rewards)
            results['manager'] = np.array(train_manager_rewards + manager_rewards)
            results.plot(10)

        # Reset the environment to a new episode.
        obs = env.reset()
        ext_comm_reward = 0
        episode_manager_reward = 0
        episode_worker_reward = 0
        step = 1
        correct_goals = []
        action = {}
        while True:
            # 1. Decide on an action based on the observations. If there is no
            # observation for the manager, the communication is extended.
            action['worker_agent_1'] = worker.decide(obs['worker_agent_1'])
            if 'manager_agent' in obs:
                action['manager_agent'] = manager.decide(obs['manager_agent'])

            # 2. Take the action in the environment.
            next_obs, rewards, done, _ = env.step(action)
            ext_comm_reward += rewards['manager_agent']
            episode_worker_reward += rewards['worker_agent_1']
            episode_manager_reward += rewards['manager_agent']

            # 3. Store the information returned from the environment for training.
            worker.observe(obs['worker_agent_1'], action['worker_agent_1'],
                           rewards['worker_agent_1'])
            if is_training and 'manager_agent' in obs:
                # NOTE(review): indentation reconstructed — the accumulated
                # communication reward is assumed to reset right after the
                # manager observes it; confirm against the original notebook.
                if step > 1:
                    manager.observe(manager_obs, manager_action, ext_comm_reward)
                    ext_comm_reward = 0
                manager_obs = obs['manager_agent']
                manager_action = action['worker_agent_1']

            # 4. At the episode horizon, train the network(s) on the episode.
            if step == n_steps:
                manager_rewards.append(episode_manager_reward)
                worker_rewards.append(episode_worker_reward)
                worker.train()
                if is_training:
                    manager.train()
                break

            # Reset for the next step.
            obs = next_obs
            step += 1
    return manager_rewards, worker_rewards

# + [markdown]
# ## Simulation loop

# +
convergence = False
convergence_counter = 0
train_manager_rewards = []
train_worker_rewards = []
epoch_mean_rewards = []  # mean manager reward of each epoch, in order
curr_epoch = 1
results = Results()

while curr_epoch <= training_n_epochs:
    info_logger.info("Current epoch: {}".format(curr_epoch))

    # During the (optional) pretraining epochs only the worker learns.
    is_pretrain_epoch = pretraining and curr_epoch <= pretraining_n_epochs
    manager_rewards, worker_rewards = run_experiment(
        env,
        is_training=not is_pretrain_epoch,
        n_steps=n_steps,
        n_episodes=n_episodes,
        past_worker_rewards=train_worker_rewards,
        past_manager_rewards=train_manager_rewards,
    )

    train_worker_rewards += worker_rewards
    train_manager_rewards += manager_rewards
    results.save(filename='results-ep{}'.format(curr_epoch))

    curr_epoch_mean_manager_reward = np.mean(manager_rewards)
    curr_epoch_mean_worker_reward = np.mean(worker_rewards)
    results_logger.info("Epoch: {}".format(curr_epoch))
    results_logger.info("\tmanager mean reward = {}".format(curr_epoch_mean_manager_reward))
    results_logger.info("\tworker mean reward = {}".format(curr_epoch_mean_worker_reward))
    epoch_mean_rewards.append(curr_epoch_mean_manager_reward)

    # Check the convergence conditions.
    # NOTE(review): this compares one epoch's mean against the *sum* of the
    # last five epoch means, which looks like it was meant to be their mean.
    if curr_epoch > pretraining_n_epochs + window_size:
        window_reward = sum(epoch_mean_rewards[-5:])
        if abs(curr_epoch_mean_manager_reward - window_reward) / window_reward <= min_rel_delta_reward:
            convergence_counter += 1
            if convergence_counter >= 5 and curr_epoch <= training_n_epochs - 10:
                convergence = True
        else:
            convergence = False
            convergence_counter = 0

    curr_epoch += 1

# NOTE(review): save placement reconstructed as once-after-training; confirm
# it was not intended to checkpoint inside the epoch loop.
manager.save()
worker.save()

if convergence:
    results_logger.info("Convergence! The mean reward has remained stable for {} epochs".format(convergence_counter))
elif convergence_counter > 0:
    results_logger.info("No convergence. The mean reward stabilized for the first time around epoch {}".format(1 + training_n_epochs - convergence_counter))
else:
    results_logger.info("No convergence. The mean reward has never stabilized.")
keras_reinforce_comm_reward/feudal-multi-agent-keras.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import seaborn as sb
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
from sklearn.neighbors import NearestNeighbors

link = '/Users/afatade/Downloads/anime_cleaned.csv'
data = pd.read_csv(link)
data.head()

len(data)

# +
# We have a lot of data. Let's see what we can do about the type and genre columns.

# +
# The type and genre columns matter most when picking similar anime.
# pd.get_dummies one-hot encodes the single-valued columns; for the
# multi-valued genre column we use str.get_dummies() with ',' as separator,
# and do the same kind of encoding for it.
# -

data.isnull().sum()

# +
# Some columns have missing entries, but that's okay — we won't use those
# columns. We isolate type, source, episodes and genre: the key features
# for recommending similar anime. Keeping it simple.

# +
# One-hot encode type and source.
type_encoded = pd.get_dummies(data['type'])
# -

type_encoded.head()

source_encoded = pd.get_dummies(data['source'])
source_encoded.head()

# +
# The typical anime fan mostly cares about a few of the type factors
# (e.g. manga sources); we may revisit these features later.
# The genre column holds several comma-separated values per row, so it is
# one-hot encoded with a separator.
genre_encoded = data['genre'].str.get_dummies(sep=',')
# -

genre_encoded.head().values

# +
# The values were encoded successfully.
# Create a new data frame from the encoded genre/type columns plus the
# episode count.
features = pd.concat([genre_encoded, type_encoded, data['episodes']], axis=1)
# -

features.head()

# +
# The features have very different magnitudes, so scale them to [0, 1].
features_scaled = MinMaxScaler().fit_transform(features)
# -

features_scaled[0]

# +
# Fit a KNN model on one feature matrix. (Renamed from collaborative_filter:
# the original variable was silently shadowed by the function of the same
# name defined below.)
knn_model = NearestNeighbors().fit(features_scaled)
# -

knn_model.kneighbors([features_scaled[0]])

data['title'].iloc[[0, 4893, 2668, 3943, 4664], ].values

# +
# Okay. We see that this filtering mechanism works very well!
# Let's build a function so that when a user enters a name, similar anime
# are recommended.
# -

# +
# Helper functions so the pipeline reads more cleanly overall.
def preprocess_data():
    """Load the anime CSV and return the min-max scaled one-hot feature matrix."""
    link = '/Users/afatade/Downloads/anime_cleaned.csv'
    data = pd.read_csv(link)
    type_encoded = pd.get_dummies(data['type'])
    source_encoded = pd.get_dummies(data['source'])
    genre_encoded = data['genre'].str.get_dummies(sep=',')
    features = pd.concat([genre_encoded, type_encoded, data['episodes']], axis=1)
    features_scaled = MinMaxScaler().fit_transform(features)
    return features_scaled


def get_partial_names(title):
    """Return [full_title, row_index] for the first title containing `title`.

    Raises ValueError when no title matches. (The original returned None in
    that case, which made get_features crash with a TypeError.)
    """
    names = list(data.title.values)
    for name in names:
        if title in name:
            return [name, names.index(name)]
    raise ValueError('No anime title contains {!r}'.format(title))


def get_features(title):
    """Return the dataset row index of the first matching title."""
    values = get_partial_names(title)
    return values[1]


def get_vector(title):
    """Return the scaled feature vector for the first matching title."""
    index = get_features(title)
    data = preprocess_data()
    return data[index]


def collaborative_filter():
    """Fit and return a NearestNeighbors model on the preprocessed features."""
    data = preprocess_data()
    filtering = NearestNeighbors().fit(data)
    return filtering


def get_recommendations(title):
    """Return the titles of the nearest neighbours of the given (partial) title.

    NOTE(review): this re-reads and re-preprocesses the CSV twice per call
    (once via get_vector and once via collaborative_filter); cache the
    preprocessed matrix / fitted model if this becomes a hot path.
    """
    vectorized_input = get_vector(title)
    filter_model = collaborative_filter()
    indices = filter_model.kneighbors([vectorized_input])[1]
    recommendations = data['title'].iloc[indices[0], ].values
    return recommendations
# -

get_recommendations("One Piece")

# +
# Lovely! We'll work on the user interface later.
# -
Improved_anime_recommender copy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.9 64-bit (''python-course'': virtualenv)' # name: python3 # --- # + [markdown] id="Hu-DQLCysvHt" # # "Chapitre 3 : Les boucles" # > "Python : Chapitre 3 - Lesson 2" # # - toc: false # - badges: true # - hide_binder_badge: true # - hide_github_badge: true # - comments: false # - layout: post # - author: AXI Academy # - permalink: /python-intro-gen/chapter/3/lesson/2/ # - # Dans ce chapitre, vous apprendrez comment faire en sorte que l'ordinateur exécute un groupe d'instructions encore et encore tant qu'un certain critère est vérifié. Le groupe d'instructions exécutées à plusieurs reprises est appelé une boucle. Il existe deux instructions de boucle en Python : `for` et `while`. Nous discuterons de la différence entre ces déclarations plus loin dans le chapitre, mais regardons d'abord un exemple de boucle dans le monde réel. # # # Un employé d'une station service effectue les actions suivantes lorsqu'il sert un client : # # - 1) Saluer le client # - 2) Demander le type d'essence requis et la quantité # - 3) Demander si le client a besoin d'autres services # - 4) Demander la somme d'argent requise # - 5) Donner de l'argent au caissier # - 6) Attendre le changement et la réception # - 7) Remettre la monnaie et le reçu au client # - 8) Dire merci et au revoir # # L'employé effectue ces étapes pour chaque client, mais il ne les suit pas lorsqu'il n'y a pas de client à servir. Il ne les exécute également que lorsque lors de son temps de travail. Si nous devions écrire un programme informatique pour simuler ce comportement, il ne suffirait pas de fournir les étapes et de demander à l'ordinateur de les répéter encore et encore. Nous aurions également besoin de lui dire quand arrêter de les exécuter. 
#
# Il existe deux grands types de boucles de programmation : les boucles de comptage (`for`) et les boucles contrôlées par événement (`while`).
#
# Dans une boucle de comptage, l'ordinateur sait au début de l'exécution de la boucle combien de fois il doit exécuter la boucle. En Python, ce type de boucle est défini avec l'instruction `for`, qui exécute le corps de la boucle pour chaque élément d'une liste.
#
# Dans une boucle contrôlée par événement, l'ordinateur arrête l'exécution de la boucle lorsqu'une condition n'est plus vraie. En Python, vous pouvez utiliser l'instruction `while`. L'ordinateur exécute le corps de la boucle tant que la condition est vraie. L'instruction `while` vérifie la condition avant d'effectuer chaque itération de la boucle.
#
# Les boucles de comptage sont en fait un sous-ensemble des boucles contrôlées par événement : la boucle est répétée jusqu'à ce que le nombre requis d'itérations soit atteint.
#
# Si vous vouliez aller de Paris à Marseille, quel algorithme de boucle utiliseriez-vous ? Si vous avez commencé par mettre votre voiture sur la route de Marseille, vous pourriez :
#
# - Rouler exactement 7h30. Après 7h30, arrêter la voiture et descendre.
# - Rouler exactement 780km. Après 780km, arrêter la voiture et descendre.
# - Rouler tant que nous ne sommes pas arrivés à Marseille. Une fois arrivé, arrêter la voiture et descendre.
#
# Les deux premiers algorithmes sont basés sur le comptage ; aucun de ces algorithmes ne garantit que vous arriverez à Marseille. Dans le premier cas, vous pourriez rencontrer un trafic dense ou pas du tout, et ne pas atteindre ou dépasser la destination souhaitée. Dans le second cas, vous pourriez trouver un détour et vous retrouver loin de Marseille.
#
# Le troisième algorithme est contrôlé par les événements. Vous continuez à conduire tant que vous n'êtes pas à Marseille. La condition que vous continuez à vérifier est : est-ce que je suis arrivé à Marseille ?
#
# ## 1.
La boucle `while` # # L'instruction python pour une boucle controlé par des évenements est : `while`. Vous devez l'utiliser lorsque vous ne savez pas à l'avance combien de fois vous devrez exécuter le corps de la boucle. Tant que la condition est vraie, le corps `while` continue de se répéter. Exemple : # # + total = 0 i = 1 while i <=10: total += i i += 1 print(i, total) # - # La variable utilisée dans la condition de boucle est le nombre `i`, que vous utilisez pour compter les entiers de 1 à 10. Vous initialisez d'abord ce nombre à 1. Dans la condition, vous vérifiez si `i` est inférieur ou égal à 10, et si c'est vrai, vous exécutez le corps de la boucle. Ensuite, à la fin du corps de la boucle, vous mettez à jour `i` en l'incrémentant de 1. # # Il est très important que vous incrémentiez `i` à la fin. Sinon, `i` serait toujours égal à 1, la condition serait toujours vraie et votre programme ne se terminerait jamais. Nous appelons cela une boucle infinie. Chaque fois que vous écrivez une boucle `while`, assurez-vous que la variable que vous utilisez dans votre condition est mise à jour à l'intérieur du corps de la boucle. # # Voici quelques erreurs courantes pouvant entraîner une boucle infinie : # + x = 0 while x < 3: y += 1 # wrong variable updated product = 1 count = 1 while count <= 10: product *= count # forgot to update count x = 0 while x < 5: print(x) x += 1 # update statement is indented one level too little, so it's outside the loop body x = 0 while x != 5: print(x) x += 2 # x will never equal 5, because we are counting in even numbers! # - # Dans certains exemples ci-dessus, nous comptons jusqu'à un nombre prédéterminé, il serait donc plus approprié pour nous d'utiliser une boucle `for`, c'est la structure de boucle qui est la plus couramment utilisée pour compter les boucles. 
Voici un exemple plus réaliste : # + # numbers we don't know the size or the content of the list numbers = [23, 1, -2, 23, 9, 12] total = 0 i = 0 # while total < 100: while i < len(numbers) and total < 100: total += numbers[i] i += 1 print(total) # - # Ici, nous additionnons les nombres d'une liste jusqu'à ce que le total atteigne 100. Nous ne savons pas combien de fois nous devrons exécuter la boucle, car nous ne connaissons pas les valeurs des nombres. Notez que nous pourrions atteindre la fin de la liste des nombres avant que le total n'atteigne 100. Si nous essayons d'accéder à un élément au-delà de la fin de la liste, nous obtiendrons une erreur, nous devons donc ajouter une vérification pour nous assurer que cela ne fonctionne pas. ça n'arrive pas. # ## Exercice 1 : # # - 1) Écrivez un programme qui utilise une boucle `while` pour additionner les carrés d'entiers (à partir de 1) jusqu'à ce que le total dépasse 200. Affichez le total final et le dernier nombre à mettre au carré et ajouter. # - 2) Ecrivez un programme qui demande à l'utilisateur de deviner un mot. L'utilisateur a jusqu'à dix suppositions – écrivez votre code de telle sorte que le mot secret et le nombre de suppositions autorisées soient faciles à modifier. Affichez des messages pour donner votre avis à l'utilisateur. # # ## 2. La boucle `for` # # L'instruction python pour une boucle de comptage est : `for`. Vous devriez l'utiliser lorsque vous devez faire quelque chose pour un nombre prédéfini d'étapes. # # Exemple : for i in range(1, 9): print(i) # Comme nous l'avons vu dans le chapitre précédent, `range` est un type de séquence immuable utilisé pour générer des entiers. Dans ce cas, le `range` compte de 1 à 8. La boucle `for` parcourra chacun des nombres tour à tour , effectuant l'action d'affichage. Lorsque la fin du `range` est atteint, la boucle `for` se termine. # # Vous pouvez également utiliser un `for` pour parcourir d'autres types de séquences. 
Vous pouvez parcourir une liste de `string` comme celle-ci : # + pets = ["cat", "dog", "budgie"] for pet in pets: print(pet) # - # A chaque itération de la boucle, l'élément suivant de la liste pets est affecté à la variable pet, à laquelle vous pouvez alors accéder à l'intérieur du corps de la boucle. L'exemple ci-dessus est fonctionnellement identique à celui-ci : for i in range(len(pets)): # i will iterate over 0, 1 and 2 pet = pets[i] print(pet) # Vous devriez éviter de faire cela, car c'est plus difficile à lire et inutilement complexe. Si, pour une raison quelconque, vous avez besoin de l'index à l'intérieur de la boucle ainsi que de l'élément de liste lui-même, vous pouvez utiliser la fonction `enumerate` pour numéroter les éléments : for i, pet in enumerate(pets): pets[i] = pet.upper() # rewrite the list in all caps # ## Exercice 2 : # # - 1) Écrivez un programme qui additionne les nombres entiers de 1 à 10 en utilisant une boucle `for` (et affiche le total à la fin). # - 2) Pouvez-vous penser à un moyen de le faire sans utiliser de boucle ? # - 3) Écrire un programme qui trouve la factorielle d'un nombre donné. Par exemple. 3 factoriel, ou `3!` est égal à `3 x 2 x 1`; `5!` est égal à `5 ​​x 4 x 3 x 2 x 1`, etc. Votre programme ne doit contenir qu'une seule boucle. # - 4) Écrivez un programme qui demande à l'utilisateur 10 nombres à virgule flottante et calcule leur somme, leur produit et leur moyenne. Votre programme ne doit contenir qu'une seule boucle. # - 5) Réécrivez le programme précédent pour qu'il ait deux boucles, une qui collecte et stocke les nombres, et une qui les traite. # ## 3. Boucles imbriquées # # Nous avons vu dans le chapitre précédent que nous pouvons créer des séquences multidimensionnelles, des lesquelles chaque élément est une autre séquence. Comment itérer sur toutes les valeurs d'une séquence multidimensionnelle ? Nous devons utiliser des boucles à l'intérieur d'autres boucles. 
Lorsque nous faisons cela, nous disons que nous imbriquons des boucles. # # Ré-utilisons l'exemple de l'emploi du temps du chapitre précédent, nous avons une séquence multidimensionnelle qui contient sept jours et que chaque jour contient 24 plages horaires. Chaque créneau horaire est une chaîne, qui est vide s'il n'y a rien de prévu pour ce créneau. Comment itérer sur tous les créneaux horaires et afficher tous nos événements programmés ? # + # first let's define weekday names WEEKDAYS = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday') # Create & fill timetable timetable = [[""] * 24 for day in range(7)] timetable[5][16] = "Python lesson." # now we iterate over each day in the timetable for index_day, day in enumerate(timetable): # and over each timeslot in each day for hour, event in enumerate(day): if event: # if the slot is not an empty string print(f"{WEEKDAYS[index_day]} at {hour}:00 -- {event}") # - # ## Exercice 3 # # - 1) Ecrivez un programme qui utilise une boucle `for` imbriquée pour remplir une liste tridimensionnelle représentant un calendrier : la liste de niveau supérieur doit contenir une sous-liste pour chaque mois, et chaque mois doit contenir quatre semaines. Chaque semaine doit être une liste vide. # - 2) Modifiez votre code pour faciliter l'accès à un mois dans le calendrier par un nom de mois lisible par l'homme. Faire de même pour les semaines par un nom qui est numéroté à partir de 1. Ajoutez un événement (sous la forme d'une chaîne de description) à la deuxième semaine de juillet. # # # # ## 4. Itérables, itérateurs et générateurs # # En Python, tout type qui peut être itéré avec une boucle `for` est un itérable. Les listes, les tuples, les `string` et les dictionnaires sont tous des types itérables couramment utilisés. Itérer sur une liste ou un tuple signifie simplement traiter chaque valeur à son tour. 
# # Parfois, nous utilisons une séquence pour stocker une série de valeurs qui ne suivent aucun modèle particulier : chaque valeur est imprévisible et ne peut pas être calculée à la volée. Dans des cas comme celui-ci, nous n'avons pas d'autre choix que de stocker chaque valeur dans une liste ou un tuple. Si la liste est très longue, cela peut utiliser beaucoup de mémoire. # # Que se passe-t-il si les valeurs de notre séquence suivent un modèle et peuvent être calculées à la volée ? Nous pouvons économiser beaucoup de mémoire en ne calculant les valeurs que lorsque nous en avons besoin, au lieu de toutes les calculer à l'avance : au lieu de stocker une grande liste, nous pouvons stocker uniquement les informations dont nous avons besoin pour le calcul. # # Python a beaucoup de types itérables intégrés qui génèrent des valeurs à la demande - ils sont souvent appelés générateurs. Nous avons déjà vu quelques exemples, comme range et enumerate. Vous pouvez principalement traiter un générateur comme n'importe quelle autre séquence si vous n'avez besoin d'accéder qu'à ses éléments un à la fois - par exemple, si vous l'utilisez dans une boucle `for` : # + # These two loops will do exactly the same thing: for i in (1, 2, 3, 4, 5): print(i) for i in range(1, 6): print(i) # - # Vous remarquerez peut-être une différence si vous essayez d'afficher le contenu du générateur, par défaut, tout ce que vous obtiendrez est la représentation sous forme standard de Python de l'objet, qui vous montre le type de l'objet et son identifiant unique. 
Pour afficher toutes les valeurs du générateur, nous devons le convertir en un type de séquence comme une liste, ce qui forcera toutes les valeurs à être générées : # + # this will not be very helpful print(range(100)) # this will show you all the generated values print(list(range(100))) # - # Vous pouvez utiliser tous ces itérables de manière presque interchangeable car ils utilisent tous la même interface pour itérer sur les valeurs : chaque objet itérable a une méthode qui peut être utilisée pour renvoyer un itérateur sur cet objet. L'itérable et l'itérateur forment ensemble une interface cohérente qui peut être utilisée pour parcourir une séquence de valeurs, que ces valeurs soient toutes stockées en mémoire ou calculées selon les besoins : # # L'itérable a une méthode pour accéder à un élément par son index. Par exemple, une liste renvoie simplement l'élément qui est stocké dans une position particulière. Une plage, d'autre part, calcule l'entier dans la plage qui correspond à un indice particulier. # # L'itérateur garde l'index de lecture dans la séquence et dispose d'une méthode qui vous permet d'accéder à l'élément suivant. Il peut y avoir plusieurs itérateurs associés à un seul itérable en même temps. chacun à un endroit différent dans l'itération. Par exemple, vous pouvez parcourir la même liste dans les deux niveaux d'une boucle imbriquée - chaque boucle utilise son propre itérateur et elles n'interfèrent pas les unes avec les autres : # + numbers = [1, 2, 3] for a in numbers: for b in numbers: print(f"{a} + {b} = {a + b}") # - # Quelques exemples de générateurs intégrés définis dans le module `itertools` de Python : # + # we need to import the module in order to use it import itertools # unlike range, count doesn't have an upper bound, and is not restricted to integers for i in itertools.count(1): print(i) # 1, 2, 3.... for i in itertools.count(1, 0.5): print(i) # 1.0, 1.5, 2.0.... 
# cycle repeats the values in another iterable over and over for animal in itertools.cycle(['cat', 'dog']): print(animal) # 'cat', 'dog', 'cat', 'dog'... # repeat repeats a single item for i in itertools.repeat(1): # ...forever print(i) # 1, 1, 1.... for i in itertools.repeat(1, 3): # or a set number of times print(i) # 1, 1, 1 # chain combines multiple iterables sequentially for i in itertools.chain(numbers, animals): print(i) # print all the numbers and then all the animals # - # Il existe également une fonction intégrée appelée `zip` qui nous permet de combiner plusieurs itérables par paire. Il sort également un générateur : # + for i in zip((1, 2, 3), (4, 5, 6)): print(i) for i in zip(range(5), range(5, 10), range(10, 15)): print(i) # - # ## Exercice 4 : # # - 1) Créez un tuple de noms de mois et un tuple du nombre de jours de chaque mois (supposez que février a 28 jours). En utilisant une seule boucle `for`, construisez un dictionnaire qui a les noms de mois comme clés et les numéros de jour correspondants comme valeurs. # - 2) Faites maintenant la même chose sans utiliser de boucle `for`. # ## 5. Les compréhensions # # Supposons que nous ayons une liste de nombres et que nous voulions construire une nouvelle liste en doublant toutes les valeurs de la première liste. Ou que nous voulons extraire tous les nombres pairs d'une liste de nombres. Ou que nous voulons trouver et mettre en majuscule tous les noms d'animaux dans une liste de noms d'animaux qui commencent par une voyelle. Nous pouvons faire chacune de ces choses en itérant sur la liste d'origine, en effectuant une sorte de vérification sur chaque élément à tour de rôle et en ajoutant des valeurs à une nouvelle liste au fur et à mesure. 
# + numbers = [1, 5, 2, 12, 14, 7, 18] doubles = [] for number in numbers: doubles.append(2 * number) even_numbers = [] for number in numbers: if number % 2 == 0: even_numbers.append(number) animals = ['aardvark', 'cat', 'dog', 'opossum'] vowel_animals = [] for animal in animals: if animal[0] in 'aeiou': vowel_animals.append(animal.title()) # - # C'est une façon assez lourde de faire quelque chose de très simple. Heureusement, nous pouvons réécrire des boucles simples comme celle-ci pour utiliser une syntaxe plus propre et plus lisible en utilisant des compréhensions. # # Une compréhension est une sorte de filtre que nous pouvons définir sur un itérable basé sur une certaine condition. Le résultat est un autre itérable. Voici quelques exemples de compréhensions de liste : doubles = [2 * number for number in numbers] even_numbers = [number for number in numbers if number % 2 == 0] vowel_animals = [animal.title() for animal in animals if animal[0] in 'aeiou'] # La compréhension est la partie écrite entre crochets sur chaque ligne. Chacune de ces compréhensions se traduit par la création d'un nouvel objet liste. # # ## Exercice 5 # # - 1) Créez une `string` qui contient les dix premiers entiers positifs séparés par des virgules et des espaces. N'oubliez pas que vous ne pouvez pas joindre des nombres - vous devez d'abord les convertir en `string`. Affichez la `string`. # - 2) Réécrivez le programme calendaire de l'exercice 3 en utilisant des compréhensions imbriquées au lieu de boucles imbriquées. Essayez d'ajouter une chaîne à l'une des listes de semaines, pour vous assurer que vous n'avez pas réutilisé la même liste au lieu de créer une liste distincte pour chaque semaine. # - 3) Faites maintenant quelque chose de similaire pour créer un calendrier qui est une liste avec 52 sous-listes vides (une pour chaque semaine de l'année entière). Astuce : comment modifieriez-vous les boucles `for` imbriquées ? # ## 6. 
Les instructions break et continue # # ### L'instruction `break` # # À l'intérieur du corps de la boucle, vous pouvez utiliser l'instruction `break` pour quitter immédiatement la boucle. Vous voudrez peut-être tester un cas particulier qui entraînera une sortie immédiate de la boucle. Par exemple: # + x = 1 while x <= 10: if x == 5: break print(x) x += 1 # - # ### L'instruction `continue` # # L'instruction continue est similaire à l'instruction `break`, en ce sens qu'elle fait sortir le flux de contrôle du corps de la boucle actuelle au point de rencontre, mais la boucle elle-même n'est pas terminée. Par exemple: for x in range(1, 10 + 1): # this will count from 1 to 10 if x == 5: continue print(x) # ## Exercice 6 # # - 1) Ecrivez un programme qui demande à plusieurs reprises à l'utilisateur un nombre entier. Si l'entier est pair, affichez le. Si l'entier est impair, n'affichez rien. Quittez le programme si l'utilisateur entre l'entier 99. # # Certains programmes demandent à l'utilisateur d'entrer un nombre variable d'entrées de données, et enfin d'entrer un caractère ou une chaîne spécifique qui signifie qu'il n'y a plus d'entrées. Par exemple, vous pourriez être invité à entrer votre code PIN suivi d'un dièse (#). Le dièse indique que vous avez fini de saisir votre code PIN. # # - 2) Écrire un programme qui fait la moyenne des nombres entiers positifs. Votre programme doit inviter l'utilisateur à entrer des entiers jusqu'à ce que l'utilisateur entre un entier négatif. L'entier négatif doit être ignoré et vous devez imprimer la moyenne de tous les entiers précédemment entrés. # # - 3) Implémentez une calculatrice simple avec un menu. Affichez les options suivantes à l'utilisateur, demandez une sélection et effectuez l'action demandée (par exemple, demandez deux chiffres et ajoutez-les). Après chaque opération, retournez l'utilisateur au menu. Quittez le programme lorsque l'utilisateur sélectionne 0. 
Si l'utilisateur entre un nombre qui n'est pas dans le menu, ignorez l'entrée et réaffichez le menu. Vous pouvez supposer que l'utilisateur entrera un entier valide : # + active="" # -- Calculator Menu -- # 0. Quit # 1. Add two numbers # 2. Substract two numbers # 3. Multiple two numbers # 4. Divide two numbers # - # ### Utiliser des boucles pour simplifier le code # # Nous pouvons utiliser notre connaissance des boucles pour simplifier certains types de code redondant. Considérons cet exemple, dans lequel nous demandons à un utilisateur quelques informations personnelles : # This is a good but we can do better name = input("Please enter your name: ") surname = input("Please enter your surname: ") # let's store these as strings for now, and convert them to numbers later age = input("Please enter your age: ") height = input("Please enter your height: ") weight = input("Please enter your weight: ") # + # Better person = {} for prop in ["name", "surname", "age", "height", "weight"]: person[prop] = input("Please enter your %s: " % prop) # - # ## Exercice 7 # # - Modifiez l'exemple ci-dessus pour inclure la conversion de type des propriétés : l'âge doit être un nombre entier, la taille et le poids doivent être des nombres flottants, et le nom et le prénom doivent être des `string`.
_notebooks/2022-01-02-python-intro-gen-3-2-loops.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/1_getting_started_roadmap/10_feature_visualization/1)%20Feature%20Visualization%20using%20Mxnet-Gluon%20Backend.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Table of Contents # # # ## [0. Install](#0) # # # ## [1. Importing mxnet-gluoncv backend](#1) # # # ## [2. Creating and Managing experiments](#2) # # # ## [3. Training a Cat Vs Dog image classifier](#3) # # # ## [4. Visualizing trained Kernels](#4) # # # ## [5. Visualize features with image input](#5) # <a id='0'></a> # # Install Monk # # - git clone https://github.com/Tessellate-Imaging/monk_v1.git # # - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt # - (Select the requirements file as per OS and CUDA version) # !git clone https://github.com/Tessellate-Imaging/monk_v1.git # + # If using Colab install using the commands below # !cd monk_v1/installation/Misc && pip install -r requirements_colab.txt # If using Kaggle uncomment the following command # #!cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt # Select the requirements file as per OS and CUDA version when using a local system or cloud # #!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt # - # <a id='1'></a> # # Imports # Monk import os import sys sys.path.append("monk_v1/monk/"); #Using mxnet-gluon backend from gluon_prototype import prototype # <a id='2'></a> # # Creating and managing experiments # - Provide project name # - Provide experiment name # - For a specific data create a single project # - Inside each project multiple experiments 
can be created # - Every experiment can be have diferent hyper-parameters attached to it gtf = prototype(verbose=1); gtf.Prototype("sample-project-1", "sample-experiment-1"); # ### This creates files and directories as per the following structure # # # workspace # | # |--------sample-project-1 (Project name can be different) # | # | # |-----sample-experiment-1 (Experiment name can be different) # | # |-----experiment-state.json # | # |-----output # | # |------logs (All training logs and graphs saved here) # | # |------models (all trained models saved here) # # <a id='2'></a> # # Training a Cat Vs Dog image classifier # ## Quick mode training # # - Using Default Function # - dataset_path # - model_name # - num_epochs # # # ## Dataset folder structure # # parent_directory # | # | # |------cats # | # |------img1.jpg # |------img2.jpg # |------.... (and so on) # |------dogs # | # |------img1.jpg # |------img2.jpg # |------.... (and so on) # + gtf.Default(dataset_path="monk_v1/monk/system_check_tests/datasets/dataset_cats_dogs_train", model_name="resnet18_v1", num_epochs=5); #Read the summary generated once you run this cell. # + #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed # - # # <a id='4'></a> # # Visualizing trained kernels gtf.Visualize_Kernels(); # <a id='4'></a> # # Visualizing features extracted from images gtf.Visualize_Feature_Maps("monk_v1/monk/system_check_tests/datasets/dataset_cats_dogs_test/1.jpg")
study_roadmaps/1_getting_started_roadmap/10_feature_visualization/1) Feature Visualization using Mxnet-Gluon Backend.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Raising Errors # # It is possible to tell Python to generate an error. This is useful if you want to verify input to a function or stop your code running when something happens that your code does not know how to process. # Take this very simple function for example. This function is only designed to be used with numbers, so we want to make sure we dont pass a string. def square(x): """ Return the square of x. """ if isinstance(x,str): raise ValueError("The argument x must not be a string") else: return x**2 # The function works as expected with a number: square(2) square("a") # When it is passed a string it raises an error, telling the user that the argument x can not be a string. raise TypeError("this is not a number") # You can raise many different kinds of exception, however `ValueError` is generally the most useful. 
Other useful types of error are: raise FileNotFoundError("I can't find that") # # <section class="challenge panel panel-success"> # <div class="panel-heading"> # <h2><span class="fa fa-pencil"></span> What Type of Error?</h2> # </div> # # # <div class="panel-body"> # # <p>The example above:</p> # <div class="codehilite"><pre><span></span><span class="k">def</span> <span class="nf">square</span><span class="p">(</span><span class="n">x</span><span class="p">):</span> # <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span> # <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">&quot;x can not be a string&quot;</span><span class="p">)</span> # <span class="k">else</span><span class="p">:</span> # <span class="k">return</span> <span class="n">x</span><span class="o">**</span><span class="mi">2</span> # </pre></div> # # # <p>uses <code>ValueError</code>, what type of error would be more appropriate?</p> # # </div> # # </section> # # # <section class="solution panel panel-primary"> # <div class="panel-heading"> # <h2><span class="fa fa-eye"></span> Solution</h2> # </div> # # # <div class="panel-body"> # # <p><code>TypeError</code> should be raised when the type (i.e. <code>str</code>, <code>float</code>, <code>int</code>) is incorrect.</p> # # </div> # # </section> # # ## Silent Errors # # Not all programming errors raise an exception, some are errors in the functioning of the code. i.e. this: # This is obviously incorrect, but Python does not know any difference, it executes the code as written and returns a result. # # Most logical errors or "bugs" like this are not so easy to spot! As the complexity of your code increases the odds that mistakes and errors creep in increases. The best way to detect and prevent this kind of error is by writing tests.
05-writing-effective-tests/02-explicit-exceptions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Create DDF dataset from UN International migrant stock 2019 dataset # # In this notebook we are going to demonstrate how to create a DDF dataset with ddf_utils. We will use [International migrant stock: The 2019 revision][UN site] as source and convert the [migrant stock By destination and origin][download link]. Below are screenshots for this source file. # # [UN site]: https://www.un.org/en/development/desa/population/migration/data/estimates2/index.asp # [download link]: https://www.un.org/en/development/desa/population/migration/data/estimates2/data/UN_MigrantStockByOriginAndDestination_2019.xlsx # # Data Tables: # # ![Data Table](./files/image2.png) # # ANNX sheet: # # ![ANNX sheet](./files/image1.png) # ## The plan # # 1. from the excel tables we can see there is a "ANNEX" sheet, which # contains all country/region information. We will convert it to # a geo domain. # 2. 3 data Tables contains total/male/female data. We will create a # domain for gender. # 3. origin and destination are both regions/countries. So they will be # roles in the region/country domain. # 4. there is only one indicator in source: `migrant_stock`. But when # origin = total or destination = total, we can have # `immigrant_stock` or `emmigrant_stock`. So we will have 3 # indicators in the DDF dataset. Also each indicator will have 2 variants: # one with gender domain and one without gender domain. # 5. 
# We will not keep Notes/Type of Data/Sort Order columns

# ## ETL process
#
# Note: This notebook assumed you create a ddf dataset project with `ddf new`
# command and place the source file in `etl/source` and this notebook in
# `etl/notebooks`

import numpy as np
import pandas as pd
# from ddf_utils.dsl import *

# Path to the UN source workbook, relative to etl/notebooks/.
source = '../source/UN_MigrantStockByOriginAndDestination_2019.xlsx'

# first of all, let's create a data reader to load a table into pandas
# DataFrame with correct data types. i.e. data starts from the `Total` column
# should all be numbers(float64)
#
# We can see there are a few things we should process:
#
# 1. we should skip a few lines at the beginning.
# 2. there are `..` inside number columns. we should consider `..` as N/A values.
# 3. the headers in data table span over 2 rows

# NOTE: this function will be re-written below.
def read_source(sheet, skip=14, header=[0, 1]):
    """Read one sheet of the source workbook into a DataFrame.

    `skip` drops the preamble rows at the top of the sheet; `header` selects
    the header rows (the data tables use a two-row header, hence [0, 1]).
    '..' and '' are read as N/A; pandas' default N/A tokens are disabled so
    only these explicit markers are treated as missing.
    """
    return pd.read_excel(source, skiprows=skip, sheet_name=sheet,
                         header=header,
                         na_values=['..', ''],
                         keep_default_na=False)

# now try to load Table 1
table1 = read_source('Table 1', 14)

table1.head()

# +
# function to fix column names: after reading with header=[0, 1] the columns
# form a 2-level MultiIndex. Keep level 0 for the first 6 (metadata) columns
# and level 1 for the rest, flattening to a single level.
def fix_column_names(df_):
    df = df_.copy()
    new_cols = np.r_[
        df.columns.get_level_values(0).values[:6],  # column 6 is `Total`
        df.columns.get_level_values(1).values[6:]]
    df.columns = new_cols
    return df
# -

fix_column_names(table1).head()

table1 = fix_column_names(table1)

# see data types.
table1.dtypes['Total':]

# Now there is a problem. The `Albania` column have `object` type which is not
# the desired data type. We need to double check what goes wrong.
#
# ### Note
#
# Besides checking data types, we should also check if the numbers are loaded
# correctly ("153,011,473" in excel table should be 153011473 in the
# DataFrame). We can check this by viewing the source excel and the DataFrame
# side by side.
#
# Depends on the actual data you are working on, other kind of double checking
# might be required.
def isnumeric(x):
    """Return True if *x* is a plain or NumPy numeric scalar.

    BUGFIX: the original checked ``np.int``, an alias of builtin ``int`` that
    was deprecated in NumPy 1.20 and removed in 1.24 -- and which, being just
    ``int``, never matched NumPy integer scalars such as ``np.int64`` anyway.
    ``np.integer``/``np.floating`` are the correct abstract scalar bases.
    """
    return isinstance(x, (np.floating, np.integer, int, float))

alb = table1['Albania'].dropna()
# Show the non-numeric values that made the column `object`-typed.
alb.loc[~alb.map(isnumeric)]

# This means that there are `-` in the data table. We will treat it as N/A.

# +
# redefine the read_source function, now also treating '-' as N/A
def read_source(sheet, skip=14, header=[0, 1]):
    """Read one sheet of the source workbook; '..', '' and '-' become N/A."""
    return pd.read_excel(source, skiprows=skip, sheet_name=sheet,
                         header=header,
                         na_values=['..', '', '-'],
                         keep_default_na=False)
# -

# Reload all three data tables and confirm every data column is float64 now.
table1 = fix_column_names(read_source('Table 1', 14))
all([x == 'float64' for x in table1.dtypes['Total':]])

table2 = fix_column_names(read_source('Table 2', 14))
all([x == 'float64' for x in table2.dtypes['Total':]])

table3 = fix_column_names(read_source('Table 3', 14))
all([x == 'float64' for x in table3.dtypes['Total':]])

# ### geo domain
# Now let's load the ANNEX table and create geo domain:
#
# 1. we will use `code` as identifier. Now the column values are numbers, we
#    will convert to str.
# 2. rows where code is empty are group names (for example
#    `UN development groups`). We will treat them as entity sets
# 3. columns are indicators for entity set membership too.
#
# In ddf_utils there are data classes for Entity and EntityDomain.

from ddf_utils.model.ddf import Entity, EntityDomain
from ddf_utils.str import to_concept_id

country = read_source('ANNEX', 15, 0)

country.head()

# country.columns

# +
# load all entities
ents = []
current_set = None
# These columns in the ANNEX sheet are non-empty when a country belongs to
# the corresponding grouping; each becomes an entity set.
sets_in_col = ['More Developed Regions', 'Less Developed Regions',
               'Least developed countries', 'High-income Countries',
               'Middle-income Countries', 'Upper-middle-income Countries',
               'Lower-middle-income Countries', 'Low-income Countries',
               'No income group available', 'Sub-Saharan Africa']

for _, row in country.iterrows():
    name = row['Region, subregion, country or area']
    if pd.isnull(row['Code']):
        # it's a set, and rows below this row belong to this set.
        current_set = to_concept_id(name)
        continue
    # it's a country/region: collect its set memberships
    sets = set()
    for s in sets_in_col:
        if not pd.isnull(row[s]):
            sets.add(to_concept_id(s))
    if current_set:
        sets.add(current_set)
    ents.append(Entity(id=str(int(row['Code'])),
                       domain='geo',
                       sets=list(sets),
                       props={'name': name}))
# -

# +
# Noticed that in data table there are "other south" and "other north",
# which are not in the ANNEX table. So we append these 2 entity too
ents.append(
    Entity(id='other_south', domain='geo', sets=[],
           props=dict(name='Other South')))
ents.append(
    Entity(id='other_north', domain='geo', sets=[],
           props=dict(name='Other North')))
# -

# +
domain = EntityDomain('geo', [])
for e in ents:
    domain.add_entity(e)
# -

country_df = pd.DataFrame.from_dict(domain.to_dict())

country_df.head()

# +
# check: if origin and destination share same entities.
# BUGFIX: the original used a for/else here -- since the loop had no `break`,
# the success message printed even when some columns were missing. Collect
# the missing names first and only report success when there are none.
origin_cols = table1.columns[9:]
missing = [c for c in origin_cols if c not in country_df['name'].values]
if missing:
    for c in missing:
        print(f"missing {c}")
else:
    print("all countries in Table 1 are in country domain")
# -

# +
# save to file.
# country_df.to_csv('../../ddf--entities--geo.csv', index=False)
# -

# ### Gender domain
#
# the gender domain is quite simple. We would just create a DataFrame manually.

# +
# gender domain
sexdf = pd.DataFrame([
    ['male', 'Male'],
    ['female', 'Female']
], columns=['gender', 'name'])
# -

# +
# sexdf.to_csv('../../ddf--entities--gender.csv', index=False)
# -

# ### datapoints
#
# Table 1-3 are loaded into DataFrame, we need to do some transformation to
# make them datapoints:
#
# - drop unneeded columns
# - converted origins/destinations names to codes
# - origins are in columns, but should be in rows as a dimension
# - for table 2 and table 3, add the gender column and merge them together
# - filter origin = total (destination = total) to create immigrant (emigrant) data

# +
# create a mapping from name to code.
def _trans(x):
    # Convert a numeric area code to its canonical string form
    # (e.g. 900.0 -> "900"); N/A values pass through unchanged.
    if not pd.isnull(x):
        return str(int(x))
    return x

# Map each human-readable area name to its string code.
name_id_map = country.set_index('Region, subregion, country or area')['Code'].map(_trans).to_dict()
# -

# there are some area doesn't have codes, set here
name_id_map['Total'] = '900'
name_id_map['Other South'] = 'other_south'
name_id_map['Other North'] = 'other_north'

# Columns to keep from the data table: destination code, year, and every
# origin column.
dp_cols = ['Code', 'Year']
for c in origin_cols:
    dp_cols.append(c)
pd.Series(dp_cols)

# +
dp = table1[dp_cols].copy()
dp = dp.dropna(subset=['Code', 'Year'], how='any')
dp['Code'] = dp['Code'].map(int)
dp['Year'] = dp['Year'].map(int)
dp = dp.set_index(['Code', 'Year'])
# rename remaining columns to geo id
dp.columns = dp.columns.map(name_id_map)
# create new dimension
dp = dp.stack()
dp.index.names = ['destination', 'year', 'origin']
dp.name = 'migrant_stock'
# -

dp.head()

# +
# double check: if there are duplicated index
dp.index.has_duplicates
# -

# +
# seems something goes wrong. digging in...
# -

dp[dp.index.duplicated(keep=False)].sort_index()

dp[dp.index.duplicated(keep=False)].reset_index()['destination'].unique()

# +
# so only country id 909 has duplication. let's see what 909 means
# -

country[country['Code'] == 909]

table1[table1['Code'] == 909]

# +
# so Oceania appear twice, and data values are same for both. We can safely
# drop these duplicates.
# -

# +
# create a function for data tables.
def create_datapoints(table):
    """Reshape a wide source table into tidy (destination, origin, year) rows.

    NOTE(review): `columns[6:]` assumes origin columns start at position 6,
    while table1 was sliced with `columns[9:]` in the entity check above --
    TODO confirm the offset is right for every table passed in.
    """
    origin_cols = table.columns[6:]
    for c in origin_cols:
        # double check: if the country/region in the geo domain
        if c not in country_df['name'].values:
            print(f'column {c} is not in geo domain!')
    dp_cols = ['Code', 'Year']
    # plain extend instead of a side-effecting list comprehension
    dp_cols.extend(origin_cols)

    dp = table[dp_cols].copy()
    dp = dp.dropna(subset=['Code', 'Year'], how='any')
    dp['Code'] = dp['Code'].map(int)
    dp['Year'] = dp['Year'].map(int)
    dp = dp.set_index(['Code', 'Year'])
    # origin columns still carry country names -- translate them to geo ids
    dp.columns = dp.columns.map(name_id_map)
    dp = dp.stack().reset_index()
    # print(dp.columns)
    dp.columns = ['destination', 'year', 'origin', 'migrant_stock']
    # Oceania (909) appears twice with identical values -- drop the copies
    dp = dp.drop_duplicates()
    return dp[['destination', 'origin', 'year', 'migrant_stock']]
# -

# also function for emigrant datapoints: rows whose destination is the world
# total.  `destination` holds ints (from Code.map(int)), hence the int 900.
def create_emgire_datapoints(dp):
    dp_emg = dp[(dp.destination == 900)].copy()
    dp_emg = dp_emg[['origin', 'year', 'migrant_stock']]
    dp_emg.columns = ['geo', 'year', 'emigrant_stock']
    return dp_emg

# and immigrant datapoints: rows whose origin is the world total.
# `origin` values come from name_id_map and are strings, hence '900'.
def create_imgire_datapoints(dp):
    dp_img = dp[(dp.origin == '900')].copy()
    dp_img = dp_img[['destination', 'year', 'migrant_stock']]
    dp_img.columns = ['geo', 'year', 'immigrant_stock']
    return dp_img

df = create_datapoints(table1)
df_emg = create_emgire_datapoints(df)
df_img = create_imgire_datapoints(df)

df.head()

# check: if there are still duplicated index
# (was np.all, which only reports whether EVERY row is duplicated; np.any
# is the correct "are there any duplicates left?" check and should be False)
np.any(df.duplicated(subset=['destination', 'origin', 'year']))

df_emg.head()

df_img.head()

# +
# save to files

# df.to_csv('../../ddf--datapoints--migrant_stock--by--destination--origin--year.csv', index=False)
# df_emg.to_csv('../../ddf--datapoints--emigrant_stock--by--geo--year.csv', index=False)
# df_img.to_csv('../../ddf--datapoints--immigrant_stock--by--geo--year.csv', index=False)
# -

# +
# Table 2 and Table 3
# We will create gender dimension for them and merge them together.
def _with_gender(frame, gender, col_order):
    """Tag *frame* with a constant gender column and fix the column order."""
    out = frame.copy()
    out['gender'] = gender
    return out[col_order]


df2 = create_datapoints(table2)
df2_emg = create_emgire_datapoints(df2)
df2_img = create_imgire_datapoints(df2)

df2 = _with_gender(df2, 'male', ['destination', 'origin', 'gender', 'year', 'migrant_stock'])
df2_emg = _with_gender(df2_emg, 'male', ['geo', 'gender', 'year', 'emigrant_stock'])
df2_img = _with_gender(df2_img, 'male', ['geo', 'gender', 'year', 'immigrant_stock'])

# +
df3 = create_datapoints(table3)
df3_emg = create_emgire_datapoints(df3)
df3_img = create_imgire_datapoints(df3)

df3 = _with_gender(df3, 'female', ['destination', 'origin', 'gender', 'year', 'migrant_stock'])
df3_emg = _with_gender(df3_emg, 'female', ['geo', 'gender', 'year', 'emigrant_stock'])
df3_img = _with_gender(df3_img, 'female', ['geo', 'gender', 'year', 'immigrant_stock'])
# -

# stack the male and female tables into one gendered dataset each
df_sex = pd.concat([df2, df3], ignore_index=True)
df_sex_emg = pd.concat([df2_emg, df3_emg], ignore_index=True)
df_sex_img = pd.concat([df2_img, df3_img], ignore_index=True)

# +
# save to files

# df_sex.to_csv('../../ddf--datapoints--migrant_stock--by--destination--origin--gender--year.csv', index=False)
# df_sex_emg.to_csv('../../ddf--datapoints--emigrant_stock--by--geo--gender--year.csv', index=False)
# df_sex_img.to_csv('../../ddf--datapoints--immigrant_stock--by--geo--gender--year.csv', index=False)
# -

# ## Concepts
#
# The source file doesn't come with the properties of concepts so we need to create it manually.
from ddf_utils.model.ddf import Concept

# +
strings = [
    ['name', 'Name'],
    ['domain', 'Domain'],
]
# BUG FIX: the measure id was 'migration_stock', but every datapoint file
# above uses the indicator 'migrant_stock' (see the ddf--datapoints--
# migrant_stock--... filenames); concept ids must match the indicator ids.
measures = [
    ['migrant_stock', 'Migrant Stock'],
    ['emigrant_stock', 'Emigrant Stock'],
    ['immigrant_stock', 'Immigrant Stock']
]
entity_domains = [
    ['geo', 'Geo Location'],
    ['gender', 'Gender']
]
# entity-set concepts are derived from the is--<set> columns of the geo table
entity_sets_geo_ids = [c[4:] for c in country_df.columns if c.startswith('is--')]
entity_sets_geo_names = [c.replace('_', ' ').title() for c in entity_sets_geo_ids]
entity_sets_geo = list(zip(entity_sets_geo_ids, entity_sets_geo_names))
# -

roles = [
    ['destination', 'Destination'],
    ['origin', 'Origin']
]

# +
concepts = []
for e, n in strings:
    concepts.append(Concept(e, 'string', dict(name=n)))
for e, n in measures:
    concepts.append(Concept(e, 'measure', dict(name=n)))
for e, n in entity_domains:
    concepts.append(Concept(e, 'entity_domain', dict(name=n)))
for e, n in entity_sets_geo:
    concepts.append(Concept(e, 'entity_set', dict(name=n, domain='geo')))
for e, n in roles:
    concepts.append(Concept(e, 'role', dict(name=n, domain='geo')))
# -

concepts.append(Concept('year', 'time', dict(name='Time')))

cdf = pd.DataFrame.from_records([x.to_dict() for x in concepts])

# +
# cdf.to_csv('../../ddf--concepts.csv', index=False)
# -

# +
# create datapackage
# run below ddf command in a terminal

# !ddf create_datapackage --update ../../
# -
examples/etl/migrant.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="najlW3aor7Tb" outputId="85ebadd9-6a3a-4fc5-9e07-ea88743df1f1" # %cd /content/ # %ls # + id="Xa9c3g7Fs6RC" #if already cloned PyCLFRTryColab is there # %rm -rf PyCLFRTryColab/ # + id="P6humKkWy9JE" colab={"base_uri": "https://localhost:8080/"} outputId="2fe78f2d-14ba-4d9c-91a3-607f70016116" # %ls # + id="fFD71SR-mWsb" outputId="9185a1b8-da31-42be-bc2f-a4932f2f5b5e" colab={"base_uri": "https://localhost:8080/"} # !apt update # !apt install -y cmake # !pip install dlib # + id="WIT4eQH4ho8l" # !git clone https://github.com/indrajitkurmi/PyCLFRTryColab # + id="6nMmfdfMjwSN" # %ls # + id="WDTTNy4LzxTb" # %cd PyCLFRTryColab/glesLFR/ # %mkdir lib # %ls # + id="6fCY7D33tQCr" # %cd lib/ # %mkdir BuildLib # %ls # + id="8JRGPo3A0GEM" # %cd /content/PyCLFRTryColab/glesLFR/lib/BuildLib # + id="OaJfmdCCnkZZ" # !git clone https://github.com/assimp/assimp # %ls # + id="NFk2lb3itYlx" # %ls # %cd assimp # + id="A0rJderZtzpa" # %ls # + id="f4xCQF5uuU4A" # %mkdir build # + id="NcsgGtWouYP4" # %ls # + id="lM4Eqom1umCn" # %cd build/ # + id="kvwimFwTupI_" # !cmake .. 
-G 'Unix Makefiles' # + id="_e3J26aItgth" # !make -j4 # + id="G2_qbzWjxTRI" # %ls # + id="bAg3MzM7xW2F" # %cd bin/ # + id="A8CSc9j9xaCW" # %ls # + id="HGfkImGdyHkL" # %mv -v /content/PyCLFRTryColab/glesLFR/lib/BuildLib/assimp/build/bin/* /content/PyCLFRTryColab/glesLFR/lib/ # + id="64js_w2B2DEx" # %cd /content/PyCLFRTryColab/glesLFR/lib/ # %ls # + id="GtckhOc8nnOw" outputId="f9451d7d-a318-4626-8e5e-36c43a26da11" colab={"base_uri": "https://localhost:8080/"} # %cd /content/PyCLFRTryColab/glesLFR/lib/BuildLib/ # + id="k9INKWcD6udF" outputId="f844fcb4-416a-4d78-8062-0d6850f62243" colab={"base_uri": "https://localhost:8080/"} # %ls # + id="tcr0H5Wa6wdD" # %rm -rf GLFW-CMake-starter/ # + id="jRj3n7f8607z" outputId="9c66e6a6-9e22-40c5-ae5a-40efd24281a1" colab={"base_uri": "https://localhost:8080/"} # %ls # + id="zLPVjrXy1xMK" outputId="3bdd4222-1f37-43c1-86e3-e1fd09ce3dae" colab={"base_uri": "https://localhost:8080/"} # !git clone https://github.com/glfw/glfw # %ls # + id="akDQHl2w2ZaY" outputId="dd6a383b-4411-407b-ba38-7b4197a57f64" colab={"base_uri": "https://localhost:8080/"} # %cd /content/PyCLFRTryColab/glesLFR/lib/BuildLib/glfw/ # %ls # + id="07MtMosD3ZYn" outputId="b6898709-3eb2-439c-fd92-4c01ffcab5c9" colab={"base_uri": "https://localhost:8080/"} # !apt-get install -y xorg-dev # + id="9V_PtSj68Tnz" outputId="151dca5f-0b5a-4f20-fd5b-96e2ea702ea3" colab={"base_uri": "https://localhost:8080/"} # !apt-get install -y pkg-config # + id="QgGWcvGd35t4" outputId="80ec52ae-0a20-4bf0-8d7e-37a04cfc8b55" colab={"base_uri": "https://localhost:8080/"} # %mkdir build # %ls # + id="aRbzw4VO4AHG" outputId="59ee718b-4554-4a04-b4ad-7f2b94d4f394" colab={"base_uri": "https://localhost:8080/"} # %cd build/ # + id="z6Yxiaow4F8G" outputId="81657aed-5dbb-4af5-9001-b343ddd20c70" colab={"base_uri": "https://localhost:8080/"} # !cmake .. 
# + id="GHyO36wZ9wuB" outputId="d10758a4-bf23-492e-c0fd-8ad0ba2b1ca7" colab={"base_uri": "https://localhost:8080/"} # %ls # + id="7JbXa6gn15JB" outputId="4b869833-debb-496e-b20d-af46b716db86" colab={"base_uri": "https://localhost:8080/"} # !make all -j4 # + id="Mb7qzaXh4akf" outputId="63dca804-87d2-453a-c61b-a15e5df9de92" colab={"base_uri": "https://localhost:8080/"} # %ls # + id="OU6DZoFb4ppO" outputId="3287afe8-32d7-426b-fce7-d232dbd9165a" colab={"base_uri": "https://localhost:8080/"} # %cd src/ # %ls # + id="WrYPnHsU-oMW" outputId="a6b2ac9d-102b-4337-c169-ed296756836f" colab={"base_uri": "https://localhost:8080/"} # !cmake .. -DBUILD_SHARED_LIBS=ON # + id="d2EvpCxp-AJp" outputId="b745fff4-5517-4084-fb80-c50703409d0c" colab={"base_uri": "https://localhost:8080/"} # !make # + id="zc_79ZpG-ERQ" outputId="24bfc45a-5515-4107-d4d1-145c87364e2d" colab={"base_uri": "https://localhost:8080/"} # %ls # + id="sTRQGMIL_S0H" outputId="83a1022e-c1ee-4a38-ecc0-9ddced10ffb5" colab={"base_uri": "https://localhost:8080/"} # %mv -v /content/PyCLFRTryColab/glesLFR/lib/BuildLib/glfw/build/src/* /content/PyCLFRTryColab/glesLFR/lib/ # + id="Z2Ov6p6M_dH3" outputId="c07286f2-7205-4696-f9db-5c86e677617c" colab={"base_uri": "https://localhost:8080/"} # %cd /content/PyCLFRTryColab/glesLFR/src/ # %ls # + id="fQ5423YpiqyI" outputId="f03471aa-56f4-48ca-995f-4e5de0aae6ea" colab={"base_uri": "https://localhost:8080/"} # !python TestRendererBind_setup_Indrajit.py build_ext --inplace # + id="cpdXal2xcYAJ" print('ProgramStarted') import glesLFR_Indrajit import json import numpy as np import os import cv2 print('glesLFR Import') # + id="2aa_N3B7cYAK" def ReadJsonPosesFiles(PyClassObject,PosesFilePath): with open(PosesFilePath) as PoseFile: PoseFileData = json.load(PoseFile) NoofPoses = len(PoseFileData['images']) PoseFileImagesData = PoseFileData['images'] if len(PoseFileData['images']) > 0: PyClassObject.Py_allocate(len(PoseFileData['images'])) for i in range (0,len(PoseFileData['images'])): 
ImageName = PoseFileImagesData[i]['imagefile'] #Convert List of List to Np array PoseMatrix = PoseFileImagesData[i]['M3x4'] PoseMatrixNumpyArray = np.array([], dtype=np.double) for k in range(0,len(PoseMatrix)): PoseMatrixNumpyArray = np.append(PoseMatrixNumpyArray, np.asarray(PoseMatrix[k],dtype=np.double)) PyClassObject.namespushback(ImageName) PyClassObject.posespushback(np.array(PoseMatrixNumpyArray)) return NoofPoses def RenderInitialize(PyClassObject,PosesFilePath,ImageLocation,ObjModelPath,ObjModelImagePath): PyClassObject.RenderInitializeP1() NofPoses = ReadJsonPosesFiles(PyClassObject, PosesFilePath) print(NofPoses) PyClassObject.Py_LoadDemModel(ObjModelPath,ObjModelImagePath) for i in range (0,NofPoses): ImageName = PyClassObject.GetnamesIndex(i) print(ImageName) LoadImageName = ImageName.replace('.tiff','.png') Image = cv2.imread(os.path.join(ImageLocation,LoadImageName),0) # height, width, number of channels in image height = Image.shape[0] width = Image.shape[1] if len(Image.shape) == 2 : nrComponents = 1 else : nrComponents = Image.shape[2] PyClassObject.LoadImageToC_8bit_1ch(Image) PyClassObject.Py_GenrateTextureID() PyClassObject.Py_BindImageWithTextureID(i,height,width,nrComponents) print('Binding Texture Successful') PyClassObject.RenderInitializeP2() print('Inititalization Finished') # + id="jGqr75D7cYAM" PosesFilePath = '../data/T20200207F2/thermal_GPS_Corr.json' ImageLocation = '../data/T20200207F2/thermal_ldr512' ObjModelPath = '../data/T20200207F2/dem.obj' ObjModelImagePath = '../data/T20200207F2/dem.png' FocalLength = 50.815436217896945 PyLFClass = glesLFR_Indrajit.PyLightfieldClass(0) PyLFClass.Py_setCameraFocalLength(FocalLength) PyLFClass.Py_setProjectionMatrix() RenderInitialize(PyLFClass,PosesFilePath,ImageLocation,ObjModelPath,ObjModelImagePath) ImageReturned1 = PyLFClass.RenderImageOnce('RenderedImage1.png') cv2.imwrite('Image1InMainApp.png', ImageReturned1) ImageReturned2 = PyLFClass.RenderImageOnce('RenderedImage2.png') 
cv2.imwrite('Image2InMainApp.png', ImageReturned2) PyLFClass.TerminateRendererOnceFinished() #ReadJsonPosesFiles('../data/T20200207F2/thermal_GPS_Corr.json') print('PY LightFieldClass generated') #PyLFClass = glesLFR_Indrajit.PyLightfieldClass(0) #glesLFR_Indrajit.Py_PrintPyLightFieldInstanceInfo(PyLFClass) #glesLFR_Indrajit.Py_Initiaterender() #PyLFClass.RenderImageOnce() #glesLFR_Indrajit.Py_Completerender() #glesLFR_Indrajit.Py_Initiaterender() del(PyLFClass)
glesLFR/src/TryPyCLFR_Colab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Time Series Analysis

# Time series is a collection of data points collected at constant time intervals, such as the temperature of london city centre at 1pm everyday or the closing value of a stock. These are analysed to determine the long term trend so as to forecast the future.

# In the cell below is the import that will be required in the following tutorial

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 15, 6
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima_model import ARIMA
from datetime import datetime

# We will now read in the Airpassengers csv that has been provided in the folder and examine the head of the dataframe.

data = pd.read_csv('AirPassengers.csv')
# print() with a single argument behaves identically on Python 2 and 3,
# so the notebook stays runnable on either interpreter
print(data.head())
print(data.dtypes)

# We can see in the dataframe above that we have two columns, one represents the month of passengers flying and the second represents how many passengers flew during that month. When we look at the data type of the month column we see that it is being read in as an object. To read the month column as a time series we have to pass parameters that will format the column into a datetime datatype

# use datetime.strptime directly -- pd.datetime is a deprecated alias of this
# very class and was removed in recent pandas versions
dateparse = lambda dates: datetime.strptime(dates, '%Y-%m')
data = pd.read_csv('AirPassengers.csv', parse_dates=['Month'], index_col='Month', date_parser=dateparse)
print(data.head())

# The parameters above have been broken down here and purpose explained.
# 1. parse_dates: This specifies the column which contains the date-time information. As we saw above, the column name is ‘Month’.
#
# 2. index_col: A key idea behind using Pandas for TS data is that the index has to be the variable depicting date-time information. So this argument tells pandas to use the ‘Month’ column as index.
#
# 3. date_parser: This specifies a function which converts an input string into datetime variable. By default Pandas reads data in format ‘YYYY-MM-DD HH:MM:SS’. If the data is not in this format, the format has to be manually defined. Something similar to the dataparse function defined here can be used for this purpose.

# We will then convert the dataframe into a Series object to make it easier for us to index. This is simply making into a one dimensional array instead of the 2D array we had with the dataframe

ts = data["#Passengers"]
ts.head(10)

# To get the value in the series object, this can be done in two ways: One by using the string constant of the index and the second method is to import the datetime function from the datetime library.

# +
#subset by string constant of the index
ts['1949-01-01']

#using the datetime function
from datetime import datetime
ts[datetime(1949,1,1)]

# +
#1. Specify the entire range:
ts['1949-01-01':'1949-05-01']

#2. Use ':' if one of the indices is at ends:
ts[:'1949-05-01']
# -

# ## Stationarity of a Time Series

# A time series has stationarity if a shift in time doesn’t cause a change in the shape of the distribution. Basic properties of the distribution like the mean , variance and covariance are constant over time. It is important as most models make the assumption that the time series is stationary.

# The mean of the series should not be a function of time rather should be a constant. The image below has the left hand graph satisfying the condition whereas the graph in red has a time dependent mean.

# ![title](Mean_nonstationary.png)

# The variance of the series should not a be a function of time. Following graph depicts what is and what is not a stationary series.

# ![title](Var_nonstationary.png)

# The covariance of the i th term and the (i + m) th term should not be a function of time. In the following graph, you will notice the spread becomes closer as the time increases. Hence, the covariance is not constant with time for the ‘red series’.

# ![title](Cov_nonstationary.png)

# ## Testing Stationarity

# The first step in seeing whether are data is stationary is to visualize the data, since we had previously turned the dataframe into a series this is very easy to do and we can simply plot the series.

plt.plot(ts)

# From the graph above it is clear that there is an increasing trend, however in other datasets this may not be so clear to infer from the graph. We look at more formal methods of looking at testing stationarity which include:

# Plotting Rolling Statistic: we can plot the moving average or variance and see if it varies with time

# Dickey-Fuller Test: This is one of the statistical test of stationary. The results are composed of Test statistic
# and a critical value. If test statistic is less than critical value we can say that the time
# is stationary.

from statsmodels.tsa.stattools import adfuller
def test_stationarity(timeseries):
    """Plot rolling mean/std of *timeseries* and print Dickey-Fuller results."""

    #Determing rolling statistics (12-month window = one seasonal cycle)
    rolmean = timeseries.rolling(window=12).mean()
    rolstd = timeseries.rolling(window=12).std()

    #Plot rolling statistics:
    orig = plt.plot(timeseries, color='blue',label='Original')
    mean = plt.plot(rolmean, color='red', label='Rolling Mean')
    std = plt.plot(rolstd, color='black', label = 'Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    plt.show(block=False)

    #Perform Dickey-Fuller test:
    print('Results of Dickey-Fuller Test:')
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
    for key,value in dftest[4].items():
        dfoutput['Critical Value (%s)'%key] = value
    print(dfoutput)

test_stationarity(ts)

# ## Making the Data Stationary

# In most real world situations the data is unlikely to be stationary from the outset, however there have been techniques to wrangle that data to be close to stationary. Factors that make a time series non-stationary are trend and seasonality.

# Trend: Varying mean over time. The price of Freddos increasing over the previous years

# Seasonality: A spike in retail close to holiday times such as christmas.

# To try and eliminate trend we will use transformation functions on the data the one that we will try first is a log transformation as it will penalise higher values.

ts_log = np.log(ts)
plt.plot(ts_log)

# ### Moving Average

# In this approach, we take average of ‘k’ consecutive values depending on the frequency of time series. Here we can take the average over the past 1 year, i.e. last 12 values. Pandas has specific functions defined for determining rolling statistics.

moving_avg = ts_log.rolling(window=12).mean()
plt.plot(ts_log)
plt.plot(moving_avg, color='red')

# The red line shows the rolling mean. Lets subtract this from the original series. Note that since we are taking average of last 12 values, rolling mean is not defined for first 11 values. This can be observed as:

ts_log_moving_avg_diff = ts_log - moving_avg
ts_log_moving_avg_diff.head(12)

# The first 11 values can be dropped and then we will check the stationarity

ts_log_moving_avg_diff.dropna(inplace=True)
test_stationarity(ts_log_moving_avg_diff)

# This looks like a much better series. The rolling values appear to be varying slightly but there is no specific trend. Also, the test statistic is smaller than the 5% critical values so we can say with 95% confidence that this is a stationary series.

# ## Differencing

# To reduce the seasonality, in this approach we take the difference of an observation at a particular instant with the instant before it (t - (t-1)).
ts_log_diff = ts_log - ts_log.shift()
plt.plot(ts_log_diff)

# We will now check the stationarity of the Residuals, which is again what is left after trend and seasonality have been modelled separately

# first-differencing leaves a NaN in the first row -- drop it before testing
ts_log_diff.dropna(inplace=True)
test_stationarity(ts_log_diff)

# We can see that the mean and std variations have small variations with time. Also, the Dickey-Fuller test statistic is less than the 10% critical value, thus the TS is stationary with 90% confidence

# ## Forecasting a Time Series

# We will be using an ARIMA model, which takes the parameters: timeseries, p,d and q, these are explained in the theory notebook as well as an explanation of what an ARIMA model is. To find the parameters p and q we perform the following methods: Autocorrelation function and a Partial Autocorrelation Function.

#ACF and PACF plots:
from statsmodels.tsa.stattools import acf, pacf

lag_acf = acf(ts_log_diff, nlags=20)
lag_pacf = pacf(ts_log_diff, nlags=20, method='ols')

# +
#Plot ACF:
plt.subplot(121)
plt.plot(lag_acf)
plt.axhline(y=0,linestyle='--',color='gray')
# 95% confidence bounds at +/- 1.96 / sqrt(N)
plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.title('Autocorrelation Function')

#Plot PACF:
plt.subplot(122)
plt.plot(lag_pacf)
plt.axhline(y=0,linestyle='--',color='gray')
plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.title('Partial Autocorrelation Function')
plt.tight_layout()
# -

# The dotted lines on the graph represent the confidence interval, these are used to determine P and Q.
#
# q- We get from the Autocorrelation Function graph where the line crosses the upper confidence interval for the first time which in this case is 2.
#
# p- We get from the Partial Autocorrelation Function graph where it crosses the upper confidence interval for the first time which is also 2.

# ### Model

# +
from statsmodels.tsa.arima_model import ARIMA

model = ARIMA(ts_log, order=(2, 1, 2))  # (p, d, q) taken from the ACF/PACF plots above
results_ARIMA = model.fit(disp=-1)
plt.plot(ts_log_diff)
plt.plot(results_ARIMA.fittedvalues, color='red')
plt.title('RSS: %.4f'% sum((results_ARIMA.fittedvalues-ts_log_diff)**2))
# -

# Now that we have predicted results we will have to rescale them back to the original scale to compare to the original time series, as we previously transformed them using the logarithm function.

predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)
print(predictions_ARIMA_diff.head())

# If you notice the value of 1949-01-01 is missing this is because we took lag of one.The way to convert the differencing to log scale is to add these differences consecutively to the base number. An easy way to do it is to first determine the cumulative sum at index and then add it to the base number.

predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
print(predictions_ARIMA_diff_cumsum.head())

# Here the first element is base number itself and from thereon the values cumulatively added. Last step is to take the exponent and compare with the original series.

# .iloc replaces the long-removed .ix for positional access to the first value
predictions_ARIMA_log = pd.Series(ts_log.iloc[0], index=ts_log.index)
predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0)
predictions_ARIMA_log.head()

# We will now plot our predictions against the original time series in its original scale.

predictions_ARIMA = np.exp(predictions_ARIMA_log)
plt.plot(ts)
plt.plot(predictions_ARIMA)
plt.title('RMSE: %.4f'% np.sqrt(sum((predictions_ARIMA-ts)**2)/len(ts)))
Time_Series_Analysis/Time_Series_Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pygplates
import numpy as np
import matplotlib.pyplot as plt
#########
# %matplotlib inline

# PALEOMAP plate-rotation model and coastline geometries.
rotation_model = pygplates.RotationModel('Data/PALEOMAP_PlateModel.rot')
coastlines = pygplates.FeatureCollection('Data/PALEOMAP_coastlines.gpmlz')

# Keep only coastline features on plate 101 whose geometries exceed a small
# area threshold (filters out tiny islands/fragments).
cs = []
for coastline in coastlines:
    if coastline.get_reconstruction_plate_id()==101:
        for geometry in coastline.get_all_geometries():
            if geometry.get_area()>0.01:
                cs.append(coastline)

csf = pygplates.FeatureCollection(cs)

plt.figure(figsize=(16,4))

# Each reconstruction time gets its own horizontal band of width
# `offset_factor`; the plot reads as a time axis from 500 Ma to 0 Ma.
offset = 0
offset_factor = 60
time_list = np.arange(500,-1,-50)

for time in time_list:
    rcsf = []
    pygplates.reconstruct(csf,rotation_model,rcsf,float(time))
    for rcs in rcsf:
        #for geometry in rcs.get_reconstructed_geometry():
        # Recentre each geometry on its centroid longitude so every time
        # slice plots around its own offset rather than true longitude.
        mean_lon = rcs.get_reconstructed_geometry().get_boundary_centroid().to_lat_lon_point().get_longitude()
        plt.plot(rcs.get_reconstructed_geometry().to_lat_lon_array()[:,1]-mean_lon+offset, rcs.get_reconstructed_geometry().to_lat_lon_array()[:,0],'r')
    offset += offset_factor

# Tick each band with its reconstruction age.
plt.xticks(np.arange(0,len(time_list),1)*offset_factor)
plt.gca().set_xticklabels(time_list)
plt.gca().set_aspect('equal')
plt.xlabel('Age [Ma]')
plt.grid()
plt.show()

# +
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.shapereader as shpreader
import numpy as np
# %matplotlib inline

paleolithology_features = pygplates.FeatureCollection('Data/boucot_paleolithology_combined.shp')

def get_paleolithology(point_features,CODE,age_min=-1.,age_max=4000):
    """Reconstruct paleolithology points of lithology code *CODE*.

    Points are filtered to valid-time windows overlapping [age_min, age_max]
    and rotated to their mid-valid-time position.  Returns parallel lists of
    reconstructed longitudes, latitudes and ages (Ma).
    """
    pX = []
    pY = []
    pAge = []
    for point in point_features:
        lithcode = point.get_shapefile_attribute('LithCode')
        #print point.get_valid_time()
        # Skip plate id 0 (unassigned) and features outside the age window.
        if lithcode==CODE and point.get_reconstruction_plate_id()!=0 \
        and point.get_valid_time()[0]<=age_max and point.get_valid_time()[1]>=age_min:
            # Reconstruct at the midpoint of the feature's valid-time span.
            BirthTime = np.mean(point.get_valid_time())
            pAge.append(BirthTime)
            point_rotation = rotation_model.get_rotation(BirthTime, point.get_reconstruction_plate_id(), anchor_plate_id=0)
            reconstructed_point = point_rotation * point.get_geometry()
            # to_lat_lon() is (lat, lon); X is longitude, Y latitude.
            pX.append(reconstructed_point.to_lat_lon()[1])
            pY.append(reconstructed_point.to_lat_lon()[0])
    return pX,pY,pAge

# 'E' is the evaporite lithology code (see plot title below).
pX,pY,pAge = get_paleolithology(paleolithology_features,'E')

fig = plt.figure(dpi=100)
ax_map = fig.add_subplot(projection=ccrs.Mollweide(central_longitude=0))
ax_map.coastlines(resolution='10m', color='grey', linewidth=0.25)
I1 = ax_map.scatter(pX,pY, marker='o', c=np.asarray(pAge), s=40+(5000/len(pX)), transform=ccrs.PlateCarree(), edgecolor='none', alpha=0.5, vmin=0, vmax=500, cmap=plt.cm.gnuplot)
ax_map.set_global()
plt.title('Evaporites')

# Latitude magnitude versus age for the same points.
plt.figure()
plt.plot(pAge,np.abs(pY),'o',alpha=0.1)
plt.show()

# +
min_time = 0
max_time = 500
cmap = plt.cm.gnuplot_r
code = 'E'

dat = []
BinSize = 5
for age in np.arange(0,501,50):
    pX,pY,pAge = get_paleolithology(paleolithology_features,code,age,age+50)
    res = np.histogram2d(pX,pY,bins=(np.arange(-180,180.1,BinSize),np.arange(-90,90.1,BinSize)))
    # res[0]/res[0] maps non-zero counts to 1 and zero counts to NaN, so the
    # nansum over longitude (axis=0) counts occupied cells per latitude bin.
    dat.append(np.nansum(res[0]/res[0],axis=0))

# Stack the per-age-slice latitude occupancy curves against the latitude
# bin centres (res[2] are the latitude bin edges).
plt.stackplot(res[2][:-1]+BinSize/2,dat)
plt.xlim(90,-90)
plt.show()
# -
notebooks/time-latitude-plots.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/celsomax/aulas-html-codigos/blob/master/matpltlib.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="GJ9lGI35AS9Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="f39b62e0-d236-4b2d-cea0-f95c5291fc6d"
# hello-world sanity checks
print('Ola mundo')

# + id="eGmIl8O7BxgC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="8aac6efd-cc7c-4266-b6c2-c10dd00e7ddf"
print('Ola mundo 02')

# + [markdown] id="xj3xU8NfBwXj" colab_type="text"
#

# + id="aSRpK-oCAoGw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="75416e58-1607-4055-f513-b1f55c1ee5a1"
# Minimal matplotlib demo: one line segment between two points,
# with a title and axis labels.
import matplotlib.pyplot as plt

xs = [1, 2]
ys = [2, 3]

plt.title("Primeiro gráfico matplotlib - TITLE")
plt.xlabel('Eixo X')
plt.ylabel('Eixo Y')

plt.plot(xs, ys)
plt.show()
matpltlib.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="q8vHMOtbxH4y" # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="txNTHnugxKGe" # # Colab demonstration of KIP and Label Solve algorithms # + [markdown] id="nHXSlyfOxRiL" # This colab implements the KIP (Kernel Inducing Point) and Label Solve algorithms introduced in the paper [Dataset Metalearning from Kernel Ridge-Regression](https://arxiv.org/abs/2011.00050). These algorithms allow for dataset distillation, which produces a small subset of learned images and/or labels which when trained upon perform as well as a much larger dataset for image classification classification tasks. 
# + [markdown] id="ETZ1YWxfn_q2" # # Imports # + id="eAsNM11umxLz" import functools from jax.experimental import optimizers import jax import jax.config from jax.config import config as jax_config jax_config.update('jax_enable_x64', True) # for numerical stability, can disable if not an issue from jax import numpy as jnp from jax import scipy as sp import numpy as np import tensorflow_datasets as tfds import matplotlib.pyplot as plt # %matplotlib inline # + id="8RRu_vG2nbjF" # !pip install -q git+https://www.github.com/google/neural-tangents # + id="6wgJY0hqncuM" import neural_tangents as nt from neural_tangents import stax # + [markdown] id="L5MsuE5poiWH" # # Define Parameters # + id="lbyEeP8FxOKM" # architecture params ARCHITECTURE = 'FC' #@param ['FC', 'Conv', 'Myrtle']; choice of neural network architecture yielding the corresponding NTK DEPTH = 1#@param {'type': int}; depth of neural network WIDTH = 1024 #@param {'type': int}; width of finite width neural network; only used if parameterization is 'standard' PARAMETERIZATION = 'ntk' #@param ['ntk', 'standard']; whether to use standard or NTK parameterization, see https://arxiv.org/abs/2001.07301 # dataset DATASET = 'cifar10' #@param ['cifar10', 'mnist'] # training params LEARNING_RATE = 4e-2 #@param {'type': float}; SUPPORT_SIZE = 100 #@param {'type': int}; number of images to learn TARGET_BATCH_SIZE = 5000 #@param {'type': int}; number of target images to use in KRR for each step LEARN_LABELS = False #@param {'type': bool}; whether to optimize over support labels during training # + [markdown] id="J_MMTsKRoNyR" # # Load Data # + id="HgF2vzW1oPZF" def get_tfds_dataset(name): assert name in ['cifar10', 'mnist'] ds_train, ds_test = tfds.as_numpy( tfds.load( name, split=['train', 'test'], batch_size=-1, as_dataset_kwargs={'shuffle_files': False})) return ds_train['image'], ds_train['label'], ds_test['image'], ds_test['label'] def one_hot(x, num_classes, center=True, dtype=np.float32): assert len(x.shape) == 1 
one_hot_vectors = np.array(x[:, None] == np.arange(num_classes), dtype) if center: one_hot_vectors = one_hot_vectors - 1. / num_classes return one_hot_vectors def get_normalization_data(arr): channel_means = np.mean(arr, axis=(0, 1, 2)) channel_stds = np.std(arr, axis=(0, 1, 2)) return channel_means, channel_stds def normalize(arr, mean, std): return (arr - mean) / std X_TRAIN_RAW, LABELS_TRAIN, X_TEST_RAW, LABELS_TEST = get_tfds_dataset(DATASET) channel_means, channel_stds = get_normalization_data(X_TRAIN_RAW) X_TRAIN, X_TEST = normalize(X_TRAIN_RAW, channel_means, channel_stds), normalize(X_TEST_RAW, channel_means, channel_stds) Y_TRAIN, Y_TEST = one_hot(LABELS_TRAIN, 10), one_hot(LABELS_TEST, 10) # + [markdown] id="w8yR-mNeoP4K" # # Define Kernel # + id="9sLlI8URoTGv" # define architectures def FullyConnectedNetwork( depth, width, W_std = np.sqrt(2), b_std = 0.1, num_classes = 10, parameterization = 'ntk', activation = 'relu'): """Returns neural_tangents.stax fully connected network.""" activation_fn = stax.Relu() dense = functools.partial( stax.Dense, W_std=W_std, b_std=b_std, parameterization=parameterization) layers = [stax.Flatten()] for _ in range(depth): layers += [dense(width), activation_fn] layers += [stax.Dense(num_classes, W_std=W_std, b_std=b_std)] return stax.serial(*layers) def FullyConvolutionalNetwork( depth, width, W_std = np.sqrt(2), b_std = 0.1, num_classes = 10, parameterization = 'ntk', activation = 'relu'): """Returns neural_tangents.stax fully convolutional network.""" activation_fn = stax.Relu() conv = functools.partial( stax.Conv, W_std=W_std, b_std=b_std, padding='SAME', parameterization=parameterization) for _ in range(depth): layers += [conv(width, (3,3)), activation_fn] layers += [stax.Flatten(), stax.Dense(num_classes, W_std=W_std, b_std=b_std)] return stax.serial(*layers) def MyrtleNetwork( depth, width, W_std = np.sqrt(2), b_std = 0.1, num_classes = 10, parameterization = 'ntk', activation = 'relu'): """Returns 
neural_tangents.stax Myrtle network.""" layer_factor = {5: [1, 1, 1], 7: [1, 2, 2], 10: [2, 3, 3]} if depth not in layer_factor.keys(): raise NotImplementedError( 'Myrtle network withd depth %d is not implemented!' % depth) activation_fn = stax.Relu() layers = [] conv = functools.partial( stax.Conv, W_std=W_std, b_std=b_std, padding='SAME', parameterization=parameterization) layers += [conv(width, (3, 3)), activation_fn] # generate blocks of convolutions followed by average pooling for each # layer of layer_factor except the last for block_depth in layer_factor[depth][:-1]: for _ in range(block_depth): layers += [conv(width, (3, 3)), activation_fn] layers += [stax.AvgPool((2, 2), strides=(2, 2))] # generate final blocks of convolution followed by global average pooling for _ in range(layer_factor[depth][-1]): layers += [conv(width, (3, 3)), activation_fn] layers += [stax.GlobalAvgPool()] layers += [ stax.Dense(num_classes, W_std, b_std, parameterization=parameterization) ] return stax.serial(*layers) def get_kernel_fn(architecture, depth, width, parameterization): if architecture == 'FC': return FullyConnectedNetwork(depth=depth, width=width, parameterization=parameterization) elif architecture == 'Conv': return FullyConvolutionalNetwork(depth=depth, width=width, parameterization=parameterization) elif architecture == 'Myrtle': return MyrtleNetwork(depth=depth, width=width, parameterization=parameterization) else: raise NotImplementedError(f'Unrecognized architecture {architecture}') # + id="xvlwOxjC8I_9" _, _, kernel_fn = get_kernel_fn(ARCHITECTURE, DEPTH, WIDTH, PARAMETERIZATION) KERNEL_FN = functools.partial(kernel_fn, get='ntk') # + [markdown] id="hEOv9lzuoTv8" # # Run KIP # + id="XHfPTAAmrBIR" def class_balanced_sample(sample_size: int, labels: np.ndarray, *arrays: np.ndarray, **kwargs: int): """Get random sample_size unique items consistently from equal length arrays. The items are class_balanced with respect to labels. 
Args: sample_size: Number of elements to get from each array from arrays. Must be divisible by the number of unique classes labels: 1D array enumerating class label of items *arrays: arrays to sample from; all have same length as labels **kwargs: pass in a seed to set random seed Returns: A tuple of indices sampled and the corresponding sliced labels and arrays """ if labels.ndim != 1: raise ValueError(f'Labels should be one-dimensional, got shape {labels.shape}') n = len(labels) if not all([n == len(arr) for arr in arrays[1:]]): raise ValueError(f'All arrays to be subsampled should have the same length. Got lengths {[len(arr) for arr in arrays]}') classes = np.unique(labels) n_classes = len(classes) n_per_class, remainder = divmod(sample_size, n_classes) if remainder != 0: raise ValueError( f'Number of classes {n_classes} in labels must divide sample size {sample_size}.' ) if kwargs.get('seed') is not None: np.random.seed(kwargs['seed']) inds = np.concatenate([ np.random.choice(np.where(labels == c)[0], n_per_class, replace=False) for c in classes ]) return (inds, labels[inds].copy()) + tuple( [arr[inds].copy() for arr in arrays]) def make_loss_acc_fn(kernel_fn): @jax.jit def loss_acc_fn(x_support, y_support, x_target, y_target, reg=1e-6): y_support = jax.lax.cond(LEARN_LABELS, lambda y: y, jax.lax.stop_gradient, y_support) k_ss = kernel_fn(x_support, x_support) k_ts = kernel_fn(x_target, x_support) k_ss_reg = (k_ss + jnp.abs(reg) * jnp.trace(k_ss) * jnp.eye(k_ss.shape[0]) / k_ss.shape[0]) pred = jnp.dot(k_ts, sp.linalg.solve(k_ss_reg, y_support, sym_pos=True)) mse_loss = 0.5*jnp.mean((pred - y_target) ** 2) acc = jnp.mean(jnp.argmax(pred, axis=1) == jnp.argmax(y_target, axis=1)) return mse_loss, (mse_loss, acc) return loss_acc_fn def get_update_functions(init_params, kernel_fn, lr): opt_init, opt_update, get_params = optimizers.adam(lr) opt_state = opt_init(init_params) loss_acc_fn = make_loss_acc_fn(kernel_fn) grad_loss = jax.grad(lambda params, x_target, 
y_target: loss_acc_fn(params['x'], params['y'], x_target, y_target), has_aux=True) @jax.jit def update_fn(step, opt_state, params, x_target, y_target): dparams, aux = grad_loss(params, x_target, y_target) return opt_update(step, dparams, opt_state), aux return opt_state, get_params, update_fn def train(num_train_steps, log_freq=20, seed=1): _, labels_init, x_init_raw, y_init = class_balanced_sample(SUPPORT_SIZE, LABELS_TRAIN, X_TRAIN_RAW, Y_TRAIN, seed=seed) x_init = normalize(x_init_raw, channel_means, channel_stds) params_init = {'x': x_init, 'y': y_init} params_init_raw = {'x': x_init_raw, 'y': labels_init} opt_state, get_params, update_fn = get_update_functions(params_init, KERNEL_FN, LEARNING_RATE) params = get_params(opt_state) loss_acc_fn = make_loss_acc_fn(KERNEL_FN) _, (test_loss, test_acc) = loss_acc_fn(params['x'], params['y'], X_TEST, Y_TEST) # compute in batches for expensive kernels print('initial test loss:', test_loss) print('initial test acc:', test_acc) for i in range(1,num_train_steps+1): # full batch gradient descent _, _, x_target_batch, y_target_batch = class_balanced_sample(TARGET_BATCH_SIZE, LABELS_TRAIN, X_TRAIN, Y_TRAIN) opt_state, aux = update_fn(i, opt_state, params, x_target_batch, y_target_batch) train_loss, train_acc = aux params = get_params(opt_state) if i % log_freq == 0: print(f'----step {i}:') print('train loss:', train_loss) print('train acc:', train_acc) _, (test_loss, test_acc) = loss_acc_fn(params['x'], params['y'], X_TEST, Y_TEST) # compute in batches for expensive kernels print('test loss:', test_loss) print('test acc:', test_acc) return params, params_init, params_init_raw # + [markdown] id="T6AufNvmf5Y6" # ## Run KIP to learn SUPPORT_SIZE=100 number of images using FC1 kernel. Algorithm converges rapidly (less than 300 training steps needed in below run). 
# + id="GGH8WKdi8CZY" executionInfo={"status": "ok", "timestamp": 1613093073466, "user_tz": 480, "elapsed": 30490, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiC2RHg9CttdBvSLZKiiB1FAFpomEOdzYsOolaq=s64", "userId": "09280242061829821954"}} outputId="ec1b2a77-75aa-4973-8913-781c5c20666e" params_final, params_init, params_init_raw = train(300) # + colab={"height": 661} id="vtqrqHoYRDpE" executionInfo={"status": "ok", "timestamp": 1613093077454, "user_tz": 480, "elapsed": 3978, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiC2RHg9CttdBvSLZKiiB1FAFpomEOdzYsOolaq=s64", "userId": "09280242061829821954"}} outputId="78d8ff5c-d399-45a0-d834-bb1d70f54560" _, _, sample_raw, sample_init, sample_final = class_balanced_sample(10, params_init_raw['y'], params_init_raw['x'], params_init['x'], params_final['x'], seed=3) class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] fig = plt.figure(figsize=(33,10)) fig.suptitle('Image comparison.\n\nRow 1: Original uint8. Row2: Original normalized. Row 3: KIP learned images.', fontsize=16, y=1.02) for i, img in enumerate(sample_raw): ax = plt.subplot(3, 10, i+1) ax.set_title(class_names[i]) plt.imshow(np.squeeze(img)) for i, img in enumerate(sample_init, 1): plt.subplot(3, 10, 10+i) plt.imshow(np.squeeze(img)) for i, img in enumerate(sample_final, 1): plt.subplot(3, 10, 20+i) plt.imshow(np.squeeze(img)) # + [markdown] id="7mu-uhruoWRQ" # # Run Label Solve # + id="0P47zfIGoV3I" def make_label_solve_fn(kernel_fn): @jax.jit def label_solve(x_support, x_target, y_target, reg=1e-6): """Formula for label solve valid when |x_support| <= |x_target|. 
A regularized version of the pseudo-inverse is used for numerical stability.""" kss = kernel_fn(x_support, x_support) kst = kernel_fn(x_support, x_target) matrix = jnp.dot(kst, kst.T) matrix_reg = matrix + reg * jnp.eye(matrix.shape[0])/matrix.shape[0] reg_pinv = sp.linalg.solve(matrix_reg, jnp.dot(kst, y_target), sym_pos=True) return jnp.dot(kss + reg* jnp.eye(kss.shape[0])/kss.shape[0], reg_pinv) return label_solve # + [markdown] id="Wy82CFxFeQXx" # ## Label solve using 500 cifar10 images using FC1 kernel # + id="TNw44mmjcjrh" executionInfo={"status": "ok", "timestamp": 1613093081282, "user_tz": 480, "elapsed": 3435, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiC2RHg9CttdBvSLZKiiB1FAFpomEOdzYsOolaq=s64", "userId": "09280242061829821954"}} outputId="8b7aed7b-680e-4bce-fb63-b5ebd68ae068" _, _, x_support, y_support = class_balanced_sample(500, LABELS_TRAIN, X_TRAIN, Y_TRAIN, seed=2021) solved_labels = make_label_solve_fn(KERNEL_FN)(x_support, X_TEST, Y_TEST) loss_acc_fn = make_loss_acc_fn(KERNEL_FN) _, (loss_orig, acc_orig) = loss_acc_fn(x_support, y_support, X_TEST, Y_TEST) _, (loss_solved, acc_solved) = loss_acc_fn(x_support, solved_labels, X_TEST, Y_TEST) print(f'Original test loss: {loss_orig}, test acc: {acc_orig}') print(f'Label solved test loss: {loss_solved}, test acc: {acc_solved}') # + colab={"height": 264} id="lBUgEqBwW-b4" executionInfo={"status": "ok", "timestamp": 1613093081806, "user_tz": 480, "elapsed": 516, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiC2RHg9CttdBvSLZKiiB1FAFpomEOdzYsOolaq=s64", "userId": "09280242061829821954"}} outputId="90190f3e-e1ba-4c33-cd75-96eb57a4c5e1" from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable fig, ax = plt.subplots(figsize=(3, 3)) im = ax.imshow(np.dot(y_support.T, solved_labels)/solved_labels.shape[0], vmin=-0.01, vmax=0.01) ax.set_xticks(range(10)) ax.set_yticks(range(10)) ax.set_xticklabels(class_names, 
rotation = 90, ha="right") ax.set_yticklabels(class_names, rotation = 0, ha="right") ax_divider = make_axes_locatable(ax) # Add an axes to the right of the main axes. cax = ax_divider.append_axes("right", size="7%", pad="2%") fig.colorbar(im, cax=cax) ax.set_title('FC1 LS 500 support') plt.show() # + id="36en264qdzjB"
kip/KIP.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pt1.9 # language: python # name: pt1.9 # --- import pandas as pd import numpy as np from collections import defaultdict, Counter from scipy.stats.mstats import gmean hvg = pd.read_csv('PBMC3k_HVG_seurat_250.csv', index_col=0) clusters = pd.read_csv('seurat_clusters.csv', index_col=0) hvg clusters cell_to_cluster = clusters.to_dict()['Cluster'] cell_types = set(list(cell_to_cluster.values())) cell_type_counter = defaultdict(int) new_cell_clusters = [] for cell_cluster in cell_to_cluster.values(): cnt = cell_type_counter[cell_cluster] cell_type_counter[cell_cluster] += 1 new_cell_clusters.append(cell_cluster + ' ' + str(cnt)) cell_to_cluster_new = dict(zip(cell_to_cluster.keys(), new_cell_clusters)) new_columns = [cell_to_cluster_new[col] for col in hvg.columns] hvg.columns = new_columns with open('PBMC3k_HVG_250_KNN_k20.txt', 'r') as f: edgelist = f.readlines() edgelist = [e.rstrip() for e in edgelist] edgelist = [(int(e.split()[0]), int(e.split()[1])) for e in edgelist] edgelist_with_names = [] for e in edgelist: n1 = hvg.iloc[:, e[0]].name n2 = hvg.iloc[:, e[1]].name edgelist_with_names.append((n1, n2)) platelet_edges = [] for en in edgelist_with_names: n1 = en[0] n2 = en[1] if 'Platelet' in n1 or 'Platelet' in n2: platelet_edges.append(en) len(platelet_edges) sorted_plt = set([tuple(sorted(i)) for i in platelet_edges]) len(edgelist_with_names) sorted_edgelist = set([tuple(sorted(i)) for i in edgelist_with_names]) all_names = [en[0] for en in edgelist_with_names] all_names = all_names + [en[1] for en in edgelist_with_names] all_names = sorted(list(set(all_names))) first_nodes = np.array([en[0] for en in edgelist_with_names]) second_nodes = np.array([en[1] for en in edgelist_with_names]) edgelist_with_names[:10] cell_to_connections = {} for cell in all_names: idxs_0 = np.where(first_nodes == cell) 
connections_0 = second_nodes[idxs_0] idxs_1 = np.where(second_nodes == cell) connections_1 = first_nodes[idxs_1] connections = np.unique(np.concatenate((connections_0, connections_1))) cell_to_connections[cell] = list(np.setdiff1d(connections, np.array([cell]))) cell_to_connections['B 0'] # + count_cell_types_per_cell = {} for cell, connections in cell_to_connections.items(): connections_only_type = [' '.join(c.split()[:-1]) for c in connections] count_cell_types_per_cell[cell] = Counter(connections_only_type) # - count_cell_types_per_cell['B 0'] # + frac_cell_types_per_cell = {} for cell, connectivities_count in count_cell_types_per_cell.items(): total = sum(dict(connectivities_count).values()) frac_dict = {} for conn_cell_type, conn_cell_count in dict(connectivities_count).items(): frac_dict[conn_cell_type] = conn_cell_count / total frac_cell_types_per_cell[cell] = frac_dict # - cell_gmeans = {} for cell, frac in frac_cell_types_per_cell.items(): cell_gmeans[cell] = gmean(list(dict(frac).values())) frac_cell_types_per_cell['T 238'] sorted_gmeans = {k: v for k, v in sorted(cell_gmeans.items(), key=lambda item: item[1])} filtered = {} for cell, cell_frac in frac_cell_types_per_cell.items(): if np.all(np.array(list(dict(cell_frac).values())) < 0.7): filtered[cell] = count_cell_types_per_cell[cell] filtered pd.DataFrame.from_dict(filtered, orient='index').to_csv('cell_knn_neighbours_counts_highest.csv') pd.DataFrame.from_dict(selected_dict, orient='index').to_csv('cell_knn_neighbours_fractions_highest.csv')
platelet_knn_analysis/platelet_knn_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # This notebook is part of the fixed length TPS example. It requires the file `alanine_dipeptide_tps_equil.nc`, which is written in the notebook `alanine_dipeptide_tps_first_traj.ipynb`. # # In this notebook, you will learn: # * how to set up a `FixedLengthTPSNetwork` # * how to extend a transition path to satisfy the fixed length TPS ensemble # * how to save specific objects to a file import openpathsampling as paths # ## Loading from storage # # First, we open the file we made in `alanine_dipeptide_tps_first_traj.ipynb` and load various things we need from that. old_storage = paths.Storage("tps_nc_files/alanine_dipeptide_tps_equil.nc", "r") engine = old_storage.engines[0] C_7eq = old_storage.volumes.find('C_7eq') alpha_R = old_storage.volumes.find('alpha_R') traj = old_storage.samplesets[len(old_storage.samplesets)-1][0].trajectory phi = old_storage.cvs.find('phi') psi = old_storage.cvs.find('psi') template = old_storage.snapshots[0] # ## Building a trajectory to suit the ensemble # # We're starting from a trajectory that makes the transition. However, we need that trajectory to be longer than it is. # # There's an important subtlety here: we can't just extend the trajectory in one direction until is satisfies our length requirement, because it is very possible that the final frame would be in the no-man's-land that isn't either state, and then it wouldn't satisfy the ensemble. (Additionally, without a shifting move, having the transition at the far edge of the trajectory time could be problematic.) # # So our approach here is to extend the trajectory in either direction by half the fixed length. That gives us a total trajectory length of the fixed length plus the length of the original trajectory. 
Within this trajectory, we try to find an subtrajectory that satisfies our ensemble. If we don't, then we add more frames to each side and try again. network = paths.FixedLengthTPSNetwork(C_7eq, alpha_R, length=400) trajectories = [] i=0 while len(trajectories) == 0 and i < 5: max_len = 200 + i*50 fwd_traj = engine.generate(traj[-1], [lambda traj, foo: len(traj) < max_len]) bkwd_traj = engine.generate(traj[0], [lambda traj, foo: len(traj) < max_len], direction=-1) new_traj = bkwd_traj[:-1] + traj + fwd_traj[1:] trajectories = network.sampling_ensembles[0].split(new_traj) print trajectories # raises an error if we still haven't found a suitable trajectory trajectory = trajectories[0] # ## Plot the trajectory # # This is exactly as done in `alanine_dipeptide_tps_first_traj.ipynb`. # Imports for plotting # %matplotlib inline import matplotlib.pyplot as plt plt.plot(phi(trajectory), psi(trajectory)) plt.plot(phi(traj), psi(traj)) # ## Save stuff # # When we do path sampling, the `PathSampling` object automatically handles saving for us. However, we can also save things explicitly. # # Saving works in two steps: first you mark an object as being something to save with `storage.save(object)`. But at this point, the object is not actually stored to disk. That only happens after `storage.sync()`. # save stuff storage = paths.Storage("tps_nc_files/alanine_dipeptide_fixed_tps_traj.nc", "w", old_storage.snapshots[0]) storage.save(engine) storage.save(C_7eq) storage.save(alpha_R) storage.save(phi) storage.save(psi) storage.save(trajectory) storage.sync()
examples/alanine_dipeptide_tps/AD_tps_1b_trajectory.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Part 1

import numpy as np
from calcule import Calcule

# Number of consecutive numbers

l = [4,2,3,3,0,89,89,89,10,4]

Calcule.consecutive(l)

f = set(l)
f.update([1, 2, 3])
f

# Sum of sequence

Calcule.suma(15)

# +
# %%timeit
Calcule.suma(n = 1e+9)
# -

# ### Part 2

from graf import Graf

M = np.array([[1, 1, 0, 0, 1, 0],
              [1, 0, 1, 0, 1, 0],
              [0, 1, 0, 1, 0, 0],
              [0, 0, 1, 0, 1, 1],
              [1, 1, 0, 1, 0, 0],
              [0, 0, 0, 1, 0, 0]])

G = Graf()
G.set_adiacenta(M)

# Get adjacency matrix of a graph

G.get_adiacenta()

# Get incidence matrix of a graph

G.get_incidenta()

# Get kirhoff matrix of a graph

G.get_kirhoff()

# Merge 2 graphs

M_2 = np.array([[0, 1, 1],
                [1, 0, 1],
                [1, 1, 0]])

A = Graf()
A.set_adiacenta(M)

B = Graf()
B.set_adiacenta(M_2)

C = Graf.get_united_graph(A, B)

C.get_adiacenta()

# Try weighted graph

M = np.array([[0, 2, 1, 5],
              [2, 0, 0, 0],
              [1, 0, 0, 6],
              [5, 0, 6, 0]])

G = Graf()
G.set_adiacenta(M)

G.get_adiacenta()

G.get_incidenta()

G.get_kirhoff()

# Realization of Prim algorithm

Prim = np.array([[ 0, 27,  0,  0,  0,  0, 21],
                 [27,  0, 14, 13, 18,  0, 16],
                 [ 0, 14,  0, 14,  0,  0,  0],
                 [ 0, 13, 14,  0, 24,  0,  0],
                 [ 0, 18,  0, 24,  0, 29,  0],
                 [ 0,  0,  0,  0, 29,  0, 22],
                 [21, 16,  0,  0,  0, 22,  0]])

P = Graf()
P.set_adiacenta(Prim)

P.get_arbore_partial_min()

# Additional

# suma3 is for 1,2,2,3,4,4,4,4,5,6,6,6,6,6

def s_even(n):
    # Sum of squares of the first n even numbers: 2^2 + 4^2 + ... + (2n)^2
    # = 2n(n+1)(2n+1)/3. (Each even number k contributes k copies of k to
    # the sequence, i.e. k^2 in total.)
    return 2*n*(n + 1)*(2*n + 1)/3

def suma3(n: int) -> int:
    """Partial sum of the first n terms of 1,2,2,3,4,4,4,4,5,...

    Each odd number appears once and each even number k appears k times.
    """
    root = np.sqrt(n)
    if root.is_integer():
        # n = m^2 terms end exactly at the odd number a = 2m - 1.
        a = 2*root - 1
    else:
        a = 2*np.floor(root)
    if a % 2 == 0:
        s = (a/2) ** 2          # sum of the odd numbers 1, 3, ..., a-1
        s += s_even(a/2 - 1)    # full contribution of evens 2, ..., a-2
        even = n - (a/2)*(a/2 - 1) - a/2  # leftover terms, all equal to a
        s += even * a
    else:
        s = ((a + 1)/2) ** 2    # sum of the odd numbers 1, 3, ..., a
        s += s_even((a-1)/2)    # full contribution of evens 2, ..., a-1
        # FIX: removed dead assignment `n_even_numbers = 0` (never used).
    return s

# +
# %%timeit
suma3(1e+6)
Homework #1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import numpy as np # %matplotlib inline import otter grader = otter.Notebook("../tests") # **Question 1:** Write a function `square` that squares its argument. def square(x): return x**2 grader.check("q1") # **Question 2:** Write a function `negate` that negates its argument. # + otter={"tests": ["q2b"]} def negate(x): return bool(x) # - grader.check("q2") # **Question 3:** Assign `x` to the negation of `[]`. Use `negate`. x = negate([]) x grader.check("q3") # **Question 4:** Assign `x` to the square of 6.25. Use `square`. x = square(6.25) x grader.check("q4") # **Question 5:** Plot $f(x) = \cos (x e^x)$ on $(0,10)$. x = np.linspace(0, 10, 100) y = np.cos(x * np.exp(x)) plt.plot(x, y) # **Question 6:** Write a non-recursive infinite generator for the Fibonacci sequence `fiberator`. def fiberator(): yield 0 yield 1 a, b = 0, 1 while True: a, b = b, a + b yield a grader.check("q6") #
test/test-grade/notebooks/fails2and3and6H.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/murthylab/sleap/blob/main/docs/notebooks/Training_and_inference_on_an_example_dataset.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Training and inference on an example dataset # + [markdown] colab_type="text" id="LlV70jDuWzea" # In this notebook we'll install SLEAP, download a sample dataset, run training and inference on that dataset using the SLEAP command-line interface, and then download the predictions. # + [markdown] colab_type="text" id="yX9noEb8m8re" # ## Install SLEAP # Note: Before installing SLEAP check [SLEAP releases](https://github.com/murthylab/sleap/releases) page for the latest version. # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="DUfnkxMtLcK3" outputId="a6340ef1-eaac-42ef-f8d4-bcc499feb57b" # !pip install sleap==1.1.4 # + [markdown] id="iq7jrgUksLtR" # ## Download sample training data into Colab # Let's download a sample dataset from the SLEAP [sample datasets repository](https://github.com/murthylab/sleap-datasets) into Colab. # + colab={"base_uri": "https://localhost:8080/"} id="fm3cU1Bc0tWc" outputId="c0ac5677-e3c5-477c-a2f7-44d619208b22" # !apt-get install tree # !wget -O dataset.zip https://github.com/murthylab/sleap-datasets/releases/download/dm-courtship-v1/drosophila-melanogaster-courtship.zip # !mkdir dataset # !unzip dataset.zip -d dataset # !rm dataset.zip # !tree dataset # + [markdown] id="xZ-sr67av5uu" # ## Train models # For the top-down pipeline, we'll need train two models: a centroid model and a centered-instance model. 
# # Using the command-line interface, we'll first train a model for centroids using the default **training profile**. The training profile determines the model architecture, the learning rate, and other parameters. # # When you start training, you'll first see the training parameters and then the training and validation loss for each training epoch. # # As soon as you're satisfied with the validation loss you see for an epoch during training, you're welcome to stop training by clicking the stop button. The version of the model with the lowest validation loss is saved during training, and that's what will be used for inference. # # If you don't stop training, it will run for 200 epochs or until validation loss fails to improve for some number of epochs (controlled by the `early_stopping` fields in the training profile). # + id="QKf6qzMqNBUi" # !sleap-train baseline.centroid.json "dataset/drosophila-melanogaster-courtship/courtship_labels.slp" --run_name "courtship.centroid" --video-paths "dataset/drosophila-melanogaster-courtship/20190128_113421.mp4" # + [markdown] id="Vm3i0ry04IMx" # Let's now train a centered-instance model. # + id="ufbULTDw4Hbh" # !sleap-train baseline_medium_rf.topdown.json "dataset/drosophila-melanogaster-courtship/courtship_labels.slp" --run_name "courtship.topdown_confmaps" --video-paths "dataset/drosophila-melanogaster-courtship/20190128_113421.mp4" # + [markdown] id="whOf8PaFxYbt" # The models (along with the profiles and ground truth data used to train and validate the model) are saved in the `models/` directory: # + colab={"base_uri": "https://localhost:8080/", "height": 306} id="GBUTQ2Cm44En" outputId="ca298981-af65-43b3-f0f6-573f423acba8" # !tree models/ # + [markdown] id="nIsKUX661xFK" # ## Inference # Let's run inference with our trained models for centroids and centered instances. 
# + id="CLtjtq9E1Znr" # !sleap-track "dataset/drosophila-melanogaster-courtship/20190128_113421.mp4" --frames 0-100 -m "models/courtship.centroid" -m "models/courtship.topdown_confmaps" # + [markdown] id="nzObCUToEqwA" # When inference is finished, predictions are saved in a file. Since we didn't specify a path, it will be saved as `<video filename>.predictions.slp` in the same directory as the video: # + colab={"base_uri": "https://localhost:8080/", "height": 136} id="n6KVfWDIDEUe" outputId="b0633306-f24f-4e6e-e78f-e968a765a3c6" # !tree dataset/drosophila-melanogaster-courtship # + [markdown] id="3mf3KZQj_GhH" # You can inspect your predictions file using `sleap-inspect`: # + id="-jbVP_s06hMh" # !sleap-inspect dataset/drosophila-melanogaster-courtship/20190128_113421.mp4.predictions.slp # + [markdown] id="JFfHDVy7_iDz" # If you're using Chrome you can download your trained models like so: # + id="Ej2it8dl_BO_" # Zip up the models directory # !zip -r trained_models.zip models/ # Download. from google.colab import files files.download("/content/trained_models.zip") # + [markdown] id="iskOQI-r_zNO" # And you can likewise download your predictions: # + id="gdXCYnRV_omC" from google.colab import files files.download('dataset/drosophila-melanogaster-courtship/20190128_113421.mp4.predictions.slp') # + [markdown] id="7Fy26NVmCWFw" # In some other browsers (Safari) you might get an error and you can instead download using the "Files" tab in the side panel (it has a folder icon). Select "Show table of contents" in the "View" menu if you don't see the side panel.
docs/notebooks/Training_and_inference_on_an_example_dataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: py3 # language: python # name: py3 # --- # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 4, "hidden": false, "row": 0, "width": 4}, "report_default": {"hidden": false}}}} # # Michaelis menten analysis # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"hidden": true}, "report_default": {"hidden": true}}}} # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 4, "height": 4, "hidden": false, "row": 0, "width": 4}, "report_default": {"hidden": false}}}} # ## Load our data # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 8, "height": 23, "hidden": false, "row": 0, "width": 4}, "report_default": {"hidden": false}}}} data = pd.read_csv("data.csv") data # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 4, "hidden": false, "row": 4, "width": 4}, "report_default": {"hidden": false}}}} # ## Plot the data # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"hidden": true}, "report_default": {"hidden": true}}}} # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 4, "height": 9, "hidden": false, "row": 4, "width": 4}, "report_default": {"hidden": false}}}} v = data["rate"] s = data["substrate"] plt.plot(s, v, '.') plt.xlabel("substrate") plt.ylabel("rate") # + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 5, "hidden": false, "row": 23, "width": null}, "report_default": {"hidden": false}}}} # ## Fitting the data # This is the model we're fitting: # # $$ # v = 
\frac{V_{max} \cdot [S]}{[S] + K_{M}} # $$ # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"hidden": true}, "report_default": {"hidden": true}}}} import scipy.optimize # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"hidden": true}, "report_default": {"hidden": true}}}} def func(s, Vmax, Km): return (Vmax * s) / (s + Km) # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"hidden": true}, "report_default": {"hidden": true}}}} def fitter(Vmax, Km): fitparams, fiterr = scipy.optimize.curve_fit(func, s, v, p0=[Vmax, Km]) xmodel = np.linspace(0, .1, 1000) ymodel = func(xmodel, *fitparams) plt.plot(s, v, '.') plt.plot(xmodel, ymodel, 'r') plt.xlabel("substrate") plt.ylabel("rate") plt.show() return fitparams # - # # Fitting widget to make life easy # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"hidden": true}, "report_default": {"hidden": true}}}} import ipywidgets # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 18, "hidden": false, "row": 28, "width": 4}, "report_default": {"hidden": false}}}} widget = ipywidgets.interactive(fitter, Vmax=(-10,10), Km=(-10,10)) widget # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"hidden": true}, "report_default": {"hidden": true}}}}
our-analysis-yo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dynamic Programming # Basically we solve the Bellman optimality equation using these methods: # * Value Iteration # * Policy Iteration # From the perspective of the quality of the policy found both methods will work, but they are the base of more advanced methodologies. # # ### References # * [Artificial Intelligence](https://github.com/aimacode/aima-python) # * [MDP code sample](http://aima.cs.berkeley.edu/python/mdp.html) # + from grid_world.grid_samples import * from grid_world.grid_actions import GridActions from grid_world.gridworld_mdp import * import matplotlib.pyplot as plt import collections #from collections import defaultdict # - # ## Grid World # ![title](imgs/GridWorldBook.png) grid_string = get_book_grid() print(grid_string) grid_world = GridWorld(grid_string) #grid_world.gamma = 0.1 grid_world.gamma = 0.9 print('Grid shape:', grid_world.shape) print('All actions:', grid_world.all_actions) print('Number of states:', grid_world.num_states) print('States:', grid_world.states) print('Start state:', grid_world.start_state) print('Rewards on each state') for st in grid_world.states: print('\tState:' , st,'Reward:', grid_world.R(st)) # ### Solve with Value iteration def value_iteration_iter(mdp, iterations=30): U_over_time = [] U1 = {s: 0 for s in mdp.states} R, T, gamma = mdp.R, mdp.T, mdp.gamma for _ in range(iterations): U = U1.copy() for s in mdp.states: U1[s] = R(s) + gamma * max([sum([p * U[s1] for (p, s1) in T(s, a)])for a in mdp.possible_actions(s)]) U_over_time.append(U) return U_over_time value_mdp = value_iteration(grid_world) policy_val = best_policy(grid_world, value_mdp) print('Value Function') for key, value in value_mdp.items(): print ('coordinate:',key, '==>',value) print('Policy(From Value iteration):') for st in 
grid_world.states: print('\tState:', st, 'action:', GridActions.action_to_str(policy_val[st])) # ### Solve with policy Iteration policy_iter = policy_iteration(grid_world) print('Policy(From Policy iteration):') for st in grid_world.states: print('\tState:', st, 'action:', GridActions.action_to_str(policy_iter[st])) # ## Iteractive Demo def make_plot_grid_step_function(columns, rows, U_over_time): """ipywidgets interactive function supports single parameter as input. This function creates and return such a function by taking as input other parameters.""" def plot_grid_step(iteration): # Get data data = U_over_time[iteration] # Fast dictionary data = collections.defaultdict(lambda: 0, data) grid = [] for row in range(rows): current_row = [] for column in range(columns): #current_row.append(data[(column, row)]) current_row.append(data[(row, column)]) grid.append(current_row) grid.reverse() # output like book fig = plt.imshow(grid, cmap=plt.cm.bwr, interpolation='nearest') plt.axis('off') fig.axes.get_xaxis().set_visible(False) fig.axes.get_yaxis().set_visible(False) for col in range(len(grid)): for row in range(len(grid[0])): magic = grid[col][row] fig.axes.text(row, col, "{0:.2f}".format(magic), va='center', ha='center') plt.show() return plot_grid_step # + import ipywidgets as widgets from IPython.display import display # Grid dimensions columns = 4 rows = 3 U_over_time = value_iteration_iter(grid_world) # Add Slider iteration_slider = widgets.IntSlider(min=1, max=29, step=1, value=0) w=widgets.interactive(make_plot_grid_step_function(columns, rows, U_over_time),iteration=iteration_slider) #w=widgets.interactive(make_plot_grid_step_function(rows, columns, U_over_time),iteration=iteration_slider) display(w)
DynamicProgramming.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.5 ('base') # language: python # name: python3 # --- # # Exercices pour l'évaluation des dérivés sur un arbre binomial FINA60211(A) # # For the English version of this file, [click here](bino_exercises.ipynb). # # Ce carnet de notes interactif et le module python associé `pedagogical_binomial_model` vous fournissent des outils pour générer vos propres exercices sur l'évaluation des options sur un arbre binomial. Vous pouvez utiliser ce carnet de notes pour : # # - apprendre à construire des arbres binomiaux *forward* # - apprendre à calculer les probabilités d'états sur un arbre binomial # - apprendre à tarifier des options par la méthode récursive d'espérance risque-neutre et d'actualisation sur une seule période. # - apprendre à tarifier des options européennes en calculant l'espérance risque-neutre de la valeur finale. # # Le module `pedagogical_binomial_model` suit les conventions et la notation de nos diapositives de cours. 
# ## Travailler avec le module # # Afin d'apprendre à utiliser le module, vous pouvez simplement lire la documentation dans le fichier `pedagogical_binomial_model.py` ou essayer le code `help` suivant : import pedagogical_binomial_model as pbm help(pbm.binomial_tree) # ## Exemples # # ### Créer un arbre binomial pour le prix de l'action # # Créez un arbre pour décrire l'évolution du prix de l'action : # # - pendant trois mois # - par étapes d'un mois # # Utilisez les paramètres suivants pour l'arbre : # # - $S_0$ = 100 # - $r_f$ = 2% # - $\delta$ = 1% # - $\sigma$ = 20% # + stock_price = 100 number_of_model_steps = 3 # three months length_of_model_step = 1/12 # a month is 1/12 years risk_free_rate = 0.02 dividend_yield = 0.01 stock_volatility = 0.2 # annualized stock_price_tree = pbm.binomial_tree(stock_price, number_of_model_steps, length_of_model_step, risk_free_rate, dividend_yield, stock_volatility) # - # Pour inspecter l'arbre, vous pouvez utiliser la méthode associée `print`. Les périodes de l'arbre sont indexées de $0$ à `number_of_model_steps`. Jetons un coup d'oeil au résultat au temps $1$. # # Notez que les périodes à imprimer doivent être données sous forme de `list` : par exemple, `[0, 1, 2]`. # # Dans le résultat ci-dessous, vous verrez que # # - Les périodes sont numérotées et séparées par des en-têtes # - Pour chaque période, tous les états possibles du prix des actions sont imprimés. # - Tous les états sont nommés en suivant la convention qui dit combien de mouvements `U` et `D` sont nécessaires pour atteindre un état donné, par exemple `U1-D0` pour l'état "up" à la période $1$ et `U0-D1` pour l'état "down" à la période 1. 
# # Il y a des informations supplémentaires dans le résultat : # # - **"Up" Transition Probability**: $p^{\star}$ dans nos diapos # - **Probability of State** : la probabilité d'atteindre un état donné via toutes les trajectoires possibles # - **Multi-Period Discount Factor**: le facteur d'actualisation **de** la période dans l'en-tête **jusqu'à** la période finale ; $e^{-r_f (K-k) h}$ où la période finale est $K$ et la période actuelle est $k$. # - **Single-Period Discount Factor**: $e^{-r_f h}$ # - Les facteurs "Up" et "Down" sont $u$ et $d$. stock_price_tree.print([1]) # ### Exercice: # # Retournez à vos diapositives et utilisez les paramètres du prix des actions de la section précédente pour construire un arbre binomial forward à la main. Utilisez ensuite ce module pour vérifier vos résultats. # ### Prix d'une option de vente européenne # # Vous pouvez utiliser le modèle pour évaluer une option de vente européenne. L'idée est d'abord de construire l'arbre qui décrit l'évolution du prix de l'action, puis de tarifier l'option *sur cet arbre*. # # Cela signifie que l'un des paramètres clés de l'option, le **temps à l'échéance**, dépend de l'arbre ! # # Tarifions l'option avec les paramètres suivants : # # - Expiration dans six mois # - Prix d'exercice de \$95 # # #### Définir l'arbre des prix de l'action # # Notre arbre binomial existant, `stock_price_tree`, décrit l'évolution du prix sur trois mois. Par conséquent, nous ne pouvons pas l'utiliser pour tarifier l'option, et nous devons créer un nouvel arbre. # # Nous allons nous en tenir à un arbre à trois périodes. Cela signifie que nous avons besoin de périodes de deux mois, c'est-à-dire $h = 2/12$ sur l'arbre. 
# + stock_price = 100 number_of_model_steps = 3 # three steps length_of_model_step = 2/12 # a month is 1/12 years risk_free_rate = 0.02 dividend_yield = 0.01 stock_volatility = 0.2 # annualized stock_price_tree = pbm.binomial_tree(stock_price, number_of_model_steps, length_of_model_step, risk_free_rate, dividend_yield, stock_volatility) # - # #### Définir l'option # # Pour définir une option de vente européenne, nous allons utiliser la classe `european_put` du module `pbm`. Voici le résultat du `help(pbm.european_put)`. # # Un `european_put` est un `derivative`, donc si vous êtes intéressés, vous pouvez suivre en lisant `help(pbm.derivative)` et le code du module. help(pbm.european_put) # La liste d'aide ci-dessus nous indique comment définir un `european_put` : # ``` # | european_put(strike_price) # ... # | Methods defined here: # | # | __init__(self, strike_price) # | Define an European Put # | # | Parameters # | ---------- # | strike_price : float # | strike price of the option # ``` # # Il suffit de fixer un `strike_price` et d'écrire `mon_option = pbm.european_put(strike_price)`. strike_price = 95 my_euro_put = pbm.european_put(strike_price) # #### Tarifier l'option # # Après avoir défini l'arbre des prix des actions et l'option, nous pouvons tarifier l'option. # # Pour ce faire, nous utilisons la méthode `pricing()` de l'arbre des prix des actions` sur l'option. my_euro_put = stock_price_tree.pricing(my_euro_put) # Apparemment, rien ne s'est passé. # # Cependant, `my_euro_put` contient maintenant un arbre binomial complet avec toutes les informations de prix incrémentales ! # # La classe `derivative` et toutes les classes liées (c'est-à-dire les calls et les puts européens et américains dans `pbm`) ont une méthode `print` qui est identique à la méthode `print` pour l'arbre binomial que nous avons examiné ci-dessus. 
# # Pour connaître la valeur de l'option de vente au temps $0$, vous pouvez simplement taper `my_euro_put.print([0])` et voir dans le champ `"Derivative"` que l'option vaut $3.1966. my_euro_put.print([0]) # #### Exercice : Tarifier l'option en utilisant le flux monétaire terminal espéré # # Pour les produits dérivés européens, nous pouvons utiliser l'approche alternative à la tarification. Avec $K$ périodes de longueur $h$ années, le prix d'un dérivé $G(S_K)$ est : # $$ # P = e^{-r_f \times h \times K}E^{\star}\left[ G(S_K) \right]\, , # $$ # l'espérance risque-neutre du flux monétaire, actualisée au taux sans risque. # # Pour voir les probabilités des états terminaux ainsi que les prix des actions dans ces états, tapez `my_euro_put.print([3])`. my_euro_put.print([3]) # Dans le champ `"Derivative"` vous pouvez voir les valeurs du payoff $G(S_K)$ qui ici est $\max(95 - S_K, 0)$. Utilisez ensuite la `"Probability of State"` (Probabilité d'état) et le `"Multi-Period Discount Factor"` (Facteur d'actualisation multi-périodes) approprié de la période $0$ à la période $K$ pour calculer le prix du Put Européen avec un stylo et du papier. # #### Extra-scolaire (pour les nerds) # # Vous pouvez récupérer toutes les informations qui sont imprimées sur l'arbre pour les utiliser comme données dans votre cod. Les extraits de code suivants extraient les probabilités risque-neutres des états au dernier nœud de l'arbre, ainsi que les payoffs correspondants. # # Au final, l'attribut `.trunk` de l'arbre des prix de l'option est un `dictionnaire`. 
type(my_euro_put.pricing_tree.trunk) # + import numpy as np rn_probs = [my_euro_put.pricing_tree.trunk["Period 3"][state]["Probability of State"] for state in my_euro_put.pricing_tree.trunk["Period 3"].keys()] rn_probs = np.array(rn_probs) print("Voici les probabilités risque-neutres, converties en un array numpy.") print(rn_probs) # + payoffs = [my_euro_put.pricing_tree.trunk["Period 3"][state]["Derivative"] for state in my_euro_put.pricing_tree.trunk["Period 3"].keys()] payoffs = np.array(payoffs) print("Voici les payoffs") print(payoffs) # - # Vous pouvez maintenant calculer l'espérance risque-neutre et l'actualiser à la période $0$ : # + put_price = np.sum(rn_probs * payoffs) put_price = my_euro_put.pricing_tree.trunk["Period 0"]["State 0"]["Multi-Period Discount Factor"] * put_price print("Le prix de l'option de vente est ${0:1.4f}.".format(put_price)) # - # ### Prix d'une option de vente américaine et comparaison avec l'option européenne # # L'option américaine est plus *optionnelle* que l'option européenne, car elle peut être exercée à tout moment avant l'expiration. Si elle est plus optionnelle... alors sa valeur devrait être **supérieure** à celle de l'option européenne. # # La différence entre les prix des options américaines et européennes, par ailleurs identiques, s'appelle la **prime d'exercice anticipé** (*early exercise premium*). # # Nous pouvons utiliser notre modèle binomial pour évaluer la prime de l'option américaine. # # Une option de vente américaine peut être instanciée à partir de la classe `american_put`. Nous utiliserons le même arbre et le même prix d'exercice que précédemment. my_amer_put = pbm.american_put(95) my_amer_put = stock_price_tree.pricing(my_amer_put) my_amer_put.print([0]) my_euro_put.print([0]) # Le prix du put américain est de 3,2435 \$ et le prix du put européen est de 3,1966 \$. # # La **prime d'exercice anticipé** est égale à 0,0469 \$ ! # ## Autres exercices # # Jouez avec ce module. 
Utilisez-le pour répondre aux questions suivantes : # # ### Exercice 1 # # Quelle est la prime d'exercice anticipé pour les options d'achat si $\delta = 0 $ ? # # ### Exercice 2 # # Quelle est la prime d'exercice anticipé pour les options de vente si $r = 0$ ? # # ### Exercice 3 # # Que se passe-t-il avec le prix d'une option européenne si la grille de temps du modèle binomial devient *plus dense* ? # # Nous entendons par là que nous pourrions garder l'horizon du modèle constant mais le décrire en un plus grand nombre d'étapes. Ci-dessus, nous avons utilisé trois étapes pour un horizon de six mois. Et si nous en utilisions six (c'est-à-dire des étapes mensuelles) ? Et si nous utilisions 180 (c'est-à-dire des étapes quotidiennes) ? # # D'abord, définissez un arbre où la `length_of_model_step = 1/365` et le `number_of_model_steps = 180`. # # Ensuite, écrivez une fonction qui évalue la formule d'évaluation des options de Black-Scholes et comparez les prix de la formule aux prix d'un modèle binomial avec de très nombreux pas (disons, 1 pas par jour). # # Tarifiez une option européenne (call ou put) avec les deux méthodes. # # ### Exercice 4 # # Les prix des options européennes de vente et d'achat **avec le même prix d'exercice et la même échéance** sont fortement liés les uns aux autres ! Cette relation est appelée la **parité put-call** (*put-call parity*). Elle peut être énoncée comme suit : # $$ # C - P = S e^{-\delta T} - K \times e^{-r_f T}\, , # $$ # où $C$ et $P$ sont, respectivement, les prix des options d'achat et de vente, $S$ est le prix de l'action, $K$ est le prix d'exercice des deux options, $r_f$ est le taux sans risque, $\delta$ est le rendement du dividende et $T$ est la maturité des deux options. # # Utilisez le modèle binomial pour : # # 1. Démontrer que la relation de parité put-call existe bel et bien, # 2. Examiner comment l'écart par rapport à la relation dépend de $\delta$ pour les options américaines. # #
bino_exercises_fr.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/SitwalaM/nlp-topic-modelling/blob/develop/MG_sklearn_topic_modelling.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="pxgpSYaPqMcz" colab={"base_uri": "https://localhost:8080/"} outputId="e68848e4-69f4-413c-f37a-592d360de4ff" # install if not available # !pip install pyLDAvis # + id="uG4Y9Lovuzei" colab={"base_uri": "https://localhost:8080/"} outputId="21269772-8dfe-47f1-ae36-70ce40917acc" # import libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import sklearn import numpy as np import re import nltk from nltk.tokenize import RegexpTokenizer from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.decomposition import LatentDirichletAllocation import pickle import pyLDAvis import pyLDAvis.sklearn pyLDAvis.enable_notebook() # + [markdown] id="fuI3XTBM6aYP" # #Preprocessing # + colab={"base_uri": "https://localhost:8080/"} id="W61zBNzGwlJT" outputId="395df9f1-49de-4117-dad2-7453ea7b79f5" # download stopwords # !python -m nltk.downloader stopwords # + id="n1KoZxIg411k" colab={"base_uri": "https://localhost:8080/"} outputId="c87d714e-4dfc-4467-c984-b458e3d4086e" def clean2(text): """cleans the text to prepare for NLP""" text = str(text).lower() text = re.sub(r'@\w+', ' ', text) text = re.sub('https?://\S+|www\.\S+', '', text) text = re.sub(r'[^a-z A-Z]', ' ',text) text = re.sub(r'\b\w{1,2}\b', '', text) text = re.sub(r'[^\w\s]','',text) text = re.sub(r'^RT[\s]+', '', text) text = 
re.sub('\[.*?\]', '', text) text = re.sub('<.*?>+', '', text) text = re.sub('\n', '', text) text = re.sub('\w*\d\w*', '', text) text = re.sub(r'#', '', text) text = re.sub(r'[^\w\s]','',text) text = re.sub(r'@[A-Za-z0–9]+', '', text) text = re.sub(r' +', ' ', text) return text # + colab={"base_uri": "https://localhost:8080/"} id="WbuWF9xewjnz" outputId="aa996a8e-5325-4636-9da9-2caa9344c269" # download the tweet dataset # !wget https://dsiwork.s3.amazonaws.com/dataset.csv # + id="IW8-k8vUxgtq" data = pd.read_csv("dataset.csv", parse_dates=["date_created"], encoding="ISO-8859-1") # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="EbS1l-ZTxyay" outputId="da9f2b97-20c4-487c-99fd-0c36daaabba5" data.head() # + colab={"base_uri": "https://localhost:8080/", "height": 336} id="kyFvMK2oyOFa" outputId="eb6e32ad-5da5-490d-92fe-3bb54ccbc901" data['clean_tweet'] = data.tweet.apply(clean2) data.head() # + id="tu7P-HA7vOLs" # Remove stopwords stop_words = set(stopwords.words("english")) data["clean_tweet"] = data["clean_tweet"].apply(lambda x : " ".join([w.lower() for w in x.split() if w not in stop_words and len(w) > 3])) # + id="kaWb4VD35-y8" #Tokenize tweet tweets = data["clean_tweet"].apply(lambda x : x.split()) # + colab={"base_uri": "https://localhost:8080/", "height": 336} id="6wa2UhU96qs7" outputId="1e2304d1-dd06-4aaf-cde2-d830de2c26f6" data.head() # + [markdown] id="zEo7SCrLfpex" # **Lemmatization** # + colab={"base_uri": "https://localhost:8080/"} id="HUlK8QRGg0xU" outputId="761e383a-f053-4110-c190-807d71a75392" # !python -m spacy download en_core_web_sm # + colab={"base_uri": "https://localhost:8080/"} id="UAcj-75Mfnqv" outputId="4860c789-799f-4958-9560-ae44445a5426" import spacy def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']): texts_out = [] for sent in texts: doc = nlp(" ".join(sent)) texts_out.append(" ".join([token.lemma_ if token.lemma_ not in ['-PRON-'] else '' for token in doc if token.pos_ in allowed_postags])) return 
texts_out # Initialize spacy 'en' model, keeping only tagger component (for efficiency) nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner']) # Do lemmatization keeping only Noun, Adj, Verb, Adverb data_lemmatized = lemmatization(tweets, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']) # + id="E6f05z0z6uSb" vectorizer = TfidfVectorizer(ngram_range=(2,3)) data_vectorized = vectorizer.fit_transform(data_lemmatized) # + colab={"base_uri": "https://localhost:8080/"} id="KwfWrQH-KxS1" outputId="1d75ae32-b359-4775-f124-b309af3f43ea" data_vectorized # + [markdown] id="t0WYpMI37osy" # # Modelling # + id="qXxB0vHG7k5l" # LDA Implementation number_of_topics = 10 model = LatentDirichletAllocation(n_components=number_of_topics, random_state=0) # + colab={"base_uri": "https://localhost:8080/"} id="atBHrT2M7t8q" outputId="425d0327-305e-403a-eed3-9f444d5a465a" model.fit(data_vectorized) # + id="DlHEGUs27379" def display_topics(model, feature_names, no_top_words): """ creates dataframe showing top words for each topic from the model Parameters ---------- model: object instance of the topic model feature_names: output feature names from vectorizer e.g CountVectorizer.get_feature_names() no_top_words: returns -------- dataframe showing topics and the weight for the top words specified """ topic_dict = {} for topic_idx, topic in enumerate(model.components_): topic_dict["Topic %d words" % (topic_idx)]= ['{}'.format(feature_names[i]) for i in topic.argsort()[:-no_top_words - 1:-1]] topic_dict["Topic %d weights" % (topic_idx)]= ['{:.1f}'.format(topic[i]) for i in topic.argsort()[:-no_top_words - 1:-1]] return pd.DataFrame(topic_dict) # + colab={"base_uri": "https://localhost:8080/"} id="6iZFHCnB79Qj" outputId="954799c5-f3a5-4638-b542-acb3e7c60bd4" # get the feature names from the vectorization tf_feature_names = vectorizer.get_feature_names() # + id="F6OOKfxs8BV7" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="3aca0f20-f72d-40f0-9a2d-8fda4d2b46ce" 
no_top_words = 20 display_topics(model, tf_feature_names, no_top_words) #df.to_excel("topics_output.xlsx") # + [markdown] id="35R2SYYWQJom" # **Model Performance Metrics** # + colab={"base_uri": "https://localhost:8080/"} id="uH1CADHQPmHA" outputId="d7219e73-ceb2-4750-c5ac-7f75f9630834" # log-likelihood print(model.score(data_vectorized)) # perplexity print(model.perplexity(data_vectorized)) # + [markdown] id="uOne7ThprEeL" # # pyLDAVis # + colab={"base_uri": "https://localhost:8080/", "height": 954} id="XqLohwxJrKBS" outputId="980464c6-9f9f-4834-f66e-462e36e3c92b" pyLDAvis.sklearn.prepare(model, data_vectorized, vectorizer) # + [markdown] id="wk-IdGhfebN7" # **Hyperparameter Tuning** # # # + [markdown] id="EkviJGYue9Xr" # **How to GridSearch the best LDA model** # + id="1nM-2_m-WlEa" from sklearn.decomposition import LatentDirichletAllocation from sklearn.model_selection import GridSearchCV # + id="DkZ9A1gefEjR" colab={"base_uri": "https://localhost:8080/"} outputId="c847aef3-cdd4-4f57-c4bd-6b7f482cf81f" # Define Search Param search_params = {'n_components': [10, 15, 20, 25, 30], 'learning_decay': [.5, .7, .9]} # Init the Model lda = LatentDirichletAllocation() # Init Grid Search Class model2 = GridSearchCV(lda, param_grid=search_params) # Do the Grid Search model2.fit(data_vectorized) # + [markdown] id="BSYNpXY_fQoy" # **How to see the best topic model and its parameters?** # + id="xXVqfl_KfUHr" colab={"base_uri": "https://localhost:8080/"} outputId="dc21f987-3611-482c-e74a-fc012b05d2e9" # Best Model best_lda_model = model2.best_estimator_ # Model Parameters print("Best Model's Params: ", model2.best_params_) # Log Likelihood Score print("Best Log Likelihood Score: ", model2.best_score_) # Perplexity print("Model Perplexity: ", best_lda_model.perplexity(data_vectorized)) # + [markdown] id="mMeWDlm7e_XE" # *This shows us that the best model is obtained with 10 topics as done above* # + [markdown] id="ybJGKcP2EbWX" # # Inference # + id="7PH2rFoREdbn" def 
get_inference(model, vectorizer, topics, text, threshold): """ runs inference on text input paramaters ---------- model: loaded model to use to transform the input vectorizer: instance of the vectorizer e.g TfidfVectorizer(ngram_range=(2, 3)) topics: the list of topics in the model text: input string to be classified threshold: float of threshold to use to output a topic returns ------- tuple => (top score, the scores for each topic) """ v_text = vectorizer.transform([text]) score = model.transform(v_text) labels = set() for i in range(len(score[0])): if score[0][i] > threshold: labels.add(topics[i]) if not labels: return 'None', -1, set() return topics[np.argmax(score)], score # + id="uvcW_ccF6PMo" colab={"base_uri": "https://localhost:8080/"} outputId="47cf533d-b045-4f15-b9ef-60f83c469afb" # test the model with some text topics = list(np.arange(0,10)) result = get_inference(model, vectorizer, topics, "operation dudula", 0 ) result # + [markdown] id="gxpQB7SL14xw" # # Testing inference from loading the model # + id="OkslOxJjFjyP" # Save the model then test it by loading it with open("lda_model.pk","wb") as f: pickle.dump(model, f) f.close() # then reload it with open("lda_model.pk","rb") as f: lda_model = pickle.load(f) # + id="LlWZLcLZ1zfC" colab={"base_uri": "https://localhost:8080/"} outputId="c86ffed5-d5fa-42eb-c8a1-e211b290c82f" # test example text result = get_inference(lda_model, vectorizer, topics, "operation dudula", 0 ) result # + id="GwU-6kuTBHdG" pickle.dump(vectorizer, open("vectorizer.pickle", "wb")) #pickle.load(open("models/vectorizer.pickle", 'rb')) // Load vectorizer
notebooks/MG_sklearn_topic_modelling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Week 2 - Quantum Information Science # # * Quantum Information Science # * Classical bits/registers # * Quantum bits/registers # * Hands-on on Qiskit # * Visualizing circuits # * Qasm language # # # Exercises # * [Qiskit basics and circuit visualization](exercises/w2_01.ipynb) # * Run a Qasm specification on IBM Q Experience, and on a Real Processor (see slides for instructions) # # ## Resources # * [PDF slides](slides.pdf) # * [slides src](latex/main.tex) Latex files and image resources used in the presentation (useful for PR on slide typos and such) #
community/awards/teach_me_quantum_2018/TeachMeQ/Week_2-Quantum_Information_Science/README.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## AXON: Some benchmarks # + import json import axon try: import yaml use_yaml = 1 except: use_yaml = 0 import random import time import matplotlib.pyplot as plt # %matplotlib inline import gc import sys; print(sys.version) # - def random_string(n): text = ''.join(chr(ord('a')+random.randint(1,20)) for i in range(20)) text = axon.as_unicode(text) return text # + yaml_times_dump = [] json_times_dump = [] axon_times_dump = [] yaml_times_load = [] json_times_load = [] axon_times_load = [] def make_test(data): if type(data) == dict: data = [data] t0 = time.time() axon_text = axon.dumps(data, sorted=0) dt_axon_dump = time.time() - t0 axon_times_dump.append(dt_axon_dump) t0 = time.time() v = axon.loads(axon_text) dt_axon_load = time.time() - t0 axon_times_load.append(dt_axon_load) t0 = time.time() json_text = json.dumps(data) dt_json_dump = time.time() - t0 json_times_dump.append(dt_json_dump) t0 = time.time() v = json.loads(json_text) dt_json_load = time.time() - t0 json_times_load.append(dt_json_load) if use_yaml: t0 = time.time() yaml_text = yaml.dump(data, Dumper=yaml.CDumper) dt_yaml_dump = time.time() - t0 yaml_times_dump.append(dt_yaml_dump) t0 = time.time() v = yaml.load(json_text, Loader=yaml.CLoader) dt_yaml_load = time.time() - t0 yaml_times_load.append(dt_yaml_load) else: dt_yaml_dump, dt_yaml_load = float('nan'), float('nan') print('Dump:: axon: %.3f json: %.3f yaml: %.3f' % (dt_axon_dump, dt_json_dump, dt_yaml_dump)) print('Load:: axon: %.3f json: %.3f yaml: %.3f' % (dt_axon_load, dt_json_load, dt_yaml_load)) # + def test_1(): lst = [] for i in range(4000): lst.append([ random_string(8), random.random(), random_string(8), random.randint(1,99999), random_string(8), random_string(32), random_string(8), random.random(), random_string(8), 
random.randint(1,99999), random_string(8), random_string(32), random_string(8), random.random(), random_string(8), random.randint(1,99999), random_string(8), random_string(32), random_string(8), random.random(), random_string(8), random.randint(1,99999), random_string(8), random_string(32), ]) return lst make_test(test_1()) # + def test_2(): lst = [] for i in range(4000): lst.append({ random_string(8): random.random(), random_string(8): random.randint(1,99999), random_string(8): random_string(32), random_string(8): random.random(), random_string(8): random.randint(1,99999), random_string(8): random_string(32), random_string(8): random.random(), random_string(8): random.randint(1,99999), random_string(8): random_string(32), random_string(8): random.random(), random_string(8): random.randint(1,99999), random_string(8): random_string(32), }) return lst make_test(test_2()) # + def test_3(): d = {} for j in range(100): lst = [] for i in range(100): lst.append({ random_string(8): random.random(), random_string(8): random.randint(1,99999), random_string(8): random_string(32), random_string(8): random.random(), random_string(8): random.randint(1,99999), random_string(8): random_string(32), random_string(8): random.random(), random_string(8): random.randint(1,99999), random_string(8): random_string(32), }) d[random_string(8)] = lst return d make_test(test_3()) # + def test_4(): lst = [] for i in range(4000): lst.append([ random_string(8), random_string(32), random_string(32), random_string(8), random_string(8), random_string(32), random_string(32), random_string(32), random_string(8), random_string(8), random_string(32), random_string(32), random_string(8), random_string(8), random_string(32), random_string(8), random_string(8), random_string(32), random_string(32), random_string(8), random_string(8), random_string(8), random_string(32), random_string(32), random_string(8), random_string(8), random_string(32), random_string(8), random_string(8), random_string(32), ]) return 
lst make_test(test_4()) # + def test_5(): lst = [] for i in range(5000): lst.extend([ random_string(8), random_string(8), random_string(32), random_string(8), random_string(32), random_string(32), random_string(8), random_string(8), random_string(8), random_string(8), random_string(32), random_string(8), random_string(8), random_string(32), random_string(32), random_string(8), random_string(32), random_string(32), random_string(8), random_string(8), random_string(32), random_string(8), random_string(32), random_string(32), ]) return lst make_test(test_5()) # + def test_6(): lst = [] for i in range(5000): lst.extend([ random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), random.randint(1,99999), ]) lst = [lst] return lst make_test(test_6()) # + def test_7(): lst = [] for i in range(5000): lst.extend([ random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), random.random(), ]) lst = [lst] return lst make_test(test_7()) # + def make_plot(): tests = range(1,len(axon_times_dump)+1) plt.figure(figsize=(8.0, 10.0)) plt.subplot(2,1,1) plt.title('Dumping') 
plt.plot(tests, axon_times_dump, label='axon', marker='o') plt.plot(tests, json_times_dump, label='json', marker='o') if use_yaml: plt.plot(tests, yaml_times_dump, label='yaml', marker='o') plt.legend() plt.grid() plt.semilogy() plt.minorticks_on() plt.subplot(2,1,2) plt.title('Loading') plt.plot(tests, axon_times_load, label='axon', marker='o') plt.plot(tests, json_times_load, label='json', marker='o') if use_yaml: plt.plot(tests, yaml_times_load, label='yaml', marker='o') plt.legend() plt.semilogy() plt.grid() plt.minorticks_on() print('Dumping:') print('axon:', [('%.3f' % t) for t in axon_times_dump]) print('json:', [('%.3f' % t) for t in json_times_dump]) if use_yaml: print('yaml:', [('%.3f' % t) for t in yaml_times_dump]) print('Loading:') print('axon:', [('%.3f' % t) for t in axon_times_load]) print('json:', [('%.3f' % t) for t in json_times_load]) if use_yaml: print('yaml:', [('%.3f' % t) for t in yaml_times_load]) make_plot() # -
examples/axon_bench.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # CNN for binary classification of twitter sentiment

import pandas as pd
import numpy as np

data = pd.read_csv("../../core/data/tweet_global_warming.csv", encoding="latin")
print("Full dataset: {}".format(data.shape[0]))
data = data.dropna()
print("dataset without NaN: {}".format(data.shape[0]))
X = data.iloc[:, 0]
Y = data.iloc[:, 1]
print("Number of unique words: {}".format(len(np.unique(np.hstack(X)))))

# In the following cell I use keras' Tokenizer class to convert the sentiment column (Yes, Y, No, or no) to binary 0 (yes) or 1 (no)

# +
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv1D, MaxPooling1D, Embedding
from keras.preprocessing import sequence

# fix random seed for reproducibility
seed = 7
np.random.seed(seed)

# load the dataset but only keep the top n words, zero the rest
top_words = 5000

X = data.iloc[:, 0]
Y = list(map(str, data.iloc[:, 1]))

# Tokenize the tweets, keeping only the top_words most frequent words.
token = Tokenizer(num_words=top_words, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True, split=' ', char_level=False, oov_token=None)
token.fit_on_texts(texts=X)
X = token.texts_to_sequences(texts=X)

# Label tokenizer: the extra 'e', 's', 'o' filter characters strip
# "Yes"/"Y"/"No"/"no" down to two distinct tokens.
token = Tokenizer(num_words=None, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~eso', lower=True, split=' ', char_level=False, oov_token=None)
token.fit_on_texts(texts=Y)
Y = token.texts_to_sequences(texts=Y)
# Tokenizer ids start at 1; shift to binary labels {0, 1}.
Y = np.array(Y).reshape(-1) - 1

X_train, X_test, y_train, y_test = train_test_split(X, Y)

# pad dataset to a maximum review length in words
max_words = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_words)
X_test = sequence.pad_sequences(X_test, maxlen=max_words)

# create the model
# NOTE: Keras 2 argument names (filters/kernel_size/padding, pool_size,
# epochs) — the Keras 1 names used previously (nb_filter, filter_length,
# border_mode, pool_length, nb_epoch) are rejected by current Keras.
model = Sequential()
model.add(Embedding(top_words, 32, input_length=max_words))
model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())

# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=2, batch_size=128, verbose=1)

# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
# -

from matplotlib import pyplot

# Summarize review length (token counts per tweet); compute once and reuse.
lengths = list(map(len, X))
print("average tweet length: ")
print("{:.3f}".format(np.mean(lengths)))
pyplot.subplot(121)
pyplot.boxplot(lengths)
pyplot.subplot(122)
pyplot.hist(lengths)
pyplot.show()
examples/wesley_twitter_cnn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
# %matplotlib inline
import matplotlib.pylab as plt
import numpy as np

from sigvisa import Sigvisa
from sigvisa.graph.sigvisa_graph import SigvisaGraph
from sigvisa.signals.io import fetch_waveform
from sigvisa.source.event import Event
# -

def setup_graph(sta="PD31"):
    """Build a SigvisaGraph for a fixed 400 s window at `sta`.

    Fetches the waveform, applies a band-pass/envelope filter, and seeds the
    graph with two unassociated templates (tmid 2 and 30) at hard-coded
    parameter values so the association experiments below are reproducible.
    """
    stime = 1239041000
    etime = 1239041400
    s = Sigvisa()
    cursor = s.dbconn.cursor()
    wave = fetch_waveform(sta, chan="auto", stime=stime, etime=etime, cursor=cursor)
    wave = wave.filter("freq_0.8_4.5;env;hz_2")
    sg = SigvisaGraph(template_model_type="param", template_shape="lin_polyexp",
                      wiggle_model_type="dummy", wiggle_family="iid",
                      dummy_fallback=True, runids = (3,),
                      phases=["P", "S", "PcP", "ScP", "pP", "Pg", "Lg"],
                      base_srate=2.0, assume_envelopes=True,
                      smoothing=None, hack_param_constraint=True,
                      uatemplate_rate=1e-4)
    sg.add_wave(wave)
    # Hard-coded template parameter values for the two seeded unassociated
    # templates (taken from a previous inference run).
    vals2 = dict([('mult_wiggle_std', 0.46665908122829375), ('coda_decay', -3.8817786855627583), ('peak_offset', 1.8717964476973965), ('coda_height', 4.6420525405366337), ('arrival_time', 1239041071.3935068), ('peak_decay', -3.3833567577066019)])
    vals30 = dict([('mult_wiggle_std', 0.58921556906392525), ('coda_decay', -2.0648019615477597), ('peak_offset', 1.2059866915167161), ('coda_height', 4.9700447940413781), ('arrival_time', 1239041119.0734844), ('peak_decay', -1.8286789034181647)])
    wn = sg.station_waves.values()[0][0]
    sg.create_unassociated_template(wn, vals2['arrival_time'], tmid=2, initial_vals=vals2)
    # NOTE(review): both templates are created at vals2's arrival time even
    # though vals30 carries its own 'arrival_time' — confirm this is intended.
    sg.create_unassociated_template(wn, vals2['arrival_time'], tmid=30, initial_vals=vals30)
    return sg

sg = setup_graph()
ev = Event(lon=-105.427, lat=43.731, depth=0.0, time=1239041017.07, mb=4.0)
sg.current_log_p_breakdown()

wn = sg.station_waves.values()[0][0]
wn.plot(ax=plt.gca())

# +
from collections import defaultdict
import itertools

from sigvisa.models.ttime import tt_predict
from sigvisa.infer.event_birthdeath import template_association_logodds, associate_template, unassociate_template
from sigvisa.utils.counter import Counter

def joint_association_distribution(sg, wn, eid, phases, ignore_mb=False, max_ttr=25.0):
    """Return a normalized Counter over joint (phase -> tmid) assignments.

    For each phase, candidates are "birth a new template" (None, with weight
    sg.uatemplate_rate) plus every unassociated template whose arrival time
    is within max_ttr seconds of the predicted arrival. Joint assignments
    that reuse a template for two phases are excluded.
    """
    ev = sg.get_event(eid)
    possible_associations = defaultdict(list)
    for phase in phases:
        pred_atime = ev.time + tt_predict(ev, wn.sta, phase=phase)
        possible_associations[phase].append((None, sg.uatemplate_rate))
        for tmid in sg.uatemplate_ids[(wn.sta,wn.chan,wn.band)]:
            tmnodes = sg.uatemplates[tmid]
            atime = tmnodes['arrival_time'].get_value()
            if np.abs(atime - pred_atime) < max_ttr:
                odds = np.exp(template_association_logodds(sg, wn, tmid, eid, phase, ignore_mb=ignore_mb))
                possible_associations[phase].append((tmid, odds))
                print "odds for", phase, tmid, "are", odds
    vals = [possible_associations[k] for k in phases]
    joint_dist = Counter()
    for assoc in itertools.product(*vals):
        tmids, odds = zip(*assoc)
        nontrivial_tmids = [t for t in tmids if t is not None]
        if len(set(nontrivial_tmids)) != len(nontrivial_tmids):
            # duplicate tmid, assigned to two phases
            continue
        joint_dist[tmids] = np.prod(odds)
    joint_dist.normalize()
    return joint_dist
# -

# +
from sigvisa.infer.event_birthdeath import smart_peak_time_proposal, heuristic_amplitude_posterior, ev_death_helper

def clean_propose_phase_template(sg, wn, eid, phase, fix_result=None, debug_info=None, ev=None):
    """Propose (or replay, via fix_result) values for one phase template.

    Samples most template params from the event-conditional prior, then the
    arrival time from the signal (smart peak proposal), then the amplitude
    from a heuristic posterior. Returns (tmvals, log proposal density).
    """
    # add the given phase to the graph, with appropriate proposal values (or fix_result)
    # return the tmvals and stuff
    if ev is None:
        ev = sg.get_event(eid)
    # add the template
    #sg._topo_sort()
    # sample most of the template params from the event-conditional prior
    tmnodes = sg.get_template_nodes(eid, wn.sta, phase, wn.band, wn.chan)
    k_ttr, n_ttr = tmnodes["tt_residual"]
    k_time, n_time = tmnodes["arrival_time"]
    k_amp, n_amp = tmnodes["amp_transfer"]
    k_height, n_height = tmnodes["coda_height"]
    tmvals = {}
    log_q = 0.0
    for param, (k, n) in tmnodes.items():
        # tt_residual and amp_transfer are handled specially below
        if param in ("tt_residual", "amp_transfer") or n.deterministic():
            continue
        if fix_result is not None:
            n.set_value(fix_result[param])
        else:
            n.parent_sample()
        param_lp = n.log_p()
        log_q += param_lp
        tmvals[param] = n.get_value()
        if debug_info is not None:
            debug_info[param] = (tmvals[param], param_lp, param_lp)
        print "param", param, "val", tmvals[param], "lp", param_lp
    # then sample atime from the signal
    pred_atime = ev.time + tt_predict(ev, wn.sta, phase)
    debug_lps = {}
    if fix_result is not None:
        tmvals["arrival_time"] = fix_result["arrival_time"]
    peak_lp = smart_peak_time_proposal(sg, wn, tmvals, eid, phase, pred_atime,
                                       use_correlation=False,
                                       prebirth_unexplained=None,
                                       exclude_arrs=[],
                                       fix_result=(fix_result is not None))
    n_time.set_value(key=k_time, value = tmvals["arrival_time"])
    log_q += peak_lp
    if debug_info is not None:
        debug_info["tt_residual"] = (n_ttr.get_value(), peak_lp, n_ttr.log_p())
    # then sample amplitude from the signal
    amp_dist = heuristic_amplitude_posterior(sg, wn, tmvals, eid, phase, exclude_arrs=[(eid, phase)], unexplained = None, full_tssm_proposal=False)
    if fix_result is not None:
        tmvals["coda_height"] = fix_result["coda_height"]
    else:
        tmvals["coda_height"] = amp_dist.sample()
    n_height.set_value(key=k_height, value=tmvals["coda_height"])
    amp_lp = amp_dist.log_p(tmvals["coda_height"])
    log_q += amp_lp
    tmvals["amp_transfer"] = n_amp.get_value(key=k_amp)
    if debug_info is not None:
        debug_info["amp_transfer"] = (n_amp.get_value(), amp_lp, n_amp.log_p())
    return tmvals, log_q

def current_template_proposal(sg, wn, eid, site="NVAR", phase="P", fix_result=False):
    """Baseline proposal using the library's propose_phase_template (kept for comparison)."""
    from sigvisa.infer.event_birthdeath import propose_phase_template
    tg = sg.template_generator(phase)
    sg.add_event_site_phase(tg, site, phase, sg.evnodes[eid], sample_templates=True)
    sg._topo_sort()
    tmvals = sg.get_template_vals(eid, wn.sta, phase, wn.band, wn.chan)
    tmvals, tmpl_lp, debug_lps = propose_phase_template(sg, wn, eid, phase,
                                                        use_correlation=False,
                                                        prebirth_unexplained=None,
                                                        include_presampled = True,
                                                        tmvals = tmvals,
                                                        return_debug=True,
                                                        fix_result=fix_result)
    sg.set_template(eid, wn.sta, phase, wn.band, wn.chan, tmvals)
    log_qforward = tmpl_lp
    return tmvals, tmpl_lp

def joint_birth_helper(sg, proposed_ev, fix_result=None, eid=None, associate_using_mb=True):
    """Birth an event and jointly associate/propose templates at every station.

    Returns (log_qforward, revert_move) when replaying a fixed result, or
    (log_qforward, log_qbackward, revert_move, eid) for a fresh proposal.
    """
    s = Sigvisa()
    # TODO: handle "unexplained" signal bookkeeping and debugging output here.
    inverse_fns = []
    if fix_result is None:
        evnodes = sg.add_event(proposed_ev, eid=eid, no_templates=True)
        eid = evnodes['mb'].eid
    log_qforward = 0.0
    birth_record = {}
    for site,elements in sg.site_elements.items():
        site_phases = sorted(sg.predict_phases_site(proposed_ev, site=site))
        for sta in elements:
            for wn in sg.station_waves[sta]:
                band, chan = wn.band, wn.chan
                jd = joint_association_distribution(sg, wn, eid=eid, phases=site_phases)
                if fix_result is None:
                    assoc_tmids = jd.sample()
                    assoc_lp = np.log(jd[assoc_tmids])
                else:
                    assoc_tmids = fix_result[wn.label]["assoc_tmids"]
                    assoc_lp = np.log(jd[assoc_tmids]) if assoc_tmids in jd else -np.inf
                print "using assoc", zip(site_phases, assoc_tmids), "with lp", assoc_lp
                log_qforward += assoc_lp
                for i_phase, phase in enumerate(site_phases):
                    assoc_tmid = assoc_tmids[i_phase]
                    if assoc_tmid is not None:
                        associate_template(sg, wn, assoc_tmid, eid, phase, create_phase_arrival=True)
                        # default args bind the loop variables at definition time
                        inverse_fns.append(lambda wn=wn,phase=phase,tmid=assoc_tmid: unassociate_template(sg, wn, eid, phase, tmid=tmid))
                    else:
                        fr_phase = None
                        if fix_result is not None:
                            fr_phase = fix_result[wn.label][phase]
                        # propose values for this template and create it in the graph
                        #tmvals, tmpl_lp = current_template_proposal(sg, wn, eid, site=site, phase=phase, fix_result=fr_phase)
                        tmvals, tmpl_lp = clean_propose_phase_template(sg, wn, eid, phase=phase, fix_result=fr_phase)
                        log_qforward += tmpl_lp
                    birth_record[(wn, phase)] = (assoc_tmid is not None, None)
    inverse_fns.append(lambda : sg.remove_event(eid))
    sg._topo_sort()
    def revert_move():
        for fn in inverse_fns:
            fn()
    if fix_result is not None:
        return log_qforward, revert_move
    log_qbackward = ev_death_helper(sg, eid, use_correlation=False, associate_using_mb=associate_using_mb, fix_result=birth_record)
    return log_qforward, log_qbackward, revert_move, eid
# -

# +
import copy

from sigvisa.infer.event_birthdeath import sample_deassociation_proposal

def ev_birth_move(sg, location_proposal, debug_info=None, fix_result=None):
    """Propose (or replay) an event location and add the event to the graph."""
    if fix_result is not None:
        ev, eid = fix_result
        log_qforward = location_proposal(sg, fix_result=ev)
    else:
        ev, log_qforward, extra = location_proposal(sg)
        eid = None
    evnodes = sg.add_event(ev, eid=eid, no_templates=True)
    eid = evnodes["loc"].eid
    if debug_info is not None:
        evnodes = set(sg.evnodes[eid].values())
        evlps = [(n.label.split(";")[1], n.log_p()) for n in evnodes]
        evlps += [('nevents', np.log(sg.event_rate))]
        debug_info["ev"] = (ev, log_qforward, evlps)
    def replicate_move():
        sg.add_event(ev, eid=eid, no_templates=True)
    return log_qforward, replicate_move, eid

def ev_death_move(sg, fix_result=None):
    """Remove an event; returns (log_qforward, replicate_move, ev)."""
    # NOTE(review): if fix_result is None, `eid` is never bound before use
    # below — this path appears unused in this notebook; confirm.
    if fix_result is not None:
        eid = fix_result
    # uniform choice over existing events
    log_qforward = -np.log(len(sg.evnodes.keys()))
    def replicate_move():
        sg.remove_event(eid)
    ev = sg.get_event(eid)
    replicate_move()
    return log_qforward, replicate_move, ev

def ev_sta_template_birth_helper(sg, wn, eid, site_phases, fix_result=None, debug_info=None, associate_using_mb=True):
    """Associate or birth templates for all phases of `eid` at one waveform.

    Returns (log_qforward, replicate_fn, birth_record) where birth_record maps
    phase -> (was_associated, assoc_tmid, None) for the matching death helper.
    """
    log_qforward = 0
    band, chan = wn.band, wn.chan
    jd = joint_association_distribution(sg, wn, eid=eid, phases=site_phases)
    if fix_result is None:
        assoc_tmids = jd.sample()
        assoc_lp = np.log(jd[assoc_tmids])
    else:
        assoc_tmids = fix_result["assoc_tmids"]
        assoc_lp = np.log(jd[assoc_tmids]) if assoc_tmids in jd else -np.inf
    print "using assoc", zip(site_phases, assoc_tmids), "with lp", assoc_lp
    log_qforward += assoc_lp
    if debug_info is not None:
        debug_info["assoc"] = (zip(site_phases, assoc_tmids), assoc_lp, 0.0)
    replicate_fns = []
    birth_record = {}
    for i_phase, phase in enumerate(site_phases):
        debug_phase = None
        if debug_info is not None:
            debug_info[phase] = {}
            debug_phase = debug_info[phase]
        assoc_tmid = assoc_tmids[i_phase]
        if assoc_tmid is not None:
            if debug_phase is not None:
                # record the unassociated-template log-probs before associating
                tmnodes = sg.get_template_nodes(-assoc_tmid, wn.sta, "UA", wn.band, wn.chan)
                ualps = dict([(param, n.log_p()) for (param, (k,n)) in tmnodes.items()])
            associate_template(sg, wn, assoc_tmid, eid, phase, create_phase_arrival=True)
            replicate_fns.append(lambda sg=sg,wn=wn,phase=phase,eid=eid,tmid=assoc_tmid: \
                                 associate_template(sg, wn, tmid, eid, phase, create_phase_arrival=True))
            if debug_phase is not None:
                tmnodes = sg.get_template_nodes(eid, wn.sta, phase, wn.band, wn.chan)
                for (param, (k, n)) in tmnodes.items():
                    if n.deterministic():
                        continue
                    evlp = n.log_p()
                    # map event-param names onto their unassociated equivalents
                    uaparam = param
                    if param == "amp_transfer":
                        uaparam = "coda_height"
                    elif param == "tt_residual":
                        uaparam = "arrival_time"
                    ualp = ualps[uaparam]
                    if uaparam=="arrival_time":
                        ualp = np.log(sg.uatemplate_rate)
                    v = n.get_value(key=k)
                    debug_phase[param] = (v, ualp, evlp)
        else:
            # add random variables for this phase to the graph (initially set to dummy values)
            site = Sigvisa().get_array_site(wn.sta)
            tg = sg.template_generator(phase)
            f1 = lambda tg=tg, site=site, phase=phase, eid=eid : \
                sg.add_event_site_phase(tg, site, phase, sg.evnodes[eid], sample_templates=False)
            f1()
            replicate_fns.append(f1)
            # now propose values for this template
            fr_phase = None
            if fix_result is not None:
                fr_phase = fix_result[phase]
            tmvals, tmpl_lp = clean_propose_phase_template(sg, wn, eid, phase=phase, fix_result=fr_phase, debug_info=debug_phase)
            # to replay this move, we just need to reset the proposed values
            tmvals = copy.deepcopy(tmvals)
            print wn.label, phase, "amp_transfer", tmvals["amp_transfer"]
            def rf(eid=eid, wn=wn, tmvals=tmvals, phase=phase):
                sg.set_template(eid, wn.sta, phase, wn.band, wn.chan, tmvals)
            replicate_fns.append(rf)
            log_qforward += tmpl_lp
        birth_record[(phase)] = (assoc_tmid is not None, assoc_tmid, None)
    def replicate_fn():
        for fn in replicate_fns:
            fn()
    return log_qforward, replicate_fn, birth_record

def ev_sta_template_death_helper(sg, wn, eid, fix_result=None, debug_info=None):
    """Deassociate or delete templates for all phases of `eid` at one waveform.

    Inverse of ev_sta_template_birth_helper; returns
    (log_qforward, replicate_move, death_record).
    """
    death_record = {}
    s = Sigvisa()
    site = s.get_array_site(wn.sta)
    sta = wn.sta
    replicate_fns = []
    assoc_tmids = {}
    log_qforward = 0.0
    site_phases = sg.ev_arriving_phases(eid, sta)
    for phase in site_phases:
        if fix_result is not None:
            deassociate, tmid, fixed_tmvals = fix_result[phase]
        else:
            deassociate = None
            tmid = None
            fixed_tmvals = None
        deassociate, deassociate_logprob = sample_deassociation_proposal(sg, wn, eid, phase, fix_result = deassociate)
        log_qforward += deassociate_logprob
        if deassociate:
            tmid = unassociate_template(sg, wn, eid, phase, tmid=tmid)
            replicate_fns.append(lambda wn=wn,phase=phase,eid=eid,tmid=tmid: \
                                 unassociate_template(sg, wn, eid, phase, tmid=tmid))
            print "proposing to deassociate %s at %s (lp %.1f)" % (phase, sta, deassociate_logprob)
            assoc_tmids[phase] = tmid
        else:
            # NOTE(review): when fix_result is not None this branch reads
            # template_param_array without binding it first — confirm that
            # replayed moves never take this path with an unbound value.
            if fix_result is None:
                template_param_array = sg.get_template_vals(eid, wn.sta, phase, wn.band, wn.chan)
            sg.delete_event_phase(eid, wn.sta, phase)
            replicate_fns.append(lambda eid=eid, sta=wn.sta, phase=phase: sg.delete_event_phase(eid, sta, phase))
            print "proposing to delete %s at %s (lp %f)"% (phase, sta, deassociate_logprob)
            death_record[phase] = template_param_array
    sorted_tmids = [assoc_tmids[phase] if phase in assoc_tmids else None for phase in sorted(site_phases)]
    death_record["assoc_tmids"] = tuple(sorted_tmids)
    def replicate_move():
        for fn in replicate_fns:
            fn()
    return log_qforward, replicate_move, death_record

def ev_template_birth_helper(sg, eid, fix_result=None, associate_using_mb=True, debug_info=None):
    """Run the per-station birth helper across every site/station/waveform."""
    death_records = {}
    replicate_fns = []
    log_qforward = 0
    proposed_ev = sg.get_event(eid)
    for site,elements in sg.site_elements.items():
        site_phases = sorted(sg.predict_phases_site(proposed_ev, site=site))
        for sta in elements:
            debug_info_sta = None
            if fix_result is None and debug_info is not None:
                if sta not in debug_info:
                    debug_info[sta] = dict()
                debug_info_sta = debug_info[sta]
                debug_info[sta]["wn_old"] = np.sum([wn.log_p() for wn in sg.station_waves[sta]])
            for wn in sg.station_waves[sta]:
                fr_sta = None
                if fix_result is not None:
                    fr_sta = fix_result[wn.label]
                lqf_sta, replicate_sta, death_sta = ev_sta_template_birth_helper(sg, wn, eid=eid, site_phases=site_phases, fix_result=fr_sta, associate_using_mb=associate_using_mb, debug_info=debug_info_sta)
                log_qforward += lqf_sta
                replicate_fns.append(replicate_sta)
                death_records[wn.label] = death_sta
            if debug_info_sta is not None:
                debug_info[sta]["wn_new"] = np.sum([wn.log_p() for wn in sg.station_waves[sta]])
    def replicate_move():
        for fn in replicate_fns:
            fn()
    return log_qforward, replicate_move, death_records

def ev_template_death_helper(sg, eid, fix_result=None):
    """Run the per-station death helper across every site/station/waveform."""
    birth_records = {}
    replicate_fns = []
    log_qforward = 0.0
    for site,elements in sg.site_elements.items():
        for sta in elements:
            for wn in sg.station_waves[sta]:
                fr_sta = None
                if fix_result is not None:
                    fr_sta = fix_result[wn.label]
                lqf_sta, replicate_sta, birth_sta = ev_sta_template_death_helper(sg, wn, eid, fix_result=fr_sta)
                replicate_fns.append(replicate_sta)
                log_qforward += lqf_sta
                birth_records[wn.label] = birth_sta
    def replicate_move():
        for fn in replicate_fns:
            fn()
    return log_qforward, replicate_move, birth_records

def dummy_proposal(sg, fix_result=None):
    """Location proposal that deterministically returns the known test event."""
    ev = Event(lon=-105.427, lat=43.731, depth=0.0, time=1239041017.07, mb=4.0)
    if fix_result is not None:
        return 0.0
    return ev, 0.0, ()

def birth_move(sg, location_proposal, associate_using_mb=True):
    """Full birth move: propose event + templates, then immediately undo it.

    Used here to sanity-check that the reverse (death) replay restores the
    original graph log-probability exactly.
    """
    log_qforward = 0.0
    log_qbackward = 0.0
    lp_old = sg.current_log_p()
    debug_info = {}
    lq_loc, replicate_birth, eid = ev_birth_move(sg, location_proposal, debug_info=debug_info)
    log_qforward += lq_loc
    lqf, replicate_tmpls, death_records = ev_template_birth_helper(sg, eid, associate_using_mb=True, debug_info=debug_info)
    log_qforward += lqf
    sg._topo_sort()
    lp_new = sg.current_log_p()
    sg.current_log_p_breakdown()
    print [(n.label, n.get_value(), n.log_p()) for n in sg.all_nodes.values() if "amp_transfer" in n.label]
    print [(n.label, n.get_value()) for n in sg.all_nodes.values() if "coda_height" in n.label]
    lqb, replicate_untmpls, birth_records = ev_template_death_helper(sg, eid, fix_result=death_records)
    log_qbackward += lqb
    lq_death, replicate_death, ev = ev_death_move(sg, fix_result=eid)
    log_qbackward += lq_death
    sg._topo_sort()
    lp_old2 = sg.current_log_p()
    # NOTE(review): exact float equality — only holds if the log-p
    # computation is bitwise deterministic across the round trip.
    assert(lp_old2 == lp_old)
    def rebirth():
        replicate_birth()
        replicate_tmpls()
        sg._topo_sort()
    def redeath():
        replicate_untmpls()
        # maybe not technically necessary
        replicate_death()
        sg._topo_sort()
    return log_qforward, log_qbackward, rebirth, redeath, lp_new, lp_old, lp_old2, debug_info
# -

# +
sg = setup_graph()
ev = Event(lon=-105.427, lat=43.731, depth=0.0, time=1239041017.07, mb=4.0)
lp_old = sg.current_log_p()
log_qforward, log_qbackward, rebirth, redeath, lp_new, lp_old1, lp_old2, debug_info = birth_move(sg, dummy_proposal)
#lp_new = sg.current_log_p()
# -

def prettyprint_debug(birth_debug):
    """Format the debug_info from a birth move into a readable score report.

    Uses PHASESCORE/STASCORE placeholder substitution so each section's
    header can show a total computed only after its items are printed.
    """
    s = ""
    # track lp_new - (lp_old + log_qforward)
    overall_score = 0
    ev, lq_ev, lps_ev = birth_debug["ev"]
    s += "proposed ev %s\n" % ev
    s += "proposal logq %.2f, lps " % lq_ev
    total_lp = 0
    for (param, lp) in lps_ev:
        s += "%s %.2f, " % (param, lp)
        total_lp += lp
    delta = total_lp - lq_ev
    s += "total %.2f, delta %.2f\n" % (total_lp, delta)
    overall_score = delta
    for sta in birth_debug.keys():
        if sta=="ev":
            continue
        wn_old, wn_new = birth_debug[sta]["wn_old"], birth_debug[sta]["wn_new"]
        wn_delta = wn_new - wn_old
        phase_assocs, assoc_lp, _ = birth_debug[sta]["assoc"]
        s += "sta %s wn_old %.2f wn_new %.2f wn_delta %.2f assoc %.2f overall STASCORE\n" % (sta, wn_old, wn_new, wn_delta, assoc_lp)
        sta_score = wn_delta - assoc_lp
        phase_assocs = dict(phase_assocs)
        for phase in birth_debug[sta].keys():
            if "wn" in phase or phase=="assoc" :
                continue
            phase_score = 0
            tmid = phase_assocs[phase]
            if tmid is not None:
                s += " phase %s: associating tmid %d, score PHASESCORE\n" % (phase, tmid)
            else:
                s += " phase %s: birthing new template, score PHASESCORE\n" % (phase)
            d = birth_debug[sta][phase]
            for param, (v, lp1, lp2) in sorted(d.items()):
                if param=="assoc":
                    continue
                delta = lp2-lp1
                if tmid is not None:
                    s += " %s %.2f ualp %.2f evlp %.2f delta %.2f\n" % (param, v, lp1, lp2, delta)
                else:
                    s += " %s %.2f logq %.2f logp %.2f delta %.2f\n" % (param, v, lp1, lp2, delta)
                phase_score += delta
            s = s.replace("PHASESCORE", "%.2f" % phase_score)
            sta_score += phase_score
        overall_score += sta_score
        s = s.replace("STASCORE", "%.2f" % sta_score)
    s += "final proposal score %.2f\n" % overall_score
    s += "REMEMBER: the acceptance ratio also includes the reverse proposal probability from the death move.\n"
    return s

# +
print lp_old, lp_old2, lp_new
print log_qforward, log_qbackward
print (lp_new + log_qbackward) - (lp_old + log_qforward)
# -

rebirth()
print sg.current_log_p()

redeath()
print sg.current_log_p()

# +
def ev_death_helper_new(sg, eid, use_correlation=False, associate_using_mb=True, fix_result=None):
    """Death move: deassociate/delete all phase templates, remove the event.

    Returns move_logprob when replaying a fixed result, otherwise
    (move_logprob, reverse_logprob, revert_move).
    """
    # fix_result is a dict mapping (wn, phase) -> (disassociated, tmvals)
    ev = sg.get_event(eid)
    next_uatemplateid = sg.next_uatemplateid
    move_logprob = 0
    reverse_logprob = 0
    forward_fns = []
    inverse_fns = []
    inverse_fns.append(lambda : sg.add_event(ev, eid=eid))
    tmids = []
    tmid_i = 0
    death_record = {}
    for elements in sg.site_elements.values():
        for sta in elements:
            for wn in sg.station_waves[sta]:
                death_record[wn.label] = {}
                s = Sigvisa()
                site = s.get_array_site(sta)
                template_param_array = None
                site_phases = sg.ev_arriving_phases(eid, sta)
                assoc_tmids = {}
                for phase in sg.ev_arriving_phases(eid, sta):
                    #if (eid, phase) not in wn.arrivals():
                    #    continue
                    if fix_result is not None:
                        deassociate, fixed_tmvals = fix_result[(wn, phase)]
                    else:
                        deassociate = None
                        fixed_tmvals = None
                    deassociate, deassociate_logprob = sample_deassociation_proposal(sg, wn, eid, phase, fix_result = deassociate)
                    move_logprob += deassociate_logprob
                    if deassociate:
                        tmid = unassociate_template(sg, wn, eid, phase)
                        inverse_fns.append(lambda wn=wn,phase=phase,tmid=tmid: associate_template(sg, wn, tmid, eid, phase))
                        print "proposing to deassociate %s at %s (lp %.1f)" % (phase, sta, deassociate_logprob)
                        assoc_tmids[phase] = tmid
                    else:
                        if fix_result is None:
                            template_param_array = sg.get_template_vals(eid, wn.sta, phase, wn.band, wn.chan)
                        sg.delete_event_phase(eid, wn.sta, phase)
                        print "TODO: inverse of delete phase is create phase"
                        inverse_fns.append(lambda wn=wn,phase=phase,template_param_array=template_param_array : sg.set_template(eid,wn.sta, phase, wn.band, wn.chan, template_param_array))
                        print "proposing to delete %s at %s (lp %f)"% (phase, sta, deassociate_logprob)
                        death_record[wn.label][phase] = template_param_array
                sorted_tmids = [assoc_tmids[phase] if phase in assoc_tmids else None for phase in sorted(site_phases)]
                death_record[wn.label]["assoc_tmids"] = tuple(sorted_tmids)
    sg.remove_event(eid)
    sg._topo_sort()
    if fix_result is not None:
        return move_logprob
    reverse_logprob = joint_birth_helper(sg, ev, associate_using_mb=associate_using_mb, eid=eid, fix_result=death_record)
    def revert_move():
        for fn in inverse_fns:
            fn()
        sg._topo_sort()
        sg.next_uatemplateid = next_uatemplateid
    return move_logprob, reverse_logprob, revert_move
# -

s = prettyprint_debug(debug_info)
print s

revert_move()
lpo = sg.current_log_p()
print lpo, lp_old

f = plt.figure(figsize=(20, 5))
ax = f.add_subplot(111)
wn.plot(ax)
ax.set_xlim([1239041000.0, 1239041400.0])

# NOTE(review): these limits look like they are missing a digit compared with
# the window times above (1239041000 vs 123904090) — confirm.
ax.set_xlim([123904090.0, 123904590.0])
notebooks/fancy_template_joint_association_proposals.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Django Shell-Plus # language: python # name: django_extensions # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Name" data-toc-modified-id="Name-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Name</a></span></li><li><span><a href="#Search" data-toc-modified-id="Search-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Search</a></span><ul class="toc-item"><li><span><a href="#Load-Cached-Results" data-toc-modified-id="Load-Cached-Results-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Load Cached Results</a></span></li><li><span><a href="#Build-Model-From-Google-Images" data-toc-modified-id="Build-Model-From-Google-Images-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Build Model From Google Images</a></span></li></ul></li><li><span><a href="#Analysis" data-toc-modified-id="Analysis-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Analysis</a></span><ul class="toc-item"><li><span><a href="#Gender-cross-validation" data-toc-modified-id="Gender-cross-validation-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Gender cross validation</a></span></li><li><span><a href="#Face-Sizes" data-toc-modified-id="Face-Sizes-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Face Sizes</a></span></li><li><span><a href="#Screen-Time-Across-All-Shows" data-toc-modified-id="Screen-Time-Across-All-Shows-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Screen Time Across All Shows</a></span></li><li><span><a href="#Appearances-on-a-Single-Show" data-toc-modified-id="Appearances-on-a-Single-Show-3.4"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>Appearances on a Single Show</a></span></li><li><span><a href="#Other-People-Who-Are-On-Screen" 
data-toc-modified-id="Other-People-Who-Are-On-Screen-3.5"><span class="toc-item-num">3.5&nbsp;&nbsp;</span>Other People Who Are On Screen</a></span></li></ul></li><li><span><a href="#Persist-to-Cloud" data-toc-modified-id="Persist-to-Cloud-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Persist to Cloud</a></span><ul class="toc-item"><li><span><a href="#Save-Model-to-Google-Cloud-Storage" data-toc-modified-id="Save-Model-to-Google-Cloud-Storage-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>Save Model to Google Cloud Storage</a></span></li><li><span><a href="#Save-Labels-to-DB" data-toc-modified-id="Save-Labels-to-DB-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>Save Labels to DB</a></span><ul class="toc-item"><li><span><a href="#Commit-the-person-and-labeler" data-toc-modified-id="Commit-the-person-and-labeler-4.2.1"><span class="toc-item-num">4.2.1&nbsp;&nbsp;</span>Commit the person and labeler</a></span></li><li><span><a href="#Commit-the-FaceIdentity-labels" data-toc-modified-id="Commit-the-FaceIdentity-labels-4.2.2"><span class="toc-item-num">4.2.2&nbsp;&nbsp;</span>Commit the FaceIdentity labels</a></span></li></ul></li></ul></li></ul></div> # - from esper.prelude import * from esper.identity import * from esper import embed_google_images # # Name # Please add the person's name and their expected gender below (Male/Female). name = '<NAME>' gender = 'Male' # # Search # ## Load Cached Results # Reads cached identity model from local disk. Run this if the person has been labelled before and you only wish to regenerate the graphs. Otherwise, if you have never created a model for this person, please see the next section. assert name != '' results = FaceIdentityModel.load(name=name) imshow(tile_imgs([cv2.resize(x[1][0], (200, 200)) for x in results.model_params['images']], cols=10)) plt.show() plot_precision_and_cdf(results) # ## Build Model From Google Images # Run this section if you do not have a cached model and precision curve estimates. 
# This section will grab images using Google Image Search and score each of the
# faces in the dataset. We will interactively build the precision vs score curve.
#
# It is important that the images that you select are accurate. If you make a
# mistake, rerun the cell below.

# +
assert name != ''

# Grab face images from Google
img_dir = embed_google_images.fetch_images(name)

# If the images returned are not satisfactory, rerun the above with extra params:
#   query_extras=''  # additional keywords to add to search
#   force=True       # ignore cached images
face_imgs = load_and_select_faces_from_images(img_dir)
face_embs = embed_google_images.embed_images(face_imgs)
assert(len(face_embs) == len(face_imgs))

reference_imgs = tile_imgs([cv2.resize(x[0], (200, 200)) for x in face_imgs if x], cols=10)

def show_reference_imgs():
    """Display the user-selected reference images for the target person."""
    print('User selected reference images for {}.'.format(name))
    imshow(reference_imgs)
    plt.show()

show_reference_imgs()
# -

# Score all of the faces in the dataset (this can take a minute)
face_ids_by_bucket, face_ids_to_score = face_search_by_embeddings(face_embs)
precision_model = PrecisionModel(face_ids_by_bucket)

# Now we will validate which of the images in the dataset are of the target identity.
#
# __Hover over with mouse and press S to select a face. Press F to expand the frame.__

show_reference_imgs()
print(('Mark all images that ARE NOT {}. Thumbnails are ordered by DESCENDING distance '
       'to your selected images. (The first page is more likely to have non "{}" images.) '
       'There are a total of {} frames. (CLICK THE DISABLE JUPYTER KEYBOARD BUTTON '
       'BEFORE PROCEEDING.)').format(
    name, name, precision_model.get_lower_count()))
lower_widget = precision_model.get_lower_widget()
lower_widget

show_reference_imgs()
# BUG FIX: this prompt previously reported get_lower_count() (copy-paste from
# the lower-bucket cell above); the upper widget pages through the *upper*
# buckets, so report the upper-bucket frame count.
print(('Mark all images that ARE {}. Thumbnails are ordered by ASCENDING distance '
       'to your selected images. (The first page is more likely to have "{}" images.) '
       'There are a total of {} frames. (CLICK THE DISABLE JUPYTER KEYBOARD BUTTON '
       'BEFORE PROCEEDING.)').format(
    name, name, precision_model.get_upper_count()))
upper_widget = precision_model.get_upper_widget()
upper_widget

# Run the following cell after labelling to compute the precision curve.
# Do not forget to re-enable jupyter shortcuts.

# +
# Compute the precision from the selections
lower_precision = precision_model.compute_precision_for_lower_buckets(lower_widget.selected)
upper_precision = precision_model.compute_precision_for_upper_buckets(upper_widget.selected)
precision_by_bucket = {**lower_precision, **upper_precision}

results = FaceIdentityModel(
    name=name,
    face_ids_by_bucket=face_ids_by_bucket,
    face_ids_to_score=face_ids_to_score,
    precision_by_bucket=precision_by_bucket,
    model_params={
        'images': list(zip(face_embs, face_imgs))
    }
)
plot_precision_and_cdf(results)
# -

# The next cell persists the model locally.

results.save()

# # Analysis

# ## Gender cross validation
#
# Situations where the identity model disagrees with the gender classifier may
# be cause for alarm. We would like to check that instances of the person have
# the expected gender as a sanity check. This section shows the breakdown of
# the identity instances and their labels from the gender classifier.

# +
gender_breakdown = compute_gender_breakdown(results)
print('Expected counts by gender:')
for k, v in gender_breakdown.items():
    print(' {} : {}'.format(k, int(v)))
print()
print('Percentage by gender:')
denominator = sum(v for v in gender_breakdown.values())
for k, v in gender_breakdown.items():
    print(' {} : {:0.1f}%'.format(k, 100 * v / denominator))
print()
# -

# Situations where the identity detector returns high confidence, but where
# the gender is not the expected gender indicate either an error on the part
# of the identity detector or the gender detector. The following visualization
# shows randomly sampled images, where the identity detector returns high
# confidence, grouped by the gender label.
high_probability_threshold = 0.8
show_gender_examples(results, high_probability_threshold)

# ## Face Sizes
#
# Faces shown on-screen vary in size. For a person such as a host, they may be
# shown in a full body shot or as a face in a box. Faces in the background or
# those part of side graphics might be smaller than the rest. When calculating
# screen time for a person, we would like to know whether the results represent
# the time the person was featured as opposed to merely in the background or as
# a tiny thumbnail in some graphic.
#
# The next cell plots the distribution of face sizes. Some possible anomalies
# include there only being very small faces or large faces.

plot_histogram_of_face_sizes(results)

# The histogram above shows the distribution of face sizes, but not how those
# sizes occur in the dataset. For instance, one might ask why some faces are so
# large or whether the small faces are actually errors. The following cell
# groups example faces, which are of the target identity with probability, by
# their sizes in terms of screen area.

high_probability_threshold = 0.8
show_faces_by_size(results, high_probability_threshold, n=10)

# ## Screen Time Across All Shows
#
# One question that we might ask about a person is whether they received a
# significantly different amount of screen time on different shows. The
# following section visualizes the amount of screen time by show in total
# minutes and also in proportion of the show's total time. For a celebrity or
# political figure such as <NAME>, we would expect significant screen time on
# many shows. For a show host such as <NAME>, we expect the screen time to be
# high for shows hosted by Wolf Blitzer.

screen_time_by_show = get_screen_time_by_show(results)
plot_screen_time_by_show(name, screen_time_by_show)

# We might also wish to validate these findings by comparing to whether the
# person's name is mentioned in the subtitles. This might be helpful in
# determining whether extra or lack of screen time for a person may be due to
# a show's aesthetic choices. The following plots compare the screen time with
# the number of caption mentions.

caption_mentions_by_show = get_caption_mentions_by_show([name.upper()])
plot_screen_time_and_other_by_show(name, screen_time_by_show, caption_mentions_by_show, 'Number of caption mentions', 'Count')

# ## Appearances on a Single Show
#
# For people such as hosts, we would like to examine in greater detail the
# screen time allotted for a single show. First, fill in a show below.

show_name = 'The 11th Hour With <NAME>'

# Compute the screen time for each video of the show
screen_time_by_video_id = compute_screen_time_by_video(results, show_name)

# One question we might ask about a host is "how long they are shown on
# screen" for an episode. Likewise, we might also ask for how many episodes
# the host is not present due to being on vacation or on assignment elsewhere.
# The following cell plots a histogram of the distribution of the length of
# the person's appearances in videos of the chosen show.

plot_histogram_of_screen_times_by_video(name, show_name, screen_time_by_video_id)

# For a host, we expect screen time over time to be consistent as long as the
# person remains a host. For figures such as <NAME>, we expect the screen time
# to track events in the real world such as the lead-up to the 2016 election
# and then to drop afterwards. The following cell plots a time series of the
# person's screen time over time. Each dot is a video of the chosen show.
# Red Xs are videos for which the face detector did not run.

plot_screentime_over_time(name, show_name, screen_time_by_video_id)

# We hypothesized that a host is more likely to appear at the beginning of a
# video and then also appear throughout the video. The following plot
# visualizes the distribution of shot beginning times for videos of the show.

plot_distribution_of_appearance_times_by_video(results, show_name)

# In section 3.3, we saw that some shows may have much larger variance in the
# screen time estimates than others. This may be because a host or frequent
# guest appears similar to the target identity. Alternatively, the images of
# the identity may be consistently low quality, leading to lower scores. The
# next cell plots a histogram of the probabilities for faces in a show.

plot_distribution_of_identity_probabilities(results, show_name)

# ## Other People Who Are On Screen
#
# For some people, we are interested in who they are often portrayed on screen
# with. For instance, the White House press secretary might routinely be shown
# with the same group of political pundits. A host of a show might be expected
# to be on screen with their co-host most of the time. The next cell takes an
# identity model with high probability faces and displays clusters of faces
# that are on screen with the target person.

get_other_people_who_are_on_screen(results, k=25, precision_thresh=0.8)

# # Persist to Cloud
#
# The remaining code in this notebook uploads the built identity model to
# Google Cloud Storage and adds the FaceIdentity labels to the database.

# ## Save Model to Google Cloud Storage

gcs_model_path = results.save_to_gcs()

# To ensure that the model stored to Google Cloud is valid, we load it and
# print the precision and cdf curve below.

gcs_results = FaceIdentityModel.load_from_gcs(name=name)
imshow(tile_imgs([cv2.resize(x[1][0], (200, 200)) for x in gcs_results.model_params['images']], cols=10))
plt.show()
plot_precision_and_cdf(gcs_results)

# ## Save Labels to DB
#
# If you are satisfied with the model, we can commit the labels to the database.
# + from django.core.exceptions import ObjectDoesNotExist def standardize_name(name): return name.lower() person_type = ThingType.objects.get(name='person') try: person = Thing.objects.get(name=standardize_name(name), type=person_type) print('Found person:', person.name) except ObjectDoesNotExist: person = Thing(name=standardize_name(name), type=person_type) print('Creating person:', person.name) labeler = Labeler(name='face-identity:{}'.format(person.name), data_path=gcs_model_path) # - # ### Commit the person and labeler # # The labeler and person have been created but not set saved to the database. If a person was created, please make sure that the name is correct before saving. person.save() labeler.save() # ### Commit the FaceIdentity labels # # Now, we are ready to add the labels to the database. We will create a FaceIdentity for each face whose probability exceeds the minimum threshold. commit_face_identities_to_db(results, person, labeler, min_threshold=0.001) print('Committed {} labels to the db'.format(FaceIdentity.objects.filter(labeler=labeler).count()))
app/notebooks/labeled_identities/robert_mueller.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # MIMIC Notes Pre-Processing # # Pre-processing MIMIC notes for further use. # Below is a list of redacted items with an example and the replacement token. Replacement tokens are changeable. Check `preprocess_notes.py` for more details. # # Redacted items: # * [x] First Name: `[**First Name (Titles) 137**]`, `xxname` # * [x] Last Name: `[**Last Name (Titles) **]`, `xxln` # * [x] Initials: `[**Initials (NamePattern4) **]`, `xxinit` # * [x] Name: `[**Name (NI) **]`, `xxname` # * [x] Doctor First Name: `[**Doctor First Name 1266**]`, `xxdocfn` # * [x] Doctor Last Name: `[**Doctor Last Name 1266**]`, `xxdocln` # * [x] Known Last Name: `[**Known lastname 658**]`, `xxln` # * [x] Hospital: `[**Hospital1 **]`, `xxhosp` # * [x] Hospital Unit Name: `**Hospital Unit Name 10**`, `xxhosp` # * [x] Company: `[**Company 12924**]`, `xxwork` # * [x] University/College: `[**University/College **]`, `xxwork` # * [x] Date of format YYYY-M-DD: `[**2112-4-18**]`, `xxdate` # * [x] Year: `[**Year (4 digits) **]`, `xxyear` # * [x] Year YYYY format: `[**2119**]`, `xxyear` - I use a regex `\b\d{4}\b` that will match **any** 4 digits which might be problematic, but for the most part 4 digits by itself seems to indicate a year. 
# * [x] Date of format M-DD: `[**6-12**]`, `[**12/2151**]`, `xxmmdd`
# * [x] Month/Day: `[**Month/Day (2) 509**]`, `xxmmdd`
# * [x] Month (only): `[**Month (only) 51**]`, `xxmonth`
# * [x] Holiday: `[**Holiday 3470**]`, `xxhols`
# * [x] Date Range: `[**Date range (1) 7610**]`, `xxdtrnge`
# * [x] Country: `[**Country 9958**]`, `xxcntry`
# * [x] State: `[**State 3283**]`, `xxstate`
# * [x] Location: `**Location (un) 2432**`, `xxloc`
# * [x] Telephone/Fax: `[**Telephone/Fax (3) 8049**]`, `xxph`
# * [x] Clip Number: `[**Clip Number (Radiology) 29923**]`, `xxradclip`
# * [x] Pager Numeric Identifier: `[**Numeric Identifier 6403**]`, `xxpager`
# * [x] Pager Number: `[**Pager number 13866**]`, `xxpager`
# * [x] Social Security Number: `[**Security Number 10198**]`, `xxssn`
# * [x] Serial Number: `[**Serial Number 3567**]`, `xxsno`
# * [x] Medical Record Number: `[**Medical Record Number **]`, `xxmrno`
# * [x] Provider Number: `[**Provider Number 12521**]`, `xxpno`
# * [x] Age over 90: `[**Age over 90 **]`, `xxage90`
# * [x] Contact Info: `[**Contact Info **]`, `xxcontact`
# * [x] Job Number: `[**Job Number **]`, `xxjobno`
# * [x] Dictator Number: `[**Dictator Info **]`, `xxdict`
# * [x] Pharmacy MD Number/MD number: `[**Pharmacy MD Number **]`, `xxmdno`
# * [x] Time: `12:52 PM`, split into 6 segments by the hour and replace with the following tokens: `midnight, dawn, forenoon, afternoon, dusk, night`
# * 2-digit Numbers: `[** 84 **]`, `xx2digit`
# * 3-digit Numbers: `[** 834 **]`, `xx3digit`
# * Wardname
# `886` notes are marked incorrect with the `iserror` flag set to 1. Thus, there are a total of `2,082,294` notes. I have set up a `view` called `correctnotes` in the database, which only includes the correct notes. All the data I grab is from that `view`.
# ## Imports and Inits

import pandas as pd
import psycopg2
import numpy as np  # NOTE(review): numpy was imported twice; the duplicate import was removed
import re
import random
import datetime
from pathlib import Path
import pickle

# Softlink `ln -s` your data path to a `data` variable in the current folder. That way we don't need to change the path in the notebook.

PATH = Path('data')
# !ls {PATH}

from preprocess_notes import *

# ## Grab Data from MIMIC

# ### From Database

# Here the data is grabbed from the MIMIC database. Data can also be grabbed from other sources

# + hidden=true
# %%time
# Per-category note counts; assumes columns 'category' and 'number_of_notes' — TODO confirm file schema.
cats = pd.read_csv('note_categories.csv')

max_limit = -1  # max notes to pull per category; <= 0 means "no cap"
queries = []
for category, n_notes in zip(cats['category'], cats['number_of_notes']):
    # Cap the per-category pull only when a positive max_limit is configured.
    limit = min(max_limit, n_notes) if max_limit > 0 else n_notes
    if limit == max_limit:
        # The cap kicked in: sample `limit` notes uniformly at random from this category.
        q = f"""
        select * from correctnotes
        where category=\'{category}\'
        order by random()
        limit {limit};
        """
    else:
        # Uncapped (or fewer notes than the cap): take every note in the category.
        q = f"""
        select * from correctnotes
        where category=\'{category}\';
        """
    queries.append(q)

# Run one query per category and stack the results into a single frame.
dfs = []
con = psycopg2.connect(dbname='mimic', user='sudarshan', host='/var/run/postgresql')
for q in queries:
    df = pd.read_sql_query(q, con)
    dfs.append(df)
con.close()

df = pd.concat(dfs)
print(df.shape)
# -

# ### From Notes File

# %%time
# Alternative source: this overwrites the database result if both cells are run.
df = pd.read_csv(PATH/'NOTEEVENTS.csv.gz')
print(df.shape)

# ## Preprocess

# Normalize column names and index notes by their row id.
df.columns = map(str.lower, df.columns)
df.set_index('row_id', inplace=True)
print(df.shape)

# Confirm that the number of notes match the actual number.

df[['category', 'text']].groupby(['category']).agg(['count'])

# +
# %%time
# Apply the redaction-token preprocessing (see preprocess_notes.py) and persist the result.
df['proc_text'] = df['text'].apply(preprocess_note)

with open(PATH/'preprocessed_noteevents.pkl', 'wb') as f:
    pickle.dump(df, f)
# -

# ## Create datasets for Language Modeling

# To follow the FastAI language modeling lesson, I've created a subset of the original dataframe to sample for the datasets. In particular, I've included the `description` and `preprocessed_text` fields in the datasets. The `description` column is composed of free-text and has `3840` unique descriptions.
# I consider the description as a unique `field` which will be marked as such during tokenization as done in the FastAI library.

# Slim frame for language modeling: dummy 0 labels plus the fields we tokenize.
sub_df = pd.DataFrame({'proc_text': df['proc_text'], 'category': df['category'], 'description': df['description'], 'labels': [0]*len(df)},
                      columns=['labels', 'category', 'description', 'proc_text'])
sub_df.sample(5)

# Now we can just do a train/test split on the entire dataset for getting a 90/10 training and testing dataset. However, I would like the train/test set have a 90%/10% split in **each category**. So I chose to iterate over each entry of the `category` column and create masks to split data with a 90/10 split for training and testing so that I grab 10% of texts in each category for testing instead of a global 10%.

# Set random seed for reproducible results.

# +
# %%time
np.random.seed(42)  # fixed seed so the split is reproducible

# One sub-frame per category. Mask on sub_df itself (previously df['category'] —
# equivalent only because sub_df shares df's index; masking the frame being sliced is the robust form).
dfs = [sub_df.loc[sub_df['category'] == c] for c in sub_df['category'].unique()]
# Independent ~90% Bernoulli mask per category => stratified 90/10 split.
msks = [np.random.rand(len(d)) < 0.9 for d in dfs]

train_dfs = [d[m] for d, m in zip(dfs, msks)]
val_dfs = [d[~m] for d, m in zip(dfs, msks)]

train_df = pd.concat(train_dfs)
val_df = pd.concat(val_dfs)

# Actual sizes vs an exact 90/10 split — the masks are random, so expect small deviations.
print(len(train_df), (len(df) - len(df)//10), len(train_df)-(len(df) - len(df)//10))
print(len(val_df), (len(df)//10), len(val_df)-(len(df)//10))
# -

# Sanity check the aggregate count for each category over the 3 dataframes. Then write the `train` and `val` dataframes to disk.

val_df[['category', 'proc_text']].groupby(['category']).agg(['count'])

train_df[['category', 'proc_text']].groupby(['category']).agg(['count'])

sub_df[['category', 'proc_text']].groupby(['category']).agg(['count'])

# %%time
train_df[['labels', 'description', 'proc_text']].to_csv(PATH/'train.csv', header=False, index=False)
val_df[['labels', 'description', 'proc_text']].to_csv(PATH/'test.csv', header=False, index=False)
mimic-notes-preprocess.ipynb