code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # BEAST Workflow Example
#
# In this notebook we will be walking through a standard BEAST workflow example using some data from M31.
#
# You'll need a couple of datafiles to get started though. Please visit https://www.dropbox.com/sh/91aefrp9gzdc9z0/AAC9Gc4KIRIB520g6a0uLLama?dl=0 and download all the files (you can omit wrangling_data.ipynb) into the same folder this Jupyter Notebook is in.
#
# Before we do anything, we have to import the following packages. This seems like a lot, but they are all here to make our lives easier down the line. And running them all as the first cell means that if our kernel ever crashes halfway through, we can just reimport everything at once rather than stepping through the cells individually.

# +
import h5py
import numpy as np
from astropy import wcs
from astropy.io import fits
from astropy.table import Table
#import tables
import glob
import os
import types
import argparse
import matplotlib.pyplot as plt

from beast.plotting import (plot_mag_hist, plot_ast_histogram, plot_noisemodel)
from beast.tools.run import (
    create_physicsmodel,
    make_ast_inputs,
    create_obsmodel,
    run_fitting,
    merge_files,
    create_filenames,
)
from beast.physicsmodel.grid import FileSEDGrid
from beast.fitting import trim_grid
import beast.observationmodel.noisemodel.generic_noisemodel as noisemodel
# NOTE(review): a second, duplicate `from beast.tools.run import (run_fitting,
# merge_files,)` used to appear here; it was redundant and has been removed.
from beast.tools import (
    create_background_density_map,
    split_ast_input_file,
    split_catalog_using_map,
    # subdivide_obscat_by_source_density,
    cut_catalogs,
    # split_asts_by_source_density,
    setup_batch_beast_trim,
    # setup_batch_beast_fit,
)
import importlib
# -

# # Step -1. Obtain data file and convert to fits file
#
# Sometimes photometric catalogs are delivered as HDF5 files.
# While these are great for storing data in hierarchies, it's a little hard to work with directly, so we have to convert our HDF5 file to a FITS file.
#
# Thankfully, our photometric catalog for this example is already in a FITS format so we don't need to worry about this and can move straight on to Step 1.

# # Step 1a. Make magnitude histograms
#
# The first thing we need to do is understand the range of stellar magnitudes we are working with in this data set.
#
# To do this we can make histograms of all the magnitudes of all the stars in all the different filters from the photometric catalog. This is done so that we know where the peaks of the histograms are in each filter. These peaks will then be used later when we make source density maps.
#
# Essentially what happens is that, for the density maps, we only count objects within a certain magnitude range, currently set to mag_cut = [15, peak_for_filter - 0.5]. So if the peak was 17.5, then the objects that would be counted would have to be in the range between 15 and 17.
#
# The reason we only count brighter sources is because dimmer sources tend to not be properly observed, especially as the magnitudes near the telescope limit. There will always be far more dim sources than bright sources, but if we know how many bright sources there are, then we can extrapolate as to how many dim sources there should be, and probably get a better understanding from that than if we were to try and actually count all the dim sources we detect.
#
# **Variable Information**
#
# * **field_name** : the string name of the main photometric catalog we are working with. This variable will be used to rename a lot of different files in the future, which is why we have it as a separate variable.
# * **gst_file** : stands for good-stars; this is the full name of the original photometric catalog we are working with.

field_name = "M31-B09-EAST_chunk"
gst_file = "./%s.st.fits" %field_name

# We can see what type of data this fits file holds by making a table.
# There should be around 50,000 sources in this catalog, which is quite small compared to the original file.
#
# *Note: **st** stands for stars. We also sometimes name things **gst** for good stars to signify when cuts have been made.*

hdul = fits.open(gst_file)
Table(hdul[1].data)

# As we can see, there's a lot of columns and even more rows. For plotting the magnitude histograms, we're going to be interested in any column that contains the name VEGA. These are the columns with the magnitudes for each filter.
#
# We could also use the X and Y columns to plot where all the sources are located, or the RA and DEC to map their actual position in the sky.
#
# In larger projects we might have multiple fields to analyze during each run, so there would be multiple **field_names**. Since this is just a small example, we just have one field so our index will always be equal to **0**.

# the list of fields (we only have 1 for this example.)
field_names = [field_name]

# Now we can create some histogram plots to visualize the magnitude distribution of our sources.

# this 'if' statement just checks if there's already a histogram file
# NOTE(review): if the histogram PDF already exists, peak_mags is never
# assigned here, yet Step 1b reads peak_mags -- delete the PDF (or run this
# cell in a fresh kernel) before running Step 1b; confirm intended behavior
if not os.path.isfile('./'+field_names[0]+'.st_maghist.pdf'):
    peak_mags = plot_mag_hist.plot_mag_hist(gst_file, stars_per_bin=70, max_bins=75)

# You can check out the results for the histograms in the file ending with **_maghist.pdf**
#
# From this plot, we can also see what filters exist for the data.

# # Step 1b. Make source density maps

# Next we'll be creating source density maps. These are maps of our data field colored such that they show how many stars/sources there are in each degree field. The standard size is 5 arc seconds squared. The size can easily be changed by modifying the **pixsize** variable below.
# + # Pick the filter with the dimmest peak from the histogram ref_filter =["F475W"] # choose a filter to use for removing artifacts # (remove catalog sources with filter_FLAG > 99) flag_filter = ["F275W"] # - # check to see if the sourde density file already exists if not os.path.isfile(gst_file.replace(".fits", "_source_den_image.fits")): # if not, run all this other code # - pixel size of 5 arcsec # - use ref_filter[b] between vega mags of 15 and peak_mags[ref_filter[b]]-0.5 # since we're only working with one field, our index b is set to 0 sourceden_args = types.SimpleNamespace( subcommand="sourceden", catfile=gst_file, pixsize=5, npix=None, mag_name=ref_filter[0]+ "_VEGA", mag_cut=[17, peak_mags[ref_filter[0]] - 0.5], flag_name=flag_filter[0]+'_FLAG', ) create_background_density_map.main_make_map(sourceden_args) # new file name with the source density column gst_file_sd = gst_file.replace(".fits", "_with_sourceden.fits") # This function should create 3 files: # * *M31-B09-EAST_subset.st_source_den_image.fits* : a file for viewing the source density information in ds9 or with matplotlib # # * *M31-B09-EAST_subset.st_sourceden_map.hd5* : the same file as source_den_image but now with even more data (the split_catalog_using_map function will end up using this file later on) # # * *M31-B09-EAST_subset.st_with_sourceden.fits* : the same as the original photometric file (gst_file) but now with an additional column for what density bin the source is located in # ### View the fits images of the source density maps # # Now that we have the source density maps outputted, we can plot the image and see that the density looks like. 
# + # open the fits file hdu_list = fits.open("./%s.st_source_den_image.fits"%field_name) hdu_list.info() # extract the image data image_data = hdu_list[0].data # take a look at what the image should look like print(type(image_data)) print(image_data.shape) # close the fits file hdu_list.close() # - # plot the extracted image data fig = plt.figure(0, [10,10]) im = plt.imshow(image_data, origin="lower") plt.colorbar(im) plt.xlabel("Pixel (originally RA)") plt.ylabel("Pixel (originally DEC)") plt.title("Density of Sources per 5 arcsec^2") # # Step 1c. Set up datamodel file # # At this point, we have a basic understanding of the information we are working with, so it's about time we set up our datamodel file. # # The datamodel file is a sort of catch-all file used to store any sort of infomation we might need to run the BEAST code on our data. We'll go through and talk about what all the different variables mean, and which ones would need to be changed for any future projects. # # Go ahead and open the datamodel.py file in a text editor now and ensure that the following variables match: # # * **project** : the same as the field_name variable we noted earlier # * *project = "M31-B09-EAST_chunk" * # * **surveyname** : the overall name for the survey (this variable isn't actually important for the code) # * *surveyname = "PHAT-M31"* # * **filters** : the full filter names from the photometric catalog, also the names that show up in our magnitude histograms so you can add them from there # * *filters = ["HST_WFC3_F475W", "HST_WFC3_F275W", "HST_WFC3_F336W", "HST_WFC3_F814W", "HST_WFC3_F110W", "HST_WFC3_F160W",]* # # * **base filters** : shortened versions of the filter names # * *basefilters = ["F475W", "F275W", "F336W", "F814W", "F110W", "F160W"]* # * **obsfile** : the name of the photometric catalog (now including the source density information # * *obsfile = "./M31-B09-EAST_chunk.st_with_sourceden_cut.fits"* # # * **ast_with_positions** : make sure is set to *True* if 
# you have the locations included in your obsfile
#
# * **ast_density_table** : the source density map created in step 1b
#   * *ast_density_table = './M31-B09-EAST_chunk.st_sourceden_map.hd5'*
#
# * **ast_reference_image** : the original photometric FITS catalog, which is required if you set ast_with_positions to true
#   * *ast_reference_image = "./M31-B09-EAST_chunk_F475W_drz.chip1.fits"*
#
# * **astfile** : the file of ASTs we will be creating in step 3, but since ASTs normally have to be processed by a specialist, we have already included a finished AST file for us to use in this example
#   * *astfile = "M31-B09_EAST_chunk.gst.fake.fits"*
#
# * **n_subgrid** : the number of subgrids to use for generating the physics model later on (with 1 meaning no subgrids)
#   * *n_subgrid = 1*

# This file is also where you specify the parameters and resolution of your physics model, which will become relevant in step 2. The resolution of these parameters for your own runs will differ depending on what sorts of ASTs you want to model. There are 8 parameters that can be set.
#
# 1. **Distance** : either a fixed value or a range with stepsizes
# 2. **Velocity** : the heliocentric velocity of your location or galaxy in km/s
# 3. **Age** : the log10 age range of the ASTs being modeled
# 4. **Mass** : the mass of the ASTs
# 5. **Metallicity** : the metallicity range of the ASTs
#
# 6. **A(v)** : the range of dust extinction in magnitudes that could be dimming the intrinsic brightness of the ASTs
# 7. **R(v)** : the range of dust grain sizes
# 8. **f(A)** : the mixture factor between the Milky Way and Small Magellanic Cloud extinction curves

# +
import datamodel
importlib.reload(datamodel)
# -

# Our goal after this would normally be to eventually run a bunch of **ASTs** (Artificial Star Tests), but before we can do that, we need to generate the fake stars to use.
# # Since the ASTs would normally need to be analyzed by a specialist after being created and that's a little overkill for a small example, these next couple of steps are just to illustrate how the ASTs are actually generated. A finished file of the analyzed ASTs already exists so we will end up using that in step 4 and beyond. # # # # Step 2. Create physics model # # In order to generate a diverse and representative sample of fake stars to use for our ASTs, we need to set up a N-dimensional model of possible stellar parameters, so that we can easily and randomly select stars from the model. # # This model is called a **physics model**, and we will be using the parameters set in the datamodel.py file to create this N-dimensional grid. # # *As a quick note, the resolution on the stellar parameters (the step size, often specified as the third input e.g. logt = [6.0, 10.13, 1.0], where 1.0 is the step size) is the main factor driving how long this physics grid will take to set up. If things take a very long time to run, consider making the step size larger for testing's sake.* # # Sometimes we are able to have access to high-performance computing resources, meaning we can split the physics model into subgrids and run them in parallel, cutting a lot of the computation time. While we're like not running this notebook in parallel here, we've still specified a number of subgrids in the datamodel.py file. # # We can check how many subgrids are set up. datamodel.n_subgrid # So we can now see that we've asked for 1 grid in the datamodel.py file. # # If we've already generated a physics model, we certainly don't want to run it again, so the following code checks to make sure all the subgrids for the physics model are present. 
# + # set up the naming conventions for the physics model gs_str = "" # this is only relevant if we run with multiple subgrids if datamodel.n_subgrid > 1: gs_str = "sub*" # collects any physics models that have already been created # if none have, sed_files will be empty sed_files = glob.glob( "./{0}/{0}_seds.grid{1}.hd5".format(field_names[0], gs_str) ) # - # only make the physics model they don't already exist if len(sed_files) < datamodel.n_subgrid: # directly create physics model grids create_physicsmodel.create_physicsmodel(nprocs=1, nsubs=datamodel.n_subgrid) # list of SED files (physics models) model_grid_files = sorted( glob.glob( "./{0}/{0}_seds.grid{1}.hd5".format(field_names[0], gs_str) ) ) sed_files = model_grid_files # Hopefully a spectral grid and an SED grid should have started generating. In the end you should have a new folder with the same name as your project, with a one SED and spectral grid if you have only 1 subgrid. # # Step 3. Create Input ASTs! # # Now that we have our physics model generated, we can start to generate some input ASTs. ASTs are artificial sources inserted into the observations we have, which are then extracted with the same software that was used for the original photometry catalog. So the step that we're running now is just generating the artifical sources that will then later be inserted. # # We need to make sure that the ASTs cover the same range of magnitudes as our original photometric catalog does, so to do that # # # First thing's first, we're gonna check that there isn't already a file of AST inputs present in the folder we're working in. # + # only create an AST input list if the ASTs don't already exist ast_input_file = ("./{0}/{0}_inputAST.txt".format(field_names[0])) ast_input_file # - # Now we can create the ASTs if they don't already exist. 
# generate the AST inputs, sampling the SED grid by flux bins
if not os.path.isfile(ast_input_file):
    make_ast_inputs.make_ast_inputs(flux_bin_method=True)

ast = Table.read(ast_input_file, format="ascii")
ast

# NOTE(review): a stray scratch expression ("33418/6") and a dangling
# "The way that" comment fragment were removed from this cell.

# ### Check to see how the SEDs and the ASTs compare
#
# The histogram that is produced should have both the SED distribution and the AST distribution plotted on it. The thing we want to test for is whether the AST distribution fully samples the SED range.

plot_ast_histogram.plot_ast(ast_file = ast_input_file, sed_grid_file = model_grid_files[0])

# # Step 4. Edit/Split the Catalog

# We have to remove sources from the input photometry catalog that are in regions without full imaging coverage or flagged as bad in flag_filter. This step should mostly just be removing any sources where one of the filters might not have a value.

# +
gst_file_cut = gst_file.replace(".fits", "_with_sourceden_cut.fits")

# check to see if the trimmed catalog already exists
if not os.path.isfile(gst_file_cut):
    # and if not, make the cuts and write the trimmed catalog
    cut_catalogs.cut_catalogs(
        gst_file_sd,
        gst_file_cut,
        partial_overlap=True,
        flagged=True,
        flag_filter=flag_filter[0],
        region_file=True,
    )
# -

# # Step 4.5 Update Datamodel

# **After making these cuts, we should now update the obs_file name in datamodel.py (~line 62) with this new trimmed filename: './M31-B09-EAST_chunk.st_with_sourceden_cut.fits'**

importlib.reload(datamodel)

# # Step 5. Edit/Split the ASTs

# Now for this step, we're doing things a little unconventionally, since actually placing all the input ASTs we generated in Step 3 back into our image and rerunning the analysis would take several days of computational time.
#
# Instead, we've already procured a polished AST results file (kindly provided by <NAME> from the University of Washington) which we can use to complete our analysis. The AST file should be named *'./M31-B09-EAST_chunk.gst.fake.fits'* while the input ASTs we generated were named *'./M31-B09-EAST_chunk/M31-B09-EAST_chunk_beast_inputAST.txt'*.
# # We will now use the same cutting procedure as for the catalog to trim down the AST file with the same criteria as in Step 4. ast_file = "./" + field_names[0] + ".gst.fake.fits" ast_file Table.read(ast_file) # + # - ASTs ast_file_cut = ast_file.replace(".fits", "_cut.fits") # check to see if the trimmed AST file already exists if not os.path.isfile(ast_file_cut): cut_catalogs.cut_catalogs( ast_file, ast_file_cut, partial_overlap=True, flagged=True, flag_filter=flag_filter[0], region_file=True, ) # so now we've generated the cut ast file # - # We can plot the AST magnitudes against our original source magnitudes again, just to check that we are within a reasonable range. # check to see if the plotted AST file already exists if not os.path.isfile(ast_file_cut.replace(".fits", "_maghist.pdf")): test = plot_mag_hist.plot_mag_hist(ast_file_cut, stars_per_bin=200, max_bins=30) # and so this should plot a histogram of the different asts that remain after cutting # # Step 5.5 Update Datamodel Again # # **Same with these cuts, we now have to update the astfile variable in datamodel.py (~line 144) with this new trimmed filename: './M31-B09-EAST_chunk.gst.fake_cut.fits'** importlib.reload(datamodel) # # Step 6. Split catalog by source density # For the next fitting step, we're going to have to break our catalog and AST file into bins based on the source density, and then further into sub-bins if there are more than ~6250 sources in the bins. # # We split things into source density bins so that we can later study how the actual source density of region effects the noise or bias. We further split things into sub-bins, just to make things a little more computationally accessible. # # One thing to note is that the source density bins are first sorted by magnitude (typically F475W if it's there) before being split into sub-bins. 
# This means that the first sub-bin file (for a source density bin that has more than 6250 sources) will end up having all the dimmest sources or any sources with NAN values, and the last sub file will have all the brightest sources. This will become handy in Step 8 when we create physics (SED) models and noise models tailored specifically to each sub-bin file.

# check to see if any sub files exist yet
if len(glob.glob(gst_file_cut.replace('.fits','*sub*fits') )) == 0:
    # if no sub files exist, they can now be created
    # a smaller value for n_per_file will mean more individual files/runs,
    # but each run will take a shorter amount of time

    #split the gst file and ast file
    split_catalog_using_map.split_main(
        gst_file_cut,
        ast_file_cut,
        gst_file.replace('.fits','_sourceden_map.hd5'), #get full sourceden_map.hd5 file from dust folder
        bin_width=1,
        n_per_file=6250, #this is the max number of sources per bin before it splits
    )

# So these are all the different source density bins, with some of them being split into sub bins to limit the number of entries to ~6250.

# Rather than reading in all the files we just created, the developers of this code instead wrote this handy little function that generates a dictionary of all the files that have just been created (assuming the function ran correctly) and all the files that we hope to generate in the future.
#
# Because of this, I recommend not changing any of the naming for Step 6 or beyond, just because that then makes this dictionary point to incorrect files.

# generate file name lists
file_dict = create_filenames.create_filenames(
    use_sd=True, nsubs=datamodel.n_subgrid
)

# If we take a look in our folder, we should be able to see some bins with sub-bins notation. We can do a quick check to see if the sub-binning generated from the dictionary matches up with the files split in our data folder.
sd_sub_info = file_dict["sd_sub_info"]
sd_sub_info

# **Hint: If sd_sub_info is empty, make sure you've updated the obsfile and astfile variables in datamodel (Step 4.5 and 5.5), reloaded the datamodel, and try to run create_filenames again.**

# +
# - number of SD bins
temp = set([i[0] for i in sd_sub_info])
print("** total SD bins: " + str(len(temp)))

# - the unique sets of SD+sub
# (index-based dedup keeps the first occurrence of each pair; it is O(n^2)
# but works for unhashable elements and the list is small)
unique_sd_sub = [
    x for i, x in enumerate(sd_sub_info) if i == sd_sub_info.index(x)
]
print("** total SD subfiles: " + str(len(unique_sd_sub)))
# -

# Just another quick way to ensure that all the binning and sub-binning matches up. If it doesn't, none of the next steps will run properly.

# # Step 7. Make Noise Models

# We're now on to creating our observational noise models! These models will be used to adjust the bias and uncertainty in Steps 8 and 9.
#
# The **uncertainty** (also known as sigma) is the standard deviation calculated for all the detected sources.
#
# The **bias** is the average offset between the input flux we have for the ASTs and the measured flux. Bias tends to become more prominent in regions of high source density, where it's harder to detect all the faint stars if they get blended together. If this happens, then some of the stars are assumed to be part of the background (raising the average), which gets subtracted from the detected sources. If the background is raised, then the detected sources are measured to be systematically fainter than they should be.

# these are what the noise files should be named once generated
noise_files = file_dict["noise_files"]

# gather up the split AST files
ast_file_list = sorted(glob.glob(datamodel.astfile.replace(".fits", "*_bin*")))
ast_file_list

# create the noise model with our ASTs
create_obsmodel.create_obsmodel(
    use_sd=True, nsubs=datamodel.n_subgrid, nprocs=1
)

# # Step 7.5 Visualize Noise Models (Optional)

# This next cell is some older plotting code for visualizing the noise models.
# It should (hopefully) work if you uncomment and run it, but the lack of a log scale for the y-axis makes the results a little harder to fully interpret.
#
# As an alternative, the same plot is recreated down below, but the steps have been broken down to hopefully help you gain a better sense of what's going on (and plot the y-axis with a log scale).
#
# If you're not interested in visualizing the noise models, feel free to skip this step.

# +
# plot_noisemodel.plot_noisemodel(sed_file="M31-B09-EAST_chunk/M31-B09-EAST_chunk_seds.grid.hd5",
#                                 noise_file_list=noise_files,
#                                 plot_file="noise_model_plot.png")
# -

# ### Alternative plot

# I'm going to try to recreate this noise model plot using some of the filters used in Dreiss' paper.
#

# +
# set some basic plotting stuff
samp=100 # makes it so we plot every 100th point from the SED files
color=["black", "red", "gold", "lime", "xkcd:azure"]
label=None

# load in the physics model as an object
sed_object = FileSEDGrid(sed_files[0])

# read the flux values for all the sources
# (the seds attribute may be an on-disk array with a read() method,
# or an already-materialized in-memory array)
if hasattr(sed_object.seds, "read"):
    sed_grid = sed_object.seds.read()
else:
    sed_grid = sed_object.seds

sed_object.seds.shape
# -

# So this sed_grid comes from back in Step 2, where the physics model created ~500,000 points based off of the original parameters we specified in the datamodel, and for each point, the expected flux for each filter is calculated. We can now use the noise models we created with the ASTs to see how the bias and uncertainty are expected to scale with the flux from a specific filter. We'll plot the log10 of the flux on the x-axis and then the flux-normalized uncertainty and bias on the y-axis.
# We can also color our results based on what source density bin the ASTs came from, as well as compare how different filters compare to one another
#

# +
# pull out the list of filters
filter_list = sed_object.filters

# for this plot, I just want to plot the first two filters
# feel free to change this and see what the other filters look like
filter_list_plot = filter_list[0:2]
n_filter = len(filter_list_plot)

# +
# set up the figure framework
# have it scale with the number of filters we're plotting
fig, axes = plt.subplots(2, len(filter_list_plot), sharex=True,
                         figsize=(5*len(filter_list_plot),8))

# go through noise files
for n, nfile in enumerate(noise_files):

    print("* reading " + nfile)

    # read in the values
    noisemodel_vals = noisemodel.get_noisemodelcat(nfile)

    # extract error and bias
    noise_err = noisemodel_vals.root.error[:]
    noise_bias = noisemodel_vals.root.bias[:]

    # NOTE(review): both lines below are loop-invariant and could be hoisted
    # above the loop (left in place to keep the cell byte-identical)
    cmaps = plt.get_cmap('viridis')
    gradient = np.linspace(0, 1, len(noise_files))

    # now we can start plotting things
    for f, filt in enumerate(filter_list_plot):

        # error is negative where it's been extrapolated -> trim those
        good_err = np.where(noise_err[:, f] > 0)[0]
        plot_sed = sed_grid[good_err, f][::samp] # only pulls every 100th point
        plot_err = noise_err[good_err, f][::samp]
        plot_bias = noise_bias[good_err, f][::samp]

        # plot bias (flux-normalized, log y-axis)
        axes[0, f].set_yscale('log')
        # NOTE(review): nfile[-10] assumes the source-density digit sits
        # exactly 10 characters from the end of the noise-file name --
        # fragile; verify against the actual file-naming convention
        axes[0, f].plot(
            np.log10(plot_sed),
            np.abs(plot_bias) / plot_sed,
            marker="o",
            linestyle="none",
            mew=0,
            ms=2,
            alpha=1,
            c=cmaps(int(nfile[-10])/9.),
            label=noise_files[n][-10]
        )
        axes[0, f].set_ylabel(r"Abs Bias ($\mu$/F)", fontsize=10)
        # xlabel is still in flux, not mag
        axes[0, f].legend()

        # plot error (uncertainty, flux-normalized, log y-axis)
        axes[1, f].set_yscale('log')
        axes[1, f].plot(
            np.log10(plot_sed),
            plot_err / plot_sed,
            marker="o",
            linestyle="none",
            mew=0,
            ms=2,
            color=color[0 % len(color)],  # NOTE(review): 0 % len(color) is always 0, so this is always "black"
            alpha=0.1,)
        axes[1, f].set_ylabel(r"Error ($\sigma$/F)", fontsize=10)
        axes[1, f].set_xlabel("log " + filt[-5:], fontsize=10)

plt.tight_layout()
#fig.colorbar(plt.cm.ScalarMappable(norm=np.arange(0,12), cmap=cmaps), ax=axes) # Need to figure out if it's worth comparing the bias and the uncertainty to one another. # - # As you can probably tell, this plot isn't the most beautiful plot in the world (especially that coloring and legend) but I'm proud of her. It does, however, let you see the scale of the bias and uncertainty (error) for different filters and how the source density and magnitudes are correlated. # # The most notable thing to note is that the uncertainty and bias tend to be larger at lower fluxes. This probably doesn't come as a shock to anyone, but it's important to accurately take this into consideration when we make our fittings in Step 9. # # Step 8. Trim Models # # Now that we have our SED and or noise models created, we can go ahead and trim them of any sources that are so bright or so faint (compared to min/max flux in the observation file) that they will by definition produce effectively zero likelihood fits. # # One thing to note is that, since our noise models are correlated with source density, we are in a sense 'convolving' each of our noise models with the original physics grid, meaning we will end up with a lot of physics grids trimmed for each source density scenario thanks to our noise models (and these physics grids are still essentially as large as the original physics grid, making this a very storage-intensive step). However, this trimming of the 'parameter space', as you could call it, will help speed up fittings in Step 9. 
# # **This step is very storage intensive so I'd make sure to have at least ~5GB of storage available.** # # check to see if any sub files exist yet if len(glob.glob(file_dict["noise_trim_files"][0].replace('bin2_sub0','bin*_sub*'))) == 0: for i, sub_files in enumerate(file_dict["noise_trim_files"]): # pull out physics grid modelsedgrid = FileSEDGrid(model_grid_files[0]) # trim for each noise file separately noisemodel_vals = noisemodel.get_noisemodelcat(noise_files[i]) obsdata = datamodel.get_obscat(gst_file_cut, modelsedgrid.filters) # need to iterate over all the sub-bins trim_grid.trim_models(modelsedgrid, noisemodel_vals, obsdata, file_dict["modelsedgrid_trim_files"][i], file_dict["noise_trim_files"][i]) # # Step 9. Fit Models (WARNING! This step takes a while) # Now we're going to fit all our sources from our observational photometric catalog to our new trimmed physics and noise models. This will take quite some time just because every source has to be evaluated at each step in its physics model. # # So for every sub-bin of sources (max 6250 sources), every source in that photometry file is evaluated at every potential step in the physics grid that has been trimmed to specifically fit that sub-bin (hence the data-intensive code we ran back in Step 8). From this, we essentially get a report of how well every point in the physics model (AKA combo of parameters) matched with a source, what is often referred to as a likelihood. If we then take these likelihoods and figure out what parameter values they point back to, we can create a distribution of parameter values (metallicity, distance, Av, Rv, etc.) that best model each source. I hope that made sense (and is the correct interpretation). # # This function uses the trimmed photometric files we have, the trimmed physics models, and the trimmed noise models to create statistic files for each sub-binned source density bin. 
# # It'll take a long time though (~5 hours for me at least, but maybe you have a better computer (8GB RAM, for reference)). #if len(glob.glob(file_dict["modelsedgrid_trim_files"][0].replace('bin2_sub0','bin*_sub*'))) == 0: run_fitting.run_fitting( use_sd = True, nsubs = 1, nprocs = 1, choose_sd_sub=None, choose_subgrid=None, pdf2d_param_list=['Av', 'Rv', 'f_A', 'M_ini', 'logA', 'Z', 'distance'], resume=False, ) # # Step 10. Merge fits # Whoo-hoo! You finished running the big Step 9! # # We are now onto the final step where we just have to merge all the trimmed SED model results together. This should produce one final **stats.fits** file which is very similar to our original photometric file, except now all the sources have estimates for what their metallicity, distance, age, mass, dust, etc. might be. # # Using these new columns of data, we can create lots of cool visuals which will be shown in the epilogue. merge_files.merge_files(use_sd=True, nsubs=datamodel.n_subgrid) # Hopefully there is now a stats.fits file in your folder. We can read it in to better understand what really happened. hdul = fits.open(sed_files[0].replace('seds.grid.hd5', 'stats.fits')) Table(hdul[1].data) # As you can hopefully see, for every source, there are now several parameters assigned to each one. These are all the parameters we originally had set up in our datamodel and specified in Step 9. # # Epilogue: Visualizating! from beast.plotting import ( plot_triangle, plot_indiv_fit, plot_cmd_with_fits, plot_completeness, plot_chi2_hist, ) # ### Triangle Plot # # This first plot displays a posterior distributions of the parameters of all the fitted stars. 
plot_triangle.plot_triangle("M31-B09-EAST_chunk/M31-B09-EAST_chunk_stats.fits") # ### CMD Plot # # You can also make a color-magnitude diagram of the observations and color-code the data points using one of the parameters from the BEAST fitting (feel free to change this from the example, just remember that the param must match a column name from the stat.fits file). # # Inputs are the photometry file, three filters, the BEAST stats file from Step 10, and the parameter to use and apply color to after taking the log10. plot_cmd_with_fits.plot(data_fits_file="M31-B09-EAST_chunk.st_with_sourceden_cut.fits", beast_fits_file="M31-B09-EAST_chunk/M31-B09-EAST_chunk_stats.fits", mag1_filter="F475W", mag2_filter="F814W", mag3_filter="F475W", param="Z_Best", #metallicity ) # ### Completeness Plot # This next plot shows the completeness (how many AST sources were detected out of the total number of AST that exist for that parameter bin) for each parameter, although it should be noted that the *distance* parameter was purposefully left out because all the sources have the same distance value, and thus the plotting code isn't sure how to handle it. plot_completeness.plot_completeness(physgrid_list=file_dict["modelsedgrid_trim_files"], noise_model_list=file_dict["noise_trim_files"], output_plot_filename="completeness_plot.pdf", param_list=['Av', 'Rv', 'logA', 'f_A', 'M_ini', 'Z'], #, 'distance'], compl_filter='F475W',) # ### Chi Squared Plot # Make a histogram of the best chi2 values (chi2=1 and the median chi2 are marked). Note that there is no plot of reduced chi2, because it is mathematically difficult to define the number of degrees of freedom. Inputs are the BEAST stats file and optionally the number of bins to use for the histogram. 
plot_chi2_hist.plot(beast_stats_file="M31-B09-EAST_chunk/M31-B09-EAST_chunk_stats.fits") # There's another cool plot for plotting the individual fits of stars, but unfortunately, this code works with a file that only gets generated when using multiple subgrids (remember how we checked that we had a subgrid = 1 back in Step 2?). If it had worked with the code below, it would have made a multi-panel plot that shows the PDFs and best fits of each parameter for any given star, as well as the SED (similar to Figure 14 in Gordon+16). # + #plot_indiv_fit.plot_beast_ifit(filter=datamodel.filters, waves, stats, pdf1d_hdu, starnum=0): # - # Sorry I wasn't able to show you all that last plot. But thanks for reading through this notebook til the end. Hopefully you found it to be somewhat helpful and if you have any suggestions for how to make it better, you can find me at <EMAIL>. # # Thanks,\ # <NAME>\ # (she/her)
M31_Example/.ipynb_checkpoints/M31_BEAST_workflow_example-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # GSD: Calculating ambiguous nucleotides and %G+C for the mitochondrial genomes of NCYC3594 and NCYC3585 from Wolters et al 2015 # # I noted at the end of `GSD Calculating_GC_for_nuclear_and_mitochondrial_genomes_of_SGD_reference_and_PB_set.ipynb` where I was discussing the range in %G+C for the *cerevisiae* that although I knew the reported %G+C for the two *cerevisiae* strains sequenced by PacBio in Wolters et al, specifically, strains NCYC3594 and NCYC3585 from [Wolters et al 2015 PMID: 26062918]((https://www.ncbi.nlm.nih.gov/pubmed/26062918)), that I'd need to analyze them to assess ambiguous nts. This is that effort. They only did the mitochondrial DNA by single molecule sequencing and so I am not going to deal with the nuclear. # # # Reference for the sequence data: # - [Population structure of mitochondrial genomes in Saccharomyces cerevisiae. # Wolters JF, <NAME>, Fiumera HL. BMC Genomics. 2015 Jun 11;16:451. doi: 10.1186/s12864-015-1664-4. PMID: 26062918](https://www.ncbi.nlm.nih.gov/pubmed/26062918). Article [direct](https://bmcgenomics.biomedcentral.com/articles/10.1186/s12864-015-1664-4). # # Uses code developed for ``GSD Calculating_GC_for_nuclear_and_mitochondrial_genomes_of_SGD_reference_and_PB_set.ipynb`` and `GSD Assessing_ambiguous_nts_in_complete_PB_genomes.ipynb`, `GSD Assessing_ambiguous_nts_in_nuclear_PB_genomes.ipynb`, `GSD Assessing_ambiguous_nts_in_1011_collection_genomes.ipynb`, `GSD Calculating_GC_for_nuclear_and_mitochondrial_genomes_of_SGD_reference.ipynb`. # # Should be able to be run in almost any Jupyter environment with Pandas and the 'essential' modules. # # The sequence data is going to come directly from `SGDs288CplusPacBio_ADJUSTEDplusWoltersnW303forALIGNERS.fa`. 
I had made this for another purpose and adjusted the 'start' site to match the SGD reference sequence, see `Counting putative promoters and origins in current mito genomes collection.ipynb`; however, for counting the arrangment of start site doesn't matter by this is handy way to get the sequences I need with only need to upload one file. # # # ----- # ## Preparation # # Get the packages and sequence data necessary. # # Uploaded the following to where I was running this notebook in a Binder session using Jupyter Dashboard or JupyterLab's drag-and-drop: # - 'SGDs288CplusPacBio_ADJUSTEDplusWoltersnW303forALIGNERS.fa' # # !pip install pyfaidx # Get the genomes from the uploaded file available as single files by running these commands. (Additionally it makes the names of the files consistently end in `*.mito.fa`. import os import sys import fnmatch example_produced_file = "NCYC3594.mito.fa" if not os.path.isfile(example_produced_file): #so won't run again if already ran name_part_to_match = ".fa" name_part_to_expand_to = ".mito.fa" old_files_with_ext = [] for file in os.listdir('.'): if fnmatch.fnmatch(file, '*'+name_part_to_match): old_files_with_ext.append(file) files_to_not_touch_despite_match = old_files_with_ext seq_file = "SGDs288CplusPacBio_ADJUSTEDplusWoltersnW303forALIGNERS.fa" # !faidx --split-files {seq_file} new_fasta = [] for file in os.listdir('.'): if fnmatch.fnmatch(file, '*'+name_part_to_match) and file not in files_to_not_touch_despite_match: new_fasta.append(file) #fix name if it needs fixing for file in new_fasta: if not fnmatch.fnmatch(file, '*'+name_part_to_expand_to): new_file_name = file.split(".fa")[0] + name_part_to_expand_to # !mv {file} {new_file_name} # Make a list of the mitochondrial genomes. 
# Collect the filenames of all per-strain mitochondrial FASTA files produced above.
import os
import sys
import fnmatch

name_part_to_match = ".mito.fa"
genomes= []
for file in os.listdir('.'):
    if fnmatch.fnmatch(file, '*'+name_part_to_match):
        #print (file)
        #first_part_filen = file.rsplit(name_part_to_match,1)[0]
        genomes.append(file)
len(genomes)

# +
# NOTE(review): this keep-alive cell loops forever (`while True` with no exit)
# and only exists to stop a hosted (e.g. Binder) session from timing out.
# It will block "Run All" — interrupt the kernel to continue to the cells below.
import time

def executeSomething():
    #code here
    print ('.')
    time.sleep(480) #60 seconds times 8 minutes

while True:
    executeSomething()
# -

# # Now you are prepared to analyze each genome.

# ## Calculating ambiguous nts and %G+C for the mitochondrial genomes
#
# Mainly want NCYC3594 and NCYC3585.

# First count all the letters present and make a dataframe and then add a column with %G+C:

from pyfaidx import Fasta
import pandas as pd
import collections

# Per-strain letter counts: strain id -> Counter of nucleotide symbols.
nt_counts = {}
for g in genomes:
    # Derive the strain id from the filename; the `.re.fa` branch handles the
    # alternate naming scheme carried over from the earlier notebooks.
    if ".mito.fa" in g:
        strain_id = g.split(".mito.fa")[0]
    else:
        strain_id = g.split(".re.fa")[0][18:]
    # Concatenate every record in the FASTA so one Counter covers the genome.
    concatenated_seqs = ""
    chrs = Fasta(g)
    for x in chrs:
        #print(x.name)
        concatenated_seqs += str(x)
    nt_counts[strain_id] = collections.Counter(concatenated_seqs)

# Rows = strains, columns = nucleotide symbols (0 where a symbol never occurs).
nt_count_df = pd.DataFrame.from_dict(nt_counts, orient='index').fillna(0)
# NOTE(review): positional axis argument to `sum` is deprecated in newer
# pandas; `sum(axis=1)` is the forward-compatible spelling.
nt_count_df["Total_nts"] = nt_count_df.sum(1)
def percent_GCcalc(items):
    ''' takes a list of three values [C count, G count, total count] and
    returns the fraction of the total (the third item) contributed by the
    sum of the first two items; despite the name this is a fraction (0-1) —
    the `{:.2%}` display format below scales it to a percentage
    '''
    return (items[0] + items[1])/items[2]
nt_count_df['%G+C'] = nt_count_df[['C','G','Total_nts']].apply(percent_GCcalc, axis=1)
nt_count_df = nt_count_df.sort_values('Total_nts',ascending=False)
#nt_count_df = nt_count_df.sort_values(['% N', 'Total_nts'],ascending=[0,0])
nt_count_df = nt_count_df.sort_index() # df.iloc[np.lexsort((df.index, df.A.values))] # from https://stackoverflow.com/a/49354905/8508004
#nt_count_df_styled = nt_count_df.style.format({'Total_nts':'{:.2E}','% N':'{:.2%}'})
nt_count_df_styled = nt_count_df.style.format({'Total_nts':'{:.2E}','%G+C':'{:.2%}'})
nt_count_df_styled

# Done. Turns out there are no unknown nucleotides in NCYC3585 or NCYC3594.

# Sort on %G+C.
nt_count_df_alt = nt_count_df.sort_values('%G+C',ascending=False) #nt_count_df_styled = nt_count_df.style.format({'Total_nts':'{:.2E}','% N':'{:.2%}'}) nt_count_df_styled_alt = nt_count_df_alt.style.format({'Total_nts':'{:.2E}','%G+C':'{:.2%}'}) nt_count_df_styled_alt # I noted the lack of ambiguous nts at the end of `GSD Calculating_GC_for_nuclear_and_mitochondrial_genomes_of_SGD_reference_and_PB_set.ipynb`, where I had first pondered whether I could take the numbers reported in Wolters et al., 2015 at face value. Turns out I can because nothing but `GATC`s among the assembly. # ----
notebooks/GSD/GSD Calculating_ambiguous_nts_and_percentGC_for_mitochondrial_genomes_of_NCYC3594_and_NCYC3585fromWoltersETal2015.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np

# %matplotlib inline
# %load_ext autoreload
# %autoreload 2

pd.set_option('float_format', '{:.2f}'.format)
# NOTE(review): the 'seaborn-colorblind' style name was removed in
# matplotlib 3.6 (renamed 'seaborn-v0_8-colorblind'); update if this cell
# starts raising on a newer matplotlib.
plt.style.use('seaborn-colorblind')
cm = sns.light_palette("green", as_cmap=True)
# -

# Run bean-query to generate table

# !bean-query -q ~/finance/money.beancount "SELECT year, month, root(account, 2) as Account, sum(number) as Total, currency WHERE account ~ 'Expenses' OR account ~ 'Liabilities:UK:HSBC:Mortgage' OR account ~ 'Income' GROUP BY year, month, Account, currency ORDER BY year, month, currency, Account FLATTEN" > monthly.txt

# +
# Process Data: load the bean-query output and reshape it into
# Year / Month / Type / Category / Net / Currency columns.
df = pd.read_csv("monthly.txt", delim_whitespace=True)
# FIX: the old `df['Type'], df['Category'] = df['account'].str.split(':', 1).str`
# relied on iterating the `.str` accessor, which was deprecated in pandas 0.25
# and later removed; `expand=True` returns a DataFrame that can be assigned
# to both columns directly (same NaN behaviour for values without a ':').
df[['Type', 'Category']] = df['account'].str.split(':', n=1, expand=True)
# First data row is the dashed separator line bean-query prints under the header.
df = df.drop(df.index[0])
df = df.drop(['account'], axis=1)
df = df.rename(columns={'year':'Year','mo':'Month','total':'Net','cur':'Currency',})
df = df[['Year', 'Month', 'Type', 'Category', 'Net', 'Currency']]
df['Net'] = pd.to_numeric(df['Net'])
df['Net'] = df['Net'] * -1 # Make it normal

#Fix mortgage category: treat mortgage payments as a "Home" expense
df.loc[df['Type'] == "Liabilities", ['Category']] = "Home"
df['Type'] = df['Type'].replace(['Liabilities'], 'Expenses')
df.head()
# -

# Summary Table

#exp_summary = pd.pivot_table(df.query('Type == ["Expense"]'), index=['Master Category'], columns=['Month'], values=['Net'], aggfunc=np.sum, fill_value=0)
# Pivot 2017 data: one row per (Type, Category, Currency), one column per month.
summary = pd.pivot_table(df.query('Year == ["2017"]'), index=['Type','Category','Currency'], columns=['Month'], values=['Net'], aggfunc=np.sum, fill_value=0)
#summary['Total'] = summary.sum(axis=1) # adds total to categories
#summary.loc['Total']= summary.sum() # adds total row at bottom
summary
Fava Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Artificial Intelligence Nanodegree # ## Machine Translation Project # # ## Introduction # A deep neural network that functions as part of an end-to-end machine translation pipeline. The pipeline accepts English text as input and return the French translation. # # - **Preprocess** - Convert text to sequence of integers. # - **Models** Create models which accepts a sequence of integers as input and returns a probability distribution over possible translations. # - **Prediction** Run the model on English text. # + import collections import numpy as np import os from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.models import Model from keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional, Embedding from keras.optimizers import Adam from keras.losses import sparse_categorical_crossentropy # - # ### Verify access to the GPU from tensorflow.python.client import device_lib print(device_lib.list_local_devices()) # ## Dataset # We begin by investigating the dataset that will be used to train and evaluate your pipeline. # ### Load Data # The data is located in `data/small_vocab_en` and `data/small_vocab_fr`. The `small_vocab_en` file contains English sentences with their French translations in the `small_vocab_fr` file. 
def load_data(path):
    """
    Load a dataset and return it as a list of lines.

    :param path: path to a plain-text file, one sentence per line
    :return: list of str, one element per line (a trailing newline in the
        file yields a final empty string)
    """
    # os.path.join with a single argument is a no-op; kept as-is.
    input_file = os.path.join(path)
    with open(input_file, "r") as f:
        data = f.read()

    return data.split('\n')

# +
# Load English data
english_sentences = load_data('data/small_vocab_en')
# Load French data
french_sentences = load_data('data/small_vocab_fr')

print('Dataset Loaded')
# -

# ### Files
# Each line in `small_vocab_en` contains an English sentence with the respective translation in each line of `small_vocab_fr`. View the first two lines from each file.

for sample_i in range(2):
    print('small_vocab_en Line {}:  {}'.format(sample_i + 1, english_sentences[sample_i]))
    print('small_vocab_fr Line {}:  {}'.format(sample_i + 1, french_sentences[sample_i]))

# The punctuation has been delimited using spaces. All the text has been converted to lowercase. But the text requires more preprocessing.

# ### Vocabulary
# The complexity of the problem is determined by the complexity of the vocabulary. A more complex vocabulary is a more complex problem. Let's look at the complexity of the dataset we'll be working with.
# +
# Flatten each corpus into a single word list once and reuse it for both the
# Counter and the raw word count (the original rebuilt the list a second time).
english_words = [word for sentence in english_sentences for word in sentence.split()]
french_words = [word for sentence in french_sentences for word in sentence.split()]
english_words_counter = collections.Counter(english_words)
french_words_counter = collections.Counter(french_words)

print('{} English words.'.format(len(english_words)))
print('{} unique English words.'.format(len(english_words_counter)))
print('10 Most common words in the English dataset:')
print('"' + '" "'.join(word for word, _ in english_words_counter.most_common(10)) + '"')
print()
print('{} French words.'.format(len(french_words)))
print('{} unique French words.'.format(len(french_words_counter)))
print('10 Most common words in the French dataset:')
print('"' + '" "'.join(word for word, _ in french_words_counter.most_common(10)) + '"')
# -

# ## Preprocess
# We convert the text into sequences of integers using the following preprocess methods:
# 1. Tokenize the words into ids
# 2. Add padding to make all the sequences the same length.
#
# ### Tokenize
#
# We can turn each character into a number or each word into a number. These are called character and word ids, respectively. Character ids are used for character level models that generate text predictions for each character. A word level model uses word ids that generate text predictions for each word. Word level models tend to learn better, since they are lower in complexity, so we use those.
#
# We turn each sentence into a sequence of words ids using Keras's [`Tokenizer`](https://keras.io/preprocessing/text/#tokenizer) function.
# + def tokenize(x): x_tk = Tokenizer() x_tk.fit_on_texts(x) return x_tk.texts_to_sequences(x), x_tk # Tokenize Example output text_sentences = [ 'The quick brown fox jumps over the lazy dog .', 'By Jove , my quick study of lexicography won a prize .', 'This is a short sentence .'] text_tokenized, text_tokenizer = tokenize(text_sentences) print(text_tokenizer.word_index) print() for sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)): print('Sequence {} in x'.format(sample_i + 1)) print(' Input: {}'.format(sent)) print(' Output: {}'.format(token_sent)) # - # ### Padding # When batching the sequence of word ids together, each sequence needs to be the same length. Since sentences are dynamic in length, we can add padding to the end of the sequences to make them the same length. # # We make sure all the English sequences have the same length and all the French sequences have the same length by adding padding to the **end** of each sequence using Keras's [`pad_sequences`](https://keras.io/preprocessing/sequence/#pad_sequences) function. 
# + def pad(x, length=None): if length is None: length = max([len(sentence) for sentence in x]) # Then, pass it to pad_sentences as the maxlen parameter x = pad_sequences(x, maxlen=length, padding="post") return x # Pad Tokenized output test_pad = pad(text_tokenized) for sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)): print('Sequence {} in x'.format(sample_i + 1)) print(' Input: {}'.format(np.array(token_sent))) print(' Output: {}'.format(pad_sent)) # - # ### Preprocess Pipeline # + def preprocess(x, y): """ Preprocess x and y :param x: Feature List of sentences :param y: Label List of sentences :return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer) """ preprocess_x, x_tk = tokenize(x) preprocess_y, y_tk = tokenize(y) preprocess_x = pad(preprocess_x) preprocess_y = pad(preprocess_y) # Keras's sparse_categorical_crossentropy function requires the labels to be in 3 dimensions preprocess_y = preprocess_y.reshape(*preprocess_y.shape, 1) return preprocess_x, preprocess_y, x_tk, y_tk preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer =\ preprocess(english_sentences, french_sentences) max_english_sequence_length = preproc_english_sentences.shape[1] max_french_sequence_length = preproc_french_sentences.shape[1] english_vocab_size = len(english_tokenizer.word_index) french_vocab_size = len(french_tokenizer.word_index) print('Data Preprocessed') print("Max English sentence length:", max_english_sequence_length) print("Max French sentence length:", max_french_sequence_length) print("English vocabulary size:", english_vocab_size) print("French vocabulary size:", french_vocab_size) # - # ## Models # Experimenting with various neural network architectures. 
# - Model 1: simple RNN
# - Model 2: a RNN with Embedding
# - Model 3: a Bidirectional RNN
# - Model 4: an Encoder-Decoder RNN
#
# ### Ids Back to Text
# The neural network will be translating the input to word ids, which isn't the final form we want. We want the French translation. The function `logits_to_text` bridges the gap between the logits from the neural network to the French translation.

# +
epochs = 10

def logits_to_text(logits, tokenizer):
    """
    Turn logits from a neural network into text using the tokenizer.

    Takes the argmax over the vocabulary axis at each timestep and maps the
    winning id back to its word (0 maps to the '<PAD>' marker).
    """
    # NOTE(review): the comprehension variable `id` shadows the builtin of the
    # same name — harmless here, but worth renaming if this code is revisited.
    index_to_words = {id: word for word, id in tokenizer.word_index.items()}
    index_to_words[0] = '<PAD>'

    return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])

print('`logits_to_text` function loaded.')
# -

# ### Model 1: RNN
# ![RNN](images/rnn.png)
# A basic RNN model is a good baseline for sequence data.

# +
from keras import Sequential

def simple_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    """
    Build and compile a single-layer GRU baseline.

    Note: `output_sequence_length` and `english_vocab_size` are unused here;
    they are kept so all the model builders share one signature.
    """
    # Build the layers
    learning_rate = 1e-3
    model = Sequential()
    model.add(GRU(512, return_sequences=True, input_shape=input_shape[1:]))
    model.add(TimeDistributed(Dense(french_vocab_size)))
    model.add(Activation('softmax'))
    model.compile(loss=sparse_categorical_crossentropy,
                  optimizer=Adam(learning_rate),
                  metrics=['accuracy'])
    return model

# Reshaping the input to work with a basic RNN: pad to the French length and
# add a trailing feature dimension of 1.
tmp_x = pad(preproc_english_sentences, max_french_sequence_length)
tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))

# Train the neural network
simple_rnn_model = simple_model(
    tmp_x.shape,
    max_french_sequence_length,
    english_vocab_size,
    french_vocab_size)
simple_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=512, epochs=epochs, validation_split=0.2)

# Print predictions
print(logits_to_text(simple_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))
# -

# ### Model 2: Embedding
# ![RNN](images/embedding.png)
# An embedding is a vector representation of the word that is close to similar words in n-dimensional space, where the n represents the size of the embedding vectors.
#
# We create a RNN model using embedding.

# +
def embed_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    """
    Build and compile a GRU model with a learned word embedding on the input.

    NOTE(review): the embedding width is set to `output_sequence_length`,
    which ties the embedding dimension to the sentence length — presumably an
    arbitrary choice rather than a requirement; confirm before reusing.
    NOTE(review): Tokenizer ids appear to run 1..vocab_size, so
    `Embedding(english_vocab_size, ...)` may be off by one for the largest
    id — many versions of this exercise use vocab_size + 1; verify.
    """
    learning_rate = 1e-3
    model = Sequential()
    model.add(Embedding(english_vocab_size, output_sequence_length, input_length=input_shape[1]))
    model.add(GRU(512, return_sequences=True, input_shape=input_shape[1:]))
    model.add(TimeDistributed(Dense(french_vocab_size)))
    model.add(Activation('softmax'))
    model.compile(loss=sparse_categorical_crossentropy,
                  optimizer=Adam(learning_rate),
                  metrics=['accuracy'])
    return model

# Reshape the input: 2-D (batch, timesteps) of word ids for the Embedding layer.
tmp_x = pad(preproc_english_sentences, max_french_sequence_length)
tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2]))

# Train the neural network
embed_rnn_model = embed_model(
    tmp_x.shape,
    max_french_sequence_length,
    english_vocab_size,
    french_vocab_size)
embed_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=512, epochs=epochs, validation_split=0.2)

# Print predictions
print(logits_to_text(embed_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))
# -

# ### Model 3: Bidirectional RNNs
# ![RNN](images/bidirectional.png)
# RNN can't see the future input, only the past.<br>
# We try bidirectional recurrent neural networks as they are able to see the future data.
# +
def bd_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    """
    Build and compile a bidirectional GRU model.

    The network reads each padded input sequence in both directions, then
    predicts a softmax distribution over the French vocabulary at every
    timestep.

    Note: `output_sequence_length` and `english_vocab_size` are unused; they
    are kept so all the model builders share one signature.
    """
    lr = 1e-3
    net = Sequential([
        Bidirectional(GRU(512, return_sequences=True), input_shape=input_shape[1:]),
        TimeDistributed(Dense(french_vocab_size)),
        Activation('softmax'),
    ])
    net.compile(loss=sparse_categorical_crossentropy,
                optimizer=Adam(lr),
                metrics=['accuracy'])
    return net

# Reshape the observations: pad to the French length, one feature per timestep.
tmp_x = pad(preproc_english_sentences, max_french_sequence_length)
tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))

# Build and train the network
bd_rnn_model = bd_model(tmp_x.shape,
                        max_french_sequence_length,
                        english_vocab_size,
                        french_vocab_size)
bd_rnn_model.fit(tmp_x, preproc_french_sentences,
                 batch_size=512, epochs=epochs, validation_split=0.2)

# Sanity-check one translation
print(logits_to_text(bd_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))
# -

# ### Model 4: Encoder-Decoder
# This model is made up of an encoder and decoder. The encoder creates a matrix representation of the sentence. The decoder takes this matrix as input and predicts the translation as output.
# +
def encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    """
    Build and compile an encoder-decoder model.

    The encoder collapses the input sequence to a single vector
    (return_sequences=False); RepeatVector feeds that vector to the decoder
    GRU once per output timestep.

    Note: `english_vocab_size` is unused; kept for a uniform builder signature.
    """
    learning_rate = 1e-3
    model = Sequential()
    # encoder
    model.add(Bidirectional(GRU(512, return_sequences=False), input_shape=input_shape[1:]))
    # NOTE(review): projecting the encoder state through a french_vocab_size
    # Dense + relu before RepeatVector is an unusual bottleneck choice —
    # presumably deliberate for this exercise; confirm before reusing.
    model.add(Dense(french_vocab_size))
    model.add(Activation('relu'))
    # decoder
    model.add(RepeatVector(output_sequence_length))
    model.add(GRU(512, return_sequences=True))
    model.add(TimeDistributed(Dense(french_vocab_size)))
    model.add(Activation('softmax'))
    model.compile(loss=sparse_categorical_crossentropy,
                  optimizer=Adam(learning_rate),
                  metrics=['accuracy'])
    return model

tmp_x = pad(preproc_english_sentences, max_french_sequence_length)
tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))

# Build the model
encdec_rnn_model = encdec_model(
    tmp_x.shape,
    max_french_sequence_length,
    english_vocab_size,
    french_vocab_size)

# Train the neural network
encdec_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=512, epochs=epochs, validation_split=0.2)

# Print prediction(s)
print(logits_to_text(encdec_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))
# -

# ### Model 5: Custom

# +
def model_final(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    """
    Build and compile the final model: embedding + bidirectional encoder,
    RepeatVector bridge, bidirectional decoder, per-timestep softmax.

    NOTE(review): Tokenizer ids appear to run 1..vocab_size, so
    `Embedding(input_dim=english_vocab_size, ...)` may be off by one for the
    largest id — many versions of this exercise pass vocab_size + 1; verify.
    """
    learning_rate = 1e-3
    model = Sequential()
    # encoder
    model.add(Embedding(input_dim=english_vocab_size, output_dim=512, input_length=input_shape[1:][0]))
    model.add(Bidirectional(GRU(512), input_shape=input_shape[1:]))
    model.add(Dense(french_vocab_size))
    model.add(Activation('relu'))
    # decoder
    model.add(RepeatVector(output_sequence_length))
    model.add(Bidirectional(GRU(512, return_sequences=True)))
    model.add(TimeDistributed(Dense(french_vocab_size)))
    model.add(Activation('softmax'))
    model.compile(loss=sparse_categorical_crossentropy,
                  optimizer=Adam(learning_rate),
                  metrics=['accuracy'])
    return model

print('Final Model Loaded')
# -

# ## Prediction

# +
def final_predictions(x, y, x_tk, y_tk):
    """
    Train `model_final` on the full preprocessed data, then translate one
    hand-written sentence and one training sentence, printing each prediction
    next to its reference translation.

    :param x: padded English id sequences
    :param y: padded French id sequences (with trailing label dimension)
    :param x_tk: fitted English tokenizer
    :param y_tk: fitted French tokenizer
    """
    # Train neural network using model_final
    model = model_final(x.shape,
                        y.shape[1],
                        len(x_tk.word_index),
                        len(y_tk.word_index))
    model.fit(x, y, batch_size=512, epochs=epochs, validation_split=0.2)

    y_id_to_word = {value: key for key, value in y_tk.word_index.items()}
    y_id_to_word[0] = '<PAD>'

    sentence = 'he saw a old yellow truck'
    sentence = [x_tk.word_index[word] for word in sentence.split()]
    sentence = pad_sequences([sentence], maxlen=x.shape[-1], padding='post')
    sentences = np.array([sentence[0], x[0]])
    predictions = model.predict(sentences, len(sentences))

    # NOTE(review): the comprehension variable `x` below shadows the function
    # parameter `x`; harmless after this point, but confusing to read.
    print('Sample 1:')
    print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[0]]))
    print('Il a vu un vieux camion jaune')
    print('Sample 2:')
    print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[1]]))
    # y[0] rows are single-element arrays holding one id, so np.max just
    # extracts that id for the reference translation.
    print(' '.join([y_id_to_word[np.max(x)] for x in y[0]]))

final_predictions(preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer)
machine_translation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/jameschapman19/cca_zoo/blob/master/cca_zoo_sparsity.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="rpca4lWRvlwC" # # A tutorial on using cca-zoo to generate multiview models with sparsity on weights # + colab={"base_uri": "https://localhost:8080/"} id="riuTJcsuvRcS" outputId="2b9cf1e7-3195-47ec-e6b6-a5077f86bbcd" # !pip install cca-zoo # + id="LVmJ5X8RvV3_" from cca_zoo.models import PMD, SCCA, ElasticCCA, CCA, PLS, SCCA_ADMM, SpanCCA from cca_zoo.data import generate_covariance_data import matplotlib.pyplot as plt import numpy as np import itertools # + [markdown] id="IkMwUGzkwbQY" # ## Generate some data # set the true correlation and the sparsity of the true weights # + id="zdYep44wvtKo" np.random.seed(42) n=200 p=100 q=100 view_1_sparsity=0.1 view_2_sparsity=0.1 true_latent_dims=1 (X,Y),(tx, ty)=generate_covariance_data(n,view_features=[p,q],latent_dims=true_latent_dims, view_sparsity=[view_1_sparsity,view_2_sparsity],correlation=[0.9]) #normalize weights for comparability tx/=np.sqrt(n) ty/=np.sqrt(n) # + id="ijitQkskw_jw" def plot_true_weights_coloured(ax, weights, true_weights, title=''): ind = np.arange(len(true_weights)) mask = np.squeeze(true_weights == 0) ax.scatter(ind[~mask], weights[~mask], c='b') ax.scatter(ind[mask], weights[mask], c='r') ax.set_title(title) def plot_model_weights(wx,wy,tx,ty): fig,axs=plt.subplots(2,2,sharex=True,sharey=True) plot_true_weights_coloured(axs[0,0],tx,tx,title='true x weights') plot_true_weights_coloured(axs[0,1],ty,ty,title='true y weights') plot_true_weights_coloured(axs[1,0],wx,tx,title='model x weights') 
plot_true_weights_coloured(axs[1,1],wy,ty,title='model y weights') plt.tight_layout() plt.show() # + [markdown] id="x_-JR1lywpNO" # ## First try with CCA # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="as1irviNwnCW" outputId="75411229-f740-45cf-b4c6-627b301d03f7" #fit a cca model cca=CCA().fit(X,Y) plot_model_weights(cca.weights[0],cca.weights[1],tx,ty) # + [markdown] id="AMLK2z5C1bFf" # ## PLS # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="VqnBFLwFw1Fi" outputId="426f3edd-6883-47f2-b387-25d751d9a7fe" #fit a pls model pls=PLS().fit(X,Y) plot_model_weights(pls.weights[0],pls.weights[1],tx,ty) # + [markdown] id="maZg5LdP1l3H" # ## Penalized Matrix Decomposition (Sparse CCA by Witten) # Initially set c=2 for both views arbitrarily # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="2petCaj61ffh" outputId="99fbab7d-51e0-4282-8671-aac1fa4fc640" #fit a pmd model pmd=PMD(c=[2,2]).fit(X,Y) plot_model_weights(pmd.weights[0],pmd.weights[1],tx,ty) # + [markdown] id="gTZEL2SBTijd" # ## Tracking the objective # For these iterative algorithms, you can access the convergence over iterations # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="eMutm5DjTh_V" outputId="0e138697-d98e-42fb-bc53-2948388e0d90" #Convergence plt.figure() plt.title('Objective Convergence') plt.plot(np.array(pmd.objective).T) plt.ylabel('Objective') plt.xlabel('#iterations') # + [markdown] id="OusaWIn82Wb7" # ### We can also tune the hyperparameter using gridsearch_fit # + colab={"base_uri": "https://localhost:8080/", "height": 401} id="O7VXP9h21vyB" outputId="dafee9bc-93b3-42e3-fae0-c81095916f40" #Set up a grid. We can't use c<1 or c>sqrt(#features) c1 = [1, 3, 7, 9] c2 = [1, 3, 7, 9] param_candidates = {'c': list(itertools.product(c1, c2))} #gridsearch fit can use multiple cores (jobs) and takes folds (number of cv folds) as a parameter. It can also produce a plot. 
pmd.gridsearch_fit(X,Y,param_candidates=param_candidates, folds=3, verbose=True,jobs=2, plot=True) # + [markdown] id="DscFV-7P3dU2" # Also the model object now has a pandas dataframe containing the results from each fold # + colab={"base_uri": "https://localhost:8080/", "height": 527} id="2GNSiTjC21fB" outputId="353bd505-b2bd-41a0-fe89-4addbeaa60c6" pmd.cv_results_table # + [markdown] id="O7rWUBmb4apX" # ## Sparse CCA by iterative lasso (Mai) # + colab={"base_uri": "https://localhost:8080/", "height": 592} id="FimdDUDe3kML" outputId="4d947d0c-798c-4874-e36b-2dcb58136cad" #fit a scca model scca=SCCA(c=[1e-3,1e-3]).fit(X,Y) plot_model_weights(scca.weights[0],scca.weights[1],tx,ty) #Convergence plt.figure() plt.title('Objective Convergence') plt.plot(np.array(scca.objective).T) plt.ylabel('Objective') plt.xlabel('#iterations') # + [markdown] id="OjrY4zGuQQix" # ### Positivity Constraints # In this case it isn't helpful (the data were generated with positive and negative weights) but is a cool functionality! # + colab={"base_uri": "https://localhost:8080/", "height": 592} id="gdBFdkvfQUb6" outputId="5a1725a2-fb1e-447f-8edc-cb0dec667d91" #fit a scca model with positivity constraints scca_pos=SCCA(c=[1e-3,1e-3],positive=[True,True]).fit(X,Y) plot_model_weights(scca_pos.weights[0],scca_pos.weights[1],tx,ty) #Convergence plt.figure() plt.title('Objective Convergence') plt.plot(np.array(scca_pos.objective).T) plt.ylabel('Objective') plt.xlabel('#iterations') # + [markdown] id="KPC7uhls4ycW" # ## Sparse CCA by iterative elastic net (adapted from Waaijenborg) # + colab={"base_uri": "https://localhost:8080/", "height": 592} id="wuZjjyN24j0P" outputId="787bd093-fdd4-484b-c691-2b1ebb31eda0" #fit an elastic model #for some reason this model needs REALLY big l2 regularisation. 
This is actually #the same level of l1 regularisation as SCCA elasticcca=ElasticCCA(c=[10000,10000],l1_ratio=[0.000001,0.000001]).fit(X,Y) plot_model_weights(elasticcca.weights[0],elasticcca.weights[1],tx,ty) #Convergence plt.figure() plt.title('Objective Convergence') plt.plot(np.array(elasticcca.objective).T) plt.ylabel('Objective') plt.xlabel('#iterations') # + [markdown] id="E8TEaBhe7CYw" # ## Sparse CCA by ADMM # + colab={"base_uri": "https://localhost:8080/", "height": 592} id="7RkSPcWR7FY8" outputId="ade05163-1c25-4e20-c692-c208135444c1" #fit a scca_admm model scca_admm=SCCA_ADMM(c=[1e-3,1e-3]).fit(X,Y) plot_model_weights(scca_admm.weights[0],scca_admm.weights[1],tx,ty) #Convergence plt.figure() plt.title('Objective Convergence') plt.plot(np.array(scca_admm.objective).T) plt.ylabel('Objective') plt.xlabel('#iterations') # + [markdown] id="LcbbCAa_5q5C" # ## Sparse CCA by random projection (Span CCA) # This time the regularisation parameter c is the l0 norm of the weights i.e. the maximum number of non-zero weights. Let's cheat and give it the correct numbers. We can also change the rank of the estimation as described in the paper # + colab={"base_uri": "https://localhost:8080/", "height": 592} id="F_bT21qk5jA5" outputId="f475b6d9-cc6c-408b-c3e1-087d98e5e653" #fit a spancca model spancca=SpanCCA(c=[10,10],max_iter=2000,rank=20).fit(X,Y) plot_model_weights(spancca.weights[0],spancca.weights[1],tx,ty) #Convergence plt.figure() plt.title('Objective Convergence') plt.plot(np.array(spancca.objective).T) plt.ylabel('Objective') plt.xlabel('#iterations') # + id="eTEVZnXpQzFm"
tutorial_notebooks/cca_zoo_sparsity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/socd06/openvino_colab/blob/master/interview_prep.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="9cfBU_u7EiN-" colab_type="text" # # Interview Preparation using Intel OpenVINO Toolkit Pre-Trained Models # + colab_type="code" id="YEDdjgikf1rT" outputId="527a3e2e-bc67-49b6-fb7c-5da0c9365372" colab={"base_uri": "https://localhost:8080/", "height": 121} from google.colab import drive drive.mount('/content/drive') # + [markdown] colab_type="text" id="lew-AyWqEa19" # # Setup Project files # + colab_type="code" id="_BSwbZ7kgkgl" outputId="c32f4bfe-6f4a-4841-ab71-c1dda4d64d64" colab={"base_uri": "https://localhost:8080/", "height": 34} # cd /content # + colab_type="code" id="eWq1LTCrtpQn" outputId="8d64211c-3409-47e6-f567-d5c2d93bbf8b" colab={"base_uri": "https://localhost:8080/", "height": 134} # !git clone "https://github.com/socd06/openvino_colab.git" # + [markdown] id="RlawlgAqo24D" colab_type="text" # # Setting up environment paths # + colab_type="code" id="YkwgKjE8tcdA" colab={} install_dir = '/opt/intel/openvino/' model_optimizer = '/opt/intel/openvino/deployment_tools/model_optimizer/' deployment_tools = '/opt/intel/openvino/deployment_tools/' model_zoo = '/opt/intel/openvino/deployment_tools/open_model_zoo/' # + [markdown] colab_type="text" id="TmlgsYhxEkTW" # # OpenVINO Installation # + colab_type="code" id="DZDxtt-et6zY" outputId="cf59737a-4730-4595-e403-15c509d734f3" colab={"base_uri": "https://localhost:8080/", "height": 34} # cd openvino_colab/ # + colab_type="code" id="P1UL6ZJ1nOs5" outputId="6f3cd829-e3c8-4d42-9759-8e76712661ce" colab={"base_uri": 
"https://localhost:8080/", "height": 1000} # !python openvino_initialization_script.py # + [markdown] colab_type="text" id="662rwWCoDQzV" # # Downloading Models # + [markdown] id="C9qNdspf1-cZ" colab_type="text" # Human Pose Estimation model # * **human-pose-estimation-0001** ([Documentation](https://docs.openvinotoolkit.org/2020.1/_models_intel_human_pose_estimation_0001_description_human_pose_estimation_0001.html)) # + id="ZB92SYEt2AnP" colab_type="code" outputId="b98ebfec-d34c-4bde-9889-7ac3451b97ee" colab={"base_uri": "https://localhost:8080/", "height": 185} # !python $model_zoo'tools/downloader/'downloader.py --name human-pose-estimation-0001 --precisions FP32-INT8 -o /content/openvino_colab/models # + [markdown] id="J8ZRITuHuGj-" colab_type="text" # Fast Face Recognition model # # * **face-detection-adas-binary-0001** ([Documentation](https://docs.openvinotoolkit.org/2019_R1/_face_detection_adas_binary_0001_description_face_detection_adas_binary_0001.html)) # + id="zBpbYzSwT9Ph" colab_type="code" outputId="ac59ceed-3de1-4ad6-fbc9-c2515c5b6383" colab={"base_uri": "https://localhost:8080/", "height": 205} # !python $model_zoo'tools/downloader/'downloader.py --name face-detection-adas-binary-0001 -o /content/openvino_colab/models # + [markdown] id="BCAcKk-ITXZF" colab_type="text" # Emotions Recognition model # # * **emotions-recognition-retail-0003** ([Documentation](https://docs.openvinotoolkit.org/2020.1/_models_intel_emotions_recognition_retail_0003_description_emotions_recognition_retail_0003.html#outputs)) # + colab_type="code" id="dvcXlgVn6xCc" outputId="ec1abcc9-4a5a-491f-d8b1-42b2c37252b5" colab={"base_uri": "https://localhost:8080/", "height": 205} # !python $model_zoo'tools/downloader/'downloader.py --name emotions-recognition-retail-0003 --precisions FP32-INT8 -o /content/openvino_colab/models # + id="F9rA57J7KU8t" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 205} outputId="2ec763c0-5172-4e4f-b38d-e8a1c242a163" # CAR META 
MODEL (FOR TESTING) # !python $model_zoo'tools/downloader/'downloader.py --name vehicle-attributes-recognition-barrier-0039 --precisions FP32-INT8 -o /content/OpenDevLibrary/demo_files/models # + [markdown] colab_type="text" id="fTICAE5UFEOl" # # Running Inference # + colab_type="code" id="xE2vFHXc-g6H" outputId="3f9528e9-2084-469b-c270-1d2e959d7543" colab={"base_uri": "https://localhost:8080/", "height": 34} # cd /content/openvino_colab/demo_files/ # + id="Q_rjqcT8KcvM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="d0a2a988-697c-4693-955b-4f54ea551ae0" #CAR META for testing # #!source /opt/intel/openvino/bin/setupvars.sh && python app.py -i "images/blue-car.jpg" -n "testing2" -t "CAR_META" -m "/content/OpenDevLibrary/demo_files/models/intel/vehicle-attributes-recognition-barrier-0039/FP32-INT8/vehicle-attributes-recognition-barrier-0039.xml" # + [markdown] id="gX8eY8uA9sbo" colab_type="text" # Pose Estimation # + id="tW6gEYrO9v2t" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="9ddbf9e2-f6be-4e2e-c487-eccf246b70d5" # !source /opt/intel/openvino/bin/setupvars.sh && python app.py -i "/content/openvino_colab/img/frame2.png" -t "POSE" -n "fm2" -m "/content/openvino_colab/models/intel/human-pose-estimation-0001/FP32-INT8/human-pose-estimation-0001.xml" # + [markdown] id="RzbQjc8U938X" colab_type="text" # Face Detection # + colab_type="code" id="vrLkzIKv3e4_" outputId="c132a4d4-213b-4a16-c2d3-273cb8c3fe47" colab={"base_uri": "https://localhost:8080/", "height": 202} #Under development # #!source /opt/intel/openvino/bin/setupvars.sh && python app.py -i "/content/openvino_colab/frame (1).png" -t "FACE" -n "fm(1)" -m "/content/openvino_colab/models/intel/face-detection-adas-binary-0001/FP32-INT1/face-detection-adas-binary-0001.xml" # + [markdown] id="mi45KNU499yb" colab_type="text" # Emotion Recognition # + id="vQ1WJ1Tr-APx" colab_type="code" colab={"base_uri": "https://localhost:8080/", 
"height": 118} outputId="58c35116-6b76-4794-8541-1bb518f30cd7" # !source /opt/intel/openvino/bin/setupvars.sh && python app.py -i "/content/openvino_colab/img/frame (3).png" -t "EMO" -n "fm(3)" -m "/content/openvino_colab/models/intel/emotions-recognition-retail-0003/FP32-INT8/emotions-recognition-retail-0003.xml" # + [markdown] colab_type="text" id="Bp4CyJ__ELpB" # # Output and Results # + colab_type="code" id="3Fz6ZuQo5rzg" outputId="da039553-f971-4863-da19-fba6ab1e0453" colab={"base_uri": "https://localhost:8080/", "height": 34} # cd /content/openvino_colab/demo_files/outputs/ # + id="xCQXtYGrBSqC" colab_type="code" colab={} from google.colab.patches import cv2_imshow import cv2 # + [markdown] id="8nDk5WQvAD36" colab_type="text" # ## Pose Detection Results # + id="EnNlD84GCk-E" colab_type="code" outputId="29024ab3-16a5-4d8f-d4e3-3e4d50a572fd" colab={"base_uri": "https://localhost:8080/", "height": 497} img = cv2.imread("POSE-output-fm1.png", cv2.IMREAD_UNCHANGED) cv2_imshow(img) # + [markdown] id="Zsv9GS4nDZ5j" colab_type="text" # ## Emotion Recognition Results # + id="8c3iK43siXPh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 497} outputId="093f8c1c-4bcc-4c6b-d743-fda3843ae854" img = cv2.imread("EMO-output-fm1.png", cv2.IMREAD_UNCHANGED) cv2_imshow(img)
interview_prep.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="77obbTbf-1VE" # # CNN Classification with MR Dataset # <hr> # # The __modus operandi__ for text classification is to use __word embedding__ for representing words and a Convolutional neural network to learn how to discriminate documents on classification problems. # # __<NAME>__ commented in _A Primer on Neural Network Models for Natural Language Processing, 2015._ : # > _The non-linearity of the network, as well as the ability to easily integrate pre-trained # word embeddings, often lead to superior classification accuracy._ # # He also commented in _Neural Network Methods for Natural Language Processing, 2017_ : # > ... _the CNN is in essence a feature-extracting architecture. ... . The CNNs layer's responsibility is to extract meaningful sub-structures that are useful for the overall prediction task at hand._ # # We will build a text classification model using CNN model on the Movie Reviews Dataset. Since there is no standard train/test split for this dataset, we will use 10-Fold Cross Validation (CV). # # The CNN model is inspired by __Yoon Kim__ paper in his study on the use of Word Embedding + CNN for text classification. The hyperparameters we use based on his study are as follows: # - Transfer function: rectified linear. # - Kernel sizes: 1,2, 3, 4, 5. # - Number of filters: 100. # - Dropout rate: 0.5. # - Weight regularization (L2) constraint: 3. # - Batch Size: 50. 
# - Update Rule: Adam # # ## Load the library # + colab={"base_uri": "https://localhost:8080/"} id="i3UxviMiBpfz" outputId="aae9cc37-f488-4d2f-ed6c-1e820dcb79ed" from google.colab import drive drive.mount('/content/drive') # + colab={"base_uri": "https://localhost:8080/"} id="G7dhWqUO-1VS" outputId="ff3a40cd-3c76-42c1-af05-2bf7e65b5eda" import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import re import nltk import random # from nltk.tokenize import TweetTokenizer from sklearn.model_selection import KFold # %config IPCompleter.greedy=True # %config IPCompleter.use_jedi=False # nltk.download('twitter_samples') # + colab={"base_uri": "https://localhost:8080/"} id="UdNUUbiH-1VU" outputId="1097e84a-ce8d-45d6-99c4-2a19d1df2824" tf.config.list_physical_devices('GPU') # + [markdown] id="AAd1AK6q-1VX" # ## Load the Dataset # + colab={"base_uri": "https://localhost:8080/", "height": 436} id="mKLeesTG-1VX" outputId="17d2322f-dd8d-46ee-977d-196cb08f4eff" corpus = pd.read_pickle('/content/drive/MyDrive/Disertasi/0_data/MR/MR.pkl') corpus.label = corpus.label.astype(int) print(corpus.shape) corpus # + colab={"base_uri": "https://localhost:8080/"} id="TlE-WoHu-1VY" outputId="cdf00370-3749-4297-99e2-12c4230ea8ee" corpus.info() # + colab={"base_uri": "https://localhost:8080/", "height": 142} id="bIALC5Ez-1VZ" outputId="99f53275-51e1-433d-8bad-b9e2aa88a06c" corpus.groupby( by='label').count() # + id="8dGbarl9-1Va" # Separate the sentences and the labels sentences, labels = list(corpus.sentence), list(corpus.label) # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="aXiK_u0s-1Va" outputId="b383df82-54f7-4a08-8f16-b0a845d41bdd" sentences[0] # + [markdown] id="Fb-EiTb0-1Vb" # <!--## Split Dataset--> # + [markdown] id="0KtDuWLo-1Vc" # # Data Preprocessing # <hr> # # Preparing data for word embedding, especially for pre-trained word embedding like Word2Vec or GloVe, __don't use standard preprocessing steps 
like stemming or stopword removal__. Compared to our approach on cleaning the text when doing word count based feature extraction (e.g. TFIDF) such as removing stopwords, stemming etc, now we will keep these words as we do not want to lose such information that might help the model learn better. # # __<NAME>__, one of the developers of Word2Vec, in _word2vec-toolkit: google groups thread., 2015_, suggests only very minimal text cleaning is required when learning a word embedding model. Sometimes, it's good to disconnect # In short, what we will do is: # - Puntuations removal # - Lower the letter case # - Tokenization # # The process above will be handled by __Tokenizer__ class in TensorFlow # # - <b>One way to choose the maximum sequence length is to just pick the length of the longest sentence in the training set.</b> # + id="2ydFj2Gx-1Vd" # Define a function to compute the max length of sequence def max_length(sequences): ''' input: sequences: a 2D list of integer sequences output: max_length: the max length of the sequences ''' max_length = 0 for i, seq in enumerate(sequences): length = len(seq) if max_length < length: max_length = length return max_length # + colab={"base_uri": "https://localhost:8080/"} id="26Kk0lII-1Vd" outputId="25acc785-5e8f-4dda-c39b-d41cc4e8a7b3" from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences trunc_type='post' padding_type='post' oov_tok = "<UNK>" # Separate the sentences and the labels sentences, labels = list(corpus.sentence), list(corpus.label) # Cleaning and Tokenization tokenizer = Tokenizer(oov_token=oov_tok) tokenizer.fit_on_texts(sentences) print("Example of sentence: ", sentences[8]) # Turn the text into sequence training_sequences = tokenizer.texts_to_sequences(sentences) max_len = max_length(training_sequences) print('Into a sequence of int:', training_sequences[8]) # Pad the sequence to have the same size training_padded = 
pad_sequences(training_sequences, maxlen=max_len, padding=padding_type, truncating=trunc_type) print('Into a padded sequence:', training_padded[8]) # + colab={"base_uri": "https://localhost:8080/"} id="Nj_ldvG4-1Ve" outputId="65aef3df-718d-4973-f70e-729ed2e955ca" # See the first 10 words in the vocabulary word_index = tokenizer.word_index for i, word in enumerate(word_index): print(word, word_index.get(word)) if i==9: break vocab_size = len(word_index)+1 print(vocab_size) # + [markdown] id="bZdWqdSK-1Vf" # # Model 1: Embedding Random # <hr> # # A __standard model__ for document classification is to use (quoted from __<NAME>__, the author of [machinelearningmastery.com](https://machinelearningmastery.com)): # >- Word Embedding: A distributed representation of words where different words that have a similar meaning (based on their usage) also have a similar representation. # >- Convolutional Model: A feature extraction model that learns to extract salient features from documents represented using a word embedding. # >- Fully Connected Model: The interpretation of extracted features in terms of a predictive output. # # # Therefore, the model is comprised of the following elements: # - __Input layer__ that defines the length of input sequences. # - __Embedding layer__ set to the size of the vocabulary and 100-dimensional real-valued representations. # - __Conv1D layer__ with 32 filters and a kernel size set to the number of words to read at once. # - __MaxPooling1D layer__ to consolidate the output from the convolutional layer. # - __Flatten layer__ to reduce the three-dimensional output to two dimensional for concatenation. # # The CNN model is inspired by __Yoon Kim__ paper in his study on the use of Word Embedding + CNN for text classification. The hyperparameters we use based on his study are as follows: # - Transfer function: rectified linear. # - Kernel sizes: 3, 4, 5. # - Number of filters: 100. # - Dropout rate: 0.5. # - Weight regularization (L2): 3. 
# - Batch Size: 50. # - Update Rule: Adam # # We will perform the best parameter using __grid search__ and 10-fold cross validation. # + [markdown] id="_hwOHQhl-1Vg" # ## CNN Model # # Now, we will build Convolutional Neural Network (CNN) models to classify encoded documents as either positive or negative. # # The model takes inspiration from `CNN for Sentence Classification` by *<NAME>*. # # Now, we will define our CNN model as follows: # - One Conv layer with 100 filters, kernel size 5, and relu activation function; # - One MaxPool layer with pool size = 2; # - One Dropout layer after flattened; # - Optimizer: Adam (The best learning algorithm so far) # - Loss function: binary cross-entropy (suited for binary classification problem) # # **Note**: # - The whole purpose of dropout layers is to tackle the problem of over-fitting and to introduce generalization to the model. Hence it is advisable to keep dropout parameter near 0.5 in hidden layers. # - https://missinglink.ai/guides/keras/keras-conv1d-working-1d-convolutional-neural-networks-keras/ # + id="CTJ3AUet-1Vh" from tensorflow.keras import regularizers from tensorflow.keras.constraints import MaxNorm def define_model(filters = 100, kernel_size = 3, activation='relu', input_dim = None, output_dim=300, max_length = None ): model = tf.keras.models.Sequential([ tf.keras.layers.Embedding(input_dim=vocab_size, output_dim=output_dim, input_length=max_length, input_shape=(max_length, )), tf.keras.layers.Conv1D(filters=filters, kernel_size = kernel_size, activation = activation, # set 'axis' value to the first and second axis of conv1D weights (rows, cols) kernel_constraint= MaxNorm( max_value=3, axis=[0,1])), tf.keras.layers.MaxPool1D(2), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10, activation=activation, # set axis to 0 to constrain each weight vector of length (input_dim,) in dense layer kernel_constraint = MaxNorm( max_value=3, axis=0)), tf.keras.layers.Dropout(0.5), 
tf.keras.layers.Dense(units=1, activation='sigmoid') ]) model.compile( loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy']) # model.summary() return model # + id="Td-7uoNr-1Vk" outputId="1faf6092-c935-483d-8064-69f169849f34" model_0 = define_model( input_dim=1000, max_length=100) model_0.summary() # + id="oHOOuqyR-1Vl" class myCallback(tf.keras.callbacks.Callback): # Overide the method on_epoch_end() for our benefit def on_epoch_end(self, epoch, logs={}): if (logs.get('accuracy') > 0.93): print("\nReached 93% accuracy so cancelling training!") self.model.stop_training=True callbacks = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', min_delta=0, patience=5, verbose=2, mode='auto', restore_best_weights=True) # + [markdown] id="0anU5cCh-1Vm" # ## Train and Test the Model # + id="azMioxNm-1Vn" outputId="fb392303-c5f7-4c4f-a352-232f68525db1" # Parameter Initialization trunc_type='post' padding_type='post' oov_tok = "<UNK>" activations = ['relu', 'tanh'] filters = 100 kernel_sizes = [1, 2, 3, 4, 5, 6] columns = ['Activation', 'Filters', 'acc1', 'acc2', 'acc3', 'acc4', 'acc5', 'acc6', 'acc7', 'acc8', 'acc9', 'acc10', 'AVG'] record = pd.DataFrame(columns = columns) # prepare cross validation with 10 splits and shuffle = True kfold = KFold(10, True) # Separate the sentences and the labels sentences, labels = list(corpus.sentence), list(corpus.label) for activation in activations: for kernel_size in kernel_sizes: # kfold.split() will return set indices for each split acc_list = [] for train, test in kfold.split(sentences): train_x, test_x = [], [] train_y, test_y = [], [] for i in train: train_x.append(sentences[i]) train_y.append(labels[i]) for i in test: test_x.append(sentences[i]) test_y.append(labels[i]) # Turn the labels into a numpy array train_y = np.array(train_y) test_y = np.array(test_y) # encode data using # Cleaning and Tokenization tokenizer = Tokenizer(oov_token=oov_tok) tokenizer.fit_on_texts(train_x) # Turn the text into sequence 
training_sequences = tokenizer.texts_to_sequences(train_x) test_sequences = tokenizer.texts_to_sequences(test_x) max_len = max_length(training_sequences) # Pad the sequence to have the same size Xtrain = pad_sequences(training_sequences, maxlen=max_len, padding=padding_type, truncating=trunc_type) Xtest = pad_sequences(test_sequences, maxlen=max_len, padding=padding_type, truncating=trunc_type) word_index = tokenizer.word_index vocab_size = len(word_index)+1 # Define the input shape model = define_model(filters, kernel_size, activation, input_dim=vocab_size, max_length=max_len) # Train the model model.fit(Xtrain, train_y, batch_size=50, epochs=15, verbose=2, callbacks=[callbacks], validation_data=(Xtest, test_y)) # evaluate the model loss, acc = model.evaluate(Xtest, test_y, verbose=0) print('Test Accuracy: {}'.format(acc*100)) acc_list.append(acc*100) mean_acc = np.array(acc_list).mean() parameters = [activation, kernel_size] entries = parameters + acc_list + [mean_acc] temp = pd.DataFrame([entries], columns=columns) record = record.append(temp, ignore_index=True) print() print(record) print() # + [markdown] id="q3Vo9yV6-1Vp" # ## Summary # + id="aE9_uFkL-1Vp" outputId="98f01fd7-4d49-45ba-81b9-7404edf913f8" record.sort_values(by='AVG', ascending=False) # + id="iUCtq82X-1Vq" outputId="90950922-40ff-45a9-ed58-2d68c808672c" record[['Activation', 'AVG']].groupby(by='Activation').max().sort_values(by='AVG', ascending=False) # + id="wsrLCqGz-1Vq" report = record.sort_values(by='AVG', ascending=False) report = report.to_excel('CNN_MR.xlsx', sheet_name='random') # + [markdown] id="Tu-PGn69-1Vr" # # Model 2: Word2Vec Static # + [markdown] id="KXDT243N-1Vr" # __Using and updating pre-trained embeddings__ # * In this part, we will create an Embedding layer in Tensorflow Keras using a pre-trained word embedding called Word2Vec 300-d tht has been trained 100 bilion words from Google News. # * In this part, we will leave the embeddings fixed instead of updating them (dynamic). 
# + [markdown] id="O8hsfSmB-1Vs" # 1. __Load `Word2Vec` Pre-trained Word Embedding__ # + id="qAe_7tNO-1Vs" from gensim.models import KeyedVectors word2vec = KeyedVectors.load_word2vec_format('/content/drive/MyDrive/Disertasi/WordEmbedding_Models/Word2Vec/GoogleNews-vectors-negative300.bin', binary=True) # + colab={"base_uri": "https://localhost:8080/"} id="j2VEEurx-1Vs" outputId="eeb4067b-effa-4de8-f856-a22f5229fcb7" # Access the dense vector value for the word 'handsome' # word2vec.word_vec('handsome') # 0.11376953 word2vec.word_vec('cool') # 1.64062500e-01 # + [markdown] id="xroQn89R-1Vt" # 2. __Check number of training words present in Word2Vec__ # + id="jIsW8cfb-1Vt" def training_words_in_word2vector(word_to_vec_map, word_to_index): ''' input: word_to_vec_map: a word2vec GoogleNews-vectors-negative300.bin model loaded using gensim.models word_to_index: word to index mapping from training set ''' vocab_size = len(word_to_index) + 1 count = 0 # Set each row "idx" of the embedding matrix to be # the word vector representation of the idx'th word of the vocabulary for word, idx in word_to_index.items(): if word in word_to_vec_map: count+=1 return print('Found {} words present from {} training vocabulary in the set of pre-trained word vector'.format(count, vocab_size)) # + colab={"base_uri": "https://localhost:8080/"} id="FJZYCpiO-1Vt" outputId="e5bd01c7-379e-41a3-9a69-21d61fde1689" # Separate the sentences and the labels sentences, labels = list(corpus.sentence), list(corpus.label) # Cleaning and Tokenization tokenizer = Tokenizer(oov_token=oov_tok) tokenizer.fit_on_texts(sentences) word_index = tokenizer.word_index training_words_in_word2vector(word2vec, word_index) # + [markdown] id="qZD5TIWg-1Vu" # 2. 
__Define a `pretrained_embedding_layer` function__ # + colab={"base_uri": "https://localhost:8080/"} id="61pxqiCZ-1Vu" outputId="ff18ed20-7c78-457e-94d5-7cf1c84d81d0" emb_mean = word2vec.vectors.mean() emb_std = word2vec.vectors.std() print('emb_mean: ', emb_mean) print('emb_std: ', emb_std) # + id="TnMROqOz-1Vv" from tensorflow.keras.layers import Embedding def pretrained_embedding_matrix(word_to_vec_map, word_to_index, emb_mean, emb_std): ''' input: word_to_vec_map: a word2vec GoogleNews-vectors-negative300.bin model loaded using gensim.models word_to_index: word to index mapping from training set ''' np.random.seed(2021) # adding 1 to fit Keras embedding (requirement) vocab_size = len(word_to_index) + 1 # define dimensionality of your pre-trained word vectors (= 300) emb_dim = word_to_vec_map.word_vec('handsome').shape[0] # initialize the matrix with generic normal distribution values embed_matrix = np.random.normal(emb_mean, emb_std, (vocab_size, emb_dim)) # Set each row "idx" of the embedding matrix to be # the word vector representation of the idx'th word of the vocabulary for word, idx in word_to_index.items(): if word in word_to_vec_map: embed_matrix[idx] = word_to_vec_map.get_vector(word) return embed_matrix # + colab={"base_uri": "https://localhost:8080/"} id="j_PJCQTR-1Vv" outputId="6e245f35-b32b-40ae-847b-7f3f0e095b3f" # Test the function w_2_i = {'<UNK>': 1, 'handsome': 2, 'cool': 3, 'shit': 4 } em_matrix = pretrained_embedding_matrix(word2vec, w_2_i, emb_mean, emb_std) em_matrix # + [markdown] id="jm3G32Ti-1Vv" # ## CNN Model # + id="1SFkg2l4-1Vw" from tensorflow.keras import regularizers from tensorflow.keras.constraints import MaxNorm def define_model_2(filters = 100, kernel_size = 3, activation='relu', input_dim = None, output_dim=300, max_length = None, emb_matrix = None): model = tf.keras.models.Sequential([ tf.keras.layers.Embedding(input_dim=input_dim, output_dim=output_dim, input_length=max_length, input_shape=(max_length, ), # Assign the 
# NOTE(review): This chunk is a newline-stripped dump of a Jupytext notebook
# ("1_CNN/CNN_MR.ipynb"): CNN sentence classifiers with word2vec embeddings,
# "static" (frozen embedding, define_model_2 — its def line is cut off above
# this chunk) and "dynamic" (trainable embedding, define_model_3), each
# evaluated with 10-fold CV over several kernel sizes.
# Code below is kept byte-identical; issues to address once the notebook is
# restored to proper line structure:
#   * `kfold = KFold(10, True)` passes `shuffle` positionally; in modern
#     scikit-learn `shuffle` is keyword-only, so this raises a TypeError —
#     use KFold(n_splits=10, shuffle=True). TODO confirm pinned sklearn version.
#   * `record2 = record2.append(temp, ignore_index=True)` (and `record3` in the
#     dynamic section) rely on DataFrame.append, which was removed in
#     pandas 2.0 — use pd.concat([record2, temp], ignore_index=True).
#   * `emb_mean = emb_mean` / `emb_std = emb_std` are no-op self-assignments
#     (they only work because the names already exist from an earlier cell).
#   * The results column is named 'Filters' but the value recorded is
#     `kernel_size` (`parameters = [activation, kernel_size]`) — presumably the
#     column should be called 'Kernel size'; verify before reporting results.
#   * `logs={}` mutable default in myCallback.on_epoch_end is harmless here
#     (Keras always passes logs) but is a Python anti-pattern.
embedding weight with word2vec embedding marix weights = [emb_matrix], # Set the weight to be not trainable (static) trainable = False), tf.keras.layers.Conv1D(filters=filters, kernel_size = kernel_size, activation = activation, # set 'axis' value to the first and second axis of conv1D weights (rows, cols) kernel_constraint= MaxNorm( max_value=3, axis=[0,1])), tf.keras.layers.MaxPool1D(2), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10, activation=activation, # set axis to 0 to constrain each weight vector of length (input_dim,) in dense layer kernel_constraint = MaxNorm( max_value=3, axis=0)), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(units=1, activation='sigmoid') ]) model.compile( loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy']) # model.summary() return model # + colab={"base_uri": "https://localhost:8080/"} id="k7m_x87Q-1Vw" outputId="7e9ebead-3835-4e92-e5bc-fae1d6af303a" model_0 = define_model_2( input_dim=1000, max_length=100, emb_matrix=np.random.rand(1000, 300)) model_0.summary() # + [markdown] id="bgvyvm6Q-1Vx" # ## Train and Test the Model # + id="1BTEb6wh-1Vx" class myCallback(tf.keras.callbacks.Callback): # Overide the method on_epoch_end() for our benefit def on_epoch_end(self, epoch, logs={}): if (logs.get('accuracy') >= 0.9): print("\nReached 90% accuracy so cancelling training!") self.model.stop_training=True callbacks = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', min_delta=0, patience=10, verbose=2, mode='auto', restore_best_weights=True) # + colab={"base_uri": "https://localhost:8080/"} id="OhZQyy_m-1Vx" outputId="d0348834-1301-4727-ca7b-ba85d638188a" # Parameter Initialization trunc_type='post' padding_type='post' oov_tok = "<UNK>" activations = ['relu'] filters = 100 kernel_sizes = [1, 2, 3, 4, 5, 6, 7, 8] emb_mean = emb_mean emb_std = emb_std columns = ['Activation', 'Filters', 'acc1', 'acc2', 'acc3', 'acc4', 'acc5', 'acc6', 'acc7', 'acc8', 'acc9', 'acc10', 
'AVG'] record2 = pd.DataFrame(columns = columns) # prepare cross validation with 10 splits and shuffle = True kfold = KFold(10, True) # Separate the sentences and the labels sentences, labels = list(corpus.sentence), list(corpus.label) for activation in activations: for kernel_size in kernel_sizes: # kfold.split() will return set indices for each split acc_list = [] for train, test in kfold.split(sentences): train_x, test_x = [], [] train_y, test_y = [], [] for i in train: train_x.append(sentences[i]) train_y.append(labels[i]) for i in test: test_x.append(sentences[i]) test_y.append(labels[i]) # Turn the labels into a numpy array train_y = np.array(train_y) test_y = np.array(test_y) # encode data using # Cleaning and Tokenization tokenizer = Tokenizer(oov_token=oov_tok) tokenizer.fit_on_texts(train_x) # Turn the text into sequence training_sequences = tokenizer.texts_to_sequences(train_x) test_sequences = tokenizer.texts_to_sequences(test_x) max_len = max_length(training_sequences) # Pad the sequence to have the same size Xtrain = pad_sequences(training_sequences, maxlen=max_len, padding=padding_type, truncating=trunc_type) Xtest = pad_sequences(test_sequences, maxlen=max_len, padding=padding_type, truncating=trunc_type) word_index = tokenizer.word_index vocab_size = len(word_index)+1 emb_matrix = pretrained_embedding_matrix(word2vec, word_index, emb_mean, emb_std) # Define the input shape model = define_model_2(filters, kernel_size, activation, input_dim=vocab_size, max_length=max_len, emb_matrix=emb_matrix) # Train the model model.fit(Xtrain, train_y, batch_size=50, epochs=100, verbose=1, callbacks=[callbacks], validation_data=(Xtest, test_y)) # evaluate the model loss, acc = model.evaluate(Xtest, test_y, verbose=0) print('Test Accuracy: {}'.format(acc*100)) acc_list.append(acc*100) mean_acc = np.array(acc_list).mean() parameters = [activation, kernel_size] entries = parameters + acc_list + [mean_acc] temp = pd.DataFrame([entries], columns=columns) record2 = 
record2.append(temp, ignore_index=True) print() print(record2) print() # + [markdown] id="jtL9tLzo-1Vy" # ## Summary # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="99YiSpR0-1Vy" outputId="9f37b1ba-5c4d-4d02-95d9-7168af4ab766" record2.sort_values(by='AVG', ascending=False) # + colab={"base_uri": "https://localhost:8080/", "height": 111} id="Srkqv_qc-1Vz" outputId="a16e8593-15c2-4027-b87e-8b2d629c19f2" record2[['Activation', 'AVG']].groupby(by='Activation').max().sort_values(by='AVG', ascending=False) # + id="C_m9I4OJ-1Vz" report = record2.sort_values(by='AVG', ascending=False) report = report.to_excel('CNN_MR_2.xlsx', sheet_name='static') # + [markdown] id="17QEE5oW-1V0" # # Model 3: Word2Vec - Dynamic # + [markdown] id="HVQjxZ7H-1V0" # * In this part, we will fine tune the embeddings while training (dynamic). # + [markdown] id="XWNOdOlb-1V0" # ## CNN Model # + id="UHqwHxEW-1V1" from tensorflow.keras import regularizers from tensorflow.keras.constraints import MaxNorm def define_model_3(filters = 100, kernel_size = 3, activation='relu', input_dim = None, output_dim=300, max_length = None, emb_matrix = None): model = tf.keras.models.Sequential([ tf.keras.layers.Embedding(input_dim=input_dim, output_dim=output_dim, input_length=max_length, input_shape=(max_length, ), # Assign the embedding weight with word2vec embedding marix weights = [emb_matrix], # Set the weight to be not trainable (static) trainable = True), tf.keras.layers.Conv1D(filters=filters, kernel_size = kernel_size, activation = activation, # set 'axis' value to the first and second axis of conv1D weights (rows, cols) kernel_constraint= MaxNorm( max_value=3, axis=[0,1])), tf.keras.layers.MaxPool1D(2), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10, activation=activation, # set axis to 0 to constrain each weight vector of length (input_dim,) in dense layer kernel_constraint = MaxNorm( max_value=3, axis=0)), tf.keras.layers.Dropout(0.5), 
tf.keras.layers.Dense(units=1, activation='sigmoid') ]) model.compile( loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy']) # model.summary() return model # + colab={"base_uri": "https://localhost:8080/"} id="scS6Mb19-1V1" outputId="2315e31c-55c9-468c-8d6d-bb95bdb15741" model_0 = define_model_3( input_dim=1000, max_length=100, emb_matrix=np.random.rand(1000, 300)) model_0.summary() # + [markdown] id="YqYq5eRR-1V2" # ## Train and Test the Model # + id="CX0-YHNZ-1V2" class myCallback(tf.keras.callbacks.Callback): # Overide the method on_epoch_end() for our benefit def on_epoch_end(self, epoch, logs={}): if (logs.get('accuracy') > 0.93): print("\nReached 93% accuracy so cancelling training!") self.model.stop_training=True callbacks = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', min_delta=0, patience=10, verbose=2, mode='auto', restore_best_weights=True) # + colab={"base_uri": "https://localhost:8080/"} id="YdyPwyp_-1V2" outputId="7cdeaa76-18e9-4588-8d2f-d319d45603b2" # Parameter Initialization trunc_type='post' padding_type='post' oov_tok = "<UNK>" activations = ['relu'] filters = 100 kernel_sizes = [1, 2, 3, 4, 5, 6] emb_mean = emb_mean emb_std = emb_std columns = ['Activation', 'Filters', 'acc1', 'acc2', 'acc3', 'acc4', 'acc5', 'acc6', 'acc7', 'acc8', 'acc9', 'acc10', 'AVG'] record3 = pd.DataFrame(columns = columns) # prepare cross validation with 10 splits and shuffle = True kfold = KFold(10, True) # Separate the sentences and the labels sentences, labels = list(corpus.sentence), list(corpus.label) for activation in activations: for kernel_size in kernel_sizes: # kfold.split() will return set indices for each split acc_list = [] for train, test in kfold.split(sentences): train_x, test_x = [], [] train_y, test_y = [], [] for i in train: train_x.append(sentences[i]) train_y.append(labels[i]) for i in test: test_x.append(sentences[i]) test_y.append(labels[i]) # Turn the labels into a numpy array train_y = np.array(train_y) test_y = 
np.array(test_y) # encode data using # Cleaning and Tokenization tokenizer = Tokenizer(oov_token=oov_tok) tokenizer.fit_on_texts(train_x) # Turn the text into sequence training_sequences = tokenizer.texts_to_sequences(train_x) test_sequences = tokenizer.texts_to_sequences(test_x) max_len = max_length(training_sequences) # Pad the sequence to have the same size Xtrain = pad_sequences(training_sequences, maxlen=max_len, padding=padding_type, truncating=trunc_type) Xtest = pad_sequences(test_sequences, maxlen=max_len, padding=padding_type, truncating=trunc_type) word_index = tokenizer.word_index vocab_size = len(word_index)+1 emb_matrix = pretrained_embedding_matrix(word2vec, word_index, emb_mean, emb_std) # Define the input shape model = define_model_3(filters, kernel_size, activation, input_dim=vocab_size, max_length=max_len, emb_matrix=emb_matrix) # Train the model model.fit(Xtrain, train_y, batch_size=50, epochs=100, verbose=1, callbacks=[callbacks], validation_data=(Xtest, test_y)) # evaluate the model loss, acc = model.evaluate(Xtest, test_y, verbose=0) print('Test Accuracy: {}'.format(acc*100)) acc_list.append(acc*100) mean_acc = np.array(acc_list).mean() parameters = [activation, kernel_size] entries = parameters + acc_list + [mean_acc] temp = pd.DataFrame([entries], columns=columns) record3 = record3.append(temp, ignore_index=True) print() print(record3) print() # + [markdown] id="_udF_b0p-1V4" # ## Summary # + colab={"base_uri": "https://localhost:8080/", "height": 235} id="gMfVBZNm-1V4" outputId="daadf480-90bb-44ea-b9ab-ec1e96548427" record3.sort_values(by='AVG', ascending=False) # + id="FbxsGHE_-1V5" report = record3.sort_values(by='AVG', ascending=False) report = report.to_excel('CNN_MR_3.xlsx', sheet_name='dynamic')
1_CNN/CNN_MR.ipynb
# NOTE(review): Newline-stripped dump of "pose_estimation_walkthrough.ipynb" —
# federated learning (PySyft) over IMU CSV data with a shallow 1D CNN.
# Code below is kept byte-identical; issues to address once the notebook is
# restored to proper line structure:
#   * `VALID_SIZE = 0.1` is defined twice (once with the worker constants,
#     once in the folder-path cell) — keep a single definition.
#   * In the sample-print cell, `label[index]` indexes the list of *distinct*
#     label types discovered, not the label of the index-th sample —
#     presumably `sample_label[index]` was intended; verify the printed output.
#   * `valid_files,train_files, = ...` has a stray trailing comma in the
#     tuple-unpack target (harmless, but confusing).
#   * The citation block contains an unresolved `<NAME>` placeholder.
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="6ltsHY9dFy9-" # ## Pose Estimation Walkthrough # # this notebook contains a note while toying with data and the result, also the steps to produce the result # + colab={"base_uri": "https://localhost:8080/"} id="n-Bpcwei3jlQ" outputId="3adbf9c7-0143-46a9-ee4c-7d9c95cb4fc8" import torch import syft as sy import numpy as np from util import connect_to_workers hook = sy.TorchHook(torch) # hook PyTorch ie add extra functionalities to support Federated Learning LABEL = ['Standing still', 'Sitting and relaxing', 'Lying down', 'Walking', 'Climbing', 'Running'] N_WORKER = 8 BATCH_SIZE = 32 VALID_SIZE = 0.1 GPU_FOUND = torch.cuda.is_available() print(GPU_FOUND) # + id="ji1p6BHG-6L9" # simulate another remote client using VirtualWorker workers = connect_to_workers(hook,n_workers = N_WORKER) # + [markdown] id="issHMnOKGCSe" # ## Reading data # # 1. Split files detected by glob to train and test # 2. Read CSV # 3. Construct Dataset class # # For the first experiment, i will try to use all features that available so the input will be [1 x 21] # + id="qi4TznmK_BmA" import glob import csv import random import math # folder path PATH = 'data/Preprocessed' TEST_PATH = 'test' VALID_SIZE = 0.1 # seed for random random.seed(300) # + id="GoOA2DOa_E2N" files = glob.glob(PATH+"/*.csv") test_files = glob.glob(TEST_PATH+"/*.csv") # split into train and test # according to note, only 8 data for training, and the rest for testing. 
# so you need to separate manually, take data from /training/Preprocessed to /test count_valid = math.ceil(VALID_SIZE*len(files)) random.shuffle(files) valid_files,train_files, = files[0:count_valid],files[count_valid:] # + colab={"base_uri": "https://localhost:8080/"} id="lf177KTS_rNQ" outputId="219d0bdf-d6cf-4668-e514-d91745cd4d5b" index_to_read = 3 sample_read = [] sample_label = [] label = [] with open(train_files[index_to_read]) as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') for row in csv_reader: if int(row[21]) != 0: if int(row[21]) not in label: label.append(int(row[21])) sample_read.append([float(item) for item in row[0:21]]) sample_label.append(int(row[21])) print(train_files[index_to_read]) print('types of label inside: {}'.format(label)) # + colab={"base_uri": "https://localhost:8080/"} id="yRQc00BN_0Ci" outputId="3acee476-d2df-45c7-dce8-bf40db307c9d" # print sample data for index,rep in enumerate(sample_read[:3]): print('Chest \t\t\tLeft Angkle') print('{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f}'.format(*rep)) print('Label \t\t\tRight Angkle') print('{} \t\t\t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f} \t{:.2f}'.format(label[index],*rep[12:])) print('') # + [markdown] id="r0ed6c8JGOqr" # ### Use Constructed Dataset # # To make it easy, we create a dataloader for reading and returning value from csv files # + colab={"base_uri": "https://localhost:8080/"} id="ZU5qLcsG_1ch" outputId="9cbf31b2-c1c1-4817-9ddc-347f8dca6b76" from dataloader import ImuPoseDataset train_dataset = ImuPoseDataset(files=train_files) valid_dataset = ImuPoseDataset(files=valid_files) test_dataset = ImuPoseDataset(files=test_files,return_old_data = True) print("train dataset: {}".format(len(train_dataset))) print("valid dataset: {}".format(len(valid_dataset))) print("test dataset: {}".format(len(test_dataset))) # + [markdown] id="moV73cx3GaGU" # ### Construct Dataloader # # To 
iterate our dataset we will use Pytorch built-in dataloader. Since we are training with federated learning we will need to transform it into federatedDataset # + id="hOHmTabCABBR" federated_train_loader = sy.FederatedDataLoader( train_dataset .federate(workers), # <-- we distribute the dataset across all the workers, it's now a FederatedDataset batch_size=BATCH_SIZE, drop_last=True, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, drop_last=True, shuffle=True) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1) # + colab={"base_uri": "https://localhost:8080/"} id="HlCQ50Y5AE_I" outputId="c46c5cd9-0618-481e-da2a-b3e26e879f84" test_iter = iter(federated_train_loader) data,label = next(test_iter) print(data.location) print("our training data shape: {}, {}".format(data.shape,data.type())) print("our training label shape: {}, {}".format(label.shape,label.type())) # + [markdown] id="3e1jHvCPGiPy" # ### CNN for classifier # # for estimating pose, i will be using shallow CNN. I will be using 3 convolutional layers and 2 fully connected dense layers # # I use the reference from # <b><NAME>, et al. “Human Activity Recognition Using Inertial Sensors in a Smartphone: An Overview.” Sensors, vol. 19, no. 14, 2019, p. 3213.</b> # # that reference a few methods to solve problems related to this. # # from # Goodfellow, Ian, et al. Deep Learning. 2016. mentioned that in recognizing human activity, CNN can be used for prediction by treating each row as timestamp data and processed by 1D convolution # . 
# # + id="ohejJCIyAGAH" from model import CnnModel model = CnnModel(input_size = 1,num_classes=len(LABEL)) # + id="6OwsA55iAPiU" # define some global param LEARNING_RATE = 0.01 criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.SGD(model.parameters(),lr=LEARNING_RATE) # + colab={"base_uri": "https://localhost:8080/"} id="Xy-cwrMPARzO" outputId="695770d1-b81c-47fb-b51d-257f334d8e1f" from trainer import train # Construct args for train param args={ "epoch":300, "batch_size":BATCH_SIZE, "learning_patience":10, "learning_rate": LEARNING_RATE, 'checkpoint':'', 'saved_model_name':'pose_estimator', 'save_folder':'', } # train the Model train(args, model, criterion, optimizer, federated_train_loader,False,valid_loader) # - # ### Evaluating againts test dataset # # after finish training, it will have the training log in the log/train_log folder according to training start time. Checkpoint also saved in log/checkpoint. # # Now let's evaluate it using another data, to view it's performance # + from evaluate import test args={ "batch_size":1, 'model_path':'model/pose_estimator.pt', 'save_result':'', 'test_folder':'test', 'include_null_class':False, } test(test_loader, args) # - # ## Result # # the shallow 1D CNN was able to predict pose based on timestamp data with <b>F1-score</b> reaching 94%. But take note that the label class is small only 6, and it doesn't learn outlier data (No activity). For improvement, the Null label can be considered a real class and trained into network to learn it's feature.
pose_estimation_walkthrough.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # # Quiz 4 (10/10) # Forgot to print, sorry! # # ![](./media/quiz.png)
deepLSpec/c4/w4/quiz.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Filtering and Scipy.Signal
# Demonstration of FIR filtering using scipy.signal. Part of a lecture given in
# ELEG 306 Digital Signal Processing, Oct 6, 2014.
#
# <NAME>, <EMAIL>

# %pylab inline
import scipy.signal as sig

# White Gaussian noise test input: 1200 samples (randn comes from the pylab
# namespace pulled in by %pylab inline, as do plot/stem/scatter/etc. below).
x = randn(1200)
plot(x)

# ## Low Pass filter

# +
# #sig.firwin?
# -

# 31-tap linear-phase FIR low-pass, cutoff 0.1 (normalized to Nyquist).
h = sig.firwin(31, 0.1)
plot(h)

# The connected dots plot above isn't as informative as the stem plot below:
stem(range(len(h)), h)

# Magnitude response versus (positive) frequency; the negative half is the
# mirror image. Notice this is a linear phase filter with deep sidelobes.
# FIX: decibels are 20*log10(|H|); the original used `log`, which in the
# pylab namespace is the *natural* log, so the dB axis was off by a factor
# of ln(10) ~ 2.3.
w, v = sig.freqz(h)
plot(w, 20 * log10(abs(v)))

y = convolve(x, h)
plot(y)

# Tighten the filter to a passband about 0.01*pi wide (as well as 51 taps
# allow -- more taps get closer to ideal at the cost of delay/computation).
h2 = sig.firwin(51, 0.01)
stem(range(len(h2)), h2)

# The signal is much smoother now. Note the y-axis autoscales: the original x
# spans roughly -4..4 while y2 spans roughly -0.4..0.4 -- the filter has
# removed about 99% of the energy.
y2 = convolve(x, h2)
plot(y2)

# Show the dots. Consecutive samples are close and have "momentum" --
# characteristic of a low pass signal.
scatter(range(len(y2)), y2, s=2)

# ## Bandpass Filter
# The bandpass filter's impulse response oscillates slowly between positive
# and negative values, much like a sinusoid of frequency 0.15*pi does.
hbp = sig.firwin(51, [0.1, 0.2], pass_zero=False)
stem(range(len(hbp)), hbp)

w, v = sig.freqz(hbp)
plot(w, 20 * log10(abs(v)))

ybp = convolve(x, hbp)
plot(ybp)

# FIX: `s=1` is a scatter() marker-size kwarg; stem() does not accept it and
# the original stem(range(len(ybp)), ybp, s=1) raised a TypeError.
stem(range(len(ybp)), ybp)

# The full plot is a mess (too many points); look at a short segment instead:
segment = ybp[100:200]
stem(range(len(segment)), segment)

# The signal looks like a sinusoid, oscillating slowly between positive and
# negative values. It isn't perfect because 1) the input is random and 2) the
# output has components at all frequencies within (and near) the passband.

# ## High Pass Filter
hhp = sig.firwin(51, 0.9, pass_zero=False)
w, v = sig.freqz(hhp)
plot(w, 20 * log10(abs(v)))

yhp = convolve(x, hhp)
plot(yhp)
plot(yhp[500:600])
stem(yhp[500:600])

# ## Pole-Zero Plots
# The FIR filters have no poles, so we only plot the zeros.
z, p, k = sig.tf2zpk(h, 1)
z

theta = linspace(-pi, pi, 201)
plot(cos(theta), sin(theta))
fig = scatter(real(z), imag(z))
# FIX: pyplot.axes() creates a NEW axes on top of the plot; gca() configures
# the axes we just drew into.
gca().set_aspect('equal')

z, p, k = sig.tf2zpk(hbp, [1])
theta = linspace(-pi, pi, 201)
plot(cos(theta), sin(theta))
fig = scatter(real(z), imag(z))
gca().set_aspect('equal')

# Remember, zeros can be outside the unit circle without affecting stability.
Week14/Lecture12b_Filters.ipynb
# NOTE(review): Newline-stripped dump of "hrstm/view_hrstm.ipynb" — a Python 2
# (see kernelspec) AiiDA/ipywidgets viewer for high-resolution STM data:
# load_pk() fetches a calculation and fills module-level globals (current,
# heights, voltages, heightOptions, extent, ...); plotting helpers render
# discrete/continuous image series; the export section zips PNGs.
# Code below is kept byte-identical; issues to address once the notebook is
# restored to proper line structure:
#   * BUG in create_disc_zip_content: `for i_v in range(len(biases)-1, -1):`
#     is an EMPTY range (start >= stop with default step +1), so out-of-range
#     biases are never filtered; the intended reverse loop needs step -1.
#     Even then, `del biases[i_v]` on a numpy array raises ValueError —
#     filter with a list/boolean mask instead.
#   * The TODO in make_discrete_plot ("this gets the height, not the index!")
#     appears stale: heightOptions is built in load_pk as label -> enumerate
#     index, so `heightOptions[...]` does yield an index.
#   * Two bare `except:` clauses (load_pk and the URL loader at the bottom)
#     swallow all errors, including typos — catch specific exceptions.
#   * Python-2-only modules (urlparse, StringIO) and duplicate
#     `import matplotlib.pyplot as plt` in the import cell; a Python 3 port
#     needs urllib.parse and io.BytesIO for the savefig buffers.
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + from aiida import load_dbenv, is_dbenv_loaded from aiida.backends import settings if not is_dbenv_loaded(): load_dbenv(profile=settings.AIIDADB_PROFILE) from aiida.orm import load_node from aiida.orm.querybuilder import QueryBuilder from aiida.orm.calculation.work import WorkCalculation from aiida.orm.calculation.job import JobCalculation import numpy as np import scipy.constants as const import ipywidgets as ipw from IPython.display import display, clear_output, HTML import re import gzip import matplotlib.pyplot as plt from collections import OrderedDict import urlparse import io import zipfile import StringIO import matplotlib.pyplot as plt from apps.scanning_probe import common from apps.scanning_probe import igor # + colormaps = ['afmhot', 'binary', 'gist_gray', 'gist_heat', 'seismic'] current = None fwhm = None heights = None voltages = None heightOptions = None extent = None figure_xy_ratio = None def load_pk(b): global current global fwhm, heights, voltages global extent, figure_xy_ratio global heightOptions try: workcalc = load_node(pk=pk_select.value) hrstm_calc = common.get_calc_by_label(workcalc, 'hrstm') except: print("Incorrect pk.") return fwhm = float(hrstm_calc.inp.parameters.dict['--fwhm_sam']) geom_info.value = common.get_slab_calc_info(workcalc) ase_geom = workcalc.inp.structure.get_ase() ### ---------------------------------------------------- ### Load data meta_data = np.load(hrstm_calc.out.retrieved.get_abs_path('hrstm_meta.npy')).item() dimGrid = meta_data['dimGrid'] lVec = meta_data['lVec'] heights = [np.round(lVec[0,2]+lVec[3,2]/dimGrid[-1]*idx-np.max(ase_geom.get_positions()[:,2]),1) for idx in range(dimGrid[-1])] heightOptions = {} for hIdx, height in enumerate(heights): heightOptions["h={:}".format(height)] = hIdx 
voltages = np.array(meta_data['voltages']) dimShape = dimGrid[:-1]+(len(heights),len(voltages),) try: current = np.abs(np.load(hrstm_calc.out.retrieved.get_abs_path('hrstm.npz'))['arr_0'].reshape(dimShape)) except OSError: current = np.abs(np.fromfile(hrstm_calc.out.retrieved.get_abs_path('hrstm.npy')).reshape(dimShape)) extent = [lVec[0,0], lVec[1,0], lVec[0,1], lVec[2,1]] figure_xy_ratio = (lVec[1,0]-lVec[0,0]) / (lVec[2,1]-lVec[0,1]) setup_hrstm_elements() setup_hrstm_single_elements() disc_zip_btn.disabled = False cont_zip_btn.disabled = False style = {'description_width': '50px'} layout = {'width': '70%'} pk_select = ipw.IntText(value=0, description='pk', style=style, layout=layout) load_pk_btn = ipw.Button(description='Load pk', style=style, layout=layout) load_pk_btn.on_click(load_pk) geom_info = ipw.HTML() display(ipw.HBox([ipw.VBox([pk_select, load_pk_btn]), geom_info])) # - # # High-resolution scanning tunneling microscopy # + import matplotlib class FormatScalarFormatter(matplotlib.ticker.ScalarFormatter): def __init__(self, fformat="%1.1f", offset=True, mathText=True): self.fformat = fformat matplotlib.ticker.ScalarFormatter.__init__(self,useOffset=offset, useMathText=mathText) def _set_format(self, vmin, vmax): self.format = self.fformat if self._useMathText: self.format = '$%s$' % matplotlib.ticker._mathdefault(self.format) def make_plot(fig, ax, data, title=None, title_size=None, center0=False, vmin=None, vmax=None, cmap='gist_heat', noadd=False): if center0: data_amax = np.max(np.abs(data)) im = ax.imshow(data.T, origin='lower', cmap=cmap, interpolation='bicubic', extent=extent, vmin=-data_amax, vmax=data_amax) else: im = ax.imshow(data.T, origin='lower', cmap=cmap, interpolation='bicubic', extent=extent, vmin=vmin, vmax=vmax) if noadd: ax.set_xticks([]) ax.set_yticks([]) else: ax.set_xlabel(r"x ($\AA$)") ax.set_ylabel(r"y ($\AA$)") if 1e-3 < np.max(data) < 1e3: cb = fig.colorbar(im, ax=ax) else: cb = fig.colorbar(im, ax=ax, 
format=FormatScalarFormatter("%.1f")) cb.formatter.set_powerlimits((-2, 2)) cb.update_ticks() ax.set_title(title) if title_size: ax.title.set_fontsize(title_size) ax.axis('scaled') # - # # Series # + def remove_from_tuple(tup, index): tmp_list = list(tup) del tmp_list[index] return tuple(tmp_list) def remove_line_row(b, elem_list, selections_vbox): rm_btn_list = [elem[2] for elem in elem_list] rm_index = rm_btn_list.index(b) del elem_list[rm_index] selections_vbox.children = remove_from_tuple(selections_vbox.children, rm_index) def add_selection_row(b, elem_list, selections_vbox): # Series: drop_full_series = ipw.Dropdown(description="height", options=sorted(heightOptions.keys()), style = {'description_width': 'auto'}) drop_cmap = ipw.Dropdown(description="colormap", options=colormaps, style = {'description_width': 'auto'}) rm_btn = ipw.Button(description='x', layout=ipw.Layout(width='30px')) rm_btn.on_click(lambda b: remove_line_row(b, elem_list, selections_vbox)) elements = [drop_full_series, drop_cmap, rm_btn] element_widths = ['180px', '240px', '35px'] boxed_row = ipw.HBox([ipw.HBox([row_el], layout=ipw.Layout(border='0.1px solid', width=row_w)) for row_el, row_w in zip(elements, element_widths)]) elem_list.append(elements) selections_vbox.children += (boxed_row, ) # + def setup_hrstm_elements(): add_selection_row(None, elem_list, selections_vbox) default_biases = [-1.0, -0.5, -0.1, 0.1, 0.5, 1.0] # filter based on energy limits default_biases = [v for v in default_biases if v >= np.min(voltages) and v <= np.max(voltages)] biases_text.value = " ".join([str(v) for v in default_biases]) energy_range_slider.min = np.min(voltages) energy_range_slider.max = np.max(voltages) energy_range_slider.step = voltages[1]-voltages[0] energy_range_slider.value = (np.min(voltages), np.max(voltages)) def make_discrete_plot(): biases = np.array(biases_text.value.split(), dtype=float) filtered_biases = [] for v in biases: if v >= np.min(voltages) and v <= np.max(voltages): 
filtered_biases.append(v) else: print("Voltage %.2f out of range, skipping" % v) fig_y_size = 5 fig = plt.figure(figsize=(fig_y_size*figure_xy_ratio*len(filtered_biases), fig_y_size*len(elem_list))) for i_ser in range(len(elem_list)): # TODO this gets the height, not the index! hIdx = heightOptions[elem_list[i_ser][0].value] cmap = elem_list[i_ser][1].value data = current[:,:,hIdx] for biasIdx, bias in enumerate(biases): ax = plt.subplot(len(elem_list), len(biases), i_ser*len(biases) + biasIdx + 1) vIdx = np.argmin(np.abs(voltages - bias)) make_plot(fig, ax, data[:, :, vIdx], title='h=%.1f Ang, E=%.2f eV'%(heights[hIdx],bias), title_size=22, cmap=cmap, noadd=True) return fig def plot_discrete_series(b): with discrete_output: fig = make_discrete_plot() plt.show() def plot_full_series(b): fig_y = 4 fig_y_in_px = 0.8*fig_y*matplotlib.rcParams['figure.dpi'] num_series = len(elem_list) box_layout = ipw.Layout(overflow_x='scroll', border='3px solid black', width='100%', height='%dpx' % (fig_y_in_px*num_series + 70), display='inline-flex', flex_flow='column wrap', align_items='flex-start') plot_hbox = ipw.Box(layout=box_layout) continuous_output.children += (plot_hbox, ) min_e, max_e = energy_range_slider.value ie_1 = np.abs(voltages - min_e).argmin() ie_2 = np.abs(voltages - max_e).argmin()+1 plot_hbox.children = () for i_e in range(ie_1, ie_2): plot_out = ipw.Output() plot_hbox.children += (plot_out, ) with plot_out: fig = plt.figure(figsize=(fig_y*figure_xy_ratio, fig_y*num_series)) for i_ser in range(len(elem_list)): hIdx = heightOptions[elem_list[i_ser][0].value] cmap = elem_list[i_ser][1].value title = 'h=%.1f Ang, E=%.2f eV'%(heights[hIdx], voltages[i_e]) data = current[:,:,hIdx] ax = plt.subplot(len(elem_list), 1, i_ser+1) make_plot(fig, ax, data[:, :, i_e], title=title, cmap=cmap, noadd=True) plt.show() def on_full_clear(b): continuous_output.children = () with discrete_output: clear_output() # + elem_list = [] selections_vbox = ipw.VBox([]) add_row_btn = 
ipw.Button(description='Add series row') add_row_btn.on_click(lambda b: add_selection_row(b, elem_list, selections_vbox)) style = {'description_width': '80px'} layout = {'width': '40%'} ### ----------------------------------------------- ### Plot discrete disc_plot_btn = ipw.Button(description='plot discrete') disc_plot_btn.on_click(plot_discrete_series) biases_text = ipw.Text(description='voltages (V)', value='', style=style, layout={'width': '80%'}) disc_plot_hbox = ipw.HBox([biases_text, disc_plot_btn], style=style, layout={'width': '60%'}) discrete_output = ipw.Output() ### ----------------------------------------------- ### Plot continuous cont_plot_btn = ipw.Button(description='plot continuous') cont_plot_btn.on_click(plot_full_series) energy_range_slider = ipw.FloatRangeSlider( value=[0.0, 0.0], min=0.0, max=0.0, step=0.1, description='energy range', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.2f', style=style, layout={'width': '80%'} ) cont_plot_hbox = ipw.HBox([energy_range_slider, cont_plot_btn], style=style, layout={'width': '60%'}) continuous_output = ipw.VBox() ### ----------------------------------------------- full_clear_btn = ipw.Button(description='clear') full_clear_btn.on_click(on_full_clear) display(add_row_btn, selections_vbox, disc_plot_hbox, cont_plot_hbox, full_clear_btn, discrete_output, continuous_output) # - # ## Single # + def setup_hrstm_single_elements(): drop_hrstm_height_singl.options=sorted(heightOptions.keys()) bias_slider.min = np.min(voltages) bias_slider.max = np.max(voltages) bias_slider.step = voltages[1]-voltages[0] bias_slider.value = np.min(voltages) def make_single_plot(voltage, height, cmap): title = height + ", v=%.1f"%voltage data = current[:,:,heightOptions[height]] vIdx = np.abs(voltages - voltage).argmin() fig_y_size = 6 fig = plt.figure(figsize=(fig_y_size*figure_xy_ratio+1.0, fig_y_size)) ax = plt.gca() make_plot(fig, ax, data[:, :, vIdx],title=title, cmap=cmap) 
return fig def plot_hrstm(c): if drop_hrstm_height_singl.value != None: with hrstm_plot_out: clear_output() cmap = drop_singl_cmap.value fig = make_single_plot(bias_slider.value, drop_hrstm_height_singl.value, cmap) plt.show() drop_hrstm_height_singl = ipw.Dropdown(description="heights", options=[]) drop_singl_cmap = ipw.Dropdown(description="colormap", options=colormaps) bias_slider = ipw.FloatSlider( value=0.0, min=0.0, max=0.0, step=0.1, description='voltage (V)', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.2f', ) single_plot_btn = ipw.Button(description='plot') single_plot_btn.on_click(plot_hrstm) hrstm_plot_out = ipw.Output() display(drop_hrstm_height_singl, drop_singl_cmap, bias_slider, single_plot_btn, hrstm_plot_out) # - # # Export # Export either the currently selected discrete or continuous series. # + def create_zip_link(figure_method, zip_progress, html_link_out, filename): zip_buffer = io.BytesIO() with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED, False) as zip_file: figure_method(zip_file, zip_progress) # ! 
mkdir -p tmp with open('tmp/'+filename, 'wb') as f: f.write(zip_buffer.getvalue()) with html_link_out: display(HTML('<a href="tmp/%s" target="_blank">download zip</a>' % filename)) def create_disc_zip_content(zip_file, zip_progress): biases = np.array(biases_text.value.split(), dtype=float) for i_v in range(len(biases)-1, -1): if biases[i_v] < np.min(voltages) or biases[i_v] > np.max(voltages): del biases[i_v] total_pics = len(biases)*len(elem_list) + 1 # the total image imgdata = StringIO.StringIO() fig = make_discrete_plot() fig.savefig(imgdata, format='png', dpi=200, bbox_inches='tight') zip_file.writestr("all.png", imgdata.getvalue()) plt.close() zip_progress.value += 1.0/float(total_pics-1) # individuals for i_s in range(len(elem_list)): height = elem_list[i_s][0].value cmap = elem_list[i_s][1].value series_name = "hrstm_" + height for i_v in range(len(biases)): bias = biases[i_v] plot_name = series_name + "_%dv%+.2f" % (i_v, bias) imgdata = StringIO.StringIO() fig = make_single_plot(bias, height, cmap) fig.savefig(imgdata, format='png', dpi=200, bbox_inches='tight') zip_file.writestr(plot_name+".png", imgdata.getvalue()) plt.close() # --------------------------------------------------- zip_progress.value += 1.0/float(total_pics-1) def create_cont_zip_content(zip_file, zip_progress): fig_y = 4 min_e, max_e = energy_range_slider.value ie_1 = np.abs(voltages - min_e).argmin() ie_2 = np.abs(voltages - max_e).argmin()+1 total_pics = len(elem_list)*(ie_2-ie_1) for i_ser in range(len(elem_list)): height = elem_list[i_ser][0].value cmap = elem_list[i_ser][1].value series_name = "hrstm_" + height for i_e in range(ie_1, ie_2): en = voltages[i_e] title = '%s, E=%.2f eV'%(height, voltages[i_e]) data = current[:,:,heightOptions[height]] plot_name = "%s_%de%.2f" % (series_name, i_e-ie_1, en) imgdata = StringIO.StringIO() fig = plt.figure(figsize=(fig_y*figure_xy_ratio, fig_y)) ax = plt.gca() make_plot(fig, ax, data[:, :, i_e], title=title, cmap=cmap, noadd=True) 
fig.savefig(imgdata, format='png', dpi=200, bbox_inches='tight') zip_file.writestr(plot_name+".png", imgdata.getvalue()) plt.close() # --------------------------------------------------- zip_progress.value += 1.0/float(total_pics-1) def create_disc_zip_link(b): disc_zip_btn.disabled = True create_zip_link(create_disc_zip_content, disc_zip_progress, disc_link_out, "hrstm_disc_%d.zip"%pk_select.value) def create_cont_zip_link(b): cont_zip_btn.disabled = True e1, e2 = energy_range_slider.value create_zip_link(create_cont_zip_content, cont_zip_progress, cont_link_out, "hrstm_cont_%d_e%.1f_%.1f.zip"% (pk_select.value, e1, e2)) disc_zip_btn = ipw.Button(description='Discrete zip', disabled=True) disc_zip_btn.on_click(create_disc_zip_link) disc_zip_progress = ipw.FloatProgress( value=0, min=0, max=1.0, description='progress:', bar_style='info', orientation='horizontal' ) disc_link_out = ipw.Output() display(ipw.HBox([disc_zip_btn, disc_zip_progress]), disc_link_out) cont_zip_btn = ipw.Button(description='Continuous zip', disabled=True) cont_zip_btn.on_click(create_cont_zip_link) cont_zip_progress = ipw.FloatProgress( value=0, min=0, max=1.0, description='progress:', bar_style='info', orientation='horizontal' ) cont_link_out = ipw.Output() display(ipw.HBox([cont_zip_btn, cont_zip_progress]), cont_link_out) def clear_tmp(b): # ! rm -rf tmp && mkdir tmp with disc_link_out: clear_output() with cont_link_out: clear_output() disc_zip_progress.value = 0.0 cont_zip_progress.value = 0.0 if current is not None: disc_zip_btn.disabled = False cont_zip_btn.disabled = False clear_tmp_btn = ipw.Button(description='clear tmp') clear_tmp_btn.on_click(clear_tmp) display(clear_tmp_btn) # - ### Load the URL after everything is set up ### try: url = urlparse.urlsplit(jupyter_notebook_url) pk_select.value = urlparse.parse_qs(url.query)['pk'][0] load_pk(0) except: pass
hrstm/view_hrstm.ipynb
# NOTE(review): Newline-stripped dump of a sentiment-analysis homework
# notebook ("HW2xHW4 VECTORIZATION"): loads pos/neg review files into a
# pandas DataFrame, tokenizes with NLTK, builds frequency distributions,
# scores with VADER and a DIY frequency-based summarizer. The chunk is CUT
# OFF mid-statement at the trailing `all_df['v_neg_sum']`.
# Code below is kept byte-identical; issues to address once the notebook is
# restored to proper line structure:
#   * `get_most_common` is defined twice, identically, back to back — the
#     second def silently shadows the first; keep one.
#   * `all_df = neg_df.append(pos_df)` uses DataFrame.append, removed in
#     pandas 2.0 — use pd.concat([neg_df, pos_df]).
#   * `get_sentence_score` calls `nltk.word_tokenize`, but only
#     `from nltk.tokenize import ...`-style imports are visible — without a
#     plain `import nltk` this is a NameError; verify the missing import.
#   * `get_weighted_freq_dist` has a bare `except: return 'nope'` that
#     swallows every error and returns a string sentinel, and its `review`
#     parameter is never used in the body.
#   * `get_data_from_files` opens files without a context manager; prefer
#     `with open(...)`.
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # HW2xHW4 # # VECTORIZATION (Pandas style!) # ## STEP 1: Import ALL the things # ### Import libraries # + ########################################## # NOTE: I'm toying with the idea of requiring the library just above # when I use it so it makes more sense in context ########################################## # import os # import pandas as pd # from nltk.tokenize import word_tokenize, sent_tokenize # from nltk.sentiment import SentimentAnalyzer # from nltk.sentiment.util import * # from nltk.probability import FreqDist # from nltk.sentiment.vader import SentimentIntensityAnalyzer # sid = SentimentIntensityAnalyzer() # - # ### Import data from files # + import os def get_data_from_files(path): directory = os.listdir(path) results = [] for file in directory: f=open(path+file) results.append(f.read()) f.close() return results # neg = get_data_from_files('../neg_cornell/') # pos = get_data_from_files('../pos_cornell/') # neg = get_data_from_files('../neg_hw4/') # pos = get_data_from_files('../pos_hw4/') pos = get_data_from_files('../hw4_lie_false/') neg = get_data_from_files('../hw4_lie_true/') # - # ## STEP 2: Prep Data # ### STEP 2a: Turn that fresh text into a pandas DF import pandas as pd neg_df = pd.DataFrame(neg) pos_df = pd.DataFrame(pos) # ### STEP 2b: Label it pos_df['PoN'] = 'P' neg_df['PoN'] = 'N' # ### STEP 2c: Combine the dfs all_df = neg_df.append(pos_df) all_df[:3] # ## STEP 3: TOKENIZE (and clean)!! 
from nltk.tokenize import word_tokenize, sent_tokenize from nltk.sentiment import SentimentAnalyzer from nltk.sentiment.util import * # + ## Came back and added sentences for tokinization for "Summary experiment" def get_sentence_tokens(review): return sent_tokenize(review) all_df['sentences'] = all_df.apply(lambda x: get_sentence_tokens(x[0]), axis=1) all_df['num_sentences'] = all_df.apply(lambda x: len(x['sentences']), axis=1) # + def get_tokens(sentence): tokens = word_tokenize(sentence) clean_tokens = [word.lower() for word in tokens if word.isalpha()] return clean_tokens all_df['tokens'] = all_df.apply(lambda x: get_tokens(x[0]), axis=1) all_df['num_tokens'] = all_df.apply(lambda x: len(x['tokens']), axis=1) # - all_df[:3] # ## STEP 4: Remove Stopwords from nltk.corpus import stopwords stop_words = set(stopwords.words("english")) def remove_stopwords(sentence): filtered_text = [] for word in sentence: if word not in stop_words: filtered_text.append(word) return filtered_text all_df['no_sw'] = all_df.apply(lambda x: remove_stopwords(x['tokens']),axis=1) all_df['num_no_sw'] = all_df.apply(lambda x: len(x['no_sw']),axis=1) all_df[:5] # ## STEP 5: Create a Frequency Distribution from nltk.probability import FreqDist def get_most_common(tokens): fdist = FreqDist(tokens) return fdist.most_common(12) all_df['topwords_unfil'] = all_df.apply(lambda x: get_most_common(x['tokens']),axis=1) def get_most_common(tokens): fdist = FreqDist(tokens) return fdist.most_common(12) all_df['topwords_fil'] = all_df.apply(lambda x: get_most_common(x['no_sw']),axis=1) # + def get_fdist(tokens): return (FreqDist(tokens)) all_df['freq_dist'] = all_df.apply(lambda x: get_fdist(x['no_sw']),axis=1) all_df['freq_dist_unfil'] = all_df.apply(lambda x: get_fdist(x['tokens']),axis=1) # - all_df[:3] # ## STEP 6: Try Different Sentiment Analysis Tools # ### VADER # + from nltk.sentiment.vader import SentimentIntensityAnalyzer sid = SentimentIntensityAnalyzer() def get_vader_score(review): return 
sid.polarity_scores(review) all_df['vader_all'] = all_df.apply(lambda x: get_vader_score(x[0]),axis=1) # + def separate_vader_score(vader_score, key): return vader_score[key] all_df['v_compound'] = all_df.apply(lambda x: separate_vader_score(x['vader_all'], 'compound'),axis=1) all_df['v_neg'] = all_df.apply(lambda x: separate_vader_score(x['vader_all'], 'neg'),axis=1) all_df['v_neu'] = all_df.apply(lambda x: separate_vader_score(x['vader_all'], 'neu'),axis=1) all_df['v_pos'] = all_df.apply(lambda x: separate_vader_score(x['vader_all'], 'pos'),axis=1) # - # ### DIY SUMMARY all_df[0][17] # + def get_weighted_freq_dist(review, freq_dist): try: max_freq = max(freq_dist.values()) for word in freq_dist.keys(): freq_dist[word] = (freq_dist[word]/max_freq) return freq_dist except: return 'nope' all_df['weighted_freq_dist'] = all_df.apply(lambda x: get_weighted_freq_dist(x['sentences'], x['freq_dist']),axis=1) # + def get_sentence_score(review, freq_dist): sentence_scores = {} for sent in review: for word in nltk.word_tokenize(sent.lower()): if word in freq_dist.keys(): if len(sent.split(' ')) < 30: if sent not in sentence_scores.keys(): sentence_scores[sent] = freq_dist[word] else: sentence_scores[sent] += freq_dist[word] return sentence_scores all_df['sentence_scores'] = all_df.apply(lambda x: get_sentence_score(x['sentences'], x['freq_dist']),axis=1) # + def get_summary_sentences(sentence_scores): sorted_sentences = sorted(sentence_scores.items(), key=lambda kv: kv[1], reverse=True) return ''.join(sent[0] for sent in sorted_sentences[:5]) all_df['summary_sentences'] = all_df.apply(lambda x: get_summary_sentences(x['sentence_scores']), axis=1) # - summaries = all_df['summary_sentences'].tolist() summaries[3] # ### Doing VADER on the Summary Section all_df['vader_sum_all'] = all_df.apply(lambda x: get_vader_score(x['summary_sentences']),axis=1) all_df['v_compound_sum'] = all_df.apply(lambda x: separate_vader_score(x['vader_sum_all'], 'compound'),axis=1) all_df['v_neg_sum']
= all_df.apply(lambda x: separate_vader_score(x['vader_sum_all'], 'neg'),axis=1) all_df['v_neu_sum'] = all_df.apply(lambda x: separate_vader_score(x['vader_sum_all'], 'neu'),axis=1) all_df['v_pos_sum'] = all_df.apply(lambda x: separate_vader_score(x['vader_sum_all'], 'pos'),axis=1) # ### Doing VADER on the Most Frequent Words # + def get_freq_words(freq_dist): sorted_words = sorted(freq_dist.items(), key=lambda kv: kv[1], reverse=True) return ' '.join(word[0] for word in sorted_words[:50]) all_df['v_freq_words'] = all_df.apply(lambda x: get_freq_words(x['freq_dist']), axis=1) all_df['vader_fq_all'] = all_df.apply(lambda x: get_vader_score(x['v_freq_words']),axis=1) all_df['v_compound_fd'] = all_df.apply(lambda x: separate_vader_score(x['vader_fq_all'], 'compound'),axis=1) all_df['v_neg_fd'] = all_df.apply(lambda x: separate_vader_score(x['vader_fq_all'], 'neg'),axis=1) all_df['v_neu_fd'] = all_df.apply(lambda x: separate_vader_score(x['vader_fq_all'], 'neu'),axis=1) all_df['v_pos_fd'] = all_df.apply(lambda x: separate_vader_score(x['vader_fq_all'], 'pos'),axis=1) # - # ## STEP 7: Test `Step 6` with Machine Learning!! 
# ### Naive Bayes # + from sklearn.model_selection import train_test_split from sklearn.naive_bayes import GaussianNB, MultinomialNB from sklearn import metrics def get_NB(small_df, labels, no_negs): x_train, x_test, y_train, y_test = train_test_split(small_df.values, labels, test_size=0.3, random_state = 109) gnb = GaussianNB() gnb.fit(x_train, y_train) y_pred = gnb.predict(x_test) if no_negs: mnnb = MultinomialNB() mnnb.fit(x_train, y_train) y_pred_mn = mnnb.predict(x_test) print("Accuracy GNB:", metrics.accuracy_score(y_test, y_pred)) if no_negs: print("Accuracy MNNB:", metrics.accuracy_score(y_test, y_pred_mn)) # + # from sklearn.naive_bayes import MultinomialNB # clf = MultinomialNB() # clf.fit(x_train, y_train) # print(clf.predict(x_train[2:3])) # - # #### TEST 1: Vader Scores (Original) small_df = all_df.filter(['v_compound','v_pos', 'v_neg', 'v_neu']) # 0.645 get_NB(small_df, all_df['PoN'], False) small_df = all_df.filter(['v_pos', 'v_neu']) # 0.645 get_NB(small_df, all_df['PoN'], True) # #### TEST 2: Vader Scores (from Summary) small_df = all_df.filter(['v_compound_sum','v_pos_sum', 'v_neg_sum', 'v_neu_sum']) # 0.59 get_NB(small_df, all_df['PoN'], False) small_df = all_df.filter(['v_pos_sum','v_neu_sum']) # 0.59 get_NB(small_df, all_df['PoN'], True) # #### TEST 3: Vader Scores (original) AND Vader Scores (summary) small_df = all_df.filter(['v_compound_sum','v_pos_sum', 'v_neg_sum', 'v_neu_sum', 'v_compound','v_pos', 'v_neg', 'v_neu']) # 0.618 get_NB(small_df, all_df['PoN'], False) small_df = all_df.filter(['v_pos_sum', 'v_neu_sum', 'v_pos', 'v_neu']) # 0.618 get_NB(small_df, all_df['PoN'], True) # #### TEST 4: Vader Scores (50 most frequent -- filtered -- words) small_df = all_df.filter(['v_compound_fd','v_pos_fd', 'v_neu_fd', 'v_neg_fd']) # 0.598 get_NB(small_df, all_df['PoN'], False) small_df = all_df.filter(['v_pos_fd', 'v_neu_fd']) # 0.598 get_NB(small_df, all_df['PoN'], True) # #### TEST 5: All `compound` Vader Scores small_df = 
all_df.filter(['v_compound_fd','v_compound_sum', 'v_compound']) # 0.615 get_NB(small_df, all_df['PoN'], False) small_df = all_df.filter(['v_pos_fd','v_pos_sum', 'v_pos']) # 0.615 get_NB(small_df, all_df['PoN'], True) # #### TEST 6: ALL THE NUMBERS!! small_df = all_df.filter(['v_compound_sum','v_pos_sum', 'v_neg_sum', 'v_neu_sum', 'v_compound_fd','v_pos_fd', 'v_neg_fd', 'v_neu_fd', 'v_compound','v_pos', 'v_neg', 'v_neu']) # 0.613 get_NB(small_df, all_df['PoN'], False) # #### TEST 7: Test UNFILTERED most frequent words # + def get_freq_words(freq_dist): sorted_words = sorted(freq_dist.items(), key=lambda kv: kv[1], reverse=True) return ' '.join(word[0] for word in sorted_words[:50]) all_df['v_freq_words_unfil'] = all_df.apply(lambda x: get_freq_words(x['freq_dist_unfil']), axis=1) all_df['vader_fd_all_unfil'] = all_df.apply(lambda x: get_vader_score(x['v_freq_words_unfil']),axis=1) all_df['v_compound_fd_uf'] = all_df.apply(lambda x: separate_vader_score(x['vader_fd_all_unfil'], 'compound'),axis=1) all_df['v_neg_fd_uf'] = all_df.apply(lambda x: separate_vader_score(x['vader_fd_all_unfil'], 'neg'),axis=1) all_df['v_neu_fd_uf'] = all_df.apply(lambda x: separate_vader_score(x['vader_fd_all_unfil'], 'neu'),axis=1) all_df['v_pos_fd_uf'] = all_df.apply(lambda x: separate_vader_score(x['vader_fd_all_unfil'], 'pos'),axis=1) # - small_df = all_df.filter(['v_compound_sum','v_pos_sum', 'v_neg_sum', 'v_neu_sum', 'v_compound_fd','v_pos_fd', 'v_neg_fd', 'v_neu_fd', 'v_compound_fd_uf','v_pos_fd_uf', 'v_neg_fd_uf', 'v_neu_fd_uf', 'v_compound','v_pos', 'v_neg', 'v_neu']) # 0.618 get_NB(small_df, all_df['PoN'], False) small_df = all_df.filter(['v_compound_fd_uf','v_pos_fd_uf', 'v_neg_fd_uf', 'v_neu_fd_uf']) # 0.603 get_NB(small_df, all_df['PoN'], False) summaries_pos = all_df[all_df['PoN'] == 'P'] summaries_neg = all_df[all_df['PoN'] == 'N'] summaries_pos_list = summaries_pos['summary_sentences'].tolist() summaries_neg_list = summaries_neg['summary_sentences'].tolist() 
summaries_pos_list[:1] summaries_neg_list[:1] # + ### VERSION 1 # all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs]) # unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg) # sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # training_set = sentim_analyzer.apply_features(training_docs) # test_set = sentim_analyzer.apply_features(testing_docs) sentim_analyzer = SentimentAnalyzer() def get_nltk_negs(tokens): all_words_neg = sentim_analyzer.all_words([mark_negation(tokens)]) return all_words_neg def get_unigram_feats(neg_tokens): unigram_feats = sentim_analyzer.unigram_word_feats(neg_tokens) return unigram_feats all_df['nltk_negs'] = all_df.apply(lambda x: get_nltk_negs(x['tokens']), axis=1) all_df['unigram_feats'] = all_df.apply(lambda x: get_unigram_feats(x['nltk_negs']), axis=1) # all_df['nltk_unfil'] = all_df.apply(lambda x: get_nltk_data(x['tokens']), axis=1) # + ### VERSION 2 # all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs]) # unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg) # sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # training_set = sentim_analyzer.apply_features(training_docs) # test_set = sentim_analyzer.apply_features(testing_docs) sentim_analyzer = SentimentAnalyzer() def get_nltk_data(tokens): # print(tokens) neg_tokens = sentim_analyzer.all_words([mark_negation(tokens)]) unigram_feats = sentim_analyzer.unigram_word_feats(neg_tokens) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # print(sentim_analyzer.apply_features(tokens)) return sentim_analyzer.apply_features(tokens) # def get_unigram_feats(neg_tokens): # return unigram_feats nltk_df = pd.DataFrame() nltk_df['nltk_data'] = all_df.apply(lambda x: get_nltk_data(x['tokens']), axis=1) # all_df['nltk'] # all_df['unigram_feats'] = all_df.apply(lambda x: get_unigram_feats(x['nltk_negs']), 
axis=1) # all_df['nltk_unfil'] = all_df.apply(lambda x: get_nltk_data(x['tokens']), axis=1) # + # all_df['nltk_all'] = 0 # - nltk_df all_df['nltk_negs'] from nltk.tokenize import casual_tokenize from collections import Counter all_df['bow_nosw'] = all_df.apply(lambda x: Counter(casual_tokenize(x[0])), axis=1) all_df[:3]
assets/all_html/2019_10_25_HW2_HW4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from PIL import Image import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt # %matplotlib import matplotlib.gridspec as gridspec from matplotlib.text import Text from matplotlib.widgets import Button import os # - # ### Data preparation # #### models coefficients # + def get_linear_model(input_size): input1 = keras.layers.Input(shape=(input_size*input_size)) output = keras.layers.Dense(1, activation='sigmoid')(input1) linear_model = keras.Model(inputs=[input1], outputs=[output]) linear_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) return linear_model input_sizes = [8, 16, 32, 64, 128] checkpoint_paths = [ "xinshuai_models/linearmodel/8\\0243.ckpt", "xinshuai_models/linearmodel/16\\0171.ckpt", "xinshuai_models/linearmodel/32\\0062.ckpt", "xinshuai_models/linearmodel/64\\0054.ckpt", "xinshuai_models/linearmodel/128\\0026.ckpt" ] def get_coefficients(i): input_size = input_sizes[i] checkpoint_path = checkpoint_paths[i] linear_model = get_linear_model(input_size) linear_model.load_weights(checkpoint_path) coefficients = list(np.reshape(linear_model.variables[0].numpy(), input_size*input_size)) return coefficients # 8x8 _8 = get_coefficients(0) # 16x16 _16 = get_coefficients(1) # 32x32 _32 = get_coefficients(2) # 64x64 _64 = get_coefficients(3) # 128x128 _128 = get_coefficients(4) models_coefficients = [_8, _16, _32, _64, _128] # - # #### models input sizes and accuracy results df = pd.read_csv("accuracy_in_different_input_size") input_sizes = df["input_sizes"].to_list() accuracys = df["accuracys"].to_list() # #### 火(Fire) and 水(water) images # + # base path train_base_path = os.path.join(os.curdir, "Train") # 火(Fire) path fire = "火" 
fire_base_path = os.path.join(train_base_path, fire) # 水(Water) path water = "水" water_base_path = os.path.join(train_base_path, water) fires = os.listdir(fire_base_path) waters = os.listdir(water_base_path) # - def get_random_fire_water(): fire_index = int(np.random.random() * len(fires)) water_index = int(np.random.random() * len(waters)) fire_image = Image.open(os.path.join(fire_base_path, fires[fire_index])) water_image = Image.open(os.path.join(water_base_path, waters[water_index])) # fire and water images fire_image = fire_image.convert('L') water_image = water_image.convert('L') return fire_image, water_image # #### models top N influential power pixels (by coefficients) def get_Nth_influential_power_pixel_coefficient(model_coefficients, N): top_N = model_coefficients[:N] top_N.sort() top_N = list(top_N) i = N while i < len(model_coefficients): top_N.append(model_coefficients[i]) top_N.sort() top_N.pop(0) i += 1 return top_N[0] def get_top_N_influential_power_pixels(which_model, N): model_coefficients = models_coefficients[which_model] model_input_size = input_sizes[which_model] the_Nth_influential_power_pixel_coefficient = get_Nth_influential_power_pixel_coefficient(model_coefficients, N) model_pixels_y_position = [] model_pixels_x_position = [] for i, model_coefficient in enumerate(model_coefficients): if model_coefficient >= the_Nth_influential_power_pixel_coefficient: # "model_input_size - i // model_input_size" because the lower y in list means higher position in image model_pixels_y_position.append(model_input_size - i // model_input_size) model_pixels_x_position.append(i % model_input_size) return model_pixels_x_position, model_pixels_y_position, model_input_size # + best_model = np.argmax(accuracys) N = 20 best_model_x, best_model_y, best_model_input_size = get_top_N_influential_power_pixels(best_model, N) #*0.05 because 64 pixels are small for I wanna show the most significant pixels _8model_x, _8model_y, _8model_input_size = 
get_top_N_influential_power_pixels(0, int(8*8*0.05)) _16model_x, _16model_y, _16model_input_size = get_top_N_influential_power_pixels(1, int(16*16*0.05)) # same reason _32model_x, _32model_y, _32model_input_size = get_top_N_influential_power_pixels(2, N) _64model_x, _64model_y, _64model_input_size = get_top_N_influential_power_pixels(3, N) _128model_x, _128model_y, _128model_input_size = get_top_N_influential_power_pixels(4, N) xs = [_8model_x, _16model_x, _32model_x, _64model_x, _128model_x] ys = [_8model_y, _16model_y, _32model_y, _64model_y, _128model_y] model_input_sizes = [_8model_input_size, _16model_input_size, _32model_input_size, _64model_input_size, _128model_input_size] # - # ### Data Interactive Visualization # #### Style graphs def format_axes(axes, is_image=False): for ax in axes: ax.tick_params(labelsize="xx-small", pad=-2) ax.tick_params(top=False, left=False, bottom=False, right=False) if is_image: ax.tick_params(labeltop=False, labelleft=False, labelbottom=False, labelright=False) ax.axis('off') def set_main_title(text): plt.suptitle(text, size="x-small", weight="bold") def set_sub_title(ax, text): ax.set_title(text, size="xx-small", pad=2, weight="bold") def set_label(ax, text, flag="x"): if flag == "x": ax.set_xlabel(text, size="xx-small") else: ax.set_ylabel(text, size="xx-small") def text_markers(ax, model_x, model_y, top_N_influential_power_pixels): i = 0 for coefficient in top_N_influential_power_pixels: ax.text(model_x[i], model_y[i], str(coefficient), size="xx-small") i += 1 # #### Contain graphs in Grid def create_grid(): fig = plt.figure() gs0 = gridspec.GridSpec(1, 2, figure=fig) gs00 = gridspec.GridSpecFromSubplotSpec(5, 6, subplot_spec=gs0[0]) ax1 = fig.add_subplot(gs00[0:1, 0:3]) ax2 = fig.add_subplot(gs00[0:1, 3:6]) ax3 = fig.add_subplot(gs00[1:3, :]) ax4 = fig.add_subplot(gs00[3:5, :]) gs01 = gridspec.GridSpecFromSubplotSpec(5, 6, subplot_spec=gs0[1]) ax_button = fig.add_subplot(gs01[0:1, 1:5]) ax5 = fig.add_subplot(gs01[1:3, :]) 
ax6 = fig.add_subplot(gs01[3:5, :]) return fig, ax1, ax2, ax3, ax4, ax_button, ax5, ax6 # #### Paint graphs and Make graphs interactive fire_image, water_image = get_random_fire_water() resized_fires = [fire_image.resize((8,8)), fire_image.resize((16,16)), fire_image.resize((32,32)), fire_image.resize((64,64)), fire_image.resize((128,128))] resized_waters = [water_image.resize((8,8)), water_image.resize((16,16)), water_image.resize((32,32)), water_image.resize((64,64)), water_image.resize((128,128))] # + def draw_ax1_ax2(fire_image, water_image): ax1.figure.canvas.draw_idle() ax1.cla() ax2.figure.canvas.draw_idle() ax2.cla() set_sub_title(ax1, "Chinese Handwritten Fire") set_sub_title(ax2, "Water") ax1.imshow(fire_image, cmap="gray") ax2.imshow(water_image, cmap="gray") format_axes([ax1, ax2], is_image=True) def draw_ax3(model_index=-1): colors = ['r', 'r', 'r', 'r', 'r'] ax3.figure.canvas.draw_idle() ax3.cla() colors[model_index] = 'green' set_sub_title(ax3, "Accuracy as function of input size in linear model") set_label(ax3, "input size", "x") set_label(ax3, "accuracy", "y") ax3.scatter(input_sizes, accuracys, marker="8", picker=True, c=['r', 'r', 'r', 'r', 'r']) ax3.legend(labels=["UNSELECTED"], labelcolor=["red"], loc="lower right", title="STATUS", fontsize="xx-small", title_fontsize="xx-small") ax3.scatter(input_sizes, accuracys, marker="8", picker=True, c=colors) ax3.plot(input_sizes, accuracys) format_axes([ax3]) def draw_ax4(model_index): ax4.cla() ax4.figure.canvas.draw_idle() model_x = xs[model_index] model_y = ys[model_index] model_input_size = model_input_sizes[model_index] set_sub_title(ax4, f"The pixels model({model_input_size}x{model_input_size}) focuses on") ax4.set_xlim(0, model_input_size) ax4.set_ylim(0, model_input_size) ax4.scatter(model_x, model_y, marker="8", color="red") set_label(ax4, "x", "x") set_label(ax4, "y", "y") format_axes([ax4]) def draw_image(ax, image, model_index): image_input_size = model_input_sizes[model_index] image = 
np.array(image) image = np.reshape(image, image_input_size*image_input_size) model_pixels_y_position = [] model_pixels_x_position = [] for i, pixel in enumerate(image): # < 255 can ignore white pixels if pixel < 255: model_pixels_y_position.append(image_input_size - i // image_input_size) model_pixels_x_position.append(i % image_input_size) ax.scatter(model_pixels_x_position, model_pixels_y_position, s=1, color="black") def draw_ax5_ax6(model_index): ax5.cla() ax5.figure.canvas.draw_idle() ax6.cla() ax6.figure.canvas.draw_idle() model_x = xs[model_index] model_y = ys[model_index] model_input_size = model_input_sizes[model_index] ax5.scatter(model_x, model_y, marker="8", color="red") ax5.set_xlim(0, model_input_size) ax5.set_ylim(0, model_input_size) draw_image(ax5, resized_fires[model_index], model_index) ax6.scatter(model_x, model_y, marker="8", color="red") ax6.set_xlim(0, model_input_size) ax6.set_ylim(0, model_input_size) draw_image(ax6, resized_waters[model_index], model_index) set_sub_title(ax5, f"The pixels model({model_input_size}x{model_input_size}) focuses on") set_label(ax5, "x", "x") set_label(ax5, "y", "y") set_label(ax6, "x", "x") set_label(ax6, "y", "y") format_axes([ax5, ax6]) selected_model_index = best_model def button_pressed(event): fire_image, water_image = get_random_fire_water() global resized_fires, resized_waters resized_fires = [fire_image.resize((8,8)), fire_image.resize((16,16)), fire_image.resize((32,32)), fire_image.resize((64,64)), fire_image.resize((128,128))] resized_waters = [water_image.resize((8,8)), water_image.resize((16,16)), water_image.resize((32,32)), water_image.resize((64,64)), water_image.resize((128,128))] draw_ax1_ax2(fire_image, water_image) draw_ax5_ax6(selected_model_index) # + fig, ax1, ax2, ax3, ax4, ax_button, ax5, ax6 = create_grid() main_title = '''How machine learning model distinguishes between Chinese handwritten Fire and Water in different sizes ''' set_main_title(main_title) draw_ax1_ax2(fire_image, 
water_image) draw_ax3(best_model) ax3_original_facecolor = ax3.get_facecolor() draw_ax4(best_model) pos_ax4 = ax4.get_position() ax4.set_position([pos_ax4.x0, pos_ax4.y0 - 0.05, pos_ax4.width, pos_ax4.height] ) myButton = Button(ax_button, 'Use other Fire/Water images', color='#34e5eb', hovercolor='#348feb') myButton.label.set_fontsize('x-small') myButton.on_clicked(button_pressed) draw_ax5_ax6(best_model) def onpick(event): global selected_model_index ind = event.ind model_index = ind[0] draw_ax3(model_index) draw_ax4(model_index) draw_ax5_ax6(model_index) selected_model_index = model_index fig.canvas.mpl_connect('pick_event', onpick) plt.show() plt.ion()
explainability_of_machine_learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Python Basics with Numpy (optional assignment) # # Welcome to your first assignment. This exercise gives you a brief introduction to Python. Even if you've used Python before, this will help familiarize you with functions we'll need. # # **Instructions:** # - You will be using Python 3. # - Avoid using for-loops and while-loops, unless you are explicitly told to do so. # - Do not modify the (# GRADED FUNCTION [function name]) comment in some cells. Your work would not be graded if you change this. Each cell containing that comment should only contain one function. # - After coding your function, run the cell right below it to check if your result is correct. # # **After this assignment you will:** # - Be able to use iPython Notebooks # - Be able to use numpy functions and numpy matrix/vector operations # - Understand the concept of "broadcasting" # - Be able to vectorize code # # Let's get started! # ## About iPython Notebooks ## # # iPython Notebooks are interactive coding environments embedded in a webpage. You will be using iPython notebooks in this class. You only need to write code between the ### START CODE HERE ### and ### END CODE HERE ### comments. After writing your code, you can run the cell by either pressing "SHIFT"+"ENTER" or by clicking on "Run Cell" (denoted by a play symbol) in the upper bar of the notebook. # # We will often specify "(≈ X lines of code)" in the comments to tell you about how much code you need to write. It is just a rough estimate, so don't feel bad if your code is longer or shorter. # # **Exercise**: Set test to `"Hello World"` in the cell below to print "Hello World" and run the two cells below. 
### START CODE HERE ### (≈ 1 line of code) test = 'Hello World' ### END CODE HERE ### print ("test: " + test) # **Expected output**: # test: Hello World # <font color='blue'> # **What you need to remember**: # - Run your cells using SHIFT+ENTER (or "Run cell") # - Write code in the designated areas using Python 3 only # - Do not modify the code outside of the designated areas # ## 1 - Building basic functions with numpy ## # # Numpy is the main package for scientific computing in Python. It is maintained by a large community (www.numpy.org). In this exercise you will learn several key numpy functions such as np.exp, np.log, and np.reshape. You will need to know how to use these functions for future assignments. # # ### 1.1 - sigmoid function, np.exp() ### # # Before using np.exp(), you will use math.exp() to implement the sigmoid function. You will then see why np.exp() is preferable to math.exp(). # # **Exercise**: Build a function that returns the sigmoid of a real number x. Use math.exp(x) for the exponential function. # # **Reminder**: # $sigmoid(x) = \frac{1}{1+e^{-x}}$ is sometimes also known as the logistic function. It is a non-linear function used not only in Machine Learning (Logistic Regression), but also in Deep Learning. # # <img src="images/Sigmoid.png" style="width:500px;height:228px;"> # # To refer to a function belonging to a specific package you could call it using package_name.function(). Run the code below to see an example with math.exp(). # + # GRADED FUNCTION: basic_sigmoid import math def basic_sigmoid(x): """ Compute sigmoid of x. 
Arguments: x -- A scalar Return: s -- sigmoid(x) """ ### START CODE HERE ### (≈ 1 line of code) s = 1/(1 + math.exp(-x)) ### END CODE HERE ### return s # - basic_sigmoid(3) # **Expected Output**: # <table style = "width:40%"> # <tr> # <td>** basic_sigmoid(3) **</td> # <td>0.9525741268224334 </td> # </tr> # # </table> # Actually, we rarely use the "math" library in deep learning because the inputs of the functions are real numbers. In deep learning we mostly use matrices and vectors. This is why numpy is more useful. ### One reason why we use "numpy" instead of "math" in Deep Learning ### x = [1, 2, 3] basic_sigmoid(x) # you will see this give an error when you run it, because x is a vector. # In fact, if $ x = (x_1, x_2, ..., x_n)$ is a row vector then $np.exp(x)$ will apply the exponential function to every element of x. The output will thus be: $np.exp(x) = (e^{x_1}, e^{x_2}, ..., e^{x_n})$ # + import numpy as np # example of np.exp x = np.array([1, 2, 3]) print(np.exp(x)) # result is (exp(1), exp(2), exp(3)) # - # Furthermore, if x is a vector, then a Python operation such as $s = x + 3$ or $s = \frac{1}{x}$ will output s as a vector of the same size as x. # + # example of vector operation x = np.array([1, 2, 3]) print (x + 3) # np.exp? # - # Any time you need more info on a numpy function, we encourage you to look at [the official documentation](https://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.exp.html). # # You can also create a new cell in the notebook and write `np.exp?` (for example) to get quick access to the documentation. # # **Exercise**: Implement the sigmoid function using numpy. # # **Instructions**: x could now be either a real number, a vector, or a matrix. The data structures we use in numpy to represent these shapes (vectors, matrices...) are called numpy arrays. You don't need to know more for now. # $$ \text{For } x \in \mathbb{R}^n \text{, } sigmoid(x) = sigmoid\begin{pmatrix} # x_1 \\ # x_2 \\ # ... 
\\ # x_n \\ # \end{pmatrix} = \begin{pmatrix} # \frac{1}{1+e^{-x_1}} \\ # \frac{1}{1+e^{-x_2}} \\ # ... \\ # \frac{1}{1+e^{-x_n}} \\ # \end{pmatrix}\tag{1} $$ # + # GRADED FUNCTION: sigmoid import numpy as np # this means you can access numpy functions by writing np.function() instead of numpy.function() def sigmoid(x): """ Compute the sigmoid of x Arguments: x -- A scalar or numpy array of any size Return: s -- sigmoid(x) """ ### START CODE HERE ### (≈ 1 line of code) s = 1/(1+np.exp(-x)) ### END CODE HERE ### return s # - x = np.array([1, 2, 3]) sigmoid(x) # **Expected Output**: # <table> # <tr> # <td> **sigmoid([1,2,3])**</td> # <td> array([ 0.73105858, 0.88079708, 0.95257413]) </td> # </tr> # </table> # # ### 1.2 - Sigmoid gradient # # As you've seen in lecture, you will need to compute gradients to optimize loss functions using backpropagation. Let's code your first gradient function. # # **Exercise**: Implement the function sigmoid_grad() to compute the gradient of the sigmoid function with respect to its input x. The formula is: $$sigmoid\_derivative(x) = \sigma'(x) = \sigma(x) (1 - \sigma(x))\tag{2}$$ # You often code this function in two steps: # 1. Set s to be the sigmoid of x. You might find your sigmoid(x) function useful. # 2. Compute $\sigma'(x) = s(1-s)$ # + # GRADED FUNCTION: sigmoid_derivative def sigmoid_derivative(x): """ Compute the gradient (also called the slope or derivative) of the sigmoid function with respect to its input x. You can store the output of the sigmoid function into variables and then use it to calculate the gradient. Arguments: x -- A scalar or numpy array Return: ds -- Your computed gradient. 
""" ### START CODE HERE ### (≈ 2 lines of code) s = 1/(1 + np.exp(-x)) ds = s*(1-s) ### END CODE HERE ### return ds # - x = np.array([1, 2, 3]) print ("sigmoid_derivative(x) = " + str(sigmoid_derivative(x))) # **Expected Output**: # # # <table> # <tr> # <td> **sigmoid_derivative([1,2,3])**</td> # <td> [ 0.19661193 0.10499359 0.04517666] </td> # </tr> # </table> # # # ### 1.3 - Reshaping arrays ### # # Two common numpy functions used in deep learning are [np.shape](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.shape.html) and [np.reshape()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html). # - X.shape is used to get the shape (dimension) of a matrix/vector X. # - X.reshape(...) is used to reshape X into some other dimension. # # For example, in computer science, an image is represented by a 3D array of shape $(length, height, depth = 3)$. However, when you read an image as the input of an algorithm you convert it to a vector of shape $(length*height*3, 1)$. In other words, you "unroll", or reshape, the 3D array into a 1D vector. # # <img src="images/image2vector_kiank.png" style="width:500px;height:300;"> # # **Exercise**: Implement `image2vector()` that takes an input of shape (length, height, 3) and returns a vector of shape (length\*height\*3, 1). For example, if you would like to reshape an array v of shape (a, b, c) into a vector of shape (a*b,c) you would do: # ``` python # v = v.reshape((v.shape[0]*v.shape[1], v.shape[2])) # v.shape[0] = a ; v.shape[1] = b ; v.shape[2] = c # ``` # - Please don't hardcode the dimensions of image as a constant. Instead look up the quantities you need with `image.shape[0]`, etc. 
# GRADED FUNCTION: image2vector def image2vector(image): """ Argument: image -- a numpy array of shape (length, height, depth) Returns: v -- a vector of shape (length*height*depth, 1) """ ### START CODE HERE ### (≈ 1 line of code) v = image.reshape((image.shape[0]*image.shape[1]*image.shape[2],1)) ### END CODE HERE ### return v # + # This is a 3 by 3 by 2 array, typically images will be (num_px_x, num_px_y,3) where 3 represents the RGB values image = np.array([[[ 0.67826139, 0.29380381], [ 0.90714982, 0.52835647], [ 0.4215251 , 0.45017551]], [[ 0.92814219, 0.96677647], [ 0.85304703, 0.52351845], [ 0.19981397, 0.27417313]], [[ 0.60659855, 0.00533165], [ 0.10820313, 0.49978937], [ 0.34144279, 0.94630077]]]) print ("image2vector(image) = " + str(image2vector(image))) # - # **Expected Output**: # # # <table style="width:100%"> # <tr> # <td> **image2vector(image)** </td> # <td> [[ 0.67826139] # [ 0.29380381] # [ 0.90714982] # [ 0.52835647] # [ 0.4215251 ] # [ 0.45017551] # [ 0.92814219] # [ 0.96677647] # [ 0.85304703] # [ 0.52351845] # [ 0.19981397] # [ 0.27417313] # [ 0.60659855] # [ 0.00533165] # [ 0.10820313] # [ 0.49978937] # [ 0.34144279] # [ 0.94630077]]</td> # </tr> # # # </table> # ### 1.4 - Normalizing rows # # Another common technique we use in Machine Learning and Deep Learning is to normalize our data. It often leads to a better performance because gradient descent converges faster after normalization. Here, by normalization we mean changing x to $ \frac{x}{\| x\|} $ (dividing each row vector of x by its norm). 
# # For example, if $$x =
# \begin{bmatrix}
#     0 & 3 & 4 \\
#     2 & 6 & 4 \\
# \end{bmatrix}\tag{3}$$ then $$\| x\| = np.linalg.norm(x, axis = 1, keepdims = True) = \begin{bmatrix}
#     5 \\
#     \sqrt{56} \\
# \end{bmatrix}\tag{4} $$and        $$ x\_normalized = \frac{x}{\| x\|} = \begin{bmatrix}
#     0 & \frac{3}{5} & \frac{4}{5} \\
#     \frac{2}{\sqrt{56}} & \frac{6}{\sqrt{56}} & \frac{4}{\sqrt{56}} \\
# \end{bmatrix}\tag{5}$$ Note that you can divide matrices of different sizes and it works fine: this is called broadcasting and you're going to learn about it in part 5.
#
#
# **Exercise**: Implement normalizeRows() to normalize the rows of a matrix. After applying this function to an input matrix x, each row of x should be a vector of unit length (meaning length 1).

# +
# GRADED FUNCTION: normalizeRows

def normalizeRows(x):
    """
    Implement a function that normalizes each row of the matrix x (to have unit length).

    Argument:
    x -- A numpy matrix of shape (n, m)

    Returns:
    x -- The normalized (by row) numpy matrix. You are allowed to modify x.
    """

    ### START CODE HERE ### (≈ 2 lines of code)
    # Row-wise L2 norms; keepdims=True keeps shape (n, 1) so broadcasting
    # divides every element of row i by the norm of row i.
    row_norms = np.linalg.norm(x, ord=2, axis=1, keepdims=True)
    return x / row_norms
    ### END CODE HERE ###

# -

x = np.array([
    [0, 3, 4],
    [1, 6, 4]])
print("normalizeRows(x) = " + str(normalizeRows(x)))

# **Expected Output**:
#
# <table style="width:60%">
#
#      <tr>
#        <td> **normalizeRows(x)** </td>
#        <td> [[ 0.          0.6         0.8       ]
#  [ 0.13736056  0.82416338  0.54944226]]</td>
#      </tr>
#
#
# </table>

# **Note**:
# In normalizeRows(), you can try to print the shapes of x_norm and x, and then rerun the assessment. You'll find out that they have different shapes. This is normal given that x_norm takes the norm of each row of x. So x_norm has the same number of rows but only 1 column. So how did it work when you divided x by x_norm? This is called broadcasting and we'll talk about it now!
# ### 1.5 - Broadcasting and the softmax function #### # A very important concept to understand in numpy is "broadcasting". It is very useful for performing mathematical operations between arrays of different shapes. For the full details on broadcasting, you can read the official [broadcasting documentation](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). # **Exercise**: Implement a softmax function using numpy. You can think of softmax as a normalizing function used when your algorithm needs to classify two or more classes. You will learn more about softmax in the second course of this specialization. # # **Instructions**: # - $ \text{for } x \in \mathbb{R}^{1\times n} \text{, } softmax(x) = softmax(\begin{bmatrix} # x_1 && # x_2 && # ... && # x_n # \end{bmatrix}) = \begin{bmatrix} # \frac{e^{x_1}}{\sum_{j}e^{x_j}} && # \frac{e^{x_2}}{\sum_{j}e^{x_j}} && # ... && # \frac{e^{x_n}}{\sum_{j}e^{x_j}} # \end{bmatrix} $ # # - $\text{for a matrix } x \in \mathbb{R}^{m \times n} \text{, $x_{ij}$ maps to the element in the $i^{th}$ row and $j^{th}$ column of $x$, thus we have: }$ $$softmax(x) = softmax\begin{bmatrix} # x_{11} & x_{12} & x_{13} & \dots & x_{1n} \\ # x_{21} & x_{22} & x_{23} & \dots & x_{2n} \\ # \vdots & \vdots & \vdots & \ddots & \vdots \\ # x_{m1} & x_{m2} & x_{m3} & \dots & x_{mn} # \end{bmatrix} = \begin{bmatrix} # \frac{e^{x_{11}}}{\sum_{j}e^{x_{1j}}} & \frac{e^{x_{12}}}{\sum_{j}e^{x_{1j}}} & \frac{e^{x_{13}}}{\sum_{j}e^{x_{1j}}} & \dots & \frac{e^{x_{1n}}}{\sum_{j}e^{x_{1j}}} \\ # \frac{e^{x_{21}}}{\sum_{j}e^{x_{2j}}} & \frac{e^{x_{22}}}{\sum_{j}e^{x_{2j}}} & \frac{e^{x_{23}}}{\sum_{j}e^{x_{2j}}} & \dots & \frac{e^{x_{2n}}}{\sum_{j}e^{x_{2j}}} \\ # \vdots & \vdots & \vdots & \ddots & \vdots \\ # \frac{e^{x_{m1}}}{\sum_{j}e^{x_{mj}}} & \frac{e^{x_{m2}}}{\sum_{j}e^{x_{mj}}} & \frac{e^{x_{m3}}}{\sum_{j}e^{x_{mj}}} & \dots & \frac{e^{x_{mn}}}{\sum_{j}e^{x_{mj}}} # \end{bmatrix} = \begin{pmatrix} # softmax\text{(first row of x)} \\ # 
softmax\text{(second row of x)} \\ # ... \\ # softmax\text{(last row of x)} \\ # \end{pmatrix} $$ # + # GRADED FUNCTION: softmax def softmax(x): """Calculates the softmax for each row of the input x. Your code should work for a row vector and also for matrices of shape (n, m). Argument: x -- A numpy matrix of shape (n,m) Returns: s -- A numpy matrix equal to the softmax of x, of shape (n,m) """ ### START CODE HERE ### (≈ 3 lines of code) # Apply exp() element-wise to x. Use np.exp(...). x_exp = np.exp(x) # Create a vector x_sum that sums each row of x_exp. Use np.sum(..., axis = 1, keepdims = True). x_sum = np.sum(x_exp, axis = 1, keepdims = True) # Compute softmax(x) by dividing x_exp by x_sum. It should automatically use numpy broadcasting. s = x_exp/x_sum ### END CODE HERE ### return s # - x = np.array([ [9, 2, 5, 0, 0], [7, 5, 0, 0 ,0]]) print("softmax(x) = " + str(softmax(x))) # **Expected Output**: # # <table style="width:60%"> # # <tr> # <td> **softmax(x)** </td> # <td> [[ 9.80897665e-01 8.94462891e-04 1.79657674e-02 1.21052389e-04 # 1.21052389e-04] # [ 8.78679856e-01 1.18916387e-01 8.01252314e-04 8.01252314e-04 # 8.01252314e-04]]</td> # </tr> # </table> # # **Note**: # - If you print the shapes of x_exp, x_sum and s above and rerun the assessment cell, you will see that x_sum is of shape (2,1) while x_exp and s are of shape (2,5). **x_exp/x_sum** works due to python broadcasting. # # Congratulations! You now have a pretty good understanding of python numpy and have implemented a few useful functions that you will be using in deep learning. # <font color='blue'> # **What you need to remember:** # - np.exp(x) works for any np.array x and applies the exponential function to every coordinate # - the sigmoid function and its gradient # - image2vector is commonly used in deep learning # - np.reshape is widely used. In the future, you'll see that keeping your matrix/vector dimensions straight will go toward eliminating a lot of bugs. 
# - numpy has efficient built-in functions
# - broadcasting is extremely useful

# ## 2) Vectorization
#
#
# In deep learning, you deal with very large datasets. Hence, a non-computationally-optimal function can become a huge bottleneck in your algorithm and can result in a model that takes ages to run. To make sure that your code is computationally efficient, you will use vectorization. For example, try to tell the difference between the following implementations of the dot/outer/elementwise product.

# +
# Benchmark cell: pure-Python loop implementations of four common vector
# products, each timed with time.process_time (CPU time, milliseconds).
import time

x1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0]
x2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0]

### CLASSIC DOT PRODUCT OF VECTORS IMPLEMENTATION ###
tic = time.process_time()
dot = 0
for i in range(len(x1)):
    dot+= x1[i]*x2[i]
toc = time.process_time()
print ("dot = " + str(dot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")

### CLASSIC OUTER PRODUCT IMPLEMENTATION ###
tic = time.process_time()
outer = np.zeros((len(x1),len(x2))) # we create a len(x1)*len(x2) matrix with only zeros
for i in range(len(x1)):
    for j in range(len(x2)):
        outer[i,j] = x1[i]*x2[j]
toc = time.process_time()
print ("outer = " + str(outer) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")

### CLASSIC ELEMENTWISE IMPLEMENTATION ###
tic = time.process_time()
mul = np.zeros(len(x1))
for i in range(len(x1)):
    mul[i] = x1[i]*x2[i]
toc = time.process_time()
print ("elementwise multiplication = " + str(mul) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")

### CLASSIC GENERAL DOT PRODUCT IMPLEMENTATION ###
W = np.random.rand(3,len(x1)) # Random 3*len(x1) numpy array
tic = time.process_time()
gdot = np.zeros(W.shape[0])
for i in range(W.shape[0]):
    for j in range(len(x1)):
        gdot[i] += W[i,j]*x1[j]
toc = time.process_time()
print ("gdot = " + str(gdot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")

# +
# Benchmark cell: the same four products computed with numpy's vectorized
# (C-level) routines for comparison against the loop versions above.
x1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0]
x2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0]

### VECTORIZED DOT PRODUCT OF VECTORS ###
tic = time.process_time()
dot = np.dot(x1,x2)
toc = time.process_time()
print ("dot = " + str(dot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")

### VECTORIZED OUTER PRODUCT ###
tic = time.process_time()
outer = np.outer(x1,x2)
toc = time.process_time()
print ("outer = " + str(outer) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")

### VECTORIZED ELEMENTWISE MULTIPLICATION ###
tic = time.process_time()
mul = np.multiply(x1,x2)
toc = time.process_time()
print ("elementwise multiplication = " + str(mul) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")

### VECTORIZED GENERAL DOT PRODUCT ###
# NOTE(review): W is defined in the previous cell (CLASSIC GENERAL DOT PRODUCT);
# this cell only works when the cells are run in order.
tic = time.process_time()
dot = np.dot(W,x1)
toc = time.process_time()
print ("gdot = " + str(dot) + "\n ----- Computation time = " + str(1000*(toc - tic)) + "ms")
# -

# As you may have noticed, the vectorized implementation is much cleaner and more efficient. For bigger vectors/matrices, the differences in running time become even bigger.
#
# **Note** that `np.dot()` performs a matrix-matrix or matrix-vector multiplication. This is different from `np.multiply()` and the `*` operator (which is equivalent to  `.*` in Matlab/Octave), which performs an element-wise multiplication.

# ### 2.1 Implement the L1 and L2 loss functions
#
# **Exercise**: Implement the numpy vectorized version of the L1 loss. You may find the function abs(x) (absolute value of x) useful.
#
# **Reminder**:
# - The loss is used to evaluate the performance of your model. The bigger your loss is, the more different your predictions ($ \hat{y} $) are from the true values ($y$). In deep learning, you use optimization algorithms like Gradient Descent to train your model and to minimize the cost.
# - L1 loss is defined as:
# $$\begin{align*} & L_1(\hat{y}, y) = \sum_{i=0}^m|y^{(i)} - \hat{y}^{(i)}| \end{align*}\tag{6}$$

# +
# GRADED FUNCTION: L1

def L1(yhat, y):
    """
    Arguments:
    yhat -- vector of size m (predicted labels)
    y -- vector of size m (true labels)

    Returns:
    loss -- the value of the L1 loss function defined above
    """

    ### START CODE HERE ### (≈ 1 line of code)
    # Sum of absolute prediction errors (|a - b| is symmetric, so the
    # order of the subtraction does not matter).
    loss = np.abs(yhat - y).sum()
    ### END CODE HERE ###

    return loss

# -

yhat = np.array([.9, 0.2, 0.1, .4, .9])
y = np.array([1, 0, 0, 1, 1])
print("L1 = " + str(L1(yhat,y)))

# **Expected Output**:
#
# <table style="width:20%">
#
#      <tr>
#        <td> **L1** </td>
#        <td> 1.1 </td>
#      </tr>
# </table>
#

# **Exercise**: Implement the numpy vectorized version of the L2 loss. There are several way of implementing the L2 loss but you may find the function np.dot() useful. As a reminder, if $x = [x_1, x_2, ..., x_n]$, then `np.dot(x,x)` = $\sum_{j=0}^n x_j^{2}$.
#
# - L2 loss is defined as $$\begin{align*} & L_2(\hat{y},y) = \sum_{i=0}^m(y^{(i)} - \hat{y}^{(i)})^2 \end{align*}\tag{7}$$

# +
# GRADED FUNCTION: L2

def L2(yhat, y):
    """
    Arguments:
    yhat -- vector of size m (predicted labels)
    y -- vector of size m (true labels)

    Returns:
    loss -- the value of the L2 loss function defined above
    """

    ### START CODE HERE ### (≈ 1 line of code)
    # Sum of squared prediction errors, written as an elementwise square
    # instead of a self dot-product -- same value either way.
    loss = np.sum((y - yhat) ** 2)
    ### END CODE HERE ###

    return loss

# -

yhat = np.array([.9, 0.2, 0.1, .4, .9])
y = np.array([1, 0, 0, 1, 1])
print("L2 = " + str(L2(yhat,y)))

# **Expected Output**:
# <table style="width:20%">
#      <tr>
#        <td> **L2** </td>
#        <td> 0.43 </td>
#      </tr>
# </table>

# Congratulations on completing this assignment. We hope that this little warm-up exercise helps you in the future assignments, which will be more exciting and interesting!

# <font color='blue'>
# **What to remember:**
# - Vectorization is very important in deep learning. It provides computational efficiency and clarity.
# - You have reviewed the L1 and L2 loss.
# - You are familiar with many numpy functions such as np.sum, np.dot, np.multiply, np.maximum, etc...
Deep Learning Specialization - deeplearning.ai/1- Neural Networks and Deep Learning/Python+Basics+With+Numpy+v3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.12 64-bit (''fidrox'': conda)' # name: python3 # --- # # Daily Births Forecasting # import the necessary libraries import pandas as pd import numpy as np import fbprophet from fbprophet.plot import add_changepoints_to_plot # Load the dataset data = pd.read_csv('./data/daily-total-female-births-CA.csv',parse_dates=['date'],date_parser=pd.to_datetime) data.columns =['ds','y'] data.head() data.isnull().any() data.shape # Visualize the data # + import plotly.graph_objects as go fig = go.Figure() fig.add_trace(go.Scatter(x =data['ds'],y=data['y'],mode='lines+markers')) fig.show() # - # # Apply Fbprophet model import warnings with warnings.catch_warnings(): warnings.simplefilter('ignore') m = fbprophet.Prophet(yearly_seasonality=True, daily_seasonality=False, changepoint_range=0.9, changepoint_prior_scale=0.5, seasonality_mode='multiplicative') m.fit(data) future = m.make_future_dataframe(periods=50,freq='d') future forcast = m.predict(future) forcast # visualize the seasonality effects we got after applying the model m.plot_components(forcast) # - From the trend plot we can say there is an upward trend overal # - From the seasonality plot we can say there is definitely a seasonality causing the female births to fluctuate over the period # # visualize the predictions made by the Facebook prophet model for daily births prediction m.plot(forcast) from fbprophet.plot import plot_plotly #let's plot the forecast plot_plotly(m, forcast) # *** #
Time series Forcasting/Daily birth forcasting/main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Building Data Genome Project 2.0
# ## Buildings normalized consumption
# Biam! (<EMAIL>)

# +
# data and numbers
import numpy as np
import pandas as pd
import datetime as dt

# Visualization
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import ticker
import matplotlib.dates as mdates
# %matplotlib inline

import pandas as pd
from sklearn import preprocessing
# -

# Relative paths to raw meter readings and building metadata.
path_raw = "..//data//meters//raw//"
path_meta = "..//data//metadata//"

# # Introduction
# In this notebook will be plot the normalized daily consumption. This is, daily energy consumption per area unit (square feet) of the building.<br>
# Here is used the raw meter data, without processing outliers or missing values.

# # Metadata
meta = pd.read_csv(
    path_meta + "metadata.csv",
    usecols=[
        "building_id",
        "site_id",
        "sqm"
    ]
)

meta.info()

# # Functions

# This function normalize all meters data automatically
def normalizeData(metername):
    """Load one meter's raw readings and return a (meters x days) DataFrame of
    daily consumption per sqm, min-max scaled per building and sorted by total.

    metername -- base name of a CSV in path_raw (e.g. "electricity")
    """
    # load data
    df = pd.read_csv(path_raw + metername + ".csv")

    # Transform timestamp to datetime object type
    df["timestamp"] = pd.to_datetime(df["timestamp"], format="%Y-%m-%d %H:%M:%S")

    # Melt meter dataset: wide (one column per building) -> long format
    df = pd.melt(
        df,
        id_vars="timestamp",
        var_name="building_id",
        value_name="meter_reading",
    )

    # Calculate daily consumption (sum of the hourly readings per day)
    df = df.groupby(["building_id",df.timestamp.dt.date]).sum().reset_index()

    # Add area column
    df = pd.merge(df, meta[["building_id", "sqm"]], how="left", on="building_id")

    # Normalize meter reading by the building floor area
    df["norm_reading"] = df["meter_reading"] / df["sqm"]
    df = df.drop(["meter_reading", "sqm"], axis=1)

    # Unmelt: back to wide format, one column per building
    df = df.pivot(index="timestamp", columns="building_id", values="norm_reading")

    # Scale (min-max)
    row_list = df.index # timestamp
    col_list = df.columns # building names
    x = df.values #returns a numpy array
    scaler = preprocessing.MinMaxScaler()
    x_scaled = scaler.fit_transform(x) # min-max scaling per column (building), NOT standard scaling
    df = pd.DataFrame(x_scaled) # as data frame

    # rename columns (MinMaxScaler output loses the original labels)
    col_rename_dict = {i:j for i,j in zip(df.columns, col_list)}
    df.rename(columns=col_rename_dict, inplace=True)

    # rename rows
    row_rename_dict = {i:j for i,j in zip(df.index, row_list)}
    df.rename(index=row_rename_dict, inplace=True)

    # Sort buildings (rows of the transpose) by total scaled consumption
    df = df.T.loc[df.T.sum(axis=1).sort_values().index]

    return df

# # One figure to subplot them all

# +
# One heatmap (buildings x days) per meter type, on an A4-sized canvas.
fig, axes = plt.subplots(2, 4, sharex = True, figsize=(8.27,11.69))
axes = axes.flatten()

metername = ["electricity","water","chilledwater","hotwater","gas",
             "steam","solar","irrigation"]

for i,j in enumerate(metername):
    df = normalizeData(j)

    # Get the data
    x = mdates.drange(df.columns[0], df.columns[-1] + dt.timedelta(days=1), dt.timedelta(days=1))
    y = np.linspace(1, len(df), len(df)+1)

    # Plot
    ax = axes[i]
    data = np.array(df)
    cmap = plt.get_cmap('hot')
    qmesh = ax.pcolormesh(x, y, data, cmap=cmap, rasterized=True, vmin=0, vmax=1)

    # Axis
    ax.axis('tight')
    ax.xaxis_date() # Set up as dates
    ax.tick_params("x", labelrotation=90)
    ax.set_yticklabels([])
    ax.set_title(j + " (" + str(int(max(y))) + " meters)", fontdict={'fontsize':10})

    # Color bar (shared, anchored at the bottom of the figure)
    cbaxes = fig.add_axes([0.025, 0.02, 0.96, 0.02])
    cbar = fig.colorbar(qmesh, ax=ax, orientation='horizontal', cax = cbaxes)
    cbar.set_label('Min-max Scaled Normalized Consumption (KWh/sqm)')

plt.tight_layout()
plt.subplots_adjust(bottom=0.12)
# -

fig.savefig("..\\figures\\normalizedConsumption_all.jpg", dpi=300, bbox_inches='tight')

# # Bibliography
# - <NAME>., 2017. Screening Meter Data: Characterization of Temporal Energy Data from Large Groups of Non-Residential Buildings. ETH Zürich, Zurich, Switzerland.
notebooks/04_Normalized-consumption.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tutorial 17: Self-Supervised Contrastive Learning with SimCLR # # ![Status](https://img.shields.io/static/v1.svg?label=Status&message=Finished&color=green) # # **Filled notebook:** # [![View on Github](https://img.shields.io/static/v1.svg?logo=github&label=Repo&message=View%20On%20Github&color=lightgrey)](https://github.com/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial17/SimCLR.ipynb) # [![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial17/SimCLR.ipynb) # **Pre-trained models:** # [![View files on Github](https://img.shields.io/static/v1.svg?logo=github&label=Repo&message=View%20On%20Github&color=lightgrey)](https://github.com/phlippe/saved_models/tree/main/tutorial17) # [![GoogleDrive](https://img.shields.io/static/v1.svg?logo=google-drive&logoColor=yellow&label=GDrive&message=Download&color=yellow)](https://drive.google.com/drive/folders/1BmisSPs5BXQKpolyHp5X4klSAhpDx479?usp=sharing) # **Recordings:** # [![YouTube - Part 1](https://img.shields.io/static/v1.svg?logo=youtube&label=YouTube&message=Part%201&color=red)](https://youtu.be/waVZDFR-06U) # [![YouTube - Part 2](https://img.shields.io/static/v1.svg?logo=youtube&label=YouTube&message=Part%202&color=red)](https://youtu.be/o3FktysLLd4) # In this tutorial, we will take a closer look at self-supervised contrastive learning. Self-supervised learning, or also sometimes called unsupervised learning, describes the scenario where we have given input data, but no accompanying labels to train in a classical supervised way. 
However, this data still contains a lot of information from which we can learn: how are the images different from each other? What patterns are descriptive for certain images? Can we cluster the images? And so on. Methods for self-supervised learning try to learn as much as possible from the data alone, so it can quickly be finetuned for a specific classification task. # The benefit of self-supervised learning is that a large dataset can often easily be obtained. For instance, if we want to train a vision model on semantic segmentation for autonomous driving, we can collect large amounts of data by simply installing a camera in a car, and driving through a city for an hour. In contrast, if we would want to do supervised learning, we would have to manually label all those images before training a model. This is extremely expensive, and would likely take a couple of months to manually label the same amount of data. Further, self-supervised learning can provide an alternative to transfer learning from models pretrained on ImageNet since we could pretrain a model on a specific dataset/situation, e.g. traffic scenarios for autonomous driving. # # Within the last two years, a lot of new approaches have been proposed for self-supervised learning, in particular for images, that have resulted in great improvements over supervised models when few labels are available. The subfield that we will focus on in this tutorial is contrastive learning. Contrastive learning is motivated by the question mentioned above: how are images different from each other? Specifically, contrastive learning methods train a model to cluster an image and its slightly augmented version in latent space, while the distance to other images should be maximized. A very recent and simple method for this is [SimCLR](https://arxiv.org/abs/2006.10029), which is visualized below (figure credit - [<NAME> et al.](https://simclr.github.io/)). 
# # <center width="100%"><img src="simclr_contrastive_learning.png" width="500px"></center> # # The general setup is that we are given a dataset of images without any labels, and want to train a model on this data such that it can quickly adapt to any image recognition task afterward. During each training iteration, we sample a batch of images as usual. For each image, we create two versions by applying data augmentation techniques like cropping, Gaussian noise, blurring, etc. An example of such is shown on the left with the image of the dog. We will go into the details and effects of the chosen augmentation techniques later. On those images, we apply a CNN like ResNet and obtain as output a 1D feature vector on which we apply a small MLP. The output features of the two augmented images are then trained to be close to each other, while all other images in that batch should be as different as possible. This way, the model has to learn to recognize the content of the image that remains unchanged under the data augmentations, such as objects which we usually care about in supervised tasks. # # We will now implement this framework ourselves and discuss further details along the way. 
Let's first start with importing our standard libraries below: # + ## Standard libraries import os from copy import deepcopy ## Imports for plotting import matplotlib.pyplot as plt plt.set_cmap('cividis') # %matplotlib inline from IPython.display import set_matplotlib_formats set_matplotlib_formats('svg', 'pdf') # For export import matplotlib matplotlib.rcParams['lines.linewidth'] = 2.0 import seaborn as sns sns.set() ## tqdm for loading bars from tqdm.notebook import tqdm ## PyTorch import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data as data import torch.optim as optim ## Torchvision import torchvision from torchvision.datasets import STL10 from torchvision import transforms # PyTorch Lightning try: import pytorch_lightning as pl except ModuleNotFoundError: # Google Colab does not have PyTorch Lightning installed by default. Hence, we do it here if necessary # !pip install --quiet pytorch-lightning>=1.4 import pytorch_lightning as pl from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint # Import tensorboard # %load_ext tensorboard # Path to the folder where the datasets are/should be downloaded (e.g. CIFAR10) DATASET_PATH = "../data" # Path to the folder where the pretrained models are saved CHECKPOINT_PATH = "../saved_models/tutorial17" # In this notebook, we use data loaders with heavier computational processing. It is recommended to use as many # workers as possible in a data loader, which corresponds to the number of CPU cores NUM_WORKERS = os.cpu_count() # Setting the seed pl.seed_everything(42) # Ensure that all operations are deterministic on GPU (if used) for reproducibility torch.backends.cudnn.determinstic = True torch.backends.cudnn.benchmark = False device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") print("Device:", device) print("Number of workers:", NUM_WORKERS) # - # As in many tutorials before, we provide pre-trained models. 
Note that those models are slightly larger as normal (~100MB overall) since we use the default ResNet-18 architecture. If you are running this notebook locally, make sure to have sufficient disk space available.

# +
# Download the pretrained checkpoints and tensorboard logs for this tutorial
# from GitHub, skipping files that already exist locally.
import urllib.request
from urllib.error import HTTPError

# Github URL where saved models are stored for this tutorial
base_url = "https://raw.githubusercontent.com/phlippe/saved_models/main/tutorial17/"
# Files to download
pretrained_files = ["SimCLR.ckpt", "ResNet.ckpt",
                    "tensorboards/SimCLR/events.out.tfevents.SimCLR",
                    "tensorboards/classification/ResNet/events.out.tfevents.ResNet"]
pretrained_files += [f"LogisticRegression_{size}.ckpt" for size in [10, 20, 50, 100, 200, 500]]
# Create checkpoint path if it doesn't exist yet
os.makedirs(CHECKPOINT_PATH, exist_ok=True)

# For each file, check whether it already exists. If not, try downloading it.
for file_name in pretrained_files:
    file_path = os.path.join(CHECKPOINT_PATH, file_name)
    if "/" in file_name:
        # Nested file (e.g. tensorboard logs): ensure its subdirectory exists.
        os.makedirs(file_path.rsplit("/",1)[0], exist_ok=True)
    if not os.path.isfile(file_path):
        file_url = base_url + file_name
        print(f"Downloading {file_url}...")
        try:
            urllib.request.urlretrieve(file_url, file_path)
        except HTTPError as e:
            print("Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\n", e)
# -

# ## SimCLR
#
# We will start our exploration of contrastive learning by discussing the effect of different data augmentation techniques, and how we can implement an efficient data loader for such. Next, we implement SimCLR with PyTorch Lightning, and finally train it on a large, unlabeled dataset.

# ### Data Augmentation for Contrastive Learning
#
# To allow efficient training, we need to prepare the data loading such that we sample two different, random augmentations for each image in the batch.
The easiest way to do this is by creating a transformation that, when being called, applies a set of data augmentations to an image twice. This is implemented in the class `ContrastiveTransformations` below: class ContrastiveTransformations(object): def __init__(self, base_transforms, n_views=2): self.base_transforms = base_transforms self.n_views = n_views def __call__(self, x): return [self.base_transforms(x) for i in range(self.n_views)] # The contrastive learning framework can easily be extended to have more _positive_ examples by sampling more than two augmentations of the same image. However, the most efficient training is usually obtained by using only two. # # Next, we can look at the specific augmentations we want to apply. The choice of the data augmentation to use is the most crucial hyperparameter in SimCLR since it directly affects how the latent space is structured, and what patterns might be learned from the data. Let's first take a look at some of the most popular data augmentations (figure credit - [<NAME> and <NAME>](https://ai.googleblog.com/2020/04/advancing-self-supervised-and-semi.html)): # # <center width="100%"><img src="simclr_data_augmentations.png" width="800px" style="padding-top: 10px; padding-bottom: 10px"></center> # # All of them can be used, but it turns out that two augmentations stand out in their importance: crop-and-resize, and color distortion. Interestingly, however, they only lead to strong performance if they have been used together as discussed by [<NAME>.](https://arxiv.org/abs/2006.10029) in their SimCLR paper. When performing randomly cropping and resizing, we can distinguish between two situations: (a) cropped image A provides a local view of cropped image B, or (b) cropped images C and D show neighboring views of the same image (figure credit - [<NAME> and <NAME>](https://ai.googleblog.com/2020/04/advancing-self-supervised-and-semi.html)). 
# # <center width="100%"><img src="crop_views.svg" width="400px" style="padding-top: 20px; padding-bottom: 0px"></center> # # While situation (a) requires the model to learn some sort of scale invariance to make crops A and B similar in latent space, situation (b) is more challenging since the model needs to recognize an object beyond its limited view. However, without color distortion, there is a loophole that the model can exploit, namely that different crops of the same image usually look very similar in color space. Consider the picture of the dog above. Simply from the color of the fur and the green color tone of the background, you can reason that two patches belong to the same image without actually recognizing the dog in the picture. In this case, the model might end up focusing only on the color histograms of the images, and ignore other more generalizable features. If, however, we distort the colors in the two patches randomly and independently of each other, the model cannot rely on this simple feature anymore. Hence, by combining random cropping and color distortions, the model can only match two patches by learning generalizable representations. # # Overall, for our experiments, we apply a set of 5 transformations following the original SimCLR setup: random horizontal flip, crop-and-resize, color distortion, random grayscale, and gaussian blur. In comparison to the [original implementation](https://github.com/google-research/simclr), we reduce the effect of the color jitter slightly (0.5 instead of 0.8 for brightness, contrast, and saturation, and 0.1 instead of 0.2 for hue). In our experiments, this setting obtained better performance and was faster and more stable to train. If, for instance, the brightness scale highly varies in a dataset, the original settings can be more beneficial since the model can't rely on this information anymore to distinguish between images. 
# SimCLR augmentation pipeline: horizontal flip, crop-and-resize to 96x96
# (STL10 resolution), color jitter (applied with prob. 0.8), random grayscale,
# Gaussian blur, then tensor conversion and normalization to roughly [-1, 1].
contrast_transforms = transforms.Compose([transforms.RandomHorizontalFlip(),
                                          transforms.RandomResizedCrop(size=96),
                                          transforms.RandomApply([
                                              transforms.ColorJitter(brightness=0.5,
                                                                     contrast=0.5,
                                                                     saturation=0.5,
                                                                     hue=0.1)
                                          ], p=0.8),
                                          transforms.RandomGrayscale(p=0.2),
                                          transforms.GaussianBlur(kernel_size=9),
                                          transforms.ToTensor(),
                                          transforms.Normalize((0.5,), (0.5,))
                                         ])

# After discussing the data augmentation techniques, we can now focus on the dataset. In this tutorial, we will use the [STL10 dataset](https://cs.stanford.edu/~acoates/stl10/), which, similarly to CIFAR10, contains images of 10 classes: airplane, bird, car, cat, deer, dog, horse, monkey, ship, truck. However, the images have a higher resolution, namely $96\times 96$ pixels, and we are only provided with 500 labeled images per class. Additionally, we have a much larger set of $100,000$ unlabeled images which are similar to the training images but are sampled from a wider range of animals and vehicles. This makes the dataset ideal to showcase the benefits that self-supervised learning offers.
#
# Luckily, the STL10 dataset is provided through torchvision. Keep in mind, however, that since this dataset is relatively large and has a considerably higher resolution than CIFAR10, it requires more disk space (~3GB) and takes a bit of time to download. For our initial discussion of self-supervised learning and SimCLR, we will create two data loaders with our contrastive transformations above: the `unlabeled_data` will be used to train our model via contrastive learning, and `train_data_contrast` will be used as a validation set in contrastive learning.
# STL10 splits wrapped with the contrastive transform: each __getitem__ returns
# a list of 2 differently-augmented views of the same image.
unlabeled_data = STL10(root=DATASET_PATH, split='unlabeled', download=True,
                       transform=ContrastiveTransformations(contrast_transforms, n_views=2))
train_data_contrast = STL10(root=DATASET_PATH, split='train', download=True,
                            transform=ContrastiveTransformations(contrast_transforms, n_views=2))

# Finally, before starting with our implementation of SimCLR, let's look at some example image pairs sampled with our augmentations:

# +
# Visualize some examples
pl.seed_everything(42)
NUM_IMAGES = 6
# unlabeled_data[idx][0] is the list of 2 augmented views; stack all views of
# the first NUM_IMAGES samples into one tensor for the image grid.
imgs = torch.stack([img for idx in range(NUM_IMAGES) for img in unlabeled_data[idx][0]], dim=0)
img_grid = torchvision.utils.make_grid(imgs, nrow=6, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)  # CHW -> HWC for matplotlib

plt.figure(figsize=(10,5))
plt.title('Augmented image examples of the STL10 dataset')
plt.imshow(img_grid)
plt.axis('off')
plt.show()
plt.close()
# -

# We see the wide variety of our data augmentation, including randomly cropping, grayscaling, gaussian blur, and color distortion. Thus, it remains a challenging task for the model to match two, independently augmented patches of the same image.

# ### SimCLR implementation
#
# Using the data loader pipeline above, we can now implement SimCLR. At each iteration, we get for every image $x$ two differently augmented versions, which we refer to as $\tilde{x}_i$ and $\tilde{x}_j$. Both of these images are encoded into a one-dimensional feature vector, between which we want to maximize similarity which minimizes it to all other images in the batch. The encoder network is split into two parts: a base encoder network $f(\cdot)$, and a projection head $g(\cdot)$. The base network is usually a deep CNN as we have seen in e.g. [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html) before, and is responsible for extracting a representation vector from the augmented data examples.
In our experiments, we will use the common ResNet-18 architecture as $f(\cdot)$, and refer to the output as $f(\tilde{x}_i)=h_i$. The projection head $g(\cdot)$ maps the representation $h$ into a space where we apply the contrastive loss, i.e., compare similarities between vectors. It is often chosen to be a small MLP with non-linearities, and for simplicity, we follow the original SimCLR paper setup by defining it as a two-layer MLP with ReLU activation in the hidden layer. Note that in the follow-up paper, [SimCLRv2](https://arxiv.org/abs/2006.10029), the authors mention that larger/wider MLPs can boost the performance considerably. This is why we apply an MLP with four times larger hidden dimensions, but deeper MLPs showed to overfit on the given dataset. The general setup is visualized below (figure credit - [<NAME> et al.](https://arxiv.org/abs/2006.10029)): # # <center width="100%"><img src="simclr_network_setup.svg" width="350px"></center> # # After finishing the training with contrastive learning, we will remove the projection head $g(\cdot)$, and use $f(\cdot)$ as a pretrained feature extractor. The representations $z$ that come out of the projection head $g(\cdot)$ have been shown to perform worse than those of the base network $f(\cdot)$ when finetuning the network for a new task. This is likely because the representations $z$ are trained to become invariant to many features like the color that can be important for downstream tasks. Thus, $g(\cdot)$ is only needed for the contrastive learning stage. # # Now that the architecture is described, let's take a closer look at how we train the model. As mentioned before, we want to maximize the similarity between the representations of the two augmented versions of the same image, i.e., $z_i$ and $z_j$ in the figure above, while minimizing it to all other examples in the batch. 
# SimCLR thereby applies the InfoNCE loss, originally proposed by [Oord et al.](https://arxiv.org/abs/1807.03748) for contrastive learning. In short, the InfoNCE loss compares the similarity of $z_i$ and $z_j$ to the similarity of $z_i$ to any other representation in the batch by performing a softmax over the similarity values. The loss can be formally written as:
#
# $$
# \ell_{i,j}=-\log \frac{\exp(\text{sim}(z_i,z_j)/\tau)}{\sum_{k=1}^{2N}\mathbb{1}_{[k\neq i]}\exp(\text{sim}(z_i,z_k)/\tau)}=-\text{sim}(z_i,z_j)/\tau+\log\left[\sum_{k=1}^{2N}\mathbb{1}_{[k\neq i]}\exp(\text{sim}(z_i,z_k)/\tau)\right]
# $$
#
# The function $\text{sim}$ is a similarity metric, and the hyperparameter $\tau$ is called temperature determining how peaked the distribution is. Since many similarity metrics are bounded, the temperature parameter allows us to balance the influence of many dissimilar image patches versus one similar patch. The similarity metric that is used in SimCLR is cosine similarity, as defined below:
#
# $$
# \text{sim}(z_i,z_j) = \frac{z_i^\top \cdot z_j}{||z_i||\cdot||z_j||}
# $$
#
# The maximum cosine similarity possible is $1$, while the minimum is $-1$. In general, we will see that the features of two different images will converge to a cosine similarity around zero since the minimum, $-1$, would require $z_i$ and $z_j$ to be in the exact opposite direction in all feature dimensions, which does not allow for great flexibility.
#
# Finally, now that we have discussed all details, let's implement SimCLR below as a PyTorch Lightning module:

class SimCLR(pl.LightningModule):
    """SimCLR contrastive-learning model: a ResNet-18 base encoder f(.) followed
    by a two-layer MLP projection head g(.), trained with the InfoNCE loss."""

    def __init__(self, hidden_dim, lr, temperature, weight_decay, max_epochs=500):
        """
        Args:
            hidden_dim: Output dimensionality of the projection head g(.).
            lr: Peak learning rate for AdamW.
            temperature: Temperature tau of the InfoNCE loss (must be > 0).
            weight_decay: Weight decay for AdamW.
            max_epochs: Total epochs, used as the cosine-annealing horizon.
        """
        super().__init__()
        self.save_hyperparameters()
        assert self.hparams.temperature > 0.0, 'The temperature must be a positive float!'
        # Base model f(.)
        # num_classes=4*hidden_dim sizes the final linear layer as the first MLP layer of g(.)
        self.convnet = torchvision.models.resnet18(pretrained=False,
                                                   num_classes=4*hidden_dim)  # Output of last linear layer
        # The MLP for g(.) consists of Linear->ReLU->Linear
        self.convnet.fc = nn.Sequential(
            self.convnet.fc,  # Linear(ResNet output, 4*hidden_dim)
            nn.ReLU(inplace=True),
            nn.Linear(4*hidden_dim, hidden_dim)
        )

    def configure_optimizers(self):
        """AdamW with cosine-annealed learning rate down to lr/50 over max_epochs."""
        optimizer = optim.AdamW(self.parameters(),
                                lr=self.hparams.lr,
                                weight_decay=self.hparams.weight_decay)
        lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                            T_max=self.hparams.max_epochs,
                                                            eta_min=self.hparams.lr/50)
        return [optimizer], [lr_scheduler]

    def info_nce_loss(self, batch, mode='train'):
        """Compute the InfoNCE loss for a batch of image pairs and log loss/ranking metrics.

        The batch holds n_views=2 augmented views per image; after concatenation,
        the positive of example i sits batch_size//2 positions away.
        """
        imgs, _ = batch
        imgs = torch.cat(imgs, dim=0)

        # Encode all images
        feats = self.convnet(imgs)
        # Calculate cosine similarity
        cos_sim = F.cosine_similarity(feats[:,None,:], feats[None,:,:], dim=-1)
        # Mask out cosine similarity to itself (large negative so softmax weight ~ 0)
        self_mask = torch.eye(cos_sim.shape[0], dtype=torch.bool, device=cos_sim.device)
        cos_sim.masked_fill_(self_mask, -9e15)
        # Find positive example -> batch_size//2 away from the original example
        pos_mask = self_mask.roll(shifts=cos_sim.shape[0]//2, dims=0)
        # InfoNCE loss
        cos_sim = cos_sim / self.hparams.temperature
        nll = -cos_sim[pos_mask] + torch.logsumexp(cos_sim, dim=-1)
        nll = nll.mean()

        # Logging loss
        self.log(mode+'_loss', nll)
        # Get ranking position of positive example
        comb_sim = torch.cat([cos_sim[pos_mask][:,None],  # First position positive example
                              cos_sim.masked_fill(pos_mask, -9e15)],
                             dim=-1)
        sim_argsort = comb_sim.argsort(dim=-1, descending=True).argmin(dim=-1)
        # Logging ranking metrics
        self.log(mode+'_acc_top1', (sim_argsort == 0).float().mean())
        self.log(mode+'_acc_top5', (sim_argsort < 5).float().mean())
        self.log(mode+'_acc_mean_pos', 1+sim_argsort.float().mean())

        return nll

    def training_step(self, batch, batch_idx):
        """Lightning hook: return the training InfoNCE loss."""
        return self.info_nce_loss(batch, mode='train')

    def validation_step(self, batch, batch_idx):
        """Lightning hook: compute and log validation metrics (no return value needed)."""
        self.info_nce_loss(batch, mode='val')

# Alternatively to performing the validation on the contrastive learning loss as well, we could also take a simple, small downstream task, and track the performance of the
base network $f(\cdot)$ on that. However, in this tutorial, we will restrict ourselves to the STL10 dataset where we use the task of image classification on STL10 as our test task. # ### Training # # Now that we have implemented SimCLR and the data loading pipeline, we are ready to train the model. We will use the same training function setup as usual. For saving the best model checkpoint, we track the metric `val_acc_top5`, which describes how often the correct image patch is within the top-5 most similar examples in the batch. This is usually less noisy than the top-1 metric, making it a better metric to choose the best model from. def train_simclr(batch_size, max_epochs=500, **kwargs): trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, 'SimCLR'), gpus=1 if str(device)=='cuda:0' else 0, max_epochs=max_epochs, callbacks=[ModelCheckpoint(save_weights_only=True, mode='max', monitor='val_acc_top5'), LearningRateMonitor('epoch')], progress_bar_refresh_rate=1) trainer.logger._default_hp_metric = None # Optional logging argument that we don't need # Check whether pretrained model exists. 
If yes, load it and skip training pretrained_filename = os.path.join(CHECKPOINT_PATH, 'SimCLR.ckpt') if os.path.isfile(pretrained_filename): print(f'Found pretrained model at {pretrained_filename}, loading...') model = SimCLR.load_from_checkpoint(pretrained_filename) # Automatically loads the model with the saved hyperparameters else: train_loader = data.DataLoader(unlabeled_data, batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True, num_workers=NUM_WORKERS) val_loader = data.DataLoader(train_data_contrast, batch_size=batch_size, shuffle=False, drop_last=False, pin_memory=True, num_workers=NUM_WORKERS) pl.seed_everything(42) # To be reproducable model = SimCLR(max_epochs=max_epochs, **kwargs) trainer.fit(model, train_loader, val_loader) model = SimCLR.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) # Load best checkpoint after training return model # A common observation in contrastive learning is that the larger the batch size, the better the models perform. A larger batch size allows us to compare each image to more negative examples, leading to overall smoother loss gradients. However, in our case, we experienced that a batch size of 256 was sufficient to get good results. simclr_model = train_simclr(batch_size=256, hidden_dim=128, lr=5e-4, temperature=0.07, weight_decay=1e-4, max_epochs=500) # To get an intuition of how training with contrastive learning behaves, we can take a look at the TensorBoard below: # %tensorboard --logdir ../saved_models/tutorial17/tensorboards/SimCLR/ # <center width="100%"><img src="tensorboard_simclr.png" width="1200px"></center> # # One thing to note is that contrastive learning benefits a lot from long training. The shown plot above is from a training that took approx. 1 day on a NVIDIA TitanRTX. Training the model for even longer might reduce its loss further, but we did not experience any gains from it for the downstream task on image classification. 
In general, contrastive learning can also benefit from using larger models, if sufficient unlabeled data is available. # ## Logistic Regression # # After we have trained our model via contrastive learning, we can deploy it on downstream tasks and see how well it performs with little data. A common setup, which also verifies whether the model has learned generalized representations, is to perform Logistic Regression on the features. In other words, we learn a single, linear layer that maps the representations to a class prediction. Since the base network $f(\cdot)$ is not changed during the training process, the model can only perform well if the representations of $h$ describe all features that might be necessary for the task. Further, we do not have to worry too much about overfitting since we have very few parameters that are trained. Hence, we might expect that the model can perform well even with very little data. # # First, let's implement a simple Logistic Regression setup for which we assume that the images already have been encoded in their feature vectors. If very little data is available, it might be beneficial to dynamically encode the images during training so that we can also apply data augmentations. However, the way we implement it here is much more efficient and can be trained within a few seconds. Further, using data augmentations did not show any significant gain in this simple setup. 
class LogisticRegression(pl.LightningModule): def __init__(self, feature_dim, num_classes, lr, weight_decay, max_epochs=100): super().__init__() self.save_hyperparameters() # Mapping from representation h to classes self.model = nn.Linear(feature_dim, num_classes) def configure_optimizers(self): optimizer = optim.AdamW(self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay) lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(self.hparams.max_epochs*0.6), int(self.hparams.max_epochs*0.8)], gamma=0.1) return [optimizer], [lr_scheduler] def _calculate_loss(self, batch, mode='train'): feats, labels = batch preds = self.model(feats) loss = F.cross_entropy(preds, labels) acc = (preds.argmax(dim=-1) == labels).float().mean() self.log(mode + '_loss', loss) self.log(mode + '_acc', acc) return loss def training_step(self, batch, batch_idx): return self._calculate_loss(batch, mode='train') def validation_step(self, batch, batch_idx): self._calculate_loss(batch, mode='val') def test_step(self, batch, batch_idx): self._calculate_loss(batch, mode='test') # The data we use is the training and test set of STL10. The training contains 500 images per class, while the test set has 800 images per class. # + img_transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]) train_img_data = STL10(root=DATASET_PATH, split='train', download=True, transform=img_transforms) test_img_data = STL10(root=DATASET_PATH, split='test', download=True, transform=img_transforms) print("Number of training examples:", len(train_img_data)) print("Number of test examples:", len(test_img_data)) # - # Next, we implement a small function to encode all images in our datasets. The output representations are then used as inputs to the Logistic Regression model. @torch.no_grad() def prepare_data_features(model, dataset): # Prepare model network = deepcopy(model.convnet) network.fc = nn.Identity() # Removing projection head g(.) 
network.eval() network.to(device) # Encode all images data_loader = data.DataLoader(dataset, batch_size=64, num_workers=NUM_WORKERS, shuffle=False, drop_last=False) feats, labels = [], [] for batch_imgs, batch_labels in tqdm(data_loader): batch_imgs = batch_imgs.to(device) batch_feats = network(batch_imgs) feats.append(batch_feats.detach().cpu()) labels.append(batch_labels) feats = torch.cat(feats, dim=0) labels = torch.cat(labels, dim=0) # Sort images by labels labels, idxs = labels.sort() feats = feats[idxs] return data.TensorDataset(feats, labels) # Let's apply the function to both training and test set below. train_feats_simclr = prepare_data_features(simclr_model, train_img_data) test_feats_simclr = prepare_data_features(simclr_model, test_img_data) # Finally, we can write a training function as usual. We evaluate the model on the test set every 10 epochs to allow early stopping, but the low frequency of the validation ensures that we do not overfit too much on the test set. def train_logreg(batch_size, train_feats_data, test_feats_data, model_suffix, max_epochs=100, **kwargs): trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, "LogisticRegression"), gpus=1 if str(device)=="cuda:0" else 0, max_epochs=max_epochs, callbacks=[ModelCheckpoint(save_weights_only=True, mode='max', monitor='val_acc'), LearningRateMonitor("epoch")], progress_bar_refresh_rate=0, check_val_every_n_epoch=10) trainer.logger._default_hp_metric = None # Data loaders train_loader = data.DataLoader(train_feats_data, batch_size=batch_size, shuffle=True, drop_last=False, pin_memory=True, num_workers=0) test_loader = data.DataLoader(test_feats_data, batch_size=batch_size, shuffle=False, drop_last=False, pin_memory=True, num_workers=0) # Check whether pretrained model exists. 
If yes, load it and skip training pretrained_filename = os.path.join(CHECKPOINT_PATH, f"LogisticRegression_{model_suffix}.ckpt") if os.path.isfile(pretrained_filename): print(f"Found pretrained model at {pretrained_filename}, loading...") model = LogisticRegression.load_from_checkpoint(pretrained_filename) else: pl.seed_everything(42) # To be reproducable model = LogisticRegression(**kwargs) trainer.fit(model, train_loader, test_loader) model = LogisticRegression.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) # Test best model on train and validation set train_result = trainer.test(model, test_dataloaders=train_loader, verbose=False) test_result = trainer.test(model, test_dataloaders=test_loader, verbose=False) result = {"train": train_result[0]["test_acc"], "test": test_result[0]["test_acc"]} return model, result # Despite the training dataset of STL10 already only having 500 labeled images per class, we will perform experiments with even smaller datasets. Specifically, we train a Logistic Regression model for datasets with only 10, 20, 50, 100, 200, and all 500 examples per class. This gives us an intuition on how well the representations learned by contrastive learning can be transfered to a image recognition task like this classification. First, let's define a function to create the intended sub-datasets from the full training set: def get_smaller_dataset(original_dataset, num_imgs_per_label): new_dataset = data.TensorDataset( *[t.unflatten(0, (10, -1))[:,:num_imgs_per_label].flatten(0, 1) for t in original_dataset.tensors] ) return new_dataset # Next, let's run all models. Despite us training 6 models, this cell could be run within a minute or two without the pretrained models. 
results = {} for num_imgs_per_label in [10, 20, 50, 100, 200, 500]: sub_train_set = get_smaller_dataset(train_feats_simclr, num_imgs_per_label) _, small_set_results = train_logreg(batch_size=64, train_feats_data=sub_train_set, test_feats_data=test_feats_simclr, model_suffix=num_imgs_per_label, feature_dim=train_feats_simclr.tensors[0].shape[1], num_classes=10, lr=1e-3, weight_decay=1e-3) results[num_imgs_per_label] = small_set_results # Finally, let's plot the results. # + dataset_sizes = sorted([k for k in results]) test_scores = [results[k]["test"] for k in dataset_sizes] fig = plt.figure(figsize=(6,4)) plt.plot(dataset_sizes, test_scores, '--', color="#000", marker="*", markeredgecolor="#000", markerfacecolor="y", markersize=16) plt.xscale("log") plt.xticks(dataset_sizes, labels=dataset_sizes) plt.title("STL10 classification over dataset size", fontsize=14) plt.xlabel("Number of images per class") plt.ylabel("Test accuracy") plt.minorticks_off() plt.show() for k, score in zip(dataset_sizes, test_scores): print(f'Test accuracy for {k:3d} images per label: {100*score:4.2f}%') # - # As one would expect, the classification performance improves the more data we have. However, with only 10 images per class, we can already classify more than 60% of the images correctly. This is quite impressive, considering that the images are also higher dimensional than e.g. CIFAR10. With the full dataset, we achieve an accuracy of 81%. The increase between 50 to 500 images per class might suggest a linear increase in performance with an exponentially larger dataset. However, with even more data, we could also finetune $f(\cdot)$ in the training process, allowing for the representations to adapt more to the specific classification task given. # # To set the results above into perspective, we will train the base network, a ResNet-18, on the classification task from scratch. 
# ## Baseline # # As a baseline to our results above, we will train a standard ResNet-18 with random initialization on the labeled training set of STL10. The results will give us an indication of the advantages that contrastive learning on unlabeled data has compared to using only supervised training. The implementation of the model is straightforward since the ResNet architecture is provided in the torchvision library. class ResNet(pl.LightningModule): def __init__(self, num_classes, lr, weight_decay, max_epochs=100): super().__init__() self.save_hyperparameters() self.model = torchvision.models.resnet18( pretrained=False, num_classes=num_classes) def configure_optimizers(self): optimizer = optim.AdamW(self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay) lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(self.hparams.max_epochs*0.7), int(self.hparams.max_epochs*0.9)], gamma=0.1) return [optimizer], [lr_scheduler] def _calculate_loss(self, batch, mode='train'): imgs, labels = batch preds = self.model(imgs) loss = F.cross_entropy(preds, labels) acc = (preds.argmax(dim=-1) == labels).float().mean() self.log(mode + '_loss', loss) self.log(mode + '_acc', acc) return loss def training_step(self, batch, batch_idx): return self._calculate_loss(batch, mode='train') def validation_step(self, batch, batch_idx): self._calculate_loss(batch, mode='val') def test_step(self, batch, batch_idx): self._calculate_loss(batch, mode='test') # It is clear that the ResNet easily overfits on the training data since its parameter count is more than 1000 times larger than the dataset size. To make the comparison to the contrastive learning models fair, we apply data augmentations similar to the ones we used before: horizontal flip, crop-and-resize, grayscale, and gaussian blur. Color distortions as before are not used because the color distribution of an image showed to be an important feature for the classification. 
Hence, we observed no noticeable performance gains when adding color distortions to the set of augmentations. Similarly, we restrict the resizing operation before cropping to the max. 125% of its original resolution, instead of 1250% as done in SimCLR. This is because, for classification, the model needs to recognize the full object, while in contrastive learning, we only want to check whether two patches belong to the same image/object. Hence, the chosen augmentations below are overall weaker than in the contrastive learning case. # + train_transforms = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.RandomResizedCrop(size=96, scale=(0.8, 1.0)), transforms.RandomGrayscale(p=0.2), transforms.GaussianBlur(kernel_size=9, sigma=(0.1, 0.5)), transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)) ]) train_img_aug_data = STL10(root=DATASET_PATH, split='train', download=True, transform=train_transforms) # - # The training function for the ResNet is almost identical to the Logistic Regression setup. Note that we allow the ResNet to perform validation every 2 epochs to also check whether the model overfits strongly in the first iterations or not. def train_resnet(batch_size, max_epochs=100, **kwargs): trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, "ResNet"), gpus=1 if str(device)=="cuda:0" else 0, max_epochs=max_epochs, callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"), LearningRateMonitor("epoch")], progress_bar_refresh_rate=1, check_val_every_n_epoch=2) trainer.logger._default_hp_metric = None # Data loaders train_loader = data.DataLoader(train_img_aug_data, batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True, num_workers=NUM_WORKERS) test_loader = data.DataLoader(test_img_data, batch_size=batch_size, shuffle=False, drop_last=False, pin_memory=True, num_workers=NUM_WORKERS) # Check whether pretrained model exists. 
If yes, load it and skip training pretrained_filename = os.path.join(CHECKPOINT_PATH, "ResNet.ckpt") if os.path.isfile(pretrained_filename): print("Found pretrained model at %s, loading..." % pretrained_filename) model = ResNet.load_from_checkpoint(pretrained_filename) else: pl.seed_everything(42) # To be reproducable model = ResNet(**kwargs) trainer.fit(model, train_loader, test_loader) model = ResNet.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) # Test best model on validation set train_result = trainer.test(model, test_dataloaders=train_loader, verbose=False) val_result = trainer.test(model, test_dataloaders=test_loader, verbose=False) result = {"train": train_result[0]["test_acc"], "test": val_result[0]["test_acc"]} return model, result # Finally, let's train the model and check its results: resnet_model, resnet_result = train_resnet(batch_size=64, num_classes=10, lr=1e-3, weight_decay=2e-4, max_epochs=100) print(f"Accuracy on training set: {100*resnet_result['train']:4.2f}%") print(f"Accuracy on test set: {100*resnet_result['test']:4.2f}%") # The ResNet trained from scratch achieves 73.31% on the test set. This is almost 8% less than the contrastive learning model, and even slightly less than SimCLR achieves with 1/10 of the data. This shows that self-supervised, contrastive learning provides considerable performance gains by leveraging large amounts of unlabeled data when little labeled data is available. # ## Conclusion # # In this tutorial, we have discussed self-supervised contrastive learning and implemented SimCLR as an example method. We have applied it to the STL10 dataset and showed that it can learn generalizable representations that we can use to train simple classification models. With 500 images per label, it achieved an 8% higher accuracy than a similar model solely trained from supervision and performs on par with it when only using a tenth of the labeled data. 
Our experimental results are limited to a single dataset, but recent works such as [<NAME> et al.](https://arxiv.org/abs/2006.10029) showed similar trends for larger datasets like ImageNet. Besides the discussed hyperparameters, the size of the model seems to be important in contrastive learning as well. If a lot of unlabeled data is available, larger models can achieve much stronger results and come close to their supervised baselines. Further, there are also approaches for combining contrastive and supervised learning, leading to performance gains beyond supervision (see [Khosla et al.](https://arxiv.org/abs/2004.11362)). Moreover, contrastive learning is not the only approach to self-supervised learning that has come up in the last two years and showed great results. Other methods include distillation-based methods like [BYOL](https://arxiv.org/abs/2006.07733) and redundancy reduction techniques like [Barlow Twins](https://arxiv.org/abs/2103.03230). There is a lot more to explore in the self-supervised domain, and more, impressive steps ahead are to be expected. # # ### References # # [1] <NAME>., <NAME>., <NAME>., and <NAME>. (2020). A simple framework for contrastive learning of visual representations. In International conference on machine learning (pp. 1597-1607). PMLR. ([link](https://arxiv.org/abs/2002.05709)) # # [2] <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2020). Big self-supervised models are strong semi-supervised learners. NeurIPS 2021 ([link](https://arxiv.org/abs/2006.10029)). # # [3] <NAME>., <NAME>., and <NAME>. (2018). Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748. ([link](https://arxiv.org/abs/1807.03748)) # # [4] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>. (2020). Bootstrap your own latent: A new approach to self-supervised learning. arXiv preprint arXiv:2006.07733. 
([link](https://arxiv.org/abs/2006.07733))
#
# [5] Khosla, P., Teterwak, P., Wang, C., Sarna, A., Tian, Y., Isola, P., Maschinot, A., Liu, C. and Krishnan, D. (2020). Supervised contrastive learning. arXiv preprint arXiv:2004.11362. ([link](https://arxiv.org/abs/2004.11362))
#
# [6] Zbontar, J., Jing, L., Misra, I., LeCun, Y. and Deny, S. (2021). Barlow twins: Self-supervised learning via redundancy reduction. arXiv preprint arXiv:2103.03230. ([link](https://arxiv.org/abs/2103.03230))
docs/tutorial_notebooks/tutorial17/SimCLR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Use the Mapbox API to Create a Geospatial Visualization # ## <font color=MediumSlateBlue>1) Import Libraries</font> import plotly.express as px import pandas as pd import os from pathlib import Path from dotenv import load_dotenv # ## <font color=MediumSlateBlue>2) Load Mapbox API Token</font> # + # Read Mapbox API from from .env load_dotenv() mapbox_api_access_token = os.getenv("MAPBOX_API_ACCESS_TOKEN") # Confirm Mapbox token available if not mapbox_api_access_token: print("Error with the Mapbox API access token: Check the .env file.") # - # Set the Mapbox API access token px.set_mapbox_access_token(mapbox_api_access_token) # ## <font color=MediumSlateBlue>3) Load CSV into DataFrame</font> # Read the csv into a DataFrame used_car_df = pd.read_csv(Path('../Resources/vehicles_mod.csv'), index_col='posting_date') # Verify import of the DataFrame used_car_df # ## <font color=MediumSlateBlue>4) Cull Incorrect Lat/Long Values</font> # + # Boundaries of the continental United States top = 49.3457868 # north lat left = -124.7844079 # west long right = -66.9513812 # east long bottom = 24.7433195 # south lat within_us_borders_df = used_car_df.loc[ (bottom <= used_car_df['lat']) & (used_car_df['lat'] <= top) & (left <= used_car_df['long']) & (used_car_df['long'] <= right) ] # - within_us_borders_df # ## <font color=MediumSlateBlue>~~4) Plot the DataFrame~~</font> px.scatter_mapbox( used_car_df, lat = 'lat', lon = 'long', color = 'model', zoom = 8, title = 'Map of US Used Car Sales' )
notebooks/data_viz_with_no_desc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Build Hot 100 archive # # > There are some significant flaws in the files resulting from these scripts that make the data fairly unusable. Since titles and artists mix quotes and include commas, columns are not parsed correctly. This could possible be fixed in the script, or after the fact, but I don't have it in me to figure it out. # # Idea of this notebook is to build an archive of the Hot 100 from the the current date to the oldest date __1958-08-02__. It's currently set up to pull a year's worth. The idea is to stack them later. (I started doing it by year after hitting the timeouts listed above, just in case it was a reading error locally on a large file. The doesn't appear to be the case.) # # There is a rate limit on requests to the billboard site. I've had it time out after 10 requests, but I've also had it time out after one if I've run other requests recently. # # For each chart, we have `chart.previousDate` to work with, which allows us to walk back in time. The loop works like this: # # - Open our file # - check for the oldest date, start new if not results already # - Find the next oldest chart # - Start a loop and counter and write the results of that week's chart # - Set the chart date to the next oldest date # - Check if that is in our current year. Break if not. # - Wait a time interval and loop again if counter is not maxed # # This doesn't completely solve the rate limit, but does pretty well at 10 seconds a week. 
# import billboard from datetime import datetime, timedelta, date import os import pandas as pd import time # ## Settings # + # chart type from api chart_type = 'hot-100' # year we are working on output_year = "2021" # output path outfilename = "../data/hot-100-" + output_year + ".csv" print(outfilename) # - # ## Create the file # + # headers header = 'date,title,artist,current,previous,peak,weeks\n' # set exists flag file_exists = os.path.exists(outfilename) # checks if file exists and writes if not if file_exists != True: with open(outfilename, 'a') as outputfile: outputfile.write(header) print("File created with header") # checks if file empty and writes header if not else: file_empty = os.stat(outfilename).st_size == 0 if file_empty: with open(outfilename, 'a') as outputfile: outputfile.write(header) print("Added header") else: print("File has data") # - # ## Chart loop # # This loop checks the most recent date of the current year's file. If it is new, it starts with the last chart in December and then through older charts. If there are charts already, it picks up where it left off. # # Beyond `output_year` above, there are two settings to help control rate limiting: # # - counter: How many loops it will do before stopping. # - timer_interval: How long to wait before getting the next chart. 
# + # set the counter counter = 53 # set the time intervval timer_interval = 10 # read in file top_100 = pd.read_csv(outfilename) # find most oldest week in output oldest_date = top_100.date.min() # if oldest_date isnull, then use begin_chart date if pd.isnull(oldest_date): begin_chart_date = output_year + "-12-25" chart = billboard.ChartData(chart_type, date=begin_chart_date) print("Starting new year") print("Beginning date: " + chart.date) # else, use next previous date else: chart = billboard.ChartData(chart_type, date=oldest_date) chart = billboard.ChartData(chart_type, str(chart.previousDate)) print("Picking up after: " + oldest_date) print("Beginning date: " + chart.date) with open(outfilename, 'a') as outputfile: start_time = time.time() for i in range (1,counter+1): for position in range (0,100): song = chart[position] line_out = str(chart.date) + ',' + '"' + song.title + '"' + ',' + '"' \ + song.artist + '"' + ',' + str(song.rank) + ',' + str(song.lastPos) \ + ',' + str(song.peakPos) + ',' + str(song.weeks) + '\n' with open(outfilename, 'a') as outputfile: outputfile.write(line_out) print(chart.date + ": " + str(chart[0])) chart = billboard.ChartData(chart_type, str(chart.previousDate)) # check if year is over if chart.date[:4] != output_year: print("Year is over") break else: time.sleep(timer_interval) print('done') outputfile.close() # - print(chart.) # ## Some testing # # This checks the lengh of the last file processed. Should be 5200, unless it is a leap year that starts on a Saturday, like 2016. # + # read in the file chart_peek = pd.read_csv(outfilename) # check the length len(chart_peek) # -
notebooks/01-build-archive-hot100.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # A first example of machine learning # == # In this notebook we'll apply a scikit-learn pipeline to a simple dataset (the listing of apartments in Airbnb of Berlin), and see how overfitting looks like. import numpy as np # We can invoke system commands by prepending them with a `!`, commands like `head`, `tail`, `wc` can be useful to quickly inspect a text file. Most of them are not present on Windows. # !head listings.csv # numpy provides the function `loadtxt` to load simple CSV files # + #np.loadtxt('listings.csv', delimiter=',', usecols=(54, 59, 48, 49, 79 ), skiprows=1) # - # It does not work because this file contains newlines inside the fields. Luckily the Python CSV module can still process it. # # This code loads some columns from the CSV into separate numpy arrays. # # First, we create plain Python lists, then replace them with proper arrays (faster and smaller). # # Don;t worry: with Pandas this kind of operation becomes much easier. 
# + from csv import DictReader review_scores_rating = [] price = [] latitude = [] longitude = [] bathrooms = [] for l in DictReader(open('listings.csv')): price.append(l['price']) review_scores_rating.append(l['review_scores_rating']) latitude.append(l['latitude']) longitude.append(l['longitude']) bathrooms.append(l['bathrooms']) latitude = np.array([float(l) for l in latitude]) longitude = np.array([float(l) for l in longitude]) price = np.array([float(l[1:].replace(',', '')) for l in price]) # We assume the rating is 1 if not specified review_scores_rating = np.array([int(l) if l != '' else 0 for l in review_scores_rating]) # We assume there's 1 bathroom if not stated otherwise bathrooms = np.array([float(l) if l != '' else 1 for l in bathrooms]) # - # It's very useful to have a look at the shape of the numpy arrays. print(latitude.shape) print(bathrooms.shape) # + # %matplotlib inline import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression # change the figure size from matplotlib.pyplot import figure figure(num=None, figsize=(8, 6), dpi=80) # reshape is needed to create a second dimension of size 1 X = price.T.reshape(-1, 1) Y = review_scores_rating.T model = LinearRegression() model.fit(X, Y) model.score(X,Y) plt.scatter(X, Y, marker='X') x_plot = np.linspace(0, 9000, 200) y_plot = model.predict(x_plot.reshape(-1, 1)) plt.plot(x_plot, y_plot, color='red') plt.show() # - # Turns out there are prices much much greater than the rest, making the visualization and the model less effective. Let's ignore them by placing a cap of 500 on the price. 
# too_high = np.argwhere(price > 500) print(f'shape before: {price.shape}') Ylow = np.delete(Y, too_high) Xlow = np.delete(price, too_high).reshape(-1, 1) print(f'shape after: {Xlow.shape}') # + model = LinearRegression() model.fit(Xlow, Ylow) model.score(Xlow, Ylow) plt.scatter(Xlow, Ylow, marker='X') x_plot = np.linspace(0, 500, 200) y_plot = model.predict(x_plot.reshape(-1, 1)) plt.plot(x_plot, y_plot, color='red') plt.show() # - # In scikit you can combine models using `make_pipeline`, in this case we combine `PolynomialFeatures` with `LinearRegression`, to run a linear regression on the features generated by the first step, which are the original ones multiplied and to various powers. # + from sklearn.preprocessing import PolynomialFeatures from sklearn.pipeline import make_pipeline X = np.vstack((latitude, longitude, bathrooms)).T print(f'the shape of X is {X.shape}') Y = review_scores_rating.T print(f'the shape of Y is {Y.shape}') for degree in range(1, 20): model = make_pipeline(PolynomialFeatures(degree), LinearRegression()) model.fit(X, Y) score = model.score(X, Y) print(f'with degree {degree} the score was {score:.5f}') # - # The model reaches the best score at degree 11 (notice it could change with other cities). This seems the best result, but what is happening here is that we have overfitting. The dataset we use to check the model is the same we used to train it. # # Let's try instead by partitioning the data in train and test datasets. 
# + train_X = X[:21000,:] test_X = X[21000:,:] train_Y = Y[:21000] test_Y = Y[21000:] for degree in range(1, 20): model = make_pipeline(PolynomialFeatures(degree), LinearRegression()) model.fit(train_X, train_Y) score = model.score(test_X, test_Y) print(f'with degree {degree} the score was {score}') # + # change the figure size from matplotlib.pyplot import figure figure(num=None, figsize=(8, 6), dpi=80) # reshape is needed to create a second dimension of size 1 X = price.T.reshape(-1, 1) model = make_pipeline(PolynomialFeatures(20), LinearRegression()) #model = LinearRegression() model.fit(X, Y) model.score(X,Y) plt.scatter(X, Y, marker='X') x_plot = np.linspace(0, 9000, 200) y_plot = model.predict(x_plot.reshape(-1, 1)) plt.plot(x_plot, y_plot, color='red') plt.show() # - # Turns out there are prices much much greater than the rest, making the visualization and the model pointless. Let's ignore them by placing a cap of 500 on the data. # + figure(num=None, figsize=(8, 6), dpi=80) model = make_pipeline(PolynomialFeatures(30), LinearRegression()) #model = LinearRegression() model.fit(Xlow, Ylow) model.score(Xlow, Ylow) plt.scatter(Xlow, Ylow, marker='X') x_plot = np.linspace(0, 500, 200) y_plot = model.predict(x_plot.reshape(-1, 1)) plt.plot(x_plot, y_plot, color='red') plt.show() # - # Just for fun, let's draw a map of prices # + figure(num=None, figsize=(9, 7), dpi=80) plt.scatter(latitude, longitude, c=review_scores_rating, marker='.', cmap=plt.cm.get_cmap('inferno'))
scikit_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Regression Week 2: Multiple Regression (Interpretation) # The goal of this first notebook is to explore multiple regression and feature engineering with existing graphlab functions. # # In this notebook you will use data on house sales in King County to predict prices using multiple regression. You will: # * Use SFrames to do some feature engineering # * Use built-in graphlab functions to compute the regression weights (coefficients/parameters) # * Given the regression weights, predictors and outcome write a function to compute the Residual Sum of Squares # * Look at coefficients and interpret their meanings # * Evaluate multiple models via RSS # # Fire up graphlab create import graphlab # # Load in house sales data # # Dataset is from house sales in King County, the region where the city of Seattle, WA is located. sales = graphlab.SFrame('kc_house_data.gl/') # # Split data into training and testing. # We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you). 
train_data,test_data = sales.random_split(.8,seed=0) # # Learning a multiple regression model # Recall we can use the following code to learn a multiple regression model predicting 'price' based on the following features: # example_features = ['sqft_living', 'bedrooms', 'bathrooms'] on training data with the following code: # # (Aside: We set validation_set = None to ensure that the results are always the same) example_features = ['sqft_living', 'bedrooms', 'bathrooms'] example_model = graphlab.linear_regression.create(train_data, target = 'price', features = example_features, validation_set = None) # Now that we have fitted the model we can extract the regression weights (coefficients) as an SFrame as follows: example_weight_summary = example_model.get("coefficients") print example_weight_summary # # Making Predictions # # In the gradient descent notebook we use numpy to do our regression. In this book we will use existing graphlab create functions to analyze multiple regressions. # # Recall that once a model is built we can use the .predict() function to find the predicted values for data we pass. For example using the example model above: example_predictions = example_model.predict(train_data) print example_predictions[0] # should be 271789.505878 # # Compute RSS # Now that we can make predictions given the model, let's write a function to compute the RSS of the model. Complete the function below to calculate RSS given the model, data, and the outcome. 
import math def get_residual_sum_of_squares(model, data, outcome): # First get the predictions predictions = model.predict(data) # Then compute the residuals/errors residuals = (outcome - predictions) # Then square and add them up RSS = sum(residuals ** 2) return(RSS) # Test your function by computing the RSS on TEST data for the example model: rss_example_train = get_residual_sum_of_squares(example_model, test_data, test_data['price']) print rss_example_train # should be 2.7376153833e+14 # # Create some new features # Although we often think of multiple regression as including multiple different features (e.g. # of bedrooms, squarefeet, and # of bathrooms) but we can also consider transformations of existing features e.g. the log of the squarefeet or even "interaction" features such as the product of bedrooms and bathrooms. # You will use the logarithm function to create a new feature. so first you should import it from the math library. from math import log # Next create the following 4 new features as column in both TEST and TRAIN data: # * bedrooms_squared = bedrooms\*bedrooms # * bed_bath_rooms = bedrooms\*bathrooms # * log_sqft_living = log(sqft_living) # * lat_plus_long = lat + long # ### As an example here's the first one: train_data['bedrooms_squared'] = train_data['bedrooms'].apply(lambda x: x**2) test_data['bedrooms_squared'] = test_data['bedrooms'].apply(lambda x: x**2) # + # create the remaining 3 features in both TEST and TRAIN data train_data['bed_bath_rooms'] = train_data['bedrooms'] * train_data['bathrooms'] test_data['bed_bath_rooms'] = test_data['bedrooms'] * test_data['bathrooms'] train_data['log_sqft_living'] = train_data['sqft_living'].apply(lambda x : log (x)) test_data['log_sqft_living'] = test_data['sqft_living'].apply(lambda x : log (x)) train_data['lat_plus_long'] = train_data['lat'] + train_data['long'] test_data['lat_plus_long'] = test_data['lat'] + test_data['long'] # - # * Squaring bedrooms will increase the separation between not 
many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this feature will mostly affect houses with many bedrooms. # * bedrooms times bathrooms gives what's called an "interaction" feature. It is large when *both* of them are large. # * Taking the log of squarefeet has the effect of bringing large values closer together and spreading out small values. # * Adding latitude to longitude is totally non-sensical but we will do it anyway (you'll see why) # **Quiz Question: What is the mean (arithmetic average) value of your 4 new features on TEST data? (round to 2 digits)** mean_of_bedrooms_squared = test_data['bedrooms_squared'].mean() mean_of_bed_bath_rooms = test_data['bed_bath_rooms'].mean() mean_of_log_sqft_living = test_data['log_sqft_living'].mean() mean_of_lat_plus_long = test_data['lat_plus_long'].mean() mean_of_all_new_features = (test_data['bedrooms_squared'] + test_data['bed_bath_rooms'] + test_data['log_sqft_living'] + test_data['lat_plus_long'] ).mean() print(mean_of_bedrooms_squared) print(mean_of_bed_bath_rooms) print(mean_of_log_sqft_living) print(mean_of_lat_plus_long) print(mean_of_all_new_features) # # Learning Multiple Models # Now we will learn the weights for three (nested) models for predicting house prices. 
The first model will have the fewest features the second model will add one more feature and the third will add a few more: # * Model 1: squarefeet, # bedrooms, # bathrooms, latitude & longitude # * Model 2: add bedrooms\*bathrooms # * Model 3: Add log squarefeet, bedrooms squared, and the (nonsensical) latitude + longitude model_1_features = ['sqft_living', 'bedrooms', 'bathrooms', 'lat', 'long'] model_2_features = model_1_features + ['bed_bath_rooms'] model_3_features = model_2_features + ['bedrooms_squared', 'log_sqft_living', 'lat_plus_long'] # Now that you have the features, learn the weights for the three different models for predicting target = 'price' using graphlab.linear_regression.create() and look at the value of the weights/coefficients: # Learn the three models: (don't forget to set validation_set = None) model_1 = graphlab.linear_regression.create(train_data, target = 'price', features = model_1_features, validation_set = None) model_2 = graphlab.linear_regression.create(train_data, target = 'price', features = model_2_features, validation_set = None) model_3 = graphlab.linear_regression.create(train_data, target = 'price', features = model_3_features, validation_set = None) # + # Examine/extract each model's coefficients: model_1_weight_summary = model_1.get("coefficients") model_2_weight_summary = model_2.get("coefficients") model_3_weight_summary = model_3.get("coefficients") print model_1_weight_summary print model_2_weight_summary print model_3_weight_summary # - # **Quiz Question: What is the sign (positive or negative) for the coefficient/weight for 'bathrooms' in model 1?** # # **Quiz Question: What is the sign (positive or negative) for the coefficient/weight for 'bathrooms' in model 2?** # # Think about what this means. # # Comparing multiple models # # Now that you've learned three models and extracted the model weights we want to evaluate which model is best. 
# First use your functions from earlier to compute the RSS on TRAINING Data for each of the three models. # Compute the RSS on TRAINING data for each of the three models and record the values: rss_model_1_train = get_residual_sum_of_squares(model_1, train_data, train_data['price']) rss_model_2_train = get_residual_sum_of_squares(model_2, train_data, train_data['price']) rss_model_3_train = get_residual_sum_of_squares(model_3, train_data, train_data['price']) print rss_model_1_train print rss_model_2_train print rss_model_3_train # **Quiz Question: Which model (1, 2 or 3) has lowest RSS on TRAINING Data?** Is this what you expected? # Now compute the RSS on on TEST data for each of the three models. # Compute the RSS on TESTING data for each of the three models and record the values: rss_model_1_test = get_residual_sum_of_squares(model_1, test_data, test_data['price']) rss_model_2_test = get_residual_sum_of_squares(model_2, test_data, test_data['price']) rss_model_3_test = get_residual_sum_of_squares(model_3, test_data, test_data['price']) print rss_model_1_test print rss_model_2_test print rss_model_3_test # **Quiz Question: Which model (1, 2 or 3) has lowest RSS on TESTING Data?** Is this what you expected? Think about the features that were added to each model from the previous.
2- Regression/projects/week 2/week-2-multiple-regression-assignment-1-blank.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Patient Overlap and Data Leakage # # Patient overlap in medical data is a part of a more general problem in machine learning called **data leakage**. To identify patient overlap in this notebook, we'll check to see if a patient's ID appears in both the training set and the test set. We should also verify that we don't have patient overlap in the training and validation sets, which is what we'll do here. # # Below is a simple example showing how we can check for and remove patient overlap in our training and validations sets. # ### Import necessary packages import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import os import seaborn as sns sns.set() # ### Read in the data from a csv file # # First, we'll read in our training and validation datasets from csv files. Running the next two cells to read these csvs into `pandas` dataframes. # Read csv file containing training data train_df = pd.read_csv("nih/train-small.csv") # Print first 5 rows print(f'There are {train_df.shape[0]} rows and {train_df.shape[1]} columns in the training dataframe') train_df.head() # ### Read csv file containing validation data valid_df = pd.read_csv("nih/valid-small.csv") # Print first 5 rows print(f'There are {valid_df.shape[0]} rows and {valid_df.shape[1]} columns in the validation dataframe') valid_df.head() # ### Extract and compare the PatientId columns from the train and validation sets # By running the next four cells we will do the following: # 1. Extract patient IDs from the train and validation sets # 2. Convert these arrays of numbers into `set()` datatypes for easy comparison # 3. 
Identify patient overlap in the intersection of the two sets # ### Extract patient id's for the training set # ids_train = train_df.PatientId.values # ### Extract patient id's for the validation set ids_valid = valid_df.PatientId.values # ### Create a "set" datastructure of the training & Test set id's to identify unique id's # + ids_train_set = set(ids_train) print(f'There are {len(ids_train_set)} unique Patient IDs in the training set') ids_valid_set = set(ids_valid) print(f'There are {len(ids_valid_set)} unique Patient IDs in the validation set') # - # ### Identify patient overlap by looking at the intersection between the sets patient_overlap = list(ids_train_set.intersection(ids_valid_set)) n_overlap = len(patient_overlap) print(f'There are {n_overlap} Patient IDs in both the training and validation sets') print('') print(f'These patients are in both the training and validation datasets:') print(f'{patient_overlap}') # ### Identify rows (indices) of overlapping patients and remove from either the train or validation set # Running the next two cells to do the following: # 1. Create lists of the overlapping row numbers in both the training and validation sets. # 2. 
Drop the overlapping patient records from the validation set (could also choose to drop from train set) # ### Creating lists of the overlapping row numbers in both the training and validation sets # + train_overlap_idxs = [] valid_overlap_idxs = [] for idx in range(n_overlap): train_overlap_idxs.extend(train_df.index[train_df['PatientId'] == patient_overlap[idx]].tolist()) valid_overlap_idxs.extend(valid_df.index[valid_df['PatientId'] == patient_overlap[idx]].tolist()) print(f'These are the indices of overlapping patients in the training set: ') print(f'{train_overlap_idxs}') print(f'These are the indices of overlapping patients in the validation set: ') print(f'{valid_overlap_idxs}') # - # ### Dropping the overlapping patient records from the validation set (could also choose to drop from train set) # + train_overlap_idxs = [] valid_overlap_idxs = [] for idx in range(n_overlap): train_overlap_idxs.extend(train_df.index[train_df['PatientId'] == patient_overlap[idx]].tolist()) valid_overlap_idxs.extend(valid_df.index[valid_df['PatientId'] == patient_overlap[idx]].tolist()) print(f'These are the indices of overlapping patients in the training set: ') print(f'{train_overlap_idxs}') print(f'These are the indices of overlapping patients in the validation set: ') print(f'{valid_overlap_idxs}') # - # ### Drop the overlapping rows from the validation set valid_df.drop(valid_overlap_idxs, inplace=True) # ### Checking that everything worked as planned by rerunning the patient ID comparison between train and validation sets. # # When we run the next two cells we should see that there are now fewer records in the validation set and that the overlap problem has been removed! 
# ## # Extract patient id's for the validation set ids_valid = valid_df.PatientId.values # Create a "set" datastructure of the validation set id's to identify unique id's ids_valid_set = set(ids_valid) print(f'There are {len(ids_valid_set)} unique Patient IDs in the validation set') # ### Identify patient overlap by looking at the intersection between the sets patient_overlap = list(ids_train_set.intersection(ids_valid_set)) n_overlap = len(patient_overlap) print(f'There are {n_overlap} Patient IDs in both the training and validation sets') # ### Congratulations! We have removed overlapping patients from the validation set! # # We could have just as well removed them from the training set. Always be sure to check for patient overlap in the train, validation and test sets. # # Thank you !!!
Notebooks/Handling Patient Overlap & Data Leakage.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/dellielo/test/blob/master/test_lib.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="DBMh81wc-vOp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="c7eea32d-55f9-4a84-e8e9-fb3df84274ae" from google.colab import drive drive.mount('/content/drive/') # + id="S74nLGnEOSCS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="590530b6-d367-4e55-9619-a792bef57bd2" # !ls "/content/drive/My Drive/" # + id="k5uenYBvOWfI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f81c6624-2f47-4fa4-e446-1d32b43a7fd7" with open("/content/drive/My Drive/Colab Notebooks/toto.txt", "w") as fic: fic.write("Hello") # !ls "/content/drive/My Drive/Colab Notebooks" # + id="y9TwZRl2-v_u" colab_type="code" outputId="cb1c63b8-c6d3-4287-9a72-2f2791de4a53" colab={"base_uri": "https://localhost:8080/", "height": 731} # !pip install https://github.com/dellielo/kahelo/archive/v.0.1.0.zip # !pip install -U segmentation-models import cv2 import kahelo # + id="o_6kaFbcCaKk" colab_type="code" outputId="e9de9010-9e22-4887-d352-3d071cc1eefe" colab={"base_uri": "https://localhost:8080/", "height": 51} print(cv2.__version__) print(dir(kahelo)) # + id="Dhgz0POjCgUK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="e80c9504-34e8-461b-9153-23e3b47f82b3" # !pip install -U albumentations>=0.3.0 --user # !pip install -U --pre segmentation-models --user # + [markdown] id="TW2H1fwKCf-6" colab_type="text" # # + id="qBCHb6ZhIBVG" colab_type="code" colab={"base_uri": "https://localhost:8080/", 
"height": 34} outputId="95d4b2be-953c-4c77-b06f-4c8bab15456f" import os os.environ['CUDA_VISIBLE_DEVICES'] = '0' import cv2 import keras import numpy as np import matplotlib.pyplot as plt # + id="7XXVceExIF1N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="12c0a977-61f3-4abc-e928-887401760a2a" DATA_DIR = './data/CamVid/' # load repo with data if it is not exists if not os.path.exists(DATA_DIR): print('Loading data...') os.system('git clone https://github.com/alexgkendall/SegNet-Tutorial ./data') print('Done!') # + id="UaFc9WZ2ITCF" colab_type="code" colab={} x_train_dir = os.path.join(DATA_DIR, 'train') y_train_dir = os.path.join(DATA_DIR, 'trainannot') x_valid_dir = os.path.join(DATA_DIR, 'val') y_valid_dir = os.path.join(DATA_DIR, 'valannot') x_test_dir = os.path.join(DATA_DIR, 'test') y_test_dir = os.path.join(DATA_DIR, 'testannot') # + id="TOl13hn2IW81" colab_type="code" colab={} # helper function for data visualization def visualize(**images): """PLot images in one row.""" n = len(images) plt.figure(figsize=(16, 5)) for i, (name, image) in enumerate(images.items()): plt.subplot(1, n, i + 1) plt.xticks([]) plt.yticks([]) plt.title(' '.join(name.split('_')).title()) plt.imshow(image) plt.show() # helper function for data visualization def denormalize(x): """Scale image to range 0..1 for correct plot""" x_max = np.percentile(x, 98) x_min = np.percentile(x, 2) x = (x - x_min) / (x_max - x_min) x = x.clip(0, 1) return x # classes for data loading and preprocessing class Dataset: """CamVid Dataset. Read images, apply augmentation and preprocessing transformations. Args: images_dir (str): path to images folder masks_dir (str): path to segmentation masks folder class_values (list): values of classes to extract from segmentation mask augmentation (albumentations.Compose): data transfromation pipeline (e.g. flip, scale, etc.) preprocessing (albumentations.Compose): data preprocessing (e.g. noralization, shape manipulation, etc.) 
""" CLASSES = ['sky', 'building', 'pole', 'road', 'pavement', 'tree', 'signsymbol', 'fence', 'car', 'pedestrian', 'bicyclist', 'unlabelled'] def __init__( self, images_dir, masks_dir, classes=None, augmentation=None, preprocessing=None, ): self.ids = os.listdir(images_dir) self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids] self.masks_fps = [os.path.join(masks_dir, image_id) for image_id in self.ids] # convert str names to class values on masks self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes] self.augmentation = augmentation self.preprocessing = preprocessing def __getitem__(self, i): # read data image = cv2.imread(self.images_fps[i]) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) mask = cv2.imread(self.masks_fps[i], 0) # extract certain classes from mask (e.g. cars) masks = [(mask == v) for v in self.class_values] mask = np.stack(masks, axis=-1).astype('float') # add background if mask is not binary if mask.shape[-1] != 1: background = 1 - mask.sum(axis=-1, keepdims=True) mask = np.concatenate((mask, background), axis=-1) # apply augmentations if self.augmentation: sample = self.augmentation(image=image, mask=mask) image, mask = sample['image'], sample['mask'] # apply preprocessing if self.preprocessing: sample = self.preprocessing(image=image, mask=mask) image, mask = sample['image'], sample['mask'] return image, mask def __len__(self): return len(self.ids) class Dataloder(keras.utils.Sequence): """Load data from dataset and form batches Args: dataset: instance of Dataset class for image loading and preprocessing. batch_size: Integet number of images in batch. shuffle: Boolean, if `True` shuffle image indexes each epoch. 
""" def __init__(self, dataset, batch_size=1, shuffle=False): self.dataset = dataset self.batch_size = batch_size self.shuffle = shuffle self.indexes = np.arange(len(dataset)) self.on_epoch_end() def __getitem__(self, i): # collect batch data start = i * self.batch_size stop = (i + 1) * self.batch_size data = [] for j in range(start, stop): data.append(self.dataset[j]) # transpose list of lists batch = [np.stack(samples, axis=0) for samples in zip(*data)] return batch def __len__(self): """Denotes the number of batches per epoch""" return len(self.indexes) // self.batch_size def on_epoch_end(self): """Callback function to shuffle indexes each epoch""" if self.shuffle: self.indexes = np.random.permutation(self.indexes) # + id="j6T2qmY_IaRk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 196} outputId="8a3eeb34-6799-4c90-d2d9-c2d27c1570d7" # Lets look at data we have dataset = Dataset(x_train_dir, y_train_dir, classes=['car', 'pedestrian']) image, mask = dataset[5] # get some sample visualize( image=image, cars_mask=mask[..., 0].squeeze(), sky_mask=mask[..., 1].squeeze(), background_mask=mask[..., 2].squeeze(), ) # + id="HtdI-JVzIdWd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 598} outputId="2b36fbcd-f86f-4c54-9233-eea933788fcb" # !pip install -U albumentations==0.3.0 --user import albumentations as A print(A.__version__) # + id="orFUX4vwIgCN" colab_type="code" colab={} def round_clip_0_1(x, **kwargs): return x.round().clip(0, 1) # define heavy augmentations def get_training_augmentation(): train_transform = [ A.HorizontalFlip(p=0.5), A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0), A.PadIfNeeded(min_height=320, min_width=320, always_apply=True, border_mode=0), A.RandomCrop(height=320, width=320, always_apply=True), A.IAAAdditiveGaussianNoise(p=0.2), A.IAAPerspective(p=0.5), A.OneOf( [ A.CLAHE(p=1), A.RandomBrightness(p=1), A.RandomGamma(p=1), ], p=0.9, ), A.OneOf( [ 
A.IAASharpen(p=1), A.Blur(blur_limit=3, p=1), A.MotionBlur(blur_limit=3, p=1), ], p=0.9, ), A.OneOf( [ A.RandomContrast(p=1), A.HueSaturationValue(p=1), ], p=0.9, ), A.Lambda(mask=round_clip_0_1) ] return A.Compose(train_transform) def get_validation_augmentation(): """Add paddings to make image shape divisible by 32""" test_transform = [ A.PadIfNeeded(384, 480) ] return A.Compose(test_transform) def get_preprocessing(preprocessing_fn): """Construct preprocessing transform Args: preprocessing_fn (callbale): data normalization function (can be specific for each pretrained neural network) Return: transform: albumentations.Compose """ _transform = [ A.Lambda(image=preprocessing_fn), ] return A.Compose(_transform) # + id="OEMQUm-AIkSN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="6f006fc7-d2b9-47f7-9360-691d5949a4f2" # Lets look at augmented data we have dataset = Dataset(x_train_dir, y_train_dir, classes=['car', 'sky'], augmentation=get_training_augmentation()) print(dir(A)) image, mask = dataset[12] # get some sample visualize( image=image, cars_mask=mask[..., 0].squeeze(), sky_mask=mask[..., 1].squeeze(), background_mask=mask[..., 2].squeeze(), ) # + id="_gxB7jikKUI4" colab_type="code" colab={} # !pip install -U segmentation-models # !pip install git+https://github.com/qubvel/segmentation_models import segmentation_models as sm # + id="QyA2igfoKw4r" colab_type="code" colab={} BACKBONE = 'efficientnetb3' BATCH_SIZE = 8 CLASSES = ['car'] LR = 0.0001 EPOCHS = 40 preprocess_input = sm.get_preprocessing(BACKBONE) # + id="nCkGsXELKyhh" colab_type="code" colab={} n_classes = 1 if len(CLASSES) == 1 else (len(CLASSES) + 1) # case for binary and multiclass segmentation activation = 'sigmoid' if n_classes == 1 else 'softmax' #create model model = sm.Unet(BACKBONE, classes=n_classes, activation=activation) # + id="km_uqvWkK1TL" colab_type="code" colab={} # define optomizer optim = keras.optimizers.Adam(LR) # Segmentation models losses 
can be combined together by '+' and scaled by integer or float factor dice_loss = sm.losses.DiceLoss() focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss() total_loss = dice_loss + (1 * focal_loss) # actulally total_loss can be imported directly from library, above example just show you how to manipulate with losses # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)] # compile keras model with defined optimozer, loss and metrics model.compile(optim, total_loss, metrics) # + id="_iMHxrshK_yq" colab_type="code" colab={} # Dataset for train images train_dataset = Dataset( x_train_dir, y_train_dir, classes=CLASSES, augmentation=get_training_augmentation(), preprocessing=get_preprocessing(preprocess_input), ) # Dataset for validation images valid_dataset = Dataset( x_valid_dir, y_valid_dir, classes=CLASSES, augmentation=get_validation_augmentation(), preprocessing=get_preprocessing(preprocess_input), ) train_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True) valid_dataloader = Dataloder(valid_dataset, batch_size=1, shuffle=False) # check shapes for errors assert train_dataloader[0][0].shape == (BATCH_SIZE, 320, 320, 3) assert train_dataloader[0][1].shape == (BATCH_SIZE, 320, 320, n_classes) # define callbacks for learning rate scheduling and best checkpoints saving callbacks = [ keras.callbacks.ModelCheckpoint('./best_model.h5', save_weights_only=True, save_best_only=True, mode='min'), keras.callbacks.ReduceLROnPlateau(), ] # + id="f1MEI8YZLENT" colab_type="code" colab={} # train model history = model.fit_generator( train_dataloader, steps_per_epoch=len(train_dataloader), epochs=EPOCHS, callbacks=callbacks, validation_data=valid_dataloader, validation_steps=len(valid_dataloader), )
test_lib.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sklearn.model_selection import KFold import pandas as pd import sklearn.metrics import autosklearn.classification import autosklearn.metrics import matplotlib.pyplot as plt import seaborn as sns df_clean = pd.read_csv('noshowappointments-kagglev2-may-2016_clean.csv') print(df_clean.head(3)) kept_columns = ['Scholarship', 'Diabetes', 'Alcoholism', 'SMS_received', 'WaitDays', 'Age'] df_features = df_clean[kept_columns].copy() print(df_features.head()) df_labels = df_clean['No-show'].map({True:1, False:0}).copy() df_labels.head() # + kf = KFold(n_splits=5, shuffle=True, random_state=42) for train_index, test_index in kf.split(df_features): features_train, features_test = df_features.loc[train_index], df_features.loc[test_index] labels_train, labels_test = df_labels.loc[train_index], df_labels.loc[test_index] # - automl = autosklearn.classification.AutoSklearnClassifier() automl.fit(features_train, labels_train) pred = automl.predict(features_test) # Scores print("Accuracy score:{}".format(sklearn.metrics.accuracy_score(labels_test, pred))) print("Precision:{}".format(autosklearn.metrics.precision(labels_test, pred))) print("Recall:{}".format(autosklearn.metrics.recall(labels_test, pred))) print("F1 Score:{}".format(sklearn.metrics.f1_score(labels_test, pred))) # visualizing confusion matrics confusion_matrix_automl = sklearn.metrics.confusion_matrix(labels_test, pred) sns.heatmap(confusion_matrix_automl, annot=True, cmap="Oranges") plt.ylabel("True Label") plt.xlabel("Predicted Label") plt.show() # + automl.show_models() # + # auto-sklearn statistics automl.sprint_statistics() # -
auto-sklearn/Brazil-autosklearn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Twitter Sentiment Analysis # ![twitter2.jpg](http://blog.datumbox.com/wp-content/uploads/2013/09/twitter2.jpg) # ## Importing Library import pandas as pd import numpy as np import matplotlib as nlp tweets=pd.read_csv('../input/sentiment140/training.1600000.processed.noemoticon.csv',encoding='latin', names = ['sentiment','id','date','query','user','tweet']) tweets tweets = tweets.sample(frac=1) tweets = tweets[:200000] print("Dataset shape:", tweets.shape) tweets['sentiment'].unique() # ## Step 1 : Currently (0=negative,4=Positive) changing the notation to (0=Negative,1=Positive) # ### So that we can understand the data tweets['sentiment']=tweets['sentiment'].replace(4,1) tweets # ## Removing the unnecessary columns. tweets.drop(['date','query','user'], axis=1, inplace=True) tweets.drop('id', axis=1, inplace=True) tweets.head(10) # ## Checking if any null values present (tweets.isnull().sum() / len(tweets))*100 #converting pandas object to a string type tweets['tweet'] = tweets['tweet'].astype('str') # ## Check the number of positive vs. negative tagged sentences # + positives = tweets['sentiment'][tweets.sentiment == 1 ] negatives = tweets['sentiment'][tweets.sentiment == 0 ] print('Total length of the data is: {}'.format(tweets.shape[0])) print('No. of positve tagged sentences is: {}'.format(len(positives))) print('No. of negative tagged sentences is: {}'.format(len(negatives))) # - # nltk import nltk from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords from nltk.tokenize import word_tokenize #Stop Words: A stop word is a commonly used word (such as “the”, “a”, “an”, “in”) #that a search engine has been programmed to ignore, #both when indexing entries for searching and when retrieving them as the result of a search query. 
# Download the NLTK stop-word list and build the stop-word set used below.
nltk.download('stopwords')
stopword = set(stopwords.words('english'))
print(stopword)

# ## Data Cleaning
import warnings
warnings.filterwarnings('ignore')
import re
import string
import pickle

# Patterns for URLs and @user mentions that are stripped from every tweet.
urlPattern = r"((http://)[^ ]*|(https://)[^ ]*|( www\.)[^ ]*)"
userPattern = '@[^\s]+'
# Filler words to drop entirely.
# FIX: the original value 'amp,today,tomorrow,going,girl' was passed to re.sub
# verbatim, so it could only match that exact comma-joined string and never
# removed any of the intended words. A word-bounded alternation does.
some = r'\b(amp|today|tomorrow|going|girl)\b'

# Ordered (pattern, replacement) pairs expanding contractions and common slang.
# Order is preserved from the original chain of re.sub calls; exact duplicate
# entries from the original (no-ops on a second pass) were dropped.
# \x89Ûª is a mojibake right single quote that appears in this dataset.
_REPLACEMENTS = [
    (r"he's", "he is"), (r"there's", "there is"), (r"We're", "We are"),
    (r"That's", "That is"), (r"won't", "will not"), (r"they're", "they are"),
    (r"Can't", "Cannot"), (r"wasn't", "was not"), (r"don\x89Ûªt", "do not"),
    (r"aren't", "are not"), (r"isn't", "is not"), (r"What's", "What is"),
    (r"haven't", "have not"), (r"hasn't", "has not"), (r"There's", "There is"),
    (r"He's", "He is"), (r"It's", "It is"), (r"You're", "You are"),
    (r"I'M", "I am"), (r"shouldn't", "should not"), (r"wouldn't", "would not"),
    (r"i'm", "I am"), (r"I\x89Ûªm", "I am"), (r"I'm", "I am"),
    (r"Isn't", "is not"), (r"Here's", "Here is"), (r"you've", "you have"),
    (r"you\x89Ûªve", "you have"), (r"we're", "we are"), (r"what's", "what is"),
    (r"couldn't", "could not"), (r"we've", "we have"), (r"it\x89Ûªs", "it is"),
    (r"doesn\x89Ûªt", "does not"), (r"It\x89Ûªs", "It is"),
    (r"Here\x89Ûªs", "Here is"), (r"who's", "who is"), (r"I\x89Ûªve", "I have"),
    (r"y'all", "you all"), (r"can\x89Ûªt", "cannot"),
    (r"would've", "would have"), (r"it'll", "it will"), (r"we'll", "we will"),
    (r"wouldn\x89Ûªt", "would not"), (r"We've", "We have"),
    (r"he'll", "he will"), (r"Y'all", "You all"), (r"Weren't", "Were not"),
    (r"Didn't", "Did not"), (r"they'll", "they will"),
    (r"they'd", "they would"), (r"DON'T", "DO NOT"),
    (r"That\x89Ûªs", "That is"), (r"they've", "they have"),
    (r"i'd", "I would"), (r"should've", "should have"),
    (r"You\x89Ûªre", "You are"), (r"where's", "where is"),
    (r"Don\x89Ûªt", "Do not"), (r"we'd", "we would"), (r"i'll", "I will"),
    (r"weren't", "were not"), (r"They're", "They are"),
    (r"Can\x89Ûªt", "Cannot"), (r"you\x89Ûªll", "you will"),
    (r"I\x89Ûªd", "I would"), (r"let's", "let us"), (r"it's", "it is"),
    (r"can't", "cannot"), (r"don't", "do not"), (r"you're", "you are"),
    (r"i've", "I have"), (r"that's", "that is"), (r"doesn't", "does not"),
    (r"didn't", "did not"), (r"ain't", "am not"), (r"you'll", "you will"),
    (r"I've", "I have"), (r"Don't", "do not"), (r"I'll", "I will"),
    (r"I'd", "I would"), (r"Let's", "Let us"), (r"you'd", "You would"),
    (r"Ain't", "am not"), (r"Haven't", "Have not"),
    (r"Could've", "Could have"), (r"youve", "you have"),
    (r"donå«t", "do not"), (r"some1", "someone"), (r"yrs", "years"),
    (r"hrs", "hours"), (r"2morow|2moro", "tomorrow"), (r"2day", "today"),
    (r"4got|4gotten", "forget"), (r"b-day|bday", "b-day"),
    (r"mother's", "mother"), (r"mom's", "mom"), (r"dad's", "dad"),
    (r"hahah|hahaha|hahahaha", "haha"), (r"lmao|lolz|rofl", "lol"),
    (r"thanx|thnx", "thanks"), (r"goood", "good"),
]

# Hoisted: constructing a WordNetLemmatizer on every call is wasteful.
wordLemm = WordNetLemmatizer()


def process_tweets(tweet):
    """Normalize one raw tweet into a cleaned, lemmatized token string.

    Steps: expand contractions/slang, lowercase, strip URLs / @mentions /
    filler words / punctuation, tokenize, drop stop words and 1-char tokens,
    lemmatize, and re-join with single spaces.
    """
    # Expand contractions and slang (case-sensitive, in original order).
    for pattern, repl in _REPLACEMENTS:
        tweet = re.sub(pattern, repl, tweet)
    tweet = tweet.lower()
    # FIX: the original did `tweet = tweet[1:]` here, silently dropping the
    # first character of every tweet; removed as a bug.
    # Removing all URLs.
    tweet = re.sub(urlPattern, '', tweet)
    # Removing all @username mentions.
    tweet = re.sub(userPattern, '', tweet)
    # Remove some filler words.
    tweet = re.sub(some, '', tweet)
    # Remove punctuation in one pass.
    tweet = tweet.translate(str.maketrans("", "", string.punctuation))
    # Tokenize, drop stop words, then lemmatize the surviving tokens.
    tokens = word_tokenize(tweet)
    final_tokens = [w for w in tokens if w not in stopword]
    finalwords = []
    for w in final_tokens:
        if len(w) > 1:
            finalwords.append(wordLemm.lemmatize(w))
    return ' '.join(finalwords)


# Chat/texting abbreviations expanded to plain English (lower-case lookup).
abbreviations = {
    "$": " dollar ", "€": " euro ", "4ao": "for adults only",
    "a.m": "before midday", "a3": "anytime anywhere anyplace",
    "aamof": "as a matter of fact", "acct": "account",
    "adih": "another day in hell", "afaic": "as far as i am concerned",
    "afaict": "as far as i can tell", "afaik": "as far as i know",
    "afair": "as far as i remember", "afk": "away from keyboard",
    "app": "application", "approx": "approximately", "apps": "applications",
    "asap": "as soon as possible", "asl": "age, sex, location",
    "atk": "at the keyboard", "ave.": "avenue", "aymm": "are you my mother",
    "ayor": "at your own risk", "b&b": "bed and breakfast",
    "b+b": "bed and breakfast", "b.c": "before christ",
    "b2b": "business to business", "b2c": "business to customer",
    "b4": "before", "b4n": "bye for now", "b@u": "back at you",
    "bae": "before anyone else", "bak": "back at keyboard",
    "bbbg": "bye bye be good", "bbc": "british broadcasting corporation",
    "bbias": "be back in a second", "bbl": "be back later",
    "bbs": "be back soon", "be4": "before", "bfn": "bye for now",
    "blvd": "boulevard", "bout": "about", "brb": "be right back",
    "bros": "brothers", "brt": "be right there",
    "bsaaw": "big smile and a wink", "btw": "by the way",
    "bwl": "bursting with laughter", "c/o": "care of",
    "cet": "central european time", "cf": "compare",
    "cia": "central intelligence agency", "csl": "can not stop laughing",
    "cu": "see you", "cul8r": "see you later", "cv": "curriculum vitae",
    "cwot": "complete waste of time", "cya": "see you",
    "cyt": "see you tomorrow", "dae": "does anyone else",
    "dbmib": "do not bother me i am busy", "diy": "do it yourself",
    "dm": "direct message", "dwh": "during work hours",
    "e123": "easy as one two three", "eet": "eastern european time",
    "eg": "example", "embm": "early morning business meeting",
    "encl": "enclosed", "encl.": "enclosed", "etc": "and so on",
    "faq": "frequently asked questions", "fawc": "for anyone who cares",
    "fb": "facebook", "fc": "fingers crossed", "fig": "figure",
    "fimh": "forever in my heart", "ft.": "feet", "ft": "featuring",
    "ftl": "for the loss", "ftw": "for the win",
    "fwiw": "for what it is worth", "fyi": "for your information",
    "g9": "genius", "gahoy": "get a hold of yourself", "gal": "get a life",
    "gcse": "general certificate of secondary education",
    "gfn": "gone for now", "gg": "good game", "gl": "good luck",
    "glhf": "good luck have fun", "gmt": "greenwich mean time",
    "gmta": "great minds think alike", "gn": "good night",
    "g.o.a.t": "greatest of all time", "goat": "greatest of all time",
    "goi": "get over it", "gps": "global positioning system", "gr8": "great",
    "gratz": "congratulations", "gyal": "girl", "h&c": "hot and cold",
    "hp": "horsepower", "hr": "hour", "hrh": "his royal highness",
    "ht": "height", "ibrb": "i will be right back", "ic": "i see",
    "icq": "i seek you", "icymi": "in case you missed it",
    "idc": "i do not care", "idgadf": "i do not give a damn fuck",
    "idgaf": "i do not give a fuck", "idk": "i do not know",
    "ie": "that is", "i.e": "that is", "ifyp": "i feel your pain",
    "IG": "instagram", "iirc": "if i remember correctly",
    "ilu": "i love you", "ily": "i love you",
    "imho": "in my humble opinion", "imo": "in my opinion",
    "imu": "i miss you", "iow": "in other words", "irl": "in real life",
    "j4f": "just for fun", "jic": "just in case", "jk": "just kidding",
    "jsyk": "just so you know", "l8r": "later", "lb": "pound",
    "lbs": "pounds", "ldr": "long distance relationship",
    "lmao": "laugh my ass off", "lmfao": "laugh my fucking ass off",
    "lol": "laughing out loud", "ltd": "limited", "ltns": "long time no see",
    "m8": "mate", "mf": "motherfucker", "mfs": "motherfuckers",
    "mfw": "my face when", "mofo": "motherfucker", "mph": "miles per hour",
    "mr": "mister", "mrw": "my reaction when", "ms": "miss",
    "mte": "my thoughts exactly", "nagi": "not a good idea",
    "nbc": "national broadcasting company", "nbd": "not big deal",
    "nfs": "not for sale", "ngl": "not going to lie",
    "nhs": "national health service", "nrn": "no reply necessary",
    "nsfl": "not safe for life", "nsfw": "not safe for work",
    "nth": "nice to have", "nvr": "never", "nyc": "new york city",
    "oc": "original content", "og": "original", "ohp": "overhead projector",
    "oic": "oh i see", "omdb": "over my dead body", "omg": "oh my god",
    "omw": "on my way", "p.a": "per annum", "p.m": "after midday",
    "pm": "prime minister", "poc": "people of color",
    "pov": "point of view", "pp": "pages", "ppl": "people",
    "prw": "parents are watching", "ps": "postscript", "pt": "point",
    "ptb": "please text back", "pto": "please turn over",
    "qpsa": "what happens", "ratchet": "rude",
    "rbtl": "read between the lines", "rlrt": "real life retweet",
    "rofl": "rolling on the floor laughing",
    "roflol": "rolling on the floor laughing out loud",
    "rotflmao": "rolling on the floor laughing my ass off",
    "rt": "retweet", "ruok": "are you ok", "sfw": "safe for work",
    "sk8": "skate", "smh": "shake my head", "sq": "square",
    "srsly": "seriously", "ssdd": "same stuff different day",
    "tbh": "to be honest", "tbs": "tablespooful", "tbsp": "tablespooful",
    "tfw": "that feeling when", "thks": "thank you", "tho": "though",
    "thx": "thank you", "tia": "thanks in advance",
    "til": "today i learned", "tl;dr": "too long i did not read",
    "tldr": "too long i did not read", "tmb": "tweet me back",
    "tntl": "trying not to laugh", "ttyl": "talk to you later", "u": "you",
    "u2": "you too", "u4e": "yours for ever",
    "utc": "coordinated universal time", "w/": "with", "w/o": "without",
    "w8": "wait", "wassup": "what is up", "wb": "welcome back",
    "wtf": "what the fuck", "wtg": "way to go",
    "wtpa": "where the party at", "wuf": "where are you from",
    "wuzup": "what is up", "wywh": "wish you were here", "yd": "yard",
    "ygtr": "you got that right", "ynk": "you never know",
    "zzz": "sleeping bored and tired",
}


def convert_abbrev_in_text(tweet):
    """Replace known chat abbreviations word-by-word (case-insensitive lookup)."""
    words = tweet.split()
    t = [abbreviations[w.lower()] if w.lower() in abbreviations.keys() else w for w in words]
    return ' '.join(t)


# ## Text Processing Completed
tweets['processed_tweets'] = tweets['tweet'].apply(lambda x: process_tweets(x))
tweets['processed_tweets'] = tweets['processed_tweets'].apply(lambda x: convert_abbrev_in_text(x))
print('Text Preprocessing complete.')
tweets

# Removing short words (3 characters or fewer).
tweets['processed_tweets'] = tweets['processed_tweets'].apply(lambda x: " ".join([w for w in x.split() if len(w) > 3]))
tweets.head(5)

from sklearn.utils import shuffle
tweets = shuffle(tweets).reset_index(drop=True)

# ## Tokenization
tokenized_tweet = tweets['processed_tweets'].apply(lambda x: x.split())
tokenized_tweet.head(5)

# Bag-of-words features: unigram counts over alphanumeric tokens.
from sklearn.feature_extraction.text import CountVectorizer
from nltk.tokenize import RegexpTokenizer
token = RegexpTokenizer(r'[a-zA-Z0-9]+')
cv = CountVectorizer(stop_words='english', ngram_range=(1, 1), tokenizer=token.tokenize)
text_counts = cv.fit_transform(tweets['processed_tweets'].values.astype('U'))

# ## Train/Test Split
from sklearn.model_selection import train_test_split
X = text_counts
y = tweets['sentiment']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=19)

# ## Naive Bayes
from sklearn.naive_bayes import ComplementNB
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from math import *

# Complement NB is well-suited to imbalanced bag-of-words text data.
cnb = ComplementNB()
cnb.fit(X_train, y_train)
cross_cnb = cross_val_score(cnb, X, y, n_jobs=-1)
print("Cross Validation score = ", cross_cnb)
print("Train accuracy ={:.2f}%".format(cnb.score(X_train, y_train) * 100))
print("Test accuracy ={:.2f}%".format(cnb.score(X_test, y_test) * 100))
train_acc_cnb = cnb.score(X_train, y_train)
test_acc_cnb = cnb.score(X_test, y_test)

# Plotting train vs. test accuracy as a bar chart.
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt

data_cnb = [train_acc_cnb, test_acc_cnb]
labels = ['Train Accuracy', 'Test Accuracy']
plt.xticks(range(len(data_cnb)), labels)
plt.ylabel('Accuracy')
plt.title('Accuracy plot with best parameters')
plt.bar(range(len(data_cnb)), data_cnb, color=['blue', 'darkorange'])
Train_acc = mpatches.Patch(color='blue', label='Train_acc')
Test_acc = mpatches.Patch(color='darkorange', label='Test_acc')
plt.legend(handles=[Train_acc, Test_acc], loc='best')
plt.gcf().set_size_inches(8, 8)
plt.show()

# ## Confusion Matrix
# +
from sklearn.metrics import *

# Predict the test set and show the confusion matrix.
y_pred_cnb = cnb.predict(X_test)
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, y_pred_cnb))

# +
# Per-class precision/recall/F1, then ROC AUC of the hard predictions.
print(classification_report(y_test, y_pred_cnb))
roc_score_cnb = roc_auc_score(y_test, y_pred_cnb)
print("Area Under the Curve = ", roc_score_cnb)
# -

# ## Calculating F1, precision and recall scores
# +
from sklearn.metrics import *
print("F1 score ={:.2f}%".format(f1_score(y_test, y_pred_cnb, average="macro") * 100))
f1_cnb = f1_score(y_test, y_pred_cnb, average="macro")
print("Precision score ={:.2f}%".format(precision_score(y_test, y_pred_cnb, average="macro") * 100))
precision_cnb = precision_score(y_test, y_pred_cnb, average="macro")
print("Recall score ={:.2f}%".format(recall_score(y_test, y_pred_cnb, average="macro") * 100))
recall_cnb = recall_score(y_test, y_pred_cnb, average="macro")
# -

# ## Drawing the ROC curve
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
import numpy as np

# ROC from predicted probabilities of the positive class.
fpr_dt_1, tpr_dt_1, _ = roc_curve(y_test, cnb.predict_proba(X_test)[:, 1])
plt.plot(fpr_dt_1, tpr_dt_1, label="ROC curve")
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.gcf().set_size_inches(8, 8)
plt.show()
twitter-sentiment-analysis-with-naive-bayes-85-acc.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # WeatherPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow
#   them exactly, but they are included to help you think through the steps.

# ## Analysis
#
# * Observed trend 1: It sounds obvious but the max temperature is higher as you
#   get closer to the equator. There are also more sample data gathered in the
#   north hemisphere due to higher populations
#
# * Observed trend 2: There is no strong evidence that there is a correlation
#   between latitude and wind speed/cloudness
#
# * Observed trend 3: Cities around the equater seem to have higher humidity level

# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time

# Import API key
from api_keys import api_key

# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy

# Output File (CSV)
output_data_file = "output_data/cities.csv"

# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
# -

# ## Generate Cities List

# +
# List for holding lat_lngs and cities
lat_lngs = []
cities = []

# Create a set of random lat and lng combinations
lats = np.random.uniform(low=-90.000, high=90.000, size=2000)
lngs = np.random.uniform(low=-180.000, high=180.000, size=2000)
lat_lngs = zip(lats, lngs)

# Identify nearest city for each lat, lng combination; keep only unique cities.
for lat_lng in lat_lngs:
    city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
    if city not in cities:
        cities.append(city)

# Print the city count to confirm sufficient count
count = len(cities)
count
# -

# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city
#   number and city name).
#
# Scatter plots to build afterwards:
#   Temperature (F) vs. Latitude, Humidity (%) vs. Latitude,
#   Cloudiness (%) vs. Latitude, Wind Speed (mph) vs. Latitude

# +
base_url = "http://api.openweathermap.org/data/2.5/weather?"
params = {
    "appid": api_key,
    "units": "imperial",
}

# +
weather = []
count = 0      # record number within the current 50-city set
setcount = 1   # set number, advances every 50 successful lookups

print("Beginning Data Retrieval")
print("-----------------------------")
for city in cities:
    # FIX: the original `params["q"] = city,` assigned a 1-tuple; it only
    # worked because requests encodes sequence values as repeated parameters.
    params["q"] = city
    response = requests.get(base_url, params=params)
    response_json = response.json()
    # A response with an "id" field is a successful city lookup.
    if response_json.get("id"):
        cityname = response_json["name"]
        if count < 49:
            count = count + 1
        else:
            setcount = setcount + 1
            count = 0
        print(f"Processing Record {count} of Set {setcount} | {cityname}")
        weather.append({"CityID": response_json["id"],
                        "City": response_json["name"],
                        "Country": response_json["sys"]["country"],
                        "Date": response_json["dt"],
                        "Lat": response_json["coord"]["lat"],
                        "Lng": response_json["coord"]["lon"],
                        "Max Temp": response_json["main"]["temp_max"],
                        "Humidity": response_json["main"]["humidity"],
                        "Cloudiness": response_json["clouds"]["all"],
                        "Wind Speed": response_json["wind"]["speed"]})
    else:
        print("City not found. Skipping...")
print("-----------------------------")
print("Data Retrieval Complete")
print("-----------------------------")
# -

# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame

# +
# Build a DataFrame from the retrieved records and drop duplicate cities,
# keeping the first occurrence of each CityID.
weather_df = pd.DataFrame(weather)
# # weather_df.count() 689
# cityid cloudness country humidity lat lon name temp windspeed
weather_no_dups_df = weather_df.drop_duplicates(subset="CityID", keep='first', inplace=False)
# weather_no_dups_df.count() 687
weather_no_dups_df.count()

# +
# weather_no_dups_df.dtypes
# Remove anomalies: humidity above 100% is physically impossible and indicates
# a bad API reading, so those rows are dropped.
anomalies = weather_no_dups_df[weather_no_dups_df["Humidity"]>100]
# anomalies
weather_no_dups_df = weather_no_dups_df.drop(anomalies.index, axis=0)
# weather_no_dups_df_test
# -

# Persist the cleaned data and preview it.
weather_no_dups_df.to_csv("WeatherPy.csv")
weather_no_dups_df.head()

# ### Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.

# #### Latitude vs. Temperature Plot

# +
# help(plt.scatter)

# +
x = weather_no_dups_df["Lat"]
y = weather_no_dups_df["Max Temp"]
plt.scatter(x, y, marker="o", facecolors="steelblue", edgecolors="black", linewidths=1)
plt.grid()
plt.title("City Latitude vs. Max Temperature (8/27/2019)")
plt.xlabel("Latitude")
plt.ylabel("Max Temperature (F)")
plt.savefig("latitude_temperature.png")
# -

# #### Latitude vs. Humidity Plot

# +
x = weather_no_dups_df["Lat"]
y = weather_no_dups_df["Humidity"]
plt.scatter(x, y, marker="o", facecolors="steelblue", edgecolors="black", linewidths=1)
plt.grid()
plt.title("City Latitude vs. Humidity (8/27/2019)")
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
plt.savefig("latitude_humidity.png")
# -

# #### Latitude vs. Cloudiness Plot

# +
x = weather_no_dups_df["Lat"]
y = weather_no_dups_df["Cloudiness"]
plt.scatter(x, y, marker="o", facecolors="steelblue", edgecolors="black", linewidths=1)
plt.grid()
plt.title("City Latitude vs. Cloudiness (8/27/2019)")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
plt.savefig("latitude_cloudiness.png")
# -

# #### Latitude vs. Wind Speed Plot

# +
x = weather_no_dups_df["Lat"]
y = weather_no_dups_df["Wind Speed"]
plt.scatter(x, y, marker="o", facecolors="steelblue", edgecolors="black", linewidths=1)
plt.grid()
plt.title("City Latitude vs. Wind Speed (8/27/2019)")
plt.xlabel("Latitude")
plt.ylabel("Wind Speed (mpg)")
plt.savefig("latitude_wind.png")
# -
WeatherPy_junghyunson.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python3
#     name: python3
# ---

# Read GSR data from a Shimmer3 unit over Bluetooth (RFCOMM),
# using the Shimmer LogAndStream byte protocol.
import struct, bluetooth

# +
# Check if our unit can be detected (for validating bluetooth.connect())
devs = bluetooth.discover_devices()
print(devs)

# +
# Connect to GSR unit on RFCOMM channel 1.
gsr_address = '00:06:66:D7:C6:F2'
shimmer_gsr = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
shimmer_gsr.connect((gsr_address, 1))

# +
# Utilities
def pack_char(val):
    # Pack a single unsigned byte (the protocol is byte-oriented).
    return struct.pack('B', val)

# Packet type variables (Shimmer3 LogAndStream protocol constants)
DATA_PACKET = pack_char(0x00)
INQUIRY_COMMAND = pack_char(0x01)
INQUIRY_RESPONSE = pack_char(0x02)
SET_SENSORS_COMMAND = pack_char(0x08)
ACKNOWLEDGE = pack_char(0xFF)

# Sensor variables
SENSOR_GSR = pack_char(0x04)

def wait_ACK():
    # Block, reading one byte at a time, until the unit sends ACK (0xFF).
    while True:
        response = shimmer_gsr.recv(1)
        print('wait for ack', response)
        if response == ACKNOWLEDGE:
            print('Shimmer: ACK')
            return

# +
# Enable GSR readings
# https://raw.githubusercontent.com/ShimmerResearch/shimmer3/LogAndStream_v0.11.0/LogAndStream/shimmer_btsd.h
# Command layout: SET_SENSORS + 3 sensor-mask bytes (GSR bit in the first byte).
set_gsr_command = SET_SENSORS_COMMAND + SENSOR_GSR + 2*DATA_PACKET
# SET_SENSORS_COMMAND = 0x08
# SENSOR_GSR = 0x04
# set_gsr_command = struct.pack('BBBB', SET_SENSORS_COMMAND, SENSOR_GSR, 0x00, 0x00)
shimmer_gsr.send(set_gsr_command)
wait_ACK()

# +
# Shitty way of finding our GSR channel through inquiry command
shimmer_gsr.send(INQUIRY_COMMAND)
wait_ACK()

def read_inqury_response():
    # NOTE(review): name keeps the original "inqury" typo; the call site below
    # uses the same spelling. Reads bytes until the inquiry packet is complete:
    # byte 7 carries the channel count, so total length = 8 header bytes +
    # one channel-type byte per channel.
    counter = 0
    num_channels = 0
    packet_begin = False
    packet_end = 999  # Arbitrary large enough number
    response = b''
    while counter <= packet_end:
        data = shimmer_gsr.recv(1)
        if data == INQUIRY_RESPONSE:
            packet_begin = True
        if packet_begin:
            if counter == 7:
                packet_end = 8 + data[0]
            response += data
            counter += 1
    return response

def print_inquiry_response(response):
    # Dump the decoded inquiry packet field by field.
    # Bytes 1-2: sample rate (little-endian pair); bytes 3-6: config bytes.
    print('Full response', list(response))
    print('Response length', len(response))
    print('Packet type', hex(response[0]))
    print('Sample rate', hex(response[1]))
    print('Sample rate', hex(response[2]))
    print('Config byte', hex(response[3]))
    print('Config byte', hex(response[4]))
    print('Config byte', hex(response[5]))
    print('Config byte', hex(response[6]))
    print('Number of channels', hex(response[7]))
    print('Buffer size', hex(response[8]))
    for i in range(response[7]):
        print('Channel %s' % (i + 1), hex(response[9 + i]))

inquiry_response = read_inqury_response()
print_inquiry_response(inquiry_response)

# +
# Unfinished streaming reader, kept for reference.
# # Check for packet begin outside func
# def read_stream_packet(sock, packet_channel_size, overflow_data=b''):
#     packet = overflow_data
#     for i in range(3 + packet_size):  # Add timestamp to size
#         packet += sock.recv(1)
#     return packet

# def read_stream_cmd_response():

# def read_stream(sock, shimmer_config):
#     while True:
#         data = sock.recv(1)
#         if data == DATA_PACKET:
#             packet = read_stream_packet(sock, shimmer_config[7])
# -

shimmer_gsr.close()
snippets/shimmer_data/shimmer-bluetooth-gsr.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 from common import * RESULT_JSON = "/Users/law/repos/viper/results/update/update_bm_revision.json" # + from collections import defaultdict runs = defaultdict(list) BMS = get_all_runs(RESULT_JSON) IN_PLACE = 'in_place' COPY_WRITE = 'get_update' for bm in BMS: if IN_PLACE in bm['name']: runs[IN_PLACE].append(bm) elif COPY_WRITE in bm['name']: runs[COPY_WRITE].append(bm) else: raise RuntimeError(f"Unknown benchmark type {bm['name']}") runs[IN_PLACE].sort(key=lambda x: x['threads']) runs[COPY_WRITE].sort(key=lambda x: x['threads']) # pprint(runs) # + in_place_w_threads = [(run['threads'], run['items_per_second'] / MILLION) for run in runs[IN_PLACE]] copy_write = [run['items_per_second'] / MILLION for run in runs[COPY_WRITE]] threads, in_place = zip(*in_place_w_threads) fig, ax = plt.subplots(1, 1, figsize=(4, 3)) ax.plot(threads, in_place, label="In-Place", ls='-', color=PMEM_COLOR, lw=3) ax.plot(threads, copy_write, label="CoW", ls='--', color=DRAM_COLOR, lw=3) ax.set_yticks(range(0, 16, 5)) ax.set_ylabel("Throughput\n(Mops/s)", fontsize=18) ax.set_xlabel("# Threads", fontsize=18) ax.legend(frameon=False, loc='upper left', fontsize=18, columnspacing=1.3, handletextpad=0.3, labelspacing=0.1, bbox_to_anchor=(-0.05, 1.25)) for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(18) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(18) # ax.yaxis.set_label_coords(-0.12, 0.45) ax.set_axisbelow(True) ax.grid(axis='y', which='major') hide_border(ax) plt.tight_layout() fig.savefig('charts/update.pdf', bbox_inches='tight') fig.savefig('charts/update.svg', bbox_inches='tight')
eval/update.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Import the dependencies. import pandas as pd import matplotlib.pyplot as plt import numpy as np # Create a set of random latitude and longitude combinations. lats = np.random.uniform(low=-90.000, high=90.000, size=1500) lngs = np.random.uniform(low=-180.000, high=180.000, size=1500) lat_lngs = zip(lats, lngs) lat_lngs # Add the latitudes and longitudes to a list. coordinates = list(lat_lngs) # Use the citipy module to determine city based on latitude and longitude. from citipy import citipy # Create a list for holding the cities. cities = [] # Identify the nearest city for each latitude and longitude combination. for coordinate in coordinates: city = citipy.nearest_city(coordinate[0], coordinate[1]).city_name # If the city is unique, then we will add it to the cities list. if city not in cities: cities.append(city) # Print the city count to confirm sufficient count. len(cities) import requests requests.__version__ # + # Import the requests library. import requests # Import the API key. from config import weather_api_key # - # Starting URL for Weather Map API Call. url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=" + weather_api_key print(url) # Create an endpoint URL for a city. city_url = url + "&q=" + "Boston" print(city_url) # Import the datetime module from the datetime library. from datetime import datetime # + #Get the City Weather Data # + # Import the dependencies. import pandas as pd import matplotlib.pyplot as plt import numpy as np import requests from datetime import datetime from config import weather_api_key #Use the citipy module to determine city based on latitude and longitude from citipy import citipy # - # Create a set of random latitude and longitude combinations. 
lats = np.random.uniform(low=-90.000, high=90.000, size=1500) lngs = np.random.uniform(low=-180.000, high=180.000, size=1500) lat_lngs = zip(lats, lngs) lat_lngs # Add the latitudes and longitudes to a a list. coordinates = list(lat_lngs) # Create a list for holding the cities. cities = [] # Identify the nearest city for each latitude and longitude combination. for coordinate in coordinates: city = citipy.nearest_city(coordinate[0], coordinate[1]).city_name # If the city is unique, then we will add it to the cities list. if city not in cities: cities.append(city) # Print the city count to confirm sufficient count. len(cities) url = f"http://api.openweathermap.org/data/2.5/weather?unit=Imperial&APPID={weather_api_key}" # + city_data = [] # Print the beginning of the logging. print("Beginning Data Retrieval ") print("-----------------------------") # Create counters. record_count = 1 set_count = 1 for i, city in enumerate(cities): # Group cities in sets of 50 for logging purposes if (i % 50 == 0 and i>= 50): set_count +=1 record_count = 1 # Create endpoint URL with each city city_url = f'{url}&q={city.replace(" ", "+")}' #Log the url record and set numbers print(f"Processing Record {record_count} of Set {set_count} | {city}") # Add 1 to record count record_count +=1 # Run an API request for each of the cities. try: # Parse the JSON and retrieve data. city_weather = requests.get(city_url).json() # Parse out the needed data. city_lat = city_weather["coord"]["lat"] city_lng = city_weather["coord"]["lon"] city_max_temp = city_weather["main"]["temp_max"] city_humidity = city_weather["main"]["humidity"] city_clouds = city_weather["clouds"]["all"] city_wind = city_weather["wind"]["speed"] city_country = city_weather["sys"]["country"] # Convert the date to ISO standard. city_date = datetime.utcfromtimestamp(city_weather["dt"]).strftime('%Y-%m-%d %H:%M:%S') # Append the city information into city_data list. 
city_data.append({"City": city.title(), "Lat": city_lat, "Lng": city_lng, "Max Temp": city_max_temp, "Humidity": city_humidity, "Cloudiness": city_clouds, "Wind Speed": city_wind, "Country": city_country, "Date": city_date}) # If an error is experienced, skip the city. except: print("City not found. Skipping...") pass # Indicate that Data Loading is complete. print("-----------------------------") print("Data Retrieval Complete ") print("-----------------------------") # - len(city_data) city_data city_data_df = pd.DataFrame(city_data) city_data_df.head() # Create the output file (CSV). output_data_file = "weather_data/cities.csv" # Export the City_Data into a CSV. city_data_df.to_csv(output_data_file, index_label="City_ID") # Extract relevant fields from the DataFrame for plotting. lats = city_data_df["Lat"] max_temps = city_data_df["Max Temp"] humidity = city_data_df["Humidity"] cloudiness = city_data_df["Cloudiness"] wind_speed = city_data_df["Wind Speed"] city_data_df = pd.read_csv("weather_data/cities.csv") city_data_df.head() city_data_df.count() import time # + # Build scatter plot for latitude vs. temperature plt.scatter(lats, max_temps, edgecolor="black", linewidth=1, marker="o", alpha=0.8, label="Cities") # Incorporate the other graph properties plt.title(f'City Latitudes vs. Max Temperature {time.strftime("%x")}') plt.ylabel("Max Temperature (F)") plt.xlabel("Latitude") plt.grid(True) plt.savefig("weather_data/Fig1.png") plt.show() # + # Build the scatter plots for latitude vs. humidity. plt.scatter(lats, humidity, edgecolor="black", linewidths=1, marker="o", alpha=0.8, label="Cities") # Incorporate the other graph properties. plt.title(f"City Latitude vs. Humidity "+ time.strftime("%x")) plt.ylabel("Humidity (%)") plt.xlabel("Latitude") plt.grid(True) # Save the figure. plt.savefig("weather_data/Fig2.png") # Show plot. plt.show() # + # Build the scatter plots for latitude vs. cloudiness. 
plt.scatter(lats, cloudiness, edgecolor="black", linewidths=1, marker="o", alpha=0.8, label="Cities") # Incorporate the other graph properties. plt.title(f"City Latitude vs. Cloudiness (%) "+ time.strftime("%x")) plt.ylabel("Cloudiness (%)") plt.xlabel("Latitude") plt.grid(True) # Save the figure. plt.savefig("weather_data/Fig3.png") # Show plot. plt.show() # + # Build the scatter plots for latitude vs. wind speed. plt.scatter(lats, wind_speed, edgecolor="black", linewidths=1, marker="o", alpha=0.8, label="Cities") # Incorporate the other graph properties. plt.title(f"City Latitude vs. Wind Speed "+ time.strftime("%x")) plt.ylabel("Wind Speed (mph)") plt.xlabel("Latitude") plt.grid(True) # Save the figure. plt.savefig("weather_data/Fig4.png") # Show plot. plt.show() # - # Import linear regression from the SciPy stats module. from scipy.stats import linregress # Create an equal number of latitudes and temperatures. lats = [42.5, 43.9, 8.1, 36.8, 79.9, 69.1, 25.7, 15.3, 12.7, 64.5] temps = [80.5, 75.3, 90.9, 90.0, 40.4, 62.3, 85.4, 79.6, 72.5, 72.0] # Perform linear regression. (slope, intercept, r_value, p_value, std_err) = linregress(lats, temps) # Get the equation of the line. line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) print(line_eq) print(f"The p-value is: {p_value:.3f}") # Calculate the regression line "y values" from the slope and intercept. regress_values = [(lat * slope + intercept) for lat in lats] regress_values # Import Matplotlib. import matplotlib.pyplot as plt # Create a scatter plot of the x and y values. plt.scatter(lats,temps) # Plot the regression line with the x-values and the y coordinates based on the intercept and slope. plt.plot(lats,regress_values,"r") # Annotate the text for the line equation and add its coordinates. 
plt.annotate(line_eq, (10,40), fontsize=15, color="red") plt.xlabel('Latitude') plt.ylabel('Temp') plt.show() # Create a function to create perform linear regression on the weather data # and plot a regression line and the equation with the data. def plot_linear_regression(x_values, y_values, title, y_label, text_coordinates): # Run regression on hemisphere weather data. (slope, intercept, r_value, p_value, std_err) = linregress(x_values, y_values) # Calculate the regression line "y values" from the slope and intercept. regress_values = x_values * slope + intercept # Get the equation of the line. line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) # Create a scatter plot and plot the regression line. plt.scatter(x_values,y_values) plt.plot(x_values,regress_values,"r") # Annotate the text for the line equation. plt.annotate(line_eq, text_coordinates, fontsize=15, color="red") plt.xlabel('Latitude') plt.ylabel(y_label) plt.show() index13 = city_data_df.loc[13] index13 city_data_df["Lat"]>=0 city_data_df.loc[(city_data_df["Lat"]>=0)] # Create Northern and Southern Hemisphere DataFrames. northern_hemi_df = city_data_df.loc[(city_data_df["Lat"] >= 0)] southern_hemi_df = city_data_df.loc[(city_data_df["Lat"] < 0)] southern_hemi_df # Linear regression on the Northern Hemisphere x_values = northern_hemi_df["Lat"] y_values = northern_hemi_df["Max Temp"] # Call the function. plot_linear_regression(x_values, y_values,'Linear Regression on the Northern Hemisphere \n for Maximum Temperature', 'Max Temp',(10,40)) # Linear regression on the Southern Hemisphere x_values = southern_hemi_df["Lat"] y_values = southern_hemi_df["Max Temp"] # Call the function. plot_linear_regression(x_values, y_values,'Linear Regression on the Southern Hemisphere \n for Maximum Temperature', 'Max Temp',(-50,90)) # Linear regression on the Northern Hemisphere x_values = northern_hemi_df["Lat"] y_values = northern_hemi_df["Humidity"] # Call the function. 
plot_linear_regression(x_values, y_values,'Linear Regression on the Northern Hemisphere \n for % Humidity', '% Humidity',(40,10)) # Linear regression on the Southern Hemisphere x_values = southern_hemi_df["Lat"] y_values = southern_hemi_df["Humidity"] # Call the function. plot_linear_regression(x_values, y_values,'Linear Regression on the Southern Hemisphere \n for % Humidity', '% Humidity',(-50,15)) # Correlation between latitude and % Cloudiness x_values = northern_hemi_df["Lat"] y_values = northern_hemi_df["Cloudiness"] plot_linear_regression(x_values, y_values, 'Linear Regression on the Northern Hemisphere for % Cloudiness', "% Cloudiness",(10,55)) x_values = southern_hemi_df["Lat"] y_values = southern_hemi_df["Cloudiness"] plot_linear_regression(x_values, y_values, 'Linear Regression on the Southern Hemisphere for % Cloudiness', "% Cloudiness",(-50,60)) # Linear regression on the Northern Hemisphere x_values = northern_hemi_df["Lat"] y_values = northern_hemi_df["Wind Speed"] # Call the function. plot_linear_regression(x_values, y_values, 'Linear Regression on the Northern Hemisphere \n for Wind Speed', 'Wind Speed',(40,35)) # Linear regression on the Southern Hemisphere x_values = southern_hemi_df["Lat"] y_values = southern_hemi_df["Wind Speed"] # Call the function. plot_linear_regression(x_values, y_values, 'Linear Regression on the Southern Hemisphere \n for Wind Speed', 'Wind Speed',(-50,35)) # !pip install gmaps # Import the dependencies. import pandas as pd import gmaps import requests # Import the API key. from config import g_key # Store the CSV you saved created in part one into a DataFrame. city_data_df = pd.read_csv("weather_data/cities.csv") city_data_df.head() city_data_df.dtypes # Configure gmaps to use your Google API key. gmaps.configure(api_key=g_key) # + # 1. Assign the locations to an array of latitude and longitude pairs. locations = city_data_df[["Lat", "Lng"]] # 2. Assign the weights variable to some values. 
temperatures = city_data_df["Max Temp"] # 3. Assign the figure variable to the gmaps.figure() attribute. fig = gmaps.figure() # 4. Assign the heatmap_layer variable to the heatmap_layer attribute and add in the locations. heatmap_layer = gmaps.heatmap_layer(locations, weights=temperatures) # 5. Add the heatmap layer. fig.add_layer(heatmap_layer) # 6. Call the figure to plot the data. fig # - # Heatmap of temperature # Get the latitude and longitude. locations = city_data_df[["Lat", "Lng"]] # Get the maximum temperature. max_temp = city_data_df["Max Temp"] # Assign the figure variable. fig = gmaps.figure() # Assign the heatmap variable. heat_layer = gmaps.heatmap_layer(locations, weights=temps) # Add the heatmap layer. fig.add_layer(heat_layer) # Call the figure to plot the data. fig # Get the maximum temperature. max_temp = city_data_df["Max Temp"] temps = [] for temp in max_temp: temps.append(max(temp, 0)) temps # + # Heatmap of percent humidity locations = city_data_df[["Lat", "Lng"]] humidity = city_data_df["Humidity"] fig = gmaps.figure(center=(30.0, 31.0), zoom_level=1.5) heat_layer = gmaps.heatmap_layer(locations, weights=humidity, dissipating=False, max_intensity=300, point_radius=4) fig.add_layer(heat_layer) # Call the figure to plot the data. fig # + # Heatmap of percent humidity locations = city_data_df[["Lat", "Lng"]] clouds = city_data_df["Cloudiness"] fig = gmaps.figure(center=(30.0, 31.0), zoom_level=1.5) heat_layer = gmaps.heatmap_layer(locations, weights=clouds, dissipating=False, max_intensity=300, point_radius=4) fig.add_layer(heat_layer) # Call the figure to plot the data. fig # + # Heatmap of percent humidity locations = city_data_df[["Lat", "Lng"]] wind = city_data_df["Wind Speed"] fig = gmaps.figure(center=(30.0, 31.0), zoom_level=1.5) heat_layer = gmaps.heatmap_layer(locations, weights=wind, dissipating=False, max_intensity=300, point_radius=4) fig.add_layer(heat_layer) # Call the figure to plot the data. 
fig # - # Ask the customer to add a minimum and maximum temperature value. min_temp = float(input("What is the minimum temperature you would like for your trip? ")) max_temp = float(input("What is the maximum temperature you would like for your trip? ")) # Filter the dataset to find the cities that fit the criteria. preferred_cities_df = city_data_df.loc[(city_data_df["Max Temp"] <= max_temp) & \ (city_data_df["Max Temp"] >= min_temp)] preferred_cities_df.head(10) preferred_cities_df.count() # Create DataFrame called hotel_df to store hotel names along with city, country, max temp, and coordinates. hotel_df = preferred_cities_df[["City", "Country", "Max Temp", "Lat", "Lng"]].copy() hotel_df["Hotel Name"] = "" hotel_df.head(10) # + # Set parameters to search for a hotel. params = { "radius": 5000, "type": "lodging", "key": g_key } for index, row in hotel_df.iterrows(): # get lat, lng from df lat = row["Lat"] lng = row["Lng"] params["location"] = f"{lat},{lng}" base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json" # Make request and get the JSON data from the search. hotels = requests.get(base_url, params).json() try: hotel_df.loc[index, "Hotel Name"] = hotels["results"][0]["name"] except: print("Hotel not found") # - hotel_df # Add a heatmap of temperature for the vacation spots and marker for each city. locations = hotel_df[["Lat", "Lng"]] max_temp = hotel_df["Max Temp"] fig = gmaps.figure(center=(30.0, 31.0), zoom_level=1.5) heat_layer = gmaps.heatmap_layer(locations, weights=max_temp, dissipating=False, max_intensity=300, point_radius=4) marker_layer = gmaps.marker_layer(locations) fig.add_layer(heat_layer) fig.add_layer(marker_layer) # Call the figure to plot the data. fig # + info_box_template = """ <dl> <dt>Hotel Name</dt><dd>{Hotel Name}</dd> <dt>City</dt><dd>{City}</dd> <dt>Country</dt><dd>{Country}</dd> <dt>Max Temp</dt><dd>{Max Temp} °F</dd> </dl> """ # Store the DataFrame Row. 
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()] # Add a heatmap of temperature for the vacation spots and a pop-up marker for each city. locations = hotel_df[["Lat", "Lng"]] max_temp = hotel_df["Max Temp"] fig = gmaps.figure(center=(30.0, 31.0), zoom_level=1.5) heat_layer = gmaps.heatmap_layer(locations, weights=max_temp,dissipating=False, max_intensity=300, point_radius=4) marker_layer = gmaps.marker_layer(locations, info_box_content=hotel_info) fig.add_layer(heat_layer) fig.add_layer(marker_layer) # Call the figure to plot the data. fig
WeatherPy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # The RWTH-1 Borehole # In this notebook, we take a look at temperature data from the RWTH-1 Borehole, drilled near the main building of the RWTH. Its initial purpose was to provide geothermal energy for space heating the newly built [Super-C](https://de.wikipedia.org/wiki/SuperC). Unfotunately, originally predicted **production** temperatures of 60 °C were not met. The water, pumped from a depth of about 2 km, has a temperature of just 31 °C when reaching the surface. # But the borehole is not _cold_. Bottom Hole Temperatures (BHTs) of about 78.6 °C at a depth of about 2500 m suggest a normal average geothermal gradient at this location. # # The task is to visualize the average temperature gradient in the borehole. For this, we need at least information about temperatures at the surface and at depth: # * T$_{surface}$ = 10.5 °C $\pm$ 2 °C # * T$_{2550 m}$ = 78.6 °C $\pm$ 3 °C # + import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # in order to show the plot directly below this code cell #from pylab import * temp = np.array([10.5, 78.6]) depth = np.array([0, 2550]) err = np.array([2, 3]) fig = plt.figure(figsize=[5,8]) plt.fill_betweenx(depth,temp-err,temp+err, color=(.8,.8,.8), label='uncertainty interval') plt.plot(temp,depth,'-', linewidth=4, color='red', label='mean') plt.plot(temp-err,depth,'--', color='gray') plt.plot(temp+err,depth,'--', color='gray') plt.legend() plt.xlabel('Temperature (°C)') plt.ylabel('Depth (m)') plt.ylim(np.max(depth),0) plt.grid(True) # - # Using this data, what is the average geothermal gradient? 
grad = np.round(((temp[1]-temp[0])/(depth[1]-depth[0])*1000),1) print("The mean geothermal gradient over the borehole is {} K/km".format(grad)) # This is obviously just an approximation/simplification, - what could be possible reasons for a deviation from a straight line? # The real geothermal gradient shows variation with depth, reasons for this may be: # # * layers of different thermal conductivities, i.e. heteorogeneity # * (vertical) advective heat transport # # Core measurements of the RWTH-1 well yielded an average thermal conductivity of the rocks of 2.99 W m${-1}$ K$^{-1}$ (using the geometric mean). If we neglect possible advective heat transport, we can estimate the specific heat flow in this area by applying _Fourier's Law of Heat Conduction_: # # $$q = -\lambda \nabla T$$ # # As we assess just the vertical specific heat flow, $\nabla T$ becomes $\frac{\partial T}{\partial z}$. tc = 2.99 # thermal conductivity in W/(mK) q = -tc * grad print("The magnitude of the average specific heat flow is {} mW/m²" .format(np.round(np.abs(q),1))) # If we compare this value to the position on Aachen on a [Heat flow map](http://www.geni.org/globalenergy/library/renewable-energy-resources/europe/Geothermal/Geothermal%20heat%20-%20Potential_files/6-1-100.gif), we see that despite **a lot** of simplifications, the calculated specific heat flow is still in the right interval.
01_T-gradient_RWTH-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tutoriel "Calculate_SRF.py" # # Auteurs: [<NAME>](mailto:<EMAIL>), [<NAME>](mailto:<EMAIL>) # # [INSA Lyon](https://www.insa-lyon.fr), France, 08/10/2021 # # ## Introduction # # Dans le but de pondérer les critères par la méthode SRF (Simos-Roy-Figueira), le code Python [**Calculate_SRF.py**](Calculate_SRF.py) présenté ci-dessous permet d'automatiser les calculs à partir d'un fichier Excel, où l'utilisateur a préalablement classer ses critères. # ## Présentation de la méthode SRF # # La méthode Simos-Roy-Figueira (SRF) est une amélioration de la méthode de Simos, dites la "méthode des cartes". # # La méthode des cartes permet de pondérer les critères. Le décideur choisit la position relative des cartes, représentant les critères, en les classant sous forme pyramidale. Il peut positionner des cartes blanches pour indiquer une importance supplémentaire entre deux rangs. Visuelle et concrète, cette technique est souvent utilisée comme moyen de communication et de pondération des critères. Un traitement mathématique des rangs permet la détermination du poids de chaque critère. # # Cependant, cette méthode a des limites, invisibles pour le décideur. # # Tout d'abord, la pondération n'est pas forcément normalisée (la somme des poids doit valoir 1). # # Ensuite, la position visuelle des critères est modifiée par le traitement mathématique. Deux cartes sur le dernier rang (numéro 1) ont une position évaluée à 1.5. Cette déformation mathématique réduit fortement la liste des poids pouvant être obtenus. # # Enfin, l'importance relative entre le premier et le dernier critère est imposée, ce qui induit des fortes disparités dans la pondération lorsque le nombre de critères est élevé. 
# # La méthode SRF améliore le traitement mathématique des critères, avec l'ajout d'un ratio Z représentant l'importance relative mentionnée précédemment. # ## Présentation du code # # Les différentes étapes du code de la méthode SRF sont présentées ici. # Les modules panda et openpyxl seront utilisés pour extraire les données de la feuille de calcul. import pandas as pd # ## 1. Première étape : Définition des fonctions # # #### 1.1 Récupération du fichier # La première étape est de récupérer les données d'un tableau Excel donné. # Ce dernier se décompose en cinq feuilles : # # 1. Critères : présentation des critères, de leurs indices, leurs descriptions, leurs intérêts et les moyens d'évaluation # 2. Pondération Critères Systèmes : l'utilisateur classe les critères du système et donne un ratio Z # 3. Pondération Critères Enveloppe : l'utilisateur classe les critères de l'enveloppe et donne un ratio Z # 4. Calculs : les étapes de calcul, ici automatisés par un code python, sont réalisées. # 5. Résultats : les résultats sont présentés. # # Le ratio Z correspond à l'importance relative entre le premier et le dernier critère. # # Dans la fonction suivante, 'recuperate_file', le nom 'name' du fichier Excel est indiqué, ainsi que le numéro de la feuille à extraire. Les données sont ensuite renvoyées sous forme de liste pour faciliter le traitement des données. def recuperate_file(name, index_sheet): """Récupère une feuille du fichier Excel pour la transformer en objet liste """ feuille_recup = pd.read_excel(name, sheet_name=index_sheet) return feuille_recup.values.tolist() # #### 1.2 Définition des dictionnaires # La correspondance entre le nom des critères et leur indice est réalisé par les fonctions dico_. Une fonction est écrite pour chaque thème. # # Attention ! 
Les indices utilisés ne sont valables uniquement pour le fichier Excel "Pondération Critères.xlsm" # Cette limite à la méthode de Simos pourrait être améliorée par l'encodage du système de choix, réalisé pour l'instant sur Excel. # + def fdico_economie(feuille): """ Définition du dictionnaire Economie contenant la correspondance entre l'index et son critère """ dico_economie = {} for x in range(7,15): dico_economie[(feuille[x][2])] = (feuille[x][1]) return dico_economie def fdico_technique(feuille): """ Définition du dictionnaire Technique contenant la correspondance entre l'index et son critère """ dico_technique = {} for x in range(18, 22): dico_technique[(feuille[x][2])] = (feuille[x][1]) return dico_technique def fdico_social(feuille): """ Définition du dictionnaire Social contenant la correspondance entre l'index et son critère """ dico_social = {} for x in range(25,30): dico_social[(feuille[x][2])] = (feuille[x][1]) return dico_social def fdico_environnement(feuille): """ Définition du dictionnaire Environnement contenant la correspondance entre l'index et son critère """ dico_environnement = {} for x in range(33,40): dico_environnement[(feuille[x][2])] = (feuille[x][1]) return dico_environnement # - # #### 1.3 Récupération des choix de l'utilisateur # Les choix de l'utilisateur sont récupérés sous forme de liste. Pour cela, la fonction tableau_choix reçoit la 'feuille' où se situent les données à récupérer, et les indices des lignes du tableau de choix. # def tableau_choix(feuille,dico,index): """Récupération des choix de l'utilisateur sous forme d'une liste. 
Retourne également le ratio Z""" tableau_choix_environnement = [[0 for j in range(4)] for i in range(15)] a = index[0] b = index[1] for x in range(a,b): tableau_choix_environnement[x - a][0]= x - a+1 for y in range(2,5): tableau_choix_environnement[x - a][y - 1]=str(feuille[x][y]) if (dico.get(tableau_choix_environnement[x - a][y - 1], 1) != 1) : tableau_choix_environnement[x - a][y - 1] = dico.get(tableau_choix_environnement[x - a][y - 1]) return tableau_choix_environnement, feuille[index[1]+1][2] # #### 1.4 Définition des fonctions de calcul # Les lignes ci-dessous présentent les fonctions de calcul utilisées pour déterminer les paramètres de la méthode SRF. # # Premièrement, nous recevons une matrice contenant les critères rangés par l'utilisateur. Une premire fonction permet de supprimer les lignes ne servant pas au classement des critères. def traitement(l): """Permet de retirer les cartes blanches inutiles""" while l[-1][1] == 'nan': del l[-1] return l # Une seconde méthode permet d'affecter la numérotation des rangs, à l'inverse du classement. Le dernier classement se voit donc affecter le numéro 1. def calcul_rang(l): listnouveaurang = [None]*len(l) for i in range(len(l)): listnouveaurang[i] = l[len(l)-i-1] listnouveaurang[i][0]=i+1 return listnouveaurang # La méthode calcul_n permet de calculer le nombre de rang dans le classement donné 'l'. # A chaque ligne non vide, le paramètre n est incrémenté. def calcul_n(l): n = 0 for i in range(len(l)): for j in range(1, len(l[i])): if l[i][j] != 'nan': n +=1 return n # Cette fonction 'calcul_er_prime' permet de déterminer le nombre de cartes blanches entre deux rangs successifs. # Si la ligne est vide (remplie de 'nan'), la matrice 'l' ajoute un 1. Sinon, on ajoute 0 carte blanche. # Le n correspond au nombre de cartes blanches au rang précédent. 
def calcul_er_prime(l,n): for i in range(len(l)-1): if l[i+1][1] == 'nan': l[i].append(1) else: l[i].append(0) if n == 0: l[len(l)-1].append(1) else: l[len(l)-1].append(0) for i in range(1,len(l)): if l[len(l)-i-1][-1] != 0: l[len(l) - i - 1][-1] = l[len(l)-i-1][-1] + l[len(l)-i][-1] for i in range(len(l)): if l[i][1] == 'nan' and l[i][2] == 'nan' and l[i][3] == 'nan': l[i][-1] = 0 return l # La méthode calcul_er permet de calculer le paramètre 'er' à partir de la liste des er' calculée précédemment. def calcul_er(l): e = 0 for i in range(len(l)): l[i].append(l[i][-1]+1) e += l[i][-1] return l,e # La méthode calcul_u permet de calculer le paramètre 'u' à partir de la liste des er calculée précédemment. def calcul_u(z,e): u = (z-1)/e return u # La méthode calcul_k_i_prime permet de calculer le paramètre ki'. def calcul_k_i_prime(u,l): somme_er = 0 for i in range(len(l)): if l[i][1] != 'nan': l[i].append(1+u*somme_er) somme_er += l[i][-2] else: l[i].append('nan') somme_er += l[i][-2] return l # Enfin, la méthode traitement2 permet de créer un tableau associant le critère et le ki' correspondant. def traitement2(l): """Permet d'obtenir un tableau avec le critère et son ki' associé""" liste_traitee = [] for i in range(len(l)): for j in range(1 , 4): if l[i][j] != 'nan': liste_traitee.append([l[i][j],l[i][-1]]) return liste_traitee # Une fois le tableau [critère, ki'] obtenu, la somme des ki' est calculée. def calcul_K_prime(l): K_prime = 0 for i in range(len(l)): K_prime += l[i][1] return K_prime # Le ki*, correspondant à la pondération finale non arrondie, est calculé. def calcul_ki_etoile(l,K_prime): for i in range(len(l)): l[i].append((100/K_prime)*l[i][1]) return l # Un traitement mathématique est effectué pour normaliser les ki* obtenus. Pour cela, un ki" est calculé en tronquant le ki*. def calcul_ki_2prime(l): for i in range(len(l)): l[i].append(int(l[i][2]*100)/100) return l # La somme des ki" est calculée. 
def calcul_K_2prime(l): K_2prime = 0 for i in range(len(l)): K_2prime += l[i][3] return K_2prime # La pondération finale est ensuite calculée. Pour cela, des opérateurs [di, M, L, F] sont utilisés. Un paramètre w est défini pour choisir le nombre de décimales. Enfin, les matrices F+ et F- indiquent si le poids doit être arrondi au supérieur ou à l'inférieur. def calcul_ki(l,K_2prime,n): w = 2 for i in range(len(l)): l[i].append((10 ** (-w) - (l[i][2] - l[i][3])) / l[i][2]) # Calcul di l[i].append((l[i][2] - l[i][3]) / l[i][2]) # Calcul di_barre if l[i][4] > l[i][5]: #Calcul M l[i].append(1) else: l[i].append(0) if l[i][6] == 1: #Calcul di_M l[i].append(l[i][4]) else: l[i].append('-') v = int(10 ** (w) * (100 - K_2prime)) # Calcul v m = 0 # Calcul m for i in range(len(l)): m += l[i][6] L = [] # Calcul L for i in range(len(l)): if l[i][7] != '-': L.append(l[i][7]) L.sort() if L == []: L = [0] * n vmn = v + m - n if m+v > n: for i in range(vmn,len(L)): L[i] = 0 else: L = [0]*n #Calcul flèche L for i in range(len(L)): compteur_di = 0 if L[i] == l[0][4]: compteur_di +=1 if compteur_di != 0: l[0].append(1) else: l[0].append(0) vmn = v + m -n for i in range(1,len(l)): compteur_di = 0 for j in range(len(L)): if L[j] == l[i][4]: compteur_di +=1 if compteur_di != 0: s=0 j = 0 while j != i and i < len(L): s+=L[j] j +=1 if s>vmn : l[i].append(0) else: l[i].append(1) else: l[i].append(0) for i in range(len(l)): #Calcul dibarre_différent_M if l[i][6] == 0: l[i].append(l[i][5]) else: l[i].append('-') Lbarre = [] # Calcul Lbarre nvm = n - v -m for i in range(len(l)): if l[i][9] != '-': Lbarre.append(l[i][9]) Lbarre.sort() if Lbarre == []: Lbarre = [0] * n if m + v < n: for i in range(nvm, len(Lbarre)): Lbarre[i] = 0 else: Lbarre = [0] * n #Calcul flèche Lbarre for i in range(len(Lbarre)): compteur_dibarre = 0 if Lbarre[i] == l[0][5]: compteur_di += 1 if compteur_di != 0: l[0].append(1) else: l[0].append(0) for i in range(1, len(l)): compteur_dibarre = 0 for j in 
range(len(Lbarre)): if Lbarre[j] == l[i][5]: compteur_dibarre += 1 if compteur_dibarre != 0: s = 0 j = 0 while j != i and i < len(Lbarre): s += Lbarre[j] j +=1 if s > nvm: l[i].append(0) else: l[i].append(1) else: l[i].append(0) #Calcul F+ / F- for i in range(len(l)): # F+ if m + v < n: conditionF = n - v - m if l[i][6] == 0: l[i].append(1) else: l[i].append(0) else: conditionF = n - v fbis = [0] * len(l) verif = 0 j = n-1 while verif < conditionF and j!=-1: if l[j][10]==1 and l[j][6]==0: fbis[j] = 0 if verif <= nvm: l[j][11] = fbis[j] verif = verif +1 j = j-1 # Calcul F- puis ki for i in range(len(l)): if l[i][11] == 0: #F- l[i].append(1) else: l[i].append(0) if l[i][12] == 1: #ki l[i].append(int(l[i][2]*(10**w))/10**w) else: l[i].append(int(l[i][2]*(10**w)+1)/(10**w)) veri = 0 for i in range(len(l)): veri += l[i][13] return l,veri # ## 2. Deuxième étape : Méthode de Simos-Roy-Figueira # # #### 2.1 Récupération des données # La première étape est de récupérer les choix de l'utilisateur à partir du tableau excel 'Pondération Critères. xlsm'. Pour cela, les fonctions de récupération de données (présentées parties 1.1 et 1.2) sont utilisées. # # Les feuilles 1, 2 et 3 sont récupérées. Un message d'avertissement apparaît pour indiquer que le module Openpyxl ne supporte pas les données 'Data Validation' dans Excel. Nos données ne sont pas concernées. feuille1 = recuperate_file('Pondération critères.xlsm', 0) feuille2 = recuperate_file('Pondération critères.xlsm', 1) feuille3 = recuperate_file('Pondération critères.xlsm', 2) # Les dictionnaires associant critères et indices sont créés. 
dico_economie = fdico_economie(feuille1) dico_environnement = fdico_environnement(feuille1) dico_social = fdico_social(feuille1) dico_technique = fdico_technique(feuille1) # Les index permettant d'identifier la plage de données dans la feuille de calcul sont définis index_environnement = [92,107] index_economie = [21,36] index_social = [69,84] index_technique = [45,60] # Les tableaux contenant le classement des critères sont créés. tableau_choix_environnement, z_environnement = tableau_choix(feuille2,dico_environnement,index_environnement) tableau_choix_economie, z_economie = tableau_choix(feuille2,dico_economie,index_economie) tableau_choix_technique, z_technique = tableau_choix(feuille2,dico_technique,index_technique) tableau_choix_social, z_social = tableau_choix(feuille2,dico_social,index_social) # #### 2.2 Traitement des données # La deuxième étape permet de traiter les données pour déterminer la pondération des critères avec les fonctions définies partie 1.3 # + tableau = traitement(tableau_choix_economie) tableau = calcul_rang(tableau) n = calcul_n(tableau) tableau = calcul_er_prime(tableau,n) tableau,e = calcul_er(tableau) u = calcul_u(z_economie,e) tableau = calcul_k_i_prime(u,tableau) tableau = traitement2(tableau) K_prime = calcul_K_prime(tableau) tableau = calcul_ki_etoile(tableau,K_prime) tableau = calcul_ki_2prime(tableau) K_2prime = calcul_K_2prime(tableau) tableau,total = calcul_ki(tableau,K_2prime,n) # - # Finalement, le critère associé à sa pondération est affiché. for i in range(0,len(tableau)): print(str(tableau[i][0])+" "+str(tableau[i][13])) print("Somme des poids = "+str(total))
Tutoriel Calculate_SRF .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Module 3 - Accessing Data on the Web # # _My suggested use for this file is as an extra tab you keep open on the right side of your JupyterLab screen, and treat it like a notepad you'd use in a classroom. The left half of the screen will be good for files that have involved coding that should be stand alone files._ # # ## Skills AND CONCEPTS to learn in this Module # # You can find the tasks for this module [on the dashboard page]. # # Below are the key skills and concepts I want you to learn in this module in _roughly_ the order we will cover them. # # _**Note: This list is the key stuff, but not everything. We are scraping the surface **_ # - [ ] Scraping data # - [ ] download webpages with Pandas # - [ ] know how to use APIs to scrape data # - [ ] Opening and parsing webpages # - [ ] retrieve URLs and send manual API queries # - [ ] parse single pages for data/text/links # - [ ] build spiders to scrape and save many webpages # - [ ] Working with textual data # - [ ] turn unstructured data into structured data # - [ ] learn to work with Python strings # - [ ] use regex to search for patterns in strings # - [ ] use `near_regex` to search for patterns close to each other # + [markdown] tags=[] # ## Web Scraping Notes # - put your notes here # ## Opening/Parsing Webpages Notes put your notes here # ## Strings/Regex/Near_Regex Notes put your notes here (including common functions/methods)
handouts/Module 3 Notes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import cv2
import numpy as np

# Load the test image and display it (blocks until a key is pressed).
image = cv2.imread(r'C:\Users\<NAME>\Downloads\shapessss.png')
cv2.imshow("Image", image)
img = image  # NOTE: alias, not a copy — drawing on either modifies both
cv2.waitKey()
cv2.destroyAllWindows()

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# # Drawing Contours

# Binarise with an inverted threshold (type 1 == THRESH_BINARY_INV) so shapes
# darker than 127 become foreground. Max value fixed from 225 to 255: any
# non-zero value works for findContours, but 255 is the intended full-scale
# mask value.
ret, thresh = cv2.threshold(gray, 127, 255, 1)
contours, hi = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
cv2.imshow("Contours", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

len(contours)

# # Identifying shapes and writing their names and number of sides (sorted)

# Approximate every contour by a polygon (1% of perimeter tolerance) and
# record its vertex count.
length = []
for cnt in contours:
    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    length.append(len(approx))
length.sort()
length

type(len(contours))


def _contour_centroid(cnt):
    """Return the integer (cx, cy) centroid of a contour from its moments."""
    M = cv2.moments(cnt)
    return int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])


# +
# Reload a clean copy of the image so the green outlines from above are gone.
image = cv2.imread(r'C:\Users\<NAME>\Downloads\shapessss.png')
img = image

# vertex count -> (label, fill colour BGR, text dx, text dy, text scale).
# Counts not listed here (and < 15) are left unlabelled, as before.
_SHAPES = {
    3: ('Triangle-3', (250, 10, 100), -40, 10, 0.5),
    4: ('Rectangle-4', (0, 155, 255), -60, 5, 0.6),
    6: ('Hexagon-6', (125, 5, 255), -45, 5, 0.5),
    12: ('Star-12', (78, 255, 78), -50, 5, 0.7),
}

# Label every contour exactly once. The original looped over a hard-coded
# range(0, 5) of sorted vertex counts and re-drew every matching contour on
# each pass (assuming exactly 5 shapes); iterating the contours directly
# produces the same final image for any number of shapes.
for cnt in contours:
    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    sides = len(approx)
    if sides >= 15:
        # Many short edges: treat as a circle.
        shape_, colour, dx, dy, scale = 'Circle', (0, 255, 255), -30, 10, 0.7
    elif sides in _SHAPES:
        shape_, colour, dx, dy, scale = _SHAPES[sides]
    else:
        continue  # vertex count we do not label (e.g. 5, 7-11, 13-14)
    cv2.drawContours(image, [cnt], 0, colour, -1)
    cx, cy = _contour_centroid(cnt)
    cv2.putText(image, shape_, (cx + dx, cy + dy),
                cv2.FONT_HERSHEY_SIMPLEX, scale, (0, 0, 0), 1)

cv2.imshow('identified shapes', image)
cv2.waitKey(0)  # the original called waitKey(0) twice; once is enough
cv2.destroyAllWindows()
Assignments/Tanya/Task-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Sample for running TensorFlow DeepFM example # # The notebook shows how to use Submarine ctr library to train and evaluate a model # + pycharm={"name": "#%%\n"} # !cat deepfm.json # - from submarine.ml.tensorflow.model import DeepFM # + pycharm={"name": "#%%\n"} model = DeepFM(json_path="deepfm.json") model.train() result = model.evaluate() print("Model metrics : ", result)
submarine-sdk/pysubmarine/example/deepfm_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + colab={"base_uri": "https://localhost:8080/"} id="3jkemgL719gT" outputId="263ce4e2-965a-4ea0-bcfa-e3b09c6e668a"
import pandas as pd
from string import punctuation
# !pip install stop_words
from stop_words import get_stop_words
# !pip install pymorphy2
from pymorphy2 import MorphAnalyzer
import re
import numpy as np
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Input, Embedding, Conv1D, GlobalMaxPool1D, SimpleRNN, LSTM, GRU, Masking, Flatten
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import TensorBoard
# from keras.objectives import categorical_crossentropy
from keras.callbacks import EarlyStopping

# + colab={"base_uri": "https://localhost:8080/"} id="yRPJQc2aBJC8" outputId="011d4774-d26a-4232-b7a7-391c03fd1c88"
from google.colab import drive
drive.mount('/content/gdrive')

# + id="rsm1B7L_BTnC"
# Pre-split train/val/test CSVs; each is expected to carry 'text' and
# 'class' columns (see usage below).
df_train = pd.read_csv("/content/gdrive/MyDrive/train.csv")
df_test = pd.read_csv("/content/gdrive/MyDrive/test.csv")
df_val = pd.read_csv("/content/gdrive/MyDrive/val.csv")

# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="RT8RxAQD19gU" outputId="16aa87d1-8bea-499d-f8e4-b54d0246a0c0"
df_train

# + id="SH7LEQ0V19gV"
sw = set(get_stop_words("ru"))
exclude = set(punctuation)
morpher = MorphAnalyzer()


def preprocess_text(txt):
    """Normalise a Russian text for classification.

    Strips punctuation, lower-cases, merges the negation particle with the
    adjacent word, drops stop words and lemmatises with pymorphy2.
    """
    txt = str(txt)
    txt = "".join(c for c in txt if c not in exclude)
    txt = txt.lower()
    # Raw string fixed: "\s" in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python). Runtime pattern is unchanged.
    # NOTE(review): this glues the particle to the *preceding* token
    # ("я не знаю" -> "яне знаю"); presumably the intent was to attach it to
    # the following word — confirm.
    txt = re.sub(r"\sне", "не", txt)
    txt = [morpher.parse(word)[0].normal_form for word in txt.split() if word not in sw]
    return " ".join(txt)


df_train['text'] = df_train['text'].apply(preprocess_text)
df_val['text'] = df_val['text'].apply(preprocess_text)
df_test['text'] = df_test['text'].apply(preprocess_text)

# + id="27Fh3TG019gW"
text_corpus_train = df_train['text'].values
text_corpus_valid = df_val['text'].values
text_corpus_test = df_test['text'].values

# + id="PgIxY7nr19gY"
# Fit the tokenizer on the training corpus only, then encode all three
# splits and pad them to the longest training sentence.
tokenizer = Tokenizer(num_words=None,
                      filters='#$%&()*+-<=>@[\\]^_`{|}~\t\n',
                      lower = False, split = ' ')
tokenizer.fit_on_texts(text_corpus_train)

sequences_train = tokenizer.texts_to_sequences(text_corpus_train)
sequences_val = tokenizer.texts_to_sequences(text_corpus_valid)
sequences_test = tokenizer.texts_to_sequences(text_corpus_test)

word_count = len(tokenizer.index_word) + 1  # +1 for the padding index 0
training_length = max([len(i.split()) for i in text_corpus_train])

X_train = pad_sequences(sequences_train, maxlen=training_length)
X_valid = pad_sequences(sequences_val, maxlen=training_length)

# + id="5ZORrTlk19gY"
y_train = df_train['class'].values
y_val = df_val['class'].values

# + id="7E8zI7k67_PA"
def make_note(name, model, df):
    """Evaluate `model` on the validation split and append (name, accuracy)
    as a new row of the scoreboard `df` (mutated in place and returned).

    Reads the module-level X_valid / y_val.
    """
    score = model.evaluate(X_valid, y_val, batch_size=512, verbose=1)
    length = len(df.Name)  # next free integer index ("lenght" typo fixed)
    df.loc[length] = name, score[1]
    return df

# + id="-jBAWFPM8Bwk"
df_score = pd.DataFrame(columns=['Name', 'Score'])

# + id="uaoFZfxH19ga"
# Baseline: embedding -> SimpleRNN -> dense head (binary classifier).
model = Sequential()
model.add(
    Embedding(input_dim=word_count,
              input_length=training_length,
              output_dim=30,
              trainable=True,
              mask_zero=True))
model.add(Masking(mask_value=0.0))
model.add(SimpleRNN(64))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'])

# + colab={"base_uri": "https://localhost:8080/"} id="iPwXc30m5L2t" outputId="17dc0c10-95aa-468c-8746-b1464d641810"
model.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="tG-TTOcf19ga" outputId="afc45d22-7022-4bf0-9cc1-7cbb203751f3"
early_stopping=EarlyStopping(monitor='val_loss')
history = model.fit(X_train, y_train,
                    batch_size=512,
                    epochs=10,
                    verbose=1,
                    validation_split=0.1,
                    callbacks=[early_stopping])

# + colab={"base_uri":
# + colab={"base_uri": "https://localhost:8080/"} id="O7Nss5et19ga" outputId="1df9e981-052a-44ba-8fd6-989285420231"
# Report validation loss/accuracy for the SimpleRNN baseline trained above.
score = model.evaluate(X_valid, y_val, batch_size=512, verbose=1)
print('\n')
print('Test score:', score[0])
print('Test accuracy:', score[1])

# + colab={"base_uri": "https://localhost:8080/", "height": 97} id="sjYm5aYD8ErI" outputId="4961e478-92c7-43d0-ceff-305ae9cde07b"
make_note('SimpleRNN', model, df_score)

# + colab={"base_uri": "https://localhost:8080/"} id="PmXaGHbN19gb" outputId="074a1afe-2b48-4ad6-e34f-c3702c21694b"
# Same head as the baseline, recurrent layer swapped for an LSTM.
model = Sequential()
model.add(
    Embedding(input_dim=word_count,
              input_length=training_length,
              output_dim=30,
              trainable=True,
              mask_zero=True))
model.add(Masking(mask_value=0.0))
model.add(LSTM(64, recurrent_dropout=0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'])

early_stopping=EarlyStopping(monitor='val_loss')
history = model.fit(X_train, y_train,
                    batch_size=2048,
                    epochs=10,
                    verbose=1,
                    validation_split=0.1,
                    callbacks=[early_stopping])

# + colab={"base_uri": "https://localhost:8080/"} id="kmdp6ACX19gc" outputId="23a83a24-ed68-4d23-be0e-09adae8f8572"
score = model.evaluate(X_valid, y_val, batch_size=512, verbose=1)
print('\n')
print('Test score:', score[0])
print('Test accuracy:', score[1])

# + colab={"base_uri": "https://localhost:8080/", "height": 128} id="2NhxvfFn8JCC" outputId="50903b01-c02d-4cc2-a2ff-e2bfa4c5a8c1"
make_note('LSTM', model, df_score)

# + colab={"base_uri": "https://localhost:8080/"} id="11ZlUVqc4xub" outputId="11e62e8a-bd6f-4a7d-a569-ea01f19d9008"
# Recurrent layer swapped for a GRU.
model = Sequential()
model.add(
    Embedding(input_dim=word_count,
              input_length=training_length,
              output_dim=30,
              trainable=True,
              mask_zero=True))
model.add(Masking(mask_value=0.0))
model.add(GRU(64, recurrent_dropout=0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'])

early_stopping=EarlyStopping(monitor='val_loss')
history = model.fit(X_train, y_train,
                    batch_size=512,
                    epochs=10,
                    verbose=1,
                    validation_split=0.1,
                    callbacks=[early_stopping])

# + colab={"base_uri": "https://localhost:8080/"} id="kDsyLFsR4xwm" outputId="e85dadaf-cb97-4f97-8203-0f1959798761"
score = model.evaluate(X_valid, y_val, batch_size=512, verbose=1)
print('\n')
print('Test score:', score[0])
print('Test accuracy:', score[1])

# + colab={"base_uri": "https://localhost:8080/", "height": 159} id="hwr7BeQ08LTn" outputId="6a15f72b-47b3-4921-a36e-6cdd1c37087f"
make_note('GRU', model, df_score)

# + colab={"base_uri": "https://localhost:8080/"} id="-rytXXsS7Lba" outputId="39e3dfd6-4883-4992-d829-5b8e616e2a5b"
# Convolutional variant: a single Conv1D over the embeddings.
model = Sequential()
model.add(
    Embedding(input_dim=word_count,
              input_length=training_length,
              output_dim=30,
              trainable=True,
              mask_zero=True))
model.add(Masking(mask_value=0.0))
model.add(Conv1D(32, kernel_size=5, activation='relu'))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'])

early_stopping=EarlyStopping(monitor='val_loss')
history = model.fit(X_train, y_train,
                    batch_size=2048,
                    epochs=10,
                    verbose=1,
                    validation_split=0.1,
                    callbacks=[early_stopping])

# + colab={"base_uri": "https://localhost:8080/"} id="QIynrSzB_FaP" outputId="58ecaec2-0868-44e5-df4a-a4b06243049c"
model.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="oGs_tABsAATw" outputId="933a9cea-bd90-4cf0-8391-6583b371ed41"
score = model.evaluate(X_valid, y_val, batch_size=512, verbose=1)
print('\n')
print('Test score:', score[0])
print('Test accuracy:', score[1])
print(type(score[1]))

# + colab={"base_uri": "https://localhost:8080/", "height": 190} id="udZEmp4X8OwH" outputId="5e3c4590-1176-41bf-924a-b56ed1353c8f"
make_note('1_Conv', model, df_score)

# + colab={"base_uri": "https://localhost:8080/"} id="Lwe0cV5644jz" outputId="bcb1dedf-431f-4879-aae4-138c40f0f45f"
# Deeper convolutional variant: two stacked Conv1D layers, wide dense head.
# Only 2 epochs here (large Flatten output makes this model expensive).
model = Sequential()
model.add(
    Embedding(input_dim=word_count,
              input_length=training_length,
              output_dim=30,
              trainable=True,
              mask_zero=True))
model.add(Masking(mask_value=0.0))
model.add(Conv1D(64, kernel_size=7, activation='relu'))
model.add(Conv1D(128, kernel_size=5, activation='relu'))
model.add(Flatten())
model.add(Dense(1344, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='tanh'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'])

early_stopping=EarlyStopping(monitor='val_loss')
history = model.fit(X_train, y_train,
                    batch_size=512,
                    epochs=2,
                    verbose=1,
                    validation_split=0.1,
                    callbacks=[early_stopping])

# + colab={"base_uri": "https://localhost:8080/"} id="fBulLZR8Fm44" outputId="0da3bea6-c253-4b63-a3a8-0095747efd4e"
model.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="gzD40Dru-QZ4" outputId="0c4d2a4c-0f9a-4f70-fea6-4ca6f5f6468b"
score = model.evaluate(X_valid, y_val, batch_size=512, verbose=1)
print('\n')
print('Test score:', score[0])
print('Test accuracy:', score[1])

# + colab={"base_uri": "https://localhost:8080/", "height": 221} id="2A0BtpY2-jOe" outputId="c194cb84-df6a-4d28-9bc1-0c43dbaf2270"
make_note('2_Conv', model, df_score)

# + colab={"base_uri": "https://localhost:8080/"} id="_D5WWJa744mQ" outputId="893e5316-8ecc-4bf5-a10b-06ee541cb71b"
# Hybrid: Conv1D front-end feeding an LSTM.
model = Sequential()
model.add(
    Embedding(input_dim=word_count,
              input_length=training_length,
              output_dim=30,
              trainable=True,
              mask_zero=True))
model.add(Masking(mask_value=0.0))
model.add(Conv1D(32, kernel_size=7, activation='relu'))
model.add(LSTM(64, recurrent_dropout=0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'])

early_stopping=EarlyStopping(monitor='val_loss')
# NOTE(review): unlike the other fits, callbacks=[early_stopping] is NOT
# passed here, so early stopping does not apply to this model — presumably
# an oversight; confirm.
history = model.fit(X_train, y_train,
                    batch_size=2048,
                    epochs=2,
                    verbose=1,
                    validation_split=0.1,
                    )

# + id="koz-Y9EInFni" colab={"base_uri": "https://localhost:8080/", "height": 252} outputId="4121eb81-6f47-4fa9-fc22-4f29301d485b"
make_note('Conv_LSTM', model, df_score)

# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="y7y5wG1TB3fS" outputId="3fda6a84-0ea5-4a8a-ff2f-45648e7ffaab"
# Scoreboard so far, best model last.
df_score.sort_values(by=['Score'])

# + colab={"base_uri": "https://localhost:8080/"} id="tCa-AS0EAXEK" outputId="53ab6e5b-4ce0-4103-e8a6-728c2f67f0af"
model.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="ghuxqnqcAIhQ" outputId="e15c17e8-6b73-4637-d4c1-7124dc989209"
# Largest hybrid: two Conv1D layers feeding a wide LSTM and a deep head.
model = Sequential()
model.add(
    Embedding(input_dim=word_count,
              input_length=training_length,
              output_dim=30,
              trainable=True,
              mask_zero=True))
model.add(Masking(mask_value=0.0))
model.add(Conv1D(64, kernel_size=11, activation='relu'))
model.add(Conv1D(128, kernel_size=7, activation='relu'))
model.add(LSTM(256, recurrent_dropout=0.2))
model.add(Dense(512, activation='tanh'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='tanh'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'])

early_stopping=EarlyStopping(monitor='val_loss')
history = model.fit(X_train, y_train,
                    batch_size=512,
                    epochs=2,
                    verbose=1,
                    validation_split=0.1,
                    callbacks=[early_stopping])

# + colab={"base_uri": "https://localhost:8080/"} id="9cIlFdoOIkoC" outputId="54d42bd8-e94f-4d0b-d6fd-dc802925e959"
score = model.evaluate(X_valid, y_val, batch_size=512, verbose=1)
print('\n')
print('Test score:', score[0])
print('Test accuracy:', score[1])

# + colab={"base_uri": "https://localhost:8080/"} id="2tvTTwvdG11v" outputId="d2ed766e-9f56-4cfc-babb-79795c5f0869"
model.summary()

# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="7vxJvaqdAIjL" outputId="b464a89f-3354-4b88-ce67-<PASSWORD>"
make_note('2_Conv_LSTM', model, df_score)

# + [markdown] id="6ZxRKyFjI59A"
# The SimpleRNN layer performs about as well as two convolution layers +
# LSTM. The results table is shown above.

# + id="pR3bgvTzJJM9"
nlp/hw_lesson_8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Compare Bulgarian Constitutional Court decisions (and hand-built word
# dictionaries) by cosine similarity of their average word embeddings.

# +
import numpy as np
from numpy import *
from numpy import random # random data
import csv
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
from itertools import chain
import nltk
nltk.download('punkt')
import codecs
import string
from nltk.corpus import stopwords
import re
import pprint
import random
from urllib import request
from nltk import word_tokenize
from nltk.corpus import brown
from nltk.corpus import wordnet as wn
from nltk.stem import SnowballStemmer
snowball_stemmer = SnowballStemmer("english")
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
# -

import gensim

# the model is organized like this: word = embeddings
# Word2vec-format text file of word vectors, loaded as a KeyedVectors lookup.
small_model = gensim.models.KeyedVectors.load_word2vec_format('/Users/schap/Desktop/Text Analysis/small-embeddings.txt', binary=False)

# +
exclude = set(string.punctuation)
stop_word_list = stopwords.words('english')

# input should be a string - we need a simple pipeline for getting word embeddings
def nlp_simple_pipeline(text):
    """Tokenise `text` into lower-case alphabetic tokens with punctuation
    and English stop words removed. Returns a list of tokens."""
    text = str(text)
    #it depends if the words have been lowercased or not
    text = text.lower()
    text = nltk.word_tokenize(text)
    text = [token for token in text if token not in exclude and token.isalpha()]
    text = [token for token in text if token not in stop_word_list]
    return text
# -

def article_embedding(cleaned_article):
    """Return the mean embedding of all in-vocabulary tokens as a (1, dim)
    numpy array, suitable for sklearn's cosine_similarity.

    Out-of-vocabulary words are printed and skipped.
    NOTE(review): if no token is in the vocabulary, `avg` is an empty array
    and downstream cosine_similarity will fail — confirm inputs are non-empty.
    """
    article_embedd = []
    # for each word in the article, you take the embeddings
    for word in cleaned_article:
        try:
            embed_word = small_model[word]
            article_embedd.append(embed_word)
        except KeyError as e:
            print (e,word)
            continue
    # average vectors of all words (component-wise mean across tokens)
    avg = [float(sum(col))/len(col) for col in zip(*article_embedd)]
    avg = np.array(avg).reshape(1, -1)
    return avg

#d4april211992 is the political parties decision used to make dictionaries
politicalparties = codecs.open("/Users/schap/Desktop/TA_Data/AC/1992/TXT_Files/D4.Apr21.1992.MAJ.txt", "r").read().strip().split('\n')

# +
#pp = str(politicalparties) - can add this straight into the cleaning pipeline
# -

cleaned_politicalparties = nlp_simple_pipeline(politicalparties)

article_embedding(cleaned_politicalparties)

#this is the average word embedding array for the political parties decision
avg_pp = article_embedding(cleaned_politicalparties)

#varna free university case D17.Nov24.1992.MAJ
varnafreeuniversity = codecs.open("/Users/schap/Desktop/TA_Data/AC/1992/TXT_Files/D17.Nov24.1992.MAJ.txt", "r").read().strip().split('\n')

cleaned_vfu = nlp_simple_pipeline(varnafreeuniversity)

avg_vfu = article_embedding(cleaned_vfu)

#D5.April6.1993.MAJ, civil service case
civilservice = codecs.open("/Users/schap/Desktop/TA_Data/AC/1993/TXT_Files/D5.Apr6.1993.MAJ.txt", "r").read().strip().split('\n')

cleaned_cs = nlp_simple_pipeline(civilservice)

avg_cs = article_embedding(cleaned_cs)

from sklearn.metrics.pairwise import cosine_similarity

# # Cosine Similarity Between the 'Political' Cases used to make dictionaries

print("compare political parties case and varna free university case",cosine_similarity(avg_pp, avg_vfu))
print("compare political parties case and civil service case",cosine_similarity(avg_pp, avg_cs))
print("compare political parties case and political parties case", cosine_similarity(avg_pp, avg_pp)) #for control

# ## code to get avg embedding for appropriations cases

# +
#this is a state budget case D11.July17.1995.MAJ.txt
# -

statebudget = codecs.open("/Users/schap/Desktop/TA_Data/AC/1995/TXT_Files/D11.July17.1995.MAJ.txt", "r").read().strip().split('\n')

cleaned_sb = nlp_simple_pipeline(statebudget)

avg_sb = article_embedding(cleaned_sb)

# # Cosine Similarity between Political Cases and Appropriations

print("compare state budget case and varna free university case",cosine_similarity(avg_sb, avg_vfu))
print("compare state budget case and civil service case",cosine_similarity(avg_sb, avg_cs))
print("compare state budget case and political parties case", cosine_similarity(avg_sb, avg_pp))

# ## Code to get avg embedding for random case

# this is a random case I chose D4.Feb27.1996.MAJ.txt, this is the summary on the spreadsheet: unconstitutionality of the provisions of Article 90 of the Succession Act in so far as it is provided that testament , drawn up after inclusion in the labor co farms or other entities based on these agricultural organizations of cooperative property , the ownership of which is recovered in Art 0.10 , paragraph 1 OUFLA no effect on these properties

randomcase = codecs.open("/Users/schap/Desktop/TA_Data/AC/1996/TXT_Files/D4.Feb27.1996.MAJ.txt", "r").read().strip().split('\n')

cleaned_rc = nlp_simple_pipeline(randomcase)

avg_rc = article_embedding(cleaned_rc)

# # Cosine Similarity between Political Cases and Random Case

print("compare random case and varna free university case",cosine_similarity(avg_rc, avg_vfu))
print("compare random case and civil service case",cosine_similarity(avg_rc, avg_cs))
print("compare random case and political parties case", cosine_similarity(avg_rc, avg_pp))

# # Similarities using the dictionaries we created

# Hand-built keyword dictionaries; each is embedded like a document below.
dict_legal = "admissible admission allegation argument authorized compatible condition convention dispute eligible establish establishment exact facts hypothesis inadmissible incompatible ineligible jurisdiction literal mandatory norm obligation opinion precise prerequisite regulation request requirement right rules text threshold unauthorized consistent registered status prohibit purpose violation facts protective norm function interpretation illegal rejected ruling legal sense competence sanction inviolable meaning guarantee protection proof definition factual authority strict regulation regulating instructions law contradiction obligation objective formal procedure compliance principle legislation application objectively justified violation merit argument right binding rules jurisdiction must not be ruled that restrictions dismissed amendment inadmissible mandatory incompatible procedure pursuant adopted procedural appeals interpretation obligation"

dict_political = "democracy democratization legislature minority movement organization party policy public public opinion rallies security sympathizer platform opposition totalitarian political party ethnic religious social political will exposing exposure objectified political activity ban minority Turkish gross exhaustive separate powers separation of powers nature intimate atheist community nature believers tolerance respect natural absolutely policy vote of no confidence suffrage legislative exercised legitimate interests fundamental equal footing candidate citizenship agricultural property land owner community borders equivalent quality"

dict_fuzzy = "community comparison conscience cultural depend discretion ethnic history/historical ideas identity inherent integrity intent interest language natural pluralism power racial religious respect scope tolerance unity values virtues clear not clear literally contradiction substantiate decisively incidentally conformity implications satisfied occupy participation disputes right remedies composition constitutional illegitimate legitimate"

#legal dictionary
cleaned_legal = nlp_simple_pipeline(dict_legal)

avg_legal = article_embedding(cleaned_legal)

avg_legal

#Political dictionary
cleaned_political = nlp_simple_pipeline(dict_political)

avg_political = article_embedding(cleaned_political); avg_political

#Fuzzy dictionary
cleaned_fuzzy = nlp_simple_pipeline(dict_fuzzy)

avg_fuzzy = article_embedding(cleaned_fuzzy); avg_fuzzy

# # Cosine Similarities Between Dictionaries

print("compare legal and political dictionaries",cosine_similarity(avg_legal, avg_political))
print("compare legal and fuzzy dictionaries",cosine_similarity(avg_legal, avg_fuzzy))
print("compare political and fuzzy dictionaries", cosine_similarity(avg_political, avg_fuzzy))

# ## Salient case versus all three dictionaries

# ### Political Parties

print("compare political parties case and legal dictionary",cosine_similarity(avg_pp, avg_legal))
print("compare political parties case and political dictionary",cosine_similarity(avg_pp, avg_political))
print("compare political parties case and fuzzy dictionary",cosine_similarity(avg_pp, avg_fuzzy))

# ### Varna Free University

print("compare varna free university case and legal dictionary",cosine_similarity(avg_vfu, avg_legal))
print("compare varna free university case and political dictionary",cosine_similarity(avg_vfu, avg_political))
print("compare varna free university case and fuzzy dictionary",cosine_similarity(avg_vfu, avg_fuzzy))

# ### Civil Service

print("compare civil service case and legal dictionary",cosine_similarity(avg_cs, avg_legal))
print("compare civil service case and political dictionary",cosine_similarity(avg_cs, avg_political))
print("compare civil service case and fuzzy dictionary",cosine_similarity(avg_cs, avg_fuzzy))

# ## Appropriations case versus all three dictionaries

print("compare state budget case and legal dictionary",cosine_similarity(avg_sb, avg_legal))
print("compare state budget case and political dictionary",cosine_similarity(avg_sb, avg_political))
print("compare state budget case and fuzzy dictionary",cosine_similarity(avg_sb, avg_fuzzy))

# ## Random case versus all three dictionaries

print("compare random case and legal dictionary",cosine_similarity(avg_rc, avg_legal))
print("compare random case and political dictionary",cosine_similarity(avg_rc, avg_political))
print("compare random case and fuzzy dictionary",cosine_similarity(avg_rc, avg_fuzzy))
models/cosine_similarities.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # The purpose of these notebooks is to demonstrate and justify a series of computational methods used to quantify cytoplasmic and membrane concentrations from midplane confocal images, as well as providing instructions on how to perform similar analysis. # # I would recommend going through the following notebooks in order: # - [Notebook 1: Autofluorescence calibration and correction](./1_autofluorescence.ipynb) # - [Notebook 2: Optical models of cytoplasmic and membrane protein](./2_models_of_membrane_and_cytoplasmic_protein.ipynb) # - [Notebook 3: Performing quantification using a simple optical model](./3_simple_model_fitting.ipynb) # - [Notebook 4: Training an adaptable model to improve accuracy of quantification](./4_custom_model.ipynb) # - [Notebook 5: Instructions for performing analysis](./5_intro_to_imagequant.ipynb) # # # I also provide several notebooks that demonstrate some control experiments using these methods: # - [Control 1: Quantification of N2s](./control_n2_quantification.ipynb) # - [Control 2: Robustness of the method to pixel noise](./control_response_to_noise.ipynb) # - [Control 3: Inter-embryo reference profile variation](./control_profile_variation.ipynb) # - [Control 4: Reference profile spatial variation](./control_profile_spatial_variation.ipynb) # # Additional notebooks: # - [Appendix 1: Specifying ROIs and straightening algorithm](./appendix_rois_and_straightening.ipynb)
notebooks/INDEX.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Basic Calculator II
#
# Given a string s which represents an expression, evaluate this expression and return its value.
#
# The integer division should truncate toward zero.
#
# Note: You are not allowed to use any built-in function which evaluates strings as mathematical expressions, such as eval().

# Example 1:
#
# Input: s = "3+2*2"
# Output: 7
# <br>
# Example 2:
#
# Input: s = " 3/2 "
# Output: 1
# <br>
# Example 3:
#
# Input: s = " 3+5 / 2 "
# Output: 5

import re


class Solution:
    def calculate(self, s: str) -> int:
        """Evaluate a '+-*/' expression of non-negative integers.

        Single left-to-right pass: additive terms are collected on a list
        (subtraction as a negated term), while '*' and '/' are applied
        immediately to the last collected term so they bind tighter.
        Division truncates toward zero.
        """
        tokens = re.findall(r"\d+|\-|\+|\/|\*", s)
        terms = []
        pending = "+"  # operator waiting to be applied to the next number
        for tok in tokens:
            if not tok.isdigit():
                pending = tok
                continue
            value = int(tok)
            if pending == "+":
                terms.append(value)
            elif pending == "-":
                terms.append(-value)
            elif pending == "*":
                terms.append(terms.pop() * value)
            else:  # "/" — true division then int() truncates toward zero
                terms.append(int(terms.pop() / float(value)))
        return sum(terms)
Exercises/17. Leetcode 227.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python2
# ---

# + [markdown] slideshow={"slide_type": "skip"}
# NOTE:
# -----
#
# Please run the below cells first before proceeding- you'll need them soon!

# + slideshow={"slide_type": "skip"}
# Load the ipython-sql extension and connect to an in-memory SQLite database.
# %load_ext sql
# %sql sqlite://
# -

# + language="sql"
# DROP TABLE IF EXISTS Movies;
# CREATE TABLE Movies(title VARCHAR(50), year INT, director VARCHAR(50), length INT);
# INSERT INTO Movies VALUES('Database Wars', 1967, '<NAME>', 123);
# INSERT INTO Movies VALUES('The Databaser', 1992, '<NAME>', 190);
# INSERT INTO Movies VALUES('Database Wars', 1998, '<NAME>', 176);
# -

# Two small helper tables A and B, populated from Python loops below
# (the :i parameters are bound by the %sql magic from the loop variable).
# %sql DROP TABLE IF EXISTS A; DROP TABLE IF EXISTS B;
# %sql CREATE TABLE A (x int, y int); CREATE TABLE B (x int, y int);

for i in range(1,6):
    # %sql INSERT INTO A VALUES (:i, :i+1)
for i in range(1,11,3):
    # %sql INSERT INTO B VALUES (:i, :i+2)

# Activity 2-4:
# ------------
#
# ORDER BY semantics, set operators & nested queries

# %sql SELECT * FROM movies

# Exercise #1
# -----------
#
# **Can you write the movie query from lecture as a single SFW query?**
#
# Recall that we are trying to find **all movie titles that were used for more than one movie.** You may assume that no two movies in the same year have the same title. Our schema for the `movies` table is:
#
# > * title STRING
# > * year INT
# > * director STRING
# > * length INT
#
# Let's try to write the nested query that solves this from lecture:

# + language="sql"
# SELECT m.title
# FROM Movies m
# WHERE m.year <> ANY(SELECT year FROM Movie WHERE title = m.title);
# -

# What? This doesn't work? Why?
#
# **ANY doesn't exist in SQLite!** Can we do this query without nesting? Write your query here:

# + language="sql"
# -- Self-join: a title appears twice with different years iff it was reused.
# SELECT DISTINCT m1.title
# FROM movies m1, movies m2
# WHERE m1.title = m2.title AND m1.year <> m2.year;
# -

# Exercise #2
# --------------------
#
# Consider the two relations $A$ and $B$ below:

# %sql SELECT * FROM A;

# %sql SELECT * FROM B;

# Assuming no duplicates, can you write an `INTERSECT` query, **just over the $x$ attribute**, without using `INTERSECT` OR nested queries? Write your query here:

# %sql SELECT A.x FROM A, B WHERE A.x = B.x;

# What is this operation called?
#
# Next, using set operators again as well, can you return all the _full_ tuples in $A$ and $B$ that overlap in $x$ attributes? Write your query here:

# + language="sql"
# SELECT x, y FROM (
#     SELECT A.x, A.y FROM A, B WHERE A.x = B.x
#     UNION
#     SELECT B.x, B.y FROM A, B WHERE A.x = B.x
# );
Activity-2-4-Solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import random import numpy as np import matplotlib.pyplot as plt # This is a bit of magic to make matplotlib figures appear inline in the notebook # rather than in a new window. # %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # Some more magic so that the notebook will reload external python modules; # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython # %load_ext autoreload # %autoreload 2 # + from sklearn import preprocessing, metrics import utils import scipy.io import numpy as np from linear_classifier import LinearSVM_twoclass # load the SPAM email training dataset X,y = utils.load_mat('data/spamTrain.mat') yy = np.ones(y.shape) yy[y==0] = -1 # load the SPAM email test dataset test_data = scipy.io.loadmat('data/spamTest.mat') X_test = test_data['Xtest'] y_test = test_data['ytest'].flatten() ################################################################################## # YOUR CODE HERE for training the best performing SVM for the data above. # # what should C be? What should num_iters be? Should X be scaled? # # should X be kernelized? What should the learning rate be? What should the # # number of iterations be? # ################################################################################## svm = LinearSVM_twoclass() svm.theta = np.zeros((X.shape[1],)) ################################################################################## # YOUR CODE HERE for testing your best model's performance # # what is the accuracy of your best model on the test set? On the training set? 
# ################################################################################## ################################################################################## # ANALYSIS OF MODEL: Print the top 15 words that are predictive of spam and for # # ham. Hint: use the coefficient values of the learned model # ################################################################################## words, inv_words = utils.get_vocab_dict() ################################################################################## # END OF YOUR CODE # ################################################################################## # -
HW4/hw4/svm_spam.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- my_iterable = [1,2,3] for item_name in my_iterable: print(item_name) mylist = [1,2,3,4,5,6,7,8,9,10] for var in mylist: print(var) for var in mylist: if var%2 == 0: print(var) else: print(f'Odd number : {var}') listsum = 0 for num in mylist: listsum += num print(listsum) mystring = 'hello world' for letter in "hello world": print(letter) tup = (1,2,3) for item in tup: print(item) mylist = [(1,2),(3,4),(5,6),(7,8)] len(mylist) for item in mylist: print(item) for item in mylist: for num in item: print(num) print(item) # ## Tuple unpacking for (a,b) in mylist: print(a) print(b) for a,b in mylist: print(f'{a} {b}') mylist = [(1,2,3),(5,6,7),(8,9,0)] for a,b,c in mylist: print(b) d = {"k1":1,'k2':2,"k3":3} for item in d: print(item) for item in d.items(): print(item) for key, value in d.items(): print (value) # + x = 0 while x <= 10: print(x) x +=1 else: print('all done') # - for i in 1,2,56: print('hi') print(i ) # #### break , continue ,pass mystring = 'sammyamm' for letter in mystring: if letter == 'a': continue #print(letter) if letter == 'm': pass # print(letter) if letter == 'y': break print(letter) x = 50 while x<5 : if x == 2: break print(x) x +=1
python notebooks by Akshit Ostwal/.ipynb_checkpoints/Loops-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Incremental Bitstring Kernel Encoding
# A kernel expression is a sequence of units, each a base kernel followed by
# an operation:
#
# $[(A/B/C)(+/*/Stop)] + [(A/B/C)(+/*/Stop)] + ... + [(A/B/C)(Stop)]$
#
# $[(|\mathcal{B}|D)(|\mathcal{O}|)] + ...$
#
# Each (family, dimension) pair gets an integer code, written in binary
# (fixed: earlier notes used a misleading hex `0x` prefix and gave $B_1$
# the wrong code):
#
# $A_1 = 0 = 0b0$
#
# $A_2 = 1 = 0b1$
#
# $A_3 = 2 = 0b10$
#
# $A_4 = 3 = 0b11$
#
# $B_1 = 4 = 0b100$
#
# $...$
#
# TODO:
# - think about how to handle stop and see if this is an encoding worth pursuing
# - refactor

import numpy as np

# +
# Two-bit codes for the operation that follows each base kernel.
add = np.array([0, 0])
mult = np.array([0, 1])
stop = np.array([1, 0])
ops_n_bits = len(add)

kernel_families = ['A', 'B', 'C']
D = 4  # number of input dimensions a base kernel can act on
# k_map[i, d] is the integer code of family i applied to dimension d + 1.
k_map = np.arange(len(kernel_families) * D).reshape(len(kernel_families), D)

# +
# Number of bits needed to represent any (family, dimension) code.
kern_bit_length = int(np.ceil(np.log2(len(kernel_families) * D)))


def encode_kernel(family, dim):
    """Return the fixed-width binary encoding of one base kernel.

    family : str, one of `kernel_families`.
    dim    : int, 1-based dimension index in [1, D].
    Returns an int array of length `kern_bit_length` (MSB first).
    """
    d = dim - 1
    i = kernel_families.index(family)
    binary_str = bin(k_map[i, d])[2:]
    n_bits = len(binary_str)
    # Integer dtype (was float) and explicit left zero-padding to fixed width.
    kern = np.zeros(kern_bit_length, dtype=int)
    kern[-n_bits:] = np.array([int(b) for b in binary_str])
    return kern


# -

for family in kernel_families:
    for d in range(1, D + 1):
        kern_encoding = encode_kernel(family, d)
        print(family + str(d) + ':', kern_encoding)
    print('')

# +
A1 = encode_kernel('A', 1)
B2 = encode_kernel('B', 2)
C4 = encode_kernel('C', 4)

A1_plus = np.hstack((A1, add))
B2_mult = np.hstack((B2, mult))
C4_stop = np.hstack((C4, stop))

full_kernel = np.hstack((A1_plus, B2_mult, C4_stop))
print('A1 + B2 * C4 = ', full_kernel)


# -

def decode_kernel(full_kernel):
    """Invert `encode_kernel`: render a full bitstring back to text.

    A unit is one kernel encoding followed by one operation code,
    e.g. "A3 *" or "B4 Stop". A trailing bare kernel (no operation bits,
    as produced by `encode_kernel` alone) decodes with no operation suffix.
    """
    n_bits_per_unit = kern_bit_length + ops_n_bits
    # Split points after every complete unit.
    unit_sections = list(range(n_bits_per_unit, len(full_kernel), n_bits_per_unit))
    units = np.split(full_kernel, unit_sections)
    kernel_dec = ''
    for unit in units:
        # Separate the kernel bits from the operation bits.
        kernel, op = np.split(unit, [kern_bit_length])
        # binary string -> integer code -> (family row, dimension column).
        binary_str = ''.join(str(int(b)) for b in kernel.tolist())
        kern_int = int(binary_str, 2)
        i = kern_int // D  # was a C-style cast: (int)(kern_int / D)
        dim = kern_int % D + 1
        kernel_dec += kernel_families[i] + str(dim)
        # Decode the operation; an empty/unknown op contributes nothing.
        if np.array_equal(op, add):
            kernel_dec += ' + '
        elif np.array_equal(op, mult):
            kernel_dec += ' * '
        elif np.array_equal(op, stop):
            kernel_dec += ' Stop'
    return kernel_dec


decode_kernel(full_kernel)

for family in kernel_families:
    for d in range(1, D + 1):
        kern_encoding = encode_kernel(family, d)
        kern_decoding = decode_kernel(kern_encoding)
        print(family + str(d) + ':', kern_encoding, 'decoded as:', kern_decoding)
    print('')

# Problems:
# what about unused part of bitstring space?
# why do I need stop operation?
notebooks/exploratory/kernel-incremental-encoding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

"""
Given the following dictionary;
temps = {'Mon': [68, 89],'Tue':[71,93],'Wed':[66,82],'Thu':[75,97],'Fri':[62,79]}
a) Convert the dictionary into the DataFrame named temperatures with 'Low' and 'High' as the indices.
b) Use the column names to select only the columns for 'Mon' through 'Wed'.
c) Use the row index 'Low' to select only the low temperatures for each day.
d) Set the floating-point precision to 2, then calculate the average temperature for each day.
e) Calculate the average low and high temperatures.
"""
import numpy as np
import pandas as pd

# a) Dict keys become the columns; the explicit index labels the two rows.
temps = {'Mon': [68, 89], 'Tue': [71, 93], 'Wed': [66, 82], 'Thu': [75, 97], 'Fri': [62, 79]}
temperatures = pd.DataFrame(temps, index=["Low", "High"])
temperatures

# b) .loc is label-based and inclusive at both ends: rows first, then columns.
temperatures.loc[:, "Mon":"Wed"]

# c) Every column of the single row labelled 'Low'.
temperatures.loc["Low", :]

# d) Use the fully-qualified option name: the bare "precision" alias was
# deprecated in pandas 1.0 and removed in 1.4, where it raises an error.
pd.set_option("display.precision", 2)
# Mean over the Low/High rows of each column -> one average per day.
temperatures.mean(axis=0)

# e) Mean across the day columns of each row -> average Low and average High.
# (axis is passed by keyword; positional axis args are deprecated in pandas.)
temperatures.mean(axis=1)
convert dictionary to dataframe.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.7 ('dif_env') # language: python # name: python3 # --- # # Understanding the dataset # Undestanding this dataset has been quite complex due to the lack of a comprehensive documentation. However, we feel like it's important to understand which data is given to the model: a complete black-box approach is at least dangerous. # ## The folders # We have not fully understood the difference between test and training data yet. However, we now know that the data is structured in **tiles**. # Each tile is a folder and corresponds to a geographical area of around 110km × 110km. The tile name (which is really the folder name), look something like `29SND`. One can use `en.get_coords_from_tile("29SND")` to get the coordinates of the bounds for such tile. # Each folder contains many files with a name like `29SND_2017-06-20_2017-11-16_2105_2233_1721_1849_32_112_26_106`. Each of such files contains information about a location inside the tile. # ## The cubes # To be precise, each file like `29SND_2017-06-20_2017-11-16_2105_2233_1721_1849_32_112_26_106` contains photos (and other infos) of a location inside tile `29SND` at different time-steps from `2017-06-20` to `2017-11-16` (readable in the file name). Finally, the rest of the name encodes the location addressed by the file. One can use `en.get_coords_from_cube(filename, return_meso=True)` to view the coordinates of the boundary of such location. **NOTE: this function is broken, and sometimes it outputs the wrong location**. # Finally, what is inside a file? 
Four cubes: # * highresdynamic (5 dimensional) # * x-axis (128) # * y-axis (128) # * channel (5 or 7) # * B, G, R, near-infrared, Sen2Cor Cloud Mask, ESA Scene Classification, EarthNet2021 Data Quality Mask # * time (20, 40 or 70) # * mesodynamic # * x-axis (80) # * y-axis (80) # * channel (5) # * Precipitation (RR), Sea pressure (PP), Mean temperature (TG), Minimum temperature (TN), Maximum temperature (TX) # * time (20, 40 or 70) # * highresstatic (the value represents the altitude) # * x-axis (128) # * y-axis (128) # * mesostatic (the value represents the altitude) # * x-axis (80) # * y-axis (80) # # The main thing to highlight here is the relation between the highres cubes and the meso cubes. The highres cubes map an area (bounded by `en.get_coords_from_cube(filename, return_meso=True)` if it was correct) of 2.56km × 2.56km, with a resolution of 20m (128*20m = 2560m). The meso cubes, on the other hand, map a much larger area: around 100km × 100km. With a resolution of 80px this yealds that each pixel cover an area of around 1.25km × 1.25km. Hence, the highres location only occupies the central 4 pixel of the meso image. 
# + import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl import earthnet as en from os import walk import os from my_coord import get_coords_from_cube as my_coord import os import json from pyproj import Transformer from earthnet.coords_dict import COORDS import pickle # - #plt.imshow(hrd[:,:,0,0]) context = "../Data/extreme/extreme_test_split/context/32UQC" filenames = next(walk(context), (None, None, []))[2] for file in filenames: print(file[8:][:-4]) with open( "extreme_test_split_context_data_paths.pkl",'rb') as f: filenames = pickle.load(f) def mc(cubename: str, return_meso: bool = False): cubetile,_, _, hr_y_min, hr_y_max,hr_x_min, hr_x_max, meso_x_min, meso_x_max, meso_y_min, meso_y_max = os.path.splitext(cubename)[0].split("_") tile = NEWCOORD[cubetile] transformer = Transformer.from_crs(tile["EPSG"], 4326, always_xy = True) tile_x_min, tile_y_max = transformer.transform(tile["MinLon"],tile["MaxLat"], direction = "INVERSE") a = 20 tile_x_min = tile_x_min cube_x_min = tile_x_min + a * float(hr_x_min) cube_x_max = tile_x_min + a * float(hr_x_max) cube_y_min = tile_y_max - a * float(hr_y_min) cube_y_max = tile_y_max - a * float(hr_y_max) cube_lon_min, cube_lat_min = transformer.transform(cube_x_min, cube_y_max) cube_lon_max, cube_lat_max = transformer.transform(cube_x_max, cube_y_min) #print("{0}, {1}".format(cube_lat_min,cube_lon_min)) cube_lat_min return cube_lon_min, cube_lat_min, cube_lon_max, cube_lat_max # + cubetiles = ["32UPC","32UMC","32UNC","32UQC"] north_west_cube = ["","","",""] west = 4*[100000] north = 4*[0] for fn in filenames: cubetile,_, _, hr_y_min, hr_y_max, hr_x_min, hr_x_max, meso_x_min, meso_x_max, meso_y_min, meso_y_max = os.path.splitext(fn[158:])[0].split("_") idx = cubetiles.index(cubetile) hr_x_min = float(hr_x_min) if hr_x_min <= west[idx] : north_west_cube[idx] = fn[158:] west[idx] = hr_x_min print(north_west_cube) print(west) print(north) # - print(COORDS["32UPC"]) print(COORDS["32UMC"]) 
print(COORDS["32UNC"]) print(COORDS["32UQC"]) # + shift = 158 print(filenames[0][shift:]) legend = [] for i, file in enumerate(filenames): if file[shift:shift+5] == "32UPC": #cord = my_coord(file[158:], return_meso=True) cord_n = mc(file[shift:], return_meso=True) #plt.plot([cord[0],cord[0],cord[2],cord[2],cord[0]], [cord[1],cord[3],cord[3],cord[1], cord[1]]) plt.plot([ cord_n[0],cord_n[0] , cord_n[2] , cord_n[2] , cord_n[0]], [cord_n[1],cord_n[3],cord_n[3],cord_n[1], cord_n[1]], '-') #print("{0} -> ({1})".format(i,file[8:])) cord = cord_n #print(" east: {0} - {1}".format(cord[0], cord[2])) #print(" nord: {0} - {1}".format(cord[1], cord[3])) legend.append(str(i)) #legend.append("{0} - new".format(i)) #plt.plot([cord[4],cord[4],cord[6],cord[6],cord[4]], [cord[5],cord[7],cord[7],cord[5], cord[5]]) plt.axis('equal') #plt.legend(legend) plt.show() # - # Here we look at some files inside the tile `29SND` filenames[0][shift:shift+5] NEWCOORD = { "32UPC" : { "MinLon" : 10.4361, "MaxLat" : 52.3414, 'EPSG': 32632 }, "32UMC" : { "MinLon" : 7.5315, "MaxLat" : 52.3504, 'EPSG': 32632 }, "32UNC" : { "MinLon" : 8.9997, "MaxLat" : 52.314, 'EPSG': 32632 }, "32UQC" : { "MinLon" : 11.9352, "MaxLat" : 52.315, 'EPSG': 32632 } } # + ind = 1 print(filenames[ind][8:]) coord = mc(filenames[ind][8:]) print("{0}, {1}".format(coord[3],coord[0])) sample = np.load(os.path.join(context , filenames[ind])) hrd = sample['highresdynamic'] md = sample["mesodynamic"] # Accessing high-resolution and mesoscale static variables (the EUDEM digital elevation model) hrs = sample["highresstatic"] ms = sample["mesostatic"] time = 3 pic = np.flip(hrd[:,:,:3,time].astype(float),2)*3 plt.rcParams['figure.figsize'] = [15, 7] im = plt.imshow(pic*2) plt.show() #51.308834, 13.230445 # - locs = { "32UQC_2018-01-28_2018-11-23_5305_5433_4665_4793_82_162_72_152.npz":{ "lon_max": 13.242973, "lon_min": 13.208611, "lat_max": 51.320081, "lat_min": 51.296943 }, 
"32UQC_2018-01-28_2018-11-23_5305_5433_4409_4537_82_162_68_148.npz":{ "lon_max": 13.171049, "lon_min": 13.136258, "lat_max": 51.323078, "lat_min": 51.299554 } } # + # single file ind = 0 print(filenames[ind]) sample = np.load(os.path.join(context , filenames[ind])) hrd = sample['highresdynamic'] md = sample["mesodynamic"] # Accessing high-resolution and mesoscale static variables (the EUDEM digital elevation model) hrs = sample["highresstatic"] ms = sample["mesostatic"] # - # Here we draw the satellite picture at the time `time` on the left. On the right we draw the elevation map. All of this is in the highres cubes. Furthermore, we print the coordinates of the supposed boundary of the highres area interested. However, checking on google earth, it seems that those locations are wrong. # + time = 3 pic = np.flip(hrd[:,:,:3,time].astype(float),2)*2 #Times 2 makes it lighter fig, axs = plt.subplots(nrows=1,ncols = 2) fig.set_size_inches(8,4) axs[0].imshow(pic) axs[1].imshow(hrs.astype(float)) #cord = en.get_coords_from_cube(filenames[ind], return_meso=True) #print(str(cord[1]) +','+ str(cord[0])) #print(str(cord[3]) +','+ str(cord[2])) plt.show() # - time = 3 pic = np.flip(hrd[:,:,:3,time].astype(float),2)*3 plt.rcParams['figure.figsize'] = [15, 7] im = plt.imshow(pic) plt.savefig("im"+str(time),bbox_inches="tight", pad_inches = 0) pic = md[:,:,0,19].astype(float) #Times 2 makes it lighter pic2 = ms.astype(float) pic2[40:41,40:41] = np.max(pic2) #pic2[41,41] = np.max(pic2) fig, axs = plt.subplots(nrows=1,ncols = 2) fig.set_size_inches(8,4) axs[0].imshow(pic) axs[1].imshow(pic2) #cord = en.get_coords_from_cube(filenames[ind], return_meso=True) plt.show() ndvi = ((hrd[:, :, 3, :] - hrd[ :, :, 2, :]) / ( hrd[ :, :, 3, :] + hrd[ :, :, 2, :] + 1e-6)) cloud_mask = 1 - np.nan_to_num(hrd[:, :, 6, :], nan = 0) vag_mask = 1 - (1 - (hrd[:, :, 5, :] == 4)) * (1 - (hrd[:, :, 5, :] == 5)) ndvi = np.nan_to_num(ndvi, nan=0) vag_mask = 1 - ((1 - np.nan_to_num(vag_mask, 
nan=0))*(1-cloud_mask)) #vag_mask = np.repeat(vag_mask.prod(axis=2)[...,np.newaxis], 30, axis=-1) ndvi=ndvi*cloud_mask*vag_mask ims = plt.imshow(ndvi[...,time]) plt.colorbar(ims) plt.plot() np.linspace(0.1,1,10) # + # Take out cloudy days splits = np.linspace(0.1,1,10) q = np.quantile(ndvi, splits, axis = (0,1)) valid_ndvi_time = q[0]!=0 q[0,valid_ndvi_time] x = np.arange(30)[valid_ndvi_time] for i in range(len(splits)): plt.plot(x, q[i,valid_ndvi_time], '-*') plt.plot(np.arange(150)/5 -1, md[40,40,0,:]) #plt.plot(np.arange(150)/5 -1, md[40,40,2,:]) plt.plot(np.arange(150)/5 -1, md[40,40,3,:]) #plt.plot(np.arange(150)/5 -1, md[40,40,4,:]) # - q = np.quantile(ndvi, [0.1,0.25,0.5,0.75,0.9], axis = (0,1)) print(q.shape) plt.plot(q[0],'*') plt.plot(q[1],'*') plt.plot(q[2],'*') plt.plot(q[3],'*') plt.plot(q[4],'*') plt.plot(np.arange(150)/5 -1, md[40,40,0,:]) plt.plot(np.arange(150)/5 -1, md[40,40,2,:]) plt.plot(np.arange(150)/5 -1, md[40,40,3,:]) plt.plot(np.arange(150)/5 -1, md[40,40,4,:]) import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = [15, 7] np.repeat(vag_mask.prod(axis=2)[...,np.newaxis], 30, axis=-1).shape
demos/data_observation_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np from manage_path import * from topic_model_analysis import * # - # # Dc_v3 result_directory = get_result_directory() get_document_item_vectorize = np.vectorize(get_document_item) Dc_v3 = pd.read_csv(result_directory/'Dc_v3_50topics.csv',index_col=0) Dc_v3['dealer'] = pd.Series(list(zip(get_document_item_vectorize(Dc_v3.index,0),get_document_item_vectorize(Dc_v3.index,2)))).values Dc_v3.index = pd.to_datetime(get_document_item_vectorize(Dc_v3.index,1)) # ### Snakey THRESHOLD = 0.75 Dc_v3_dealerXtopic_sum = Dc_v3.groupby(by='dealer').mean() Dc_v3_dealerXtopic_sum.index = Dc_v3_dealerXtopic_sum.index.format() Dc_v3_dealerXtopic_sum = pd.DataFrame(Dc_v3_dealerXtopic_sum.stack().reset_index().rename({'level_0':'dealer','level_1':'topicID',0:'values'},axis=1)) #Dc_v3_dealerXtopic_sum['topicID'] = Dc_v3_dealerXtopic_sum['topicID'].astype(np.int) Dc_v3_dealerXtopic_sum = Dc_v3_dealerXtopic_sum[Dc_v3_dealerXtopic_sum['values']>=THRESHOLD].copy() Dc_v3_dealerXtopic_sum['B/S'] = Dc_v3_dealerXtopic_sum.apply(lambda x: str(x['dealer'])[-2] ,axis=1) Dc_v3_dealerXtopic_sum['dealer'] = Dc_v3_dealerXtopic_sum.apply(lambda x: str(x['dealer']).split(',')[0][1:] ,axis=1) """ # Dc_v3_dealerXtopic_sum = Dc_v3_dealerXtopic_sum[(Dc_v3_dealerXtopic_sum['topicID']=='0') | (Dc_v3_dealerXtopic_sum['topicID']=='30')| (Dc_v3_dealerXtopic_sum['topicID']=='15') | (Dc_v3_dealerXtopic_sum['topicID']=='46')| (Dc_v3_dealerXtopic_sum['topicID']=='48') | (Dc_v3_dealerXtopic_sum['topicID']=='35')| (Dc_v3_dealerXtopic_sum['topicID']=='41') | (Dc_v3_dealerXtopic_sum['topicID']=='43')].copy() """ """ # Topics on top Dc_v3_dealerXtopic_sum = Dc_v3_dealerXtopic_sum[(Dc_v3_dealerXtopic_sum['topicID']=='36') | 
(Dc_v3_dealerXtopic_sum['topicID']=='21')| (Dc_v3_dealerXtopic_sum['topicID']=='13') | (Dc_v3_dealerXtopic_sum['topicID']=='28')| (Dc_v3_dealerXtopic_sum['topicID']=='12') | (Dc_v3_dealerXtopic_sum['topicID']=='39')| (Dc_v3_dealerXtopic_sum['topicID']=='1') | (Dc_v3_dealerXtopic_sum['topicID']=='18')].copy() """ """ # Topics in middle (wide dealers) Dc_v3_dealerXtopic_sum = Dc_v3_dealerXtopic_sum[(Dc_v3_dealerXtopic_sum['topicID']=='14') | (Dc_v3_dealerXtopic_sum['topicID']=='15')| (Dc_v3_dealerXtopic_sum['topicID']=='47') | (Dc_v3_dealerXtopic_sum['topicID']=='26')| (Dc_v3_dealerXtopic_sum['topicID']=='8') | (Dc_v3_dealerXtopic_sum['topicID']=='38')| (Dc_v3_dealerXtopic_sum['topicID']=='34') | (Dc_v3_dealerXtopic_sum['topicID']=='46')| (Dc_v3_dealerXtopic_sum['topicID']=='49') | (Dc_v3_dealerXtopic_sum['topicID']=='22')].copy() """ """ # Buy topics Dc_v3_dealerXtopic_sum = Dc_v3_dealerXtopic_sum[(Dc_v3_dealerXtopic_sum['topicID']=='1') | (Dc_v3_dealerXtopic_sum['topicID']=='16')| (Dc_v3_dealerXtopic_sum['topicID']=='33') | (Dc_v3_dealerXtopic_sum['topicID']=='17')| (Dc_v3_dealerXtopic_sum['topicID']=='7') | (Dc_v3_dealerXtopic_sum['topicID']=='22')| (Dc_v3_dealerXtopic_sum['topicID']=='27') | (Dc_v3_dealerXtopic_sum['topicID']=='2')| (Dc_v3_dealerXtopic_sum['topicID']=='9')].copy() """ """ # Sell topics Dc_v3_dealerXtopic_sum = Dc_v3_dealerXtopic_sum[(Dc_v3_dealerXtopic_sum['topicID']=='0') | (Dc_v3_dealerXtopic_sum['topicID']=='45')| (Dc_v3_dealerXtopic_sum['topicID']=='15') | (Dc_v3_dealerXtopic_sum['topicID']=='46')| (Dc_v3_dealerXtopic_sum['topicID']=='35') | (Dc_v3_dealerXtopic_sum['topicID']=='41')].copy() """ Dc_v3_dealerXtopic_sum = Dc_v3_dealerXtopic_sum.sort_values(by=['topicID','dealer']) Dc_v3_dealerXtopic_sum.head() # + from sklearn import preprocessing dealer_le = preprocessing.LabelEncoder() dealer_le.fit(Dc_v3_dealerXtopic_sum['dealer']) dealer_transform = dealer_le.transform(Dc_v3_dealerXtopic_sum['dealer']) dealer_inverse_transform = 
dealer_le.inverse_transform(dealer_transform) topic_le = preprocessing.LabelEncoder() topic_le.fit(Dc_v3_dealerXtopic_sum['topicID']) topic_transform = topic_le.transform(Dc_v3_dealerXtopic_sum['topicID']) topic_inverse_transform = topic_le.inverse_transform(topic_transform) # - Dc_v3_dealerXtopic_sum['topicID'].nunique() # + active="" # Dc_v3_dealerXtopic_sum['dealer_encoding'] = dealer_transform # Dc_v3_dealerXtopic_sum['dealer_label_position'] = Dc_v3_dealerXtopic_sum['dealer_encoding'] +len(topicID_list) +1 # Dc_v3_dealerXtopic_sum['topic_encoding'] = topic_transform # - # Adjust dealer_label_position Dc_v3_dealerXtopic_sum['dealer_encoding'] = dealer_transform topicID_size = Dc_v3_dealerXtopic_sum['topicID'].nunique() Dc_v3_dealerXtopic_sum['dealer_label_position'] = Dc_v3_dealerXtopic_sum.apply(lambda x: x['dealer_encoding'] + topicID_size, axis=1) Dc_v3_dealerXtopic_sum['topic_encoding'] = topic_transform Dc_v3_dealerXtopic_sum['topic_position'] = topic_transform label = Dc_v3_dealerXtopic_sum['topicID'].values.tolist().append(Dc_v3_dealerXtopic_sum['dealer'].values.tolist()) # Create labels topic_label = list(Dc_v3_dealerXtopic_sum['topicID'].unique()) dealer_label = list(Dc_v3_dealerXtopic_sum.sort_values(by=['dealer_encoding'])['dealer'].unique()) label = [] label.extend(topic_label) label.extend(dealer_label) # Create Label's Colors label_color = len(topic_label)*['darkred',] label_color.extend(len(dealer_label)*['black',]) # Create Link Colors based on B/S b_count = Dc_v3_dealerXtopic_sum.groupby(['B/S']).count()['dealer'][0] s_count = Dc_v3_dealerXtopic_sum.groupby(['B/S']).count()['dealer'][1] link_color = b_count*['deepskyblue',] link_color.extend(s_count*['orange',]) # Sort by B/S to match link color arrays Dc_v3_dealerXtopic_sum.sort_values(by=['B/S'],inplace=True) # + active="" # # Reverse order of dealer_label_position and topic_encoding if B/S is B # def reverse_BS(df): # if df['B/S'] == 'B': # return 
[df['topic_position'],df['dealer_label_position']] # else: # return [df['dealer_label_position'],df['topic_position']] # # Dc_v3_dealerXtopic_sum[['dealer_label_position','topic_position']] = Dc_v3_dealerXtopic_sum.apply(reverse_BS,axis=1,result_type='expand') # - Dc_v3_dealerXtopic_sum.head() Dc_v3_dealerXtopic_sum.groupby(['topicID','B/S'])['dealer'].count() # + #Dc_v3_dealerXtopic_sum.to_csv(str("Dc_v3_50topics_THRESHOLD={}").format(THRESHOLD)) # + data = dict( type='sankey', orientation = "v", valueformat = ".4f", node = dict( pad = 100, thickness = 30, line = dict( color = "black", width = 0.5 ), label = label, color = label_color ), link = dict( source = Dc_v3_dealerXtopic_sum['dealer_label_position'], target = Dc_v3_dealerXtopic_sum['topic_position'], value = Dc_v3_dealerXtopic_sum['values'], #label = inverse_transform color = link_color )) title = str("Dc_v2_50topics_THRESHOLD={}").format(THRESHOLD) layout = dict( title = title, font = dict( size = 20 ), width=1500, height=5000, ) fig = dict(data=[data], layout=layout) #plotly.offline.iplot(fig, validate=False) pio.write_image(fig, "{}.png".format(title)) plotly.offline.plot(fig, filename = "{}.html".format(title), auto_open=False) # - Dc_v3_dealerXtopic_sum['topicID'].values
TopicModeling/count_Dc_v3_50topics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# HackerRank "Append and Delete": starting from string s, perform exactly k
# operations to obtain string t. An operation either deletes the last
# character of s (deleting from the empty string is allowed and leaves it
# empty) or appends one character.


def can_convert(s, t, k):
    """Return True if s can be turned into t using exactly k operations.

    Fixes a latent NameError in the original: the for-else read the loop
    variable after a loop over range(min(len(s), len(t))), which is unbound
    when either string is empty. zip() handles that case naturally.
    """
    # Length of the common prefix shared by s and t.
    prefix = 0
    for a, b in zip(s, t):
        if a != b:
            break
        prefix += 1
    # Minimum ops: delete everything past the prefix, then append the rest of t.
    needed = (len(s) - prefix) + (len(t) - prefix)
    if k >= len(s) + len(t):
        # Delete all of s, burn any spare ops deleting the empty string
        # (a legal no-op), then append t — parity is irrelevant here.
        return True
    # Otherwise spare operations must come in delete/append pairs, so the
    # parity of k must match the parity of the minimum.
    return needed <= k and (k - needed) % 2 == 0


if __name__ == "__main__":
    # Same CLI behavior as the original flat script, but guarded so the
    # module can be imported (and tested) without blocking on stdin.
    s = input().strip()
    t = input().strip()
    k = int(input().strip())
    print("Yes" if can_convert(s, t, k) else "No")
Algorithm/36. append and delete.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:python3] # language: python # name: conda-env-python3-py # --- # + [markdown] id="OPxkBOetCFr_" # # Microsoft-Vision-ResNet50 based Hybrid Model using Linear Classifier # + id="6FOhnnunCFsB" #conda install pytorch torchvision cudatoolkit=10.1 -c pytorch #pip install progressbar2 # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1537, "status": "ok", "timestamp": 1622459609197, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjaCV5m2H_od77fWW_j8T9E5_Nd_t82pjGdydJF=s64", "userId": "14896880010903859463"}, "user_tz": 420} id="H3GmH1aSyVH7" outputId="51f77a58-c78a-49b3-a3c2-4e3eb4c6187c" # assignment folder, e.g. 'cs231n/assignments/assignment1/' FOLDERNAME = 'home/ubuntu/Vision-Classifiers/Microsoft-Vision-Classifier/' assert FOLDERNAME is not None, "[!] Enter the foldername." 
import sys sys.path.append('/home/ubuntu/Vision-Classifiers/Microsoft-Vision-Classifier') # %cd /$FOLDERNAME/datasets/ # !bash get_datasets.sh # %cd /$FOLDERNAME # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 117, "status": "ok", "timestamp": 1622459612212, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjaCV5m2H_od77fWW_j8T9E5_Nd_t82pjGdydJF=s64", "userId": "14896880010903859463"}, "user_tz": 420} id="CDy8Ue0-yVIO" outputId="c2ca3e66-086d-4dd0-f193-80f6e3fc8516" tags=["pdf-ignore"] import numpy as np import matplotlib.pyplot as plt from data_utils import get_CIFAR10_data # %load_ext autoreload # %autoreload 2 def calculate_error(x, y): return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) # + id="8xq1JdtICFsA" import torch import torchvision from torch.utils.data import DataLoader from torchvision.datasets import CIFAR10 import torchvision.transforms as transforms import progressbar from progressbar import progressbar from sklearn.neighbors import KNeighborsClassifier import numpy as np from sklearn.linear_model import LogisticRegression # - import microsoftvision class Wrangling: def __init__(self): self.wrangling = transforms.Compose([ transforms.Resize(224), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229])]) def __call__(self, x): return self.wrangling(x)[[2,1,0],:,:] # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 6734, "status": "ok", "timestamp": 1622459628416, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjaCV5m2H_od77fWW_j8T9E5_Nd_t82pjGdydJF=s64", "userId": "14896880010903859463"}, "user_tz": 420} id="ldvJzyFdyVIQ" outputId="b2bfa803-7982-45db-8e21-29a663a38d40" tags=["pdf-ignore"] data = get_CIFAR10_data() for k, v in list(data.items()): print(f"{k}: {v.shape}") # + colab={"base_uri": "https://localhost:8080/"} 
executionInfo={"elapsed": 3790, "status": "ok", "timestamp": 1622459633241, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjaCV5m2H_od77fWW_j8T9E5_Nd_t82pjGdydJF=s64", "userId": "14896880010903859463"}, "user_tz": 420} id="LBIz9lbbCFsC" outputId="1cd889a1-2420-4f29-b5cd-d40cefa0daf9" train = CIFAR10('./path', download=True, train=True, transform=Wrangling()) test = CIFAR10('./path', download=True, train=False, transform=Wrangling()) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1902, "status": "ok", "timestamp": 1622459637704, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjaCV5m2H_od77fWW_j8T9E5_Nd_t82pjGdydJF=s64", "userId": "14896880010903859463"}, "user_tz": 420} id="QXqOBk-wCFsE" outputId="23954262-d01d-4e25-bb18-60b44425ff0b" model = microsoftvision.models.resnet50(pretrained=True) # + id="jGhy43gpCFsF" model.eval() model.cuda() # + id="Wy9C-gEXCFsG" def extractor(dataset, model): all_features = [] all_labels = [] import time import logging import progressbar torch.cuda.empty_cache() with torch.no_grad(): for point, labels in progressbar.progressbar(DataLoader(dataset, batch_size=128, num_workers=8)): point = point.cuda() labels = labels.cuda() features = model(point) all_features.append(features) all_labels.append(labels) return torch.cat(all_features).cpu().numpy(), torch.cat(all_labels).cpu().numpy() # + colab={"base_uri": "https://localhost:8080/", "height": 180} executionInfo={"elapsed": 131, "status": "error", "timestamp": 1622484091466, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjaCV5m2H_od77fWW_j8T9E5_Nd_t82pjGdydJF=s64", "userId": "14896880010903859463"}, "user_tz": 420} id="gZKiELh2CFsI" outputId="f639e076-e90e-431a-9453-8788ab3e6933" train_features, train_labels = extractor(train, model) test_features, test_labels = extractor(test, model) # + id="ReNnjgUkCFsK" classifier = 
LogisticRegression(random_state=0, max_iter=1000, verbose=1, n_jobs=16) # + id="Jj9aVXAJCFsK" classifier.fit(train_features, train_labels) predictions = classifier.predict(test_features) result = 100 * np.mean((test_labels == predictions).astype(np.float)) print(f"Hybrid Model Performance Accuracy: {result}") # + id="Qir_p-tiCFsL"
Vision_Linear_Classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Use pyquery-ql.py
#
# Send a graphql query to GitHub
# and pretty print output.
#
# Supports Python 3.6+

# +
import json
import os
import pprint

import requests
# -

# get api token and set authorization
api_token = os.environ['GITHUB_API_TOKEN']
headers = {'Authorization': f'token {api_token}'}

# set url to a graphql endpoint
url = 'https://api.github.com/graphql'

# add a json query: open issue / open PR counts for the first 30
# repositories in the jupyterhub organization
query = """
{
  organization(login: "jupyterhub") {
    repositories(first: 30) {
      totalCount
      edges {
        node {
          name
          url
          issues(states: OPEN) {
            totalCount
          }
          pullRequests(states: OPEN) {
            totalCount
          }
        }
      }
    }
  }
}
"""

# submit the request
r = requests.post(url=url, json={'query': query}, headers=headers)
r.text

data = json.loads(r.text)

import requests
import json
from itertools import zip_longest
import pandas as pd
import numpy as np


# This is a little helper function that will take x of something, and let you
# chunk through a subset.  Note that 'n' is the chunk size.
def _grouper(n, iterable, fillvalue=None):
    """Yield ``n``-sized chunks of ``iterable``, padding the last with ``fillvalue``."""
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)


# +
# Since you want a report by month, lets just create a function to get all the
# chat IDs for a two digit month and 4 digit year
def get_ids(data):
    """Return the top-level keys of the ``data`` payload of a JSON response.

    Fix: the previous version appended the literal string ``'id'`` on every
    iteration, so it always returned ``['id', 'id', ...]`` instead of the keys.
    """
    all_ids = []
    response = json.loads(data)
    for key in response['data']:
        all_ids.append(key)
    return all_ids
# -

# Exploratory walk of the response: collect nested key names level by level.
all_ids = []
for key in data['data']:
    print(key)
    all_ids.append(key)

for key in data['data'][all_ids[0]]:
    print(key)
    all_ids.append(key)
print(all_ids)

for key in data['data'][all_ids[0]][all_ids[1]]:
    print(key)
    all_ids.append(key)
print(all_ids)

# Fix: 'edges' is nested under organization -> repositories; the previous
# direct lookup data['data']['edges'] raised a KeyError.
print(data['data']['organization']['repositories']['edges'])


def build_csv_data(queried_id_list):
    """Fetch chat records in batches of 50 ids and return them as a flat list.

    NOTE(review): ``config`` and the HTTP session ``s`` are not defined in
    this notebook -- this cell was copied from another script and still
    depends on them; confirm before running.
    """
    all_chats_for_csv = []
    for chunk in _grouper(50, queried_id_list):
        # Only needed for last set of 50 that will have some sort of remainder
        # filled with 'None' values by the _grouper() function
        filtered = [chat_id for chat_id in chunk if chat_id is not None]
        url = "{0}/chats".format(config['base_path'])
        stringified_ids = ",".join(filtered)
        params = {"ids": stringified_ids}
        r = s.get(url, params=params)
        if r.status_code == 200:
            response = json.loads(r.text)
            docs = response['docs']
            for k in docs:
                # This is the 'doc' object. Note that some keys may not exist
                # for all items. Duration does not exist for offline messages
                # for example.
                all_chats_for_csv.append(docs[k])
        else:
            print("error getting bulk chats")
    return all_chats_for_csv


# +
# grab the ids for the specified range NOTE!! This is where you can change the
# year/month for the date range.
# NOTE(review): get_chat_ids_for_month() is not defined anywhere in this
# notebook either -- confirm where it should come from.
queried_id_list = get_chat_ids_for_month(2016, 10)

# turn those into raw objects
csv_obj_data = build_csv_data(queried_id_list)

# create a list to hold our rows
all_rows_as_obj = []

# turn the raw objects into nice and tasty ones that pandas can digest
for record in csv_obj_data:
    try:
        response_time = record.get('response_time', {})
        count = record.get('count', {})
        csv_obj = {
            "id": record['id'],
            "agent": ",".join(record.get('agent_names', "N/A")),
            "visitor": record['visitor']['name'],
            "department": record['department_name'],
            "url": record.get('webpath', "N/A"),
            "missed": record.get('missed', "N/A"),
            "resp_first": response_time.get('first', []),
            "resp_max": response_time.get('max', []),
            "resp_avg": response_time.get('avg', []),
            "start timestamp": record['session']['start_date'],
            "end timestamp": record['session']['end_date'],
            "total messages": count.get('total', 0),
            "Agent Msg Count": count.get('agent', 0),
            "Visitor Msg Count": count.get('visitor', 0),
            "rating": record.get('rating', []),
            "ticket_id": record['zendesk_ticket_id'],
        }
    except KeyError as e:
        print(e)
        print("key error occured for record with id: {}".format(record['id']))
        print(record)
        # Fix: skip this record entirely -- the old code fell through and
        # appended the previous (or an unbound) csv_obj.
        continue
    all_rows_as_obj.append(csv_obj)

# with prettier objects our column names can just come from a call to the
# first object's keys
col_keys = all_rows_as_obj[0].keys()

# create the dataframe, list comprehension creates an array of rows based on
# the column names we got.  Let pandas do the hard part with csvs.
csv_frame = pd.DataFrame([[i[j] for j in col_keys] for i in all_rows_as_obj],
                         columns=col_keys)

# coerce ticket_id to be int not float
csv_frame['ticket_id'] = csv_frame['ticket_id'].fillna(0).astype(np.int64)

# fill blank values with the string 'N/A'
csv_frame = csv_frame.fillna("N/A")

# output to a file
csv_frame.to_csv('october_stats.csv')
# -

# Walk the JSON response contents
issue_data = data['data']
org = issue_data['organization']
repos = org['repositories']
edges = repos['edges']

# +
# edges[0]['node']

# +
# print(edges[0]['node']['name'] + '---' + edges[0]['node']['url'])

# +
# for edge in edges:
#     pprint.pprint(edge)
# -

# Bring into pandas
import pandas as pd

df = pd.DataFrame.from_records(edges)
df.columns

# +
# df['node']
# -

# Generate basic report of total open issues

# +
print(f"{'Repo':30} {'OpenIssues':11} {'OpenPRs':9} {'URL'}")
# Fix: iterate every row in reverse -- the old hard-coded `line = 26`
# countdown skipped row 0 and broke whenever the repository count changed.
for idx in range(len(df) - 1, -1, -1):
    node = df['node'][idx]
    lineout = (f"{node['name']:30} {node['issues']['totalCount']:8} "
               f"{node['pullRequests']['totalCount']:8} {node['url']}")
    print(lineout)
# -

df.dtypes
df.head()
df.index
df.columns
df.values
# Fix: sort_index is a method -- without parentheses the old cell just
# displayed the bound-method object instead of a sorted frame.
df.sort_index()

# +
# output data to a csv
# df.to_csv('issue_data.csv')
notebooks/use-pyquery-ql-Copy1.ipynb
# ##### Copyright 2020 The OR-Tools Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # rabbits_and_pheasants_sat # <table align="left"> # <td> # <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/sat/rabbits_and_pheasants_sat.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a> # </td> # <td> # <a href="https://github.com/google/or-tools/blob/master/ortools/sat/samples/rabbits_and_pheasants_sat.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a> # </td> # </table> # First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab. # !pip install ortools # + # Copyright 2010-2018 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Rabbits and Pheasants quizz.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from ortools.sat.python import cp_model def RabbitsAndPheasantsSat(): """Solves the rabbits + pheasants problem.""" model = cp_model.CpModel() r = model.NewIntVar(0, 100, 'r') p = model.NewIntVar(0, 100, 'p') # 20 heads. model.Add(r + p == 20) # 56 legs. model.Add(4 * r + 2 * p == 56) # Solves and prints out the solution. solver = cp_model.CpSolver() status = solver.Solve(model) if status == cp_model.OPTIMAL: print('%i rabbits and %i pheasants' % (solver.Value(r), solver.Value(p))) RabbitsAndPheasantsSat()
examples/notebook/sat/rabbits_and_pheasants_sat.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Only needed in a Jupyter Notebook # %matplotlib inline # Optional plot styling import matplotlib matplotlib.style.use('bmh') import matplotlib.pyplot as plt from pycalphad import equilibrium from pycalphad import Database, Model import pycalphad.variables as v import numpy as np db = Database('alfe_sei.TDB') my_phases = list(set(db.phases.keys()) - {'BCC_A2'}) eq = equilibrium(db, ['AL', 'FE', 'VA'], my_phases, {v.X('AL'): (0, 1, 0.01), v.T: (300, 2000, 10), v.P: 101325}, output=['heat_capacity', 'degree_of_ordering', 'curie_temperature']) print(eq) from pycalphad import eqplot fig = plt.figure(figsize=(9,6)) eqplot(eq, ax=fig.gca()) tc_indices = np.logical_and(np.abs(eq['curie_temperature'].values - eq['T'].values[..., None]) < 10, np.any(eq.Phase.values == 'B2_BCC', axis=-1)) tc_indices = np.nonzero(np.logical_and(tc_indices, np.sum(eq.Phase.values != '', axis=-1, dtype=np.int) == 1)) bcc_indices = np.logical_and(np.any(eq.Phase.values == 'B2_BCC', axis=-1), np.sum(eq.Phase.values != '', axis=-1, dtype=np.int) == 1) tc_arr = np.array([eq['X'].sel(component='AL', vertex=0).values[tc_indices], np.take(eq['T'].values, tc_indices[1])]) tc_arr = tc_arr[:, tc_arr[0].argsort()] fig.gca().plot(tc_arr[0], tc_arr[1], '--', color='grey', linewidth=2) X, Y = np.meshgrid(eq['X_AL'].values, eq['T'].values) CS = fig.gca().contour(X, Y, np.squeeze(np.ma.array(eq['degree_of_ordering'].sel(vertex=0).values, mask=~bcc_indices)), colors='k') fig.gca().clabel(CS, inline=1, fontsize=10) plt.show()
ContourMap.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="GGMnr4rcvyiE" import numpy as np import pandas as pd import itertools # + id="4HJ-r68jvyiG" df=pd.read_csv('news.csv') # + id="PemxMQH_vyiH" outputId="4dc6fc21-df99-4d21-b4f7-9c1f7b045e39" df.shape df.head(10) # + id="DonBTXGivyiJ" outputId="110debde-1f94-4d81-d72e-37c658d6e1e0" labels=df.label labels.head(10) # + id="ZzRtYy6-vyiK" #splitting data into trainig test from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test=train_test_split(df['text'], labels, test_size=0.2, random_state=7) # + id="WYppwjOLvyiK" from sklearn.feature_extraction.text import TfidfVectorizer tfidf_vectorizer=TfidfVectorizer(stop_words='english', max_df=0.7) # + id="Tv47ahfmvyiL" tfidf_train=tfidf_vectorizer.fit_transform(x_train) tfidf_test=tfidf_vectorizer.transform(x_test) # + id="kqCOgcPKvyiM" outputId="0df21d8f-e3dd-4547-c2e9-93e18f65aded" #Model =passiveAggresiveClassifier from sklearn.linear_model import PassiveAggressiveClassifier pac=PassiveAggressiveClassifier(max_iter=50) pac.fit(tfidf_train,y_train) # + id="7RELzW0fvyiN" outputId="8fc70756-dfc5-40f9-f7ea-a4763b793119" y_pred=pac.predict(tfidf_test) from sklearn.metrics import accuracy_score, confusion_matrix score=accuracy_score(y_test,y_pred) print(f'Accuracy: {round(score*100,2)}%') # + id="VEIZelP4vyiO" outputId="e3e672eb-c743-4d67-cd5f-513b41059c09" confusion_matrix(y_test,y_pred, labels=['FAKE','REAL']) # + id="W9T5nj9tvyiO" # + id="ZV5aSKGrvyiP"
fakenewdetection (1).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] slideshow={"slide_type": "slide"} # ### Load tensorflow # - import tensorflow as tf tf.reset_default_graph() tf.set_random_seed(42) # + [markdown] slideshow={"slide_type": "slide"} # ### Collect Data # - (trainX, trainY),(testX, testY) = tf.keras.datasets.mnist.load_data() # + [markdown] slideshow={"slide_type": "slide"} # ### Convert Output label to multiple values # - trainY = tf.keras.utils.to_categorical(trainY, num_classes=10) testY = tf.keras.utils.to_categorical(testY, num_classes=10) # + [markdown] slideshow={"slide_type": "slide"} # ## Build the Graph # + #Initialize Sequential model model = tf.keras.models.Sequential() #Reshape data from 2D to 1D -> 28x28 to 784 model.add(tf.keras.layers.Reshape((784,),input_shape=(28,28,))) #Normalize the data model.add(tf.keras.layers.BatchNormalization()) # + [markdown] slideshow={"slide_type": "slide"} # ## Build the Graph... # + slideshow={"slide_type": "fragment"} #Add 1st hidden layer model.add(tf.keras.layers.Dense(200, activation='sigmoid')) # + slideshow={"slide_type": "fragment"} #Add 2nd hidden layer model.add(tf.keras.layers.Dense(100, activation='sigmoid')) # + slideshow={"slide_type": "fragment"} #Add 3rd hidden layer model.add(tf.keras.layers.Dense(60, activation='sigmoid')) # + slideshow={"slide_type": "fragment"} #Add 4th hidden layer model.add(tf.keras.layers.Dense(30, activation='sigmoid')) # + slideshow={"slide_type": "fragment"} #Add OUTPUT layer model.add(tf.keras.layers.Dense(10, activation='softmax')) # + [markdown] slideshow={"slide_type": "slide"} # ## Build the Graph... 
# + slideshow={"slide_type": "fragment"} #Create optimizer with non-default learning rate sgd_optimizer = tf.keras.optimizers.SGD(lr=0.03) #Compile the model model.compile(optimizer=sgd_optimizer, loss='categorical_crossentropy', metrics=['accuracy']) # + [markdown] slideshow={"slide_type": "slide"} # ## Review model # - model.summary() # + [markdown] slideshow={"slide_type": "slide"} # ## Train the model # - model.fit(trainX,trainY, validation_data=(testX,testY), epochs=30, batch_size=32)
CourseContent/11-Introduction.to.Neural.Network.and.Deep.Learning/Week1/6. Classification_MNIST_DNN_Keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = 'all' import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline train = pd.read_csv("../data/input/train.csv") test = pd.read_csv("../data/input/test.csv") train.head() train.isnull().sum() test.isnull().sum()
notebooks/eda.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="9aUTR9hD9QuN" # # 1. Apresentação: # + [markdown] id="tPluP35E8VQo" # O desafio deste módulo envolve ajudar a criar um programa em Python que soma três números. Uma pessoa fez parte do código, mas ele precisa ser finalizado. Existem partes do código faltando e correções precisam ser realizadas, pois o código contém 3 erros. **Você deve alterar o código neste notebook.** # # # # # # # + [markdown] id="t-BkgXHeYW3x" # # 2. Dicas # + [markdown] id="m0J6ZTlWQt9p" # * Antes de ir pro código, leia a descrição do desafio e busque definir pequenos passos para chegar a solução. # * Leia o código para se familiarizar com a solução que está em desenvolvimento; # # * Tente entender o que o código faz ou significa. # # * Busque encontrar as partes que faltam ser colocadas e erros no código que precisam ser corrigidos; # # * Se aparecer erros, procure corrigir e/ou repita os passos acima. # # + [markdown] id="KxyVKC7wYbv-" # # 3. Descrição do desafio # + [markdown] id="78VQdbBeXgVN" # Faça um programa que Calcule a **soma** de **três números inteiros**. O programa deve exibir na tela uma mensagem como a indicada abaixo de exemplo: # # OBS: os valores 15, 10, 30 representam números de exemplo. Seu programa deve funcionar para quaisquer outros valores. # # # ``` # Entrada: 15, 10, 30 # Processamento: 15 + 10 + 30 # Saída: 55 # ``` # # # # # # + [markdown] id="OGYH7v5VTc4A" # # 4. Desafio # # Complete e ajuste o código abaixo para solucionar o desafio. # + id="bJh04OEnPb_2" num1 = int(input('Digite o 1º número: ')) num2 = int(input('Digite o 2º número: ')) num3 = int(input('Digite o 3º número: ')) soma = num1 + num2 + num3 print(f'Entrada: {num1}, {num2}, {num3}') print(f'processamento: {num1} + {num2} + {num3}') print(f'Saída: {soma}' ) # -
M0_desafio.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:mpmp]
#     language: python
#     name: conda-env-mpmp-py
# ---

# ## Plot gene set comparison

# In this notebook, we want to compare prediction of mutations in the genes from our cancer gene set, derived from [Vogelstein et al. 2013](https://science.sciencemag.org/content/339/6127/1546), with two other sets of potentially relevant genes. These are:
#
# * The most frequently mutated genes in TCGA
# * A set of random genes in TCGA, that meet our mutation count threshold for 2 or more cancer types
#
# We selected enough genes in each of these gene sets to match the count of the Vogelstein et al. gene set.
#
# In these experiments we only used gene expression data, and we used the set of TCGA samples that have both gene expression and MC3 somatic mutation data. The files analyzed in this notebook were generated by the `run_mutation_prediction.py` script.
#
# Notebook parameters:
# * SIG_ALPHA (float): significance cutoff (after FDR correction)
# * PLOT_AUROC (bool): if True plot AUROC, else plot AUPR

# +
from pathlib import Path

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from adjustText import adjust_text

import mpmp.config as cfg
import mpmp.utilities.analysis_utilities as au
import mpmp.utilities.plot_utilities as plu

# %load_ext autoreload
# %autoreload 2

# +
# set results directory (one subdirectory per gene set, expression data only)
vogelstein_results_dir = Path(cfg.results_dirs['mutation'],
                              'shuffle_cancer_type',
                              'expression_vogelstein',
                              'gene').resolve()

top_50_results_dir = Path(cfg.results_dirs['mutation'],
                          'shuffle_cancer_type',
                          'expression_top',
                          'gene').resolve()

random_50_results_dir = Path(cfg.results_dirs['mutation'],
                             'shuffle_cancer_type',
                             'expression_random',
                             'gene').resolve()

# set significance cutoff after FDR correction
SIG_ALPHA = 0.001

# if True, save figures to ./images directory
SAVE_FIGS = True

# if True, plot AUROC instead of AUPR
PLOT_AUROC = False
if PLOT_AUROC:
    plot_metric = 'auroc'
    images_dir = Path(cfg.images_dirs['mutation'], 'auroc')
else:
    plot_metric = 'aupr'
    images_dir = Path(cfg.images_dirs['mutation'])

# +
# load raw data (Vogelstein et al. gene set)
vogelstein_df = au.load_stratified_prediction_results(vogelstein_results_dir, 'gene')
vogelstein_df = vogelstein_df[vogelstein_df.training_data.isin(['expression'])]
vogelstein_df['gene_set'] = 'vogelstein'

# make sure that we're correctly pointing to raw data for non-methylation data types
# and that we have data for two replicates (two random seeds)
print(vogelstein_df.shape)
print(vogelstein_df.seed.unique())
print(vogelstein_df.training_data.unique())
vogelstein_df.head()

# +
# load raw data (50 most frequently mutated genes)
top_50_df = au.load_stratified_prediction_results(top_50_results_dir, 'gene')
top_50_df = top_50_df[top_50_df.training_data.isin(['expression'])]
top_50_df['gene_set'] = 'top_50'

# make sure that we're correctly pointing to raw data for non-methylation data types
# and that we have data for two replicates (two random seeds)
print(top_50_df.shape)
print(top_50_df.seed.unique())
print(top_50_df.training_data.unique())
top_50_df.head()

# +
# load raw data (random gene set)
random_50_df = au.load_stratified_prediction_results(random_50_results_dir, 'gene')
random_50_df = random_50_df[random_50_df.training_data.isin(['expression'])]
random_50_df['gene_set'] = 'random_50'

# make sure that we're correctly pointing to raw data for non-methylation data types
# and that we have data for two replicates (two random seeds)
print(random_50_df.shape)
print(random_50_df.seed.unique())
print(random_50_df.training_data.unique())
random_50_df.head()

# +
# combine results dataframes; the gene set name is stored in the
# `training_data` column so downstream utilities can group by it
results_df = (
    pd.concat((vogelstein_df, top_50_df, random_50_df))
      .drop(columns=['training_data', 'experiment'])
      .rename(columns={'gene_set': 'training_data'})
)

all_results_df = au.compare_all_data_types(results_df,
                                           SIG_ALPHA,
                                           filter_genes=False,
                                           metric=plot_metric)
all_results_df['nlog10_p'] = -np.log10(all_results_df.corr_pval)
all_results_df.sort_values(by='p_value').head(10)

# +
# volcano plot: all three gene sets side by side
sns.set({'figure.figsize': (24, 6)})
sns.set_style('whitegrid')

fig, axarr = plt.subplots(1, 3)

# map internal gene-set ids to display names
gene_set_map = {
    'random_50': 'random',
    'top_50': 'most mutated',
    'vogelstein': 'Vogelstein et al.'
}
all_results_df.training_data.replace(to_replace=gene_set_map, inplace=True)

plu.plot_volcano_baseline(all_results_df,
                          axarr,
                          gene_set_map,
                          SIG_ALPHA,
                          metric=plot_metric,
                          verbose=True,
                          mark_overlap=True)

if SAVE_FIGS:
    images_dir.mkdir(exist_ok=True)
    plt.savefig(images_dir / 'expression_vs_shuffled.svg', bbox_inches='tight')
    plt.savefig(images_dir / 'expression_vs_shuffled.png',
                dpi=300, bbox_inches='tight')

# +
# volcano plot: Vogelstein gene set only
sns.set({'figure.figsize': (7, 6)})
sns.set_style('whitegrid')

fig, axarr = plt.subplots(1, 1)

gene_set_map = {
    'vogelstein': 'Vogelstein et al.'
}
# NOTE(review): training_data values were already mapped to display names
# in the previous cell, so this replace is a no-op unless cells are re-run
# out of order -- confirm intended.
all_results_df.training_data.replace(to_replace=gene_set_map, inplace=True)

plu.plot_volcano_baseline(all_results_df,
                          axarr,
                          gene_set_map,
                          SIG_ALPHA,
                          metric=plot_metric,
                          verbose=True)

if SAVE_FIGS:
    images_dir.mkdir(exist_ok=True)
    plt.savefig(images_dir / 'expression_vogelstein.svg', bbox_inches='tight')
    plt.savefig(images_dir / 'expression_vogelstein.png',
                dpi=300, bbox_inches='tight')

# +
# box plots of performance deltas per gene set, with significance annotations
sns.set({'figure.figsize': (10, 5)})
sns.set_style('whitegrid')

# we want these colors to be different than the expression/methylation ones
sns.set_palette('Set2')

fig, axarr = plt.subplots(1, 1)

gene_set_map = {
    'random_50': 'random',
    'top_50': 'most mutated',
    'vogelstein': 'Vogelstein et al.'
}
all_results_df = (all_results_df
    .rename(columns={'training_data': 'gene_set'})
    .sort_values(by='gene_set', ascending=False)
)

# plot mean performance over all genes in Vogelstein dataset
ax = axarr
sns.boxplot(data=all_results_df, x='gene_set', y='delta_mean', ax=ax)
ax.set_title('Performance distribution for all genes in each gene set, gene expression only', size=14)
ax.set_xlabel('Target gene set', size=13)
ax.set_ylabel('{}(signal) - {}(shuffled)'.format(
                  plot_metric.upper(), plot_metric.upper()),
              size=13)
ax.set_ylim(-0.2, max(all_results_df.delta_mean + 0.05))
for tick in ax.get_xticklabels():
    tick.set_fontsize(13)

plt.tight_layout()

# pairwise statistical annotations between gene sets
tests_df = plu.add_annotation(ax,
                              all_results_df.rename(
                                  columns={'gene_set': 'training_data'}
                              ),
                              all_pairs=(gene_set_map.values()),
                              metric=plot_metric,
                              box_pairs=[('random', 'most mutated'),
                                         ('most mutated', 'Vogelstein et al.'),
                                         ('random', 'Vogelstein et al.')])

if SAVE_FIGS:
    plt.savefig(images_dir / 'expression_boxes.svg', bbox_inches='tight')
    plt.savefig(images_dir / 'expression_boxes.png',
                dpi=300, bbox_inches='tight')
# -

# pairwise rank sum tests comparing results distributions
# H0: results distributions are the same between the data types
tests_df.sort_values('p_value')

# +
# strip plots: per-gene deltas for all genes vs. significant genes only
sns.set({'figure.figsize': (18, 6)})
sns.set_style('whitegrid')

# we want these colors to be different than the expression/methylation ones
sns.set_palette('Set2')

fig, axarr = plt.subplots(1, 2)
all_results_df.replace({'gene_set': gene_set_map}, inplace=True)

# plot mean performance over all genes in Vogelstein dataset
ax = axarr[0]
sns.stripplot(data=all_results_df, x='gene_set', y='delta_mean', ax=ax)
ax.set_title('Prediction for all genes, performance vs. gene set')
ax.set_xlabel('Target gene set')
ax.set_ylabel('{}(signal) - {}(shuffled)'.format(
    plot_metric.upper(), plot_metric.upper()))
ax.set_ylim(-0.2, max(all_results_df.delta_mean + 0.05))

# plot mean performance for genes that are significant for at least one data type
ax = axarr[1]
gene_list = all_results_df[all_results_df.reject_null == True].gene.unique()
print(gene_list.shape)
print(gene_list)
sns.stripplot(data=all_results_df[all_results_df.gene.isin(gene_list)],
              x='gene_set', y='delta_mean', ax=ax)
ax.set_title('Prediction for significant genes, performance vs. gene set')
ax.set_xlabel('Target gene set')
ax.set_ylabel('{}(signal) - {}(shuffled)'.format(
    plot_metric.upper(), plot_metric.upper()))
ax.set_ylim(-0.2, max(all_results_df.delta_mean + 0.05))
# -

# ### Calculate gene set overlap
#
# Of the significantly predictable genes in the top/random gene sets, how many of them are in the Vogelstein gene set?

# +
from venn import venn

# first look at overlap of all genes
genes_in_gene_set = {}
for gene_set in all_results_df.gene_set.unique():
    gene_list = all_results_df[all_results_df.gene_set == gene_set].gene.unique()
    print(gene_set, len(gene_list))
    genes_in_gene_set[gene_set] = set(gene_list)

venn(genes_in_gene_set)
plt.title('Gene overlap between all genes in gene set', size=14)

# +
# now look at overlap of significant genes
genes_in_gene_set = {}
for gene_set in all_results_df.gene_set.unique():
    gene_list = all_results_df[(all_results_df.gene_set == gene_set) &
                               (all_results_df.reject_null)].gene.unique()
    print(gene_set, len(gene_list))
    genes_in_gene_set[gene_set] = set(gene_list)

venn(genes_in_gene_set)
plt.title('Gene overlap between significantly predictable genes in gene set', size=14)
02_classify_mutations/plot_expression_gene_sets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Tutorial "Algorithmic Methods for Network Analysis with NetworKit" (Part 3)
#
# ## Determining Important Nodes

# There are a number of ways to measure the importance of nodes in a network. Possibly the easiest is the degree, i.e. the number of neighbors. In a social network, for example, a person who knows many others could be an important person. However, is this notion really meaningful? Probably not, since it does not consider the importance of the neighbors. Also, there is an interesting effect in social networks with respect to neighborhood sizes. Let us investigate this effect a little bit:
#
# ### Q&A Session #4
# 1. Do you think your number of online friends is above/below/on average? (You do not have to answer this question openly.)
# **Answer (may be secret):**
#
# 2. What do you expect: How many people (in percent) in a social network have fewer friends than their friends on average?
# **Answer (choose one):**
# a) 0 - 25%
# b) 26 - 50%
# c) 51 - 75%
# d) 76 - 100%
#
# 3. Use the Facebook graph. Compute for each vertex the average degree of its neighbors.
# **Answer:**
#
# 4. Count the number of persons whose friends have on average more friends. What is their percentage in this network?
# **Answer:**
#

# Tutorial convention: wildcard import puts readGraph, Format, generators,
# etc. directly in the notebook namespace.
from networkit import *
# %matplotlib inline

# cd ~/workspace/NetworKit

# Facebook friendship graph (MIT8), tab-separated zero-indexed edge list.
G = readGraph("input/MIT8.edgelist", Format.EdgeListTabZero)

# +
# Code for 3-3) and 3-4)
# %matplotlib inline
import matplotlib.pyplot as plt

# def avgFriendDegree(v):
# -

count = 0 # count the number of persons whose friends have on average more friends

# Thus, **... % of the users in this network have fewer friends than their friends have on average**. While this result cannot be generalized exactly like this to other networks, the qualitative effect is often seen in social (and other) networks. Thus, let us now consider measures that broaden the rather narrow scope of the degree.
#
# ### $k$-core Decomposition
# Thus, the next concept we consider is the $k$-core decomposition. To answer the following Q&A session, go back to the lecture slides.
#
# ### Q&A Session #5
#
# 1. What is the definition of an $i$-core? (Note that $k$ is often used for the largest core only!)
# **Answer:**
#
# 2. Why do you think it can be considered a more robust measure for importance compared to the degree?
# **Answer:**
#
# 3. Compute the $k$-core decomposition of the three networks used before. Then print the non-empty $i$-shells by using the method coreNumbers(). What results (similarities/differences) do you expect? Are these expectations met by the results?
# **Answer:**
#
# 4. What disadvantage do you see when using $k$-core decomposition to rate nodes according to their importance?
# **Answer:**

# +
# Code for 5-3)
# Three test graphs: a social network, a mesh, and an Erdos-Renyi random graph.
mit8 = readGraph("input/MIT8.edgelist", Format.EdgeListTabZero)
airf1 = readGraph("input/airfoil1.graph", Format.METIS)
gen = generators.ErdosRenyiGenerator(1000, 0.01)
er1000 = gen.generate()

# for g in {mit8, airf1, er1000}:
# -

# ### Centrality Measures
# The $k$-core decomposition is rather, as the name suggests, a decomposition of the vertices into discrete subsets. Nodes with the same coreness (i.e. in the same shell) have equal importance. Rankings where many vertices are equally important are often not very meaningful. That is why the $k$-core decomposition should *not* be interpreted as a *fine-grained* ranking mechanism.
#
# ### Q&A Session #6
#
# 1. Take the Facebook graph MIT8 and find the most central nodes. Take the relevance of their neighbors into account. Consider that MIT8 models a social network, not a web graph. Which algorithm would you choose? (Hint: Look at the lecture slides!)
# **Answer:**
#
# 2. What are the 15 most important nodes according to the method in 1)?
# **Answer:**
#
# 3. What other centrality measures do you recall?
# **Answer:**
#
#
# After you answered the questions, proceed with Tutorial #4.

# # Code for 6-1) and 6-2)
Doc/Notebooks/NetworKit_Tutorial_Part_3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="23R0Z9RojXYW"
# # Ungraded Lab: Iterative Schema with TFX and ML Metadata
#
#
# In this notebook, you will get to review how to update an inferred schema and save the result to the metadata store used by TFX. As mentioned before, the TFX components get information from this database before running executions. Thus, if you will be curating a schema, you will need to save this as an artifact in the metadata store. You will get to see how that is done in the following exercise.
#
# Afterwards, you will also practice accessing the TFX metadata store and see how you can track the lineage of an artifact.

# + [markdown] id="2GivNBNYjb3b"
# ## Setup

# + [markdown] id="N-ePgV0Lj68Q"
# ### Imports

# + id="YIqpWK9efviJ"
import tensorflow as tf
import tensorflow_data_validation as tfdv
from tfx.components import CsvExampleGen
from tfx.components import ExampleValidator
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
# NOTE(review): ImporterNode was replaced by `tfx.dsl.Importer` in newer TFX
# releases -- this lab targets an older API; confirm the pinned TFX version.
from tfx.components import ImporterNode
from tfx.types import standard_artifacts

from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
from google.protobuf.json_format import MessageToDict
from tensorflow_metadata.proto.v0 import schema_pb2

import os
import pprint
pp = pprint.PrettyPrinter()
# -

# ### Define paths
#
# For familiarity, you will again be using the [Census Income dataset](https://archive.ics.uci.edu/ml/datasets/Adult) from the previous weeks' ungraded labs. You will use the same paths to your raw data and pipeline files as shown below.

# +
# location of the pipeline metadata store
_pipeline_root = './pipeline/'

# directory of the raw data files
_data_root = './data/census_data'

# path to the raw training data
_data_filepath = os.path.join(_data_root, 'adult.data')
# -

# ## Data Pipeline
#
# Each TFX component you use accepts and generates artifacts which are instances of the different artifact types TFX has configured in the metadata store. The properties of these instances are shown neatly in a table in the outputs of `context.run()`. TFX does all of these for you so you only need to inspect the output of each component to know which property of the artifact you can pass on to the next component (e.g. the `outputs['examples']` of `ExampleGen` can be passed to `StatisticsGen`).
#
# Since you've already used this dataset before, we will just quickly go over `ExampleGen`, `StatisticsGen`, and `SchemaGen`. The new concepts will be discussed after the said components.

# + [markdown] id="8ONIE_hdkPS4"
# ### Create the Interactive Context

# + id="0Rh6K5sUf9dd"
# Initialize the InteractiveContext.
# If you leave `_pipeline_root` blank, then the db will be created in a temporary directory.
context = InteractiveContext(pipeline_root=_pipeline_root)

# + [markdown] id="L9fwt9gQk3BR"
# ### ExampleGen

# + id="PyXjuMt8f-9u"
# Instantiate ExampleGen with the input CSV dataset
example_gen = CsvExampleGen(input_base=_data_root)

# Execute the component
context.run(example_gen)

# + [markdown] id="csM6BFhtk5Aa"
# ### StatisticsGen

# + id="MAscCCYWgA-9"
# Instantiate StatisticsGen with the ExampleGen ingested dataset
statistics_gen = StatisticsGen(
    examples=example_gen.outputs['examples'])

# Execute the component
context.run(statistics_gen)

# + [markdown] id="HLKLTO9Nk60p"
# ### SchemaGen

# + id="ygQvZ6hsiQ_J"
# Instantiate SchemaGen with the StatisticsGen ingested dataset
schema_gen = SchemaGen(
    statistics=statistics_gen.outputs['statistics'],
    )

# Run the component
context.run(schema_gen)

# + id="Ec9vqDXpXeMb"
# Visualize the schema
context.show(schema_gen.outputs['schema'])
# -

# ### Curating the Schema
# Now that you have the inferred schema, you can proceed to revising it to be more robust. For instance, you can restrict the age as you did in Week 1. First, you have to load the `Schema` protocol buffer from the metadata store. You can do this by getting the schema uri from the output of `SchemaGen` then use TFDV's `load_schema_text()` method.

# +
# Get the schema uri
schema_uri = schema_gen.outputs['schema']._artifacts[0].uri

# Get the schema pbtxt file from the SchemaGen output
schema = tfdv.load_schema_text(os.path.join(schema_uri, 'schema.pbtxt'))
# -

# With that, you can now make changes to the schema as before. For the purpose of this exercise, you will only modify the age domain but feel free to add more if you want.

# +
# Restrict the range of the `age` feature
tfdv.set_domain(schema, 'age', schema_pb2.IntDomain(name='age', min=17, max=90))

# Display the modified schema. Notice the `Domain` column of `age`.
tfdv.display_schema(schema)
# -

# ### Schema Environments
#
# By default, your schema will watch for all the features declared above including the label. However, when the model is served for inference, it will get datasets that will not have the label because that is the feature that the model will be trying to predict. You need to configure the pipeline to not raise an alarm when this kind of dataset is received.
#
# You can do that with [schema environments](https://www.tensorflow.org/tfx/tutorials/data_validation/tfdv_basic#schema_environments). First, you will need to declare training and serving environments, then configure the serving schema to not watch for the presence of labels. See how it is implemented below.

# +
# Create schema environments for training and serving
schema.default_environment.append('TRAINING')
schema.default_environment.append('SERVING')

# Omit label from the serving environment
tfdv.get_feature(schema, 'label').not_in_environment.append('SERVING')
# -

# You can now freeze the curated schema and save to a local directory.

# +
# Declare the path to the updated schema directory
_updated_schema_dir = f'{_pipeline_root}/updated_schema'

# Create the said directory
# !mkdir -p {_updated_schema_dir}

# Declare the path to the schema file
schema_file = os.path.join(_updated_schema_dir, 'schema.pbtxt')

# Save the curated schema to the said file
tfdv.write_schema_text(schema, schema_file)
# -

# ### ImporterNode
#
# Now that the schema has been saved, you need to create an artifact in the metadata store that will point to it. TFX provides the [ImporterNode](https://www.tensorflow.org/tfx/guide/statsgen#using_the_statsgen_component_with_a_schema) component used to import external objects to ML Metadata. You will need to pass in the URI of the object and what type of artifact it is. See the syntax below.

# +
# Use an ImporterNode to put the curated schema to ML Metadata
user_schema_importer = ImporterNode(
    instance_name='import_user_schema',
    source_uri=_updated_schema_dir,
    artifact_type=standard_artifacts.Schema
)

# Run the component
context.run(user_schema_importer, enable_cache=False)
# -

# If you pass in the component output to `context.show()`, then you should see the schema.

# See the result
context.show(user_schema_importer.outputs['result'])

# + [markdown] id="V1qcUuO9k9f8"
# ### ExampleValidator
#
# You can then use this new artifact as input to the other components of the pipeline. See how it is used as the `schema` argument in `ExampleValidator` below.

# + id="XRlRUuGgiXks"
# Instantiate ExampleValidator with the StatisticsGen and SchemaGen ingested data
example_validator = ExampleValidator(
    statistics=statistics_gen.outputs['statistics'],
    schema=user_schema_importer.outputs['result'])

# Run the component.
context.run(example_validator)

# + id="TDyAAozQcrk3"
# Visualize the results
context.show(example_validator.outputs['anomalies'])
# -

# ### Practice with ML Metadata
#
# At this point, you should now take some time exploring the contents of the metadata store saved by your component runs. This will let you practice tracking artifacts and how they are related to each other. This involves looking at artifacts, executions, and events. This skill will let you recover related artifacts even without seeing the code of the training run. All you need is access to the metadata store.
#
# See how the input artifact IDs to an instance of `ExampleAnomalies` are tracked in the following cells. If you have this notebook, then you will already know that it uses the output of StatisticsGen for this run and also the curated schema you imported. However, if you already have hundreds of training runs and parameter iterations, you may find it hard to track which is which. That's where the metadata store will be useful. Since it records information about a specific pipeline run, you will be able to track the inputs and outputs of a particular artifact.
#
# You will start by setting the connection config to the metadata store.

# +
# Import mlmd and utilities
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2

# Get the connection config to connect to the context's metadata store
connection_config = context.metadata_connection_config

# Instantiate a MetadataStore instance with the connection config
store = mlmd.MetadataStore(connection_config)
# -

# Next, let's see what artifact types are available in the metadata store.

# +
# Get artifact types
artifact_types = store.get_artifact_types()

# Print the results
[artifact_type.name for artifact_type in artifact_types]
# -

# If you get the artifacts of type `Schema`, you will see that there are two entries. One is the inferred and the other is the one you imported. At the end of this exercise, you can verify that the curated schema is the one used for the `ExampleValidator` run we will be investigating.

# +
# Get artifact types
schema_list = store.get_artifacts_by_type('Schema')

[(f'schema uri: {schema.uri}', f'schema id:{schema.id}') for schema in schema_list]
# -

# Let's get the first instance of `ExampleAnomalies` to get the output of `ExampleValidator`.

# +
# Get 1st instance of ExampleAnomalies
example_anomalies = store.get_artifacts_by_type('ExampleAnomalies')[0]

# Print the artifact id
print(f'Artifact id: {example_anomalies.id}')
# -

# You will use the artifact ID to get events related to it. Let's just get the first instance.

# +
# Get first event related to the ID
anomalies_id_event = store.get_events_by_artifact_ids([example_anomalies.id])[0]

# Print results
print(anomalies_id_event)
# -

# As expected, the event type will be an `OUTPUT` because this is the output of the `ExampleValidator` component. Since we want to get the inputs, we can track it through the execution id.
# + # Get execution ID anomalies_execution_id = anomalies_id_event.execution_id # Get events by the execution ID events_execution = store.get_events_by_execution_ids([anomalies_execution_id]) # Print results print(events_execution) # - # We see the artifacts which are marked as `INPUT` above representing the statistics and schema inputs. We can extract their IDs programmatically like this. You will see that you will get the artifact ID of the curated schema you printed out earlier. # + # Filter INPUT type events inputs_to_exval = [event.artifact_id for event in events_execution if event.type == metadata_store_pb2.Event.INPUT] # Print results print(inputs_to_exval) # - # **Congratulations!** You have now completed this notebook on iterative schemas and saw how it can be used in a TFX pipeline. You were also able to track an artifact's lineage by looking at the artifacts, events, and executions in the metadata store. These will come in handy in this week's assignment!
Course 2. Machine Learning Data Lifecycle in Production/Labs/C2_W3_Lab_2_IterativeSchema.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Dogs-vs-Cats CNN tutorial notebook.
# Builds a 50x50 grayscale dataset from the Kaggle PetImages folders, trains a
# 3-conv-layer network on CPU, repeats training/evaluation on GPU, and finally
# logs accuracy/loss to a file for plotting.

# +
import os
import cv2
import numpy as np
from tqdm import tqdm
import time

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import matplotlib.pyplot as plt

# %config Completer.use_jedi = False

# +
REBUILD_DATA = True  # set to False after the first run to skip reprocessing


class DogsVSCats():
    """Builds the training set: 50x50 grayscale images with one-hot labels."""
    IMG_SIZE = 50
    CATS = "data/PetImages/Cat"
    DOGS = "data/PetImages/Dog"
    TESTING = "data/PetImages/Testing"
    LABELS = {CATS: 0, DOGS: 1}
    training_data = []

    # per-class counters, used to check the dataset is balanced
    catcount = 0
    dogcount = 0

    def make_training_data(self):
        """Read, resize and label every jpg under CATS/DOGS, shuffle, and save to .npy."""
        for label in self.LABELS:
            for f in tqdm(os.listdir(label)):
                if "jpg" in f:
                    try:
                        path = os.path.join(label, f)
                        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
                        img = cv2.resize(img, (self.IMG_SIZE, self.IMG_SIZE))
                        # np.eye(2)[k] turns the class index k into a one-hot vector
                        self.training_data.append([np.array(img), np.eye(2)[self.LABELS[label]]])

                        if label == self.CATS:
                            self.catcount += 1
                        elif label == self.DOGS:
                            self.dogcount += 1
                    except Exception:
                        # the PetImages dump contains some corrupt images; best-effort skip
                        pass

        np.random.shuffle(self.training_data)
        np.save("data/PetImages/training_data.npy", self.training_data)
        # fixed: report counters via self instead of reaching for the
        # module-level `dogsvcats` instance
        print('Cats:', self.catcount)
        print('Dogs:', self.dogcount)


if REBUILD_DATA:
    dogsvcats = DogsVSCats()
    dogsvcats.make_training_data()
# -

training_data = np.load("data/PetImages/training_data.npy", allow_pickle=True)
print(len(training_data))

X = torch.Tensor([i[0] for i in training_data]).view(-1, 50, 50)
X = X / 255.0  # normalize pixel values to [0, 1]
y = torch.Tensor([i[1] for i in training_data])

plt.imshow(X[0], cmap="gray")
y[0]

# # Model

# +
class Net(nn.Module):
    """Three conv+maxpool layers followed by two fully-connected layers."""

    def __init__(self):
        super().__init__()  # run the init of the parent class (nn.Module)
        self.conv1 = nn.Conv2d(1, 32, 5)   # 1 input channel, 32 output, 5x5 kernel
        self.conv2 = nn.Conv2d(32, 64, 5)  # 32 in (conv1's output), 64 out, 5x5 kernel
        self.conv3 = nn.Conv2d(64, 128, 5)

        # Small routine to discover the flattened size after the conv stack:
        # run a dummy 50x50 input through convs() once and record the shape.
        x = torch.randn(50, 50).view(-1, 1, 50, 50)
        self._to_linear = None
        self.convs(x)

        self.fc1 = nn.Linear(self._to_linear, 512)  # flattening
        self.fc2 = nn.Linear(512, 2)  # 2 outputs: dog vs cat

    def convs(self, x):
        """Conv feature extractor; 2x2 max pooling after every conv."""
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))

        if self._to_linear is None:
            # record channels * height * width of a single sample for fc1
            self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
        return x

    def forward(self, x):
        x = self.convs(x)
        x = x.view(-1, self._to_linear)  # flatten before the dense layers
        x = F.relu(self.fc1(x))
        x = self.fc2(x)  # output layer — no activation here
        return F.softmax(x, dim=1)


Net()

# +
net = Net()
optimizer = optim.Adam(net.parameters(), lr=0.001)
loss_function = nn.MSELoss()
# -

X = torch.Tensor([i[0] for i in training_data]).view(-1, 50, 50)
X = X / 255.0
y = torch.Tensor([i[1] for i in training_data])

VAL_PCT = 0.1  # reserve 10% of the data for validation
val_size = int(len(X) * VAL_PCT)
print(val_size)

# +
train_X = X[:-val_size]
train_y = y[:-val_size]

test_X = X[-val_size:]
test_y = y[-val_size:]

print(len(train_X), len(test_X))

# +
# %%time
BATCH_SIZE = 100
EPOCHS = 1

# CPU training loop
for epoch in range(EPOCHS):
    for i in tqdm(range(0, len(train_X), BATCH_SIZE)):  # step through train_X in batches
        batch_X = train_X[i:i+BATCH_SIZE].view(-1, 1, 50, 50)
        batch_y = train_y[i:i+BATCH_SIZE]

        net.zero_grad()
        outputs = net(batch_X)
        loss = loss_function(outputs, batch_y)
        loss.backward()
        optimizer.step()  # does the update
    print(f"Epoch: {epoch}. Loss: {loss}")

# +
# %%time
correct = 0
total = 0
with torch.no_grad():
    for i in tqdm(range(len(test_X))):
        real_class = torch.argmax(test_y[i])
        net_out = net(test_X[i].view(-1, 1, 50, 50))[0]  # batch of one; take first row
        predicted_class = torch.argmax(net_out)

        if predicted_class == real_class:
            correct += 1
        total += 1
print("Accuracy: ", round(correct/total, 3))
# -

# # Run on GPU

if torch.cuda.is_available():
    device = torch.device("cuda")
    print("Running on the GPU")
else:
    device = torch.device("cpu")
    print("Running on the CPU")

device

torch.cuda.device_count()

# +
net = Net()
net.to(device)

# +
# %%time
EPOCHS = 1

def train(net, EPOCHS):
    """Train `net` on the module-level train_X/train_y, moving batches to `device`."""
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    BATCH_SIZE = 100
    for epoch in range(EPOCHS):
        for i in tqdm(range(0, len(train_X), BATCH_SIZE)):
            batch_X = train_X[i:i+BATCH_SIZE].view(-1, 1, 50, 50)
            batch_y = train_y[i:i+BATCH_SIZE]

            batch_X, batch_y = batch_X.to(device), batch_y.to(device)

            # fixed: the original called both net.zero_grad() and
            # optimizer.zero_grad(); one is enough (the optimizer holds
            # every parameter of net), so the duplicate was dropped
            optimizer.zero_grad()

            outputs = net(batch_X)
            # NOTE(review): uses the module-level loss_function (MSELoss)
            loss = loss_function(outputs, batch_y)
            loss.backward()
            optimizer.step()  # does the update
        print(f"Epoch: {epoch}. Loss: {loss}")

train(net, EPOCHS)

# +
# %%time
# fixed: Tensor.to() is NOT in-place — the original `test_X.to(device)`
# discarded its result and was a no-op; the moved tensors must be reassigned.
test_X = test_X.to(device)
test_y = test_y.to(device)

def test(net):
    """Evaluate `net` on the module-level test set, one sample at a time."""
    correct = 0
    total = 0
    with torch.no_grad():
        for i in tqdm(range(len(test_X))):
            real_class = torch.argmax(test_y[i]).to(device)
            net_out = net(test_X[i].view(-1, 1, 50, 50).to(device))[0]
            predicted_class = torch.argmax(net_out)

            if predicted_class == real_class:
                correct += 1
            total += 1
    print("Accuracy: ", round(correct/total, 3))

test(net)

# +
# %%time
EPOCHS = 3
train(net, EPOCHS)

# +
# %%time
test(net)
# -

# # Model Analysis

def fwd_pass(X, y, net, optimizer, train=False):
    """One forward (and optionally backward) pass; returns (accuracy, loss)."""
    if train:
        net.zero_grad()
    outputs = net(X)
    matches = [torch.argmax(i) == torch.argmax(j) for i, j in zip(outputs, y)]
    acc = matches.count(True) / len(matches)
    # NOTE(review): reads the module-level loss_function
    loss = loss_function(outputs, y)
    if train:
        loss.backward()
        optimizer.step()
    return acc, loss

# +
def test(net, optimizer, size=32):
    """Evaluate on a random contiguous slice of the test set; returns (acc, loss)."""
    random_start = np.random.randint(len(test_X) - size)
    X, y = test_X[random_start:random_start + size], test_y[random_start:random_start + size]
    with torch.no_grad():
        val_acc, val_loss = fwd_pass(X.view(-1, 1, 50, 50).to(device), y.to(device), net, optimizer)
    return val_acc, val_loss

val_acc, val_loss = test(net, optimizer)
print(val_acc, val_loss)

# +
MODEL_NAME = f"model-{int(time.time())}"  # timestamped name to tag log rows

net = Net().to(device)
print(MODEL_NAME)

def train(net):
    """Train for 8 epochs, appending train/val acc and loss to model.log."""
    BATCH_SIZE = 100
    EPOCHS = 8
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    # NOTE(review): this local loss_function is unused — fwd_pass reads the
    # module-level one; kept to preserve the original behavior.
    loss_function = nn.MSELoss()
    with open("data/PetImages/model.log", "a") as f:
        for epoch in range(EPOCHS):
            for i in tqdm(range(0, len(train_X), BATCH_SIZE)):
                batch_X = train_X[i : i + BATCH_SIZE].view(-1, 1, 50, 50)
                batch_y = train_y[i : i + BATCH_SIZE]
                batch_X, batch_y = batch_X.to(device), batch_y.to(device)

                acc, loss = fwd_pass(batch_X, batch_y, net, optimizer, train=True)
                # NOTE(review): i advances by 100, so i % 50 == 0 is always
                # true and validation runs every batch — confirm intent.
                if i % 50 == 0:
                    val_acc, val_loss = test(net, optimizer, size=100)
                    f.write(f"{MODEL_NAME},{round(time.time(),3)},{round(float(acc),2)},{round(float(loss),4)},{round(float(val_acc),2)},{round(float(val_loss),4)}\n")

train(net)

# +
def create_acc_loss_graph(model_name):
    """Plot train/val accuracy and loss over time for rows tagged model_name."""
    contents = open("data/PetImages/model.log", "r").read().split("\n")

    times = []
    accuracies = []
    losses = []

    val_accs = []
    val_losses = []

    for c in contents:
        if model_name in c:
            name, timestamp, acc, loss, val_acc, val_loss = c.split(",")

            times.append(float(timestamp))
            accuracies.append(float(acc))
            losses.append(float(loss))

            val_accs.append(float(val_acc))
            val_losses.append(float(val_loss))

    fig = plt.figure()

    ax1 = plt.subplot2grid((2,1), (0,0))
    ax2 = plt.subplot2grid((2,1), (1,0), sharex=ax1)

    ax1.plot(times, accuracies, label="acc")
    ax1.plot(times, val_accs, label="val_acc")
    ax1.legend(loc=2)

    ax2.plot(times, losses, label="loss")
    ax2.plot(times, val_losses, label="val_loss")
    ax2.legend(loc=2)

    plt.show()

create_acc_loss_graph(MODEL_NAME)
# -

MODEL_NAME
04_cnn_pytorch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="qHiqEClOQw47" # # GPU # + id="OWS_Y0_oQxwo" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611019523257, "user_tz": -540, "elapsed": 1781, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} outputId="e0a45ed0-3b97-472a-f4b1-5528dd0caadd" # gpu_info = !nvidia-smi gpu_info = '\n'.join(gpu_info) print(gpu_info) # + [markdown] papermill={"duration": 0.036797, "end_time": "2021-01-06T02:37:55.540973", "exception": false, "start_time": "2021-01-06T02:37:55.504176", "status": "completed"} tags=[] id="2KlGxhybFNXf" # # CFG # + id="Cd1XrpOyHaV3" executionInfo={"status": "ok", "timestamp": 1611019532873, "user_tz": -540, "elapsed": 2235, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} CONFIG_NAME = 'config16.yml' from requests import get filename = get('http://1172.16.17.32:9000/api/sessions').json()[0]['name'] TITLE = filename.split('.')[0] # + id="gBGBfH7wI0mh" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611019533675, "user_tz": -540, "elapsed": 2993, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} outputId="06d32926-052f-4ac7-ac26-4ea46c5c8d88" # ! 
rm -r cassava # + papermill={"duration": 0.060245, "end_time": "2021-01-06T02:37:55.649947", "exception": false, "start_time": "2021-01-06T02:37:55.589702", "status": "completed"} tags=[] id="UAs3iytNFNXf" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611019534028, "user_tz": -540, "elapsed": 3327, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} outputId="b115c7c2-c5ad-4e9a-9377-0f88724fcf7d" # ! git clone https://github.com/raijin0704/cassava.git # ==================================================== # CFG # ==================================================== import yaml CONFIG_PATH = f'./cassava/config/{CONFIG_NAME}' with open(CONFIG_PATH) as f: config = yaml.load(f) INFO = config['info'] TAG = config['tag'] CFG = config['cfg'] CFG['train'] = True CFG['inference'] = False # CFG['debug'] = True if CFG['debug']: CFG['epochs'] = 1 assert INFO['TITLE'] == TITLE, f'{TITLE}, {INFO["TITLE"]}' # + [markdown] id="P0Tbn-ZzJt-6" # # colab & kaggle notebookでの環境面の処理 # + [markdown] id="K6AY_ibRGngj" # ## colab # + id="B5o7mKoTGj3S" executionInfo={"status": "ok", "timestamp": 1611019534515, "user_tz": -540, "elapsed": 3810, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} def _colab_kaggle_authority(): from googleapiclient.discovery import build import io, os from googleapiclient.http import MediaIoBaseDownload drive_service = build('drive', 'v3') results = drive_service.files().list( q="name = 'kaggle.json'", fields="files(id)").execute() kaggle_api_key = results.get('files', []) filename = "/root/.kaggle/kaggle.json" os.makedirs(os.path.dirname(filename), exist_ok=True) request = drive_service.files().get_media(fileId=kaggle_api_key[0]['id']) fh = io.FileIO(filename, 'wb') downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() print("Download %d%%." 
% int(status.progress() * 100)) os.chmod(filename, 600) def _install_apex(): import os import subprocess import sys # import time subprocess.run('git clone https://github.com/NVIDIA/apex'.split(' ')) # time.sleep(10) os.chdir('apex') subprocess.run('pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" .'.split(' ')) os.chdir('..') def process_colab(): import subprocess # ドライブのマウント from google.colab import drive drive.mount('/content/drive') # Google Cloudの権限設定 from google.colab import auth auth.authenticate_user() # kaggle設定 # _colab_kaggle_authority() # subprocess.run('pip install --upgrade --force-reinstall --no-deps kaggle'.split(' ')) # ライブラリ関係 subprocess.run('pip install --upgrade opencv-python'.split(' ')) subprocess.run('pip install --upgrade albumentations'.split(' ')) subprocess.run('pip install timm'.split(' ')) # if CFG['apex']: # print('installing apex') # _install_apex() # print('done') # 各種pathの設定 # DATA_PATH = '/content/drive/Shareddrives/便利用/kaggle/cassava/input/' DATA_PATH = '/content/input' OUTPUT_DIR = './output/' NOTEBOOK_PATH = f'/content/drive/Shareddrives/便利用/kaggle/cassava/notebook/{TITLE}.ipynb' return DATA_PATH, OUTPUT_DIR, NOTEBOOK_PATH # + [markdown] id="rSm3Aw9uLB0K" # ## kaggle notebook # + id="fc-eIAxgNfrF" executionInfo={"status": "ok", "timestamp": 1611019534516, "user_tz": -540, "elapsed": 3807, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} def _kaggle_gcp_authority(): from kaggle_secrets import UserSecretsClient user_secrets = UserSecretsClient() user_credential = user_secrets.get_gcloud_credential() user_secrets.set_tensorflow_credential(user_credential) def process_kaggle(): # GCP設定 _kaggle_gcp_authority() # 各種pathの設定 DATA_PATH = '../input/cassava-leaf-disease-classification/' # ! 
mkdir output OUTPUT_DIR = './output/' NOTEBOOK_PATH = './__notebook__.ipynb' # system path import sys sys.path.append('../input/pytorch-image-models/pytorch-image-models-master') return DATA_PATH, OUTPUT_DIR, NOTEBOOK_PATH # + [markdown] id="OGyvvV8RNg9n" # ## 共通 # + id="5xwRpQIxNgsJ" executionInfo={"status": "ok", "timestamp": 1611019534517, "user_tz": -540, "elapsed": 3805, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} def process_common(): # ライブラリ関係 import subprocess subprocess.run('pip install mlflow'.split(' ')) # 環境変数 import os os.environ["GCLOUD_PROJECT"] = INFO['PROJECT_ID'] # + id="lyKiokdILDyk" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611019603324, "user_tz": -540, "elapsed": 72592, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} outputId="03c4c02b-b392-4e50-fafc-860bd5197341" try: from google.colab import auth except ImportError: DATA_PATH, OUTPUT_DIR, NOTEBOOK_PATH = process_kaggle() env = 'kaggle' else: DATA_PATH, OUTPUT_DIR, NOTEBOOK_PATH = process_colab() env = 'colab' finally: process_common() # + id="eFmnrL03efjE" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611019603332, "user_tz": -540, "elapsed": 72581, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} outputId="7a096bbf-5209-4a92-dff6-36608d70f49a" # !rm -r /content/input # + id="sYRQjxxtbllC" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611019775344, "user_tz": -540, "elapsed": 244573, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} outputId="d4c8a7aa-8dc4-4930-a8cc-41ca889dea42" import os if env=='colab': # ! cp /content/drive/Shareddrives/便利用/kaggle/cassava/input.zip /content/input.zip # ! unzip input.zip # ! 
rm input.zip train_num = len(os.listdir(DATA_PATH+"/train_images")) assert train_num == 21397 # + [markdown] id="Hbix74N7-aZW" # # install apex # + id="rK4cYpfaMSOu" executionInfo={"status": "ok", "timestamp": 1611019775347, "user_tz": -540, "elapsed": 244573, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} if CFG['apex']: try: import apex except Exception: # ! git clone https://github.com/NVIDIA/apex.git % cd apex # !pip install --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" . # %cd .. # + [markdown] papermill={"duration": 0.037398, "end_time": "2021-01-06T02:37:55.725061", "exception": false, "start_time": "2021-01-06T02:37:55.687663", "status": "completed"} tags=[] id="hJbO5kDvFNXg" # # Library # + papermill={"duration": 3.932267, "end_time": "2021-01-06T02:37:59.695044", "exception": false, "start_time": "2021-01-06T02:37:55.762777", "status": "completed"} tags=[] id="1ZSZBsn1FNXg" executionInfo={"status": "ok", "timestamp": 1611019781525, "user_tz": -540, "elapsed": 250748, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} # ==================================================== # Library # ==================================================== import os import datetime import math import time import random import glob import shutil from pathlib import Path from contextlib import contextmanager from collections import defaultdict, Counter import scipy as sp import numpy as np import pandas as pd from matplotlib import pyplot as plt import seaborn as sns from sklearn import preprocessing from sklearn.metrics import accuracy_score from sklearn.model_selection import StratifiedKFold from tqdm.auto import tqdm from functools import partial import cv2 from PIL import Image import torch import torch.nn as nn import torch.nn.functional as F from torch.optim import Adam, SGD import torchvision.models as models from torch.nn.parameter import Parameter 
from torch.utils.data import DataLoader, Dataset from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau from albumentations import ( Compose, OneOf, Normalize, Resize, RandomResizedCrop, RandomCrop, HorizontalFlip, VerticalFlip, RandomBrightness, RandomContrast, RandomBrightnessContrast, Rotate, ShiftScaleRotate, Cutout, IAAAdditiveGaussianNoise, Transpose ) from albumentations.pytorch import ToTensorV2 from albumentations import ImageOnlyTransform import timm import mlflow import warnings warnings.filterwarnings('ignore') if CFG['apex']: from apex import amp if CFG['debug']: device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') else: device = torch.device('cuda') start_time = datetime.datetime.now() start_time_str = start_time.strftime('%m%d%H%M') # + [markdown] papermill={"duration": 0.036384, "end_time": "2021-01-06T02:37:55.382118", "exception": false, "start_time": "2021-01-06T02:37:55.345734", "status": "completed"} tags=[] id="2X4s8CWqFNXe" # # Directory settings # + papermill={"duration": 0.047783, "end_time": "2021-01-06T02:37:55.466963", "exception": false, "start_time": "2021-01-06T02:37:55.419180", "status": "completed"} tags=[] id="knrCq_4TFNXf" executionInfo={"status": "ok", "timestamp": 1611019781528, "user_tz": -540, "elapsed": 250747, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} # ==================================================== # Directory settings # ==================================================== if os.path.exists(OUTPUT_DIR): shutil.rmtree(OUTPUT_DIR) if not os.path.exists(OUTPUT_DIR): os.makedirs(OUTPUT_DIR) # + [markdown] id="ao33hxdBVs5P" # # save basic files # + id="eCy-LtWHVShX" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1611019782237, "user_tz": -540, "elapsed": 251435, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": 
"11539026887080079999"}} outputId="5a3b207f-d80d-4ef2-ace8-3bf15197ee56" # with open(f'{OUTPUT_DIR}/{start_time_str}_TAG.json', 'w') as f: # json.dump(TAG, f, indent=4) # with open(f'{OUTPUT_DIR}/{start_time_str}_CFG.json', 'w') as f: # json.dump(CFG, f, indent=4) import shutil notebook_path = f'{OUTPUT_DIR}/{start_time_str}_{TITLE}.ipynb' shutil.copy2(NOTEBOOK_PATH, notebook_path) # + [markdown] papermill={"duration": 0.024923, "end_time": "2021-01-06T02:37:53.909851", "exception": false, "start_time": "2021-01-06T02:37:53.884928", "status": "completed"} tags=[] id="RRKUs51UFNXd" # # Data Loading # + papermill={"duration": 0.424992, "end_time": "2021-01-06T02:37:55.305508", "exception": false, "start_time": "2021-01-06T02:37:54.880516", "status": "completed"} tags=[] id="RmexI3afFNXe" executionInfo={"status": "ok", "timestamp": 1611019782239, "user_tz": -540, "elapsed": 251432, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} train = pd.read_csv(f'{DATA_PATH}/train.csv') test = pd.read_csv(f'{DATA_PATH}/sample_submission.csv') label_map = pd.read_json(f'{DATA_PATH}/label_num_to_disease_map.json', orient='index') if CFG['debug']: train = train.sample(n=1000, random_state=CFG['seed']).reset_index(drop=True) # + [markdown] papermill={"duration": 0.047427, "end_time": "2021-01-06T02:38:32.101093", "exception": false, "start_time": "2021-01-06T02:38:32.053666", "status": "completed"} tags=[] id="_UbCb4FRFNXi" # # Utils # + papermill={"duration": 0.081799, "end_time": "2021-01-06T02:38:32.230951", "exception": false, "start_time": "2021-01-06T02:38:32.149152", "status": "completed"} tags=[] id="j24f9krmFNXi" executionInfo={"status": "ok", "timestamp": 1611019782240, "user_tz": -540, "elapsed": 251430, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} # ==================================================== # Utils # ==================================================== def 
get_score(y_true, y_pred): return accuracy_score(y_true, y_pred) @contextmanager def timer(name): t0 = time.time() LOGGER.info(f'[{name}] start') yield LOGGER.info(f'[{name}] done in {time.time() - t0:.0f} s.') def init_logger(log_file=OUTPUT_DIR+'train.log'): from logging import getLogger, FileHandler, Formatter, StreamHandler from logging import INFO as INFO_ logger = getLogger(__name__) logger.setLevel(INFO_) handler1 = StreamHandler() handler1.setFormatter(Formatter("%(message)s")) handler2 = FileHandler(filename=log_file) handler2.setFormatter(Formatter("%(message)s")) logger.addHandler(handler1) logger.addHandler(handler2) return logger logger_path = OUTPUT_DIR+f'{start_time_str}_train.log' LOGGER = init_logger(logger_path) def seed_torch(seed=42): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_torch(seed=CFG['seed']) class EarlyStopping: """Early stops the training if validation loss doesn't improve after a given patience.""" def __init__(self, patience=7, verbose=False, save_path='checkpoint.pt', counter=0, best_score=None, save_latest_path=None): """ Args: patience (int): How long to wait after last time validation loss improved. Default: 7 verbose (bool): If True, prints a message for each validation loss improvement. Default: False save_path (str): Directory for saving a model. 
Default: "'checkpoint.pt'" """ self.patience = patience self.verbose = verbose self.save_path = save_path self.counter = counter self.best_score = best_score self.save_latest_path = save_latest_path self.early_stop = False self.val_loss_min = np.Inf def __call__(self, val_loss, model, preds, epoch): score = -val_loss if self.best_score is None: self.best_score = score self.save_checkpoint(val_loss, model, preds, epoch) self.save_latest(val_loss, model, preds, epoch, score) elif score >= self.best_score: self.counter = 0 self.best_score = score self.save_checkpoint(val_loss, model, preds, epoch) self.save_latest(val_loss, model, preds, epoch, score) # nanになったら学習ストップ elif math.isnan(score): self.early_stop = True else: self.counter += 1 if self.save_latest_path is not None: self.save_latest(val_loss, model, preds, epoch, score) if self.verbose: print(f'EarlyStopping counter: {self.counter} out of {self.patience}') if self.counter >= self.patience: self.early_stop = True def save_checkpoint(self, val_loss, model, preds, epoch): '''Saves model when validation loss decrease.''' if self.verbose: print(f'Validation loss decreased ({self.val_loss_min:.10f} --> {val_loss:.10f}). 
Saving model ...') torch.save({'model': model.state_dict(), 'preds': preds, 'epoch' : epoch, 'best_score' : self.best_score, 'counter' : self.counter}, self.save_path) self.val_loss_min = val_loss def save_latest(self, val_loss, model, preds, epoch, score): '''Saves latest model.''' torch.save({'model': model.state_dict(), 'preds': preds, 'epoch' : epoch, 'score' : score, 'counter' : self.counter}, self.save_latest_path) self.val_loss_min = val_loss # + id="22Zz6gVTEW2J" executionInfo={"status": "ok", "timestamp": 1611019782241, "user_tz": -540, "elapsed": 251427, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} def remove_glob(pathname, recursive=True): for p in glob.glob(pathname, recursive=recursive): if os.path.isfile(p): os.remove(p) def rand_bbox(size, lam): W = size[2] H = size[3] cut_rat = np.sqrt(1. - lam) cut_w = np.int(W * cut_rat) cut_h = np.int(H * cut_rat) # uniform cx = np.random.randint(W) cy = np.random.randint(H) bbx1 = np.clip(cx - cut_w // 2, 0, W) bby1 = np.clip(cy - cut_h // 2, 0, H) bbx2 = np.clip(cx + cut_w // 2, 0, W) bby2 = np.clip(cy + cut_h // 2, 0, H) return bbx1, bby1, bbx2, bby2 # + [markdown] papermill={"duration": 0.047349, "end_time": "2021-01-06T02:38:32.326137", "exception": false, "start_time": "2021-01-06T02:38:32.278788", "status": "completed"} tags=[] id="OVI1rntBFNXi" # # CV split # + papermill={"duration": 0.081845, "end_time": "2021-01-06T02:38:32.455329", "exception": false, "start_time": "2021-01-06T02:38:32.373484", "status": "completed"} tags=[] id="2i-B_S-rFNXj" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611019783058, "user_tz": -540, "elapsed": 252225, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} outputId="9a375f17-c2dc-4ad6-bf49-fcd3f4a99dec" folds = train.copy() Fold = StratifiedKFold(n_splits=CFG['n_fold'], shuffle=True, random_state=CFG['seed']) for n, (train_index, 
val_index) in enumerate(Fold.split(folds, folds[CFG['target_col']])): folds.loc[val_index, 'fold'] = int(n) folds['fold'] = folds['fold'].astype(int) print(folds.groupby(['fold', CFG['target_col']]).size()) # + [markdown] papermill={"duration": 0.048776, "end_time": "2021-01-06T02:38:32.552092", "exception": false, "start_time": "2021-01-06T02:38:32.503316", "status": "completed"} tags=[] id="HBJfw0hAFNXj" # # Dataset # + papermill={"duration": 0.065435, "end_time": "2021-01-06T02:38:32.665450", "exception": false, "start_time": "2021-01-06T02:38:32.600015", "status": "completed"} tags=[] id="pAt_9vPoFNXj" executionInfo={"status": "ok", "timestamp": 1611019783060, "user_tz": -540, "elapsed": 252223, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} # ==================================================== # Dataset # ==================================================== class TrainDataset(Dataset): def __init__(self, df, transform=None): self.df = df self.file_names = df['image_id'].values self.labels = df['label'].values self.transform = transform def __len__(self): return len(self.df) def __getitem__(self, idx): file_name = self.file_names[idx] file_path = f'{DATA_PATH}/train_images/{file_name}' image = cv2.imread(file_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if self.transform: augmented = self.transform(image=image) image = augmented['image'] label = torch.tensor(self.labels[idx]).long() return image, label class TestDataset(Dataset): def __init__(self, df, transform=None): self.df = df self.file_names = df['image_id'].values self.transform = transform def __len__(self): return len(self.df) def __getitem__(self, idx): file_name = self.file_names[idx] file_path = f'{DATA_PATH}/test_images/{file_name}' image = cv2.imread(file_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if self.transform: augmented = self.transform(image=image) image = augmented['image'] return image # + papermill={"duration": 
0.361965, "end_time": "2021-01-06T02:38:33.075516", "exception": false, "start_time": "2021-01-06T02:38:32.713551", "status": "completed"} tags=[] id="9C9af6q2FNXj" executionInfo={"status": "ok", "timestamp": 1611019783062, "user_tz": -540, "elapsed": 252222, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} # train_dataset = TrainDataset(train, transform=None) # for i in range(1): # image, label = train_dataset[i] # plt.imshow(image) # plt.title(f'label: {label}') # plt.show() # + [markdown] papermill={"duration": 0.053528, "end_time": "2021-01-06T02:38:33.182402", "exception": false, "start_time": "2021-01-06T02:38:33.128874", "status": "completed"} tags=[] id="Erl7SEcJFNXk" # # Transforms # + id="l6HJnB8DCJ7A" executionInfo={"status": "ok", "timestamp": 1611019783064, "user_tz": -540, "elapsed": 252221, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} def _get_augmentations(aug_list): process = [] for aug in aug_list: if aug == 'Resize': process.append(Resize(CFG['size'], CFG['size'])) elif aug == 'RandomResizedCrop': process.append(RandomResizedCrop(CFG['size'], CFG['size'])) elif aug == 'Transpose': process.append(Transpose(p=0.5)) elif aug == 'HorizontalFlip': process.append(HorizontalFlip(p=0.5)) elif aug == 'VerticalFlip': process.append(VerticalFlip(p=0.5)) elif aug == 'ShiftScaleRotate': process.append(ShiftScaleRotate(p=0.5)) elif aug == 'Cutout': process.append(Cutout(max_h_size=CFG['CutoutSize'], max_w_size=CFG['CutoutSize'], p=0.5)) elif aug == 'Normalize': process.append(Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], )) elif aug in ['mixup', 'cutmix', 'fmix']: pass else: raise ValueError(f'{aug} is not suitable') process.append(ToTensorV2()) return process # + papermill={"duration": 0.065472, "end_time": "2021-01-06T02:38:33.300562", "exception": false, "start_time": "2021-01-06T02:38:33.235090", "status": "completed"} tags=[] 
# id="6RWHRr9DFNXk" (cell metadata truncated)
# ====================================================
# Transforms
# ====================================================
def get_transforms(*, data):
    # Build the albumentations pipeline for one split.
    # NOTE(review): implicitly returns None for any other value of `data`;
    # callers only ever pass 'train' or 'valid'.
    if data == 'train':
        return Compose(
            _get_augmentations(TAG['augmentation'])
        )
    elif data == 'valid':
        # Validation/inference uses deterministic transforms only.
        return Compose(
            _get_augmentations(['Resize', 'Normalize'])
        )

# + id="2YfZrOJOFNXk"
# Sanity check: show one augmented training sample. The transform output is a
# CHW tensor, so image[0] displays the first channel only.
train_dataset = TrainDataset(train, transform=get_transforms(data='train'))
for i in range(1):
    image, label = train_dataset[i]
    plt.imshow(image[0])
    plt.title(f'label: {label}')
    plt.show()

# + [markdown] id="YGdLkzuBFNXl"
# # Bi-tempered logistic loss

# + id="7AlhTvRLFNXl"
def log_t(u, t):
    """Compute log_t for `u'."""
    # Tempered logarithm: reduces to the natural log at t == 1.
    if t==1.0:
        return u.log()
    else:
        return (u.pow(1.0 - t) - 1.0) / (1.0 - t)

def exp_t(u, t):
    """Compute exp_t for `u'."""
    # Tempered exponential (inverse of log_t); .relu() clamps the base at zero
    # so the fractional power stays well defined.
    if t==1:
        return u.exp()
    else:
        return (1.0 + (1.0-t)*u).relu().pow(1.0 / (1.0 - t))

def compute_normalization_fixed_point(activations, t, num_iters):
    """Returns the normalization value for each example (t > 1.0).

    Args:
      activations: A multi-dimensional tensor with last dimension `num_classes`.
      t: Temperature 2 (> 1.0 for tail heaviness).
      num_iters: Number of iterations to run the method.
    Return: A tensor of same shape as activation with the last dimension being 1.
    """
    # Subtract the per-example max first for numerical stability of exp_t.
    mu, _ = torch.max(activations, -1, keepdim=True)
    normalized_activations_step_0 = activations - mu
    normalized_activations = normalized_activations_step_0
    # Fixed-point iteration converging to the tempered log-partition value.
    for _ in range(num_iters):
        logt_partition = torch.sum(
            exp_t(normalized_activations, t), -1, keepdim=True)
        normalized_activations = normalized_activations_step_0 * \
            logt_partition.pow(1.0-t)
    logt_partition = torch.sum(
        exp_t(normalized_activations, t), -1, keepdim=True)
    normalization_constants = - log_t(1.0 / logt_partition, t) + mu
    return normalization_constants

def compute_normalization_binary_search(activations, t, num_iters):
    """Returns the normalization value for each example (t < 1.0).

    Args:
      activations: A multi-dimensional tensor with last dimension `num_classes`.
      t: Temperature 2 (< 1.0 for finite support).
      num_iters: Number of iterations to run the method.
    Return: A tensor of same rank as activation with the last dimension being 1.
    """
    mu, _ = torch.max(activations, -1, keepdim=True)
    normalized_activations = activations - mu
    # Count of classes whose tempered probability is non-zero (finite support).
    effective_dim = \
        torch.sum(
            (normalized_activations > -1.0 / (1.0-t)).to(torch.int32),
            dim=-1, keepdim=True).to(activations.dtype)
    shape_partition = activations.shape[:-1] + (1,)
    lower = torch.zeros(shape_partition, dtype=activations.dtype, device=activations.device)
    upper = -log_t(1.0/effective_dim, t) * torch.ones_like(lower)
    # Bisection on the log-partition constant so tempered probs sum to 1.
    for _ in range(num_iters):
        logt_partition = (upper + lower)/2.0
        sum_probs = torch.sum(
            exp_t(normalized_activations - logt_partition, t),
            dim=-1, keepdim=True)
        update = (sum_probs < 1.0).to(activations.dtype)
        lower = torch.reshape(
            lower * update + (1.0-update) * logt_partition,
            shape_partition)
        upper = torch.reshape(
            upper * (1.0 - update) + update * logt_partition,
            shape_partition)
    logt_partition = (upper + lower)/2.0
    return logt_partition + mu

class ComputeNormalization(torch.autograd.Function):
    """
    Class implementing custom backward pass for compute_normalization.
    See compute_normalization.
    """
    @staticmethod
    def forward(ctx, activations, t, num_iters):
        # t < 1 has finite support, solved by bisection; t > 1 by fixed point.
        if t < 1.0:
            normalization_constants = compute_normalization_binary_search(activations, t, num_iters)
        else:
            normalization_constants = compute_normalization_fixed_point(activations, t, num_iters)
        ctx.save_for_backward(activations, normalization_constants)
        ctx.t=t
        return normalization_constants

    @staticmethod
    def backward(ctx, grad_output):
        activations, normalization_constants = ctx.saved_tensors
        t = ctx.t
        normalized_activations = activations - normalization_constants
        probabilities = exp_t(normalized_activations, t)
        # Gradient is the "escort" distribution p^t / sum(p^t).
        escorts = probabilities.pow(t)
        escorts = escorts / escorts.sum(dim=-1, keepdim=True)
        grad_input = escorts * grad_output
        # None gradients for the non-tensor args (t, num_iters).
        return grad_input, None, None

def compute_normalization(activations, t, num_iters=5):
    """Returns the normalization value for each example.
    Backward pass is implemented.
    Args:
      activations: A multi-dimensional tensor with last dimension `num_classes`.
      t: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
      num_iters: Number of iterations to run the method.
    Return: A tensor of same rank as activation with the last dimension being 1.
    """
    return ComputeNormalization.apply(activations, t, num_iters)

def tempered_sigmoid(activations, t, num_iters = 5):
    """Tempered sigmoid function.
    Args:
      activations: Activations for the positive class for binary classification.
      t: Temperature tensor > 0.0.
      num_iters: Number of iterations to run the method.
    Returns:
      A probabilities tensor.
    """
    # Implemented as a two-class tempered softmax against a zero logit.
    internal_activations = torch.stack([activations,
        torch.zeros_like(activations)],
        dim=-1)
    internal_probabilities = tempered_softmax(internal_activations, t, num_iters)
    return internal_probabilities[..., 0]

def tempered_softmax(activations, t, num_iters=5):
    """Tempered softmax function.
    Args:
      activations: A multi-dimensional tensor with last dimension `num_classes`.
      t: Temperature > 1.0.
      num_iters: Number of iterations to run the method.
    Returns:
      A probabilities tensor.
    """
    if t == 1.0:
        return activations.softmax(dim=-1)
    normalization_constants = compute_normalization(activations, t, num_iters)
    return exp_t(activations - normalization_constants, t)

def bi_tempered_binary_logistic_loss(activations,
        labels,
        t1,
        t2,
        label_smoothing = 0.0,
        num_iters=5,
        reduction='mean'):
    """Bi-Tempered binary logistic loss.
    Args:
      activations: A tensor containing activations for class 1.
      labels: A tensor with shape as activations, containing probabilities for class 1
      t1: Temperature 1 (< 1.0 for boundedness).
      t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
      label_smoothing: Label smoothing
      num_iters: Number of iterations to run the method.
    Returns:
      A loss tensor.
    """
    # Lift the binary problem to a two-class problem and delegate.
    internal_activations = torch.stack([activations,
        torch.zeros_like(activations)],
        dim=-1)
    internal_labels = torch.stack([labels.to(activations.dtype),
        1.0 - labels.to(activations.dtype)],
        dim=-1)
    return bi_tempered_logistic_loss(internal_activations,
        internal_labels,
        t1,
        t2,
        label_smoothing = label_smoothing,
        num_iters = num_iters,
        reduction = reduction)

def bi_tempered_logistic_loss(activations,
        labels,
        t1,
        t2,
        label_smoothing=0.0,
        num_iters=5,
        reduction = 'mean'):
    """Bi-Tempered Logistic Loss.
    Args:
      activations: A multi-dimensional tensor with last dimension `num_classes`.
      labels: A tensor with shape and dtype as activations (onehot),
        or a long tensor of one dimension less than activations (pytorch standard)
      t1: Temperature 1 (< 1.0 for boundedness).
      t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
      label_smoothing: Label smoothing parameter between [0, 1). Default 0.0.
      num_iters: Number of iterations to run the method. Default 5.
      reduction: ``'none'`` | ``'mean'`` | ``'sum'``. Default ``'mean'``.
        ``'none'``: No reduction is applied, return shape is shape of
        activations without the last dimension.
        ``'mean'``: Loss is averaged over minibatch. Return shape (1,)
        ``'sum'``: Loss is summed over minibatch. Return shape (1,)
    Returns:
      A loss tensor.
    """
    if len(labels.shape)<len(activations.shape): #not one-hot
        labels_onehot = torch.zeros_like(activations)
        labels_onehot.scatter_(1, labels[..., None], 1)
    else:
        labels_onehot = labels

    if label_smoothing > 0:
        num_classes = labels_onehot.shape[-1]
        labels_onehot = ( 1 - label_smoothing * num_classes / (num_classes - 1) ) \
                * labels_onehot + \
                label_smoothing / (num_classes - 1)

    probabilities = tempered_softmax(activations, t2, num_iters)

    # Tempered Bregman divergence between the (smoothed) labels and predictions;
    # the 1e-10 guards log_t against exact zeros in the one-hot labels.
    loss_values = labels_onehot * log_t(labels_onehot + 1e-10, t1) \
            - labels_onehot * log_t(probabilities, t1) \
            - labels_onehot.pow(2.0 - t1) / (2.0 - t1) \
            + probabilities.pow(2.0 - t1) / (2.0 - t1)
    loss_values = loss_values.sum(dim = -1) #sum over classes

    if reduction == 'none':
        return loss_values
    if reduction == 'sum':
        return loss_values.sum()
    if reduction == 'mean':
        return loss_values.mean()

# + [markdown] id="kB1l1ZVGFNXo"
# # MODEL

# + id="MVo-ToP0FNXp"
# ====================================================
# MODEL
# ====================================================
class CustomModel(nn.Module):
    """timm backbone with its classification head resized to CFG['target_size'].

    Handles both timm head conventions: models exposing `.classifier`
    (e.g. EfficientNet) and models exposing `.fc` (e.g. ResNet).
    """
    def __init__(self, model_name, pretrained=False):
        super().__init__()
        self.model = timm.create_model(model_name, pretrained=pretrained)
        if hasattr(self.model, 'classifier'):
            n_features = self.model.classifier.in_features
            self.model.classifier = nn.Linear(n_features, CFG['target_size'])
        elif hasattr(self.model, 'fc'):
            n_features = self.model.fc.in_features
            self.model.fc = nn.Linear(n_features, CFG['target_size'])

    def forward(self, x):
        # Returns raw logits; softmax is applied by the loss / at inference time.
        x = self.model(x)
        return x

# + id="5_zEsjzRFNXq"
# Smoke test: one forward pass on a small batch to confirm shapes wire up.
model = CustomModel(model_name=TAG['model_name'], pretrained=False)
train_dataset = TrainDataset(train, transform=get_transforms(data='train'))
train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True,
                          num_workers=4, pin_memory=True, drop_last=True)
for image, label in train_loader:
    output = model(image)
    print(output)
    break

# + [markdown] id="lXDndnWuFNXq"
# # Helper functions

# + id="hRe7UNh4FNXq"
# ====================================================
# Helper functions
# ====================================================
class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # n is the number of samples the value was averaged over (batch size).
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def asMinutes(s):
    # Format a duration in seconds as 'Xm Ys'.
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def timeSince(since, percent):
    # Elapsed time plus a linear extrapolation of the time remaining,
    # given the fraction of work completed (`percent` in (0, 1]).
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (remain %s)' % (asMinutes(s), asMinutes(rs))

# + id="EALsPGfJFNXq"
# ====================================================
# loss
# ====================================================
def get_loss(criterion, y_preds, labels):
    # Dispatch on the configured criterion: the bi-tempered loss needs its
    # temperatures passed per call.
    # NOTE(review): falls through with UnboundLocalError for unknown names.
    if TAG['criterion']=='CrossEntropyLoss':
        loss = criterion(y_preds, labels)
    elif TAG['criterion'] == 'bi_tempered_logistic_loss':
        loss = criterion(y_preds, labels, t1=CFG['bi_tempered_loss_t1'], t2=CFG['bi_tempered_loss_t2'])
    return loss

# + id="yXEOU7TYFNXr"
# ====================================================
# Helper functions
# ====================================================
def train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device):
    """Run one training epoch; returns the average training loss."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    scores = AverageMeter()
    # switch to train mode
    model.train()
    start = end = time.time()
    global_step = 0
    for step, (images, labels) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        images = images.to(device)
        labels = labels.to(device)
        batch_size = labels.size(0)
        # r is a length-1 numpy array, hence the bitwise `&` below (array-aware).
        r = np.random.rand(1)
        is_aug = r < 0.5 # probability of augmentation
        if is_aug & ('cutmix' in TAG['augmentation']):
            # generate mixed sample
            # inference from https://github.com/clovaai/CutMix-PyTorch/blob/master/train.py
            lam = np.random.beta(CFG['CutmixAlpha'], CFG['CutmixAlpha'])
            rand_index = torch.randperm(images.size()[0]).to(device)
            labels_a = labels
            labels_b = labels[rand_index]
            bbx1, bby1, bbx2, bby2 = rand_bbox(images.size(), lam)
            images[:, :, bbx1:bbx2, bby1:bby2] = images[rand_index, :, bbx1:bbx2, bby1:bby2]
            # adjust lambda to exactly match pixel ratio
            lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (images.size()[-1] * images.size()[-2]))
            # compute output
            y_preds = model(images)
            # CutMix loss: convex combination of the two labels' losses.
            loss = get_loss(criterion, y_preds, labels_a) * lam + \
                   get_loss(criterion, y_preds, labels_b) * (1. - lam)
        else:
            y_preds = model(images)
            loss = get_loss(criterion, y_preds, labels)
        # record loss
        losses.update(loss.item(), batch_size)
        if CFG['gradient_accumulation_steps'] > 1:
            loss = loss / CFG['gradient_accumulation_steps']
        if CFG['apex']:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        # clear memory
        del loss, y_preds
        torch.cuda.empty_cache()
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), CFG['max_grad_norm'])
        # Only step the optimizer every `gradient_accumulation_steps` batches.
        if (step + 1) % CFG['gradient_accumulation_steps'] == 0:
            optimizer.step()
            optimizer.zero_grad()
            global_step += 1
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if step % CFG['print_freq'] == 0 or step == (len(train_loader)-1):
            print('Epoch: [{0}][{1}/{2}] '
                  'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                  'Elapsed {remain:s} '
                  'Loss: {loss.val:.4f}({loss.avg:.4f}) '
                  'Grad: {grad_norm:.4f} '
                  #'LR: {lr:.6f} '
                  .format(
                   epoch+1, step, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses,
                   remain=timeSince(start, float(step+1)/len(train_loader)),
                   grad_norm=grad_norm,
                   #lr=scheduler.get_lr()[0],
                   ))
    return losses.avg

def valid_fn(valid_loader, model, criterion, device):
    """Evaluate on the validation split; returns (avg loss, softmax predictions)."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    scores = AverageMeter()
    # switch to evaluation mode
    model.eval()
    preds = []
    start = end = time.time()
    for step, (images, labels) in enumerate(valid_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        images = images.to(device)
        labels = labels.to(device)
        batch_size = labels.size(0)
        # compute loss
        with torch.no_grad():
            y_preds = model(images)
        loss = get_loss(criterion, y_preds, labels)
        losses.update(loss.item(), batch_size)
        # record accuracy
        preds.append(y_preds.softmax(1).to('cpu').numpy())
        if CFG['gradient_accumulation_steps'] > 1:
            loss = loss / CFG['gradient_accumulation_steps']
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if step % CFG['print_freq'] == 0 or step == (len(valid_loader)-1):
            print('EVAL: [{0}/{1}] '
                  'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                  'Elapsed {remain:s} '
                  'Loss: {loss.val:.4f}({loss.avg:.4f}) '
                  .format(
                   step, len(valid_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses,
                   remain=timeSince(start, float(step+1)/len(valid_loader)),
                   ))
    predictions = np.concatenate(preds)
    return losses.avg, predictions

def inference(model, states, test_loader, device):
    """Average softmax predictions over the fold checkpoints in `states`."""
    model.to(device)
    tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
    probs = []
    for i, (images) in tk0:
        images = images.to(device)
        avg_preds = []
        for state in states:
            # model.load_state_dict(state['model'])
            model.load_state_dict(state)
            model.eval()
            with torch.no_grad():
                y_preds = model(images)
            avg_preds.append(y_preds.softmax(1).to('cpu').numpy())
        avg_preds = np.mean(avg_preds, axis=0)
        probs.append(avg_preds)
    probs = np.concatenate(probs)
    return probs

# + [markdown] id="NncmvLUNFNXr"
# # Train loop

# + papermill={"duration": 0.070111} 
# tags=[] id="6xXt-iYrFNXr" (cell metadata truncated)
# ====================================================
# scheduler
# ====================================================
def get_scheduler(optimizer):
    # Build the LR scheduler selected by TAG['scheduler'].
    # NOTE(review): no else branch -- an unknown name raises UnboundLocalError.
    if TAG['scheduler']=='ReduceLROnPlateau':
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=CFG['factor'],
                                      patience=CFG['patience'], verbose=True, eps=CFG['eps'])
    elif TAG['scheduler']=='CosineAnnealingLR':
        scheduler = CosineAnnealingLR(optimizer, T_max=CFG['T_max'],
                                      eta_min=CFG['min_lr'], last_epoch=-1)
    elif TAG['scheduler']=='CosineAnnealingWarmRestarts':
        scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=CFG['T_0'], T_mult=1,
                                                eta_min=CFG['min_lr'], last_epoch=-1)
    return scheduler

# ====================================================
# criterion
# ====================================================
def get_criterion():
    # CrossEntropyLoss is a module instance; bi_tempered_logistic_loss is a plain
    # function -- both are invoked uniformly through get_loss().
    if TAG['criterion']=='CrossEntropyLoss':
        criterion = nn.CrossEntropyLoss()
    elif TAG['criterion'] == 'bi_tempered_logistic_loss':
        criterion = bi_tempered_logistic_loss
    return criterion

# + id="IsI4SRttFNXr"
# ====================================================
# Train loop
# ====================================================
def train_loop(folds, fold):
    """Train one CV fold (with resume support) and return its OOF predictions."""

    LOGGER.info(f"========== fold: {fold} training ==========")
    if not CFG['debug']:
        mlflow.set_tag('running.fold', str(fold))

    # ====================================================
    # loader
    # ====================================================
    trn_idx = folds[folds['fold'] != fold].index
    val_idx = folds[folds['fold'] == fold].index

    train_folds = folds.loc[trn_idx].reset_index(drop=True)
    valid_folds = folds.loc[val_idx].reset_index(drop=True)

    train_dataset = TrainDataset(train_folds, transform=get_transforms(data='train'))
    valid_dataset = TrainDataset(valid_folds, transform=get_transforms(data='valid'))

    train_loader = DataLoader(train_dataset, batch_size=CFG['batch_size'], shuffle=True,
                              num_workers=CFG['num_workers'], pin_memory=True, drop_last=True)
    valid_loader = DataLoader(valid_dataset, batch_size=CFG['batch_size'], shuffle=False,
                              num_workers=CFG['num_workers'], pin_memory=True, drop_last=False)

    # ====================================================
    # model & optimizer & criterion
    # ====================================================
    best_model_path = OUTPUT_DIR+f'{TAG["model_name"]}_fold{fold}_best.pth'
    latest_model_path = OUTPUT_DIR+f'{TAG["model_name"]}_fold{fold}_latest.pth'
    model = CustomModel(TAG['model_name'], pretrained=True)
    model.to(device)
    # Resume from saved weights if a partially trained checkpoint exists.
    # The early-stopping counter comes from the *latest* checkpoint, the best
    # score from the *best* checkpoint.
    if os.path.isfile(latest_model_path):
        state_latest = torch.load(latest_model_path)
        state_best = torch.load(best_model_path)
        model.load_state_dict(state_latest['model'])
        epoch_start = state_latest['epoch']+1
        # er_best_score = state_latest['score']
        er_counter = state_latest['counter']
        er_best_score = state_best['best_score']
        LOGGER.info(f'Retrain model in epoch:{epoch_start}, best_score:{er_best_score:.3f}, counter:{er_counter}')
    else:
        epoch_start = 0
        er_best_score = None
        er_counter = 0

    optimizer = Adam(model.parameters(), lr=CFG['lr'], weight_decay=CFG['weight_decay'], amsgrad=False)
    scheduler = get_scheduler(optimizer)
    criterion = get_criterion()

    # ====================================================
    # apex
    # ====================================================
    if CFG['apex']:
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)

    # ====================================================
    # loop
    # ====================================================
    # best_score = 0.
    # best_loss = np.inf
    early_stopping = EarlyStopping(
        patience=CFG['early_stopping_round'],
        verbose=True,
        save_path=best_model_path,
        counter=er_counter, best_score=er_best_score,
        save_latest_path=latest_model_path)

    for epoch in range(epoch_start, CFG['epochs']):

        start_time = time.time()

        # train
        avg_loss = train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device)

        # eval
        avg_val_loss, preds = valid_fn(valid_loader, model, criterion, device)
        valid_labels = valid_folds[CFG['target_col']].values

        # early stopping (also saves best/latest checkpoints as a side effect)
        early_stopping(avg_val_loss, model, preds, epoch)
        if early_stopping.early_stop:
            print(f'Epoch {epoch+1} - early stopping')
            break

        # ReduceLROnPlateau needs the metric; the cosine schedulers step blindly.
        if isinstance(scheduler, ReduceLROnPlateau):
            scheduler.step(avg_val_loss)
        elif isinstance(scheduler, CosineAnnealingLR):
            scheduler.step()
        elif isinstance(scheduler, CosineAnnealingWarmRestarts):
            scheduler.step()

        # scoring
        score = get_score(valid_labels, preds.argmax(1))

        elapsed = time.time() - start_time

        LOGGER.info(f'Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} avg_val_loss: {avg_val_loss:.4f} time: {elapsed:.0f}s')
        LOGGER.info(f'Epoch {epoch+1} - Accuracy: {score}')

        # log mlflow
        # NOTE(review): scheduler.get_last_lr() exists on the cosine schedulers but
        # not on ReduceLROnPlateau in older torch versions -- verify for that config.
        if not CFG['debug']:
            mlflow.log_metric(f"fold{fold} avg_train_loss", avg_loss, step=epoch)
            mlflow.log_metric(f"fold{fold} avg_valid_loss", avg_val_loss, step=epoch)
            mlflow.log_metric(f"fold{fold} score", score, step=epoch)
            mlflow.log_metric(f"fold{fold} lr", scheduler.get_last_lr()[0], step=epoch)
            mlflow.log_artifact(best_model_path)
            if os.path.isfile(latest_model_path):
                mlflow.log_artifact(latest_model_path)

    # Reload the best checkpoint and attach its OOF predictions to the fold frame.
    check_point = torch.load(best_model_path)

    valid_folds[[str(c) for c in range(5)]] = check_point['preds']
    valid_folds['preds'] = check_point['preds'].argmax(1)

    return valid_folds

# + id="DhXVTks3gNk9" (cell metadata truncated: 
"user_tz": -540, "elapsed": 268089, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} def get_trained_fold_preds(folds, fold, best_model_path): val_idx = folds[folds['fold'] == fold].index valid_folds = folds.loc[val_idx].reset_index(drop=True) check_point = torch.load(best_model_path) valid_folds[[str(c) for c in range(5)]] = check_point['preds'] valid_folds['preds'] = check_point['preds'].argmax(1) return valid_folds def save_confusion_matrix(oof): from sklearn.metrics import confusion_matrix cm_ = confusion_matrix(oof['label'], oof['preds'], labels=[0,1,2,3,4]) label_name = ['0 (CBB)', '1 (CBSD)', '2 (CGM)', '3 (CMD)', '4 (Healthy)'] cm = pd.DataFrame(cm_, index=label_name, columns=label_name) cm.to_csv(OUTPUT_DIR+'oof_confusion_matrix.csv', index=True) # + papermill={"duration": 0.07567, "end_time": "2021-01-06T02:38:38.131698", "exception": false, "start_time": "2021-01-06T02:38:38.056028", "status": "completed"} tags=[] id="O8qeV0zAFNXr" executionInfo={"status": "ok", "timestamp": 1611019799497, "user_tz": -540, "elapsed": 268572, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} # ==================================================== # main # ==================================================== def get_result(result_df): preds = result_df['preds'].values labels = result_df[CFG['target_col']].values score = get_score(labels, preds) LOGGER.info(f'Score: {score:<.5f}') return score def main(): """ Prepare: 1.train 2.test 3.submission 4.folds """ if CFG['train']: # train oof_df = pd.DataFrame() for fold in range(CFG['n_fold']): best_model_path = OUTPUT_DIR+f'{TAG["model_name"]}_fold{fold}_best.pth' if fold in CFG['trn_fold']: _oof_df = train_loop(folds, fold) elif os.path.exists(best_model_path): _oof_df = get_trained_fold_preds(folds, fold, best_model_path) else: _oof_df = None if _oof_df is not None: oof_df = pd.concat([oof_df, _oof_df]) LOGGER.info(f"========== fold: 
{fold} result ==========") _ = get_result(_oof_df) # CV result LOGGER.info(f"========== CV ==========") score = get_result(oof_df) # save result oof_df.to_csv(OUTPUT_DIR+'oof_df.csv', index=False) save_confusion_matrix(oof_df) # log mlflow if not CFG['debug']: mlflow.log_metric('oof score', score) mlflow.delete_tag('running.fold') mlflow.log_artifact(OUTPUT_DIR+'oof_df.csv') if CFG['inference']: # inference model = CustomModel(TAG['model_name'], pretrained=False) states = [torch.load(OUTPUT_DIR+f'{TAG["model_name"]}_fold{fold}_best.pth') for fold in CFG['trn_fold']] test_dataset = TestDataset(test, transform=get_transforms(data='valid')) test_loader = DataLoader(test_dataset, batch_size=CFG['batch_size'], shuffle=False, num_workers=CFG['num_workers'], pin_memory=True) predictions = inference(model, states, test_loader, device) # submission test['label'] = predictions.argmax(1) test[['image_id', 'label']].to_csv(OUTPUT_DIR+'submission.csv', index=False) # + [markdown] id="ATy14yKn0hvy" # # rerun # + id="bSFRb_b50hY_" executionInfo={"status": "ok", "timestamp": 1611019799499, "user_tz": -540, "elapsed": 268567, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} def _load_save_point(run_id): # どこで中断したか取得 stop_fold = int(mlflow.get_run(run_id=run_id).to_dictionary()['data']['tags']['running.fold']) # 学習対象のfoldを変更 CFG['trn_fold'] = [fold for fold in CFG['trn_fold'] if fold>=stop_fold] # 学習済みモデルがあれば.pthファイルを取得(学習中も含む) client = mlflow.tracking.MlflowClient() artifacts = [artifact for artifact in client.list_artifacts(run_id) if ".pth" in artifact.path] for artifact in artifacts: client.download_artifacts(run_id, artifact.path, OUTPUT_DIR) def check_have_run(): results = mlflow.search_runs(INFO['EXPERIMENT_ID']) run_id_list = results[results['tags.mlflow.runName']==TITLE]['run_id'].tolist() # 初めて実行する場合 if len(run_id_list) == 0: run_id = None # 既に実行されている場合 else: assert len(run_id_list)==1 run_id = run_id_list[0] 
_load_save_point(run_id) return run_id # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" papermill={"duration": 307.853884, "end_time": "2021-01-06T02:43:46.043411", "exception": false, "start_time": "2021-01-06T02:38:38.189527", "status": "completed"} tags=[] id="30aXBy9CFNXs" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1611032247789, "user_tz": -540, "elapsed": 12716833, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} outputId="363583fb-f53f-4bed-e854-29d194e47272" if __name__ == '__main__': if CFG['debug']: main() else: mlflow.set_tracking_uri(INFO['TRACKING_URI']) mlflow.set_experiment('single model') # 既に実行済みの場合は続きから実行する run_id = check_have_run() with mlflow.start_run(run_id=run_id, run_name=TITLE): if run_id is None: mlflow.log_artifact(CONFIG_PATH) mlflow.log_param('device', device) mlflow.set_tag('env', env) mlflow.set_tags(TAG) mlflow.log_params(CFG) mlflow.log_artifact(notebook_path) main() mlflow.log_artifacts(OUTPUT_DIR) remove_glob(f'{OUTPUT_DIR}/*latest.pth') if env=="kaggle": shutil.copy2(CONFIG_PATH, f'{OUTPUT_DIR}/{CONFIG_NAME}') # ! rm -r cassava elif env=="colab": shutil.copytree(OUTPUT_DIR, f'{INFO["SHARE_DRIVE_PATH"]}/{TITLE}') shutil.copy2(CONFIG_PATH, f'{INFO["SHARE_DRIVE_PATH"]}/{TITLE}/{CONFIG_NAME}') # + id="SOgtt7_vGZ4L" executionInfo={"status": "ok", "timestamp": 1611032247797, "user_tz": -540, "elapsed": 12716837, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}} # remove_glob(f'{INFO["SHARE_DRIVE_PATH"]}/{TITLE}/*latest.pth') # + id="5Q6S-dO2d_YO" executionInfo={"status": "ok", "timestamp": 1611032247799, "user_tz": -540, "elapsed": 12716834, "user": {"displayName": "\u30b5\u30d6\u516d", "photoUrl": "", "userId": "11539026887080079999"}}
notebook/16t-efficientnet_b3-cutmix.ipynb.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # CNN Surrogate model for mapping JV curves and to material descriptors
# We will show how the surrogate model works.
# The surrogate model replaces the numerical PDE solver with a NN, which enables >100x
# acceleration in computation speed and the capability of handling noisy data.

# ##### Libraries and dependencies:

# +
from keras import backend as K
from keras.models import Model
from keras.callbacks import ReduceLROnPlateau
from keras.layers import Input, Dense, Lambda, Conv1D, Conv2DTranspose, LeakyReLU, Activation, Flatten, Reshape
import matplotlib.pyplot as plt
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from keras.models import load_model
from keras import optimizers
import warnings
warnings.filterwarnings('ignore')

plt.rcParams["figure.figsize"] = [8, 6]
plt.rcParams.update({'font.size': 16})
plt.rcParams["font.family"] = "calibri"
# -

# ##### Load data and preprocess

# +
# Load simulated and unnormalized JV dataset.
# FIX: use forward slashes — the previous '.\data\...' literals only worked on
# Windows and relied on invalid escape sequences (DeprecationWarning on Py3).
JV_raw = np.loadtxt('./data/GaAs_sim_nJV.txt')
# Load material parameters that generated the JV dataset
par = np.loadtxt('./data/GaAs_sim_label.txt')


def min_max(x):
    """Min-max normalize the whole array to [0, 1] (global, not column-wise).

    Parameters
    ----------
    x : numpy array
        Raw data to normalize.

    Returns
    -------
    tuple
        (normalized array, global max, global min) — the extrema are returned
        so the scaling can later be inverted.
    """
    # Local names avoid shadowing the builtins `min`/`max`.
    lo = np.min(x)
    hi = np.max(x)
    return (x - lo) / (hi - lo), hi, lo


# Normalize raw JV data
JV_norm, JV_max, JV_min = min_max(JV_raw)

# Normalize JV descriptors column-wise
scaler = MinMaxScaler()
par_n = scaler.fit_transform(par)

# Create training and testing dataset
X_train, X_test, y_train, y_test = train_test_split(JV_norm, par_n, test_size=0.2)
# -

# Let's take a look at our data
plt.plot(X_train[0, :], label='original input', c='blue')
plt.xlabel('voltage(a.u.)')
plt.ylabel('current(a.u.)')
plt.legend()

# ##### build
the CNN regressor

# +
# build the NN for parameter extraction
# Architecture: a 1-D conv encoder maps a JV curve -> material descriptors,
# and a transposed-conv decoder maps descriptors -> JV curve; the two are
# trained jointly as an autoencoder-like pair.
input_dim = X_train.shape[1]   # number of voltage samples per JV curve
label_dim = y_train.shape[1]   # number of material descriptors
x = Input(shape=(input_dim,))  # JV curve input
y = Input(shape=(label_dim,))  # ground-truth descriptor input
max_filter = 256
strides = [5, 2, 2]            # per-conv-layer strides (encoder order)
kernel = [7, 5, 3]             # per-conv-layer kernel sizes (encoder order)


def encoder(x):
    # Expand (batch, input_dim) -> (batch, input_dim, 1) so Conv1D applies.
    x = Lambda(lambda x: K.expand_dims(x, axis=2))(x)
    en0 = Conv1D(max_filter//4, kernel[0], strides=strides[0], padding='SAME')(x)
    en0 = LeakyReLU(0.2)(en0)
    en1 = Conv1D(max_filter//2, kernel[1], strides=strides[1], padding='SAME')(en0)
    en1 = LeakyReLU(0.2)(en1)
    en2 = Conv1D(max_filter, kernel[2], strides=strides[2], padding='SAME')(en1)
    en2 = LeakyReLU(0.2)(en2)
    en3 = Flatten()(en2)
    en3 = Dense(100, activation='sigmoid')(en3)
    # Sigmoid output matches the [0, 1] min-max-scaled descriptors.
    z_mean = Dense(label_dim, activation='sigmoid')(en3)
    return z_mean


z_mean = encoder(x)
encoder_ = Model(x, z_mean)
encoder_.summary()

# Spatial length of the last conv feature map, used to size the decoder's
# first Reshape so it mirrors the encoder.
map_size = K.int_shape(encoder_.layers[-4].output)[1]

# Decoder: descriptors -> reconstructed JV curve (mirror of the encoder,
# built with Conv2DTranspose over a (length, 1) pseudo-2D map).
z_in = Input(shape=(label_dim,))
z1 = Dense(100, activation='relu')(z_in)
z1 = Dense(max_filter*map_size, activation='relu')(z1)
z1 = Reshape((map_size, 1, max_filter))(z1)
z2 = Conv2DTranspose(max_filter//2, (kernel[2], 1), strides=(strides[2], 1), padding='SAME')(z1)
z2 = Activation('relu')(z2)
z3 = Conv2DTranspose(max_filter//4, (kernel[1], 1), strides=(strides[1], 1), padding='SAME')(z2)
z3 = Activation('relu')(z3)
z4 = Conv2DTranspose(1, (kernel[0], 1), strides=(strides[0], 1), padding='SAME')(z3)
decoded_x = Activation('sigmoid')(z4)
# Two squeezes drop both trailing singleton dims: (b, len, 1, 1) -> (b, len).
decoded_x = Lambda(lambda x: K.squeeze(x, axis=2))(decoded_x)
decoded_x = Lambda(lambda x: K.squeeze(x, axis=2))(decoded_x)
decoder_ = Model(z_in, decoded_x)
decoder_.summary()

# Joint model: decode from the TRUE labels y while the encoder predicts them.
decoded_x = decoder_(y)
ae = Model(inputs=[x, y], outputs=[decoded_x, z_mean])


def ae_loss(x, decoded_x):
    # NOTE: closes over the graph tensors z_mean and y rather than using the
    # Keras-supplied (y_true, y_pred) pair — Keras 2.x functional-API idiom.
    # encoder loss: MSE between predicted and true descriptors
    encoder_loss = K.sum(K.square(z_mean-y), axis=-1)/label_dim
    # decoder loss: reconstruction MSE, down-weighted by 0.01
    decoder_loss = 0.01*K.sum(K.square(x - decoded_x), axis=-1)/input_dim
    ae_loss = K.mean(encoder_loss+decoder_loss)
    return ae_loss


ae.compile(optimizer=optimizers.rmsprop(lr=1e-3), loss=ae_loss)
ae.summary()

# Halve the LR when training loss plateaus.
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, min_lr=0.00001)

ae.fit(([X_train, y_train]), ([X_train, y_train]), shuffle=True,
       batch_size=64, epochs=200, validation_split=0.0,
       validation_data=None, callbacks=[reduce_lr])

encoder_ = Model(x, z_mean)
x_test_encoded_1 = encoder_.predict(X_test)
# -

# ##### plot the prediction vs ground truth for parameter extraction

# +
x_train_encoded = encoder_.predict(X_train)
x_test_encoded = encoder_.predict(X_test)

# parameters to be extracted
para = ['lifetime', 'FSRV', 'RSRV', 'Rs']

# One scatter plot per descriptor: prediction vs ground truth (both in [0, 1]).
for i in range(len(para)):
    plt.figure()
    plt.scatter(x_test_encoded[:, i], y_test[:, i])
    plt.xlabel('Prediction')
    plt.ylabel('Ground Truth')
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.title(para[i] + ' prediction')
    plt.show()
# -

# ##### Compute the MSE for the parameter extraction

# +
from sklearn.metrics import mean_squared_error
mse = mean_squared_error
mse_train = mse(x_train_encoded, y_train)
mse_test = mse(x_test_encoded, y_test)
print('train mse: %.6f' % (mse_train))
print('test mse: %.6f' % (mse_test))
# -

# ##### save the NN model

# +
ae.save('./model/AE.h5')
encoder_.save('./model/para_extract.h5')
decoder_.save('./model/JV_extract.h5')
# -

# ##### plot the prediction vs ground truth for JV reconstruction
# This is to show that the decoder (CNN) can reproduce the PDE solver's generated JVs

# Random test-set index to visualize.
i_par = np.random.randint(100)

# +
y_hat_train = decoder_.predict(y_train)
y_hat_test = decoder_.predict(y_test)

plt.figure()
plt.plot(X_test[i_par, :], label='Numerical solver')
plt.plot(y_hat_test[i_par, :], '--', label='NN Prediction')
plt.xlabel('voltage(a.u.)')
plt.ylabel('current(a.u.)')
plt.legend()
# -
Week7/JV_demo.ipynb
# ##### Copyright 2021 Google LLC. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # nurses_sat # <table align="left"> # <td> # <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/sat/nurses_sat.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a> # </td> # <td> # <a href="https://github.com/google/or-tools/blob/master/ortools/sat/samples/nurses_sat.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a> # </td> # </table> # First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab. # !pip install ortools # + # #!/usr/bin/env python3 # Copyright 2010-2021 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Example of a simple nurse scheduling problem.""" # [START program] # [START import] from ortools.sat.python import cp_model # [END import] # [START solution_printer] class NursesPartialSolutionPrinter(cp_model.CpSolverSolutionCallback): """Print intermediate solutions.""" def __init__(self, shifts, num_nurses, num_days, num_shifts, sols): cp_model.CpSolverSolutionCallback.__init__(self) self._shifts = shifts self._num_nurses = num_nurses self._num_days = num_days self._num_shifts = num_shifts self._solutions = set(sols) self._solution_count = 0 def on_solution_callback(self): if self._solution_count in self._solutions: print('Solution %i' % self._solution_count) for d in range(self._num_days): print('Day %i' % d) for n in range(self._num_nurses): is_working = False for s in range(self._num_shifts): if self.Value(self._shifts[(n, d, s)]): is_working = True print(' Nurse %i works shift %i' % (n, s)) if not is_working: print(' Nurse {} does not work'.format(n)) print() self._solution_count += 1 def solution_count(self): return self._solution_count # [END solution_printer] # Data. # [START data] num_nurses = 4 num_shifts = 3 num_days = 3 all_nurses = range(num_nurses) all_shifts = range(num_shifts) all_days = range(num_days) # [END data] # Creates the model. # [START model] model = cp_model.CpModel() # [END model] # Creates shift variables. # shifts[(n, d, s)]: nurse 'n' works shift 's' on day 'd'. # [START variables] shifts = {} for n in all_nurses: for d in all_days: for s in all_shifts: shifts[(n, d, s)] = model.NewBoolVar('shift_n%id%is%i' % (n, d, s)) # [END variables] # Each shift is assigned to exactly one nurse in the schedule period. # [START exactly_one_nurse] for d in all_days: for s in all_shifts: model.Add(sum(shifts[(n, d, s)] for n in all_nurses) == 1) # [END exactly_one_nurse] # Each nurse works at most one shift per day. 
# [START at_most_one_shift] for n in all_nurses: for d in all_days: model.Add(sum(shifts[(n, d, s)] for s in all_shifts) <= 1) # [END at_most_one_shift] # [START assign_nurses_evenly] # Try to distribute the shifts evenly, so that each nurse works # min_shifts_per_nurse shifts. If this is not possible, because the total # number of shifts is not divisible by the number of nurses, some nurses will # be assigned one more shift. min_shifts_per_nurse = (num_shifts * num_days) // num_nurses if num_shifts * num_days % num_nurses == 0: max_shifts_per_nurse = min_shifts_per_nurse else: max_shifts_per_nurse = min_shifts_per_nurse + 1 for n in all_nurses: num_shifts_worked = 0 for d in all_days: for s in all_shifts: num_shifts_worked += shifts[(n, d, s)] model.Add(min_shifts_per_nurse <= num_shifts_worked) model.Add(num_shifts_worked <= max_shifts_per_nurse) # [END assign_nurses_evenly] # Creates the solver and solve. # [START solve] solver = cp_model.CpSolver() solver.parameters.linearization_level = 0 # Enumerate all solutions. solver.parameters.enumerate_all_solutions = True # Display the first five solutions. a_few_solutions = range(5) solution_printer = NursesPartialSolutionPrinter(shifts, num_nurses, num_days, num_shifts, a_few_solutions) solver.Solve(model, solution_printer) # [END solve] # Statistics. print() print('Statistics') print(' - conflicts : %i' % solver.NumConflicts()) print(' - branches : %i' % solver.NumBranches()) print(' - wall time : %f s' % solver.WallTime()) print(' - solutions found : %i' % solution_printer.solution_count())
examples/notebook/sat/nurses_sat.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 9.5 多尺度目标检测 # 1. 减少锚框个数 # - 在输入图像中均匀采样一小部分像素,并以采样的像素为中心生成锚框 # - 可以生成不同数量和不同大小的锚框 # # + # %matplotlib inline from PIL import Image import numpy as np import torch import sys sys.path.append("..") import d2lzh_pytorch.utils as d2l img = Image.open('../img/catdog.jpg') w, h = img.size # (728, 561) # + d2l.set_figsize() def display_anchors(fmap_w, fmap_h, s): fmap = torch.zeros((1, 10, fmap_w, fmap_h), dtype=torch.float32) # 平移所有锚框使均匀分布在图片上 offset_x, offset_y = 1.0/fmap_w, 1.0/fmap_h anchors = d2l.MultiBoxPrior(fmap, sizes=s, ratios=[1, 2, 0.5]) + \ torch.tensor([offset_x/2, offset_y/2, offset_x/2, offset_y/2]) bbox_scale = torch.tensor([[w, h, w, h]], dtype=torch.float32) d2l.show_bboxes(d2l.plt.imshow(img).axes, anchors[0] * bbox_scale) # - display_anchors(fmap_w=4, fmap_h=2, s=[0.15]) display_anchors(fmap_w=2, fmap_h=1, s=[0.4]) display_anchors(fmap_w=1, fmap_h=1, s=[0.8])
dl/dive-into-dl/chapter09_computer-version/9.05_multiscale-object-detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/jamesjun/ironclust/blob/master/spikeforest_analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="xfQAJFPFgbzE" colab_type="text" # ## SpikeForest bionet analysis # + [markdown] id="Fcx4kQoVt374" colab_type="text" # This notebook represents a complete spikeforest analysis of the bionet studies. You should execute the first few cells and then skip down to the section of interest below. # + id="vtsE5P3EgXYr" colab_type="code" colab={} # JJJ was here # JFM was here too # Only run this cell if you are running this on a hosted runtime that does not have these packages installed # # %%capture is used to suppress the output... 
this should take up to a minute to complete # %%capture # !pip install spikeforest # !pip install git+https://github.com/magland/spikeforest_batch_run # + id="P3-FpNWUgWln" colab_type="code" colab={} # Import the python packages -- autoreload is used for development purposes # %load_ext autoreload # %autoreload 2 import spikeforest as sf from kbucket import client as kb # + id="_JHpizoTul8M" colab_type="code" colab={} ## Configure readonly access to kbucket -- use this if you only want to browse the results --- sf.kbucketConfigRemote(name='spikeforest1-readonly') # + id="3pHRiMtggkaT" colab_type="code" outputId="d8b2b93b-c7b4-4a15-db29-bd690ad4815c" colab={"base_uri": "https://localhost:8080/", "height": 51} ## Configure read/write access to kbucket -- use this if you are preparing the studies or the processing batches sf.kbucketConfigRemote(name='spikeforest1-readwrite',ask_password=True) # + [markdown] id="blWcqWOIhzkc" colab_type="text" # ## Prepare recordings # + id="HtbjJUirgm0s" colab_type="code" colab={} def read_text_file(path): path2=kb.realizeFile(path) if path2 is None: raise Exception('Unable to realize file: '+path) with open(path2,'r') as f: return f.read() def prepare_bionet_studies(*,basedir,channels): study_set_name='bionet' studies=[] recordings=[] names=['bionet_drift','bionet_shuffle','bionet_static'] for name in names: study_name=name study_dir=basedir+'/bionet/'+name description=read_text_file(study_dir+'/readme.txt') study0=dict( name=study_name, study_set=study_set_name, directory=study_dir, description=description ) studies.append(study0) dd=kb.readDir(study_dir) for dsname in dd['dirs']: dsdir='{}/{}'.format(study_dir,dsname) rec0=dict( name=dsname, study=study_name, description='', directory=dsdir, channels=channels ) if len(rec0['channels'])>0: units=sf.sf_batch.select_units_on_channels( recording_dir=dsdir, firings=dsdir+'/firings_true.mda', channels=rec0['channels'] ) rec0['units_true']=units recordings.append(rec0) return studies, 
recordings def prepare_magland_synth_studies(*,basedir): study_set_name='magland_synth' studies=[] recordings=[] names=[] names=names+['datasets_noise10_K10_C4','datasets_noise10_K10_C8'] names=names+['datasets_noise10_K20_C4','datasets_noise10_K20_C8'] names=names+['datasets_noise20_K10_C4','datasets_noise20_K10_C8'] names=names+['datasets_noise20_K20_C4','datasets_noise20_K20_C8'] description=read_text_file(basedir+'/magland_synth/readme.txt') for name in names: study_name='magland_synth_'+name[9:] study_dir=basedir+'/magland_synth/'+name study0=dict( name=study_name, study_set=study_set_name, directory=study_dir, description=description ) studies.append(study0) dd=kb.readDir(study_dir) for dsname in dd['dirs']: dsdir='{}/{}'.format(study_dir,dsname) recordings.append(dict( name=dsname, study=study_name, directory=dsdir, description='One of the recordings in the {} study'.format(study_name) )) return studies, recordings # + id="BpXkEoS8gaMn" colab_type="code" colab={} basedir='kbucket://15734439d8cf/groundtruth' # + id="b92joharg0G5" colab_type="code" colab={} channels=[0,1,2,3,4,5,6,7] studies,recordings=prepare_bionet_studies(basedir=basedir,channels=channels) kb.saveObject(dict(studies=studies,recordings=recordings),key=dict(name='spikeforest_bionet_recordings')) # + id="XCUMPH8JgXK7" colab_type="code" outputId="98e5a738-2d09-4a88-e29c-01211964fb92" colab={"base_uri": "https://localhost:8080/", "height": 34} studies,recordings=prepare_magland_synth_studies(basedir=basedir) kb.saveObject(dict(studies=studies,recordings=recordings),key=dict(name='spikeforest_magland_synth_recordings')) # + [markdown] id="rvCvUJ1JiZX1" colab_type="text" # ## Create summarize recordings batches # + id="MWIJU5Gxic_f" colab_type="code" colab={} def create_summarize_recordings_batch(*,recordings_name,batch_name): print('Creating summarize_recordings batch: '+batch_name) SF=sf.SFData() SF.loadRecordings(key=dict(name=recordings_name)) jobs=[] for name in SF.studyNames(): 
study=SF.study(name) for recname in study.recordingNames(): R=study.recording(recname) job=dict( command='summarize_recording', label=R.name(), recording=R.getObject() ) jobs.append(job) batch=dict(jobs=jobs) print('Number of jobs: {}'.format(len(jobs))) kb.saveObject(key=dict(batch_name=batch_name),object=batch) # + id="Vrw88QA5hACU" colab_type="code" outputId="71ece4cc-ad87-4c7e-c7f8-3aed3a64d1ca" colab={"base_uri": "https://localhost:8080/", "height": 102} create_summarize_recordings_batch(recordings_name='spikeforest_bionet_recordings',batch_name='summarize_recordings_bionet') create_summarize_recordings_batch(recordings_name='spikeforest_magland_synth_recordings',batch_name='summarize_recordings_magland_synth') # + [markdown] id="weikHp2gjbwJ" colab_type="text" # To run these batches, go to a computer with resources somewhere and run something like: # # ``` # bin/sf_run_batch [name_of_batch] --run_prefix "srun -c 2 -n 40" # ``` # # where bin/sf_run_batch is found in the spikeforest_batch_run repository. # # Alternatively, you can test run it in this notebook using the following commands: # + id="z5k_8AHGk0mu" colab_type="code" outputId="416499f3-7ffc-467e-b994-34f66e226d2e" colab={"base_uri": "https://localhost:8080/", "height": 731} ## Note: usually you would not run this cell -- see the note above. 
import spikeforest_batch_run as sbr # Execute prepareBatch once (serially) sbr.prepareBatch(batch_name='summarize_recordings_bionet') # Execute runBatch many times in parallel sbr.runBatch(batch_name='summarize_recordings_bionet') # Execute assembleBatchResults once (serially) sbr.assembleBatchResults(batch_name='summarize_recordings_bionet') # + [markdown] id="TKwgGlPKh5uE" colab_type="text" # ## Browse recordings # + id="-qq56y38hJpv" colab_type="code" outputId="43ad502f-c6b8-407f-90a9-90c1e9429cc6" colab={"base_uri": "https://localhost:8080/", "height": 68} SF=sf.SFData() SF.loadRecordings(key=dict(name='spikeforest_bionet_recordings')) SF.loadRecordings(key=dict(name='spikeforest_magland_synth_recordings')) SF.loadProcessingBatch(key=dict(batch_name='summarize_recordings_bionet',name='job_results')) SF.loadProcessingBatch(key=dict(batch_name='summarize_recordings_magland_synth',name='job_results')) # + id="6o8vClJ9hrRa" colab_type="code" outputId="0379a936-5617-446d-ed25-3d1e8dde8cd1" colab={"base_uri": "https://localhost:8080/", "height": 97} X=sf.SFSelectWidget(sfdata=SF,mode='recording') display(X) # + id="CwiDbAZbiF48" colab_type="code" colab={} R=X.recording() display(R.plot('timeseries')) display(R.plot('waveforms_true')) display(R.trueUnitsInfo()) # + id="WBdqbkSNiJyS" colab_type="code" outputId="91e60e18-03d8-49a2-b780-a1bbdb1117a2" colab={"base_uri": "https://localhost:8080/", "height": 34} R.plotNames() # + [markdown] id="1vvFYgXjmetF" colab_type="text" # ## Create spike sorting batches # + id="7toleMsBnMy6" colab_type="code" outputId="0c2d92e4-6754-4f8d-8edb-866ea1dae07a" colab={"base_uri": "https://localhost:8080/", "height": 34} SF=sf.SFData() SF.loadRecordings(key=dict(name='spikeforest_bionet_recordings')) SF.loadRecordings(key=dict(name='spikeforest_magland_synth_recordings')) SF.loadProcessingBatch(key=dict(batch_name='summarize_recordings_bionet',name='job_results')) 
SF.loadProcessingBatch(key=dict(batch_name='summarize_recordings_magland_synth',name='job_results')) # + id="2PHEdeuBnVJl" colab_type="code" colab={} sorter_ms4_thr3=dict( name='MountainSort4-thr3', processor_name='MountainSort4', params=dict( detect_sign=-1, adjacency_radius=100, detect_threshold=3 ) ) sorter_irc_tetrode=dict( name='IronClust-tetrode', processor_name='IronClust', params=dict( detect_sign=-1, adjacency_radius=100, detect_threshold=5, prm_template_name="tetrode_template.prm" ) ) sorter_irc_drift=dict( name='IronClust-drift', processor_name='IronClust', params=dict( detect_sign=-1, adjacency_radius=100, prm_template_name="drift_template.prm" ) ) sorter_sc=dict( name='SpykingCircus', processor_name='SpykingCircus', params=dict( detect_sign=-1, adjacency_radius=100 ) ) # + id="NhvWMXK7mSYu" colab_type="code" colab={} def create_sorting_batch(*,recordings_name,batch_name,sorters): print('Creating sorting batch: '+batch_name) SF=sf.SFData() SF.loadRecordings(key=dict(name=recordings_name)) jobs=[] for name in SF.studyNames(): study=SF.study(name) for rname in study.recordingNames(): R=study.recording(rname) for sorter in sorters: job=dict( command='sort_recording', label=sorter['name']+': '+R.name(), recording=R.getObject(), sorter=sorter ) jobs.append(job) batch=dict(jobs=jobs) print('Number of jobs: {}'.format(len(jobs))) kb.saveObject(key=dict(batch_name=batch_name),object=batch) # + id="yb-zTMQgnK98" colab_type="code" outputId="b6bb6fbc-f252-4cd3-ddb2-0ad124f17828" colab={"base_uri": "https://localhost:8080/", "height": 323} create_sorting_batch(recordings_name='spikeforest_magland_synth_recordings',batch_name='ms4_magland_synth',sorters=[sorter_ms4_thr3]) create_sorting_batch(recordings_name='spikeforest_magland_synth_recordings',batch_name='irc_magland_synth',sorters=[sorter_irc_tetrode]) create_sorting_batch(recordings_name='spikeforest_magland_synth_recordings',batch_name='sc_magland_synth',sorters=[sorter_sc]) 
create_sorting_batch(recordings_name='spikeforest_bionet_recordings',batch_name='ms4_bionet',sorters=[sorter_ms4_thr3]) create_sorting_batch(recordings_name='spikeforest_bionet_recordings',batch_name='irc_bionet',sorters=[sorter_irc_drift]) create_sorting_batch(recordings_name='spikeforest_bionet_recordings',batch_name='sc_bionet',sorters=[sorter_sc]) # + [markdown] id="W2VTozxFnoE3" colab_type="text" # To run these sorting batches, follow the instructions above. # + [markdown] id="dfDRhV7jpVgy" colab_type="text" # ## Browse sorting results # + id="4ga6PepCpXKF" colab_type="code" outputId="13268d3e-76a9-4ff0-e888-88f9c2e70897" colab={"base_uri": "https://localhost:8080/", "height": 190} SF=sf.SFData() SF.loadRecordings(key=dict(name='spikeforest_bionet_recordings')) SF.loadRecordings(key=dict(name='spikeforest_magland_synth_recordings')) SF.loadProcessingBatch(key=dict(batch_name='summarize_recordings_bionet',name='job_results')) SF.loadProcessingBatch(key=dict(batch_name='summarize_recordings_magland_synth',name='job_results')) SF.loadProcessingBatch(key=dict(batch_name='ms4_magland_synth',name='job_results')) SF.loadProcessingBatch(key=dict(batch_name='sc_magland_synth',name='job_results')) SF.loadProcessingBatch(key=dict(batch_name='irc_magland_synth',name='job_results')) SF.loadProcessingBatch(key=dict(batch_name='ms4_bionet',name='job_results')) #SF.loadProcessingBatch(key=dict(batch_name='sc_bionet',name='job_results')) ## Spyking circus not working yet -- need to put into singularity container SF.loadProcessingBatch(key=dict(batch_name='irc_bionet',name='job_results')) # + id="D7Ilcyb0pki5" colab_type="code" outputId="febd1b8a-828d-40b0-8fa7-23fcdf93b7b8" colab={"base_uri": "https://localhost:8080/", "height": 120} X=sf.SFSelectWidget(sfdata=SF,mode='sorting_result') display(X) # + id="Sganjg9cp56c" colab_type="code" colab={} R=X.sortingResult() display(R.plot('unit_waveforms')) display(R.plot('autocorrelograms')) display(R.comparisonWithTruth()) # + 
[markdown] id="fL2QjjBPqXS7" colab_type="text" # ## Aggregate sorting results # + id="bBl4NBQCrXl3" colab_type="code" outputId="4531b9bd-7cda-436f-c963-d9fbfaa90c33" colab={"base_uri": "https://localhost:8080/", "height": 136} SF=sf.SFData() SF.loadRecordings(key=dict(name='spikeforest_bionet_recordings')) SF.loadProcessingBatch(key=dict(batch_name='summarize_recordings_bionet',name='job_results')) SF.loadProcessingBatch(key=dict(batch_name='ms4_bionet',name='job_results')) #SF.loadProcessingBatch(key=dict(batch_name='sc_bionet',name='job_results')) ## Spyking circus not working yet -- need to put into singularity container SF.loadProcessingBatch(key=dict(batch_name='irc_bionet',name='job_results')) # + id="XyBRKILPp-0h" colab_type="code" colab={} import pandas as pd import random import altair as alt alt.renderers.enable('colab') # Accumulate the sorting results def accumulate_comparison_with_ground_truth(*,SF,studies,sorter_name,fieldnames): ret=[] for study in studies: recordings=[study.recording(name) for name in study.recordingNames()] for R in recordings: result=R.sortingResult(sorter_name) A=result.comparisonWithTruth(format='json') B=R.trueUnitsInfo(format='json') snr_by_true_unit=dict() for b in B: snr_by_true_unit[b['unit_id']]=b['snr'] for i in A: a=A[i] rec=dict() rec['recording_name']=R.name() rec['unit_id']=a['Unit ID'] rec['snr']=snr_by_true_unit[rec['unit_id']] for fieldname in fieldnames: rec[fieldname]=float(a[fieldname]) ret.append(rec) return ret def show_accuracy_plot(*,SF,study_name,sorter_name,title): study=SF.study(study_name) X=accumulate_comparison_with_ground_truth( SF=SF, studies=[study], sorter_name=sorter_name, fieldnames=['Accuracy'] ) # Display the accumulated sorting results cc=alt.Chart(pd.DataFrame(X),title=title).mark_point().encode( x='snr', y='Accuracy', color='recording_name', tooltip='recording_name' ).interactive() display(cc) # + id="8-bIDoH1rkYW" colab_type="code" colab={} import vdomr as vd class SelectBox(vd.Component): 
def __init__(self,options=[]): vd.Component.__init__(self) self._on_change_handlers=[] self._value=None self.setOptions(options) def setOptions(self,options): self._options=options if self._value not in options: self._value=options[0] if options else None self.refresh() def value(self): return self._value def setValue(self,value): self._value=value self.refresh() def onChange(self,handler): self._on_change_handlers.append(handler) def _on_change(self,value): self._value=value for handler in self._on_change_handlers: handler(value=value) def render(self): opts=[] for option in self._options: if option==self._value: opts.append(vd.option(option,selected='selected')) else: opts.append(vd.option(option)) X=vd.select(opts,onchange=self._on_change) return X # + id="THJyQ4V4rGT6" colab_type="code" outputId="b6cc5a40-723e-4064-a1ab-0c73b285e754" colab={"base_uri": "https://localhost:8080/", "height": 55} STUDY=SelectBox(options=SF.studyNames()) SORTER=SelectBox(options=['MountainSort4-thr3','IronClust-drift']) display(STUDY) display(SORTER) # + id="fI6RItt-q5pM" colab_type="code" outputId="5df12ff5-914d-415e-ffa0-797bf9c147f3" colab={"base_uri": "https://localhost:8080/", "height": 815} show_accuracy_plot( SF=SF, study_name=STUDY.value(), sorter_name=SORTER.value(), title=SORTER.value()+' '+STUDY.value() ) # + id="Opz2K0VPrZAm" colab_type="code" colab={}
spikeforest_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %load_ext autoreload
# %autoreload 2

from bagnets.utils import plot_heatmap, generate_heatmap_pytorch
from bagnets.utils import pad_image, convert2channel_last, imagenet_preprocess, extract_patches, bagnet_predict, compare_heatmap
from foolbox.utils import samples
import bagnets.keras
import numpy as np
import matplotlib.pyplot as plt
import time

# load pretrained model
bagnet33 = bagnets.keras.bagnet33()
bagnet33.summary()

# +
original, label = samples(dataset='imagenet', index=1, batchsize=1, shape=(224, 224), data_format='channels_first')

# preprocess sample image (ImageNet mean/std normalization, channels-first)
sample = original / 255.
sample -= np.array([0.485, 0.456, 0.406])[:, None, None]
sample /= np.array([0.229, 0.224, 0.225])[:, None, None]
# -


def generate_heatmap_keras(model, image, target, patchsize):
    """
    Generate a high-resolution heatmap for a BagNet by decomposing the
    image into all possible patches and computing the logits for each patch.

    FIX: the previous version was copied from the PyTorch implementation and
    called tensor-only methods on NumPy arrays (`.unfold`, `torch.split`,
    `.data.cpu().numpy()`, with `torch` never imported), so it could not run.
    This version is pure NumPy + `model.predict`, and returns a heatmap of
    the actual image size instead of a hard-coded (224, 224).

    Parameters
    ----------
    model : Keras model
        One of the BagNets; `model.predict` maps a batch of channels-first
        patches to per-class logits.
        NOTE(review): assumes the Keras BagNet consumes channels-first input
        like its PyTorch counterpart — confirm against bagnets.keras.
    image : numpy array of shape [1, C, X, Y]
        The image for which we want to compute the heatmap.
    target : int or array-like with a single element
        Class for which the heatmap is computed (foolbox returns arrays).
    patchsize : int
        The size of the receptive field of the given BagNet.

    Returns
    -------
    numpy array of shape [X, Y] holding one class logit per pixel.
    """
    _, c, x, y = image.shape
    # Zero-pad so that every pixel is the center of a full patch.
    pad = (patchsize - 1) // 2
    padded = np.zeros((c, x + patchsize - 1, y + patchsize - 1), dtype=np.float32)
    padded[:, pad:pad + x, pad:pad + y] = image[0]

    # All sliding patches, channels-first: shape (x*y, c, patchsize, patchsize).
    windows = np.lib.stride_tricks.sliding_window_view(padded, (patchsize, patchsize), axis=(1, 2))
    patches = windows.transpose(1, 2, 0, 3, 4).reshape(-1, c, patchsize, patchsize)

    # Accept either a plain int or a length-1 array for the target class.
    cls = int(np.ravel(target)[0])

    # Predict in batches of 1000 patches to bound memory use.
    logits_list = []
    for start in range(0, patches.shape[0], 1000):
        # sliding_window_view yields a strided view; Keras needs contiguous data.
        batch = np.ascontiguousarray(patches[start:start + 1000])
        logits = np.asarray(model.predict(batch))
        logits_list.append(logits[:, cls])
    logits = np.hstack(logits_list)
    return logits.reshape((x, y))


# generate heatmap
heatmap = generate_heatmap_keras(bagnet33, sample, label, 33)
code/notebooks/.ipynb_checkpoints/2019-5-24_check_keras_model-checkpoint.ipynb
// -*- coding: utf-8 -*- // --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Scala // language: scala // name: scala // --- // <a name="top"></a><img src="images/chisel_1024.png" alt="Chisel logo" style="width:480px;" /> // # Module 3.3: Higher-Order Functions // **Prev: [Interlude: Chisel Standard Library](3.2_interlude.ipynb)**<br> // **Next: [Functional Programming](3.4_functional_programming.ipynb)** // // ## Motivation // Those pesky `for` loops in the previous module are verbose and defeat the purpose of functional programming! In this module, your generators will get funct-ky. // // ## Setup val path = System.getProperty("user.dir") + "/source/load-ivy.sc" interp.load.module(ammonite.ops.Path(java.nio.file.FileSystems.getDefault().getPath(path))) import chisel3._ import chisel3.util._ import chisel3.iotesters.{ChiselFlatSpec, Driver, PeekPokeTester} // --- // # A Tale of Two FIRs <a name="compact-fir"></a> // From the last module, we had the convolution part of the FIR filter written like this: // // ```scala // val muls = Wire(Vec(length, UInt(8.W))) // for(i <- 0 until length) { // if(i == 0) muls(i) := io.in * io.consts(i) // else muls(i) := regs(i - 1) * io.consts(i) // } // // val scan = Wire(Vec(length, UInt(8.W))) // for(i <- 0 until length) { // if(i == 0) scan(i) := muls(i) // else scan(i) := muls(i) + scan(i - 1) // } // // io.out := scan(length - 1) // ``` // // As a recap, the idea is to multiply each element of `io.in` with the corresponding element of `io.consts`, and store it in `muls`. // Then, the elements in `muls` are accumulated into `scan`, with `scan(0) = muls(0)`, `scan(1) = scan(0) + muls(1) = muls(0) + muls(1)`, and in general `scan(n) = scan(n-1) + muls(n) = muls(0) + ... + muls(n-1) + muls(n)`. // The last element in `scan` (equal to the sum of all `muls`) is assigned to `io.out`. 
// // However, it's very verbose for what might be considered quite a simple operation. In fact, all that could be written in one line: // // ```scala // io.out := (taps zip io.consts).map { case (a, b) => a * b }.reduce(_ + _) // ``` // // What is it doing?! Let's break it down: // - `(taps zip io.consts)` takes two lists, `taps` and `io.consts`, and combines them into one list where each element is a tuple of the elements at the inputs at the corresponding position. Concretely, its value would be `[(taps(0), io.consts(0)), (taps(1), io.consts(1)), ..., (taps(n), io.consts(n))]`. Remember that periods are optional, so this is equivalent to `(taps.zip(io.consts))`. // - `.map { case (a, b) => a * b }` applies the anonymous function (takes a tuple of two elements returns their product) to the elements of the list, and returns the result. In this case, the result is equivalent to `muls` in the verbose example, and has the value `[taps(0) * io.consts(0), taps(1) * io.consts(1), ..., taps(n) * io.consts(n)]`. You'll revisit anonymous functions in the next module. For now, just learn this syntax. // - Finally, `.reduce(_ + _)` also applies the function (addition of elements) to elements of the list. However, it takes two arguments: the first is the current accumulation, and the second is the list element (in the first iteration, it just adds the first two elements). These are given by the two underscores in the parentheses. The result would then be, assuming left-to-right traversal, `(((muls(0) + muls(1)) + muls(2)) + ...) + muls(n)`, with the result of deeper-nested parentheses evaluated first. This is the output of the convolution. // // --- // # Functions as Arguments // Formally, functions like `map` and `reduce` are called _higher-order functions_ : they are functions that take functions as arguments. 
// As it turns out (and hopefully, as you can see from the above example), these are very powerful constructs that encapsulate a general computational pattern, allowing you to concentrate on the application logic instead of flow control, and resulting in very concise code. // // ## Different ways of specifying functions // You may have noticed that there were two ways of specifying functions in the examples above: // - For functions where each argument is referred to exactly once, you *may* be able to use an underscore (`_`) to refer to each argument. In the example above, the `reduce` argument function took two arguments and could be specified as `_ + _`. While convenient, this is subject to an additional set of arcane rules, so if it does't work, try: // - Specifying the inputs argument list explicitly. The reduce could have been explicitly written as `(a, b) => a + b`, with the general form of putting the argument list in parentheses, followed by `=>`, followed by the function body referring to those arguments. // - When tuple unpacking is needed, using a `case` statement, as in `case (a, b) => a * b`. That takes a single argument, a tuple of two elements, and unpacks it into variables `a` and `b`, which can then be used in the function body. // // ## Practice in Scala // In the last module, we've seen major classes in the Scala Collections API, like `List`s. // These higher-order functions are part of these APIs - and in fact, the above example uses the `map` and `reduce` API on `List`s. // In this section, we'll familiarize ourselves with these methods through examples and exercises. // In these examples, we'll operate on Scala numbers (`Int`s) for the sake of simpliciy and clarify, but because Chisel operators behave similarly, the concepts should generalize. // // <span style="color:blue">**Example: Map**</span><br> // `List[A].map` has type signature `map[B](f: (A) ⇒ B): List[B]`. You'll learn more about types in a later module. 
For now, think of types A and B as `Int`s or `UInt`s, meaning they could be software or hardware types. // // In plain English, it takes an argument of type `(f: (A) ⇒ B)`, or a function that takes one argument of type `A` (the same type as the element of the input List) and returns a value of type `B` (which can be anything). `map` then returns a new list of type `B` (the return type of the argument function). // // As we've already explained the behavior of List in the FIR explanation, let's get straight into the examples and exercises: // + println(List(1, 2, 3, 4).map(x => x + 1)) // explicit argument list in function println(List(1, 2, 3, 4).map(_ + 1)) // equivalent to the above, but implicit arguments println(List(1, 2, 3, 4).map(_.toString + "a")) // the output element type can be different from the input element type println(List((1, 5), (2, 6), (3, 7), (4, 8)).map { case (x, y) => x*y }) // this unpacks a tuple, note use of curly braces // Related: Scala has a syntax for constructing lists of sequential numbers println(0 to 10) // to is inclusive , the end point is part of the result println(0 until 10) // until is exclusive at the end, the end point is not part of the result // Those largely behave like lists, and can be useful for generating indices: val myList = List("a", "b", "c", "d") println((0 until 4).map(myList(_))) // - // <span style="color:red">**Exercise: Map**</span><br><a name="map-exercise"></a> // Now you try: // Fill in the blanks (the ???) such that this doubles the elements of the input list. // This should return: List(2, 4, 6, 8) println(List(1, 2, 3, 4).map(???)) // <span style="color:blue">**Example: zipWithIndex**</span><br> // `List.zipWithIndex` has type signature `zipWithIndex: List[(A, Int)]`. // // It takes no arguments, but returns a list where each element is a tuple of the original elements, and the index (with the first one being zero). 
// So `List("a", "b", "c", "d").zipWithIndex` would return `List(("a", 0), ("b", 1), ("c", 2), ("d", 3))`
//
// This is useful when the element index is needed in some operation.
//
// Since this is pretty straightforward, we'll just have some examples:

println(List(1, 2, 3, 4).zipWithIndex) // note indices start at zero
println(List("a", "b", "c", "d").zipWithIndex)
println(List(("a", "b"), ("c", "d"), ("e", "f"), ("g", "h")).zipWithIndex) // tuples nest

// <span style="color:blue">**Example: Reduce**</span><br>
// `List[A].reduce` has type signature similar to `reduce(op: (A, A) ⇒ A): A`. (it's actually more lenient, `A` only has to be a supertype of the List type, but we're not going to deal with that syntax here)
//
// As it's also been explained above, here are some examples:

println(List(1, 2, 3, 4).reduce((a, b) => a + b)) // returns the sum of all the elements
println(List(1, 2, 3, 4).reduce(_ * _)) // returns the product of all the elements
println(List(1, 2, 3, 4).map(_ + 1).reduce(_ + _)) // you can chain reduce onto the result of a map

// Important note: reduce will fail with an empty list
println(List[Int]().reduce(_ * _))

// <span style="color:red">**Exercise: Reduce**</span><br><a name="reduce-exercise"></a>
// Now you try:
// Fill in the blanks (the ???) such that this returns the product of the double of the elements of the input list.
// This should return: (1*2)*(2*2)*(3*2)*(4*2) = 384
println(List(1, 2, 3, 4).map(???).reduce(???))

// <span style="color:blue">**Example: Fold**</span><br>
// `List[A].fold` is very similar to reduce, except that you can specify the initial accumulation value.
// It has type signature similar to `fold(z: A)(op: (A, A) ⇒ A): A`. (like `reduce`, the type of `A` is also more lenient)
//
// Notably, it takes two argument lists, the first (`z`) is the initial value, and the second is the accumulation function.
// Unlike `reduce`, it will not fail with an empty list, instead returning the initial value directly.
// // Here's some examples: println(List(1, 2, 3, 4).fold(0)(_ + _)) // equivalent to the sum using reduce println(List(1, 2, 3, 4).fold(1)(_ + _)) // like above, but accumulation starts at 1 println(List().fold(1)(_ + _)) // unlike reduce, does not fail on an empty input // <span style="color:red">**Exercise: Fold**</span><br><a name="fold-exercise"></a> // Now you try: // Fill in the blanks (the ???) such that this returns the double the product of the elements of the input list. // This should return: 2*(1*2*3*4) = 48 // Note: unless empty list tolerance is needed, reduce is a much better fit here. println(List(1, 2, 3, 4).fold(???)(???)) // <span style="color:red">**Exercise: Decoupled Arbiter**</span><br> // Let's put everything together now into an exercise. // // For this example, we're going to build a Decoupled arbiter: a module that has _n_ Decoupled inputs and one Decoupled output. // The arbiter selects the lowest channel that is valid and forwards it to the output. // // Some hints: // - Architecturally: // - `io.out.valid` is true if any of the inputs are valid // - Consider having an internal wire of the selected channel // - Each input's `ready` is true if the output is ready, AND that channel is selected (this does combinationally couple ready and valid, but we'll ignore it for now...) // - These constructs may help: // - `map`, especially for returning a Vec of sub-elements, for example `io.in.map(_.valid)` returns a list of valid signals of the input Bundles // - `PriorityMux(List[Bool, Bits])`, which takes in a list of valid signals and bits, returning the first element that is valid // - Dynamic index on a Vec, by indexing with a UInt, for example `io.in(0.U)` // + class MyRoutingArbiter(numChannels: Int) extends Module { val io = IO(new Bundle { val in = Vec(numChannels, Flipped(Decoupled(UInt(8.W)))) val out = Decoupled(UInt(8.W)) } ) // Your code here ??? 
} // verify that the computation is correct class MyRoutingArbiterTester(c: MyRoutingArbiter) extends PeekPokeTester(c) { // Set input defaults for(i <- 0 until 4) { poke(c.io.in(i).valid, 0) poke(c.io.in(i).bits, i) poke(c.io.out.ready, 1) } expect(c.io.out.valid, 0) // Check single input valid behavior with backpressure for (i <- 0 until 4) { poke(c.io.in(i).valid, 1) expect(c.io.out.valid, 1) expect(c.io.out.bits, i) poke(c.io.out.ready, 0) expect(c.io.in(i).ready, 0) poke(c.io.out.ready, 1) poke(c.io.in(i).valid, 0) } // Basic check of multiple input ready behavior with backpressure poke(c.io.in(1).valid, 1) poke(c.io.in(2).valid, 1) expect(c.io.out.bits, 1) expect(c.io.in(1).ready, 1) expect(c.io.in(0).ready, 0) poke(c.io.out.ready, 0) expect(c.io.in(1).ready, 0) } val works = Driver(() => new MyRoutingArbiter(4)) { c => new MyRoutingArbiterTester(c) } assert(works) // Scala Code: if works == false, will throw an error println("SUCCESS!!") // Scala Code: if we get here, our tests passed! // - // <div id="container"><section id="accordion"><div> // <input type="checkbox" id="check-1" /> // <label for="check-1"><strong>Solution</strong></label> // <article> // <pre style="background-color:#f7f7f7"> // class MyRoutingArbiter(numChannels: Int) extends Module { // val io = IO(new Bundle { // val in = Vec(Flipped(Decoupled(UInt(8.W))), numChannels) // val out = Decoupled(UInt(8.W)) // } ) // // // YOUR CODE BELOW // io.out.valid := io.in.map(\_.valid).reduce(\_ || \_) // val channel = PriorityMux( // io.in.map(\_.valid).zipWithIndex.map { case (valid, index) => (valid, index.U) } // ) // io.out.bits := io.in(channel).bits // io.in.map(\_.ready).zipWithIndex.foreach { case (ready, index) => // ready := io.out.ready && channel === index.U // } // } // </pre></article></div></section></div> // --- // # You're done! // // [Return to the top.](#top)
3.3_higher-order_functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os

from PIL import Image


def png_to_jpg(source_folder, target_folder):
    """Convert every ``.png`` in *source_folder* to an RGB ``.jpg`` in *target_folder*.

    Both folder arguments are expected to end with a path separator; the
    target directory is created on demand and non-PNG files are skipped.
    """
    for filename in os.listdir(source_folder):
        if filename.endswith('.png'):
            # exist_ok=True makes check-and-create atomic. The original
            # check/except guard referenced `errno` without importing it,
            # so the race-condition fallback itself raised NameError.
            os.makedirs(target_folder, exist_ok=True)
            # JPEG has no alpha channel, hence the explicit RGB conversion.
            img = Image.open(source_folder + filename).convert('RGB')
            new_filename = filename[:-4] + '.jpg'
            img.save(target_folder + new_filename)


# Image conversion for Experiment 1: fixed total scale, varying start scale.
foldernames = ['cows', 'birds', 'balloons', 'zebra']
total_scale = 6
for foldername in foldernames:
    for scale in range(7):
        real_source_path = './raw/{}{}/real/'.format(foldername, total_scale)
        real_target_path = './processed/{}{}/real/'.format(foldername, total_scale)
        fake_source_path = './raw/{}{}/gen_start_scale={}/'.format(foldername, total_scale, scale)
        fake_target_path = './processed/{}{}/gen_start_scale={}/'.format(foldername, total_scale, scale)
        png_to_jpg(fake_source_path, fake_target_path)
        png_to_jpg(real_source_path, real_target_path)

# +
# Image conversion for Experiment 2: varying total number of scales.
foldernames = ['cows', 'birds', 'balloons', 'zebra']
total_scales = [2, 4, 6, 8]
fake_root_path = '../Output/RandomSamples/'
for foldername in foldernames:
    for scale in total_scales:
        real_source_path = f'./raw/{foldername}/real/'
        real_target_path = f'./processed/{foldername}{scale}/real/'
        fake_source_path = f'{fake_root_path}{foldername}{scale}/gen_start_scale={scale}/'
        fake_target_path = f'./processed/{foldername}{scale}/gen_start_scale={scale}/'
        png_to_jpg(fake_source_path, fake_target_path)
        png_to_jpg(real_source_path, real_target_path)
# -

# Image conversion for Experiment 3: cyclic generation runs.
foldernames = ['birds', 'cows', 'zebra']
start_scale = 2
cycles = range(1, 21)
fake_root_path = '../Output/RandomSamples/'
for foldername in foldernames:
    for cycle in cycles:
        fake_source_path = '{}{}_cyclic_{}/gen_start_scale={}/'.format(fake_root_path, foldername, cycle, start_scale)
        fake_target_path = './processed/{}_cyclic_{}/gen_start_scale={}/'.format(foldername, cycle, start_scale)
        real_source_path = './raw/reals/{}/'.format(foldername)
        real_target_path = './processed/{}_cyclic_{}/real/'.format(foldername, cycle)
        png_to_jpg(fake_source_path, fake_target_path)
        png_to_jpg(real_source_path, real_target_path)

# Image conversion for Experiment 4: scale-skipping runs.
foldernames = ['cows', 'birds', 'balloons', 'zebra']
start_scale = 6
skips = range(1, 6)
fake_root_path = '../Output/RandomSamples/'
for foldername in foldernames:
    for skip in skips:
        fake_source_path = '{}{}_skip_{}/gen_start_scale={}/'.format(fake_root_path, foldername, skip, start_scale)
        fake_target_path = './processed/{}_skip_{}/gen_start_scale={}/'.format(foldername, skip, start_scale)
        real_source_path = './raw/reals/{}/'.format(foldername)
        real_target_path = './processed/{}_skip_{}/real/'.format(foldername, skip)
        png_to_jpg(fake_source_path, fake_target_path)
        png_to_jpg(real_source_path, real_target_path)
SIFID/.ipynb_checkpoints/image_converter-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# NOTE(review): this notebook is Python 2 (print statements, xrange,
# generator .next()); it will not run under Python 3 as-is.

# %matplotlib inline
from skimage import io
from skimage.transform import resize, pyramid_gaussian
import numpy as np
import random
from sklearn import svm
from skimage.feature import hog
from sklearn.naive_bayes import GaussianNB
import os
from sklearn.externals import joblib
import time
from collections import defaultdict


class GestureRecognizer(object):
    """class to perform gesture recognition

    Pipeline: a binary hand/non-hand Naive Bayes detector scanned over the
    image in a sliding window (with a Gaussian pyramid for scale), followed
    by non-maximal suppression, then a multiclass linear SVM that labels the
    cropped hand with a gesture letter.
    """

    def __init__(self, data_directory):
        """
        data_directory : path like /home/sanket/mlproj/dataset/
        includes the dataset folder with '/'
        Initialize all your variables here
        """
        self.base_dir = data_directory
        # self.base_dir = os.path.abspath('.') + '/dataset/'
        self.win_size = 128      # side length (px) of the square detection window
        self.clf_gesture = None  # multiclass gesture classifier (LinearSVC)
        self.clf_hnh = None      # binary hand / non-hand classifier (GaussianNB)

    def IOU(self, boxA, boxB):
        # Intersection-over-Union of two [x1, y1, x2, y2] boxes.
        # Assumes the boxes overlap; for disjoint boxes the "+ 1" area
        # formula can go negative -- TODO confirm callers only rely on the
        # <= 0.5 threshold, where this is harmless.
        xA = max(boxA[0], boxB[0])
        yA = max(boxA[1], boxB[1])
        xB = min(boxA[2], boxB[2])
        yB = min(boxA[3], boxB[3])
        # compute the area of intersection rectangle
        interArea = (xB - xA + 1) * (yB - yA + 1)
        # compute the area of both the prediction and ground-truth
        # rectangles
        boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
        boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
        iou = interArea / float(boxAArea + boxBArea - interArea)
        # return the intersection over union value
        return iou

    def train(self, train_list):
        """
        train_list : list of users to use for training
        eg ["user_1", "user_2", "user_3"]
        The train function should train all your classifiers, both binary
        and multiclass on the given list of users
        """
        train_x_pos_ = []  # HOG features of ground-truth hand crops
        train_x_neg_ = []  # HOG features of random non-hand windows
        train_y = []       # gesture labels (ord of first char of filename)
        for user in train_list:
            # Per-user CSV: one header line, then "file,x1,y1,x2,y2" rows.
            csv_file = self.base_dir + user + '/' + user + '_loc.csv'
            with open(csv_file, 'r') as f:
                f.readline()  # skip header
                for line in f:
                    data = line.strip().split(',')
                    file_name = data[0]
                    x1, y1, x2, y2 = map(int, data[1:])
                    img = io.imread(self.base_dir + file_name, as_grey=True)
                    h, w = img.shape[:2]
                    # Positive sample: the annotated hand crop, resized to
                    # the fixed detection window before HOG.
                    imgg = img[y1:y2, x1:x2]
                    imgg = resize(imgg, (self.win_size, self.win_size))
                    imgg_hog = hog(imgg)
                    # Label is the first character of the image name,
                    # e.g. 'A0.jpg' -> ord('A').
                    label = ord(file_name.split('/')[1][0])
                    train_x_pos_.append(imgg_hog)
                    train_y.append(label)
                    # Mine 2 negative windows per image: random windows
                    # whose IOU with the ground truth is at most 0.5.
                    count = 0
                    A = [x1, y1, x2, y2]
                    while True:
                        x1_r = random.randrange(0, w - self.win_size)
                        y1_r = random.randrange(0, h - self.win_size)
                        x2_r = x1_r + self.win_size
                        y2_r = y1_r + self.win_size
                        if y2_r >= h or x2_r >= w:
                            continue
                        B = [x1_r, y1_r, x2_r, y2_r]
                        if self.IOU(A, B) <= 0.5:
                            train_x_neg_.append(hog(img[y1_r:y2_r, x1_r:x2_r]))
                            count += 1
                        if count >= 2:
                            break
        # Keep the feature matrices on the instance: hard_negative_mining
        # re-reads self.train_x_pos / self.train_x_neg later.
        self.train_x_pos = np.asarray(train_x_pos_)
        del train_x_pos_
        self.train_x_neg = np.asarray(train_x_neg_)
        del train_x_neg_
        train_y = np.asarray(train_y)
        # Multiclass gesture classifier: trained on positives only.
        self.clf_gesture = svm.LinearSVC()
        self.clf_gesture.fit(self.train_x_pos, train_y)
        score_gesture = self.clf_gesture.score(self.train_x_pos, train_y)
        print 'Training accuracy for gesture classifier : %f' %(score_gesture)
        # self.clf_hnh = svm.LinearSVC()
        # Binary detector uses GaussianNB so partial_fit can be used for
        # incremental hard-negative mining later.
        self.clf_hnh = GaussianNB()
        train_x_hnh = np.concatenate((self.train_x_pos , self.train_x_neg))
        train_y_hnh = np.asarray( [1] * len(self.train_x_pos) + [0] * len(self.train_x_neg))
        self.clf_hnh.partial_fit(train_x_hnh, train_y_hnh, classes = np.asarray([1,0]))
        score_hnh = self.clf_hnh.score(train_x_hnh, train_y_hnh)
        print 'Training accuracy for Hand/Non-hand classifier : %f' %(score_hnh)

    def test(self, test_list):
        """
        test_list : list of users to evaluate on, eg ["user_1", "user_2"]
        Builds positives/negatives exactly like train() and reports held-out
        accuracy of both classifiers on ground-truth crops (no detection).
        """
        if self.clf_hnh is None or self.clf_gesture is None:
            print 'Classifiers not trained'
            return
        test_x_pos = []
        test_x_neg = []
        test_y = []
        for user in test_list:
            csv_file = self.base_dir + user + '/' + user + '_loc.csv'
            with open(csv_file, 'r') as f:
                f.readline()  # skip header
                for line in f:
                    data = line.strip().split(',')
                    file_name = data[0]
                    x1, y1, x2, y2 = map(int, data[1:])
                    img = io.imread(self.base_dir + file_name, as_grey=True)
                    h, w = img.shape[:2]
                    imgg = img[y1:y2, x1:x2]
                    imgg = resize(imgg, (self.win_size, self.win_size))
                    imgg_hog = hog(imgg)
                    label = ord(file_name.split('/')[1][0])
                    test_x_pos.append(imgg_hog)
                    test_y.append(label)
                    # Same 2-random-negatives-per-image sampling as train().
                    count = 0
                    A = [x1, y1, x2, y2]
                    while True:
                        x1_r = random.randrange(0, w - self.win_size)
                        y1_r = random.randrange(0, h - self.win_size)
                        x2_r = x1_r + self.win_size
                        y2_r = y1_r + self.win_size
                        if y2_r >= h or x2_r >= w:
                            continue
                        B = [x1_r, y1_r, x2_r, y2_r]
                        if self.IOU(A, B) <= 0.5:
                            test_x_neg.append(hog(img[y1_r:y2_r, x1_r:x2_r]))
                            count += 1
                        if count >= 2:
                            break
        test_x_pos = np.asarray(test_x_pos)
        test_x_neg = np.asarray(test_x_neg)
        test_y = np.asarray(test_y)
        score_gesture = self.clf_gesture.score(test_x_pos, test_y)
        print 'Testing accuracy for gesture classifier : %f' %(score_gesture)
        test_x_hnh = np.concatenate((test_x_pos, test_x_neg))
        test_y_hnh = np.asarray( [1] * len(test_x_pos) + [0] * len(test_x_neg) )
        score_hnh = self.clf_hnh.score(test_x_hnh,test_y_hnh)
        print 'Testing accuracy for Hand/Non-hand classifier : %f' %(score_hnh)

    def hard_negative_mining(self, no_iter, threshold):
        # Repeatedly find stored negatives that the detector still calls
        # "hand" (false positives) and partial_fit them back in as label 0,
        # until at most `threshold` false positives remain or `no_iter`
        # iterations have been done.
        if self.clf_hnh is None or self.clf_gesture is None:
            print 'Classifiers not trained'
            return
        for i in xrange(no_iter):
            count = 0
            FP = []  # current false positives among the stored negatives
            for data in self.train_x_neg:
                if self.clf_hnh.predict([data])[0] == 1:
                    count += 1
                    FP.append(data)
            if i % 10 == 0:
                print i, count  # progress: iteration, remaining FPs
            if count <= threshold:
                break
            self.clf_hnh.partial_fit(np.asarray(FP), np.asarray([0] * len(FP)))
        Y = np.asarray([1] * self.train_x_pos.shape[0] + [0] * self.train_x_neg.shape[0])
        score_ = self.clf_hnh.score(np.concatenate((self.train_x_pos, self.train_x_neg)), Y)
        print 'Accuracy for Hand/Non-hand classifier after Hard Negative Mining : %f' %(score_)

    def sliding_window(self, img):
        # Scan a win_size x win_size window over the image with a fixed
        # stride; collect boxes whose hand-probability exceeds 0.7, grouped
        # by probability, and return the group with the highest probability.
        # NOTE(review): raises ValueError (max of empty dict) when no window
        # clears 0.7 -- callers wrap detection in try/except.
        # conf_map = np.zeros(img.shape)
        h, w = img.shape[:2]
        stride = 10
        win_size = self.win_size
        bbox = defaultdict(list)  # probability -> list of [y1, x1, y2, x2]
        # Y is for rows and X is for the column
        print 'Dimension test : ', (h-win_size > 10)
        for row in xrange(0, h-win_size+1, stride):
            for col in xrange(0, w-win_size+1, stride):
                imgg = img[row:row+win_size, col:col+win_size]
                hog_ = hog(imgg)
                # 1th index is giving the prob. of the class 'hand'
                class_ = self.clf_hnh.predict_proba(np.asarray([hog_]))[0][1]
                if class_ > 0.7:
                    bbox[class_].append([row, col, row + win_size, col + win_size])
        return bbox[max(bbox.keys())]

    def non_maximal_supression(self, boxes, overlapThresh):
        # Greedy non-maximal suppression: keep a box, discard every other
        # box whose overlap with it (relative to the other box's area)
        # exceeds overlapThresh. `boxes` is an (N, 4) array.
        if len(boxes) == 0:
            return []
        pick = []  # indices of boxes to keep
        x1 = boxes[:, 0]
        y1 = boxes[:, 1]
        x2 = boxes[:, 2]
        y2 = boxes[:, 3]
        area = (x2 - x1 + 1) * (y2 - y1 + 1)
        idxs = np.argsort(y2)  # process boxes ordered by bottom coordinate
        while len(idxs) > 0:
            last = len(idxs) - 1
            i = idxs[last]
            pick.append(i)
            suppress = [last]
            for pos in xrange(0, last):
                j = idxs[pos]
                # Overlap rectangle between box i and box j.
                xx1 = max(x1[i], x1[j])
                yy1 = max(y1[i], y1[j])
                xx2 = min(x2[i], x2[j])
                yy2 = min(y2[i], y2[j])
                w = max(0, xx2 - xx1 + 1)
                h = max(0, yy2 - yy1 + 1)
                overlap = float(w * h) / area[j]
                if overlap > overlapThresh:
                    suppress.append(pos)
            idxs = np.delete(idxs, suppress)
        return boxes[pick]

    def get_bbox(self, image):
        # Detect the hand: run the sliding window on the top 3 levels of a
        # Gaussian pyramid, rescale boxes back to the original resolution,
        # apply NMS, and return the single best box.
        if self.clf_hnh is None or self.clf_gesture is None:
            print 'Classifiers not trained'
            return
        downscale_ = 1.2
        py = pyramid_gaussian(image, downscale = downscale_)
        py_img = [py.next(), py.next(), py.next()]  # first 3 pyramid levels
        bbox = []
        for i in xrange(len(py_img)):
            boxes = self.sliding_window(py_img[i])
            # Level i is downscaled by downscale_**i, so multiply the box
            # coordinates back up to image coordinates.
            factor = downscale_**i
            scale_box = lambda x : int(x*factor)
            boxes = map(lambda x : map(scale_box, x), boxes)
            bbox += boxes
        pos = self.non_maximal_supression(np.asarray(bbox), 0.7)
        print 'pos len', len(pos)
        return pos[0]

    def recognize_gesture(self, image):
        """
        image : a 320x240 pixel RGB image in the form of a numpy array
        This function should locate the hand and classify the gesture.
        returns : (position, label)
        position : a tuple of (x1,y1,x2,y2) coordinates of bounding box
        x1,y1 is top left corner, x2,y2 is bottom right
        label : a single character. eg 'A' or 'B'
        """
        # NOTE(review): boxes from sliding_window are [row, col, row2, col2],
        # i.e. (y, x) ordered -- the docstring's (x1,y1,x2,y2) naming does
        # not match the indexing below; confirm against callers.
        try:
            position = self.get_bbox(image)
            imgg = image[position[0]:position[2], position[1] : position[3]]
            imgg = resize(imgg, (self.win_size, self.win_size))
            label = chr(self.clf_gesture.predict(np.asarray( [hog(imgg)]))[0])
            print 'POS : ', position, 'label : ', label
            return position, label
        except Exception as e:
            # Detection failure (e.g. no window above threshold) returns
            # None implicitly; the error is only printed.
            print e

    def translate_video(self, image_array):
        """
        image_array : a list of images as described above. can be of
        arbitrary length
        This function classifies the video into a 5 character string
        returns : word (a string of 5 characters)
        no two consecutive characters are identical
        """
        # NOTE(review): unimplemented stub -- `word` is never assigned, so
        # calling this raises NameError.
        return word

    def test_labelled_images(self):
        # End-to-end evaluation: run full detection + classification on
        # annotated images and report mean IOU per user plus overall
        # gesture accuracy.
        if self.clf_hnh is None or self.clf_gesture is None:
            print 'Classifiers not trained'
            return
        # test_list = [3,4,5,6,7,9,10,11,12,13,14,15,16,17,18,19]
        test_list = [3,4,5,6,7]
        test_list = map(lambda x : 'user_' + str(x), test_list)
        count = 0  # correctly-labelled images
        total = 0  # images evaluated
        for user in test_list:
            no_user = 0
            IOU_val = 0.0
            print user
            csv_file = self.base_dir + user + '/' + user + '_loc.csv'
            with open(csv_file, 'r') as f:
                f.readline()  # skip header
                for line in f:
                    data = line.strip().split(',')
                    file_name = data[0]
                    x1, y1, x2, y2 = map(int, data[1:])
                    img = io.imread(self.base_dir + file_name, as_grey=True)
                    total += 1
                    h, w = img.shape[:2]
                    label = file_name.split('/')[1][0]
                    pos, label_ = self.recognize_gesture(img)
                    IOU_val += self.IOU([x1,y1,x2,y2], pos)
                    no_user += 1
                    if label == label_:
                        count += 1
            print user, 'IOU : ', IOU_val/no_user
        score_gesture = (count*1.0)/(total*1.0)
        print 'Testing accuracy for gesture classifier class : %f' %(score_gesture)

    def store_clfs(self):
        # Persist both classifiers, keyed by the current wall-clock time
        # (HH:MM:SS) so repeated runs do not overwrite each other.
        label = time.ctime().split()[3]
        path = self.base_dir + 'clf_models/' + label
        joblib.dump(self.clf_hnh, path + '_hnh.pkl')
        joblib.dump(self.clf_gesture, path + '_gesture.pkl')

    def load_clfs(self, label):
        # Restore classifiers saved by store_clfs; `label` is the HH:MM:SS
        # string used at save time.
        path = self.base_dir + 'clf_models/' + label
        self.clf_hnh = joblib.load(path + '_hnh.pkl')
        self.clf_gesture = joblib.load(path + '_gesture.pkl')


# --- Interactive driver cells below: load saved models, run a single
# --- detection, then retrain/evaluate from scratch.

G = GestureRecognizer(os.path.abspath('.') + '/dataset/')
print G.base_dir

G.load_clfs('06:18:36')

img = io.imread('/home/ayush/GPU_ML/ML/workspace/project/complete_data/dataset/user_3/B0.jpg', as_grey=True)
# print img.shape
pos, label = G.recognize_gesture(img)
# print pos, label
io.imshow(img[pos[0]:pos[2],pos[1]:pos[3]])

G.clf_hnh.predict_proba([hog(resize(img[139:289,12:162], (128,128)))])[0]

G.clf_hnh.classes_

G.IOU(pos, [12,139,162,289])

io.imshow(img[12:162,139:289])

# +
user = [3,4,5,6,7,9,10,11,12,13,14,15]
user = map(lambda x : 'user_' + str(x), user)
print user
user_test = [16,17,18,19]
user_test = map(lambda x : 'user_' + str(x), user_test)
print user_test
# -

G.train(user)

G.hard_negative_mining(100,5)

G.test(user_test)

G.store_clfs()

# NOTE(review): the next cell is a syntax error (`img[,]`) -- it looks like
# an unfinished slice; this cell fails when executed.
io.imshow(img[,])

G.test_labelled_images()

del G
complete_data/Integrated_revision2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Задание 2.1 - Нейронные сети
#
# В этом задании вы реализуете и натренируете настоящую нейроную сеть своими руками!
#
# В некотором смысле это будет расширением прошлого задания - нам нужно просто составить несколько линейных классификаторов вместе!
#
# <img src="https://i.redd.it/n9fgba8b0qr01.png" alt="Stack_more_layers" width="400px"/>

# +
import numpy as np
import matplotlib.pyplot as plt

# %matplotlib inline

# %load_ext autoreload
# %autoreload 2
# -

# NOTE(review): all of the imports below come from the assignment's sibling
# modules (dataset.py, layers.py, model.py, trainer.py, optim.py, metrics.py);
# the student is expected to fill in the TODOs in those files.
from dataset import load_svhn, random_split_train_val
from gradient_check import check_layer_gradient, check_layer_param_gradient, check_model_gradient
from layers import FullyConnectedLayer, ReLULayer
from model import TwoLayerNet
from trainer import Trainer, Dataset
from optim import SGD, MomentumSGD
from metrics import multiclass_accuracy

# # Загружаем данные
#
# И разделяем их на training и validation.

# +
def prepare_for_neural_network(train_X, test_X):
    """Flatten images, scale pixels to [0, 1] and center on the train mean.

    Returns new float arrays (train_flat, test_flat). The test set is
    centered with the *train* mean so that no test-set statistics leak
    into training.
    """
    train_flat = train_X.reshape(train_X.shape[0], -1).astype(float) / 255.0
    test_flat = test_X.reshape(test_X.shape[0], -1).astype(float) / 255.0

    # Subtract mean
    mean_image = np.mean(train_flat, axis = 0)
    train_flat -= mean_image
    test_flat -= mean_image

    return train_flat, test_flat

train_X, train_y, test_X, test_y = load_svhn("data", max_train=10000, max_test=1000)
train_X, test_X = prepare_for_neural_network(train_X, test_X)

# Split train into train and val
train_X, train_y, val_X, val_y = random_split_train_val(train_X, train_y, num_val = 1000)
# -

# # Как всегда, начинаем с кирпичиков
#
# Мы будем реализовывать необходимые нам слои по очереди. Каждый слой должен реализовать:
# - прямой проход (forward pass), который генерирует выход слоя по входу и запоминает необходимые данные
# - обратный проход (backward pass), который получает градиент по выходу слоя и вычисляет градиент по входу и по параметрам
#
# Начнем с ReLU, у которого параметров нет.

# +
# TODO: Implement ReLULayer layer in layers.py
# Note: you'll need to copy implementation of the gradient_check function from the previous assignment

X = np.array([[1, -2, 3],
              [-1, 2, 0.1]
              ])

assert check_layer_gradient(ReLULayer(), X)
# -

# А теперь реализуем полносвязный слой (fully connected layer), у которого будет два массива параметров: W (weights) и B (bias).
#
# Все параметры наши слои будут использовать для параметров специальный класс `Param`, в котором будут храниться значения параметров и градиенты этих параметров, вычисляемые во время обратного прохода.
#
# Это даст возможность аккумулировать (суммировать) градиенты из разных частей функции потерь, например, из cross-entropy loss и regularization loss.

# TODO: Implement FullyConnected layer forward and backward methods
assert check_layer_gradient(FullyConnectedLayer(3, 4), X)

# TODO: Implement storing gradients for W and B
assert check_layer_param_gradient(FullyConnectedLayer(3, 4), X, 'W')
assert check_layer_param_gradient(FullyConnectedLayer(3, 4), X, 'B')

# ## Создаем нейронную сеть
#
# Теперь мы реализуем простейшую нейронную сеть с двумя полносвязным слоями и нелинейностью ReLU. Реализуйте функцию `compute_loss_and_gradients`, она должна запустить прямой и обратный проход через оба слоя для вычисления градиентов.
#
# Не забудьте реализовать очистку градиентов в начале функции.
# + # TODO: In model.py, implement compute_loss_and_gradients function model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 3, reg = 0) loss = model.compute_loss_and_gradients(train_X[:2], train_y[:2]) # TODO Now implement backward pass and aggregate all of the params check_model_gradient(model, train_X[:2], train_y[:2]) # - # Теперь добавьте к модели регуляризацию - она должна прибавляться к loss и делать свой вклад в градиенты. # + # TODO Now implement l2 regularization in the forward and backward pass / done model_with_reg = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 3, reg = 1e1) loss_with_reg = model_with_reg.compute_loss_and_gradients(train_X[:2], train_y[:2]) assert loss_with_reg > loss and not np.isclose(loss_with_reg, loss), \ "Loss with regularization (%2.4f) should be higher than without it (%2.4f)!" % (loss, loss_with_reg) check_model_gradient(model_with_reg, train_X[:2], train_y[:2]) # - # Также реализуем функцию предсказания (вычисления значения) модели на новых данных. # # Какое значение точности мы ожидаем увидеть до начала тренировки? # + # Finally, implement predict function! # TODO: Implement predict function # What would be the value we expect? cut = 30 multiclass_accuracy(model_with_reg.predict(train_X[:cut]), train_y[:cut]) # - # 0.1 т.к. сеть еще нетренирована # # Допишем код для процесса тренировки # # Если все реализовано корректно, значение функции ошибки должно уменьшаться с каждой эпохой, пусть и медленно. Не беспокойтесь пока про validation accuracy. 
# + model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e1) dataset = Dataset(train_X, train_y, val_X, val_y) trainer = Trainer(model, dataset, SGD(), learning_rate = 1e-2) # TODO Implement missing pieces in Trainer.fit function # You should expect loss to go down every epoch, even if it's slow loss_history, train_history, val_history = trainer.fit() # - plt.plot(loss_history) # reg = 1e1 plt.plot(train_history) plt.plot(val_history) # + model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-1) dataset = Dataset(train_X, train_y, val_X, val_y) trainer = Trainer(model, dataset, SGD(), learning_rate = 1e-2) # TODO Implement missing pieces in Trainer.fit function # You should expect loss to go down every epoch, even if it's slow loss_history, train_history, val_history = trainer.fit() # - plt.plot(loss_history) # reg = 1e-1 # # Улучшаем процесс тренировки # # Мы реализуем несколько ключевых оптимизаций, необходимых для тренировки современных нейросетей. # ## Уменьшение скорости обучения (learning rate decay) # # Одна из необходимых оптимизаций во время тренировки нейронных сетей - постепенное уменьшение скорости обучения по мере тренировки. # # Один из стандартных методов - уменьшение скорости обучения (learning rate) каждые N эпох на коэффициент d (часто называемый decay). Значения N и d, как всегда, являются гиперпараметрами и должны подбираться на основе эффективности на проверочных данных (validation data). # # В нашем случае N будет равным 1. 
# + # TODO Implement learning rate decay inside Trainer.fit method # Decay should happen once per epoch model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-1) dataset = Dataset(train_X, train_y, val_X, val_y) trainer = Trainer(model, dataset, SGD(), learning_rate_decay=0.99) initial_learning_rate = trainer.learning_rate loss_history, train_history, val_history = trainer.fit() assert trainer.learning_rate < initial_learning_rate, "Learning rate should've been reduced" assert trainer.learning_rate > 0.5*initial_learning_rate, "Learning rate shouldn'tve been reduced that much!" # - # # Накопление импульса (Momentum SGD) # # Другой большой класс оптимизаций - использование более эффективных методов градиентного спуска. Мы реализуем один из них - накопление импульса (Momentum SGD). # # Этот метод хранит скорость движения, использует градиент для ее изменения на каждом шаге, и изменяет веса пропорционально значению скорости. # (Физическая аналогия: Вместо скорости градиенты теперь будут задавать ускорение, но будет присутствовать сила трения.) # # ``` # velocity = momentum * velocity - learning_rate * gradient # w = w + velocity # ``` # # `momentum` здесь коэффициент затухания, который тоже является гиперпараметром (к счастью, для него часто есть хорошее значение по умолчанию, типичный диапазон -- 0.8-0.99). # # Несколько полезных ссылок, где метод разбирается более подробно: # http://cs231n.github.io/neural-networks-3/#sgd # https://distill.pub/2017/momentum/ # + # TODO: Implement MomentumSGD.update function in optim.py model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-1) dataset = Dataset(train_X, train_y, val_X, val_y) trainer = Trainer(model, dataset, MomentumSGD(), learning_rate=1e-4, learning_rate_decay=0.99) # You should see even better results than before! loss_history, train_history, val_history = trainer.fit() # - # # Ну что, давайте уже тренировать сеть! 
# ## Последний тест - переобучимся (overfit) на маленьком наборе данных # # Хороший способ проверить, все ли реализовано корректно - переобучить сеть на маленьком наборе данных. # Наша модель обладает достаточной мощностью, чтобы приблизить маленький набор данных идеально, поэтому мы ожидаем, что на нем мы быстро дойдем до 100% точности на тренировочном наборе. # # Если этого не происходит, то где-то была допущена ошибка! # + data_size = 15 model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-1) dataset = Dataset(train_X[:data_size], train_y[:data_size], val_X[:data_size], val_y[:data_size]) trainer = Trainer(model, dataset, SGD(), learning_rate=1e-1, num_epochs=150, batch_size=5) # You should expect this to reach 1.0 training accuracy loss_history, train_history, val_history = trainer.fit() # - plt.plot(train_history) plt.plot(val_history) # Теперь найдем гипепараметры, для которых этот процесс сходится быстрее. # Если все реализовано корректно, то существуют параметры, при которых процесс сходится в **20** эпох или еще быстрее. # Найдите их! # + # Now, tweak some hyper parameters and make it train to 1.0 accuracy in 20 epochs or less model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 300, reg = 1e-6) dataset = Dataset(train_X[:data_size], train_y[:data_size], val_X[:data_size], val_y[:data_size]) # TODO: Change any hyperparamers or optimizators to reach training accuracy in 20 epochs trainer = Trainer(model, dataset, SGD(), learning_rate=1e-1, num_epochs=20, batch_size=10) loss_history, train_history, val_history = trainer.fit() # - # ez :) # # Итак, основное мероприятие! # # Натренируйте лучшую нейросеть! Можно добавлять и изменять параметры, менять количество нейронов в слоях сети и как угодно экспериментировать. # # Добейтесь точности лучше **60%** на validation set. 
# + # Let's train the best one-hidden-layer network we can learning_rates = [1e-1, 1e-2, 1e-3] reg_strength = [1e-3, 1e-5] hidden_layer_sizes = [32, 64, 128] num_epochs = 16 batch_size = [64, 128, 256] best_classifier = None best_val_accuracy = 0 loss_history = [] train_history = [] val_history = [] dataset = Dataset(train_X, train_y, val_X, val_y) for lr in learning_rates: for rs in reg_strength: for ls in hidden_layer_sizes: for bs in batch_size: model = TwoLayerNet(n_input=train_X.shape[1], n_output=10, hidden_layer_size=ls, reg=rs) trainer = Trainer(model, dataset, MomentumSGD(), learning_rate=lr, num_epochs=num_epochs, batch_size=bs) temp_loss_history, temp_train_history, temp_val_history = trainer.fit() if temp_val_history[-1] > best_val_accuracy: best_classifier = model best_val_accuracy = temp_val_history[-1] loss_history = temp_loss_history.copy() train_history = temp_train_history.copy() val_history = temp_val_history.copy() print('best validation accuracy achieved: %f' % best_val_accuracy) # - plt.figure(figsize=(15, 7)) plt.subplot(211) plt.title("Loss") plt.plot(loss_history) plt.subplot(212) plt.title("Train/validation accuracy") plt.plot(train_history) plt.plot(val_history) # # Как обычно, посмотрим, как наша лучшая модель работает на тестовых данных test_pred = best_classifier.predict(test_X) test_accuracy = multiclass_accuracy(test_pred, test_y) print('Neural net test set accuracy: %f' % (test_accuracy, ))
assignments/assignment2/Neural Network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Star shaped qubit # # We'll be creating a 2D design and adding a star shaped qubit QComponent. # # # There's a central star shaped component. This component is essentially a circle with trpezoids subtracted from it. A T-junction qubit is attached to this central shape. Then there are four coupling resonators and one readout resonator present in the default setting. The shape of the central component and the number coupling resonators are customizable. Let's try out a few diferent ways to create and render this component. # So, let us dive right in. For convenience, let's begin by enabling # automatic reloading of modules when they change. # %load_ext autoreload # %autoreload 2 # Next, we import the relevant modules. You can add new modules as and when needed. import qiskit_metal as metal import numpy as np from math import * from qiskit_metal import designs, draw, Dict from qiskit_metal.qlibrary.core import BaseQubit, QComponent # Next we import the GUI from qiskit_metal import MetalGUI # A QDesign class must be instantiated each time a new quantum circuit design is created. The design class `DesignPlanar` is best for 2D circuit designs. design = designs.DesignPlanar() gui = MetalGUI(design) gui.rebuild() # # A Star Qubit # # You can create a ready-made star qubit from the QComponent Library, qiskit_metal.qlibrary.qubits. star_qubit.py is the file containing our qubit so StarQubit is the module we import. # from qiskit_metal.qlibrary.qubits.star_qubit import StarQubit # The following default_options can be overridden by user. StarQubit.get_template_options(design) # To force overwrite a QComponent with an existing name. # This is useful when re-running cells in a notebook. 
design.overwrite_enabled = True # Let us place the qubit at (x,y) =(5,5), and change the default rotation values. As this design is meant to have up to 5 contacts including one readout and up to 4 coupling resonators, these angles should be 72 degrees apart. However, the number of connectors may be changed to any value between 0 and 4. # + qubit_options = dict( pos_x = '7um', pos_y = '7um', number_of_connectors = 4 # Change the number of connectors ) # Create a new Concentric Transmon object with name 'Q1' q1 = StarQubit(design, 'Star', options=qubit_options) gui.rebuild() # rebuild the design and plot gui.autoscale() #resize GUI to see QComponent gui.zoom_on_components(['Star']) #Can also gui.zoom_on_components([q1.name]) # + #Let's see what the Q1 object looks like q1 #print Q1 information # - #Save screenshot as a .png formatted file. gui.screenshot() # + # Screenshot the canvas only as a .png formatted file. gui.figure.savefig('shot.png') from IPython.display import Image, display _disp_ops = dict(width=500) display(Image('shot.png', **_disp_ops)) # - # # Closing the Qiskit Metal GUI # + # gui.main_window.close() # -
tutorials/Appendix C Circuit examples/A. Qubits/11-Star_shaped_qubit.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/sthalles/SimCLR/blob/simclr-refactor/feature_eval/mini_batch_logistic_regression_evaluator.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="YUemQib7ZE4D"
import torch
import sys
import numpy as np
import os
import yaml
import matplotlib.pyplot as plt
import torchvision

# + colab={"base_uri": "https://localhost:8080/"} id="WSgRE1CcLqdS" outputId="48a2ae15-f672-495b-8d43-9a23b85fa3b8"
# !pip install gdown

# + id="NOIJEui1ZziV"
def get_file_id_by_model(folder_name):
    """Return the Google Drive file id of a pretrained SimCLR checkpoint.

    Unknown names return the sentinel string "Model not found." (kept for
    backward compatibility with the cells below, which just print it).
    """
    file_id = {'resnet18_100-epochs_stl10': '14_nH2FkyKbt61cieQDiSbBVNP8-gtwgF',
               'resnet18_100-epochs_cifar10': '1lc2aoVtrAetGn0PnTkOyFzPCIucOJq7C',
               'resnet50_50-epochs_stl10': '1ByTKAUsdm_X7tLcii6oAEl5qFRqRMZSu'}
    return file_id.get(folder_name, "Model not found.")

# + colab={"base_uri": "https://localhost:8080/"} id="G7YMxsvEZMrX" outputId="59475430-69d2-45a2-b61b-ae755d5d6e88"
folder_name = 'resnet50_50-epochs_stl10'
file_id = get_file_id_by_model(folder_name)
print(folder_name, file_id)

# + colab={"base_uri": "https://localhost:8080/"} id="PWZ8fet_YoJm" outputId="fbaeb858-221b-4d1b-dd90-001a6e713b75"
# download and extract model files
os.system('gdown https://drive.google.com/uc?id={}'.format(file_id))
os.system('unzip {}'.format(folder_name))
# !ls

# + id="3_nypQVEv-hn"
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torchvision import datasets

# + colab={"base_uri": "https://localhost:8080/"} id="lDfbL3w_Z0Od" outputId="7532966e-1c4a-4641-c928-4cda14c53389"
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Using device:", device)

# + id="BfIPl0G6_RrT"
def get_stl10_data_loaders(download, shuffle=False, batch_size=256):
    """Build STL10 train/test DataLoaders (test batches are twice as large)."""
    train_dataset = datasets.STL10('./data', split='train', download=download,
                                   transform=transforms.ToTensor())
    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              num_workers=0, drop_last=False, shuffle=shuffle)
    test_dataset = datasets.STL10('./data', split='test', download=download,
                                  transform=transforms.ToTensor())
    test_loader = DataLoader(test_dataset, batch_size=2*batch_size,
                             num_workers=10, drop_last=False, shuffle=shuffle)
    return train_loader, test_loader


def get_cifar10_data_loaders(download, shuffle=False, batch_size=256):
    """Build CIFAR10 train/test DataLoaders (test batches are twice as large)."""
    train_dataset = datasets.CIFAR10('./data', train=True, download=download,
                                     transform=transforms.ToTensor())
    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              num_workers=0, drop_last=False, shuffle=shuffle)
    test_dataset = datasets.CIFAR10('./data', train=False, download=download,
                                    transform=transforms.ToTensor())
    test_loader = DataLoader(test_dataset, batch_size=2*batch_size,
                             num_workers=10, drop_last=False, shuffle=shuffle)
    return train_loader, test_loader

# + id="6N8lYkbmDTaK"
with open(os.path.join('./config.yml')) as file:
    # BUGFIX: yaml.load() without an explicit Loader has been deprecated since
    # PyYAML 5.1 and raises TypeError on PyYAML >= 6.  The cells below access
    # `config.arch` / `config.dataset_name` as attributes, so config.yml
    # evidently stores a Python object tag and needs the unsafe loader
    # (yaml.safe_load would not reconstruct it).  Only load checkpoints from a
    # source you trust — UnsafeLoader can execute arbitrary constructors.
    config = yaml.load(file, Loader=yaml.UnsafeLoader)

# + id="a18lPD-tIle6"
# Rebuild the encoder architecture named in the checkpoint's config.
if config.arch == 'resnet18':
    model = torchvision.models.resnet18(pretrained=False, num_classes=10).to(device)
elif config.arch == 'resnet50':
    model = torchvision.models.resnet50(pretrained=False, num_classes=10).to(device)

# + id="4AIfgq41GuTT"
checkpoint = torch.load('checkpoint_0040.pth.tar', map_location=device)
state_dict = checkpoint['state_dict']

# Strip the 'backbone.' prefix from the SimCLR encoder weights, dropping the
# projection-head ('backbone.fc') entries, so keys match a plain ResNet.
for k in list(state_dict.keys()):
    if k.startswith('backbone.'):
        if k.startswith('backbone') and not k.startswith('backbone.fc'):
            # remove prefix
            state_dict[k[len("backbone."):]] = state_dict[k]
        del state_dict[k]

# + id="VVjA83PPJYWl"
# Only the freshly initialised linear classifier should be missing.
log = model.load_state_dict(state_dict, strict=False)
assert log.missing_keys == ['fc.weight', 'fc.bias']

# + colab={"base_uri": "https://localhost:8080/", "height": 117, "referenced_widgets": ["149b9ce8fb68473a837a77431c12281a", "88cd3db2831e4c13a4a634709700d6b2", "a88c31d74f5c40a2b24bcff5a35d216c", "60c6150177694717a622936b830427b5", "dba019efadee4fdc8c799f309b9a7e70", "5901c2829a554c8ebbd5926610088041", "957362a11d174407979cf17012bf9208", "a4f82234388e4701a02a9f68a177193a"]} id="_GC0a14uWRr6" outputId="4c2558db-921c-425e-f947-6cc746d8c749"
if config.dataset_name == 'cifar10':
    train_loader, test_loader = get_cifar10_data_loaders(download=True)
elif config.dataset_name == 'stl10':
    train_loader, test_loader = get_stl10_data_loaders(download=True)
print("Dataset:", config.dataset_name)

# + id="pYT_KsM0Mnnr"
# freeze all layers but the last fc
for name, param in model.named_parameters():
    if name not in ['fc.weight', 'fc.bias']:
        param.requires_grad = False

parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
assert len(parameters) == 2  # fc.weight, fc.bias

# + id="aPVh1S_eMRDU"
# Only the unfrozen fc parameters receive gradients, so passing
# model.parameters() here still trains just the linear head.
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4, weight_decay=0.0008)
criterion = torch.nn.CrossEntropyLoss().to(device)

# + id="edr6RhP2PdVq"
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res

# + colab={"base_uri": "https://localhost:8080/"} id="qOder0dAMI7X" outputId="5f723b91-5a5e-43eb-ca01-a9b5ae2f1346"
# Linear-evaluation loop: train the fc head, then report top-1/top-5 test accuracy.
epochs = 100
for epoch in range(epochs):
    top1_train_accuracy = 0
    for counter, (x_batch, y_batch) in enumerate(train_loader):
        x_batch = x_batch.to(device)
        y_batch = y_batch.to(device)

        logits = model(x_batch)
        loss = criterion(logits, y_batch)
        top1 = accuracy(logits, y_batch, topk=(1,))
        top1_train_accuracy += top1[0]

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    top1_train_accuracy /= (counter + 1)
    top1_accuracy = 0
    top5_accuracy = 0
    for counter, (x_batch, y_batch) in enumerate(test_loader):
        x_batch = x_batch.to(device)
        y_batch = y_batch.to(device)

        logits = model(x_batch)

        top1, top5 = accuracy(logits, y_batch, topk=(1, 5))
        top1_accuracy += top1[0]
        top5_accuracy += top5[0]

    top1_accuracy /= (counter + 1)
    top5_accuracy /= (counter + 1)
    print(f"Epoch {epoch}\tTop1 Train accuracy {top1_train_accuracy.item()}\tTop1 Test accuracy: {top1_accuracy.item()}\tTop5 test acc: {top5_accuracy.item()}")

# + id="dtYqHZirMNZk"
feature_eval/mini_batch_logistic_regression_evaluator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gabrielvieiraf/ProjetosPython/blob/master/GoogleColab/MachineLearning/Machine_Learning04.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="Fs2tri8QoLZA"
# ># Iniciando com Machine Learning 04
# >### Introdução a sistemas de recomendações

# + [markdown] id="D0xHDPWcofmD"
# > ## Importando dados do Google Drive
# Utilizaremos a base de dados disponível para download em:
#
#
# https://grouplens.org/datasets/movielens/latest/

# + colab={"base_uri": "https://localhost:8080/"} id="l0X_ueiMl8Vc" outputId="322e18a4-37b9-4dcf-c098-165eeea4439f"
# Allow importing data from Google Drive
from google.colab import drive
drive.mount('/content/drive')

# + [markdown] id="nahZac6ho24h"
# > ## Iniciando visualização com Pandas
# > Em primeiro momento, vamos importar os filmes

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="QSridjHHo7LB" outputId="46a63edc-97de-4a1b-fc72-402fdf665a07"
import pandas as pd

# Path to the csv data file
csv = '/content/drive/My Drive/Colab Notebooks/Alura/filme_100mil/movies.csv'
filmes = pd.read_csv(csv)
filmes.head()

# + [markdown] id="Ya21xJuHpGzG"
# > ### Traduzindo Colunas
#
# Como queremos trazer maior facilidade de visualização de dados, vamos traduzir as colunas de nosso dataset, de uma maneira bem simples, utilizando o próprio pandas

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="ILOnuEoApWb4" outputId="782bbd10-60dd-4498-85a5-fa4ab68fead6"
filmes.columns = ['id_filme','titulo','genero']
# Use the movie id as the dataframe index
filmes = filmes.set_index("id_filme")
filmes.head()

# + [markdown] id="cSgoDRN8rLLb"
# > ## Prosseguindo com o Pandas
# > Agora iremos importar também as notas
#

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="tvZL339LrTMZ" outputId="e939a251-4fff-4ba4-e60c-0451039e35a1"
# Path to the csv data file
csv = '/content/drive/My Drive/Colab Notebooks/Alura/filme_100mil/ratings.csv'
notas = pd.read_csv(csv)
notas.head()

# + [markdown] id="n1wP0zpBsARK"
# > ### Traduzindo Colunas
#
# Do mesmo modo como fizemos com os filmes, vamos traduzir as colunas das notas

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="YUFMBKeDsIxL" outputId="62e79eff-b2ef-4541-9980-0ba8c9229314"
notas.columns = ['id_usuario','id_filme','nota','data']
notas.head()

# + [markdown] id="i4MeR3OXstQJ"
# > ## Primeira Tentativa de Recomendação
# >Verificando a popularidade de filmes para um usuário com dados desconhecidos

# + colab={"base_uri": "https://localhost:8080/"} id="xA9e0ZADs9pY" outputId="f2dddb4a-d6a9-402c-e321-d595a8851861"
# Number of ratings per movie id
total_votos = notas['id_filme'].value_counts()
total_votos

# + [markdown] id="8xU3pV66uwUK"
# > ### Verificando Número de Votos por Filme
#
# Isso nos ajudará a conhecer os filmes mais famosos para recomendar ao usuário

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="EGGE21dpthYA" outputId="03a3ac46-37b1-469d-fd6d-d3eeb8a6fcf3"
# value_counts is indexed by id_filme, so it aligns with the filmes index
filmes['total_votos'] = total_votos
filmes.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="SxZJNrPxvHGm" outputId="f8db59bf-eacb-4ff3-88ad-31d721d78d18"
# Sort by popularity
filmes.sort_values('total_votos',ascending = False).head()

# + [markdown] id="oizfziSfwGKy"
# > ### Verificando a média das notas dos filmes

# + id="LWroI9ZiwNH-"
media_notas = notas.groupby("id_filme").mean()['nota']

# + [markdown] id="Rixy4bPYxkm9"
# > ### Criando coluna para notas em filmes

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="zNphgs78xj4B" outputId="23b19fea-dcec-4638-c627-d2107d491ea3"
filmes['media_nota'] = media_notas
filmes.head()

# + [markdown] id="lyzcpQoqzhU-"
# >### Ordenando por filmes por notas
#
# Verificando problema de quantidade de numero total de votos

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="Z2Nc8ONyzpRl" outputId="7cba8e4f-4826-4b06-f273-332478e01c50"
# Sort by average rating (movies with very few votes dominate — fixed below)
filmes.sort_values('media_nota',ascending = False).head()

# + [markdown] id="elRdIXF70SOD"
# > #### Filtrando numero de notas por Query

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="AXqd1UNSz4_B" outputId="f3182bc1-4a6f-4c1f-ade6-c8007d7e12ad"
filmes_200 = filmes.query('total_votos >= 200').sort_values('media_nota',ascending = False).head(10)
filmes_200

# + [markdown] id="Tl03HFDm1_kd"
# > ## Segunda Tentativa de Recomendação
# > Recomendação baseada em similaridade de genero do filme

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="F09il61h2EAh" outputId="321665c5-95ce-48b8-8e21-824244141320"
filmes_assistidos = [1,20,156,42,6]
filmes.loc[filmes_assistidos]

# + id="D8Y6G1252dQC" colab={"base_uri": "https://localhost:8080/", "height": 0} outputId="0f2434f4-fc3e-4e4f-eaa7-16ddfdc4e9ef"
# Filter the movies with 200+ ratings down to the comedy/drama genres
filmes_comedia_drama = filmes_200.query('genero.str.contains("Comedy|Drama")', engine='python')

# Drop already-watched movies and rank the comedy/drama titles by average rating
filmes_comedia_drama = filmes_comedia_drama.drop(filmes_assistidos,errors='ignore').sort_values('media_nota', ascending = False)
filmes_comedia_drama

# + [markdown] id="RQPRK8MIgzOV"
# > ## Terceira tentativa de Recomendação
# Procurando usuários similares

# + [markdown] id="Pv-pRvNYi6ej"
# > ### Calculando a distância entre os pontos

# + colab={"base_uri": "https://localhost:8080/"} id="C6z7Vx2Dg5JP" outputId="98334eb7-3992-4da0-e53b-07b07df1159d"
import numpy as np

Gabriel_Vieira = np.array([4,5,2,0])
Carla = np.array([0,2,5,4])
Paulo = np.array([5,4,2,0])

def distancia(a,b):
    """Euclidean distance between two rating vectors."""
    return np.linalg.norm(a -b)

print('Distância entre Gabriel e Carla:')
print('%.2f' % distancia(Gabriel_Vieira,Carla))
print()
print('Distância entre Gabriel e Paulo:')
print('%.2f' % distancia(Gabriel_Vieira,Paulo))

# + [markdown] id="B6_W5MbFk9yP"
# > ### Implementando distância de pontos com os usuários

# + colab={"base_uri": "https://localhost:8080/"} id="d-vRvmFklEPc" outputId="b70cb309-8fec-48f0-912c-6d71419f8bd2"
# Ratings of the user with id 1
notas_usuario = notas.query('id_usuario == 1')
# Dataframe of ratings indexed by movie id
notas_usuario = notas_usuario[['id_filme','nota']].set_index('id_filme')
# Look up the rating given to movie id 47
notas_usuario.loc[47]

# + [markdown] id="hlvHgnWzpXx7"
# > ### Generalizando em uma função notas_usuario

# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="gnSeOwWXpcWz" outputId="5a1d3c4e-f14f-4e00-de06-4e01c9cb1bf0"
def notas_usuario(usuario):
    """Return the given user's ratings as a dataframe indexed by movie id."""
    notas_usuario = notas.query('id_usuario == %d' % usuario)
    notas_usuario = notas_usuario[['id_filme','nota']].set_index('id_filme')
    return notas_usuario

usuario_1 = notas_usuario(1)
usuario_4 = notas_usuario(4)

# Inner-join the two users and keep only movies that both of them rated
usuarios = usuario_1.join(usuario_4, lsuffix = '_user01', rsuffix = '_user04' ).dropna()
display(usuarios.head())

# Distance between user 01 and user 04
print('\nA distância entre os usuários 01 e 04 é de:')
print('%.2f' % distancia(usuarios['nota_user01'],usuarios['nota_user04']))

# + [markdown] id="Ah6NB3DdsUX2"
# > ### Generalizando em uma função distância entre dois usuarios

# + colab={"base_uri": "https://localhost:8080/"} id="dv_d6dymsZTd" outputId="e050deab-4eab-4ac8-9c5e-ebfa89d144a8"
def distancia_usuarios(id_01,id_02):
    """Return [id_01, id_02, distance] over the movies both users rated."""
    usuario_1 = notas_usuario(id_01)
    usuario_2 = notas_usuario(id_02)
    # Join the two users and keep only movies that both of them rated
    usuarios = usuario_1.join(usuario_2, lsuffix = '_user%d' % id_01, rsuffix = '_user%d' % id_02 ).dropna()
    distancia_usuarios = distancia(usuarios['nota_user%d' % id_01],usuarios['nota_user%d' % id_02])
    return [id_01,id_02,distancia_usuarios]

# Distance between user 01 and user 04
print('Lista com retorno id 01, id 02 e distância:')
print(distancia_usuarios(1,4))

# + [markdown] id="uu_ENADPwUdg"
# > ### Verificando quantidade de usuários

# + colab={"base_uri": "https://localhost:8080/"} id="plW4-LzQwXaH" outputId="89f18d74-0520-4083-d1d6-95c8ea7b51c5"
quantidade_usuarios = len(notas['id_usuario'].unique())
print('A quantidade de usuários é de:', quantidade_usuarios)

# + [markdown] id="8lcN7FHxw4H8"
# > ### Calculando a distância entre um e todos os usuários

# + id="Wk3jliD1w5Y9" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="4c1220cd-1d60-4ef8-ab5d-2779dac97e59"
gabriel_id = 1

# Imperative version (immediately superseded by the comprehension below,
# shown only for comparison)
def distancia_todos(ref_id):
    distancias = list()
    for usuario_id in notas['id_usuario'].unique():
        infos = distancia_usuarios(ref_id,usuario_id)
        distancias.append(infos)
    distancias = pd.DataFrame(distancias, columns = ['ref_id','outro_usuario_id','distancia'])
    return distancias

def distancia_todos(ref_id):
    """Distance from ref_id to every user, as a dataframe."""
    todos_users = notas['id_usuario'].unique()
    distancias = [ distancia_usuarios(ref_id,usuario_id) for usuario_id in todos_users]
    distancias = pd.DataFrame(distancias, columns = ['ref_id','outro_usuario_id','distancia'])
    return distancias

distancia_todos(gabriel_id)

# + [markdown] id="dRR1d__l0r2c"
# > ### Buscando usuários próximos

# + colab={"base_uri": "https://localhost:8080/", "height": 335} id="8VqcwPLnxlTn" outputId="b59855cf-7fc5-44b2-fbf8-2d440f81bee1"
dt = distancia_todos(gabriel_id)
df_distancias = dt.query('outro_usuario_id != 1')
df_distancias.sort_values('distancia').head(10)

# + [markdown] id="LCk5II6m1eTF"
# > ### Arrumando problema de pessoas que não deram notas em filmes

# + id="mHn_lUQf1jrC"
def distancia_usuarios(id_01,id_02, minimo = 6):
    """As before, but users sharing fewer than `minimo` rated movies get a
    huge sentinel distance so they are never considered similar."""
    usuario_1 = notas_usuario(id_01)
    usuario_2 = notas_usuario(id_02)
    # Join the two users and keep only movies that both of them rated
    usuarios = usuario_1.join(usuario_2, lsuffix = '_user%d' % id_01, rsuffix = '_user%d' % id_02 ).dropna()

    if (len(usuarios) < minimo):
        return [id_01,id_02,100000]

    distancia_usuarios = distancia(usuarios['nota_user%d' % id_01],usuarios['nota_user%d' % id_02])
    return [id_01,id_02,distancia_usuarios]

# Imperative version
def distancia_todos(ref_id):
    distancias = list()
    for usuario_id in notas['id_usuario'].unique():
        infos = distancia_usuarios(ref_id,usuario_id)
        distancias.append(infos)
    distancias = pd.DataFrame(distancias, columns = ['ref_id','outro_usuario_id','distancia'])
    return distancias

def proximos(ref_id):
    """All users ranked by ascending distance to ref_id (ref_id excluded)."""
    distancia = distancia_todos(ref_id)
    return distancia.sort_values('distancia').query('outro_usuario_id != %d' % ref_id)

# + colab={"base_uri": "https://localhost:8080/", "height": 191} id="VOeZ3_XO-IH3" outputId="b26b086a-ea26-4bbc-c2e7-3e9e1fbfd5e8"
proximos(1).head()

# + [markdown] id="oI7sGcw7_rRI"
# > ### Parâmetros para teste

# + id="dOcgxLRN_vfY"
def distancia_usuarios(id_01,id_02, minimo = 6):
    """As before, but pairs below the `minimo` overlap return None so they
    can be filtered out entirely."""
    usuario_1 = notas_usuario(id_01)
    usuario_2 = notas_usuario(id_02)
    # Join the two users and keep only movies that both of them rated
    usuarios = usuario_1.join(usuario_2, lsuffix = '_user%d' % id_01, rsuffix = '_user%d' % id_02 ).dropna()

    if (len(usuarios) < minimo):
        return None

    distancia_usuarios = distancia(usuarios['nota_user%d' % id_01],usuarios['nota_user%d' % id_02])
    return [id_01,id_02,distancia_usuarios]

def distancia_todos(ref_id, n = None):
    """Distances to the first n users (all users when n is None)."""
    todos_users = notas['id_usuario'].unique()
    if n:
        todos_users = todos_users[:n]
    distancias = [ distancia_usuarios(ref_id,usuario_id) for usuario_id in todos_users]
    # Drop the None entries produced by pairs with too little overlap
    distancias = list(filter(None,distancias))
    distancias = pd.DataFrame(distancias, columns = ['ref_id','outro_usuario_id','distancia'])
    return distancias

def proximos(ref_id, n = None):
    distancia = distancia_todos(ref_id, n = n)
    return distancia.sort_values('distancia').query('outro_usuario_id != %d' % ref_id)

# + colab={"base_uri": "https://localhost:8080/", "height": 191} id="OdvhmsSIA7u0" outputId="a96d5d37-61ea-4594-e541-80f3a2404c2f"
proximos(1, n = 48).head()

# + [markdown] id="bSWdh1AFCmXM"
# > ### Buscando dados sobre usuário mais similar dentro de um conjunto de 48

# + id="1l2gtAwACYoH"
similares = proximos(1, n = 48)
# NOTE(review): .name is the row's positional label from the pre-sort
# RangeIndex, not the 'outro_usuario_id' column — verify these coincide for
# this dataset, otherwise use similares.iloc[0]['outro_usuario_id'].
similar = similares.iloc[0].name

# + [markdown] id="n2qZlcRmIU5T"
# > ### Eliminando filmes já vistos e criando recomendações em ordem crescente dos filmes com maior nota do usuário semelhante

# + colab={"base_uri": "https://localhost:8080/", "height": 420} id="RG9Py8z9DdVD" outputId="df8dc8fc-daea-4c92-f6f5-b8ddb0c1c43b"
notas_similar = notas_usuario(similar)
filmes_ref = notas_usuario(1).index
recomendacoes = notas_similar.drop(filmes_ref, errors='ignore').sort_values('nota', ascending = False)
recomendacoes

# + [markdown] id="hZdB3Aw9IvLy"
# > ### Generalizando em uma função

# + id="PmoCCrDVIyQi"
def sugestao(id_ref, n_ref):
    """Recommend the most-similar user's top-rated movies, minus the ones
    id_ref has already rated."""
    # BUGFIX: a first proximos(id_ref, n = n_ref) call whose result was
    # discarded has been removed — it ran the full distance scan twice.
    similares = proximos(id_ref, n = n_ref)
    similar = similares.iloc[0].name

    notas_similar = notas_usuario(similar)
    filmes_ref = notas_usuario(id_ref).index
    recomendacoes = notas_similar.drop(filmes_ref, errors='ignore').sort_values('nota', ascending = False)
    return recomendacoes

# + colab={"base_uri": "https://localhost:8080/", "height": 420} id="O2GMTWMgJdUi" outputId="54027cb1-7702-4204-dc06-fccdbe710b4f"
sugestao(1,48)

# + [markdown] id="McWy52LbKDXd"
# > ## Finalizando Recomendação
#
# Recomendando o filme baseado em mais de um usuário

# + colab={"base_uri": "https://localhost:8080/", "height": 420} id="8W0WkOpkKN2L" outputId="4d763489-d024-484f-8412-e8445e8e1f52"
def proximos(ref_id, n = None, n_proximos = 10):
    """The n_proximos nearest users to ref_id."""
    distancia = distancia_todos(ref_id, n = n)
    return distancia.sort_values('distancia').query('outro_usuario_id != %d' % ref_id).head(n_proximos)

def sugestao(id_ref, n_ref = None):
    """Recommend movies by the average rating among the nearest users.

    NOTE(review): `similares` is the dataframe's positional index, not the
    'outro_usuario_id' column, and filmes_ref is computed but no longer used
    to drop already-watched movies — both look like latent issues to confirm.
    """
    notas_ref = notas_usuario(id_ref)
    filmes_ref = notas_usuario(id_ref).index

    similares = proximos(id_ref, n = n_ref).index
    notas_similares = notas.set_index(['id_usuario']).loc[similares]
    recomendacoes = notas_similares.groupby('id_filme').mean()[['nota']]
    recomendacoes = recomendacoes.sort_values('nota', ascending = False)
    return recomendacoes.join(filmes)

sugestao(1, 500)
GoogleColab/MachineLearning/Machine_Learning04.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## End-to-End Scenario: Scenario Survival Analysis
# Author: TI HA DB ML China - SAP HANA PAL Team
#
# Date: 2020/06/18
#
# In clinical trials or community trials, the effect of an intervention is assessed by measuring the number of subjects who have survived or are saved after that intervention over a period of time. We wish to measure the survival probability of Dukes’C colorectal cancer patients after treatment and evaluate statistically whether the patients who accept treatment can survive longer than those who are only controlled conservatively.
#
# ## 1. Setup the Connection to SAP HANA
# First, create a connection to SAP HANA. To create such a connection, a config file, config/e2edata.ini is used to control the connection parameters. A sample section in the config file is shown below which includes HANA url, port, user and password information.<br>
#
# ###################<br>
# [hana]<br>
# url=host-url<br>
# user=username<br>
# passwd=<PASSWORD><br>
# port=3xx15<br>
# ###################<br>

from hana_ml.dataframe import ConnectionContext
from hana_ml.algorithms.pal.utility import Settings

url, port, user, pwd = Settings.load_config("../../../config/e2edata.ini")
connection_context = ConnectionContext(url, port, user, pwd)

# Connection status:

print(connection_context.connection.isconnected())

# ## 2. Dataset
# This scenarios describes a clinical trial of 49 patients for the treatment of Dukes’C colorectal cancer. The following data shows the survival time in 49 patients with Dukes’C colorectal cancer who are randomly assigned to either linoleic acid or control treatment.
#
# ![](patient.png)
#
# The + sign indicates censored data. Until 6 months after treatment, there are no deaths. The effect of the censoring is to remove from the alive group those that are censored. At time 6 months two subjects have been censored so the number alive just before 6 months is 23. There are two deaths at 6 months. Thus,
#
# We now reduce the number alive (“at risk”) by two. The censored event at 9 months reduces the “at risk” set to 20. At 10 months there are two deaths. So the proportion surviving is 18/20 = 0.9, and the cumulative proportion surviving is 0.913*0.90 = 0.8217.
#
# ## 3. Implementation Steps
#
# ### Option 1: Kaplan-Meier Estimate
# **Technology Background**
#
# Kaplan-Meier estimate is one of the simplest way to measure the fraction of subjects living for a certain amount of time after treatment. The time starting from a defined point to the occurrence of a given event, for example death, is called as survival time.
#
# **Step 1**
#
# The trial data can then be loaded into table as follows:
#

# +
import pandas as pd
from hana_ml.dataframe import create_dataframe_from_pandas

# Survival times (months), censoring status (1 = death observed, 0 = censored),
# number of patients sharing that observation, and treatment group.
data = {'TIME':  [1,5,6,6,9,10,10,10,12,12,13,15,16,20,24,24,27,32,34,36,44,3,6,8,12,12,15,16,18,20,22,24,28,30,30,33,42],
        'STATUS': [0,0,1,1,0,1,1,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,1,1,1,0,0,0,0,1,0,1,0,0,1,0,1],
        'OCCURRENCES': [1,1,1,1,1,1,1,1,4,1,1,1,1,1,1,1,1,1,1,2,1,1,4,2,2,1,1,1,2,1,1,1,3,1,1,1,1],
        'GROUP': ["linoleic acid","linoleic acid","linoleic acid","linoleic acid","linoleic acid","linoleic acid",
                  "linoleic acid","linoleic acid","linoleic acid","linoleic acid","linoleic acid","linoleic acid",
                  "linoleic acid","linoleic acid","linoleic acid","linoleic acid","linoleic acid","linoleic acid",
                  "linoleic acid","linoleic acid","linoleic acid","control","control","control","control","control",
                  "control","control","control","control","control","control","control","control","control","control",
                  "control"]
       }
trial = pd.DataFrame (data, columns = ['TIME','STATUS','OCCURRENCES', 'GROUP'])

trial_df = create_dataframe_from_pandas(connection_context,
                                        pandas_df=trial,
                                        table_name='PAL_TRIAL_DATA_TBL',
                                        force=True,
                                        replace=True)
trial_df.head(5).collect()
# -

# **Step 2**
#
# Input customer data and use the Kaplan-Meier function to get the survival estimates and log-rank test statistics.
#
# To compare survival estimates produced from two groups, we use log-rank test.
# It is a hypothesis test to compare the survival distribution of two groups (some of the observations may be censored)
# and is used to test the null hypothesis that there is no difference between the populations (treatment group and control group)
# in the probability of an event (here a death) at any time point. The methods are nonparametric in
# that they do not make assumptions about the distributions of survival estimates.
# The analysis is based on the times of events (here deaths). For each such time
# we calculate the observed number of deaths in each group and the number expected
# if there were in reality no difference between the groups. It is widely used in clinical trials
# to establish the efficacy of a new treatment in comparison with a control treatment when the measurement
# is the time to event (such as the time from initial treatment to death).
#
# Because the log-rank test is purely a test of significance, it cannot provide an estimate of the size of the difference between the groups.
#

from hana_ml.algorithms.pal.stats import kaplan_meier_survival_analysis

# Returns three result tables: survival estimates, log-rank statistics, comparison.
result = kaplan_meier_survival_analysis(trial_df)
print(result[0].collect())
print(result[1].collect())
print(result[2].collect())

# ### Option 2: Weibull Distribution
# **Technology Background**
#
# Weibull distribution is often used for reliability and survival analysis. It is defined by 3 parameters: shape, scale, and location. Scale works as key to magnify or shrink the curve. Shape is the crucial factor to define how the curve looks like, as described below:
#
# - Shape = 1: The failure rate is constant over time, indicating random failure.
#
# - Shape < 1: The failure rate decreases over time.
#
# - Shape > 1: The failure rate increases over time.
#
# **Step 1**
#
# Get Weibull distribution and statistics from the linoleic acid treatment data:

# +
cursor = connection_context.connection.cursor()
try:
    cursor.execute("DROP TABLE PAL_LINO_ACID_TBL")
except Exception:
    # Best-effort drop: the table may not exist on a first run.
    # (Was a bare `except:`, which would also swallow KeyboardInterrupt.)
    pass
cursor.execute('CREATE COLUMN TABLE PAL_LINO_ACID_TBL (\"LEFT\" DOUBLE, \"RIGHT\" DOUBLE);')

# Interval-censored observations: RIGHT is NULL when the event is censored.
values = [(1,None), (5,None), (6,6), (6,6), (9,None), (10,10),
          (10,10), (10,None), (12,12), (12,12), (12,12), (12,12),
          (12,None), (13,None), (15,None), (16,None), (20,None), (24,24),
          (24,None), (27,None), (32,32), (34,None), (36,None), (36,None), (44,None)]
try:
    cursor.executemany("INSERT INTO " +
                       "{} VALUES ({})".format('PAL_LINO_ACID_TBL',
                                               ', '.join(['?']*len(values[0]))), values)
    connection_context.connection.commit()
finally:
    cursor.close()

linoleic_acid_df = connection_context.table("PAL_LINO_ACID_TBL")
print(linoleic_acid_df.head(5).collect())
# -

# Call Weibull Distribution function and show the results:

from hana_ml.algorithms.pal.stats import distribution_fit

result = distribution_fit(linoleic_acid_df,
                          distr_type = "weibull",
                          optimal_method = "maximum_likelihood",
                          censored=True)
print(result[0].collect())
print(result[1].collect())

# **Step 2**
#
# Get Weibull distribution and statistics from the control treatment data:

# +
cursor = connection_context.connection.cursor()
try:
    cursor.execute("DROP TABLE PAL_CONTROL_TBL")
except Exception:
    # Best-effort drop: the table may not exist on a first run.
    pass
cursor.execute('CREATE COLUMN TABLE PAL_CONTROL_TBL (\"LEFT\" DOUBLE, \"RIGHT\" DOUBLE);')

values = [(3,None), (6,6), (6,6), (6,6), (6,6), (8,8),
          (8,8), (12,12), (12,12), (12,None), (15,None), (16,None),
          (18,None), (18,None), (20,20), (22,None), (24,24), (28,None),
          (28,None), (28,None), (30,30), (30,None), (33,None), (42,42)]
try:
    cursor.executemany("INSERT INTO " +
                       "{} VALUES ({})".format('PAL_CONTROL_TBL',
                                               ', '.join(['?']*len(values[0]))), values)
    connection_context.connection.commit()
finally:
    cursor.close()
control_df = connection_context.table("PAL_CONTROL_TBL") print(control_df.head(5).collect()) # - result = distribution_fit(control_df, distr_type = "weibull", optimal_method = "maximum_likelihood", censored=True) print(result[0].collect()) print(result[1].collect()) # **Step 3** # # Get the CDF (cumulative distribution function) of Weibull distribution for the linoleic acid treatment data: # + cursor = connection_context.connection.cursor() try: cursor.execute("DROP TABLE PAL_DISTRPROB_DATA_TBL") except: pass cursor.execute('CREATE COLUMN TABLE PAL_DISTRPROB_DATA_TBL (\"DATACOL\" DOUBLE);') values = [(6,),(8,),(12,),(20,),(24,),(30,),(42,)] try: cursor.executemany("INSERT INTO " + "{} VALUES ({})".format('PAL_DISTRPROB_DATA_TBL', ', '.join(['?']*len(values[0]))), values) connection_context.connection.commit() finally: cursor.close() distri_prob_df = connection_context.table("PAL_DISTRPROB_DATA_TBL") print(distri_prob_df.collect()) # - # Invoke CDF and show the result: from hana_ml.algorithms.pal.stats import cdf distr_info = {'name' : 'weibull', 'shape' : 1.40528, 'scale': 36.3069} result = cdf(distri_prob_df, distr_info, complementary=False) print(result.collect()) # **Step 4** # # Get the CDF (cumulative distribution function) of Weibull distribution for the control treatment data: # + cursor = connection_context.connection.cursor() try: cursor.execute("DROP TABLE PAL_DISTRPROB_DATA_TBL") except: pass cursor.execute('CREATE COLUMN TABLE PAL_DISTRPROB_DATA_TBL (\"DATACOL\" DOUBLE);') values = [(6,),(10,),(12,),(24,),(32,)] try: cursor.executemany("INSERT INTO " + "{} VALUES ({})".format('PAL_DISTRPROB_DATA_TBL', ', '.join(['?']*len(values[0]))), values) connection_context.connection.commit() finally: cursor.close() distri_prob_df = connection_context.table("PAL_DISTRPROB_DATA_TBL") print(distri_prob_df.collect()) # - # Invoke CDF and show the result: distr_info = {'name' : 'weibull', 'shape' : 1.71902, 'scale': 20.444} result = cdf(distri_prob_df, distr_info, 
complementary=False) print(result.collect()) # ## Drop Tables and Close HANA Connection cursor = connection_context.connection.cursor() try: cursor.execute("DROP TABLE PAL_TRIAL_DATA_TBL") cursor.execute("DROP TABLE PAL_DISTRPROB_DATA_TBL") cursor.execute("DROP TABLE PAL_LINO_ACID_TBL") cursor.execute("DROP TABLE PAL_CONTROL_TBL") except: pass connection_context.close()
Python-API/pal/notebooks/e2e_senarios/Scenario-Survival-Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Simulate LSST light curves and recover.

# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import rubin_rotation as rr
import starspot as ss
import exoplanet as xo
import time as tm
from tqdm import trange
import fastprogress as fp

import warnings
warnings.filterwarnings('ignore')

plotpar = {'axes.labelsize': 30,
           'font.size': 22,
           'legend.fontsize': 15,
           'xtick.labelsize': 30,
           'ytick.labelsize': 30,
           'text.usetex': True}
plt.rcParams.update(plotpar)
# -

# Simulate a light curve


def sim_lc(prot, err, Nvisits=80, tspan=1, seed=42, tau_range=(1, 3)):
    """Simulate a rotation-modulated light curve at an LSST-like cadence.

    Parameters
    ----------
    prot : float
        Injected rotation period (units as expected by rr.mklc, days).
    err : float
        Fractional white-noise level added to the flux.
    Nvisits : int, optional
        Number of simulated visits. BUG FIX: the original body re-assigned
        Nvisits = 80 and so silently ignored this argument.
    tspan : float, optional
        Time span passed to rr.generate_visits. BUG FIX: previously
        hard-coded to 1.
    seed : int or None, optional
        Random seed. BUG FIX: previously hard-coded to 42; pass None for
        non-deterministic draws. The default preserves the old behavior.
    tau_range : tuple of float, optional
        Spot-lifetime range in units of the rotation period; tau is drawn
        log-uniformly from tau_range * prot.

    Returns
    -------
    time, flux, pure_flux, flux_err : ndarray
        Visit times, noisy flux, noiseless flux, and per-point uncertainties.
    """
    np.random.seed(seed)
    time = rr.generate_visits(Nvisits=Nvisits, tspan=tspan)

    # Isotropic inclination (uniform in sin^2 i) and a log-uniform spot lifetime.
    sin2incl = np.random.uniform(np.sin(0)**2, np.sin(np.pi/2)**2)
    incl = np.arcsin(sin2incl**.5)
    tau = np.exp(np.random.uniform(np.log(tau_range[0]*prot),
                                   np.log(tau_range[1]*prot)))

    # Get LC from the spot model.
    res0, res1 = rr.mklc(time, incl=incl, tau=tau, p=prot)
    nspot, ff, amp_err = res0
    _, area_tot, dF_tot, dF_tot0 = res1
    # Median-normalize the noiseless flux, then add white noise at level `err`.
    pure_flux = dF_tot0 / np.median(dF_tot0) - 1
    flux = pure_flux + np.random.randn(Nvisits) * err
    flux_err = np.ones_like(flux)*err
    return time, flux, pure_flux, flux_err


time, flux, pure_flux, flux_err = sim_lc(100, .05)
plt.errorbar(time, flux, flux_err, fmt=".")

plt.figure(figsize=(20, 4))
plt.errorbar(time, flux, flux_err, fmt=".")
plt.plot(time, pure_flux, color="k")

# +
# Lomb-Scargle period estimate for the single simulated star.
results = xo.estimators.lomb_scargle_estimator(time, flux, max_peaks=1,
                                               min_period=1.0, max_period=150.0,
                                               samples_per_peak=50)
peak = results["peaks"][0]
freq, power = results["periodogram"]
plt.plot(1./freq, power)
print(peak["period"])
# -

# Gaussian-process rotation-period fit (MAP solution only; the MCMC cells
# below are left commented out because they are slow).
Star = rr.Star(time, flux, flux_err, init_period=100)
start = tm.time()
map_soln, model = Star.singleband_gp(lower=1, upper=150)
end = tm.time()
print(end - start)

# +
# # Star.mcmc()

# with model:
#     trace = xo.sample(tune=500, draws=1000, start=map_soln, target_accept=0.95)

# +
# period_samples = trace["period"]
# bins = np.linspace(20, 45, 40)
# plt.hist(period_samples, bins, histtype="step", color="k")
# plt.yticks([])
# plt.xlim(bins.min(), bins.max())
# plt.xlabel("rotation period [days]")
# plt.ylabel("posterior density");
# -

plt.figure(figsize=(20, 4))
plt.errorbar(time, flux, flux_err, fmt=".")
plt.plot(time, pure_flux, color="k");
plt.plot(time, map_soln["pred"] + map_soln["mean"], label="model");

# +
# Injection/recovery experiment: simulate nstars light curves with random
# periods and recover the period with Lomb-Scargle (the GP recovery is left
# out of this loop for speed).
nstars = 5000
prots = np.random.uniform(1, 150, nstars)
ls_prots, gp_prots, powr = [np.zeros(nstars) for i in range(3)]
for i in fp.progress_bar(range(nstars)):
    # NOTE(review): sim_lc is called with its default seed, so every star
    # shares the same inclination/spot-lifetime/noise draw; pass seed=i (or
    # seed=None) here to vary the geometry between stars.
    time, flux, pure_flux, flux_err = sim_lc(prots[i], .05)
    results = xo.estimators.lomb_scargle_estimator(time, flux, max_peaks=1,
                                                   min_period=3.0,
                                                   max_period=150.0,
                                                   samples_per_peak=50)
    peak = results["peaks"][0]
    powr[i] = peak["log_power"]
    freq, power = results["periodogram"]
    ls_prots[i] = peak["period"]
# -

plt.figure(figsize=(10, 8), dpi=200)
m = powr/2 > -10  # -3.8
plt.scatter(prots[m], ls_prots[m], c=powr[m]/2, s=5)
m = prots < 15
plt.plot(prots[m], ls_prots[m], ".")
print(len(prots[m])/nstars*100)
# Raw strings avoid the invalid "\m" escape warning in the mathtext labels.
plt.colorbar(label=r"$\mathrm{log(amplitude)}$")
xs = np.linspace(0, 150, 150)
plt.plot(xs, xs, "k")
plt.xlabel(r"$\mathrm{Injected~rotation~period~[days]}$");
plt.ylabel(r"$\mathrm{Recovered~rotation~period~[days]}$");
# plt.savefig("injection_recovery")
# plt.savefig("injection_recovery.pdf")

plt.hist(prots, 100);
plt.hist(ls_prots, 100, alpha=.5);

# BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin float is the documented replacement.
print(sum(ls_prots < 20)/float(nstars))
print(sum((prots < 15) & (ls_prots > 20))/float(nstars))

sum((prots < 15) & (ls_prots > 3))
code/notebooks/injection_recovery.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="UncprnB0ymAE"
# Below is code with a link to a happy or sad dataset which contains 80 images,
# 40 happy and 40 sad.
# Create a convolutional neural network that trains to 100% accuracy on these
# images, which cancels training upon hitting training accuracy of >.999
#
# Hint -- it will work best with 3 convolutional layers.

# +
import tensorflow as tf
import os
import zipfile
from os import path, getcwd, chdir

# DO NOT CHANGE THE LINE BELOW. If you are developing in a local
# environment, then grab happy-or-sad.zip from the Coursera Jupyter Notebook
# and place it inside a local folder and edit the path to that location
path = f"{getcwd()}/../tmp2/happy-or-sad.zip"

# Extract the training images into /tmp/h-or-s (one subfolder per class,
# which is the layout flow_from_directory expects).
zip_ref = zipfile.ZipFile(path, 'r')
zip_ref.extractall("/tmp/h-or-s")
zip_ref.close()
# -


# GRADED FUNCTION: train_happy_sad_model
def train_happy_sad_model():
    """Train a small CNN on the happy-or-sad images until accuracy > 99.9%.

    Returns the final training accuracy from the last completed epoch.
    The placeholders of the original exercise template have been filled in
    so the notebook actually runs.
    """
    DESIRED_ACCURACY = 0.999

    class myCallback(tf.keras.callbacks.Callback):
        # Stop training as soon as the epoch accuracy exceeds the target.
        def on_epoch_end(self, epoch, logs=None):
            logs = logs or {}
            if logs.get('acc', 0) > DESIRED_ACCURACY:
                print("\nReached 99.9% accuracy so cancelling training!")
                self.model.stop_training = True

    callbacks = myCallback()

    # Three conv/pool stages (per the hint above), a dense head, and a single
    # sigmoid unit for the binary happy/sad decision. Images are 150 x 150 RGB.
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(16, (3, 3), activation='relu',
                               input_shape=(150, 150, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])

    from tensorflow.keras.optimizers import RMSprop
    # 'acc' (not 'accuracy') is the metric name this course-era notebook reads
    # back from history.history below; `lr` matches the TF 1.x-style API used
    # by the rest of the template.
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=0.001),
                  metrics=['acc'])

    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    # Rescale pixel values from [0, 255] to [0, 1].
    train_datagen = ImageDataGenerator(rescale=1./255)

    # Please use a target_size of 150 X 150.
    train_generator = train_datagen.flow_from_directory(
        "/tmp/h-or-s",
        target_size=(150, 150),
        batch_size=10,
        class_mode='binary')
    # Expected output: 'Found 80 images belonging to 2 classes'

    # model fitting: 80 images / batch_size 10 = 8 steps per epoch; the
    # callback cancels training early once the target accuracy is reached.
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=8,
        epochs=15,
        verbose=1,
        callbacks=[callbacks])
    # model fitting
    return history.history['acc'][-1]


# The Expected output: "Reached 99.9% accuracy so cancelling training!""
train_happy_sad_model()

# +
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work
# and close the notebook

# + language="javascript"
# <!-- Save the notebook -->
# IPython.notebook.save_checkpoint();

# + language="javascript"
# IPython.notebook.session.delete();
# window.onbeforeunload = null
# setTimeout(function() { window.close(); }, 1000);
week4/Exercise4-Question.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Hash Tables: Ice Cream Parlor
#
# <br>
#
# ![image](https://user-images.githubusercontent.com/50367487/83125848-08a98780-a113-11ea-8315-986421b92f4b.png)

# +
# #!/bin/python3

import math
import os
import random
import re
import sys


# Complete the whatFlavors function below.
def whatFlavors(cost, money):
    """Print the 1-based indices of two distinct flavors whose prices sum to money.

    Single O(n) pass over `cost`: for each price we look up its complement
    among the prices seen so far, so the earlier index is always printed first.
    Prints nothing if no pair sums to `money`.
    """
    seen = {}  # price -> 1-based index of its first occurrence
    for idx, val in enumerate(cost):
        complement = money - val
        # Explicit membership test instead of the original truthiness check on
        # d.get(...): a stored index of 0 (e.g. if the indexing convention ever
        # becomes 0-based) would wrongly evaluate falsy and skip a valid pair.
        if complement in seen:
            print(seen[complement], idx + 1)
            return
        seen[val] = idx + 1


if __name__ == '__main__':
    t = int(input())
    for t_itr in range(t):
        money = int(input())
        n = int(input())
        cost = list(map(int, input().rstrip().split()))
        whatFlavors(cost, money)
Interview Preparation Kit/7. Search/Hash Tables, Ice Cream Parlor.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ###### Content under Creative Commons Attribution license CC-BY 4.0, code under MIT license (c)2014 <NAME>

# <h1 align="center">Orbital</h1>
# <h1 align="center">Perturbations</h1>

# In this lesson, we will discuss the orbits of bodies in space, and how those bodies
# can be affected by others as they fly by. We will look at Encke's method, which was
# created by <NAME> in 1851, and uses a second order ODE to describe the true orbit of
# a body when affected by the pull of an additional body flying by.
#
# In traditional orbital dynamics, the standard two-body problem is used to describe
# two bodies in motion with one orbiting the other. This fails to take into account
# the effect of outside bodies on the orbital trajectory and can produce an orbit very
# different from the "true" orbit.
#
# These orbits play a large role in our daily lives. There are numerous satellites
# currently orbiting Earth, which are used for communications, GPS, and other data
# gathering. These satellites can have slight changes to their orbits around Earth
# caused by other satellites, planets, moons, or comets that need to be taken into
# consideration when designing orbital parameters for them.

# We will compare the traditional two-body motion and Encke's method to see how much
# the orbits vary over time.
#
# For our example, we will use Mars and Jupiter orbiting the Sun, with Jupiter being
# the disturbing body. These two planets were chosen for their proximity to one
# another and for Jupiter's large mass (over 300 times greater than Earth's!)

# <h2 align="center">Encke's Method</h2>

# ![Screen Shot 2015-12-01 at 10.42.54 AM](./Figures/Screen Shot 2015-12-01 at 10.42.54 AM.png)

# <h4 align="center">Figure 1.
# Visualization of Encke's Method ([Analytical Mechanics of Aerospace Systems Pg 342](http://www.control.aau.dk/~jan/undervisning/MechanicsI/mechsys/chapter10.pdf))</h4>

# Encke's method uses the difference between the standard Keplerian orbit and the true
# orbit affected by a third body, and can be represented by the following equations.
#
# The Keplerian orbit or the osculating orbit is represented by the equation:
#
# $$\frac{d^2 \vec{r}_{osc}}{dt^2} = - \frac{\mu}{r_{osc}^3} \vec{r}_{osc}$$
#
# The perturbed orbit is represented by a similar equation:
#
# $$\frac{d^2 \vec{r}}{dt^2} = - \frac{\mu}{r^3} \vec{r} + \vec{a}_d$$
#
# When looking at the two equations, the only difference between the osculating orbit
# and the perturbed orbit is the term $\vec{a}_d$, which is the acceleration vector
# caused by the third body flyby.

# The acceleration vector can be found using the following:
#
# $$\vec{a}_d = \frac{1}{m_2} \vec{f}_{d_2} - \frac{1}{m_1} \vec{f}_{d_1}$$
#
# Many of the mass terms cancel. (Schaub pg 257)
#
# This leads to:
#
# $$\vec{a}_d = \frac{1}{m_2} \frac{G m_2 m_3}{|\vec{r}_{23}|^3} \vec{r}_{23} - \frac{1}{m_1} \frac{G m_1 m_3}{|\vec{r}_{13}|^3} \vec{r}_{13}$$
#
# Where $m_1$ is the mass of the central body, $m_2$ is the mass of the body orbiting
# around $m_1$, and $m_3$ is the mass of the disturbing body.

# Initially, at time $t_0 = 0$, the osculating and perturbed orbits are equal. The
# change occurs at a time $t = t_0 + \Delta t$.
#
# Let's define the difference between the radius of the osculating and perturbed
# orbits as $\delta$ and the difference between the velocities of the two orbits as
# $\gamma$.
#
# Therefore at time $t$, which we just defined, the radial and velocity components are:
#
# $$\vec{\delta}(t) = \vec{r}(t) - \vec{r}_{osc} (t)$$
# $$\vec{\gamma}(t) = \vec{v}(t) - \vec{v}_{osc} (t)$$

# We have some initial conditions as well. As mentioned before, the orbits are equal
# at $t_0$, which gives us $\vec{\delta} (t_0) = 0$.
# The velocity difference at $t_0$ is also zero:
# $\frac{d \vec{\delta} (t_0)}{dt} = \vec{\gamma} (t_0) = 0$

# If we subtract our two initial equations we get:
#
# $$\frac{d^2 \vec{\delta}}{dt^2} = \vec{a}_d + \mu \left( \frac{\vec{r}_{osc}}{r_{osc} ^3} - \frac{\vec{r}}{r^3} \right)$$
#
# This can be simplified to:
#
# $$\frac{d^2 \vec{\delta}}{dt^2} + \frac{\mu}{r_{osc} ^3} \vec{\delta} = \frac{\mu}{r_{osc} ^3} \left( 1 - \frac{r_{osc} ^3}{r^3} \right) \vec{r} + \vec{a}_d$$

# Our term $1 - \frac{r_{osc} ^3}{r^3}$ can be an issue because at the beginning of
# flight $r_{osc}$ and $r$ are approximately equal, so the difference suffers from
# numerical cancellation. We'll take a look at that a little later on.

# We can find the radial and velocity components using the initial values of the
# radius and velocity along with the Lagrangian coefficients in terms of the eccentric
# anomaly $E$, where the eccentric anomaly is the angle between the major axis and any
# point on the orbit.
#
# $$\vec{r} = F \vec{r}_0 + G \vec{v}_0$$
# $$\vec{v} = \dot{F} \vec{r}_0 + \dot{G} \vec{v}_0$$
#
# Where
#
# $$F = 1 + \frac{a}{r_0} \left[ \cos(E - E_0) - 1 \right]$$
# $$G = \frac{a \alpha _0}{\mu} \left[ 1 - \cos(E - E_0) \right] + r_0 \sqrt{\frac{a}{\mu}} \sin(E - E_0)$$
# $$\dot{F} = - \frac{\sqrt{\mu a}}{r r_0} \sin(E - E_0)$$
# $$\dot{G} = 1 + \frac{a}{r} \left[ \cos(E - E_0) - 1 \right]$$
#
# (Tewari pg 104)
#
# The eccentric anomaly, $E$, satisfies $E = M + e \sin(E)$. $M$ is the mean anomaly
# and $e$ is the eccentricity.

# ![Kepler1](./Figures/Kepler1.png)

# <h4 align="center">Figure 2. Orbital anomalies for elliptic motion ([AIAA pg158])</h4>

# As you can see from the equation, the eccentric anomaly is a function of itself. In
# order to calculate it we will have to start with a guess and then iterate until the
# difference between our new value of E and the guess is within a certain tolerance.
# This is based on Newton's approximation using a Taylor series expansion of
# $f(E) = E - e \sin E - M$

# The expansion is: $f(E + \Delta E) = \sum_{k = 0} ^{\infty} f^{(k)} (E) \frac{(\Delta E) ^k}{k!}$,
# in which $f^{(k)} \approx \frac{d^k f(E)}{dE^k}$. The first two terms of the Taylor
# series can be used for Newton's approximation:
#
# $$f(E + \Delta E) \approx f(E) + f^{(1)} (E) (\Delta E)$$

# The following method and code for the eccentric anomaly approximation are based upon
# those from Ashish Tewari's book, _Atmospheric and Space Flight Dynamics_. The
# initial guess we will start with uses the mean anomaly $M$ to give $E$:
#
# $$E = M + e \sin M$$
#
# $\Delta E$ will be calculated using
# $-\frac{f(E)}{f^{(1)} (E)} = \frac{-E + e \sin E + M}{1 - e \cos E}$ so that
# $f(E + \Delta E)$ is equal to $0$. After that, the value of E is updated, where
# $E = E + \Delta E$. This operation is repeated until a small enough difference is
# found, and our $E$ value is found.
#
# Let's get to the coding!

from matplotlib import pyplot
import numpy
from numpy import linalg

# %matplotlib inline
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 16


def Kepler_eqn(e, M):
    """Solve Kepler's equation M = E - e*sin(E) for the eccentric anomaly.

    Uses Newton's method starting from the guess E = M + e*sin(M).

    Parameters:
    ----------
    e : float
        eccentricity of orbit (0 <= e < 1 for an elliptical orbit)
    M : float
        Mean anomaly of orbit (radians)

    Returns:
    -------
    E : float
        Eccentric anomaly (radians)
    """
    E = M + e * numpy.sin(M)  # initial guess from the mean anomaly
    tolerance = 1e-9          # was 1e-2; a tight tolerance costs ~nothing here
    # Newton iteration E <- E - f(E)/f'(E), bounded so a pathological input
    # cannot loop forever.
    # BUG FIX: the original loop computed the residual as E - sin(E) - M,
    # dropping the eccentricity factor, rebuilt E from the stale guess instead
    # of applying the Newton step, and returned the pre-update value.
    for _ in range(50):
        fofE = E - e * numpy.sin(E) - M   # f(E)
        if abs(fofE) <= tolerance:
            break
        fdotE = 1 - e * numpy.cos(E)      # f'(E); > 0 for e < 1, so the step is safe
        E = E - fofE / fdotE
    return E
# Based on code from <NAME>

# The Kepler_eqn function uses the eccentricity, $e$, and mean anomaly, $M$, and then
# uses the iterative method to spit out our value of $E$.
# The eccentric anomaly $E$ is the basis for the elliptical trajectory and is the
# value that changes at each time step, in turn changing the radial and velocity
# vectors.
#
# Now let's define a function, that given our orbital parameters, will give us our
# trajectory at a time $t$.
#
# The orbital elements that we will need to input into the function are:
#
# * $a$, the semi-major axis, the distance from the center of the orbit to either of the foci
# * $P$, the period, the time to complete one orbit
# * $\mu$, the gravitational parameter, the gravitational constant $G$ times the mass of the body,
# * $e$, the eccentricity
# * $t_0$, an initial time
# * $r_0$, an initial radial vector
# * $v_0$, an initial velocity vector
# * $t$, a time, where the new radial and velocity vectors will be found.

# Using these initial values, we have equations that are set up within the function to
# give us our radial and velocity vectors. We also have to calculate our Lagrangian
# coefficients, described earlier, $F$, $G$, $\dot{F}$, and $\dot{G}$

# We will use the following equations:
#
# * $\alpha _0 = \vec{r}_0 \cdot \vec{v}_0$ : a constant that is used in the coefficient equations
#
# * $\theta _0 = \pi$: our initial true anomaly (We are starting at the semi-major axis where it is equal to $\pi$)
#
# * $n = \frac{2 \pi}{P}$: the mean motion, which is $2 \pi$ over the period
#
# * $E_0 = 2 \tan ^{-1} (\sqrt{\frac{1 - e}{1 + e}} \tan (0.5 \theta _0))$: initial eccentric anomaly
#
# * $\tau = t_0 + \frac{- E_0 + e \sin (E_0)}{n}$: where $t - \tau$ is the time since the closest point of orbit
#
# * $M = n (t - \tau)$ : the mean anomaly which is used for the eccentric anomaly iteration in Kepler's equation


def ellip_orb(a, Period, mu, e, t0, r0, v0, t):
    """Calculates the orbital position for an elliptical orbit

    Parameters:
    ----------
    a : float
        Semi-major axis
    Period : float
        Period of planetary orbit
    mu : float
        Gravitational parameter
    e : float
        Eccentricity of the orbit
    t0 : float
        Initial time t = 0
    r0 : array of float
        Initial positional array
    v0 : array of float
        Initial velocity array
    t : float
        time

    Returns:
    -------
    r : array of float
        Position vector at time t
    v : array of float
        Velocity vector at time t
    """
    r0_norm = numpy.linalg.norm(r0)  # |r0|
    # BUG FIX: alpha_0 = r0 . v0 is a scalar dot product; the original used the
    # elementwise product r0 * v0, which made G a 2-vector instead of a scalar.
    alpha = numpy.dot(r0, v0)
    theta0 = numpy.pi                # Initial true anomaly
    n = 2 * numpy.pi / (Period)      # Mean motion, given the period
    E0 = 2 * numpy.arctan(numpy.sqrt((1 - e) / (1 + e)) * numpy.tan(0.5 * theta0))  # Initial eccentric anomaly
    tau = t0 + (- E0 + e * numpy.sin(E0)) / n  # t - tau is time since perigee
    M = n * (t - tau)                # Mean anomaly
    E = Kepler_eqn(e, M)             # Eccentric anomaly found through Kepler's equation
    r_leg = a * (1 - e * numpy.cos(E))  # Orbit radius used in the Lagrangian coefficients

    # Lagrangian coefficients (Tewari pg 104): F = 1 + (a/r0)[cos(E - E0) - 1].
    # BUG FIX: the original multiplied by r0_norm instead of dividing, which is
    # dimensionally inconsistent with the quoted formula (and with F_dot and
    # G_dot below, which do divide by their respective radii).
    F = 1 + a * (numpy.cos(E - E0) - 1) / r0_norm
    G = a * alpha * (1 - numpy.cos(E - E0)) / mu + r0_norm * numpy.sqrt(a / mu) * numpy.sin(E - E0)
    F_dot = - numpy.sqrt(mu * a) * (numpy.sin(E - E0)) / (r_leg * r0_norm)
    G_dot = 1 + a * (numpy.cos(E - E0) - 1) / r_leg

    r = F * r0 + G * v0              # Position at the requested time
    v = F_dot * r0 + G_dot * v0      # Velocity at the requested time
    return r, v


# Now that we have a function to solve for the orbital trajectory of an elliptical
# orbit (which Mars and Jupiter both have) we have to create a function for the
# disturbing acceleration caused by the third body, Jupiter.

# The equation for $a_d$, as mentioned earlier, is:
#
# $$\vec{a}_d = \frac{1}{m_2} \frac{G m_2 m_3}{|\vec{r}_{23}|^3} \vec{r}_{23} - \frac{1}{m_1} \frac{G m_1 m_3}{|\vec{r}_{13}|^3} \vec{r}_{13}$$
#
# Our inputs for this function will be the mass of the Sun ($m_1$), the mass of Mars
# ($m_2$), the mass of Jupiter ($m_3$), and the radial vectors of Mars and Jupiter
# with respect to the Sun.
def acceleration_d(m1, m2, m3, r, r3):
    """Calculate the disturbing acceleration on body two due to body three.

    Parameters:
    ----------
    m1 : float
        Mass of central body (the Sun)
    m2 : float
        Mass of second body (Mars)
    m3 : float
        Mass of third (disturbing) body (Jupiter)
    r : array of float
        Position of body two relative to body one
    r3 : array of float
        Position of body three relative to body one

    Returns:
    -------
    a_d : array of float
        Disturbing acceleration acting on body two
    """
    G = 6.674e-11  # Gravitational constant (SI: m^3 kg^-1 s^-2)
    r13 = r3  # Vector from the Sun to Jupiter
    # BUG FIX: the vector from Mars to Jupiter is r3 - r.  The original code
    # used r - r3, which flips the sign of the direct term so Jupiter appeared
    # to repel Mars instead of attracting it.
    r23 = r3 - r
    r23_norm = numpy.linalg.norm(r23)  # Distance between Jupiter and Mars
    r13_norm = numpy.linalg.norm(r13)  # Distance between Jupiter and the Sun
    # Direct term (pull of body three on body two) minus indirect term (pull
    # of body three on body one).  The m1 and m2 factors cancel algebraically,
    # but are kept so the code mirrors the equation in the text above.
    # (The dead `a_d = numpy.zeros((2, 1))` initialisation was removed: it was
    # immediately overwritten and had the wrong shape, (2, 1) vs (2,).)
    a_d = (((1 / m2) * ((G * m2 * m3) / (r23_norm ** 3))) * r23) - (((1 / m1) * ((G * m1 * m3) / (r13_norm ** 3))) * r13)
    return a_d

# Now that we have our Kepler function, our elliptical orbit function, and our acceleration function, we can set up our initial conditions. After that we will be able to get our final radial and velocity vectors for the osculating and perturbed orbits in addition to Jupiter's orbit.

# <h3 align="center">Initial Conditions</h3>

# +
mu3 = 1.2669e17  # Standard gravitational parameter of Jupiter in m^3 / s^2
m3 = 1.8983e27  # Mass of Jupiter in kg
e3 = .0489  # Eccentricity of Jupiter
a3 = 778000000.  # Semi-major Axis of Jupiter in km
Period3 = 4332.589 * 3600 * 24  # Period of Jupiter Orbit in seconds
mu = 4.2828e13  # Standard gravitational parameter of Mars in m^3 / s^2
m2 = 6.4174e23  # Mass of Mars in kg
e = .0934  # Eccentricity of Mars
a = 228000000.
# Semi-major Axis of Mars in km Period = 686.980 * 3600 * 24 # Period of Mars Orbit in seconds mu1 = 1.3271e20 # Standard gravitational parameters of the Sun in m^3 / s^2 m1 = 1.989e30 # Mass of the Sun in kg dt = 24 * 3600 # Time step tfinal = 4000 * dt # Final time N = int(tfinal / dt) + 1 # Number of time steps t = numpy.linspace(0,tfinal,N) # Time array r0 = numpy.array([228000000., 0.]) # Initial radius of Mars v0 = numpy.array([-21.84, -10.27]) # Initial velocity r3_0 = numpy.array([778000000., 0.]) # Initial radius of Jupiter v3_0 = numpy.array([-13.04, -.713]) # Initial velocity of Jupiter # Set arrays for radial and velocity components r = numpy.empty((N, 2)) v = numpy.empty((N, 2)) gamma = numpy.empty((N, 2)) delta = numpy.empty((N, 2)) r_n = numpy.empty((N, 2)) v_n = numpy.empty((N, 2)) a_d = numpy.empty((N, 2)) r_osc = numpy.empty((N, 2)) r_osc_n = numpy.empty((N, 2)) v_osc = numpy.empty((N, 2)) v_osc_n = numpy.empty((N, 2)) r3_n = numpy.empty((N, 2)) # - # Next we will have a for-loop that will give us our solution at every time step for each of our orbits. In this case, we will use a final time of 4000 days. Each time step will be one day (in seconds), and we will be able to see the trajectories over that time period. # # The loop uses a crude method of integration (Tewari pg 166) to solve for $\gamma$ and $\delta$, the difference between the osculating and perturbed radial and velocity vectors respectively. # # The osculating orbit is used here as our base orbit. The orbit for Jupiter is calculated as well and used with the osculating orbit to get our disturbing acceleration. The acceleration is then used to find $\gamma$, which in turn is used to calculate $\delta$. # # $\delta$ is added to the radial vector of the osculating orbit at every time step to give us our perturbed orbit. The same is done with $\gamma$ for the velocity vector for the perturbed orbit. The solutions are then entered into arrays so that we can plot them! 
for i,ts in enumerate(t):
    # NOTE(review): these two assignments are dead -- `delta` and `gamma` are
    # unconditionally reassigned below before being read, and they also shadow
    # the (N, 2) arrays pre-allocated in the previous cell.
    delta = numpy.zeros_like(r0)
    gamma = numpy.zeros_like(r0)
    r_osc, v_osc = ellip_orb(a, Period, mu1, e, t[0], r0, v0, ts)  # Trajectory of the osculating orbit of Mars
    r_osc_norm = numpy.linalg.norm(r_osc)  # Normalized osculating orbit of Mars
    r0_norm = numpy.linalg.norm(r0)  # Normalized initial orbit of Mars
    r3, v3 = ellip_orb(a3, Period3, mu3, e3, t[0], r3_0, v3_0, ts)  # Trajectory of Jupiter
    a_d = acceleration_d(m1, m2, m3, r_osc, r3)  # Acceleration due to Jupiter
    # Difference in velocity between osculating orbit and perturbed.
    # NOTE(review): the first term scales with mu3 (Jupiter's gravitational
    # parameter) -- confirm against the cited reference (Tewari pg 166) that
    # this is not meant to be the central body's mu.  Also note gamma/delta
    # are rebuilt from scratch at each step rather than accumulated, so this
    # is a single-step estimate of the perturbation, not an integration.
    gamma = mu3 * (dt) * ((1 - (r_osc_norm / r0_norm) ** 3) / r_osc_norm ** 3) + a_d * (dt)
    delta = gamma * (dt)  # Difference between osculating orbit and perturbed orbit radius
    r = r_osc + delta  # Perturbed orbital radius
    v = v_osc + gamma  # Perturbed orbital velocity
    r_osc_n[i,:] = r_osc  # Value of osculating orbital radius for every time step
    v_osc_n[i,:] = v_osc  # Value of osculating orbital velocity for every time step
    r3_n[i,:] = r3  # Value of Jupiter's radius for every time step
    r_n[i,:] = r  # Value of the perturbed orbital radius for every time step
    v_n[i,:] = v  # Value of the perturbed orbital velocity for every time step

# We mentioned earlier that sometimes the term $1 - \frac{r_{osc} ^3}{r^3}$ can cause issues because towards the beginning of orbit the two terms are approximately equal and can cause the solution to blow up, but this can be solved as follows:
#
# $$1 - \frac{r_{osc} ^3}{r^3} = -B \frac{3 + 3B + B^2}{1 + (1 + B) ^{\frac{3}{2}}}$$
# Where $$B = \frac{\vec{\delta} \cdot (\vec{\delta} - 2 \vec{r})}{r^2}$$

# Now that we have our solutions arrays, we can plot our answers. Each array contains an x and y component for each time step. We will look at a plot of the osculating orbit and the perturbed orbit of Mars, and in a separate plot, we will look at those two orbits along with Jupiter's orbit.
# + x = numpy.linspace(t[0], t[-1], N) pyplot.figure(figsize = (10,10)) pyplot.grid(True) pyplot.xlabel(r'X Distance (km)', fontsize = 18) pyplot.ylabel(r'Y Distance (km)', fontsize = 18) pyplot.title('Trajectory of Osc vs Perturbed Orbit, Flight Time = %.2f days' %(tfinal / dt), fontsize=14) pyplot.plot(r_n[:,0], r_n[:,1]) pyplot.plot(r_osc_n[:,0], r_osc_n[:,1]) pyplot.legend(['Perturbed Orbit', 'Osculating Orbit']); pyplot.figure(figsize = (10,10)) pyplot.grid(True) pyplot.xlabel(r'X Distance (km)', fontsize = 18) pyplot.ylabel(r'Y Distance (km)', fontsize = 18) pyplot.title('Trajectory of Osc, Perturbed and Jupiter Orbit, Flight Time = %.2f days' %(tfinal / dt), fontsize=14) pyplot.plot(r_n[:,0], r_n[:,1]) pyplot.plot(r_osc_n[:,0], r_osc_n[:,1]) pyplot.plot(r3_n[:,0], r3_n[:,1]) pyplot.legend(['Perturbed Orbit', 'Osculating Orbit', 'Jupiter\'s Orbit']); # - # Looking at our first plot we can see that there is a change in Mars' orbit due to the gravitational pull from Jupiter as it flies by! # <h3 align="center">Dig Deeper</h3> # See what happens when you change the orbital parameters! What happens with different planets or with a satellite orbiting earth? What about when both planets don't start at the zero point of the y axis? 
# <h2 align="center">References</h2> # * [NASA Mars Fact Sheet](http://nssdc.gsfc.nasa.gov/planetary/factsheet/marsfact.html) # # * [NASA Jupiter Fact Sheet](http://nssdc.gsfc.nasa.gov/planetary/factsheet/jupiterfact.html) # # * [Standard Gravitational Parameter Wikipedia](https://en.wikipedia.org/wiki/Standard_gravitational_parameter) # # * [Perturbation (Astronomy)](http://tinyurl.com/gwz68sp) # # * <NAME>., AIAA Education Series, _An Introduction to the Mathematics and Methods of Astrodynamics_, AIAA, 1999 # # * <NAME> & <NAME>, AIAA Education Series, _Analytical Mechanics of Aerospace Systems_, AIAA, 2000 # # * <NAME>, _Atmospheric and Space Flight Dynamics: Modeling and Simulation with MATLAB and Simulink_, Birkhauser, 2007 from IPython.core.display import HTML css_file = '../numericalmoocstyle.css' HTML(open(css_file, "r").read())
mjorisch/Orbital_Perturbations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] editable=false hideCode=true hidePrompt=true # Synkrisis: The Bigraph Model Checker # =========================== # * * * # # A quick introduction # --------------------------------- # # Synkrisis is a bigraph modelling and engineering toolchain based on BigMC model checker sporting a modular approach. # It features static analysis for bigraph specification and supports its own transition system and model exporting for DTMC/MDP/LTS analysis. # Transition graphs output by BigMC can be exported to PRISM and Spot model checkers. # Since no information is saved across binder sessions, this tool is better suited to get a general idea of the tool and its capabilities. # Check the original Synkrisis repository to get more info to run this tool locally. # # ## Resources # More info on: # * Bigraphs: https://www.itu.dk/research/pls/wiki/index.php/A_Brief_Introduction_To_Bigraphs # * Bigmc: https://github.com/bigmc/bigmc # * Prism model checker: https://www.prismmodelchecker.org/ # * Spot model checker: https://spot.lrde.epita.fr/ # * and its HOA format: https://spot.lrde.epita.fr/concepts.html#hoa # * Synkrisis: https://github.com/AlessandroCaste/Synkrisis # # ## Synkrisis Syntax # You can write your bigraph model from file using Synkrisis syntax. # (https://github.com/AlessandroCaste/Synkrisis/wiki/Synkrisis-Syntax) # # This is a general layout of a bigraph model specification: # `controls` # `names` # `rules` # `model specification` # `makers` # `spot-acceptance` # `prism properties` # `external properties` # # ## How to use this notebook # In order to work and visualize bigraphical models you are only required to edit the following couple of cells: one for file specification and one for execution settings. 
# After that, the best way to run analysis is by clicking `Run->Run all cells` from the top-left menu.
#
# Here you can find further sample models: https://github.com/AlessandroCaste/Synkrisis/tree/master/src/test/java/models

# * * *

# Specify your model
# ----------------
#
# Input your bigraph model here:
#

# + hideCode=false hidePrompt=true active=""
# %%writefile filename.bigraph
#
#
# Write your BRS here

# +
# Set here the running arguments of your Synkrisis Execution

# Path to the file to be executed
filename = "examples/airport.bigraph"

# The name of the model inside the file (ex: addition)
modelname = "airport"

# Maximum number of execution steps (unlimited is 0)
steps = 0

# Print model and transition file?
printable = True

# Write some .dot file every x steps (default : 0)
frequency = 0

# Translate to the following languages?
prism = False
spot = False

# + editable=false hideCode=true hidePrompt=true jupyter={"source_hidden": true}
import subprocess

# Build the Synkrisis command line from the settings above.
# NOTE(review): there is no space between "-l" and the filename, so the glued
# form "-lexamples/airport.bigraph" is passed -- confirm the Synkrisis CLI
# accepts this before changing it.
execution = "java -jar Synkrisis.jar -l" + filename
if steps > 0:
    # BUG FIX: `steps` is an int; concatenating it directly to a str raised
    # TypeError whenever a step limit was set.
    execution += " -m " + str(steps)
if printable:
    execution += " -G "
if frequency > 0:
    # BUG FIX: `frequency` is an int; it must be converted before
    # concatenation (same TypeError as above).
    execution += " -r " + str(frequency)
if spot:
    execution += " -o spot "
if prism:
    execution += " -o prism "
result = subprocess.run(execution, stdout=subprocess.PIPE, shell=True, universal_newlines=True)
print(result.stdout)

# + [markdown] editable=false hideCode=false hidePrompt=true
# Visualize your model
# ---------------------------------
#
# Visualize model and reactions bigraphs

# + editable=false hideCode=true hidePrompt=true jupyter={"source_hidden": true}
import os
from IPython.display import Image

# Show the rendered model picture if Synkrisis produced one.
if os.path.isfile(modelname + "/" + modelname + ".png"):
    display(Image(modelname + "/" + modelname + ".png"))
else:
    print("No model picture found")

# + editable=false hideCode=true hidePrompt=true jupyter={"source_hidden": true}
from IPython.display import Image
from os import listdir

# Show every rendered reaction rule, if a rules directory exists.
rulepath = modelname+"/rules/"
if os.path.isdir(rulepath):
    for f in reversed(listdir(rulepath)):
        display(Image (modelname+"/rules/"+f))
else:
    print("No rules directory found")

# + [markdown] editable=false
# ## Transition Visualization
#
# Transition Graph is the output graph produced by the Bigraphical Reactive System Interpreter.

# + editable=false hideCode=true hidePrompt=true jupyter={"source_hidden": true}
from IPython.display import Image

# Show the transition graph, if generated.
if os.path.isfile(modelname+"/tr_graph.png"):
    display(Image(modelname+"/tr_graph.png"))
else:
    print("No transition graph picture to show")

# + editable=false jupyter={"source_hidden": true}
from IPython.display import Image

# Show the transition labels picture, if generated.
if os.path.isfile(modelname+"/tr_labels.png"):
    display(Image(modelname+"/tr_labels.png"))
else:
    print("No transition labels to show")
Synkrisis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:fano_pub] # language: python # name: conda-env-fano_pub-py # --- # ## The sigmom fit result doesn't line up with the MCMC maximum. Is that a problem? # The goal of this notebook is to understand if we can still trust the MCMC results from the corr2 function despite the maximum not exactly matching the fit with the true (sigmom) function. # + import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline import h5py import corner import sys sys.path.append('../python/') from edw_data_util import * from prob_dist import * from EdwRes import * # - filename = 'data/edelweiss_corr1_systematicErrors_sampler_nll_allpars_corrAB_gausPrior_2844a28.h5' # + f = h5py.File(filename,'r') # need to store data in an array: # The sampler will now have a chains attribute # which is an array with shape (Nwalker,N,Ndim) # where N is the number of interations (500 in our inital run) # and Ndim is the number of fit parameters path='{}/{}/'.format('mcmc','sampler') aH = np.asarray(f[path+'aH']) C = np.asarray(f[path+'C']) m = np.asarray(f[path+'m']) scale = np.asarray(f[path+'scale']) A = np.asarray(f[path+'A']) B = np.asarray(f[path+'B']) samples = np.asarray(f[path+'samples']) f.close() # - ndim = 6 samples_corner = samples[:, 300:, :].reshape((-1, ndim)) # + par_idx = 4 hist_aH, bins_aH = np.histogram(np.ndarray.flatten(samples[:,:,par_idx]),bins=52) max_idx_aH = np.argmax(hist_aH) mcmc_aH = 0.5*(bins_aH[max_idx_aH] + bins_aH[max_idx_aH+1]) print (0.5*(bins_aH[max_idx_aH] + bins_aH[max_idx_aH+1])) # - plt.plot(bins_aH[:-1], hist_aH, '*') #plt.vlines(bins_aH[max_idx_aH], 0, 30000) plt.vlines(np.mean(np.ndarray.flatten(samples[:,:,par_idx])), 0, 35000) # + # aH, C, m, scale, A, B correspond to indices # 0, 1, 2, 3, 4, 5 mcmc_par_arr = [] print (mcmc_par_arr) for idx in 
range(0,6): hist_par, bins_par = np.histogram(np.ndarray.flatten(samples[:,:,idx]),bins=52) max_idx_par = np.argmax(hist_par) mcmc_par = 0.5*(bins_par[max_idx_par] + bins_par[max_idx_par+1]) print(idx, mcmc_par) mcmc_par_arr.append(mcmc_par) print (mcmc_par_arr) # - fig = corner.corner(samples_corner, labels=["aH", "$C$", "$m$", "scale", "A", "B"], truths=mcmc_par_arr) # ## Compare MCMC vs best-fit values evaluated with corr2 # + # don't go too fine on Erecoil, the sigmomEdw evaluations will take too long Erecoil_keV = np.linspace(10, 100, 20) # set the max mcmc values aH_mcmc, C_mcmc, m_mcmc, scale_mcmc, A_mcmc, B_mcmc = mcmc_par_arr V_mcmc = 4*scale_mcmc model_NR_0_mcmc = [series_NRQ_var_corr2(x, 0, V_mcmc, aH_mcmc, 1/18.0, A_mcmc, B_mcmc, 'GGA3') for x in Erecoil_keV] model_NR_mcmc = np.sqrt(np.power(C_mcmc + m_mcmc*Erecoil_keV, 2) + model_NR_0_mcmc) ## these are the best-fit values aH_fit, C_fit, m_fit, scale_fit = 3.81134613e-02, 3.32613445e-02, 1.11522962e-04, 9.94778557e-01 A_fit, B_fit = 1.53737587e-01, 1.70327657e-01 V_fit = 4*scale_fit model_NR_0 = [series_NRQ_var_corr2(x, 0, V_fit, aH_fit, 1/18.0, A_fit, B_fit, 'GGA3') for x in Erecoil_keV] model_NR_fit = np.sqrt(np.power(C_fit + m_fit*Erecoil_keV, 2) + model_NR_0) # import data from Edelweiss resNR_data = pd.read_csv("data/edelweiss_NRwidth_GGA3_data.txt", skiprows=1, \ names=['E_recoil', 'sig_NR', 'E_recoil_err', 'sig_NR_err'], \ delim_whitespace=True) # set the data up for the fits # Edelweiss discards first two NR points since they're affected by the threshold NR_data = {'Erecoil': resNR_data["E_recoil"][2::], 'sigma': resNR_data["sig_NR"][2::], 'sigma_err': resNR_data["sig_NR_err"][2::]} plt.errorbar(NR_data['Erecoil'], NR_data['sigma'], yerr=NR_data['sigma_err'], fmt="o", label="data") plt.plot(Erecoil_keV, model_NR_mcmc, color="k", lw=2, alpha=0.55) plt.plot(Erecoil_keV, model_NR_fit, color="c", lw=2, alpha=0.8) plt.xlim(10, 200) plt.ylim(0.04, 0.065) # - # ## Compare MCMC vs best fit parameters 
evaluated with sigmom # + #sigmomEdw(x, band='NR',label='GGA3',F=0.000001, V=V, aH=aH, alpha=1/18.0, A=A, B=B) model_NR_0_true_mcmc = [sigmomEdw(x, band='NR', label='GGA3', F=0.000001, V=V_mcmc, aH=aH_mcmc, alpha=1/18.0, A=A_mcmc, B=B_mcmc) for x in Erecoil_keV] model_NR_0_true = [sigmomEdw(x, band='NR', label='GGA3', F=0.000001, V=V_fit, aH=aH_fit, alpha=1/18.0, A=A_fit, B=B_fit) for x in Erecoil_keV] # - model_NR_true_mcmc = np.sqrt(np.power(C_mcmc + m_mcmc*Erecoil_keV, 2) + np.power(model_NR_0_true_mcmc,2)) model_NR_true = np.sqrt(np.power(C_fit + m_fit*Erecoil_keV, 2) + np.power(model_NR_0_true,2)) # + plt.errorbar(NR_data['Erecoil'], NR_data['sigma'], yerr=NR_data['sigma_err'], fmt="o", label="data") plt.plot(Erecoil_keV, model_NR_true_mcmc, color="k", lw=2, alpha=0.55) plt.plot(Erecoil_keV, model_NR_true, color="c", lw=2, alpha=0.8) plt.xlim(10, 200) plt.ylim(0.04, 0.065) # -
analysis_notebooks/mcmc_corr2_sigmom_corrAB_varianceEffect.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="JuGzpxT_Z1EU" colab_type="code" cellView="form" colab={} #@title Copyright 2020 Google LLC. Double-click here for license information. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="T4r2z30vJSbA" colab_type="text" # # NumPy UltraQuick Tutorial # # NumPy is a Python library for creating and manipulating vectors and matrices. This Colab is not an exhaustive tutorial on NumPy. Rather, this Colab teaches you just enough to use NumPy in the Colab exercises of Machine Learning Crash Course. # + [markdown] id="vO47lN3aDOAv" colab_type="text" # ## About Colabs # # Machine Learning Crash Course uses Colaboratories (**Colabs**) for all programming exercises. Colab is Google's implementation of [Jupyter Notebook](https://jupyter.org/). Like all Jupyter Notebooks, a Colab consists of two kinds of components: # # * **Text cells**, which contain explanations. You are currently reading a text cell. # * **Code cells**, which contain Python code for you to run. Code cells have a light gray background. # # You *read* the text cells and *run* the code cells. # # # # # # + [markdown] id="SQ-pvdPymocS" colab_type="text" # ### Running code cells # # You must run code cells in order. 
In other words, you may only run a code cell once all the code cells preceding it have already been run. # # To run a code cell: # # 1. Place the cursor anywhere inside the [ ] area at the top left of a code cell. The area inside the [ ] will display an arrow. # 2. Click the arrow. # # Alternatively, you may invoke **Runtime->Run all**. Note, though, that some of the code cells will fail because not all the coding is complete. (You'll complete the coding as part of the exercise.) # + [markdown] id="v9GhD7Fsmoqw" colab_type="text" # ### If you see errors... # # The most common reasons for seeing code cell errors are as follows: # # * You didn't run *all* of the code cells preceding the current code cell. # * If the code cell is labeled as a **Task**, then: # * You haven't yet written the code that implements the task. # * You did write the code, but the code contained errors. # + [markdown] id="Ll9RWewwFwX6" colab_type="text" # ## Import NumPy module # # Run the following code cell to import the NumPy module: # + id="guvPzSWYJGZ4" colab_type="code" colab={} import numpy as np # + [markdown] id="7cT9fXS_JUpa" colab_type="text" # ## Populate arrays with specific numbers # # Call `np.array` to create a NumPy matrix with your own hand-picked values. For example, the following call to `np.array` creates an 8-element vector: # + id="XxJR5xKpJbB3" colab_type="code" colab={} one_dimensional_array = np.array([1.2, 2.4, 3.5, 4.7, 6.1, 7.2, 8.3, 9.5]) print(one_dimensional_array) # + [markdown] id="RKywqhLTbR1M" colab_type="text" # You can also use `np.array` to create a two-dimensional matrix. To create a two-dimensional matrix, specify an extra layer of square brackets. For example, the following call creates a 3x2 matrix: # + id="_veGj18eMCDu" colab_type="code" colab={} two_dimensional_array = np.array([[6, 5], [11, 7], [4, 8]]) print(two_dimensional_array) # + [markdown] id="0ED7eug9CvGR" colab_type="text" # To populate a matrix with all zeroes, call `np.zeros`. 
To populate a matrix with all ones, call `np.ones`. # + [markdown] id="gEy_pdBoROu3" colab_type="text" # ## Populate arrays with sequences of numbers # # You can populate an array with a sequence of numbers: # + id="CjHfYWhdQYtO" colab_type="code" colab={} sequence_of_integers = np.arange(5, 12) print(sequence_of_integers) # + [markdown] id="1x3OoWrPWn8S" colab_type="text" # Notice that `np.arange` generates a sequence that includes the lower bound (5) but not the upper bound (12). # + [markdown] id="aiqqxDBINAOY" colab_type="text" # ## Populate arrays with random numbers # # NumPy provides various functions to populate matrices with random numbers across certain ranges. For example, `np.random.randint` generates random integers between a low and high value. The following call populates a 6-element vector with random integers between 50 and 100. # # # # + id="tG8ao9CsNqw8" colab_type="code" colab={} random_integers_between_50_and_100 = np.random.randint(low=50, high=101, size=(6)) print(random_integers_between_50_and_100) # + [markdown] id="BSU7lMUcgRm3" colab_type="text" # Note that the highest generated integer `np.random.randint` is one less than the `high` argument. # + [markdown] id="lQF6-Eg0ksqE" colab_type="text" # To create random floating-point values between 0.0 and 1.0, call `np.random.random`. For example: # + id="6Ny0eXZPk5Ax" colab_type="code" colab={} random_floats_between_0_and_1 = np.random.random([6]) print(random_floats_between_0_and_1) # + [markdown] id="eXOdSjRlSEf6" colab_type="text" # ## Mathematical Operations on NumPy Operands # # If you want to add or subtract two vectors or matrices, linear algebra requires that the two operands have the same dimensions. Furthermore, if you want to multiply two vectors or matrices, linear algebra imposes strict rules on the dimensional compatibility of operands. 
Fortunately, NumPy uses a trick called [**broadcasting**](https://developers.google.com/machine-learning/glossary/#broadcasting) to virtually expand the smaller operand to dimensions compatible for linear algebra. For example, the following operation uses broadcasting to add 2.0 to the value of every item in the vector created in the previous code cell: # + id="J5E5S0wjRvQr" colab_type="code" colab={} random_floats_between_2_and_3 = random_floats_between_0_and_1 + 2.0 print(random_floats_between_2_and_3) # + [markdown] id="x6K_poVDPpAg" colab_type="text" # The following operation also relies on broadcasting to multiply each cell in a vector by 3: # + id="tYjvXmvFPoPB" colab_type="code" colab={} random_integers_between_150_and_300 = random_integers_between_50_and_100 * 3 print(random_integers_between_150_and_300) # + [markdown] id="hfYVa8iQTaUL" colab_type="text" # ## Task 1: Create a Linear Dataset # # Your goal is to create a simple dataset consisting of a single feature and a label as follows: # # 1. Assign a sequence of integers from 6 to 20 (inclusive) to a NumPy array named `feature`. # 2. Assign 15 values to a NumPy array named `label` such that: # # ``` # label = (3)(feature) + 4 # ``` # For example, the first value for `label` should be: # # ``` # label = (3)(6) + 4 = 22 # ``` # + id="qK9UF2rUc3Y_" colab_type="code" colab={} feature = ? # write your code here print(feature) label = ? # write your code here print(label) # + id="KjtIAYvMTPGl" colab_type="code" cellView="form" colab={} #@title Double-click to see a possible solution to Task 1. feature = np.arange(6, 21) print(feature) label = (feature * 3) + 4 print(label) # + [markdown] id="RNsjGYRj87PB" colab_type="text" # ## Task 2: Add Some Noise to the Dataset # # To make your dataset a little more realistic, insert a little random noise into each element of the `label` array you already created. 
To be more precise, modify each value assigned to `label` by adding a *different* random floating-point value between -2 and +2. # # Don't rely on broadcasting. Instead, create a `noise` array having the same dimension as `label`. # + id="HF-flFfs9r0q" colab_type="code" colab={} noise = ? # write your code here print(noise) label = ? # write your code here print(label) # + id="7chgYKrC93np" colab_type="code" cellView="form" colab={} #@title Double-click to see a possible solution to Task 2. noise = (np.random.random([15]) * 4) - 2 print(noise) label = label + noise print(label)
ml/cc/exercises/numpy_ultraquick_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Pylazybam example usage
#
# <NAME>
#
# <small>The Walter and Eliza Hall Institute and The University of Melbourne</small>

# This example file is intended to give a flavour of the potential uses of pylazybam. As a library intended for interactive use in notebooks and bespoke scripts, it's intentionally very open ended and flexible.
#
# First you will need to install pylazybam following the instructions in the readme. There are no dependencies, so this should be easy even on Windows and ARM based linux, although you will have issues with anything other than trivial BAM files on a Raspberry Pi.

from pylazybam import bam
import gzip

# We import as `bam` as it gives an easy to type, short, safe and descriptive namespace.

# You can get help on pylazybam functions by using `help()` or in a notebook prefacing with a `?`

# ?bam.FileReader

# We are using data generated by the NYU genome centre from one of the well-known Genome in a Bottle references. This case has been sequenced to high depth, and mapped against a GRCh38 reference with decoys and ALTs

# It can be downloaded from `lftp ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR323/ERR3239484/NA12778.final.cram`

# The file we are using has had some manipulations done on it with `samtools view` but the exact manipulations have been lost to the mists of time and advancing years of the operator. Time for some bioinfosleuthing<sup>(tm)</sup> Let's open the file up and have a look at what we can see.

NA12778 = bam.FileReader(gzip.open('Genomes/NA12778_GRCh38_HLA_namesorted.bam'))

for headerline in NA12778.header.split('\n'):
    if headerline[:3] == '@PG':
        print(headerline)

# Ok so we can see a lot of programs have operated on this file, but there are no entries for samtools so we don't know the basis for the subsetting.
# Let's see if we can work it out.

from collections import Counter

# Tally how many alignments map to each reference index.
id_counts = Counter()
for align in NA12778:
    id_counts[bam.get_ref_index(align)] += 1
print(id_counts)

# We have over 1.2M reads from index 5, so that must be the chromosome that was specified in the samtools view region. We can look up its name:

NA12778.index_to_ref[5]

# Chromosome 6 makes sense as it is MHC related. Let's see if we can work out what the start and end of the selected regions were.

# Collect the mapped positions of every alignment on reference index 5
# (the reader is re-opened here because the earlier loop consumed it).
with bam.FileReader(gzip.open('Genomes/NA12778_GRCh38_HLA_namesorted.bam')) as NA12778:
    pos = []
    for align in NA12778:
        if bam.get_ref_index(align) == 5:
            pos.append(bam.get_pos(align))
    print(f'Reads from {NA12778.index_to_ref[5]} start at {min(pos)} and end at {max(pos)}')

# Given that reads overlapping the region and their pairs will be included in the output, it looks like samtools view - chr:28510000-33480000 is the most likely command used to generate the file, likely converting from CRAM to BAM in the process. Now let's check if it has been sorted

NA12778 = bam.FileReader(gzip.open('Genomes/NA12778_GRCh38_HLA_namesorted.bam'))
print(NA12778.sort_order)

# So just like it suggests in the filename this BAM has reads that map to the HLA region on chromosome 6 in a namesorted order. This is just what we need for the next step which is selecting those reads that have mappings on the HLA alt chromosomes, and writing them to a new file.
#
# To do this we will set up a bam.FileReader and a bam.FileWriter. We will copy across the header information for the new file from the original, and we will add an @PG line to indicate what we have done. It's a bit hard to make it completely reproducible with an ad hoc script, but we can show we did something and indicate our intent.
NA12778 = bam.FileReader(gzip.open('Genomes/NA12778_GRCh38_HLA_namesorted.bam'))
NA12778_any_HLA_hits = bam.FileWriter('Genomes/NA12778_any_HLA_hits.bam')

# Copy the header and reference blocks from the source BAM, record what we did
# in a new @PG header entry, then write the header out.
NA12778_any_HLA_hits.raw_header = NA12778.raw_header
NA12778_any_HLA_hits.raw_refs = NA12778.raw_refs
NA12778_any_HLA_hits.update_header(id = 'pylazybam_any_HLA',
                                   program = 'pylazybam',
                                   version = '0.1.0',
                                   description = "all alignments for reads where any alignment maps to a HLA alt contig")
NA12778_any_HLA_hits.write_header()

# We have a file to read from, a file to write to, and all we need now are some functions to manipulate the reads along the way. The first function is to get all the alignments that belong to a given read pair at once.

def get_alignment_batches(in_bam):
    """Yield lists of raw alignments that share the same read name.

    Assumes `in_bam` is name-sorted, so all alignments for a read (pair)
    are adjacent in the stream.
    """
    previous_name = None
    alignbatch = []
    for align in in_bam:
        name = bam.get_raw_read_name(align, bam.get_len_read_name(align))
        if previous_name and name != previous_name:
            # A new read name has started: flush the completed batch.
            yield alignbatch
            alignbatch = []
        alignbatch.append(align)
        previous_name = name
    if alignbatch:
        # Flush the final batch at end of file.
        yield alignbatch

# Now we need a function that iterates through the batches of alignments and make a decision on each of the alignments. In our case it is going to find alignment batches where any alignment has a reference assignment that we are interested in.

def any_align_maps_to_ref(in_bam, refs = (0,)):
    """Yield alignment batches where any alignment maps to a ref in `refs`.

    `refs` is an iterable of reference indices (see FileReader.ref_to_index).
    The default was changed from the mutable list [0,] to a tuple: mutable
    default arguments are shared across calls and are a classic pitfall.
    """
    targets = set(refs)  # set membership is O(1) per alignment
    # Iterate through groups of alignments from the same read
    for alignbatch in get_alignment_batches(in_bam):
        # Iterate through alignments from the same read
        for align in alignbatch:
            if bam.get_ref_index(align) in targets:
                yield alignbatch
                break

# Ok so what alignment ids are we interested in? They are not human friendly so we will need to look them up in the `ref_to_index` dictionary on the `FileReader`

HLA_refnames = [x for x in NA12778.ref_to_index.keys() if 'HLA' in x]
HLA_refids = [NA12778.ref_to_index[x] for x in HLA_refnames]

# So now that we have all the pieces we need lets collect out the reads we are interested in looking at.
# We are going to use timeit so we have an idea of what the performance is like.

# %%timeit -n 1 -r 1
# Copy every alignment batch with an HLA alt-contig hit into the output BAM.
for alignbatch in any_align_maps_to_ref(NA12778, HLA_refids):
    for align in alignbatch:
        NA12778_any_HLA_hits.write(align)

#we will close the file to ensure the end of file marker is correctly written
NA12778_any_HLA_hits.close()

# That took 20s on a seven year old MacBook Pro for 1.2M reads. Reasonable for interactive use. More typical files of 100M reads you will want to go to lunch. Something like the original 700M read file will take 3-4h.

# ## Plotting AS vs XS scores
#
# Now that we have the reads we are interested in one file we can ask a slightly more interesting question - what does the distribution of AS (reported primary alignment) and XS (next best alignment) scores look like for these reads.

# Collect (AS, XS) score pairs for every primary (non-secondary) alignment.
NA12778_any_HLA_hits = bam.FileReader(gzip.open('Genomes/NA12778_any_HLA_hits.bam'))
primary_AS_XS = []
for align in NA12778_any_HLA_hits:
    if not bam.is_flag(align, bam.FLAGS['secondary']):
        primary_AS_XS.append((bam.get_AS(align,no_tag=0),bam.get_XS(align,no_tag=0)))
print(primary_AS_XS[:5], len(primary_AS_XS))

from matplotlib.pyplot import hexbin, figure

# 2D density (hexbin) of AS vs XS on a log colour scale.
fig = figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1, aspect=1)
ax.set_xlim(120,152)
ax.set_ylim(120,152)
ax.set_ylabel("XS - Alignment score of the next best alignment")
ax.set_xlabel("AS - Alignment score of the primary read")
ax.set_title("AS vs XS scores for HLA reads with secondary mapping to a GRCh38 HLA alt contig")
hexbin(x=[x[0] for x in primary_AS_XS],y=[y[1] for y in primary_AS_XS], gridsize=75, bins='log', cmap='Greys')

# This is a simple example, but by combining the alignbatch approach above more complex examples that look at relationships between different alignments of a read pair and/or factors like the presence and type of variants can easily be constructed.
# ## Outputing reads as fastq

# Sometimes it is useful to output a subset of reads for further analysis,
# and occasionally you will want to do this in fastq format

# +
# A single raw alignment record used to sanity-check align_to_fastq() below.
test = b'\xcd\x01\x00\x00\xf5\x0c\x00\x00^\x08\x00\x00\'\tI\x12\x02\x00s\x08\x96\x00\x00\x00\x0b\x00\x00\x00h\x88\xeb\x04\xd0\x01\x00\x00A00132:55:HCKWTDSXX:1:1102:27263:27759\x00\xf0\x01\x00\x00t\x07\x00\x00\x14\x11\x14\x11\x14\x11\x11\x14A\x14A\x14A\x14!"A\x14A\x14B\x12"\x12A\x14A\x14B\x14A\x11\x14!A!\x81!D(A\x11HA\x14A\x18A\x11\x12\x14(\x18\x82!\x84!\x11\x84H\x11"\x11\x11\x81!B\x11AHA\x88\x12\x12\x88\x1e\x1e\x1e\x1e\x1e\n\x1e\n\x1e\n\n\x1e\n\n\x1e\n\x1e\x1e\x1e\x1e\x1e\n\x1e\n\n\n\x14\x14\n\n\n\x04\n\x1e\x1e\n\n\x1e\x14\n\x14\n\n\n\n\n\n\x04\n\x1e\x1e\n\n\x1e\x14\n\x1e\n\x1e\n\n\x1e\x1e\x1e\x14\x1e\n\x1e\n\n\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\n\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\n\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\n\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\n\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x1e\x14\x1eSAZchr12,82544431,+,102M48S,60,3;\x00XAZchr12,+82544561,122S28M,0;\x00MCZ150M\x00PGZMarkDuplicates\x00ASc\x1eXSc\x1cMDZ0G30\x00NMI\x01\x00\x00\x00RGZNA12778_CTCACCAA-CTAGGCAA_HCKWTDSXX_L001\x00'


def align_to_fastq(align):
    """Convert one raw BAM alignment record into a four-line fastq entry.

    Decodes the read name, sequence and base qualities from the raw record
    and returns them formatted as ``@name\\nseq\\n+\\nqual\\n``.
    """
    readname_length = bam.get_len_read_name(align)
    number_cigar_operations = bam.get_number_cigar_operations(align)
    sequence_length = bam.get_len_sequence(align)
    readname = bam.get_read_name(align, readname_length)
    sequence = bam.decode_sequence(
        bam.get_raw_sequence(align, readname_length,
                             number_cigar_operations, sequence_length))
    qual = bam.decode_base_qual(
        bam.get_raw_base_qual(align, readname_length,
                              number_cigar_operations, sequence_length))
    return f"@{readname}\n{sequence}\n+\n{qual}\n"


print(align_to_fastq(test))
# -

# BUG FIX: the outputs are named *.fastq.gz but were opened with plain
# open(..., 'w'), producing uncompressed files behind a misleading extension.
# Open them with gzip.open() in text mode so the contents really are
# gzip-compressed.
fastqf = gzip.open('NA12778_any_HLA_hits.R1.fastq.gz', 'wt')
fastqr = gzip.open('NA12778_any_HLA_hits.R2.fastq.gz', 'wt')
NA12778_any_HLA_hits = bam.FileReader(gzip.open('Genomes/NA12778_any_HLA_hits.bam'))

for align in NA12778_any_HLA_hits:
    # Only primary alignments are exported; strand decides R1 vs R2.
    if not bam.is_flag(align, bam.FLAGS['secondary']):
        if bam.is_flag(align, bam.FLAGS['forward']):
            fastqf.write(align_to_fastq(align))
        else:
            fastqr.write(align_to_fastq(align))

fastqf.close()
fastqr.close()

# ### your uses
# If you have an interesting use of pylazybam feel free to send a pull
# request against the readme and I will add it to a list of contributed
# example scripts
example_usage.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pattom72/dw_matrix_car/blob/master/day3_simple_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# +
# #!pip install --upgrade tables
# #!pip install eli5

# +
import pandas as pd
import numpy as np

from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score

import eli5
from eli5.sklearn import PermutationImportance

# +
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two"

# +
df = pd.read_hdf('data/car.h5')
df.shape

# +
df.columns

# + [markdown]
# ## Dummy Model

# +
df.select_dtypes(np.number).columns

# +
# Baseline: a DummyRegressor on an uninformative feature gives the MAE floor
# any real model has to beat.
feats = ['car_id']
x = df[feats].values
y = df['price_value'].values

model = DummyRegressor()
model.fit(x, y)
y_pred = model.predict(x)
mae(y, y_pred)

# +
[x for x in df.columns if 'price' in x]

# +
df['price_currency'].value_counts()

# +
# Keep PLN-priced cars only; mixing currencies would corrupt the target.
df = df[df['price_currency'] != 'EUR']
df.shape

# + [markdown]
# ## Features

# +
# Factorize every non-list column into an integer-coded "<name>__cat" copy.
SUFFIX_CAT = '__cat'
for feat in df.columns:
    if isinstance(df[feat][0], list):
        continue

    factorized_values = df[feat].factorize()[0]
    if SUFFIX_CAT in feat:
        df[feat] = factorized_values
    else:
        # BUG FIX: the original assigned df[feat + SUFFIX_CAT] twice in a
        # row (the second time from a redundant re-factorize of the same
        # column); one assignment is equivalent and avoids duplicate work.
        df[feat + SUFFIX_CAT] = factorized_values

# +
cat_feats = [x for x in df.columns if SUFFIX_CAT in x]
cat_feats = [x for x in cat_feats if 'price' not in x]
len(cat_feats)

# +
x = df[cat_feats].values
y = df['price_value'].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, x, y, cv=3, scoring='neg_mean_absolute_error')
np.mean(scores)

# +
m = DecisionTreeRegressor(max_depth=5)
m.fit(x, y)

imp = PermutationImportance(m, random_state=0).fit(x, y)
eli5.show_weights(imp, feature_names=cat_feats)


# +
def group_and_barplot(feat_groupby, feat_agg='price_value',
                      agg_func=[np.mean, np.median, np.size],
                      feat_sort='size', top=50, subplots=True):
    """Aggregate *feat_agg* per *feat_groupby* group of the global ``df`` and
    bar-plot the *top* groups ranked (descending) by *feat_sort*."""
    grouped = df.groupby(feat_groupby)[feat_agg]
    summary = grouped.agg(agg_func)
    ranked = summary.sort_values(by=feat_sort, ascending=False)
    ranked.head(top).plot(kind='bar', figsize=(15, 10), subplots=subplots)


# +
group_and_barplot('param_napęd__cat');

# +
day3_simple_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %run preproc.ipynb
# !jupyter kernelspec list
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
import sys
print('Python3 executable is ', sys.executable)
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingClassifier
# -

# ### First load the data from a csv

# BUG FIX: np.str / np.bool were deprecated aliases of the builtins and have
# been removed from NumPy (>= 1.24); use str / bool directly. The original
# dtype dict also listed the "advisor" key twice.
data = pd.read_csv(
    filepath_or_buffer="githubadvisedata.csv",
    index_col="index",
    dtype={"buysell": str, "durationtype": str, "advisor": str,
           "otheradvices": str, "symbolname": str, "success": bool,
           "niftysentiment": str})

# ### Next, create a dataframe from the contents. LabelEncode the symbolname
# ### and leadername fields since later scatter plot functions expect
# ### numerical values for x and y axes
# ### Also sort the dataframe first based on the ascending order of success
# ### fraction per leader, and secondly based on the same per symbolname
# ### Create two scatter plots where you plot the success (True or False) in
# ### two different colors (yellow for True and brown for False). In the
# ### first plot advisor is the x-axis and the position of the symbolname in
# ### the dataframe, sorted as mentioned above, is the y-axis. In the second
# ### plot, symbolname is the y-axis and the position of the advisor in the
# ### dataframe, sorted as mentioned above, is the x-axis
# ### As you will notice, the first scatter plot is brown at the bottom and
# ### progressively turns more yellow towards the top
# ### Similarly, the second plot is more brown to the left and progressively
# ### turns more yellow towards the right

# +
#plt.scatter(alpha=0.5)
tempdata = pd.DataFrame(data=data, copy=True)
symbol_encoder = LabelEncoder()
leader_encoder = LabelEncoder()
tempdata["symbolname"] = symbol_encoder.fit_transform(data["symbolname"])
tempdata["advisor"] = leader_encoder.fit_transform(data["advisor"])  # pd.to_numeric(data["advisor"])
#tempdata.groupby("advisor").apply(lambda x: x[x=='True'].count())
#tempdata.groupby("advisor").agg({"success": (lambda x: x[x=='True'].count())})


def aggFunc(threshold):
    """Return an aggregator that counts True successes in a group, but only
    for groups with at least *threshold* advices (smaller groups score 0)."""
    def func(x):
        c = x.count()
        if (c >= threshold):
            return x[x == True].count()
        else:
            return 0
    return func


# Success fraction per advisor (groups smaller than 10 advices score 0).
leader_successes = (tempdata.groupby("advisor").agg({"success": aggFunc(10)}) /
                    tempdata.groupby("advisor").count()).sort_values(by="success", ascending=False)
leader_successes = leader_successes[["success"]]
# Success fraction per symbol (groups smaller than 5 advices score 0).
symbolname_successes = (tempdata.groupby("symbolname").agg({"success": aggFunc(5)}) /
                        tempdata.groupby("symbolname").count()).sort_values(by="success", ascending=False)
symbolname_successes = symbolname_successes[["success"]]
tempdata['advisor_successes'] = pd.Series(
    data=[leader_successes.loc[ll]["success"] for ll in tempdata['advisor']],
    index=tempdata.index)
tempdata["symbolname_successes"] = pd.Series(
    data=[symbolname_successes.loc[ss]["success"] for ss in tempdata['symbolname']],
    index=tempdata.index)
tempdata["success"] = LabelEncoder().fit_transform(data["success"])
tempdata = tempdata[["symbolname", "advisor", "success", "symbolname_successes",
                     "advisor_successes", "durationtype"]].sort_values(
    by=["symbolname_successes", "symbolname"], ascending=True)
tempdata.assign(sequence_no=range(len(tempdata))).plot.scatter(
    alpha=0.5, c='success', colormap='viridis',
    y="sequence_no", x="advisor", figsize=(5, 5))
print(tempdata.assign(sequence_no=range(len(tempdata))))

othertempdata = tempdata[["symbolname", "advisor", "success", "symbolname_successes",
                          "advisor_successes", "durationtype"]].sort_values(
    by=["advisor_successes", "advisor"], ascending=True)
othertempdata.assign(sequence_no=range(len(othertempdata))).plot.scatter(
    alpha=0.5, c='success', colormap='viridis',
    y="symbolname", x="sequence_no", figsize=(5, 5))
print(othertempdata.assign(sequence_no=range(len(othertempdata))))
# -

# ### The following symbol advises have had large SUCCESS

tdata = tempdata[tempdata["symbolname_successes"] > 0.8]
list(zip(symbol_encoder.classes_[tdata["symbolname"].values],
         tdata["symbolname_successes"].values,
         tdata["durationtype"].values))

# ### The following leader advises have had large SUCCESS

tdata = othertempdata[othertempdata["advisor_successes"] > 0.7]
list(zip(leader_encoder.classes_[tdata["advisor"].values],
         tdata["advisor_successes"].values,
         tdata["durationtype"].values))

# ### Try the VotingClassifier ensemble predictor

# +
from sklearn.ensemble import VotingClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier

# FullPipeline comes from preproc.ipynb (pulled in by the %run above).
pipeline = FullPipeline()
tempdata = data.copy()
tempdata["success"] = pd.Series([str(x) for x in data["success"].values], index=tempdata.index)
non_num_attrs = ["advisor", "durationtype", "buysell", "symbolname", "niftysentiment"]
data_prepared = pipeline.full_pipeline_apply_features(tempdata, non_num_attrs)
label_prepared = pipeline.full_pipeline_apply_labels(tempdata, ["success"]).ravel()
data_train, data_test, label_train, label_test = train_test_split(
    data_prepared, label_prepared, test_size=0.20)

# An earlier "hard"-voting ensemble, kept commented out for reference:
#vote_classifier = VotingClassifier(
#    estimators=[('rfclassifier', RandomForestClassifier(n_estimators=25, random_state=42, max_depth=60)),
#                ('svc', SVC(C=1000, degree=4, gamma='scale', kernel='poly', coef0=0.1, decision_function_shape='ovo')),
#                ('naive_bayes', MultinomialNB(alpha=1.12))],
#    voting="hard")
vote_classifier = VotingClassifier(
    estimators=[('dtree1', DecisionTreeClassifier(random_state=312, max_depth=5, criterion="gini")),
                ('dtree2', DecisionTreeClassifier(random_state=42, min_samples_split=350, criterion="gini")),
                ('rfclassifier', RandomForestClassifier(n_estimators=1000, random_state=23, max_depth=10))],
    voting="soft")
vote_classifier.fit(data_train, label_train)
# -

vote_classifier.score(X=data_test, y=label_test)

# ### Now lets try Bagging Classifiers

# +
from sklearn.ensemble import BaggingClassifier

bg_classifier = BaggingClassifier(random_state=49, base_estimator=DecisionTreeClassifier(),
                                  n_estimators=200, bootstrap=True,
                                  max_features=720, max_samples=200)
bg_classifier.fit(data_train, label_train)
bg_classifier.score(data_test, label_test)
# -

# ### Pasting classifier

pst_classifier = BaggingClassifier(random_state=49, base_estimator=DecisionTreeClassifier(),
                                   n_estimators=299, bootstrap=False,
                                   max_features=720, max_samples=200)
pst_classifier.fit(data_train, label_train)
pst_classifier.score(data_test, label_test)

# ### Ada Boost Classifier

# +
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import mean_squared_error

ada_classifier = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=4),
                                    learning_rate=0.70, n_estimators=120,
                                    random_state=49, algorithm="SAMME.R")
ada_classifier.fit(data_train, label_train)
print(ada_classifier.score(data_test, label_test))
ada_classifier.set_params(n_estimators=120)
# Pick the boosting-stage count with the lowest test error.
errors = [mean_squared_error(label_test, y_pred)
          for y_pred in ada_classifier.staged_predict(data_test)]
bst_n_estimators = np.argmin(errors)
print('bst_n_estimators', bst_n_estimators)
ada_best = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=4),
                              learning_rate=0.70, n_estimators=bst_n_estimators,
                              random_state=49, algorithm="SAMME.R")
ada_best.fit(data_train, label_train)
ada_best.score(data_test, label_test)
# -

# ### Gradient Boost Classifier

# +
from sklearn.ensemble import GradientBoostingClassifier

grad_classifier = GradientBoostingClassifier(max_depth=4, learning_rate=0.70,
                                             n_estimators=120, random_state=42)
grad_classifier.fit(data_train, label_train)
errors = [mean_squared_error(label_test, y_pred)
          for y_pred in grad_classifier.staged_predict(data_test)]
bst_n_estimators = np.argmin(errors)
grad_best = GradientBoostingClassifier(max_depth=4, n_estimators=bst_n_estimators)
print('bst_n_estimators', bst_n_estimators)
grad_best.fit(data_train, label_train)
grad_best.score(data_test, label_test)
# -

# ### A stacked generalization classifier. Use a RandomForest classifier at the end

# +
svc_classifier = SVC(C=1900, degree=4, gamma='scale', kernel='poly', coef0=0.1,
                     decision_function_shape='ovo', probability=True)
svc_classifier.fit(data_train, label_train)
print(svc_classifier.score(data_test, label_test))
svc_probas = svc_classifier.predict_proba(data_train)[:, 1]

rnd_classifier = RandomForestClassifier(bootstrap=True, max_depth=4, n_estimators=1000,
                                        n_jobs=-1, random_state=42, oob_score=True)
rnd_classifier.fit(data_train, label_train)
rndf_probas = rnd_classifier.predict_proba(data_train)[:, 1]

# +
from sklearn.metrics import precision_recall_curve, precision_score, recall_score, roc_auc_score, roc_curve

# plot_precision_recall_vs_threshold / plot_roc_curve come from preproc.ipynb.
svc_test_probas = svc_classifier.predict_proba(data_test)[:, 1]
precisions, recalls, thresholds = precision_recall_curve(probas_pred=svc_test_probas, y_true=label_test)
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
print(roc_auc_score(y_score=svc_test_probas, y_true=label_test))
fpr, tpr, thresholds = roc_curve(y_score=svc_test_probas, y_true=label_test)
plt.subplot(1, 2, 2)
plot_roc_curve(fpr=fpr, tpr=tpr)

# Same precision/recall and ROC diagnostics for the random forest.
rndf_test_probas = rnd_classifier.predict_proba(data_test)[:, 1]
precisions, recalls, thresholds = precision_recall_curve(probas_pred=rndf_test_probas, y_true=label_test)
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
print(roc_auc_score(y_score=rndf_test_probas, y_true=label_test))
fpr, tpr, thresholds = roc_curve(y_score=rndf_test_probas, y_true=label_test)
plt.subplot(1, 2, 2)
plot_roc_curve(fpr=fpr, tpr=tpr)
# -

data_train.shape

# Level-1 stacking features: the base models' hard predictions.
svc_preds = svc_classifier.predict(data_train)
svc_test_preds = svc_classifier.predict(data_test)
rndf_preds = rnd_classifier.predict(data_train)
rndf_test_preds = rnd_classifier.predict(data_test)
data_train_stack = pd.DataFrame({"svc_preds": svc_preds, "rndf_preds": rndf_preds})
#data_train_stack,data_test_stack,label_train_stack,label_test_stack = train_test_split(data_stack,label_train,test_size=0.2)
data_test_stack = pd.DataFrame({"svc_preds": svc_test_preds, "rndf_preds": rndf_test_preds})

rnd_classifier = RandomForestClassifier(bootstrap=True, max_depth=4, n_estimators=1000,
                                        n_jobs=-1, random_state=42, oob_score=True)
rnd_classifier.fit(data_train_stack, label_train)
print(rnd_classifier.score(data_test_stack, label_test))
print(rnd_classifier.oob_score_)

# ### Results with stacking not quite satisfactory. Let's try Voting. I am
# ### desperate to somehow get a better score than SVC (68.4)

vot_classifier = VotingClassifier(estimators=[("svc", svc_classifier), ("rndf", rnd_classifier)],
                                  voting="soft")
vot_classifier.fit(data_train, label_train)
vot_classifier.score(data_test, label_test)

# ### That was not too bad. Let us try a DecisionTreeClassifier for the stack
# ### classifier

# +
dt_classifier = DecisionTreeClassifier(max_depth=3, random_state=49)
dt_classifier.fit(data_train_stack, label_train)
stack_probas = dt_classifier.predict_proba(data_test_stack)[:, 1]
precisions, recalls, thresholds = precision_recall_curve(probas_pred=stack_probas, y_true=label_test)
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
print(roc_auc_score(y_score=stack_probas, y_true=label_test))
fpr, tpr, thresholds = roc_curve(y_score=stack_probas, y_true=label_test)
plt.subplot(1, 2, 2)
plot_roc_curve(fpr=fpr, tpr=tpr)
print("score", dt_classifier.score(data_test_stack, label_test))
# -

data_train.shape

# ### Let us avoid binarizing and see what happens when we use Ada boost - of
# ### course we can then use only Tree based classifiers

label_test

# DataFrameSelector / MyMultiLabelEncoder come from preproc.ipynb.
pipel = Pipeline(steps=[('select', DataFrameSelector(non_num_attrs)),
                        ('encode', MyMultiLabelEncoder())])
data_prepared_encoded = pipel.fit_transform(tempdata)

data_train, data_test, label_train, label_test = train_test_split(
    data_prepared_encoded, label_prepared, test_size=0.20)
ada_classifier = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=4),
                                    learning_rate=0.7, n_estimators=120,
                                    random_state=49, algorithm="SAMME.R")
ada_classifier.fit(data_train, label_train)
errors = [mean_squared_error(label_test, y_pred)
          for y_pred in ada_classifier.staged_predict(data_test)]
bst_n_estimators = np.argmin(errors)
print('bst_n_estimators', bst_n_estimators)
ada_best = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=4),
                              learning_rate=0.7, n_estimators=bst_n_estimators,
                              random_state=49, algorithm="SAMME.R")
ada_best.fit(data_train, label_train)
print('score', ada_best.score(data_test, label_test))
sorted(zip(ada_best.feature_importances_, non_num_attrs), reverse=True)

# +
from sklearn.model_selection import GridSearchCV

dtree = DecisionTreeClassifier()
params_grid = [{"max_features": [1, 2, 3, 4, 5],
                "max_depth": [6, 3, 4, 5],
                "random_state": [42]}]
grid_search = GridSearchCV(cv=5, estimator=dtree, param_grid=params_grid)
grid_search.fit(data_train, label_train)
grid_search.best_params_
#grid_search.best_score_
DecisionTreeClassifier(max_depth=5, max_features=5, random_state=42).fit(
    data_train, label_train).score(data_test, label_test)
# -

ada_classifier = AdaBoostClassifier(
    base_estimator=DecisionTreeClassifier(max_depth=5, max_features=5, random_state=42),
    learning_rate=0.14, n_estimators=90, random_state=42, algorithm="SAMME.R")
ada_classifier.fit(data_train, label_train)
ada_classifier.score(data_test, label_test)

from sklearn.naive_bayes import GaussianNB

mnb_classifier = GaussianNB(var_smoothing=8e-07)
mnb_classifier.fit(data_train.toarray(), label_train)
mnb_classifier.score(data_test.toarray(), label_test)
#mnb_classifier.get_params()

# ### Lets try our luck with ExtraTreesClassifier

from sklearn.ensemble import ExtraTreesClassifier

ex_classifier = ExtraTreesClassifier(bootstrap=True, criterion='entropy', max_depth=4,
                                     random_state=49, n_estimators=1000, n_jobs=-1)
ex_classifier.fit(data_train, label_train)
ex_classifier.score(data_test, label_test)

# ### Dimension Reduction. Let us start with PCA
# ### Restore data_train and data_test to original (binarized) version

data_train, data_test, label_train, label_test = train_test_split(
    data_prepared, label_prepared, test_size=0.20)
data_train.shape, data_test.shape

# +
from sklearn.decomposition import PCA

# Keep enough principal components to explain 95% of the variance.
pca = PCA(n_components=0.95)
X_reduced = pca.fit_transform(data_train.toarray())
n_components = pca.n_components_
print(n_components, data_train.shape[1])
X_test_reduced = pca.transform(data_test.toarray())
svc_classifier = SVC(C=1900, degree=5, gamma='scale', kernel='poly', coef0=0.1,
                     decision_function_shape='ovo')
svc_classifier.fit(X_reduced, label_train)
print(svc_classifier.score(X_test_reduced, label_test))
# -

# ## Use PCA to reduce dimensions then use RandForest with bagging, and
# ## max_features and max_depth and oob_score to get a good estimate of error

# +
pca = PCA(n_components=0.95)
X_reduced = pca.fit_transform(data_train.toarray())
n_components = pca.n_components_
print(n_components)
X_test_reduced = pca.transform(data_test.toarray())
# -

F = int(np.log2(n_components) + 1)
rndf_classifier = RandomForestClassifier(bootstrap=True, n_estimators=1000, max_depth=10,
                                         max_features=F, n_jobs=-1, random_state=42,
                                         oob_score=True)
rndf_classifier.fit(X_reduced, label_train)
print(rndf_classifier.score(X_test_reduced, label_test))
rndf_classifier.oob_score_

rndf_classifier.oob_decision_function_

len(label_train)

# ### Now kernel PCA

# +
from sklearn.decomposition import KernelPCA

pca = KernelPCA(gamma=0.04, kernel="rbf", n_components=200)
X_reduced = pca.fit_transform(data_train)
X_test_reduced = pca.transform(data_test.toarray())
rndf_classifier = RandomForestClassifier(bootstrap=True, max_depth=4, n_estimators=1000,
                                         n_jobs=-1, random_state=42, oob_score=True)
rndf_classifier.fit(X_reduced, label_train)
print(rndf_classifier.score(X_test_reduced, label_test))
# -

from sklearn.model_selection import GridSearchCV

pipeline = Pipeline(steps=[("kernel_pca", KernelPCA()),
                           ("dtree", DecisionTreeClassifier(max_depth=3, random_state=49))])
params_grid = [{"kernel_pca__n_components": [170, 276, 400],
                "kernel_pca__gamma": [0.03, 0.05, 1.0],
                "kernel_pca__kernel": ["rbf", "sigmoid"]}]
grid_search = GridSearchCV(estimator=pipeline, cv=3, param_grid=params_grid)
grid_search.fit(data_train, label_train)
grid_search.best_params_

n_components = grid_search.best_params_["kernel_pca__n_components"]
n_components

# +
from sklearn.model_selection import cross_val_score

pipeline = Pipeline(steps=[("kernel_pca", KernelPCA(gamma=1.0, kernel="sigmoid",
                                                    n_components=n_components)),
                           ("dtree", DecisionTreeClassifier(max_depth=3, random_state=49))])
cross_val_score(estimator=pipeline, X=data_train, y=label_train, cv=3, n_jobs=-1)

# +
transformer = KernelPCA(gamma=1.0, kernel="sigmoid", n_components=n_components)
X_reduced = transformer.fit_transform(data_train)
X_test_reduced = transformer.transform(data_test)
# -

F = int(np.log2(n_components) + 1)
rndf_classifier = RandomForestClassifier(bootstrap=True, max_depth=4, max_features=F,
                                         n_estimators=1000, n_jobs=-1, random_state=42,
                                         oob_score=True)
rndf_classifier.fit(X_reduced, label_train)
print('score on test data', rndf_classifier.score(X_test_reduced, label_test))
print('oob_score', rndf_classifier.oob_score_)

# +
probas = rndf_classifier.predict_proba(X=X_test_reduced)[:, 1]
print(len(probas), label_test.shape)
precisions, recalls, thresholds = precision_recall_curve(probas_pred=probas, y_true=label_test)
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
# -

# ## Use the Lasso classifier (you need to use LogisticRegression with the
# ## right parameters) and detect attributes that can be removed because the
# ## coefficients (theta) obtained are zero

# +
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline

lasso_classifier = LogisticRegressionCV(Cs=[1.0], max_iter=100, cv=5, random_state=42,
                                        n_jobs=-1, penalty='l1', solver='saga')
lasso_classifier.fit(X_reduced, label_train)
print('score on test data', lasso_classifier.score(X_test_reduced, label_test))
# -

n_components = np.count_nonzero(lasso_classifier.coef_[0])
print(n_components)

# Drop every feature whose L1-regularized coefficient was driven to zero.
X_reduced_after_lasso = X_reduced[:, lasso_classifier.coef_[0] != 0.0]
X_test_reduced_after_lasso = X_test_reduced[:, lasso_classifier.coef_[0] != 0.0]

lasso_classifier = LogisticRegressionCV(Cs=[1.0], max_iter=100, cv=5, random_state=42,
                                        n_jobs=-1, penalty='l1', solver='saga')
lasso_classifier.fit(X_reduced_after_lasso, label_train)
print('score on test data', lasso_classifier.score(X_test_reduced_after_lasso, label_test))

# NOTE(review): loss='log' was renamed to 'log_loss' in scikit-learn >= 1.1
# and removed in 1.3; kept as 'log' for the version this notebook targets.
lasso_sgd_classifier = SGDClassifier(alpha=0.1, loss='log', penalty='l2', n_jobs=-1,
                                     random_state=42, max_iter=1000)
lasso_sgd_classifier.fit(X_reduced, label_train)
print('score on test data', lasso_sgd_classifier.score(X_test_reduced, label_test))

# ### Use the reduced attributes for Random Forest again

F = int(np.log2(127) + 1)
rndf_classifier = RandomForestClassifier(bootstrap=True, max_depth=9, max_features=F,
                                         n_estimators=1000, n_jobs=-1, random_state=42,
                                         oob_score=True)
rndf_classifier.fit(X_reduced_after_lasso, label_train)
print('score on test data', rndf_classifier.score(X_test_reduced_after_lasso, label_test))
print('oob_score', rndf_classifier.oob_score_)

# + active=""
# # max_depth Comment
# score on test data 0.6661849710982659 3
# oob_score 0.6448699421965318
#
# score on test data 0.6685934489402697 4
# oob_score 0.6467967244701349
#
# score on test data 0.6724470134874759 5 peak for testdata
# oob_score 0.653179190751445
#
# score on test data 0.6695568400770713 6
# oob_score 0.6541425818882466
#
# score on test data 0.6652215799614644 7
# oob_score 0.6587186897880539
#
# score on test data 0.6594412331406551 8 peak for oob_score
# oob_score 0.6588391136801541
#
# score on test data 0.6560693641618497 9
# oob_score 0.6585982658959537
#
# -

# ### Let us see how SVC does on this reduced data

svc_classifier = SVC(C=1900, degree=4, gamma='scale', kernel='poly', coef0=0.1,
                     decision_function_shape='ovo', probability=True)
svc_classifier.fit(X_reduced_after_lasso, label_train)
print(svc_classifier.score(X_test_reduced_after_lasso, label_test))

# ### And what about Ada boost on the reduced data

# +
ada_classifier = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=1),
                                    learning_rate=0.3, n_estimators=120,
                                    random_state=49, algorithm="SAMME.R")
ada_classifier.fit(X_reduced_after_lasso, label_train)
print(ada_classifier.score(X_test_reduced_after_lasso, label_test))
ada_classifier.set_params(n_estimators=120)
# Choose the boosting-stage count with the lowest staged test error.
errors = [mean_squared_error(label_test, y_pred)
          for y_pred in ada_classifier.staged_predict(X_test_reduced_after_lasso)]
bst_n_estimators = np.argmin(errors)
print('bst_n_estimators', bst_n_estimators)
# -

ada_best = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=1),
                              learning_rate=0.3045, n_estimators=bst_n_estimators,
                              random_state=49, algorithm="SAMME.R")
ada_best.fit(X_reduced_after_lasso, label_train)
ada_best.score(X_test_reduced_after_lasso, label_test)

# + active=""
# learning_rate
# 0.6623314065510597 0.303
# 0.6647398843930635 0.304
# 0.6652215799614644 0.3045 highest score
# 0.6642581888246628 0.305

# +
### Locally Linear Embedding

# +
from sklearn.manifold import LocallyLinearEmbedding

pca = LocallyLinearEmbedding(n_components=277, n_neighbors=10)
X_reduced = pca.fit_transform(data_train.toarray())
X_test_reduced = pca.transform(data_test.toarray())
rndf_classifier = RandomForestClassifier(bootstrap=True, max_depth=4, n_estimators=1000,
                                         n_jobs=-1, random_state=42, oob_score=True)
rndf_classifier.fit(X_reduced, label_train)
print(rndf_classifier.score(X_test_reduced, label_test))

# +
###
# -
github_advise_analysis_ensemble_learning_and_other_advanced.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import yt
import numpy as np
import matplotlib.pyplot as plt


# +
def _profile_y(path, field, nbins):
    """Load the plotfile at *path* and return a (y, field) profile.

    The profile is cell-volume weighted with linear axes; returns two
    ndarrays (bin centers along y, mean field value per bin).
    """
    ds = yt.load(path)
    ad = ds.all_data()
    plot = yt.ProfilePlot(ad, "y", [field], weight_field="cell_volume",
                          x_log=False, y_log={field: False}, n_bins=nbins)
    profile = plot.profiles[0]
    return np.array(profile.x), np.array(profile[field])


def return_y_dens_cplt(folder, time, nbins, field="densityMean"):
    """Profile *field* vs y from checkpoint plotfile ``cplt<time>`` in *folder*."""
    return _profile_y(folder + '/cplt' + time + '/', field, nbins)


def return_y_plt(folder, time, nbins, field="densityMean"):
    """Profile *field* vs y from plotfile ``plt<time>`` in *folder*."""
    return _profile_y(folder + '/plt' + time + '/', field, nbins)
# -

# Time stamps are zero-padded to 9 digits to match the plotfile directory names.
time = "{:09d}".format(int(480000))
#time = "{:09d}".format(int(220000))
x1a, y1a = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_50wet1', time, nbins=48, field="chargeDensityMeanSpecies00000")
x1b, y1b = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_50wet2', time, nbins=48, field="chargeDensityMeanSpecies00000")
x1c, y1c = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_50wet3', time, nbins=48, field="chargeDensityMeanSpecies00000")
x1d, y1d = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_50wet4', time, nbins=48, field="chargeDensityMeanSpecies00000")
x1e, y1e = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_50wet5', time, nbins=48, field="chargeDensityMeanSpecies00000")
x1f, y1f = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_50wet6', time, nbins=48, field="chargeDensityMeanSpecies00000")
# Runs 7-10 were stopped earlier, so they are read at an earlier time stamp.
time = "{:09d}".format(int(400000))
x1g, y1g = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_50wet7', time, nbins=48, field="chargeDensityMeanSpecies00000")
x1h, y1h = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_50wet8', time, nbins=48, field="chargeDensityMeanSpecies00000")
x1i, y1i = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_50wet9', time, nbins=48, field="chargeDensityMeanSpecies00000")
x1j, y1j = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_50wet10', time, nbins=48, field="chargeDensityMeanSpecies00000")

# +
#time = "{:09d}".format(int(60000))
time = "{:09d}".format(int(240000))
x7a, y7a = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet1', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7b, y7b = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet2', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7c, y7c = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet3', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7d, y7d = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet4', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7e, y7e = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet5', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7f, y7f = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet6', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7g, y7g = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet7', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7h, y7h = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet8', time, nbins=48, field="chargeDensityMeanSpecies00000")
time = "{:09d}".format(int(180000))
x7i, y7i = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet9', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7j, y7j = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet10', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7k, y7k = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet11', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7l, y7l = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet12', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7m, y7m = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet13', time, nbins=48, field="chargeDensityMeanSpecies00000")
x7n, y7n = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/1_1_100wet14', time, nbins=48, field="chargeDensityMeanSpecies00000")
# -

time = "{:09d}".format(int(580000))
x10a, y10a = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/2_1_50wet1', time, nbins=48, field="chargeDensityMeanSpecies00000")
# BUG FIX: the second run previously re-read 2_1_50wet1, double-counting it in
# the downstream average; the wet1..wet10 series makes wet2 the clear intent.
x10b, y10b = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/2_1_50wet2', time, nbins=48, field="chargeDensityMeanSpecies00000")
x10c, y10c = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/2_1_50wet3', time, nbins=48, field="chargeDensityMeanSpecies00000")
x10d, y10d = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/2_1_50wet4', time, nbins=48, field="chargeDensityMeanSpecies00000")
x10e, y10e = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/2_1_50wet5', time, nbins=48, field="chargeDensityMeanSpecies00000")
x10f, y10f = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/2_1_50wet6', time, nbins=48, field="chargeDensityMeanSpecies00000")
time = "{:09d}".format(int(490000))
x10g, y10g = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/2_1_50wet7', time, nbins=48, field="chargeDensityMeanSpecies00000")
x10h, y10h = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/2_1_50wet8', time, nbins=48, field="chargeDensityMeanSpecies00000")
x10i, y10i = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/2_1_50wet9', time, nbins=48, field="chargeDensityMeanSpecies00000")
x10j, y10j = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp3/2_1_50wet10', time, nbins=48, field="chargeDensityMeanSpecies00000")

# +
y1 = []
x1=x1b rat = 40/48 for (item1,item2,item3,item4,item5,item6,item7,item8,item9,item10) in zip(y1a,y1b,y1c,y1d,y1e,y1f,y1g,y1h,y1i,y1j): y1.append((item1+item2+item3+item4+item5+item6+rat*item7+rat*item8+rat*item9+rat*item10)/(6+4*rat)) y7=[] x7=x7b rat=18/24 for (item1,item2,item3,item4,item5,item6,item7,item8,item9,item10,item11,item12,item13,item14) in zip(y7a,y7b,y7c,y7d,y7e,y7f,y7g,y7h,y7i,y7j,y7k,y7l,y7m,y7n): y7.append((item1+item2+item3+item4+item5+item6+item7+item8+rat*item9+rat*item10+rat*item11+rat*item12+rat*item13+rat*item14)/(8+6*rat)) y10=[] x10=x10a rat = 49/58 for (item1,item2,item3,item4,item5,item6,item7,item8,item9,item10) in zip(y10a,y10b,y10c,y10d,y10e,y10f,y10g,y10h,y10i,y10j): y10.append((item1+item2+item3+item4+item5+item6+rat*item7+rat*item8+rat*item9+rat*item10)/(6+4*rat)) # + plt.rcParams['mathtext.fontset'] = 'cm' plt.rc('text', usetex=True) plt.rc('xtick', labelsize=13) plt.rc('ytick', labelsize=13) plt.rc('axes', linewidth=1.5) fig = plt.figure(figsize=[21,10]) plt.subplots_adjust(wspace=0.20) ax1 = fig.add_subplot(1,2,1) #ax1.scatter(x1,y1,s=150,color='black',facecolors='none',marker='o', linewidths=2, label='100\% wet') #ax1.scatter(x2,y2,s=150,color='red',marker='x', linewidths=3, label='50\% wet') #ax1.scatter(x3,y3,s=150,color='green',marker='s',facecolors='none', linewidths=3, label='50\% wet') #ax1.plot(xa,ya,color='black', linewidth=2, label='50\% wet') #ax1.plot(xd,yd, color='hotpink', linestyle='dashed',linewidth=3.5, label='50\% wet - no dry') ax1.plot(x1,y1,color='red', linestyle='dashed',linewidth=3.5, label='wet drift off') ax1.plot(x2,y2,color='green', linestyle='dashed',linewidth=3.5, label='dry drift off') ax1.plot(x3,y3,color='orange', linestyle='dashed',linewidth=3.5, label='rfd off') #ax1.plot(x4,y4,color='purple', linestyle='dashed',linewidth=3.5, label='midpoint off') #ax1.plot(x4,y4,color='blue', linestyle='dashdot',linewidth=3.5, label='6nm fit') ax1.set_xlabel(r'$y$', fontsize=30) 
ax1.set_ylabel(r"$\varrho\,(\mathrm{C}/\mathrm{cm}^3)$", fontsize=30,rotation=90, labelpad=10) ax1.set_ylim([-5,155]) ax1.tick_params(labelsize=20) ax1.legend(loc=(0.65,0.04),fontsize=20) ax1.xaxis.set_tick_params(width=1.5) ax1.yaxis.set_tick_params(width=1.5) ax2 = fig.add_subplot(1,2,2) #ax2.plot(xa,ya,color='green', linewidth=2, label='50\% wet') #ax2.scatter(x1,y1,s=150,color='black',facecolors='none',marker='o', linewidths=2, label='100\% wet') #ax2.scatter(x2,y2,s=150,color='red',marker='x', linewidths=3, label='50\% wet') #ax2.scatter(x3,y3,s=150,color='green',marker='s',facecolors='none', linewidths=3, label='50\% wet') #ax2.plot(xb,yb,color='blue', linewidth=2, label='100\% wet') #ax2.plot(x6,y6,color='black', linewidth=2, label='100\% wet no dry move') ax2.plot(x9,y9,color='green',linewidth=2, label='100\% wet wet drift off') #ax2.plot(x1,y1,color='orange', linestyle='dashed',linewidth=3.5, label='50\% wet wet drift off') #ax2.plot(x2,y2,color='blue', linestyle='dashed',linewidth=3.5, label='50\% wet dry drift off') #ax2.plot(x5,y5,color='purple', linestyle='dashed',linewidth=3.5, label='50\% wet no dry move') #ax2.plot(x11,y11,color='green', linestyle='dashed',linewidth=3.5, label='50\% wet no wet move') ax2.plot(x12,y12,color='blue',linewidth=2, label='50\% wet no dry move no wet drift') ax2.plot(x13,y13,color='orange',linewidth=2, label='50\% wet no wet move') #ax2.plot(xc,yc,color='orange', linestyle='dashdot',linewidth=3.5, label='100% wet, half timestep') ax2.set_xlabel(r'$y$', fontsize=30) ax2.set_ylabel(r"$\varrho\,(\mathrm{C}/\mathrm{cm}^3)$", fontsize=30,rotation=90, labelpad=10) ax2.set_ylim([-5,155]) ax2.tick_params(labelsize=20) ax2.legend(loc=(0.65,0.04),fontsize=20) ax2.xaxis.set_tick_params(width=1.5) ax2.yaxis.set_tick_params(width=1.5) # ax2 = fig.add_subplot(1,2,2) # ax2.scatter(xpos,nmob,s=150,color='black',facecolors='none',marker='o', linewidths=2, label='100\% wet') # ax2.scatter(xpos,nmob50,s=150,color='red',marker='x', 
linewidths=3, label='50\% wet') # ax2.set_xlabel(r'$\tilde{y}/a_t$', fontsize=30) # ax2.set_ylabel(r'$\gamma_{\bot}$', fontsize=30,rotation=0, labelpad=15) # ax2.set_ylim([0.005,1.05]) # ax2.tick_params(labelsize=20) # ax2.legend(loc=(0.65,0.04),fontsize=20) # ax2.xaxis.set_tick_params(width=1.5) # ax2.yaxis.set_tick_params(width=1.5) # fig.savefig("mob3.pdf", bbox_inches = 'tight', # pad_inches = 0.05) # - xC=np.linspace(0.5*0.0625e-7,3e-7-0.5*0.0625e-7,48) print(xC) print(x1a) # + plt.rcParams['mathtext.fontset'] = 'cm' plt.rc('text', usetex=True) plt.rc('xtick', labelsize=13) plt.rc('ytick', labelsize=13) plt.rc('axes', linewidth=1.5) fig = plt.figure(figsize=[21,10]) plt.subplots_adjust(wspace=0.20) ax1 = fig.add_subplot(1,2,1) yd=np.zeros(len(x10)) ax1.plot(xC,y1,color='black', linewidth=2.0, label='50\% wet') #ax1.plot(xC,y11,color='blue', linewidth=2.0, label='50\% wet') #ax1.plot(xC,y11,color='blue', linewidth=2.0, label='50\% wet') #ax1.plot(x2,y2,color='blue',linewidth=2, label='50\% wet, wet part only') #ax1.plot(x4,y4,color='red',linewidth=2, label='50\% wet, dry part only') ax1.plot(xC,y7,color='red', linestyle='dashed',linewidth=3.5, label='100\% wet') #ax1.plot(xC,y12,color='blue', linestyle='dashed',linewidth=3.5, label='100\% wet') ax1.plot(xC,yd,color='black',linewidth=2, label='from Ref.') #ax1.plot([6*0.0625e-7-0.03125e-7,6*0.0625e-7-0.03125e-7],[100,140],color='black',linewidth=1, label='_not in legend') #ax1.plot([7*0.0625e-7-0.03125e-7,7*0.0625e-7-0.03125e-7],[100,140],color='black',linewidth=1, label='_not in legend') ax1.set_xlabel(r'$y$', fontsize=30) ax1.set_ylabel(r"$n_+$", fontsize=30,rotation=0, labelpad=15) ax1.set_ylim([-5,155]) ax1.tick_params(labelsize=20) ax1.legend(loc=(0.05,0.78),fontsize=20) ax1.xaxis.set_tick_params(width=1.5) ax1.yaxis.set_tick_params(width=1.5) ax2 = fig.add_subplot(1,2,2) ax2.plot(xC,y10,color='black',linewidth=2, label='50\% wet') #ax2.plot(xC,y110,color='blue',linewidth=2, label='50\% wet') 
ax2.plot(xC,yd,color='black',linewidth=2, label='from Ref.') ax2.set_xlabel(r'$y$', fontsize=30) ax2.set_ylabel(r"$n_+$", fontsize=30, labelpad=15,rotation=0) ax2.set_ylim([-5,155]) ax2.tick_params(labelsize=20) ax2.legend(loc=(0.05,0.83),fontsize=20) ax2.xaxis.set_tick_params(width=1.5) ax2.yaxis.set_tick_params(width=1.5) fig.savefig("static1_raw.svg", bbox_inches = 'tight', pad_inches = 0.05) # - time = "{:09d}".format(int(450000)) #time = "{:09d}".format(int(220000)) x11a, y11a = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_50wet1',time,nbins=48,field="chargeDensityMeanSpecies00000") x11b, y11b = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_50wet2',time,nbins=48,field="chargeDensityMeanSpecies00000") x11c, y11c = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_50wet3',time,nbins=48,field="chargeDensityMeanSpecies00000") x11d, y11d = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_50wet4',time,nbins=48,field="chargeDensityMeanSpecies00000") x11e, y11e = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_50wet5',time,nbins=48,field="chargeDensityMeanSpecies00000") x11f, y11f = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_50wet6',time,nbins=48,field="chargeDensityMeanSpecies00000") #time = "{:09d}".format(int(60000)) time = "{:09d}".format(int(200000)) x17a, y17a = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_100wet1',time,nbins=48,field="chargeDensityMeanSpecies00000") x17b, y17b = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_100wet2',time,nbins=48,field="chargeDensityMeanSpecies00000") x17c, y17c = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_100wet3',time,nbins=48,field="chargeDensityMeanSpecies00000") x17d, y17d = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_100wet4',time,nbins=48,field="chargeDensityMeanSpecies00000") x17e, y17e = 
return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_100wet5',time,nbins=48,field="chargeDensityMeanSpecies00000") x17f, y17f = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_100wet6',time,nbins=48,field="chargeDensityMeanSpecies00000") x17g, y17g = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_100wet7',time,nbins=48,field="chargeDensityMeanSpecies00000") x17h, y17h = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/1_1_100wet8',time,nbins=48,field="chargeDensityMeanSpecies00000") #x7i, y7i = return_y_dens_cplt('~/coriProject/dladiges/static_eq_tests/100wet_nodrymove9',time,nbins=48,field="chargeDensityMeanSpecies00000") #x7j, y7j = return_y_dens_cplt('~/coriProject/dladiges/static_eq_tests/100wet_nodrymove10',time,nbins=48,field="chargeDensityMeanSpecies00000") time = "{:09d}".format(int(560000)) x110a, y110a = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/2_1_50wet1',time,nbins=48,field="chargeDensityMeanSpecies00000") x110b, y110b = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/2_1_50wet1',time,nbins=48,field="chargeDensityMeanSpecies00000") x110c, y110c = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/2_1_50wet3',time,nbins=48,field="chargeDensityMeanSpecies00000") x110d, y110d = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/2_1_50wet4',time,nbins=48,field="chargeDensityMeanSpecies00000") x110e, y110e = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/2_1_50wet5',time,nbins=48,field="chargeDensityMeanSpecies00000") x110f, y110f = return_y_dens_cplt('~/coriProject/dladiges/static_eq_comp4/2_1_50wet6',time,nbins=48,field="chargeDensityMeanSpecies00000") # + y11=[] x11=x11b for (item1,item2,item3,item4,item5,item6) in zip(y11a,y11b,y11c,y11d,y11e,y11f): y11.append((item1+item2+item3+item4+item5+item6)/6) y17=[] x17=x17b for (item1,item2,item3,item4,item5,item6,item7,item8) in zip(y17a,y17b,y17c,y17d,y17e,y17f,y17g,y17h): 
y17.append((item1+item2+item3+item4+item5+item6+item7+item8)/8) y110=[] x110=x110a for (item1,item2,item3,item4,item5,item6) in zip(y110a,y110b,y110c,y110d,y110e,y110f): y110.append((item1+item2+item3+item4+item5+item6)/6) # + plt.rcParams['mathtext.fontset'] = 'cm' plt.rc('text', usetex=True) plt.rc('xtick', labelsize=13) plt.rc('ytick', labelsize=13) plt.rc('axes', linewidth=1.5) fig = plt.figure(figsize=[31,15]) plt.subplots_adjust(wspace=0.20) ax1 = fig.add_subplot(1,2,1) ax1.plot(x1,y1,color='black',linewidth=2, label='50\% wet') ax1.plot(x2,y2,color='blue',linewidth=2, label='50\% wet, wet part only') ax1.plot(x4,y4,color='red',linewidth=2, label='50\% wet, dry part only') ax1.plot(x7,y7,color='purple',linewidth=2, label='100\% wet') ax1.set_xlabel(r'$y$', fontsize=30) ax1.set_ylabel(r"$\varrho\,(\mathrm{C}/\mathrm{cm}^3)$", fontsize=30,rotation=90, labelpad=10) ax1.set_ylim([-5,155]) ax1.tick_params(labelsize=20) ax1.legend(loc=(0.65,0.04),fontsize=20) ax1.xaxis.set_tick_params(width=1.5) ax1.yaxis.set_tick_params(width=1.5) ax2 = fig.add_subplot(1,2,2) ax2.plot(x9,y9,color='black',linewidth=2, label='50\% wet, no drift') ax2.plot(x3,y3,color='blue',linewidth=2, label='50\% wet, wet part only, no drift') ax2.plot(x5,y5,color='red',linewidth=2, label='50\% wet, dry part only, no drift') ax2.plot(x8,y8,color='purple',linewidth=2, label='100\% wet, no drift') ax2.plot(x7,y7,color='purple', linestyle='dotted',linewidth=2, label='100\% wet') ax2.set_xlabel(r'$y$', fontsize=30) ax2.set_ylabel(r"$\varrho\,(\mathrm{C}/\mathrm{cm}^3)$", fontsize=30,rotation=90, labelpad=10) ax2.set_ylim([-5,155]) ax2.tick_params(labelsize=20) ax2.legend(loc=(0.65,0.04),fontsize=20) ax2.xaxis.set_tick_params(width=1.5) ax2.yaxis.set_tick_params(width=1.5) # - time = "{:010d}".format(int(170000)) print(time) time = "{:09d}".format(int(300000)) X1a, Y1a = return_y_dens_cplt('~/coriProject/dladiges/EO_tests4/low',time,nbins=112,field="chargeDensityMeanSpecies00000") X2a, Y2a = 
return_y_dens_cplt('~/coriProject/dladiges/EO_tests4/low',time,nbins=112,field="chargeDensityMeanSpecies00001") X3a,Y3a = return_y_plt('~/coriProject/dladiges/EO_tests4/low',time,nbins=56,field="averaged_meanx") time = "{:09d}".format(int(150000)) X1b, Y1b = return_y_dens_cplt('~/coriProject/dladiges/EO_tests3/inputthinlow',time,nbins=56,field="chargeDensityMeanSpecies00000") X2b, Y2b = return_y_dens_cplt('~/coriProject/dladiges/EO_tests3/inputthinlow',time,nbins=56,field="chargeDensityMeanSpecies00001") X3b,Y3b = return_y_plt('~/coriProject/dladiges/EO_tests3/inputthinlow',time,nbins=56,field="averaged_meanx") time = "{:09d}".format(int(150000)) X1b, Y1b = return_y_dens_cplt('~/coriProject/dladiges/EO_tests4/low_rev/',time,nbins=224,field="chargeDensityMeanSpecies00000") X2b, Y2b = return_y_dens_cplt('~/coriProject/dladiges/EO_tests4/low_rev/',time,nbins=224,field="chargeDensityMeanSpecies00001") X3b,Y3b = return_y_plt('~/coriProject/dladiges/EO_tests4/low_rev/',time,nbins=56,field="averaged_meanx") time = "{:09d}".format(int(130000)) X1c, Y1c = return_y_dens_cplt('~/coriProject/dladiges/EO_tests2/inputthinlow_noadj',time,nbins=56,field="chargeDensityMeanSpecies00000") X2c, Y2c = return_y_dens_cplt('~/coriProject/dladiges/EO_tests2/inputthinlow_noadj',time,nbins=56,field="chargeDensityMeanSpecies00001") X3c,Y3c = return_y_plt('~/coriProject/dladiges/EO_tests2/inputthinlow_noadj/',time,nbins=56,field="averaged_meanx") def mirrorAv(arr): l = len(arr); out=[] for i in range(l): out.append((arr[i]+arr[l-1-i])/2); return out # + plt.rcParams['mathtext.fontset'] = 'cm' plt.rc('text', usetex=True) plt.rc('xtick', labelsize=13) plt.rc('ytick', labelsize=13) plt.rc('axes', linewidth=1.5) fig = plt.figure(figsize=[31,15]) plt.subplots_adjust(wspace=0.20) ax1 = fig.add_subplot(1,2,1) ax1.plot(X1a,mirrorAv(Y1a),color='red',linewidth=2, label='charge') ax1.plot(X2a,mirrorAv(-Y2a),color='blue',linewidth=2, label='charge') 
ax1.plot(X1b,mirrorAv(Y1b),color='red',linestyle='dashed',linewidth=2, label='charge') ax1.plot(X2b,mirrorAv(-Y2b),color='blue',linestyle='dashed',linewidth=2, label='charge') ax2 = fig.add_subplot(1,2,2) ax2.plot(X3a,mirrorAv(Y3a),color='black',linewidth=2, label='50\% wet, no drift') ax2.plot(X3b,mirrorAv(Y3b),color='black',linestyle='dashed',linewidth=2, label='50\% wet, no drift') fig.savefig("EO_raw.svg", bbox_inches = 'tight', pad_inches = 0.05) # + plt.rcParams['mathtext.fontset'] = 'cm' plt.rc('text', usetex=True) plt.rc('xtick', labelsize=13) plt.rc('ytick', labelsize=13) plt.rc('axes', linewidth=1.5) fig = plt.figure(figsize=[31,15]) plt.subplots_adjust(wspace=0.20) ax1 = fig.add_subplot(1,2,1) ax1.plot(X1,Y1,color='blue',linewidth=2, label='charge') ax1.plot(X2,-Y2,color='red',linewidth=2, label='charge') #ax1.plot(X2,Y2+Y1,color='black',linewidth=2, label='charge') ax2 = fig.add_subplot(1,2,2) ax2.plot(X3,Y3,color='black',linewidth=2, label='50\% wet, no drift') # -
tools/notebooks/plotting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Segment Tree
#
# * A binary tree that represents a list of elements
# * The leaves of this tree (or, in some cases, the lowest level of the tree) represent this list
# * Every node in this tree contains some information aggregated over the node's children
#     * e.g. sum, average, min, max, etc.
#
# For this discussion I'll use a list of numbers as an example, `[3,8,6,4,2,5,9,0,7,1]`, and the information each node stores is the smallest number among the node's children (`"minimum"`).

# ## Creating the Tree
#
# Usually the segment tree is stored as an array. Being a binary tree, the `i`th node has its children at `2*i+1` and `2*i+2`.
#
# The first step in creating the tree is allocating an array big enough to store all potential nodes. The input array becomes the leaves of this tree, and the number of nodes required is easy to calculate from a few basic properties of a binary tree.
#
# ### Steps
# * Calculate the length of the input sequence `[3,8,6,4,2,5,9,0,7,1]`

arr = [3, 8, 6, 4, 2, 5, 9, 0, 7, 1]
N = len(arr)

# * If the size of the array is not a power of `2`, pad the array with placeholders to make it so. This makes the segment tree node calculation easier and keeps the final tree balanced as well.

import math

# Smallest k with 2**k >= N.  (N - 1).bit_length() is exactly
# ceil(log2(N)) computed in integer arithmetic, which avoids the
# float round-off that math.ceil(math.log(N, 2)) can suffer.
k = (N - 1).bit_length()
while len(arr) < 2 ** k:
    # math.inf is the identity element for min(), so padded leaves
    # never affect any query result.
    arr.append(math.inf)

# * Now that we know the number of leaf elements, the total number of nodes can be calculated as `2^(k+1)`

node_count = 2 ** (k + 1)

# * Initialize the array with placeholders — that is our segment tree without any elements

segment_tree = [math.inf] * node_count

# ## Building the Tree
#
# * The tree is built recursively
# * Each recursion works on a range of input array elements
# * Every call populates one index in the segment tree and returns its value
# * If the range denotes only one element (a leaf of the segment tree), the input array value is stored there
# * Otherwise the recursion partitions the range and merges the results from the left and right subtrees


def build_tree(tree, arr, tree_index, lo, hi):
    """Populate tree[tree_index] with the minimum of arr[lo..hi] (inclusive).

    Recursively builds the whole subtree rooted at tree_index and returns
    the value stored there.
    """
    if lo == hi:
        # Leaf node: store the array element itself.
        tree[tree_index] = arr[lo]
        return tree[tree_index]
    left_inx = 2 * tree_index + 1
    right_inx = 2 * tree_index + 2
    mid = (lo + hi) // 2
    left = build_tree(tree, arr, left_inx, lo, mid)
    right = build_tree(tree, arr, right_inx, mid + 1, hi)
    # Merge the children's results.
    tree[tree_index] = min(left, right)
    return tree[tree_index]


build_tree(segment_tree, arr, 0, 0, len(arr) - 1)

segment_tree

# ## Range Query
#
# Queries ask for the minimum number in a range `[i, j]`. The answer is found by
# traversing the segment tree recursively. At each node (covering `[lo, hi]`)
# there are these cases:
# * The query range is disjoint from the current range: return the placeholder.
# * The query range completely covers the current range: return the node's value.
# * The query range lies completely in the left (or right) subtree: recurse on that side only.
# * Otherwise the ranges overlap both halves: query both and return the minimum.

# +
def query(tree, tree_index, lo, hi, i, j):
    """Return the minimum of arr[i..j] using the subtree rooted at tree_index.

    [lo, hi] is the range of array indices covered by tree_index.
    Returns math.inf when [i, j] is disjoint from [lo, hi].
    """
    if lo > j or hi < i:
        # Disjoint: contribute the identity element of min().
        return math.inf
    if i <= lo and j >= hi:
        # The node's whole range is inside the query; its stored value
        # is already the answer for this subtree.
        return tree[tree_index]
    left_inx = 2 * tree_index + 1
    right_inx = 2 * tree_index + 2
    mid = (lo + hi) // 2
    if i >= mid + 1:
        # Query lies completely in the right half.
        return query(tree, right_inx, mid + 1, hi, i, j)
    elif j <= mid:
        # Query lies completely in the left half.
        return query(tree, left_inx, lo, mid, i, j)
    # Overlaps both halves: combine the two sides.
    left = query(tree, left_inx, lo, mid, i, j)
    right = query(tree, right_inx, mid + 1, hi, i, j)
    return min(left, right)


print("[0, n-1]", query(segment_tree, 0, 0, len(arr) - 1, 0, len(arr) - 1))
print("[0, 1]", query(segment_tree, 0, 0, len(arr) - 1, 0, 1))
print("[2, 6]", query(segment_tree, 0, 0, len(arr) - 1, 2, 6))
# -

# ## Updating the Tree
#
# When the input array changes, the segment tree should be updated as well. This
# is done recursively by traversing the path from the leaf back to the root.

# +
def update(tree, tree_index, lo, hi, arr_index, val):
    """Set the leaf for arr_index to val and repair the min-tree on the way up.

    Returns the (possibly unchanged) value stored at tree_index.
    """
    if lo == hi:
        # Reached the leaf: overwrite it.
        tree[tree_index] = val
        return val
    left_inx = 2 * tree_index + 1
    right_inx = 2 * tree_index + 2
    mid = (lo + hi) // 2
    if arr_index >= mid + 1:
        # Target leaf is in the right half.
        update(tree, right_inx, mid + 1, hi, arr_index, val)
    else:
        # Target leaf is in the left half.
        update(tree, left_inx, lo, mid, arr_index, val)
    # BUG FIX: recompute this node from BOTH children. The original used
    # min(tree[tree_index], ch), which can never *raise* a node's value,
    # so updates that increased an element were silently not propagated.
    tree[tree_index] = min(tree[left_inx], tree[right_inx])
    return tree[tree_index]


arr[5] = 1
update(segment_tree, 0, 0, len(arr) - 1, 5, 1)
print("[2, 6]", query(segment_tree, 0, 0, len(arr) - 1, 2, 6))
series-dsa/segment-tree.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# +
# Demonstrate element-wise transformation of a DataFrame with applymap.
# NOTE(review): DataFrame.applymap is deprecated in pandas >= 2.1 in
# favor of DataFrame.map — confirm the target pandas version before
# modernizing.
df = pd.DataFrame({
    'a': [1, 2, 3],
    'b': [4, 5, 6],
    'c': [7, 8, 9]
})
print(df)


def add_one(x):
    """Return x incremented by one (applied to every cell below)."""
    return x + 1


df.applymap(add_one)

# +
# Map raw exam scores to letter grades on the standard 90/80/70/60 scale.
# (The original notebook repeated this entire cell verbatim; the exact
# duplicate was removed.)
grades_df = pd.DataFrame(
    data={'exam1': [43, 81, 78, 75, 89, 70, 91, 65, 98, 87],
          'exam2': [24, 63, 56, 56, 67, 51, 79, 46, 72, 60]},
    index=['Andre', 'Barry', 'Chris', 'Dan', 'Emilio', 'Fred',
           'Greta', 'Humbert', 'Ivan', 'James']
)


def convert_grades(grades):
    """Return the letter grade ('A'..'F') for a numeric score."""
    if grades >= 90:
        return 'A'
    elif grades >= 80:
        return 'B'
    elif grades >= 70:
        return 'C'
    elif grades >= 60:
        return 'D'
    else:
        return 'F'


grades_df.applymap(convert_grades)
# -
python-programming/numpy_panda_2/dataframe_applymap.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Environment (conda_tf) # language: python # name: conda_tf # --- # + import t3f import numpy as np import tensorflow as tf import keras.backend as K tf.set_random_seed(0) np.random.seed(0) sess = tf.InteractiveSession() K.set_session(sess) # - # ## Tensor Nets # # In this notebook we provide an example of how to build a simple Tensor Net (see https://arxiv.org/abs/1509.06569). # # The main ingredient is the so-called TT-Matrix, a generalization of the Kronecker product matrices, i.e. matrices of the form # $$A = A_1 \otimes A_2 \cdots \otimes A_n$$ # # In `t3f` TT-Matrices are represented using the `TensorTrain` class. # + W = t3f.random_matrix([[4, 7, 4, 7], [5, 5, 5, 5]], tt_rank=2) print(W) # - # Using TT-Matrices we can compactly represent densely connected layers in neural networks, which allows us to greatly reduce number of parameters. Matrix multiplication can be handled by the `t3f.matmul` method which allows for multiplying dense (ordinary) matrices and TT-Matrices. 
# A very simple neural network could look as follows (for initialization several options such as `t3f.initializers.glorot`, `t3f.initializers.he` or `t3f.random_matrix` are available):

# +
# TF1 feed placeholders: flattened 28x28 MNIST images and integer labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.int64, [None])

# First layer: a 784->625 weight matrix stored in TT format (rank 2)
# instead of a dense 784x625 variable.
initializer = t3f.glorot_initializer([[4, 7, 4, 7], [5, 5, 5, 5]], tt_rank=2)
W1 = t3f.get_variable('W1', initializer=initializer)
b1 = tf.get_variable('b1', shape=[625])
h1 = t3f.matmul(x, W1) + b1
h1 = tf.nn.relu(h1)

# Second layer: an ordinary dense 625->10 classifier head.
W2 = tf.get_variable('W2', shape=[625, 10])
b2 = tf.get_variable('b2', shape=[10])
h2 = tf.matmul(h1, W2) + b2

y_ = tf.one_hot(y, 10)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=h2))
# -

# For convenience we have implemented a layer analogous to the *Keras* `Dense` layer but with a TT-Matrix instead of an ordinary matrix. An example of a fully trainable net is provided below.

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
import numpy as np  # re-import; np is already imported at the top of the file
from keras.utils import to_categorical
from keras import optimizers
# TTDense is a project-local Keras layer (not visible in this file);
# presumably it wraps a t3f TT-Matrix as the layer kernel — confirm in utils.
from utils import TTDense

(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Some preprocessing...

# +
# Rescale uint8 pixel values [0, 255] to roughly [-1, 1].
x_train = x_train / 127.5 - 1.0
x_test = x_test / 127.5 - 1.0

# One-hot encode the labels for categorical cross-entropy.
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)
# -

model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
# NOTE(review): row_dims here is [7, 4, 7, 4] while the earlier W1 used
# [4, 7, 4, 7] — both factor 784, presumably equivalent; confirm intended.
model.add(TTDense(row_dims=[7, 4, 7, 4],
                  column_dims=[5, 5, 5, 5],
                  tt_rank=4, init='glorot',
                  activation='relu', bias_init=1e-3))
model.add(Dense(10))
model.add(Activation('softmax'))

model.summary()

# Note that in the dense layer we only have $1725$ parameters instead of $784 * 625 = 490000$.
# Train the TT-layer model end to end for a couple of epochs.
optimizer = optimizers.Adam(lr=1e-2)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=2, batch_size=64, validation_data=(x_test, y_test))

# # Compression of Dense layers

# Let us now train an ordinary DNN (without TT-Matrices) and show how we can compress it using the TT decomposition.

# Baseline: the same architecture with a plain dense 784->625 layer.
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(625, activation='relu'))
model.add(Dense(10))
model.add(Activation('softmax'))

model.summary()

optimizer = optimizers.Adam(lr=1e-3)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, batch_size=64, validation_data=(x_test, y_test))

# Let us convert the matrix used in the Dense layer to the TT-Matrix with tt-ranks equal to 16 (since we trained the network without the low-rank structure assumption, we may wish to start with high rank values).

# trainable_weights[0] is the first Dense layer's 784x625 kernel.
W = model.trainable_weights[0]
print(W)
# Decompose the trained dense kernel into TT format, capping the rank at 16.
Wtt = t3f.to_tt_matrix(W, shape=[[7, 4, 7, 4], [5, 5, 5, 5]], max_tt_rank=16)
print(Wtt)

# We need to evaluate the tt-cores of Wtt. We also need to store the other parameters for later (biases and the second dense layer).

# Materialize the TT cores as numpy arrays via the shared TF1 session;
# keep every weight except the decomposed kernel (index 0).
cores = sess.run(Wtt.tt_cores)
other_params = model.get_weights()[1:]

# Now we can construct a tensor network with the first Dense layer replaced by `Wtt`
# initialized using the previously computed cores.
# Rebuild the network with a TTDense layer (rank 16, matching the
# decomposition above) in place of the dense 784->625 layer.
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(TTDense(row_dims=[7, 4, 7, 4],
                  column_dims=[5, 5, 5, 5],
                  tt_rank=16, activation='relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
optimizer = optimizers.Adam(lr=1e-3)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Load the decomposed cores plus the untouched biases/head weights; this
# assumes TTDense exposes its TT cores first in get_weights() order —
# confirm against the TTDense implementation.
model.set_weights(list(cores) + other_params)

print("new accuracy: ", model.evaluate(x_test, y_test)[1])

model.summary()

# We see that even though we now have about 5% of the original number of parameters we still achieve a relatively high accuracy.

# ## Finetuning the model.

# We can now finetune this tensor network.

model.fit(x_train, y_train, epochs=2, batch_size=64, validation_data=(x_test, y_test))

# We see that we were able to achieve higher validation accuracy than we had in the plain DNN, while keeping the number of parameters extremely small.
examples/tensor-nets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:torch2] * # language: python # name: conda-env-torch2-py # --- # %reload_ext nb_black # %matplotlib inline # + import copy import gc import math import random from pathlib import Path from PIL import Image from concurrent.futures import ThreadPoolExecutor import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn import metrics import cv2 from easydict import EasyDict from tqdm.notebook import tqdm from iterstrat.ml_stratifiers import MultilabelStratifiedKFold import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler import pytorch_lightning as pl import albumentations as A from albumentations.pytorch import ToTensorV2 import timm from transformers import ( AdamW, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, ) # - # # NIPA Round 1 # + args = dict( seed=42, num_plants=8, num_diseases=14, plant2idx={3: 0, 4: 1, 5: 2, 7: 3, 8: 4, 10: 5, 11: 6, 13: 7}, disease2idx={ 1: 0, 2: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 11: 7, 14: 8, 15: 9, 16: 10, 17: 11, 18: 12, 20: 13, }, # Data model_path=Path("/home/isleof/Development/nipa/models"), data_path=Path("/home/isleof/Development/nipa/input/train"), data_path_128=Path("/home/isleof/Development/nipa/input/train-128"), test_data_path=Path("/home/isleof/Development/nipa/input/test"), train_csv=Path("/home/isleof/Development/nipa/input/train_folds.csv"), test_csv=Path("/home/isleof/Development/nipa/input/test.csv"), # Model backbone="gluon_seresnext50_32x4d", # Optimizer optimizer="adamw", lr=1e-3, weight_decay=1e-4, scheduler={"method": "cosine", "warmup_epochs": 1}, # Train gradient_accumulation_steps=1, precision=16, num_workers=4, batch_size=64, max_epochs=40, ) args["trn_aug"] = A.Compose( 
[ A.OneOf( [ A.IAAAdditiveGaussianNoise(p=1), A.GaussNoise(p=1), ], p=0.5, ), A.OneOf( [ A.RandomBrightnessContrast( brightness_limit=0.1, contrast_limit=0.1, p=1 ), A.CLAHE(clip_limit=3, p=1), A.IAASharpen(p=1), A.IAAEmboss(p=1), ], p=0.5, ), A.OneOf( [ A.MotionBlur(blur_limit=3, p=1), A.MedianBlur(blur_limit=3, p=1), A.GaussianBlur(p=1), ], p=0.5, ), A.VerticalFlip(p=0.5), A.HorizontalFlip(p=0.5), A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=20, p=0.5), A.Normalize(), ToTensorV2(), ] ) args["val_aug"] = A.Compose([A.Normalize(), ToTensorV2()]) # - # ## Setup # + def seed_everything(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True seed_everything(args["seed"]) # + def visualize(dataset, idx=0): data = dataset[idx] img = data["img"] plant = data["plant"] disease = data["disease"] print(plant, disease) plt.imshow(img) plt.axis("off") def visualize_tfms(dataset, idx=0, samples=10, cols=5): dataset = copy.deepcopy(dataset) dataset.tfms = A.Compose( [t for t in dataset.tfms if not isinstance(t, (A.Normalize, ToTensorV2))] ) rows = samples // cols fig, axes = plt.subplots(rows, cols, figsize=(12, 6)) for i, ax in enumerate(axes.flat): img = dataset[idx]["img"] ax.imshow(img) ax.set_axis_off() plt.tight_layout() plt.show() # + # df = pd.read_csv( # "../input/train.tsv", sep="\t", header=None, names=["img", "plant", "disease"] # ) # + # test_df = pd.read_csv("../input/test.tsv", sep="\t", header=None, names=["img"]) # test_df.to_csv(args.test_csv, index=False) # - df = pd.read_csv(args["train_csv"]) df.head() plants = list(sorted(df.plant.unique())) diseases = list(sorted(df.disease.unique())) print("Plant:", plants, len(plants)) print("Disease:", diseases, len(diseases)) # + [markdown] heading_collapsed=true # ## k-folds # + hidden=true df = df.sample(frac=1.0).reset_index(drop=True) # + hidden=true df["fold"] = -1 y 
= df[["plant", "disease"]].values kf = MultilabelStratifiedKFold(n_splits=5) for fold, (trn_idx, val_idx) in enumerate(kf.split(df, y)): df.loc[val_idx, "fold"] = fold # + hidden=true df.to_csv(args["train_csv"], index=False) df.head() # + [markdown] heading_collapsed=true # ## EDA # + hidden=true img_paths = list((args["data_path"]).iterdir()) img_paths[:5] # + hidden=true idx = np.random.randint(len(img_paths)) path = img_paths[idx] img = Image.open(path).convert("RGB") print(np.array(img).shape) img # + hidden=true sns.countplot(df.plant) plt.show() # + hidden=true sns.countplot(df.disease) plt.show() # + [markdown] heading_collapsed=true # ## Resize # + hidden=true (PATH / "train-128").mkdir(exist_ok=True) # + hidden=true def resize_img(fn): Image.open(fn).resize((128, 128)).save((fn.parent.parent) / "train-128" / fn.name) with ThreadPoolExecutor(8) as e: e.map(resize_img, img_paths) # - # ## Dataset class PlantDataset(Dataset): def __init__(self, df, tfms=None, fast=True): self.imgs = df.img.values self.plants = [args["plant2idx"][plant] for plant in df.plant.values] self.diseases = [args["disease2idx"][disease] for disease in df.disease.values] self.tfms = tfms self.fast = fast def __len__(self): return len(self.imgs) def __getitem__(self, idx): dir_path = args["data_path_128"] if self.fast else args["data_path"] img_path = dir_path / self.imgs[idx] plant = self.plants[idx] disease = self.diseases[idx] img = cv2.imread(str(img_path)) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) if self.tfms is not None: img = self.tfms(image=img)["image"] plant = torch.tensor(plant, dtype=torch.long) disease = torch.tensor(disease, dtype=torch.long) return {"img": img, "plant": plant, "disease": disease} dataset = PlantDataset(df) idx = np.random.randint(len(dataset)) visualize(dataset, idx) # ## Transform dataset = PlantDataset(df, tfms=args["trn_aug"]) idx = np.random.randint(len(dataset)) visualize_tfms(dataset, idx) # ## Model # + class 
LabelSmoothingCrossEntropy(nn.Module): """ NLL loss with label smoothing. """ def __init__(self, smoothing=0.1): """ Constructor for the LabelSmoothing module. :param smoothing: label smoothing factor """ super(LabelSmoothingCrossEntropy, self).__init__() assert smoothing < 1.0 self.smoothing = smoothing self.confidence = 1.0 - smoothing def forward(self, x, target): logprobs = F.log_softmax(x, dim=-1) nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) nll_loss = nll_loss.squeeze(1) smooth_loss = -logprobs.mean(dim=-1) loss = self.confidence * nll_loss + self.smoothing * smooth_loss return loss.mean() class SoftTargetCrossEntropy(nn.Module): def __init__(self): super(SoftTargetCrossEntropy, self).__init__() def forward(self, x, target): loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1) return loss.mean() class Backbone(nn.Module): def __init__(self, name="resnet18", pretrained=True): super().__init__() self.net = timm.create_model(name, pretrained=pretrained) if "regnet" in name: self.out_features = self.net.head.fc.in_features elif "csp" in name: self.out_features = self.net.head.fc.in_features elif "res" in name: # works also for resnest self.out_features = self.net.fc.in_features elif "efficientnet" in name: self.out_features = self.net.classifier.in_features elif "densenet" in name: self.out_features = self.net.classifier.in_features elif "senet" in name: self.out_features = self.net.fc.in_features elif "inception" in name: self.out_features = self.net.last_linear.in_features else: self.out_features = self.net.classifier.in_features def forward(self, x): x = self.net.forward_features(x) return x class Net(nn.Module): def __init__(self, args, pretrained=True): super().__init__() self.args = args self.backbone = Backbone(args["backbone"]) self.global_pool = nn.AdaptiveAvgPool2d(1) self.head1 = nn.Linear(self.backbone.out_features, args["num_plants"]) self.head2 = nn.Linear(self.backbone.out_features, args["num_diseases"]) def forward(self, 
x): x = self.backbone(x) x = self.global_pool(x) x = x[:, :, 0, 0] x1 = self.head1(x) x2 = self.head2(x) return {"plant": x1, "disease": x2} # - class Model(pl.LightningModule): def __init__(self, trn_dl=None, val_dl=None): super().__init__() self.trn_dl = trn_dl self.val_dl = val_dl self.lr = args["lr"] if self.trn_dl is not None: self.num_train_steps = math.ceil( len(self.trn_dl) / args["gradient_accumulation_steps"] ) self.model = Net(args) self.criterion = LabelSmoothingCrossEntropy() self.val_f1_plant = pl.metrics.Fbeta(num_classes=args["num_plants"], beta=1) self.val_f1_disease = pl.metrics.Fbeta(num_classes=args["num_diseases"], beta=1) def forward(self, x): return self.model(x) def shared_step(self, batch, batch_nb): img = batch["img"] plant = batch["plant"] disease = batch["disease"] output_dict = self(img) loss_plant = self.criterion(output_dict["plant"], plant) loss_disease = self.criterion(output_dict["disease"], disease) loss = 0.5 * loss_plant + 0.5 * loss_disease return loss, output_dict def training_step(self, batch, batch_nb): loss, _ = self.shared_step(batch, batch_nb) self.log("train_loss", loss) for i, param_group in enumerate(self.optimizer.param_groups): self.log(f"lr/lr{i}", param_group["lr"]) return {"loss": loss} def validation_step(self, batch, batch_nb): target_plant = batch["plant"] target_disease = batch["disease"] loss, output_dict = self.shared_step(batch, batch_nb) logits_plant = output_dict["plant"] logits_disease = output_dict["disease"] self.val_f1_plant(logits_plant, target_plant) self.val_f1_disease(logits_disease, target_disease) return {"loss": loss} def validation_epoch_end(self, outputs): avg_loss = torch.stack([o["loss"] for o in outputs]).mean() f1 = 0.5 * self.val_f1_plant.compute() + 0.5 * self.val_f1_disease.compute() self.val_f1_plant.reset() self.val_f1_disease.reset() self.log("val_f1", f1) print(f"Epoch: {self.current_epoch} | Loss: {avg_loss} | F1: {f1}") def configure_optimizers(self): self.optimizer = AdamW( 
self.model.parameters(), lr=self.lr, weight_decay=args["weight_decay"] ) self.scheduler = get_cosine_schedule_with_warmup( self.optimizer, num_warmup_steps=self.num_train_steps * args["scheduler"]["warmup_epochs"], num_training_steps=int(self.num_train_steps * args["max_epochs"]), ) return [self.optimizer], [{"scheduler": self.scheduler, "interval": "step"}] def train_dataloader(self): return self.trn_dl def val_dataloader(self): return self.val_dl # ## Train # + pl.seed_everything(args["seed"]) for fold in range(5): trn_df = df[df.fold != fold] val_df = df[df.fold == fold] trn_ds = PlantDataset(trn_df, tfms=args["trn_aug"], fast=False) val_ds = PlantDataset(val_df, tfms=args["val_aug"], fast=False) trn_dl = DataLoader( trn_ds, batch_size=args["batch_size"], sampler=RandomSampler(trn_ds), num_workers=args["num_workers"], pin_memory=True, drop_last=True, ) val_dl = DataLoader( val_ds, batch_size=2 * args["batch_size"], sampler=SequentialSampler(val_ds), num_workers=args["num_workers"], pin_memory=True, ) model = Model(trn_dl=trn_dl, val_dl=val_dl) # early_stopping_callback = pl.callbacks.early_stopping.EarlyStopping( # monitor="val_f1", # verbose=True, # mode="max" # ) trainer = pl.Trainer( gpus=1, auto_lr_find=True, benchmark=True, max_epochs=args["max_epochs"], precision=args["precision"], accumulate_grad_batches=args["gradient_accumulation_steps"], progress_bar_refresh_rate=50, # callbacks=[early_stopping_callback] ) trainer.fit(model) trainer.save_checkpoint(f"fold{fold}.ckpt") del model, trainer, trn_ds, val_ds, trn_dl, val_dl, trn_df, val_df gc.collect() torch.cuda.empty_cache() # - # ## Inference class InferencePlantDataset(Dataset): def __init__(self, df, tfms=None): self.imgs = df.img.values self.tfms = tfms def __len__(self): return len(self.imgs) def __getitem__(self, idx): dir_path = args["test_data_path"] img_path = dir_path / self.imgs[idx] img = cv2.imread(str(img_path)) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) if self.tfms is not None: img = 
self.tfms(image=img)["image"] return {"img": img} test_df = pd.read_csv(args["test_csv"]) test_ds = InferencePlantDataset(test_df, tfms=args["val_aug"]) test_dl = DataLoader( test_ds, batch_size=2 * args["batch_size"], sampler=SequentialSampler(test_ds), num_workers=args["num_workers"], pin_memory=True, ) idx2plants = {v: k for k, v in args["plant2idx"].items()} idx2disease = {v: k for k, v in args["disease2idx"].items()} # + fin_plants = np.zeros((len(test_df), args["num_plants"])) fin_diseases = np.zeros((len(test_df), args["num_diseases"])) for fold in tqdm(range(5)): model = Model.load_from_checkpoint(checkpoint_path=f"fold{fold}.ckpt") model.cuda() model.eval() plants = [] diseases = [] with torch.no_grad(): for batch in test_dl: img = batch["img"] img = img.cuda() output_dict = model(img) plants.append(output_dict["plant"].detach().cpu().numpy()) diseases.append(output_dict["disease"].detach().cpu().numpy()) plants = np.concatenate(plants) diseases = np.concatenate(diseases) fin_plants += plants / 5 fin_diseases += diseases / 5 del model gc.collect() torch.cuda.empty_cache() # - plants = fin_plants.argmax(-1) diseases = fin_diseases.argmax(-1) plants = [idx2plants[p] for p in plants] diseases = [idx2disease[d] for d in diseases] subm = test_df.copy() subm["plant"] = plants subm["disease"] = diseases subm = subm[["img", "plant", "disease"]] subm.head() subm.to_csv("submission.tsv", sep="\t", header=None, index=False)
notebooks/00.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Scipy minimize variational plugin # # In this notebook, we briefly introduce the particularization of the `Optimizer` abstract plugin for `scipy.optimize.minimize`. # # We assume that you are already familiar with the `Optimize` class. If not, you can access a detailed notebook introducting this class [here](../plugins/junctions_and_optimizers.ipynb). # # `ScipyMinimizePlugin` is an `Optimizer` wrapping the `scipy.optimize.minimize` method, thus inheriting from all the underlying minimization algorithms. # # The plugin can be instantiated as follows: # + import numpy from qat.vsolve.optimize import ScipyMinimizePlugin from scipy.optimize import minimize ## A cobyla minimizer over any number of variables, random initialization, 20 max steps cobyla = ScipyMinimizePlugin(tol=1e-2, method="COBYLA", options={"maxiter": 20}) # - # Lets try to use this plugin to solve a QAOA instance. # + from qat.vsolve.qaoa import MaxCut import networkx as nx import matplotlib.pyplot as plt from qat.qpus import get_default_qpu qpu = get_default_qpu() stack = cobyla | qpu graph = nx.cycle_graph(4) problem = MaxCut(graph) job = problem.to_job(2) circuit = job.circuit result = stack.submit(job) print("The maxcut problem:") print(problem) print("Final energy:", result.value) print("The optimization data:") print(result.meta_data["optimizer_data"]) print("The best set of parameters:") print(result.meta_data["parameters"]) # - # Notice that the 'optimizer_data' entry of the result's meta_data contains the (stringified) output of scipy's minimize function. # # As we can see, 20 iterations are not enough for the optimizer to converge. 
# Lets try with 200: cobyla = ScipyMinimizePlugin(method="COBYLA", tol=1e-2, options={"maxiter": 200}) stack = cobyla | qpu result = stack.submit(job) print("The maxcut problem:") print(problem) print("Final energy:", result.value) print("The optimization data:") print(result.meta_data["optimizer_data"])
misc/notebooks/tutorials/variational_algorithms/scipy_optimize_binder.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:metis] * # language: python # name: conda-env-metis-py # --- import sys sys.path.append("/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages") # + import numpy as np import matplotlib.pyplot as plt from scipy.io.wavfile import read, write from pydub import AudioSegment from pydub.playback import play import librosa import librosa.display from IPython.display import Audio from numpy.fft import fft, ifft import sys # %matplotlib inline # - # ## PyDub ## #read in song clime_pt_2 = AudioSegment.from_wav('Audio_Samples/10 Clime Part Two.L.wav') #get duration clime_pt_2.duration_seconds # + #slice audio seconds = 1000 favorite_part = clime_pt_2[285*seconds:290*seconds] # - #playback play(favorite_part) #reverse, for some lolz favorite_reversed = favorite_part.reverse() play(favorite_reversed) #crossfade for fun favorite_faded = favorite_part.fade_in(2*seconds).fade_out(2*seconds) play(favorite_faded) # + #convert to array of samples sound = clime_pt_2.set_frame_rate(16000) channel_sounds = sound.split_to_mono() samples = [s.get_array_of_samples() for s in channel_sounds] fp_arr = np.array(samples).T.astype(np.float32) # - len(fp_arr) fp_arr # ## Librosa ## #load in audio clime_pt_2_path = 'Audio_Samples/10 Clime Part Two.L.wav' x , sr = librosa.load(clime_pt_2_path) print(type(x), type(sr)) #we can also plot waveform in librosa plt.figure(figsize=(14, 5)) librosa.display.waveplot(x, sr=sr) #zero crossing for a segment, used heavily in speech recognition and MIR, associated with percussive musics n0 = 9000 n1 = 9100 plt.figure(figsize=(14, 5)) plt.plot(x[n0:n1]) plt.grid() # calculate zero crossings manually zero_crossings = librosa.zero_crossings(x[n0:n1], pad=False) print(sum(zero_crossings)) #or a spectrogram, showing presence of frequenices X = 
librosa.stft(x) #fourier transform Xdb = librosa.amplitude_to_db(abs(X)) plt.figure(figsize=(14, 5)) librosa.display.specshow(Xdb, sr=sr, x_axis='time', y_axis='hz') plt.colorbar() #log plt.figure(figsize=(14, 5)) librosa.display.specshow(Xdb, sr=sr, x_axis='time', y_axis='log') plt.colorbar() #estimate tempo for song onset_env = librosa.onset.onset_strength(x, sr=sr) tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr) tempo # + #extract harmonic and percussive data harmonic, percussive = librosa.effects.hpss(x) #plot pitch classes chroma = librosa.feature.chroma_cqt(y=harmonic, sr=sr) plt.figure(figsize=(18,5)) librosa.display.specshow(chroma, sr=sr, x_axis='time', y_axis='chroma', vmin=0, vmax=1) plt.title('Chromagram') plt.colorbar(); # -
Audio Demos.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Spotify's Worldwide Daily Song Ranking
#
# Dataset from: https://www.kaggle.com/edumucelli/spotifys-worldwide-daily-song-ranking
# Crawled from: https://spotifycharts.com/regional
# Crawler code is hosted on github: https://github.com/edumucelli/spotify-worldwide-ranking
#
# The original dataset contains only records of 2017. I modified the scripts found on github, to crawl the data of 2018, up to September.

import pandas as pd
import json
import matplotlib.pyplot as plt

# ## Load data

# Daily per-region chart rows; 'Date' is parsed to Timestamps up front.
df_ranking = pd.read_csv('./spotify_worldwide_daily_song_ranking_2017_2018.csv', parse_dates=['Date'])

# Country-code -> continent-code mapping, one row per country.
df_continents = pd.Series(json.load(open('./continents.json')), name='continent').to_frame()
df_continents.index.name = 'code'
df_continents.reset_index(level=0, inplace=True)

# Country metadata keyed by country code.
df_countries = pd.read_json('./countries.json', orient='index')
df_countries.index.name = 'code'
df_countries.reset_index(level=0, inplace=True)

# ## See some items in dataset

df_ranking.head()

# ### Statistical summary

df_ranking.describe(include='all')

# ### Rows contain missing data

df_ranking[df_ranking.isnull().any(axis=1)].head()

df_ranking.isnull().sum()

# ### Drop missing data

df_ranking.dropna(inplace=True)
df_ranking.describe(include='all')

# ### Simple plot

fig = plt.figure(figsize=(16, 4))
df_ranking['Region'].value_counts().plot(kind='bar')

print(df_ranking['Region'].value_counts().tail(30))

# See what's happening in the data of region 'lu'
fig = plt.figure(figsize=(16, 4))
df_ranking.loc[df_ranking['Region'] == 'lu']['Date'].value_counts().plot(kind='line', style='.-')

# Some days do not have data at all
fig = plt.figure(figsize=(16, 4))
x_range = pd.date_range(df_ranking['Date'].min(), df_ranking['Date'].max())
df_ranking['Date'].value_counts().reindex(x_range, fill_value=0).plot(kind='line', style='.-')

df_continents.head()

df_countries.head()

# ## A little bit preprocessing

# Join countries to continents and normalise the column names.
df_region = pd.merge(df_countries, df_continents, right_on='code', left_on='continent')
df_region = df_region[['code_x', 'name', 'code_y', 'continent_y']]
df_region.columns = ['country_code', 'country', 'continent_code', 'continent']
# Add the synthetic 'GLOBAL' region used by the worldwide chart.
# pd.concat replaces the deprecated DataFrame.append — identical result.
df_region = pd.concat([df_region, pd.DataFrame([{
    'country_code': 'GLOBAL',
    'country': 'GLOBAL',
    'continent_code': 'GLOBAL',
    'continent': 'GLOBAL'
}])], ignore_index=True)
df_region['country_code'] = df_region['country_code'].str.lower()
df_region.tail()

df_ranking.head()

df_ranking_region = pd.merge(df_ranking, df_region, left_on='Region', right_on='country_code')
df_ranking_region.head()

# ### Extract more columns from 'Date' column

df_date = df_ranking_region[['Date']]
df_date = df_date.drop_duplicates()
df_date = df_date.reset_index(drop=True)
df_date = df_date.sort_values(['Date'])
df_date.head()

# +
def year (d):
    """Year label like 'Y2017'."""
    return f'Y{d.year}'

print(year(df_date['Date'][0]))

# +
def monthofyear (d):
    """Month label like '2017M01' (zero-padded month)."""
    return 'M'.join((str(d.year), str(d.month).zfill(2)))

print(monthofyear(df_date['Date'][0]))

# +
def quarterofyear (d):
    """Quarter label like '2017Q1'."""
    return 'Q'.join((str(d.year), str(d.quarter)))

print(quarterofyear(df_date['Date'][0]))

# +
def weekofyear (d):
    """Week label like '2016W52', using the ISO week-numbering year.

    Fix: the previous version special-cased only January dates whose week
    was exactly 52, which mislabelled ISO week 53 (e.g. 2016-01-01 belongs
    to '2015W53') and late-December dates that belong to week 1 of the next
    ISO year (e.g. 2018-12-31 is '2019W01'). isocalendar() covers all cases.
    """
    iso = d.isocalendar()  # (ISO year, ISO week, ISO weekday)
    return 'W'.join((str(iso[0]), str(iso[1]).zfill(2)))

print(weekofyear(df_date['Date'][0]))
print(weekofyear(df_date['Date'][10]))
# -

df_date.count()

df_date['Year'] = df_date['Date'].apply(lambda x: year(x))
df_date['Quarterofyear'] = df_date['Date'].apply(lambda x: quarterofyear(x))
df_date['Monthofyear'] = df_date['Date'].apply(lambda x: monthofyear(x))
df_date['Weekofyear'] = df_date['Date'].apply(lambda x: weekofyear(x))
df_date.head()

df_date.tail()

df_ranking = df_ranking_region.merge(df_date, on='Date', how='left')

# df_ranking_2017 = df_ranking.loc[df_ranking['Year'] == 'Y2017']
# df_ranking_2017.tail()

# Missing data for week 2017W22
fig = plt.figure(figsize=(16, 4))
df_ranking['Weekofyear'].value_counts().reindex(df_date['Weekofyear'].sort_values(), fill_value=0).plot(kind='line', style='.-')

# ### Unique songs

df_songs = df_ranking[['URL', 'Track Name', 'Artist']].drop_duplicates(['URL'])
df_songs.count()

# ## Aggregate stream counts

# Per-song daily stream totals, aggregated up from countries to continents.
df_ranking_continent = df_ranking.groupby(['URL', 'continent', 'Date']).sum()
df_ranking_continent = df_ranking_continent.reset_index()
df_ranking_continent = df_ranking_continent.merge(df_date, on='Date', how='left')
df_ranking_continent.head()

df_ranking_continent_daily = df_ranking_continent.merge(df_songs, on='URL', how='left')
df_ranking_continent_daily.sort_values(['Streams'], ascending=[False]).head()

df_ranking_continent_yearly = df_ranking_continent_daily.groupby(['URL', 'continent', 'Year'])['Streams'].sum()
df_ranking_continent_yearly = df_ranking_continent_yearly.reset_index()
df_ranking_continent_yearly = df_ranking_continent_yearly.merge(df_songs, on='URL', how='left')
df_ranking_continent_yearly.head()

df_ = df_ranking_continent_yearly[df_ranking_continent_yearly['continent'] == 'GLOBAL']
df_.sort_values(['Streams'], ascending=[False]).head()

# df_ranking_continent_weekly = df_ranking_continent_daily.groupby(['URL', 'continent', 'Weekofyear'])['Streams'].sum()
# Smooth out missing data: mean daily streams * 7 estimates a full week even
# when some days of a week are absent from the crawl (e.g. week 2017W22).
df_ranking_continent_weekly = df_ranking_continent_daily.groupby(['URL', 'continent', 'Weekofyear'])['Streams'].mean()*7
df_ranking_continent_weekly = df_ranking_continent_weekly.reset_index()
df_ranking_continent_weekly = df_ranking_continent_weekly.merge(df_songs, on='URL', how='left')
df_ranking_continent_weekly.head()

df_ = df_ranking_continent_weekly[df_ranking_continent_weekly['continent'] == 'GLOBAL']
df_.sort_values(['Streams'], ascending=[False]).head(20)

df_songs[df_songs['Track Name'] == 'Something Just Like This']['URL'].to_string()

df_ = df_ranking_continent_weekly[df_ranking_continent_weekly['Track Name'] == "Something Just Like This"]
df_.sort_values(['Weekofyear'], ascending=[True]).head(20)

df_ranking_continent_weekly.to_csv('ranking_continent_weekly.csv', index=False)

df_sjlt_weekly = df_ranking_continent_weekly[df_ranking_continent_weekly['Track Name'] == "Something Just Like This"]
df_sjlt_weekly = df_sjlt_weekly.sort_values(['Weekofyear', 'Streams'], ascending=[True, False])
df_sjlt_weekly.to_csv('something_just_like_this_weekly.csv', index=False)

df_sjlt_daily = df_ranking_continent_daily[df_ranking_continent_daily['Track Name'] == "Something Just Like This"]
df_sjlt_daily = df_sjlt_daily.sort_values(['Weekofyear', 'Streams'], ascending=[True, False])
df_sjlt_daily.to_csv('something_just_like_this_daily.csv', index=False)
20180909_spotify-song-streams-radial-stacked-bar-chart/spotify-worldwide-daily-song-ranking.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Adattípusok # # (2020. 03. 13. - 3. óra) # # <NAME> # # --- # + [markdown] slideshow={"slide_type": "slide"} # ## 1. Adat és utasítás # + slideshow={"slide_type": "slide"} len('alma') # + [markdown] slideshow={"slide_type": "slide"} # Dokumentáció: # # - típusok: <https://docs.python.org/3/library/stdtypes.html> # - függvények: <https://docs.python.org/3/library/functions.html> # - # Adat helyettesítése *változóval*: x = 'alma' len(x) # + [markdown] slideshow={"slide_type": "slide"} # ## 2. Változók és értékadás # # Változó: címke egy memóriaterületen (pl. C/C++) # # ![cpp](img/mem_c.png) # # Változó: címke egy memóriában lévő objektumon (Python) # # ![python](img/mem_py.png) # - x = 10 print(x) x = 20 print(x) x = 'alma' # + [markdown] slideshow={"slide_type": "subslide"} # ### Trükkös értékadások # + slideshow={"slide_type": "subslide"} x = y = 10 print(x, y) x, y = 10, 20 print(x, y) # + [markdown] slideshow={"slide_type": "slide"} # ## 3. Miért van az adatoknak típusa? 
# # <br> # # ```python # type(adat) # ``` # # - memória # # - utasítások (függvények, műveletek, metódusok) # + [markdown] slideshow={"slide_type": "subslide"} # ### Függvények # + slideshow={"slide_type": "subslide"} s = 'alma' x = 10 print(s) print(x) # + slideshow={"slide_type": "subslide"} len(s) # + slideshow={"slide_type": "subslide"} len(x) # + [markdown] slideshow={"slide_type": "subslide"} # ### Műveletek # + slideshow={"slide_type": "subslide"} x + 20 # + slideshow={"slide_type": "subslide"} s + 'alma' # + slideshow={"slide_type": "subslide"} x + 'alma' # - # ### Metódusok # + slideshow={"slide_type": "fragment"} s.upper() # + slideshow={"slide_type": "fragment"} x.upper() # + slideshow={"slide_type": "fragment"} # A műveletek is metódusok: x.__add__(20) # - 'alma'.__add__('barack') # + [markdown] slideshow={"slide_type": "slide"} # ## 4. Egyszerű adattípusok # + [markdown] slideshow={"slide_type": "subslide"} # ### Igazságérték (*boolean*) # # - lehetséges értékek: `True` vagy`False` # - műveletek: `and`, `or`, `not` # - kapcsolódó függvények: `bool()` # + [markdown] slideshow={"slide_type": "subslide"} # ### Egész számok (*integer*) # # - lehetséges értékek: ~ # - műveletek: `+`, `-`, `*`, `/`, `//`, `%`, `**`, `<`, `<=` `>`, `>=`, `==`, `!=` # - kapcsolódó függvények: `int()`, `bin()`, `oct()`, `hex()` # + [markdown] slideshow={"slide_type": "subslide"} # ### Lebegőpontos számok (*float*) # # - lehetséges értékek: tizedestörtek # - műveletek: `+`, `-`, `*`, `/`, `//`, `%`, `**`, `<`, `<=` `>`, `>=`, `==`, `!=` # - kapcsolódó függvények: `float()`, `round()` # + [markdown] slideshow={"slide_type": "subslide"} # ### Karakterláncok (*string*) # # - lehetséges értékek: ~ # - jelölés: `'valami'` vagy `"valami"` # - műveletek: `+ <string>`, `* <int>`, `<`, `<=` `>`, `>=` # + [markdown] slideshow={"slide_type": "subslide"} # ### Karakterláncok indexelése, szeletelése # # - `s[i]`: `s` karakterlánc `i`-edik karaktere # - `s[:i]`: `i`-edik karakterig az összes, 
`i` már nem # - `s[i:]`: `i`-edik karaktertől az összes, `i` is # - `s[i:j]`: `i`-edik karaktertől a `j`-edikig, `i`-edik benne van `j`-edik már nincs # + [markdown] slideshow={"slide_type": "subslide"} # ### Karakterláncok metódusai # # - `s.upper()`: nagybetűsít # - `s.lower()`: kisbetűsít # - `s.endswith(suffix)`: igaz, ha a suffixszel végződik a string # - `s.startswith(prefix)`: igaz, ha a prefixszel kezdődik a string # - `s.isdigit()`: igaz, ha `s` csak számjegyeket tartalmaz # - `s.isalpha()`: igaz, ha `s` csak betűket tartalmaz # - `s.isspace()`: igaz, ha `s` csak szóközjellegű karaktereket tartalmaz # - `s.join(iterable)`: *iterable* (pl. lista) elemeit konkatenálja, *s*-t használva összekötőnek # - `s.strip()`: levágja a string elején és végén lévő szóközjellegű karaktereket # - a `s.strip(karakterek)` szóközök helyett a felsorolt karaktereket vágja le # - az `lstrip()`, és az `rstrip()` csak a bal- ill. jobboldaliakat vágja le # - `s.split(delimiter)`: felszeleteli a stringet delimiterek mentén, a szeletek listáját adja vissza # - a `s.split()` a szóközjellegű karakterek mentén darabol # - `s.replace(mit, mire)`: lecseréli a stringben az elsőként megadott paraméter előfordulásait a második paraméterre # # Továbbiak: <https://docs.python.org/3/library/stdtypes.html#string-methods>
02.bevprog/types.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Q# # language: qsharp # name: iqsharp # --- # # Teleportation Kata # # **Teleportation** quantum kata is a series of exercises designed to get you familiar with programming in Q#. It covers the quantum teleportation protocol which allows you to communicate a quantum state using only classical communication and previously shared quantum entanglement. # # - Teleportation is described in [this Wikipedia article](https://en.wikipedia.org/wiki/Quantum_teleportation). # - An interactive demonstration can be found [on the Wolfram Demonstrations Project](http://demonstrations.wolfram.com/QuantumTeleportation/). # # Each task is wrapped in one operation preceded by the description of the task. Your goal is to fill in the blank (marked with `// ...` comment) with some Q# code that solves the task. To verify your answer, run the cell using Ctrl+Enter (⌘+Enter on macOS). # ## Part I. Standard Teleportation # We split the teleportation protocol into several steps, following the description in the [Q# documentation](https://docs.microsoft.com/en-us/quantum/techniques/putting-it-all-together) : # # * Preparation (creating the entangled pair of qubits that are sent to Alice and Bob). # * Sending the message (Alice's task): Entangling the message qubit with Alice's qubit and extracting two classical bits to be sent to Bob. # * Reconstructing the message (Bob's task): Using the two classical bits Bob received from Alice to get Bob's qubit into the state in which the message qubit had been originally. Finally, we compose these steps into the complete teleportation protocol. # ### Task 1.1. Entangled pair # # **Input:** two qubits qAlice and qBob, each in $|0\rangle$ state. # # **Goal:** prepare a Bell state $|\Phi^{+}\rangle = \frac{1}{\sqrt{2}}(|00\rangle + |11\rangle)$ on these qubits. 
# # > In the context of the quantum teleportation protocol, this is the preparation step: qubits qAlice and qBob will be sent to Alice and Bob, respectively. # + %kata T11_Entangle operation Entangle (qAlice : Qubit, qBob : Qubit) : Unit { H(qAlice); CX(qAlice, qBob); } # - # ### Task 1.2. Send the message (Alice's task) # # Entangle the message qubit with Alice's qubit and extract two classical bits to be sent to Bob. # # **Inputs**: # 1. Alice's part of the entangled pair of qubits qAlice. # 2. The message qubit qMessage. # # **Output**: # Two classical bits Alice will send to Bob via classical channel as a tuple of Bool values. The first bit in the tuple should hold the result of measurement of the message qubit, the second bit - the result of measurement of Alice's qubit. # Represent measurement result 'One' as `true` and 'Zero' as `false`. The state of the qubits in the end of the operation doesn't matter. # + %kata T12_SendMessage operation SendMessage (qAlice : Qubit, qMessage : Qubit) : (Bool, Bool) { CX(qAlice, qMessage); H(qAlice); let aBit = M(qAlice); let bBit = M(qMessage); return (aBit == One, bBit == One); } # - # ### Task 1.3. Reconstruct the message (Bob's task) # # Transform Bob's qubit into the required state using the two classical bits received from Alice. # # **Inputs**: # 1. Bob's part of the entangled pair of qubits qBob. # 2. The tuple of classical bits received from Alice, in the format used in task 1.2. # # **Goal** : Transform Bob's qubit qBob into the state in which the message qubit had been originally. # + %kata T13_ReconstructMessage operation ReconstructMessage (qBob : Qubit, (b1 : Bool, b2 : Bool)) : Unit { if (b2) { X(qBob); } if (b1) { Z(qBob); } } # - # ### Task 1.4. Standard teleportation protocol # # Put together the steps implemented in tasks 1.1 - 1.3 to implement the full teleportation protocol. # # **Inputs:** # # 1. The two qubits qAlice and qBob in $|0\rangle$ state. # 2. 
The message qubit qMessage in the state $|\psi\rangle$ to be teleported. # # **Goal:** Transform Bob's qubit qBob into the state $|\psi\rangle$. The state of the qubits qAlice and qMessage in the end of the operation doesn't matter. # + %kata T14_StandardTeleport operation StandardTeleport (qAlice : Qubit, qBob : Qubit, qMessage : Qubit) : Unit { Entangle(qAlice, qBob); let bits = SendMessage(qAlice, qMessage); ReconstructMessage(qBob, bits); } # - # ### Task 1.5. Prepare a state and send it as a message (Alice's task) # # Given a Pauli basis along with a state `true` as 'One' or `false` as 'Zero', prepare a message qubit, entangle it with Alice's qubit, and extract two classical bits to be sent to Bob. # # **Inputs:** # # 1. Alice's part of the entangled pair of qubits qAlice. # 2. A PauliX, PauliY, or PauliZ basis in which the message qubit should be prepared # 3. A Bool indicating the eigenstate in which the message qubit should be prepared # # **Output:** # # Two classical bits Alice will send to Bob via classical channel as a tuple of Bool values. The first bit in the tuple should hold the result of measurement of the message qubit, the second bit - the result of measurement of Alice's qubit. Represent measurement result 'One' as `true` and 'Zero' as `false`. The state of the qubit qAlice in the end of the operation doesn't matter. # + %kata T15_PrepareAndSendMessage open Microsoft.Quantum.Preparation; operation PrepareAndSendMessage (qAlice : Qubit, basis : Pauli, state : Bool) : (Bool, Bool) { use qMessage = Qubit() { //prep payload if (state) { X(qMessage); } PreparePauliEigenstate(basis, qMessage); //Pauli, Qubit //measure both qubits let bits = SendMessage(qAlice, qMessage); return bits; } } # - # ### Task 1.6. Reconstruct and measure the message state (Bob's task) # # Transform Bob's qubit into the required state using the two classical bits received from Alice and measure it in the same basis in which she prepared the message. # # **Inputs:** # # 1. 
Bob's part of the entangled pair of qubits qBob. # 2. The tuple of classical bits received from Alice, in the format used in task 1.5. # 3. The PauliX, PauliY, or PauliZ basis in which the message qubit was originally prepared. # # **Output:** A Bool indicating the eigenstate in which the message qubit was prepared, 'One' as `true` and 'Zero' as `false`. The state of the qubit qBob in the end of the operation doesn't matter. # # > To get the output, transform Bob's qubit qBob into the state in which the message qubit was originally prepared, then measure it. # + %kata T16_ReconstructAndMeasureMessage operation ReconstructAndMeasureMessage (qBob : Qubit, (b1 : Bool, b2 : Bool), basis : Pauli) : Bool { ReconstructMessage(qBob, (b1, b2)); //let bobBit = M(qBob); //this won't quite work because we have to measure in the right basis let bobBit = Measure([basis], [qBob]); //Measure (bases : Pauli[], qubits : Qubit[]) - note treated as arrays return bobBit == One; } # - # ### Task 1.7. Testing standard quantum teleportation # # **Goal:** Test that the `StandardTeleport` operation from task 1.4 is able to successfully teleport the states $|0\rangle$ and $|1\rangle$, as well as superposition states such as $\frac{1}{\sqrt{2}} \big(|0\rangle + |1\rangle\big)$, $\frac{1}{\sqrt{2}} \big(|0\rangle - |1\rangle\big)$, $\frac{1}{\sqrt{2}} \big(|0\rangle + i|1\rangle\big)$ and $\frac{1}{\sqrt{2}} \big(|0\rangle - i|1\rangle\big)$. # # > This is an open-ended task, and is not covered by a unit test. To run the code, execute the cell with the definition of the `Run_StandardTeleport` operation first; if it compiled successfully without any errors, you can run the operation by executing the next cell (`%simulate Run_StandardTeleport`). # # > Note that this task relies on your implementations of the previous tasks. If you are getting the "No variable with that name exists." error, you might have to execute previous code cells before retrying this task. 
# # <details> # <summary><b>Need a hint? Click here</b</summary> # You may find your answers for 1.5 and 1.6 useful # </details> # + open Microsoft.Quantum.Diagnostics; operation Run_StandardTeleport () : String { use (qAlice, qBob, qMessage) = (Qubit(), Qubit(), Qubit()) { //case 1: by default, qMessage is in |0>, so Bob's bit should read as 0 //case 2: put qMessage in |1>, then Bob's bit should be 1, which would yield 010, aka |2> //X(qMessage); StandardTeleport (qAlice, qBob, qMessage); //let bit = M(qBob); //Message($"bit = {bit}"); DumpMachine(); Reset(qAlice); Reset(qBob); Reset(qMessage); return "Did Teleportation succeed?"; } } # - %simulate Run_StandardTeleport # ## Part II. Teleportation using different entangled pair # # In this section we will take a look at the changes in the reconstruction process (Bob's task) if the qubits shared between Alice and Bob are entangled in a different state. Alice's part of the protocol remains the same in all tasks. # As a reminder, the standard teleportation protocol requires shared qubits in state $|\Phi^{+}\rangle = \frac{1}{\sqrt{2}} \big(|00\rangle + |11\rangle\big)$. # # In each task, the inputs are # 1. Bob's part of the entangled pair of qubits qBob. # 2. The tuple of classical bits received from Alice, in the format used in task 1.2. # # The goal is to transform Bob's qubit qBob into the state in which the message qubit had been originally. # ### Task 2.1. Reconstruct the message if the entangled qubits were in the state $|\Phi^{-}\rangle = \frac{1}{\sqrt{2}} \big(|00\rangle - |11\rangle\big)$ # + %kata T21_ReconstructMessage_PhiMinus operation ReconstructMessage_PhiMinus (qBob : Qubit, (b1 : Bool, b2 : Bool)) : Unit { // ... } # - # ### Task 2.2. 
Reconstruct the message if the entangled qubits were in the state $|\Psi^{+}\rangle = \frac{1}{\sqrt{2}} \big(|01\rangle + |10\rangle\big)$ # + %kata T22_ReconstructMessage_PsiPlus operation ReconstructMessage_PsiPlus (qBob : Qubit, (b1 : Bool, b2 : Bool)) : Unit { // ... } # - # ### Task 2.3. Reconstruct the message if the entangled qubits were in the state $|\Psi^{-}\rangle = \frac{1}{\sqrt{2}} \big(|01\rangle - |10\rangle\big)$ # + %kata T23_ReconstructMessage_PsiMinus operation ReconstructMessage_PsiMinus (qBob : Qubit, (b1 : Bool, b2 : Bool)) : Unit { // ... } # - # ## Part III. Principle of deferred measurement # # The principle of deferred measurement claims that measurements can be moved from an intermediate stage of a quantum circuit to the end of the circuit. If the measurement results are used to perform classically controlled operations, they can be replaced by controlled quantum operations. # ### Task 3.1. Measurement-free teleportation. # # In this task we will apply this principle to the teleportation circuit. # # **Inputs:** # # 1. The two qubits qAlice and qBob in $|\Phi^{+}\rangle$ state. # 2. The message qubit qMessage in the state $|\psi\rangle$ to be teleported. # # **Goal:** transform Bob's qubit qBob into the state $|\psi\rangle$ using no measurements. At the end of the operation qubits qAlice and qMessage should not be entangled with qBob. # + %kata T31_MeasurementFreeTeleport operation MeasurementFreeTeleport (qAlice : Qubit, qBob : Qubit, qMessage : Qubit) : Unit { // ... } # - # ## Part IV. Teleportation with three entangled qubits # # Quantum teleportation using entangled states other than Bell pairs is also feasible. Here we look at just one of many possible schemes - in it a state is transferred from Alice to a third participant Charlie, but this may only be accomplished if Charlie # has the trust of the second participant Bob. # ### Task 4.1. 
Entangled trio # # **Inputs:** three qubits qAlice, qBob, and qCharlie, each in $|0\rangle$ state. # # **Goal:** create an entangled state $|\Psi^{3}\rangle = \frac{1}{2} \big(|000\rangle + |011\rangle + |101\rangle + |110\rangle\big)$ on these qubits. # # In the context of the quantum teleportation protocol, this is the preparation step: qubits qAlice, qBob, and qCharlie will be sent to Alice, Bob, and Charlie respectively. # + %kata T41_EntangleThreeQubits operation EntangleThreeQubits (qAlice : Qubit, qBob : Qubit, qCharlie : Qubit) : Unit { // ... } # - # ### Task 4.2. Reconstruct the message (Charlie's task) # # Alice has a message qubit in the state $|\psi\rangle$ to be teleported, she has entangled it with her own qubit from $|\Psi^{3}\rangle$ in the same manner as task 1.2 and extracted two classical bits in order to send them to Charlie. Bob has also measured his own qubit from $|\Psi^{3}\rangle$ and sent Charlie the result. Transform Charlie's qubit into the required state using the two classical bits received from Alice, and the one classical bit received from Bob. # # **Inputs:** # 1. Charlie's part of the entangled trio of qubits qCharlie. # 2. The tuple of classical bits received from Alice, in the format used in task 1.2. # 3. A classical bit resulting from the measurement of Bob's qubit. # # **Goal:** Transform Charlie's qubit qCharlie into the state in which the message qubit had been originally. # + %kata T42_ReconstructMessageWhenThreeEntangledQubits operation ReconstructMessageWhenThreeEntangledQubits (qCharlie : Qubit, (b1 : Bool, b2 : Bool), b3 : Bool) : Unit { // ... }
Teleportation/Teleportation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # name: python2 # --- import pandas as pd import sqlite3 # # 9.1 Reading data from SQL databases # So far we've only talked about reading data from CSV files. That's a pretty common way to store data, but there are many others! Pandas can read from HTML, JSON, SQL, Excel (!!!), HDF5, Stata, and a few other things. In this chapter we'll talk about reading data from SQL databases. # # You can read data from a SQL database using the `pd.read_sql` function. `read_sql` will automatically convert SQL column names to DataFrame column names. # # `read_sql` takes 2 arguments: a `SELECT` statement, and a database connection object. This is great because it means you can read from *any* kind of SQL database -- it doesn't matter if it's MySQL, SQLite, PostgreSQL, or something else. # # This example reads from a SQLite database, but any other database would work the same way. con = sqlite3.connect("../data/weather_2012.sqlite") df = pd.read_sql("SELECT * from weather_2012 LIMIT 3", con) df # `read_sql` doesn't automatically set the primary key (`id`) to be the index of the dataframe. You can make it do that by adding an `index_col` argument to `read_sql`. # # If you've used `read_csv` a lot, you may have seen that it has an `index_col` argument as well. This one behaves the same way. df = pd.read_sql("SELECT * from weather_2012 LIMIT 3", con, index_col='id') df # If you want your dataframe to be indexed by more than one column, you can give a list of columns to `index_col`: df = pd.read_sql("SELECT * from weather_2012 LIMIT 3", con, index_col=['id', 'date_time']) df # # 9.2 Writing to a SQLite database # Pandas has a `write_frame` function which creates a database table from a dataframe. Right now this only works for SQLite databases. Let's use it to move our 2012 weather data into SQL. 
# # You'll notice that this function is in `pd.io.sql`. There are a ton of useful functions for reading and writing various kind of data in `pd.io`, and it's worth spending some time exploring them. ([see the documentation!](http://pandas.pydata.org/pandas-docs/stable/io.html)) weather_df = pd.read_csv('../data/weather_2012.csv') con = sqlite3.connect("../data/test_db.sqlite") con.execute("DROP TABLE IF EXISTS weather_2012") weather_df.to_sql("weather_2012", con) # We can now read from the `weather_2012` table in `test_db.sqlite`, and we see that we get the same data back: con = sqlite3.connect("../data/test_db.sqlite") df = pd.read_sql("SELECT * from weather_2012 LIMIT 3", con) df # The nice thing about having your data in a database is that you can do arbitrary SQL queries. This is cool especially if you're more familiar with SQL. Here's an example of sorting by the Weather column: con = sqlite3.connect("../data/test_db.sqlite") df = pd.read_sql("SELECT * from weather_2012 ORDER BY Weather LIMIT 3", con) df # If you have a PostgreSQL database or MySQL database, reading from it works exactly the same way as reading from a SQLite database. 
You create a connection using `psycopg2.connect()` or `MySQLdb.connect()`, and then use # # `pd.read_sql("SELECT whatever from your_table", con)` # # 9.3 Connecting to other kinds of database # To connect to a MySQL database: # # *Note: For these to work, you will need a working MySQL / PostgreSQL database, with the correct localhost, database name, etc.* # + active="" # import MySQLdb # con = MySQLdb.connect(host="localhost", db="test") # - # To connect to a PostgreSQL database: # + active="" # import psycopg2 # con = psycopg2.connect(host="localhost") # - # <style> # @font-face { # font-family: "Computer Modern"; # src: url('http://mirrors.ctan.org/fonts/cm-unicode/fonts/otf/cmunss.otf'); # } # div.cell{ # width:800px; # margin-left:16% !important; # margin-right:auto; # } # h1 { # font-family: Helvetica, serif; # } # h4{ # margin-top:12px; # margin-bottom: 3px; # } # div.text_cell_render{ # font-family: Computer Modern, "Helvetica Neue", Arial, Helvetica, Geneva, sans-serif; # line-height: 145%; # font-size: 130%; # width:800px; # margin-left:auto; # margin-right:auto; # } # .CodeMirror{ # font-family: "Source Code Pro", source-code-pro,Consolas, monospace; # } # .text_cell_render h5 { # font-weight: 300; # font-size: 22pt; # color: #4057A1; # font-style: italic; # margin-bottom: .5em; # margin-top: 0.5em; # display: block; # } # # .warning{ # color: rgb( 240, 20, 20 ) # }
Learning Unittests/pandas-cookbook-master/pandas-cookbook-master/cookbook/Chapter 9 - Loading data from SQL databases.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:py3] # language: python # name: conda-env-py3-py # --- # + import matplotlib.pyplot as plt import netCDF4 as nc import numpy as np from salishsea_tools import geo_tools # %matplotlib inline # - bathyfile = '/home/sallen/MEOPAR/grid/bathymetry_201702.nc' meshfile = '/home/sallen/MEOPAR/grid/mesh_mask201702.nc' mesh = nc.Dataset(meshfile) model_lats = nc.Dataset(bathyfile).variables['nav_lat'][:] model_lons = nc.Dataset(bathyfile).variables['nav_lon'][:] t_mask = mesh.variables['tmask'][0, 0] windfile = './ubcSSaAtmosphereGridV1_0f03_6268_df4b.nc' wind_lats = nc.Dataset(windfile).variables['latitude'][:] wind_lons = nc.Dataset(windfile).variables['longitude'][:] -360 wavefile = '/results/SalishSea/wwatch3-forecast/SoG_ww3_fields_20170515_20170517.nc' wave_lats = nc.Dataset(wavefile).variables['latitude'][:] wave_lons = nc.Dataset(wavefile).variables['longitude'][:] -360. 
wave_lons, wave_lats = np.meshgrid(wave_lons, wave_lats) hs = nc.Dataset(wavefile).variables['hs'][0] wave_mask = np.where(hs !=0, 1, 0) def get_tidal_stations(lon, lat, model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=20): y, x = geo_tools.find_closest_model_point(lon, lat, model_lons, model_lats, grid='NEMO', land_mask=1-t_mask) ywind, xwind = geo_tools.find_closest_model_point(lon, lat, wind_lons, wind_lats, grid='GEM2.5') ywave, xwave = geo_tools.find_closest_model_point(lon, lat, wave_lons, wave_lats, grid='NEMO', land_mask=1-wave_mask) fig, ax = plt.subplots(1, 1, figsize=(7, 7)) bigx = min(x+size, model_lons.shape[1]-1) imin, imax = model_lats[y-size, x-size], model_lats[y+size, bigx] jmin, jmax = model_lons[y+size, x-size], model_lons[y-size, bigx] dlon = model_lons[y+1, x+1] - model_lons[y, x] dlat = model_lats[y+1, x+1] - model_lats[y, x] ax.pcolormesh(model_lons - dlon/2., model_lats-dlat/2., t_mask, cmap='Greys_r') ax.set_xlim(jmin, jmax) ax.set_ylim(imin, imax) ax.plot(model_lons[y, x], model_lats[y, x], 'ro', label='NEMO') ax.plot(wind_lons[ywind, xwind], wind_lats[ywind, xwind], 'ys', label='GEM2.5') ax.plot(wave_lons[ywave, xwave], wave_lats[ywave, xwave], 'bo', label='WW3') ax.legend() return "NEMO y, x: {0}, Wind y, x: {1}, Wave y, x: {2}".format([y, x], [ywind, xwind], [ywave, xwave]) # ### <NAME> # # 7277 <NAME> 48.6536  123.4515  get_tidal_stations(-123.4515, 48.6536, model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=10) # ### Woodwards # 7610 Woodwards's Landing 49.1251  123.0754  get_tidal_stations(-123.0754, 49.1251, model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=10) # ### New Westminster # 7654 New Westminster 49.203683  122.90535  get_tidal_stations(-122.90535, 49.203683, model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=10) # ### Sandy Cove # 7786 Sandy Cove 49.34  
123.23  get_tidal_stations(-123.23, 49.34, model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=10) # ### Port Renfrew # check # 8525 Port Renfrew 48.555 124.421 get_tidal_stations(-124.421, 48.555, model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=10) # ### Victoria # 7120 Victoria 48.424666  123.3707  get_tidal_stations(-123.3707, 48.424666, model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=10) # ### Sand Heads # 7594 Sand Heads 49.125 123.195   # From Marlene's email # 49º 06’ 21.1857’’, -123º 18’ 12.4789’’ # we are using 426, 292 # end of jetty is 429, 295 lat_sh = 49+6/60.+21.1857/3600. lon_sh = -(123+18/60.+12.4789/3600.) print(lon_sh, lat_sh) get_tidal_stations(lon_sh, lat_sh, model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=20) # ### Nanaimo # 7917 Nanaimo 49.17  123.93  get_tidal_stations(-123.93, 49.17, model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=10) # In our code its at 484, 208 with lon,lat at -123.93 and 49.16: leave as is for now # ### Boundary Bay # # Guesstimated from Map # -122.925 49.0 get_tidal_stations(-122.925, 49.0, model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=15) # ### Squamish # 49 41.675 N 123 09.299 W print (49+41.675/60, -(123+9.299/60.)) print (model_lons.shape) get_tidal_stations(-(123+9.299/60.), 49.+41.675/60., model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=10) # ### Half Moon Bay # 49 30.687 N 123 54.726 W print (49+30.687/60, -(123+54.726/60.)) get_tidal_stations(-(123+54.726/60.), 49.+30.687/60., model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=10) # ### Friday Harbour # -123.016667, 48.55 get_tidal_stations(-123.016667, 48.55, model_lons, model_lats, wind_lons, wind_lats, wave_lons, 
wave_lats, t_mask, wave_mask, size=10) # ### Neah Bay # -124.6, 48.4 get_tidal_stations(-124.6, 48.4, model_lons, model_lats, wind_lons, wind_lats, wave_lons, wave_lats, t_mask, wave_mask, size=10) from salishsea_tools import places
notebooks/Tidal Station Locations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import warnings warnings.filterwarnings('ignore') # %matplotlib notebook import pandas as pd import numpy as np from util import * from sklearn.model_selection import train_test_split from sklearn import metrics from skater.core.global_interpretation.interpretable_models.brlc import BRLC from skater.core.global_interpretation.interpretable_models.bigdatabrlc import BigDataBRLC # Read the datasets df = pd.read_csv('LoanStats3a.csv', skiprows=1) df = df[df.loan_status.isin(['Fully Paid','Charged Off'])] # + #remove columns that are entirely null for column in df.columns: if df[column].isnull().mean() >= .99: df = df.drop(column, 1) #remove columns with constant values for column in df.columns: if df[column].unique().shape[0] == 1: df = df.drop(column, 1) # - # ### The Data print("The data has {0} rows and {1} fields".format(*df.shape)) df.head(1).T # Quick Summary df.describe() df['int_rate'] = df['int_rate'].apply(process_int_rate) df['term'] = df['term'].apply(process_term) df['emp_length'] = df['emp_length'].apply(process_emp_length) df['revol_util'] = df['revol_util'].apply(process_revol_util) df['pub_rec_bankruptcies'] = df['pub_rec_bankruptcies'].fillna(0) df.head(2).T # %matplotlib inline df.loan_status.value_counts().plot(kind='bar') # ### DTI also a factor def_by_dti = df.set_index('dti').groupby(by=(lambda x: round_to_nearest(x, 5), 'loan_status'))['loan_amnt'].count().unstack() def_by_dti = (def_by_dti['Charged Off'] / def_by_dti.sum(axis=1)) ax = def_by_dti.plot(kind = 'bar') # ### Small business loans are much riskier def_rates_by_categorical(df, 'purpose', with_variance=True) # ### (Light) Feature Engineering # + df_ = df.copy() domain_columns = ['loan_amnt', 'term', 'annual_inc', 'installment_over_income', 'has_employer_info', 
'is_employed', 'dti', 'inq_last_6mths', 'delinq_2yrs', 'open_acc', 'int_rate', 'revol_util', 'pub_rec_bankruptcies', 'revol_bal', 'requested_minus_funded', 'debt_to_income' ] #features to engineer df_['requested_minus_funded'] = df_['loan_amnt'] - df_['funded_amnt'] df_['has_employer_info'] = df_['emp_title'].isnull() df_['installment_over_income'] = df_['installment'] / df_['annual_inc'] df_['is_employed'] = df_['emp_length'].isnull() df_['debt_to_income'] = (df_['revol_bal'] + df_['funded_amnt']) / df['annual_inc'] #dummy section dummy_columns = ['home_ownership'] #'grade', 'addr_state' for column in dummy_columns: dummies = pd.get_dummies(df_[column], prefix="{}_is".format(column)) columns_to_add = dummies.columns.values[:-1] dummies = dummies[columns_to_add] df_ = df_.join(dummies) domain_columns.extend(columns_to_add) df_["emp_title"] = df_["emp_title"].fillna("None") df_['target'] = df_['loan_status'].apply(lambda x: 0 if x == 'Charged Off' else 1) domain_columns = list(set(domain_columns)) from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X = scaler.fit_transform(df_[domain_columns].values) X_df = pd.DataFrame(X) # + # input_data = df_[domain_columns] # input_data.head(2) # input_data["has_employer_info"] = input_data["has_employer_info"].astype('category') # input_data["has_employer_info_Encoded"] = input_data["has_employer_info"].cat.codes # input_data["is_employed"] = input_data["is_employed"].astype('category') # input_data["is_employed_Encoded"] = input_data["is_employed"].cat.codes # input_data = input_data.drop(['has_employer_info', 'is_employed'], axis=1) # # # Remove NaN values # input_data_clean = input_data.dropna() # + # input_data_clean = input_data_clean[["is_employed_Encoded", "has_employer_info_Encoded"]] # print(input_data_clean.dtypes) # input_data_clean.head(2) # - # Default test split-size = 0.25 y = df_['target'] Xtrain, Xtest, ytrain, ytest = 
train_test_split(X_df, y) # split # Target Labels: 1:positive 0:negative np.unique(ytrain) print(len(Xtrain)) print(type(Xtrain)) print(type(ytrain)) Xtrain.head() sbrl_big = BigDataBRLC(sub_sample_percentage=0.1, min_rule_len=1, max_rule_len=3, iterations=10000, n_chains=3, surrogate_estimator="SVM", drop_features=True) n_x, n_y = sbrl_big.subsample(Xtrain, ytrain, pos_label=1) print(len(n_x)) # Create an instance of the estimator from timeit import default_timer as timer from datetime import timedelta start = timer() # Train a model, by default discretizer is enabled. So, you wish to exclude features then exclude them using # the undiscretize_feature_list parameter model = sbrl_big.fit(n_x, n_y, bin_labels='default') elapsed = (timer() - start) print(timedelta(seconds=round(elapsed))) # Features considered sbrl_big.feature_names sbrl_big.print_model() sbrl_big.save_model("model1.pkl") # quick look at the test set Xtest[0:3] # + # Discretize the testing set similar to train set new_X_test = sbrl_big.discretizer(Xtest, n_x.columns, labels_for_bin='default') #ytest = n_y new_X_test.head(2) print(new_X_test.shape[0]) print(len(ytest)) new_X_train = sbrl_big.discretizer(Xtrain, n_x.columns, labels_for_bin='default') print(new_X_train.shape[0]) print(len(ytrain)) # - # ### Computing performance metrics for BRLC for train and test # + results_train_sbrl = sbrl_big.predict_proba(new_X_train) fpr_sbrl, tpr_sbrl, thresholds_sbrl = metrics.roc_curve(ytrain, results_train_sbrl[1], pos_label=1) roc_auc_sbrl = metrics.auc(fpr_sbrl, tpr_sbrl) print("AUC-ROC using SBRL(Train): {}".format(roc_auc_sbrl)) print("Accuracy(Train): {}".format(metrics.accuracy_score(ytrain, sbrl_big.predict(new_X_train)[1]))) print("-----------------------------------------------------------------\n") results_test_sbrl = sbrl_big.predict_proba(new_X_test) fpr_sbrl, tpr_sbrl, thresholds_sbrl = metrics.roc_curve(ytest ,results_test_sbrl[1], pos_label=1) roc_auc_sbrl = metrics.auc(fpr_sbrl, tpr_sbrl) 
print("AUC-ROC using SBRL(Test): {}".format(roc_auc_sbrl)) print("Accuracy(Test): {}".format(metrics.accuracy_score(ytest, sbrl_big.predict(new_X_test)[1]))) # - # ### Evaluating performance of a black box classifier from sklearn.svm import LinearSVC from sklearn.calibration import CalibratedClassifierCV svm = LinearSVC(random_state=0) est = CalibratedClassifierCV(svm) est.fit(Xtrain, ytrain) results_train_svm = pd.DataFrame(est.predict_proba(Xtrain))[1] results_test_svm = pd.DataFrame(est.predict_proba(Xtest))[1] # + fpr_svm, tpr_svm, thresholds_svm = metrics.roc_curve(ytrain, results_train_svm, pos_label=1) roc_auc_svm = metrics.auc(fpr_svm, tpr_svm) print("AUC-ROC using SVM(Train): {}".format(roc_auc_svm)) print("Accuracy(Test): {}".format(est.score(Xtrain, ytrain))) print("-----------------------------------------------------------------\n") fpr_svm, tpr_svm, thresholds_svm = metrics.roc_curve(ytest, results_test_svm, pos_label=1) roc_auc_svm = metrics.auc(fpr_svm, tpr_svm) print("AUC-ROC using SVM(Test): {}".format(roc_auc_svm)) print("Accuracy(Test): {}".format(est.score(Xtest, ytest)))
examples/credit_analysis/credit_analysis_rule_lists.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from dpm.models import (
    OrdinalLayer, OrdinalModel, OrdinalLoss,
    exp_cdf, erf_cdf, tanh_cdf,
    normal_cdf, laplace_cdf, cauchy_cdf
)
from dpm.visualize import (
    plot_ordinal_classes,
    plot_ordinal_classes_from_layer
)
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt

# +
import pandas as pd
import numpy as np

# DATA: merge the red and white wine-quality tables into one frame.
red = pd.read_csv('winequality-red.csv', sep=';')
white = pd.read_csv('winequality-white.csv', sep=';')
wine = pd.concat((red, white))

from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, PowerTransformer, FunctionTransformer

# Roughly-Gaussian columns get a plain standardisation; skewed columns
# get a power transform first.
gaussian_columns = ['alcohol', 'chlorides', 'fixed acidity', 'density',
                    'pH', 'sulphates', 'volatile acidity']
power_columns = ['citric acid', 'free sulfur dioxide', 'residual sugar',
                 'total sulfur dioxide']

column_transformer = ColumnTransformer([
    ('gaussian', StandardScaler(), gaussian_columns),
    ('power', PowerTransformer(), power_columns)
])

X = column_transformer.fit_transform(wine)
# BUG FIX: np.long was removed in NumPy 1.24; np.int64 is the intended
# dtype (it matches the .long() tensor cast below).
y = wine.quality.values.astype(np.int64)

# Map y from (3-8) to (0-5)
y -= y.min()

from sklearn.model_selection import train_test_split
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.20,
                                                      stratify=y, random_state=666)

X_train = torch.tensor(X_train).float()
X_test = torch.tensor(X_test).float()
y_train = torch.tensor(y_train).long()
y_test = torch.tensor(y_test).long()

from torch.utils.data import TensorDataset, DataLoader
train_loader = DataLoader(TensorDataset(X_train, y_train), batch_size=32, shuffle=True)

print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)

num_features = len(gaussian_columns + power_columns)
num_classes = len(np.unique(y))

# +
# Visualise ordinal class probabilities for a few link functions.
f = torch.linspace(-12, 12, 100)
cutpoints = torch.tensor([-3, 3])

plot_ordinal_classes(f, cutpoints, title="Sigmoid")
plt.show()
plot_ordinal_classes(f, cutpoints, func=laplace_cdf, title='Laplace')
plt.show()
plot_ordinal_classes(f, cutpoints, func=normal_cdf, title='Normal')
plt.show()

# +
# Define Models: a small MLP feeding a single scalar into the ordinal layer.
predictor = nn.Sequential(
    nn.Linear(X.shape[1], 64),
    nn.ELU(),
    nn.Linear(64, 64),
    nn.ELU(),
    nn.Linear(64, 1, bias=False)
)

model = OrdinalModel(predictor, OrdinalLayer(num_classes))
criterion = OrdinalLoss('mean')
opti = optim.Adam(model.parameters(), lr=1e-2)

# quick plot of init values
f = torch.linspace(-5, 5, 100)
plot_ordinal_classes_from_layer(f, model.ordinal)

# +
# Training loop: per-epoch train loss, then accuracy / within-one-class
# ("TOP") metrics on both splits.
epochs = 200
for epoch in range(epochs):
    model.train()
    total_loss = 0
    for (data, label) in train_loader:
        opti.zero_grad()
        output = model(data)
        loss = criterion(output, label)
        loss.backward()
        opti.step()
        total_loss += loss.item() * len(label)
    print(f"[INFO {epoch}: LOSS: {total_loss/len(X_train)}]")

    model.eval()
    with torch.no_grad():
        output = model(X_train)
        print(f"[INFO {epoch}: ACC : {(output.argmax(-1) == y_train).float().mean():.3f}]")
        print(f"[INFO {epoch}: TOP : {(abs(output.argmax(-1) - y_train) < 2).float().mean():.3f}]")

        output = model(X_test)
        loss = criterion(output, y_test)
        print(f"[INFO {epoch}: LOSS: {loss.item()}]")
        print(f"[INFO {epoch}: ACC : {(output.argmax(-1) == y_test).float().mean():.3f}]")
        print(f"[INFO {epoch}: TOP : {(abs(output.argmax(-1) - y_test) < 2).float().mean():.3f}]")
        print()

# +
# What are the learned ordinal params?
print(model.ordinal.theta.detach())
print(model.ordinal.threshold.detach())

# +
num_points = 101
cutpoints = model.ordinal.threshold.detach()
offset = cutpoints.std() + 2

# why manual f? in case you want to zoom in
f = torch.linspace(cutpoints.min() - offset, cutpoints.max() + offset, num_points)
plot_ordinal_classes_from_layer(f, model.ordinal)
# -
Notebooks/Ordinal/TestModule.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.linear_model import LogisticRegression as LR
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn import metrics

# Load the labelled training catalogue and the unlabelled submission set.
DF = pd.read_csv("downloads/dataset_mushrooms.csv")
pd.set_option('display.max_columns', None)
DF[:5]

print(DF.columns)
print(len(DF.columns))

Sub = pd.read_csv("downloads/submission_mushrooms.csv")
Sub[:5]

# Binary target: 1 = poisonous ("p"), 0 = edible ("e").
Y = np.array([1 if x == "p" else 0 for x in DF["class"]])
print(Y)
np.unique(Y)

X = DF.drop(["class"], axis=1)
X.shape

# Stack training + submission rows so that get_dummies below produces one
# consistent one-hot column set for both.
# FIX: DataFrame.append was removed in pandas 2.0; pd.concat is the
# long-supported equivalent with identical row order here.
Merged = pd.concat([X, Sub])
Merged.shape

# "veil-type" is constant in this dataset, so it carries no signal.
Merged_dropped = Merged.drop(columns=["veil-type"])
Merged_dropped.shape

# "?" marks a missing stalk-root; impute with the modal category "b".
# regex=False makes the literal-replacement intent explicit (a bare "?"
# is not a valid regular expression, and the regex default changed
# across pandas versions).
Merged_dropped["stalk-root"] = Merged_dropped["stalk-root"].str.replace("?", "b", regex=False)
print(Merged_dropped.shape)
print(np.unique(Merged_dropped["stalk-root"]))

Merged_dummies = pd.get_dummies(Merged_dropped)
print(Merged_dummies.shape)
print(np.unique(Merged_dummies))

# Split the one-hot matrix back into training / submission parts.
# FIX: derive the boundary from the data rather than hard-coding 7921,
# and print X_processed's own shape (the original printed
# Sub_processed.shape twice -- a copy-paste slip).
n_train = len(X)
Sub_processed = Merged_dummies[n_train:]
print("Length =", len(Sub_processed), "shape =", Sub_processed.shape)
X_processed = Merged_dummies[0:n_train]
print("Length =", len(X_processed), "shape =", X_processed.shape)

# +
# Fit a logistic-regression classifier and plot its ROC curve on the
# held-out test split.
model = LR()
acc_1 = []
auc_1 = []
X2 = np.array(X_processed)
xtrain, xtest, ytrain, ytest = train_test_split(X2, Y)
model.fit(xtrain, ytrain)
yp = model.predict(xtest)
probabilities = model.predict_proba(xtest)[:, 1]
FPR, TPR, Threshold = roc_curve(ytest, probabilities)
plot1 = plt.plot(FPR, TPR)
acc_1.append(accuracy_score(ytest, yp))
auc_1.append(metrics.auc(FPR, TPR))
print(auc_1)
print(acc_1)
# BUG FIX: the original annotated confusion_matrix(Y, yp2), but yp2 is
# only defined two cells later, so this line raised a NameError.
# Annotate the confusion matrix of the test split actually plotted.
plt.annotate("confusion_matrix\n" + str(confusion_matrix(ytest, yp)),
             xy=(.6, .3), xytext=(.6, .3))
plt.gcf().set_size_inches(5, 5)
plt.plot([0, 1], [0, 1], c="k", ls=":")
plt.xlabel("False Positive Ratio", fontsize=16)
plt.ylabel("True Positive Ratio", fontsize=16)
plt.title("Poisonous Shroom Indicator", fontsize=16)
# -

len(Y)
len(yp)

# Confusion matrix over the full labelled set (train + test folds).
yp2 = model.predict(X2)
con = confusion_matrix(Y, yp2)
con

# +
# Predict labels for the submission set and collect them in a frame.
P = np.array(Sub_processed)
yp5 = model.predict(P)
h = pd.DataFrame(yp5)
h2 = h.rename({0: 'Y_predicted'}, axis=1)
h2
# -

h2.to_csv("Group4_Mushrooms_Log-Reg_Y-predicted", index=True)

# +
# Count predicted poisonous vs edible mushrooms in the submission set.
e_total = []
p_total = []
for value in yp5:
    if value == 1:
        p_total.append("1")
    else:
        e_total.append("0")
print("There are " + str(len(p_total)) + " poisonous mushrooms.")
print("There are " + str(len(e_total)) + " edible mushrooms.")
# -
Mushroom3 - Copy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Creating a feature matrix from a networkx graph
#
# In this notebook we will look at a few ways to quickly create a feature matrix from a networkx graph.

# +
import networkx as nx
import pandas as pd

# NOTE(review): nx.read_gpickle was removed in networkx 3.0 -- this
# notebook assumes networkx < 3.  TODO confirm the pinned version.
G = nx.read_gpickle('major_us_cities')
# -

# ## Node based features

G.nodes(data=True)

# Initialize the dataframe, using the nodes as the index
df = pd.DataFrame(index=G.nodes())
df

# ### Extracting attributes
#
# Using `nx.get_node_attributes` it's easy to extract the node attributes in the graph into DataFrame columns.

# +
df['location'] = pd.Series(nx.get_node_attributes(G, 'location'))
df['population'] = pd.Series(nx.get_node_attributes(G, 'population'))

df.head()
# -

# ### Creating node based features
#
# Most of the networkx functions related to nodes return a dictionary, which can also easily be added to our dataframe.

G.degree()

# +
df['clustering'] = pd.Series(nx.clustering(G))
# BUG FIX: on networkx >= 2, G.degree() returns a DegreeView of
# (node, degree) pairs, so pd.Series(G.degree()) is not a node-indexed
# series.  dict(G.degree()) yields {node: degree} on both nx 1.x and 2.x.
df['degree'] = pd.Series(dict(G.degree()))

df
# -

# # Edge based features

G.edges(data=True)

# Initialize the dataframe, using the edges as the index
df = pd.DataFrame(index=G.edges())
df

# ### Extracting attributes
#
# Using `nx.get_edge_attributes`, it's easy to extract the edge attributes in the graph into DataFrame columns.

# +
df['weight'] = pd.Series(nx.get_edge_attributes(G, 'weight'))

df
# -

# ### Creating edge based features
#
# Many of the networkx functions related to edges return nested data structures. We can extract the relevant data using a list comprehension.

# +
df['preferential attachment'] = [i[2] for i in nx.preferential_attachment(G, df.index)]

df
# -

# In the case where the function expects two nodes to be passed in, we can map the index to a lambda function.

# +
df['Common Neighbors'] = df.index.map(lambda city: len(list(nx.common_neighbors(G, city[0], city[1]))))

df
# -
Applied_Data_Science_with_Python/Applied Social Network Analysis in Python/Graph+Features.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # Coupling vs Wavelength
#

# +
# sphinx_gallery_thumbnail_path = '../images/Experiment_CouplingVSWavelength.png'


def run(Plot, Save):
    """Sweep the source wavelength over 400-1000 nm for a 500 nm BK7 sphere
    and report the power coupled into photodiodes at three polar angles.

    Parameters
    ----------
    Plot : bool
        If True, plot coupling vs wavelength.
    Save : bool
        If True, also save the figure under ``docs/images/``.
    """
    import numpy as np
    from PyMieSim.Experiment import SphereSet, SourceSet, Setup, PhotodiodeSet
    from PyOptik import ExpData

    # Point-coupling photodiodes at polar angles 0, 30 and 60 degrees.
    detecSet = PhotodiodeSet(NA=0.2,
                             Phi=[0, 30, 60],
                             Gamma=0,
                             Filter=None,
                             Sampling=300,
                             CouplingMode='Point')

    # Single 500 nm BK7 sphere in vacuum (nMedium = 1).
    scatSet = SphereSet(Diameter=500e-9,
                        Material=ExpData('BK7'),
                        nMedium=1)

    # 50 wavelengths spanning the visible / near-IR range.
    sourceSet = SourceSet(Wavelength=np.linspace(400e-9, 1000e-9, 50),
                          Polarization=0,
                          Amplitude=1)

    Experiment = Setup(ScattererSet=scatSet,
                       SourceSet=sourceSet,
                       DetectorSet=detecSet)

    Data = Experiment.Get('Coupling')

    print(Data)

    if Plot:
        Data.Plot(y='Coupling', x='Wavelength')

    if Save:
        from pathlib import Path
        # BUG FIX: __file__ is undefined when this cell runs inside a
        # notebook, so the original Path(__file__) raised a NameError
        # whenever Save=True; fall back to the gallery example name.
        # Also renamed the local variable 'dir', which shadowed the builtin.
        stem = Path(__file__).stem if '__file__' in globals() else 'CouplingVSWavelength'
        out_dir = f'docs/images/{stem}'
        Data.SaveFig(Directory=out_dir, y='Coupling', x='Wavelength')


if __name__ == '__main__':
    run(Plot=True, Save=False)
docs/source/Experiments/CouplingVSWavelength.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="YrOKr9pwkxJw" # # GEOPARSING FOR HIKE DESCRIPTIONS # # This notebook is proposed by [<NAME>](https://ludovicmoncla.github.io/) as part of the [CHOUCAS](http://choucas.ign.fr/) (2017-2021) project. # # # ## Overview # # In this tutorial, we'll learn about a few different things. # # - How to use the [PERDIDO API](http://erig.univ-pau.fr/PERDIDO/api.jsp) for geoparsing (geotagging + geocoding) French hike descriptions # - Display custom geotagging results (PERDIDO TEI-XML) with the [displaCy Named Entity Visualizer](https://spacy.io/usage/visualizers) # - Display geocoding results on a map # + [markdown] colab_type="text" id="5gi1PFqtkxJy" # ## Introduction # # Geoparsing (also known as toponym resolution) refers to the process of extracting place names from text and assigning geographic coordinates to them. # This involves two main tasks: geotagging and geocoding. # Geotagging consists in identifying spans of text referring to place names, while geocoding consists in finding unambiguous geographic coordinates. # # - # # The geotagging service of the PERDIDO API uses a cascade of finite-state transducers defining specific patterns for NER and identification of geographic information (spatial relations, etc.). # > <NAME>. and <NAME>. (2019). "Geoparsing and geocoding places in a dynamic space context." In The Semantics of Dynamic Space in French: Descriptive, experimental and formal studies on motion expression, 66, 353. # # The geocoding task uses a simple gazetteer lookup method. # For geocoding French hike descriptions we use the bdnyme database provided by IGN. # In this notebook, we'll use the GPS trace associated with each hike description to find the area where entities should be located. 
This will help to reduce toponym ambiguities during the geocoding step. # # # ### PERDIDO Geoparser API # # # The [PERDIDO API](http://erig.univ-pau.fr/PERDIDO/) has been developed for extracting and retrieving displacements from unstructured texts. # > <NAME>., <NAME>., <NAME>., & <NAME>. (2016). "Reconstruction of itineraries from annotated text with an informed spanning tree algorithm." International Journal of Geographical Information Science, 30, 1137–1160. # # In this tutorial we'll see how to use the PERDIDO API for geoparsing French hike descriptions. # We will apply geoparsing on some hike descriptions downloaded from the [visorando](https://www.visorando.com/) web sharing platform. # # The PERDIDO Geoparsing and Geocoding services (`http://erig.univ-pau.fr/PERDIDO/api/geoparsing/`) take 4 parameters: # 1. api_key: API key of the user # 2. lang: language of the document (currently only available for French) # 3. content: textual content to parse # 4. bbox: allows filtering entity locations using a bounding box. # # The PERDIDO Geoparser returns XML-TEI. The `<name>` element refers to named entities (proper nouns) and the type attribute indicates its class (place, person, etc.). The `<rs>` element refers to extended named entities (e.g. refuge du Bois). The `<location>` element indicates that geographic coordinates were found during geocoding. 
# # # ```xml # <rs type="place" subtype="ene" start="13" end="27" startT="3" endT="6" id="en.13"> # <term type="place" start="13" end="19" startT="3" endT="4"> # <w lemma="refuge" type="N" xml:id="w4">Refuge</w> # </term> # <w lemma="du" type="PREPDET" xml:id="w5">du</w> # <rs type="unknown" subtype="no" start="23" end="27" startT="5" endT="6" id="en.14"> # <name type="unknown" id="en.1"> # <w lemma="null" type="NPr" xml:id="w6">Bois</w> # </name> # </rs> # <location> # <geo source="bdnyme">6.744359 45.459557</geo> # </location> # </rs> # ``` # ## Getting started # # # First, you need to register on the PERDIDO website to get your API key: http://erig.univ-pau.fr/PERDIDO/api.jsp # if some libraries are not installed on your environment (this is the case with binder) # !pip3 install spacy # !pip3 install lxml # !pip3 install gpxpy # !pip3 install geojson # !pip3 install folium # + import requests import glob from spacy.tokens import Span from spacy.tokens import Doc from spacy.vocab import Vocab from spacy import displacy import lxml.etree as etree import gpxpy import gpxpy.gpx import geojson import folium from IPython.display import display # - # Let's define some useful functions. 
# +
'''Helper functions for converting PERDIDO XML output and mapping the results.'''


def Perdido2displaCy(contentXML):
    '''Transform the PERDIDO-NER XML output into spaCy format (for display purpose).

    Geocoded toponyms become LOC entities; non-geocoded place entities become MISC.
    '''
    vocab = Vocab()
    words = []
    spaces = []
    root = etree.fromstring(bytes(contentXML, 'utf-8'))
    # Rebuild the token stream from the <w> elements (the original also
    # accumulated an unused `contentTXT` string, dropped here).
    for w in root.findall('.//w'):
        words.append(w.text)
        spaces.append(True)
    doc = Doc(vocab, words=words, spaces=spaces)

    ents = []
    # Geocoded entities: every <location> whose nearest <rs> ancestor is not
    # itself nested below an already-located <rs>.
    for child in root.findall('.//location'):
        rs = get_parent(child, 'rs')
        if rs is not None and not loc_in_parent(rs):
            # Only build a span when both token offsets are present (the
            # original could reuse a stale or undefined start/stop here).
            if 'startT' in rs.attrib and 'endT' in rs.attrib:
                ents.append(Span(doc, int(rs.get('startT')), int(rs.get('endT')), label='LOC'))

    # Non-geocoded place entities: no <location> child and not nested inside
    # another place <rs>.
    for child in root.findall('.//rs[@type="place"]'):
        if not parent_exists(child, 'rs', 'place') and not loc_in_child(child):
            if 'startT' in child.attrib and 'endT' in child.attrib:
                ents.append(Span(doc, int(child.get('startT')), int(child.get('endT')), label='MISC'))

    doc.ents = ents
    return doc


def parent_exists(child_node, name_node, type_val=None):
    '''Return True if an ancestor of child_node is named name_node, has a
    startT attribute and, when type_val is given, a matching type attribute.

    NOTE(review): the original file defined two same-named versions (a 2-arg
    and a 3-arg one); the 3-arg version shadowed the other, whose recursive
    call would have been an arity error. They are merged here with an
    optional type_val for backward compatibility.
    '''
    try:
        parent_node = next(child_node.iterancestors())
    except StopIteration:
        return False
    if parent_node.tag == name_node:
        if type_val is None or parent_node.get('type') == type_val:
            if 'startT' in parent_node.attrib:
                return True
    return parent_exists(parent_node, name_node, type_val)


def loc_in_parent(node):
    '''Return True if a <location> element is found in the <rs> ancestor chain.'''
    try:
        parent_node = next(node.iterancestors())
    except StopIteration:
        return False
    if parent_node.tag == "rs":
        if parent_node.find('location') is not None:
            return True
        return loc_in_parent(parent_node)
    return False


def loc_in_child(node):
    '''Return True if a <location> element is found among the descendants of node.'''
    return node.find('.//location') is not None


def get_parent(child_node, name_node):
    '''Return the first ancestor of child_node named name_node that carries a
    startT attribute, or None if there is no such ancestor.'''
    try:
        parent_node = next(child_node.iterancestors())
    except StopIteration:
        return None
    if parent_node.tag == name_node and 'startT' in parent_node.attrib:
        return parent_node
    return get_parent(parent_node, name_node)


def display_map(json_data):
    '''Display the geocoded toponyms of json_data on a folium map.'''
    coords = list(geojson.utils.coords(json_data))
    if not coords:
        print("Sorry, no records found in gazetteer for geocoding!")
        return
    # Centre the map on the mean coordinate. Guarded above against an empty
    # list: the original divided by len(coords) before checking it.
    ave_lat = sum(p[0] for p in coords) / len(coords)
    ave_lon = sum(p[1] for p in coords) / len(coords)
    print(str(len(coords)) + " records found in gazetteer:")
    m = folium.Map(location=[ave_lat, ave_lon], zoom_start=12)
    # Bug fix: plot the json_data argument (the original referenced a global
    # `data` variable here instead of its parameter).
    folium.GeoJson(json_data, name='Toponyms',
                   tooltip=folium.features.GeoJsonTooltip(fields=['id', 'name', 'source'],
                                                          localize=True)).add_to(m)
    display(m)


def display_map_gpx(json_data, gpx_filename):
    '''Display the GPS trace of gpx_filename plus the toponyms of json_data on a folium map.'''
    # `with` closes the file handle (the original leaked it).
    with open(gpx_filename, 'r') as gpx_file:
        gpx = gpxpy.parse(gpx_file)
    points = []
    for track in gpx.tracks:
        for segment in track.segments:
            for point in segment.points:
                points.append((point.latitude, point.longitude))
    # NOTE(review): assumes the GPX trace contains at least one point,
    # otherwise the averages below divide by zero (as in the original).
    ave_lat = sum(p[0] for p in points) / len(points)
    ave_lon = sum(p[1] for p in points) / len(points)
    m = folium.Map(location=[ave_lat, ave_lon], zoom_start=12)
    folium.PolyLine(points, color="red", weight=2.5, opacity=1).add_to(m)
    coords = list(geojson.utils.coords(json_data))
    if len(coords) > 0:
        print(str(len(coords)) + " records found in gazetteer:")
        folium.GeoJson(json_data, name='Toponyms',
                       tooltip=folium.features.GeoJsonTooltip(fields=['id', 'name', 'source'],
                                                              localize=True)).add_to(m)
    else:
        print("Sorry, no records found in gazetteer for geocoding!")
    display(m)


def get_bbox(gpx_filename):
    '''Return the bounding box "min_lon min_lat max_lon max_lat" of a GPS trace.'''
    latitudes = []
    longitudes = []
    # `with` closes the file handle; the unused elevation list of the
    # original was dropped.
    with open(gpx_filename, 'r') as gpx_file:
        gpx = gpxpy.parse(gpx_file)
    for track in gpx.tracks:
        for segment in track.segments:
            for point in segment.points:
                latitudes.append(point.latitude)
                longitudes.append(point.longitude)
    return str(min(longitudes)) + ' ' + str(min(latitudes)) + ' ' + str(max(longitudes)) + ' ' + str(max(latitudes))
# -

# ## Setting parameters

api_key = 'demo'      # !! replace by yours
lang = 'French'       # currently only available for French
version = 'Standard'  # default: Standard

# +
# get the list of txt files from the data directory
txtfiles = []
for file in sorted(glob.glob("data/*.txt")):
    txtfiles.append(file[:-4])
print(txtfiles)
# -

# +
# get the textual content from file; `with` closes the handle
with open(txtfiles[0] + ".txt", "r") as f:
    content = f.read()
print(content)
# -

# +
# get the bounding box from the gps trace
gpx_filename = txtfiles[0] + ".gpx"
bbox = get_bbox(gpx_filename)
#print(bbox)

# set the parameters for the PERDIDO API
parameters = {'api_key': api_key, 'lang': lang, 'content': content, "bbox": bbox}
# -

# ## Call the geoparsing REST API

r = requests.get('http://erig.univ-pau.fr/PERDIDO/api/geoparsing/', params=parameters)
print(r.text)  # shows the result of the request
# you can parse this XML to retrieve the information you are interested in

# In the next cells, we will use the displacy library from spaCy to display the PERDIDO-NER XML output.
# For this purpose, we defined the function `Perdido2displaCy()` in order to
# transform the PERDIDO-NER XML into a [spaCy](https://spacy.io/) compatible
# format. Geocoded toponyms are marked in orange (with the label: LOC) while
# toponyms that are not associated with a location are marked in grey (with
# the label: MISC).

doc = Perdido2displaCy(r.text)
displacy.render(doc, style="ent", jupyter=True)

# ## Call the geocoding REST API

# +
r = requests.get('http://erig.univ-pau.fr/PERDIDO/api/geocoding/', params=parameters)
#print("geojson : "+r.text) ## you can save the geojson in a file if needed

data = geojson.loads(r.text)
display_map_gpx(data, gpx_filename)
# -

# ## In brief

print(len(txtfiles))

# +
# choose the file you want to process among the 30 hike descriptions
id_file = 4

# Read the hike description text; `with` closes the file handle (the
# original leaked it).
with open(txtfiles[id_file] + ".txt", "r") as f:
    content = f.read()

gpx_filename = txtfiles[id_file] + ".gpx"
bbox = get_bbox(gpx_filename)

parameters = {'api_key': api_key, 'lang': lang, 'content': content, "bbox": bbox}

# Geotagging: render the recognized entities inline.
r = requests.get('http://erig.univ-pau.fr/PERDIDO/api/geoparsing/', params=parameters)
displacy.render(Perdido2displaCy(r.text), style="ent", jupyter=True)

# Geocoding: plot the resolved toponyms along the GPS trace.
r = requests.get('http://erig.univ-pau.fr/PERDIDO/api/geocoding/', params=parameters)
display_map_gpx(geojson.loads(r.text), gpx_filename)
# -
GeoparsingHikeDescriptions.ipynb