code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Image processing for NGC 309: Part2 # + import numpy as np import math from astropy.io import fits import os import sys import matplotlib.pyplot as plt import logging mpl_logger = logging.getLogger('matplotlib') mpl_logger.setLevel(logging.WARNING) global PIXEDFIT_HOME PIXEDFIT_HOME = os.environ['PIXEDFIT_HOME'] sys.path.insert(0, PIXEDFIT_HOME+'/src') from piXedfit_images import images_processing # - filters = ['galex_fuv', 'galex_nuv', 'sdss_u', 'sdss_g', 'sdss_r', 'sdss_i', 'sdss_z', '2mass_j', '2mass_h', '2mass_k', 'wise_w1', 'wise_w2'] nbands = len(filters) # ### Open the FITS file containing maps of multiband fluxes hdu = fits.open("fluxmap_califa_NGC309.fits") hdu.info() header = hdu[0].header print (header['unit']) print (header) gal_region = hdu['GALAXY_REGION'].data flux_map = hdu['FLUX'].data flux_err_map = hdu['FLUX_ERR'].data # ### Plot maps of SDSS-r and 2MASS/J band flux ## plot r-band flux map: fig1 = plt.figure(figsize=(8,8)) f1 = plt.subplot() plt.title("%s" % filters[4], fontsize=20) plt.xlabel("[pixel]", fontsize=20) plt.ylabel("[pixel]", fontsize=20) plt.imshow(np.log10(flux_map[4]), origin='lower', cmap='nipy_spectral_r') plt.colorbar() fig1 = plt.figure(figsize=(8,8)) f1 = plt.subplot() plt.title("%s" % filters[7], fontsize=20) plt.xlabel("[pixel]", fontsize=20) plt.ylabel("[pixel]", fontsize=20) plt.imshow(np.log10(flux_map[7]), origin='lower', cmap='nipy_spectral_r') plt.colorbar() # ### Run elliptical isophotes fitting to the r-band map # + data = flux_map[4] from piXedfit_images import ellipse_fit x0,y0,ell,pa = ellipse_fit(data=data, init_x0=65.0, init_y0=65.0, init_sma=10.0, init_ell=0.1, init_pa=10.0) # - # ### Determine maximum radius to cut the outskirt region containing irregular 'filamentary' structures # + ## draw ellipse: from piXedfit_images import draw_ellipse max_sma = 53.0 ellipse_xy = draw_ellipse(x0,y0,max_sma,ell,pa) fig1 = plt.figure(figsize=(8,8)) f1 = plt.subplot() plt.title("%s" % filters[7], fontsize=20) plt.xlabel("[pixel]", fontsize=20) plt.ylabel("[pixel]", fontsize=20) plt.imshow(np.log10(flux_map[7]), origin='lower', cmap='nipy_spectral_r') plt.colorbar() plt.plot(ellipse_xy[0], ellipse_xy[1], lw=2, color='black') fig1 = plt.figure(figsize=(8,8)) f1 = plt.subplot() plt.title("%s" % filters[4], fontsize=20) plt.xlabel("[pixel]", fontsize=20) plt.ylabel("[pixel]", fontsize=20) plt.imshow(np.log10(flux_map[4]), origin='lower', cmap='nipy_spectral_r') plt.colorbar() plt.plot(ellipse_xy[0], ellipse_xy[1], lw=2, color='black') # - # ### Crop the selected region within the circle # + from piXedfit_images import crop_ellipse_galregion_fits input_fits = "fluxmap_califa_NGC309.fits" output_fits = "crop_fluxmap_califa_NGC309.fits" x_cent = x0 y_cent = y0 ell = ell pa = pa rmax = max_sma flux_map_new = crop_ellipse_galregion_fits(input_fits,output_fits,x_cent,y_cent,ell,pa,rmax) # - # ### Open the FITS file containing cropped maps hdu = fits.open("crop_fluxmap_califa_NGC309.fits") hdu.info() header = hdu[0].header print (header['unit']) print (header) gal_region = hdu['GALAXY_REGION'].data flux_map = hdu['FLUX'].data flux_err_map = hdu['FLUX_ERR'].data # ### Plot the maps of multiband fluxes for bb in range(0,nbands): fig1 = plt.figure(figsize=(8,8)) f1 = plt.subplot() plt.title("%s" % filters[int(bb)], fontsize=20) plt.xlabel("[pixel]", fontsize=20) plt.ylabel("[pixel]", fontsize=20) plt.imshow(np.log10(flux_map[int(bb)]), origin='lower', cmap='nipy_spectral_r') plt.colorbar() # ### Checking the results by plotting SEDs of 100 pixels within central 100x100 square # + ### get SEDs of pixels: ## (band,y,x) ==> (y,x,band) flux_map_trans = np.transpose(flux_map, axes=(1,2,0)) flux_err_map_trans = np.transpose(flux_err_map, axes=(1,2,0)) dim_y = gal_region.shape[0] dim_x = gal_region.shape[1] pix_SED = [] pix_SED_err = [] for yy in range(60,70): for xx in range(60,70): pix_SED.append(flux_map_trans[yy][xx]) pix_SED_err.append(flux_err_map_trans[yy][xx]) npixs_used = len(pix_SED) print (npixs_used) # - # ### Get central wavelengths of the photometric filters using cwave_filters function # + from filtering import cwave_filters photo_wave = cwave_filters(filters) print (filters) print (photo_wave) # - # ### Plot the SEDs # + fig1 = plt.figure(figsize=(14,7)) f1 = plt.subplot() f1.set_xscale('log') f1.set_yscale('log') plt.xlabel(r'Wavelength $[\AA]$', fontsize=21) plt.ylabel(r'$F_{\lambda}$ [1e-17 erg $s^{-1}cm^{-2}\AA^{-1}$]', fontsize=21) for ii in range(0,len(pix_SED)): plt.errorbar(photo_wave, pix_SED[ii], yerr=0, fmt='-o', lw=1) # -
examples/NGC309_image_processing_p2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # A Gentle Introduction to ``torch.autograd`` # --------------------------------- # # ``torch.autograd`` is PyTorch’s automatic differentiation engine that powers # neural network training. In this section, you will get a conceptual # understanding of how autograd helps a neural network train. # # ## Background # # Neural networks (NNs) are a collection of nested functions that are # executed on some input data. These functions are defined by *parameters* # (consisting of weights and biases), which in PyTorch are stored in # tensors. # # Training a NN happens in two steps: # # **Forward Propagation**: In forward prop, the NN makes its best guess # about the correct output. It runs the input data through each of its # functions to make this guess. # # **Backward Propagation**: In backprop, the NN adjusts its parameters # proportionate to the error in its guess. It does this by traversing # backwards from the output, collecting the derivatives of the error with # respect to the parameters of the functions (*gradients*), and optimizing # the parameters using gradient descent. For a more detailed walkthrough # of backprop, check out this `video from # 3Blue1Brown <https://www.youtube.com/watch?v=tIeHLnjs5U8>`__. # # # # # ## Usage in PyTorch # # Let's take a look at a single training step. # For this example, we load a pretrained resnet18 model from ``torchvision``. # We create a random data tensor to represent a single image with 3 channels, and height & width of 64, # and its corresponding ``label`` initialized to some random values. # # import torch, torchvision model = torchvision.models.resnet18(pretrained=True) data = torch.rand(1, 3, 64, 64) labels = torch.rand(1, 1000) # Next, we run the input data through the model through each of its layers to make a prediction. # This is the **forward pass**. # # # prediction = model(data) # forward pass # We use the model's prediction and the corresponding label to calculate the error (``loss``). # The next step is to backpropagate this error through the network. # Backward propagation is kicked off when we call ``.backward()`` on the error tensor. # Autograd then calculates and stores the gradients for each model parameter in the parameter's ``.grad`` attribute. # # # loss = (prediction - labels).sum() loss.backward() # backward pass # Next, we load an optimizer, in this case SGD with a learning rate of 0.01 and momentum of 0.9. # We register all the parameters of the model in the optimizer. # # # optim = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9) # Finally, we call ``.step()`` to initiate gradient descent. The optimizer adjusts each parameter by its gradient stored in ``.grad``. # # # optim.step() #gradient descent # At this point, you have everything you need to train your neural network. # The below sections detail the workings of autograd - feel free to skip them. # # # # -------------- # # # # ## Differentiation in Autograd # # Let's take a look at how ``autograd`` collects gradients. We create two tensors ``a`` and ``b`` with # ``requires_grad=True``. This signals to ``autograd`` that every operation on them should be tracked. # # # # + import torch a = torch.tensor([2., 3.], requires_grad=True) b = torch.tensor([6., 4.], requires_grad=True) # - # We create another tensor ``Q`` from ``a`` and ``b``. # # \begin{align}Q = 3a^3 - b^2\end{align} # # Q = 3*a**3 - b**2 # Let's assume ``a`` and ``b`` to be parameters of an NN, and ``Q`` # to be the error. In NN training, we want gradients of the error # w.r.t. parameters, i.e. # # \begin{align}\frac{\partial Q}{\partial a} = 9a^2\end{align} # # \begin{align}\frac{\partial Q}{\partial b} = -2b\end{align} # # # When we call ``.backward()`` on ``Q``, autograd calculates these gradients # and stores them in the respective tensors' ``.grad`` attribute. # # We need to explicitly pass a ``gradient`` argument in ``Q.backward()`` because it is a vector. # ``gradient`` is a tensor of the same shape as ``Q``, and it represents the # gradient of Q w.r.t. itself, i.e. # # \begin{align}\frac{dQ}{dQ} = 1\end{align} # # Equivalently, we can also aggregate Q into a scalar and call backward implicitly, like ``Q.sum().backward()``. # # # external_grad = torch.tensor([1., 1.]) Q.backward(gradient=external_grad) # Gradients are now deposited in ``a.grad`` and ``b.grad`` # # # check if collected gradients are correct print(9*a**2 == a.grad) print(-2*b == b.grad) # ## Optional Reading - Vector Calculus using ``autograd`` # # Mathematically, if you have a vector valued function # $\vec{y}=f(\vec{x})$, then the gradient of $\vec{y}$ with # respect to $\vec{x}$ is a Jacobian matrix $J$: # # \begin{align}J # = # \left(\begin{array}{cc} # \frac{\partial \bf{y}}{\partial x_{1}} & # ... & # \frac{\partial \bf{y}}{\partial x_{n}} # \end{array}\right) # = # \left(\begin{array}{ccc} # \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{1}}{\partial x_{n}}\\ # \vdots & \ddots & \vdots\\ # \frac{\partial y_{m}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}} # \end{array}\right)\end{align} # # Generally speaking, ``torch.autograd`` is an engine for computing # vector-Jacobian product. That is, given any vector $\vec{v}$, compute the product # $J^{T}\cdot \vec{v}$ # # If $\vec{v}$ happens to be the gradient of a scalar function $l=g\left(\vec{y}\right)$: # # \begin{align}\vec{v} # = # \left(\begin{array}{ccc}\frac{\partial l}{\partial y_{1}} & \cdots & \frac{\partial l}{\partial y_{m}}\end{array}\right)^{T}\end{align} # # then by the chain rule, the vector-Jacobian product would be the # gradient of $l$ with respect to $\vec{x}$: # # \begin{align}J^{T}\cdot \vec{v}=\left(\begin{array}{ccc} # \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{1}}\\ # \vdots & \ddots & \vdots\\ # \frac{\partial y_{1}}{\partial x_{n}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}} # \end{array}\right)\left(\begin{array}{c} # \frac{\partial l}{\partial y_{1}}\\ # \vdots\\ # \frac{\partial l}{\partial y_{m}} # \end{array}\right)=\left(\begin{array}{c} # \frac{\partial l}{\partial x_{1}}\\ # \vdots\\ # \frac{\partial l}{\partial x_{n}} # \end{array}\right)\end{align} # # This characteristic of vector-Jacobian product is what we use in the above example; # ``external_grad`` represents $\vec{v}$. # # # # Computational Graph # ~~~~~~~~~~~~~~~~~~~ # # Conceptually, autograd keeps a record of data (tensors) & all executed # operations (along with the resulting new tensors) in a directed acyclic # graph (DAG) consisting of # `Function <https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function>`__ # objects. In this DAG, leaves are the input tensors, roots are the output # tensors. By tracing this graph from roots to leaves, you can # automatically compute the gradients using the chain rule. # # In a forward pass, autograd does two things simultaneously: # # - run the requested operation to compute a resulting tensor, and # - maintain the operation’s *gradient function* in the DAG. # # The backward pass kicks off when ``.backward()`` is called on the DAG # root. ``autograd`` then: # # - computes the gradients from each ``.grad_fn``, # - accumulates them in the respective tensor’s ``.grad`` attribute, and # - using the chain rule, propagates all the way to the leaf tensors. # # Below is a visual representation of the DAG in our example. In the graph, # the arrows are in the direction of the forward pass. The nodes represent the backward functions # of each operation in the forward pass. The leaf nodes in blue represent our leaf tensors ``a`` and ``b``. # # .. figure:: /_static/img/dag_autograd.png # # <div class="alert alert-info"><h4>Note</h4><p>**DAGs are dynamic in PyTorch** # An important thing to note is that the graph is recreated from scratch; after each # ``.backward()`` call, autograd starts populating a new graph. This is # exactly what allows you to use control flow statements in your model; # you can change the shape, size and operations at every iteration if # needed.</p></div> # # Exclusion from the DAG # ^^^^^^^^^^^^^^^^^^^^^^ # # ``torch.autograd`` tracks operations on all tensors which have their # ``requires_grad`` flag set to ``True``. For tensors that don’t require # gradients, setting this attribute to ``False`` excludes it from the # gradient computation DAG. # # The output tensor of an operation will require gradients even if only a # single input tensor has ``requires_grad=True``. # # # # + x = torch.rand(5, 5) y = torch.rand(5, 5) z = torch.rand((5, 5), requires_grad=True) a = x + y print(f"Does `a` require gradients? : {a.requires_grad}") b = x + z print(f"Does `b` require gradients?: {b.requires_grad}") # - # In a NN, parameters that don't compute gradients are usually called **frozen parameters**. # It is useful to "freeze" part of your model if you know in advance that you won't need the gradients of those parameters # (this offers some performance benefits by reducing autograd computations). # # Another common usecase where exclusion from the DAG is important is for # `finetuning a pretrained network <https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html>`__ # # In finetuning, we freeze most of the model and typically only modify the classifier layers to make predictions on new labels. # Let's walk through a small example to demonstrate this. As before, we load a pretrained resnet18 model, and freeze all the parameters. # # # + from torch import nn, optim model = torchvision.models.resnet18(pretrained=True) # Freeze all the parameters in the network for param in model.parameters(): param.requires_grad = False # - # Let's say we want to finetune the model on a new dataset with 10 labels. # In resnet, the classifier is the last linear layer ``model.fc``. # We can simply replace it with a new linear layer (unfrozen by default) # that acts as our classifier. # # model.fc = nn.Linear(512, 10) # Now all parameters in the model, except the parameters of ``model.fc``, are frozen. # The only parameters that compute gradients are the weights and bias of ``model.fc``. # # # Optimize only the classifier optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.9) # Notice although we register all the parameters in the optimizer, # the only parameters that are computing gradients (and hence updated in gradient descent) # are the weights and bias of the classifier. # # The same exclusionary functionality is available as a context manager in # `torch.no_grad() <https://pytorch.org/docs/stable/generated/torch.no_grad.html>`__ # # # # -------------- # # # # Further readings: # ~~~~~~~~~~~~~~~~~~~ # # - `In-place operations & Multithreaded Autograd <https://pytorch.org/docs/stable/notes/autograd.html>`__ # - `Example implementation of reverse-mode autodiff <https://colab.research.google.com/drive/1VpeE6UvEPRz9HmsHh1KS0XxXjYu533EC>`__ # #
pythonExample/pytorchExample/2-LearningPytorch/1-60minBlitz/2-autograd_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score import matplotlib.pyplot as plt train_data=pd.read_csv('titanic.csv') test_data=pd.read_csv('test.csv') sub=pd.read_csv('test.csv') train_data.head() test_data.head() test_data.isnull().sum() train_data=train_data.drop(['PassengerId','Name','SibSp','Parch','Ticket','Fare','Cabin','Embarked'],axis=1) train_data.head() # + test_data=test_data.drop(['PassengerId','Name','SibSp','Parch','Ticket','Fare','Cabin','Embarked'],axis=1) test_data.head() # - gender=pd.get_dummies(train_data['Sex']) train_data=train_data.drop(['Sex'],axis=1) train_data=pd.concat([train_data,gender],axis=1) train_data # + train_data.head() # - gender=pd.get_dummies(test_data['Sex']) test_data=test_data.drop(['Sex'],axis=1) test_data=pd.concat([test_data,gender],axis=1) test_data.head() test_data # + sns.boxplot(test_data['Pclass'],test_data['Age']) # - sns.boxplot(train_data['Pclass'],train_data['Age']) def age(col): varage=col[0] varclass=col[1] if(pd.isnull(varage)): if varclass==1: return 45 if varclass==2: return 25 else: return 20 else: return varage train_data['Age']=train_data[['Age','Pclass']].apply(age,axis=1) train_data.isnull().sum() test_data['Age']=test_data[['Age','Pclass']].apply(age,axis=1) test_data.isnull().sum() x=train_data.iloc[:,1:] y=train_data.iloc[:,0] train = train_data.drop('Survived', axis=1) target = train_data['Survived'] train.isnull().sum() Log_reg=LogisticRegression() Log_reg.fit(train,target) y_pred=Log_reg.predict(test_data) y_pred # + submission = pd.DataFrame({ "PassengerId": sub["PassengerId"], "Survived": y_pred }) submission.to_csv('submission.csv', index=False) # - submission = pd.read_csv('submission.csv') submission.head() X_train,X_test,Y_train,Y_test=train_test_split(x,y,test_size=0.15,random_state=40) X_train Log_reg.fit(X_train,Y_train) y_predict=Log_reg.predict(X_test) accuracy_score(Y_test,y_predict) value_of_score=list() for i in range(1,100): X_train,X_test,Y_train,Y_test=train_test_split(x,y,test_size=0.15,random_state=i) Log_reg.fit(X_train,Y_train) y_predict=Log_reg.predict(X_test) score=accuracy_score(Y_test,y_predict) value_of_score.append(score) max(value_of_score) value_of_score.index(max(value_of_score)) plt.scatter(range(1,100),value_of_score,color='orange') plt.plot(range(1,100),value_of_score) plt.axhline(max(value_of_score),color='red') plt.axvline(11,color='green') X_train,X_test,Y_train,Y_test=train_test_split(x,y,test_size=0.15,random_state=11) Log_reg.fit(X_train,Y_train) y_predict=Log_reg.predict(X_test) score=accuracy_score(Y_test,y_predict) score
Titanic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Pipelines # # Pipelines are just series of steps you perform on data in `sklearn`. (The `sklearn` [guide to them is here.](https://scikit-learn.org/stable/modules/compose.html)) # # A "typical" pipeline in ML projects # 1. [Preprocesses the data](https://scikit-learn.org/stable/modules/preprocessing.html) to clean and tranform variables # 1. Possibly selects a subset of variables from among the features [to avoid overfitting](03a_ML_obj_and_tradeoff) (see also [this](https://scikit-learn.org/stable/modules/feature_selection.html)) # 1. Runs [a model](03e_whichModel) on those cleaned variables # # ```{tip} # You can set up pipelines with `make_pipeline`. # ``` # # ## Intro to pipes # # ```{margin} # <img src="https://media.giphy.com/media/k5b6fkFnSA3yo/source.gif" alt="Mario" style="width:200px;"> # ``` # # For example, here is a simple pipeline: # + from sklearn.pipeline import make_pipeline from sklearn.impute import SimpleImputer from sklearn.linear_model import Ridge ridge_pipe = make_pipeline(SimpleImputer(),Ridge(1.0)) # - # You put a series of steps inside `make_pipeline`, separated by commas. # # The pipeline object (printed out below) is a list of steps, where each step has a name (e.g. "simpleimputer" ) and a task associated with that name (e.g. "SimpleImputer()"). ridge_pipe # ```{tip} # You can `.fit()` and `.predict()` pipelines like any model, and they can be used in `cross_validate` too! # ``` # # Using it is the same as using any estimator! After I load the data we've been using [from the last two pages](04d_crossval) below (hidden), we can fit and predict like on the ["one model intro" page](04c_onemodel): # + tags=["hide-input"] import pandas as pd import numpy as np from sklearn.linear_model import Ridge from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold, cross_validate url = 'https://github.com/LeDataSciFi/ledatascifi-2022/blob/main/data/Fannie_Mae_Plus_Data.gzip?raw=true' fannie_mae = pd.read_csv(url,compression='gzip').dropna() y = fannie_mae.Original_Interest_Rate fannie_mae = (fannie_mae .assign(l_credscore = np.log(fannie_mae['Borrower_Credit_Score_at_Origination']), l_LTV = np.log(fannie_mae['Original_LTV_(OLTV)']), ) .iloc[:,-11:] ) rng = np.random.RandomState(0) # this helps us control the randomness so we can reproduce results exactly X_train, X_test, y_train, y_test = train_test_split(fannie_mae, y, random_state=rng) # - ridge_pipe.fit(X_train,y_train) ridge_pipe.predict(X_test) # Those are the same numbers as before - good! # # We can use this pipeline in our cross validation in place of the estimator: cross_validate(ridge_pipe,X_train,y_train, cv=KFold(5), scoring='r2')['test_score'].mean() # ## Preprocessing in pipes # # ```{warning} # (Virtually) All preprocessing should be done in the pipeline! # ``` # # [This is the link you should start with to see how you might clean and preprocess data.](https://scikit-learn.org/stable/modules/preprocessing.html) Key preprocessing steps include # - Filling in missing values (imputation) or dropping those observations # - Standardization # - Encoding categorical data # # With real-world data, you'll have many data types. So the preprocessing steps you apply to one column won't necessarily be what the next column needs. # # I use [ColumnTransformer](https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html#sphx-glr-auto-examples-compose-plot-column-transformer-mixed-types-py) to assemble my preprocessing portion of my full pipeline, and it allows me to process different variables differently. # # --- # # **The generic steps to preprocess in a pipeline:** # 1. Set up a pipeline for numerical data # 1. Set up a pipeline for categorical variables # 1. Set up the ColumnTransformer: # - `ColumnTransformer()` is a function, so it needs the parentheses "()" # - The first argument inside it is a list (so now it is `ColumnTransformer([])`) # - Each element in that list is a tuple that has three parts: # - name of the step (you decide the name), # - estimator/pipeline to use on that step, # - and which variables to use it on # - **Put the pipeline for each variable type as its own tuple inside `ColumnTransformer([<here!>])`** # 1. Use the `ColumnTransformer` set as the first step inside your glorious estimation pipeline. # # --- # # So, let me put this together: # # ```{tip} # This is good pseudo! # ``` # + from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer, make_column_selector ############# # Step 1: how to deal with numerical vars # pro-tip: you might set up several numeric pipelines, because # some variables might need very different treatment! ############# numer_pipe = make_pipeline(SimpleImputer()) # this deals with missing values (somehow?) # you might also standardize the vars in this numer_pipe ############# # Step 2: how to deal with categorical vars ############# cat_pipe = make_pipeline(OneHotEncoder(drop='first')) # notes on this cat pipe: # OneHotEncoder is just one way to deal with categorical vars # drop='first' is necessary if the model is regression ############# # Step 3: combine the subparts ############# preproc_pipe = ColumnTransformer( [ # arg 1 of ColumnTransformer is a list, so this starts the list # a tuple for the numerical vars: name, pipe, which vars to apply to ("num_impute", numer_pipe, ['l_credscore','TCMR']), # a tuple for the categorical vars: name, pipe, which vars to apply to ("cat_trans", cat_pipe, ['Property_state']) ] , remainder = 'drop' # you either drop or passthrough any vars not modified above ) ############# # Step 4: put the preprocessing into an estimation pipeline ############# new_ridge_pipe = make_pipeline(preproc_pipe,Ridge(1.0)) # - # The data loaded above has no categorical variables, so I'm going to reload the data and keep new variables to illustrate what we can do: # - `'TCMR','l_credscore'` are numerical # - `'Property_state'` is categorical # - `'l_LTV'` is in the data, but should be dropped (because of `remainder='drop'`) # # So here is the raw data: # + tags=["hide-input"] url = 'https://github.com/LeDataSciFi/ledatascifi-2022/blob/main/data/Fannie_Mae_Plus_Data.gzip?raw=true' fannie_mae = pd.read_csv(url,compression='gzip').dropna() y = fannie_mae.Original_Interest_Rate fannie_mae = (fannie_mae .assign(l_credscore = np.log(fannie_mae['Borrower_Credit_Score_at_Origination']), l_LTV = np.log(fannie_mae['Original_LTV_(OLTV)']), ) [['TCMR', 'Property_state', 'l_credscore', 'l_LTV']] ) rng = np.random.RandomState(0) # this helps us control the randomness so we can reproduce results exactly X_train, X_test, y_train, y_test = train_test_split(fannie_mae, y, random_state=rng) display(X_train.head()) display(X_train.describe().T.round(2)) # - # We could `.fit()` and `.transform()` using the `preproc_pipe` from step 3 (or just `.fit_transform()` to do it in one command) to see how it transforms the data. But the output is tough to use: preproc_pipe.fit_transform(X_train) # So I added a convenience function (`df_after_transform`) to the [community codebook](https://github.com/LeDataSciFi/ledatascifi-2022/tree/main/community_codebook) to show the dataframe after the ColumnTransformer step. # # Notice # - The `l_LTV` column is gone! # - The property state variable is now 50+ variables (one dummy for each state, and a few territories) # - The numerical variables aren't changed (there are no missing variables, so the imputation does nothing) # # This is the transformed data: # + from df_after_transform import df_after_transform df_after_transform(preproc_pipe,X_train) # - display(df_after_transform(preproc_pipe,X_train) .describe().T.round(2) .iloc[:7,:]) # only show a few variables for space... # ## Working with pipes # # - Using pipes is the same as any model: `.fit()` and `.predict()`, put into CVs # - When modelling, you should spend time interrogating model predictions, plotting and printing. Does the model struggle predicting certain observations? Does it excel at some? # - You'll want to tweak parts of your pipeline. The next pages cover how we can do that.
content/05/04e_pipelines.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: colette # language: python # name: colette # --- # + import panel as pn from functools import wraps pn.extension() def togglize(toggle_name: str="Hide / Unhide Widget", color: str="green"): def decorator_function(original_function): @wraps(original_function) # So we can stack decorators. def wrapper_function(*args, **kwargs): ''' In this block are the boilerplate for the actual togglizer ''' tog_color = { "green": "success", "red": "danger", "blue": "primary" } toggle = pn.widgets.Toggle(name=toggle_name, button_type=tog_color[color]) @pn.depends(toggle) def toggle_watch(x): if x: ''' The original function we wanted to decorate is here. It's what's being returned. ''' return original_function else: return None sample_widget_plot = pn.Column(pn.Column(toggle), toggle_watch) return sample_widget_plot return result return wrapper_function return decorator_function # - @togglize("Jorts", "blue") def some_panel_widget2(): return pn.pane.PNG('blue_square.png') some_panel_widget2()
colette/togglizer_isolated.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Face Recognition Program - For Mailing, Whatsapp, AWS Service Usage!! # # ### Creating Training Data for Face Reconnition using Haarcascade Model # + import cv2 import numpy as np face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') def face_extractor(img): gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) faces = face_classifier.detectMultiScale(gray, 1.3, 5) if faces is (): return None for (x,y,w,h) in faces: cropped_face = img[y:y+h, x:x+w] return cropped_face cap = cv2.VideoCapture(0) count = 0 while True: ret, frame = cap.read() if face_extractor(frame) is not None: count += 1 face = cv2.resize(face_extractor(frame), (200, 200)) face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY) file_name_path = './faces/' + str(count) + '.jpg' cv2.imwrite(file_name_path, face) cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0), 2) cv2.imshow('Face Cropper', face) else: print("Face not found") pass if cv2.waitKey(1) == 13 or count == 100: break cap.release() cv2.destroyAllWindows() print("Collecting Samples Complete") # - # ### Training My Face Model # + import cv2 import numpy as np from os import listdir from os.path import isfile, join data_path = './faces/' onlyfiles = [f for f in listdir(data_path) if isfile(join(data_path, f))] Training_Data, Labels = [], [] for i, files in enumerate(onlyfiles): image_path = data_path + onlyfiles[i] images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE) Training_Data.append(np.asarray(images, dtype=np.uint8)) Labels.append(i) Labels = np.asarray(Labels, dtype=np.int32) model = cv2.face_LBPHFaceRecognizer.create() model.train(np.asarray(Training_Data), np.asarray(Labels)) print("Model trained sucessefully") # - # ### Task 6.1 : Mailing While Face Detected # + import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.mime.base import MIMEBase from email import encoders def email(): fromaddr = "2018cscloud<EMAIL>" toaddr = "<EMAIL>" msg = MIMEMultipart() msg['From'] = fromaddr msg['To'] = toaddr msg['Subject'] = "Sending Akanksha's photo" body = "Akanksha's Photo" msg.attach(MIMEText(body, 'plain')) filename = "akuu15.jpg" attachment = open("./faces/akuu15.jpg", "rb") p = MIMEBase('application', 'octet-stream') p.set_payload((attachment).read()) encoders.encode_base64(p) p.add_header('Content-Disposition', "attachment; filename= %s" % filename) msg.attach(p) s = smtplib.SMTP('smtp.gmail.com', 587) s.starttls() password=input("enter Password: ") s.login(fromaddr, password) text = msg.as_string() s.sendmail(fromaddr, toaddr, text) s.quit() # - # ### Task 6.2 : Send whatsapp message to your friend while face Detected import pywhatkit as kit import datetime def whatsapp(): current_time = datetime.datetime.now() concode="+91" number=input("Enter Number: ") kit.sendwhatmsg(" ".join([concode, number]), "It's an automated message from Akanksha(India)", current_time.hour, current_time.minute+1, wait_time=10) # ### Task 6.3 : Create EC2 instance using AWS using CLI command import os def AWS_launch(): itype=input("Enter Instance Type: ") key=input("Enter Key Name: ") count=input("How many instance you want to launch? : ") os.system("aws ec2 run-instances --image-id ami-011c99152163a87ae --instance-type {itype} --count {count} --subnet-id subnet-718a9e19 --security-group-ids sg-05580d069bf2756bd --key-name {key}".format(key=key, itype=itype, count=count)) # ### Task 6.4 : Create EBS Volume using AWS using CLI command def EBS_launch(): size=input("Input Size of Volume you want to create: ") os.system("aws ec2 create-volume --availability-zone ap-south-1a --size {size} --volume-type gp2 --tag-specification ResourceType=volume,Tags=[{Key=taSK,Value=6}]".format(size=size)) # ### Task 6.5 : Attach the Newly Created Volume to EC2 instance def EBS_attach(): volume_id=input("Enter volume Id: ") instance_id=input("Enter instance Id: ") os.system("aws ec2 attach-volume --volume-id {v} --instance-id {i} --device xvdh".format(v = volume_id, i = instance_id)) # ### Running Facial Recognition Model to predict face and do particular task # + import cv2 import numpy as np import os face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') def face_detector(img, size=0.5): gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) faces = face_classifier.detectMultiScale(gray, 1.3, 5) if faces is (): return img, [] for (x,y,w,h) in faces: cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,255),2) roi = img[y:y+h, x:x+w] roi = cv2.resize(roi, (200, 200)) return img, roi cap = cv2.VideoCapture(0) while True: ret, frame = cap.read() image, face = face_detector(frame) try: face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY) results = model.predict(face) if results[1] < 500: confidence = int( 100 * (1 - (results[1])/400) ) display_string = str(confidence) + '% Confident it is User' cv2.putText(image, display_string, (100, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (255,120,150), 2) if confidence > 80: cv2.putText(image, "Hey Akanksha", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0), 2) cv2.imshow('Face Recognition', image ) whatsapp_msg_sent() AWS_launch() EBS_launch() EBS_attach() # email break else: cv2.putText(image, "I dont know, how r u", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255), 2) cv2.imshow('Face Recognition', image ) except: cv2.putText(image, "", (220, 120) , cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255), 2) cv2.putText(image, "", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255), 2) cv2.imshow('Face Recognition', image ) pass if cv2.waitKey(1) == 13: break cap.release() cv2.destroyAllWindows()
.ipynb_checkpoints/Face Recognition Program - For Mailing, Whatsapp, AWS Service Usage!!-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] Collapsed="false" # # "Titanic: Machine Learning from Disaster" # > First machine learning problem on binary classification task # # - toc: true # - badges: true # - comments: true # - categories: [jupyter, kaggle, classification, machine-learning] # - image: images/titanic.jpg # - # # Preface # This notebook demonstrates how I solve the Titanic problem on Kaggle. I will try my best to illustrate all steps I used to solve the problem. However, solving a machine learning problem takes a lot of steps, including understanding the problem, cleaning the data, exploration data analysis, feature engineering, machine learning algorithms, model evaluation and selection, evaluation metrics, etc. Actually each step can be a standalone topic for further investigation. Maybe I would write blog posts for these topics in the future. # + [markdown] Collapsed="false" # # Overview # + [markdown] Collapsed="false" # > youtube: https://youtu.be/8yZMXCaFshs # + [markdown] Collapsed="false" # ## Description # + [markdown] Collapsed="false" # The sinking of the Titanic is one of the most infamous shipwrecks in history. # # On April 15, 1912, during her maiden voyage, the widely considered “unsinkable” RMS Titanic sank after colliding with an iceberg. Unfortunately, there weren’t enough lifeboats for everyone onboard, resulting in the death of 1502 out of 2224 passengers and crew. # # While there was some element of luck involved in surviving, it seems some groups of people were more likely to survive than others. # # In this challenge, we ask you to build a predictive model that answers the question: “what sorts of people were more likely to survive?” using passenger data (ie name, age, gender, socio-economic class, etc). # + [markdown] Collapsed="false" # ### What Data Will I Use in This Competition? # + [markdown] Collapsed="false" # In this competition, you’ll gain access to two similar datasets that include passenger information like name, age, gender, socio-economic class, etc. One dataset is titled `train.csv` and the other is titled `test.csv`. # # Train.csv will contain the details of a subset of the passengers on board (891 to be exact) and importantly, will reveal whether they survived or not, also known as the “ground truth”. # # The `test.csv` dataset contains similar information but does not disclose the “ground truth” for each passenger. It’s your job to predict these outcomes. # # Using the patterns you find in the `train.csv` data, predict whether the other 418 passengers on board (found in `test.csv`) survived. # + [markdown] Collapsed="false" # ## Evaluation # + [markdown] Collapsed="false" # ### Goal # + [markdown] Collapsed="false" # It is your job to predict if a passenger survived the sinking of the Titanic or not. # For each in the test set, you must predict a 0 or 1 value for the variable. # + [markdown] Collapsed="false" # ### Metric # + [markdown] Collapsed="false" # Your score is the percentage of passengers you correctly predict. This is known as accuracy. # + [markdown] Collapsed="false" # ### Submission File Format # + [markdown] Collapsed="false" # You should submit a csv file with exactly 418 entries plus a header row. Your submission will show an error if you have extra columns (beyond PassengerId and Survived) or rows. # # The file should have exactly 2 columns: # # ``` # PassengerId (sorted in any order) # Survived (contains your binary predictions: 1 for survived, 0 for deceased) # PassengerId,Survived # 892,0 # 893,1 # 894,0 # Etc. # ``` # + [markdown] Collapsed="false" # # Problem-Solving Process # + [markdown] Collapsed="false" # > Important: Although the problem-solving process presented here looks like a linear and waterfall style, it's actually really an iterative process, which means that you may need to go back and forth to make sure your previous hypothesis was correct, or you need to test whether your new idea really works. # + [markdown] Collapsed="false" # ## Initial Setup # + [markdown] Collapsed="false" # First we need to import libraries # + Collapsed="false" #collapse-hide # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np import collections # allow IPython Notebook cell multiple outputs from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" from sklearn.impute import SimpleImputer from sklearn.linear_model import LogisticRegression from sklearn.model_selection import StratifiedShuffleSplit, cross_validate from sklearn.pipeline import Pipeline from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder import csv import warnings warnings.filterwarnings('ignore') # + [markdown] Collapsed="false" # Initialize plotting parameters # + Collapsed="false" #collapse-hide plt.rcParams['figure.figsize'] = (12,8) plt.rcParams['font.size'] = 14 plt.rcParams['axes.grid'] = True # + [markdown] Collapsed="false" # Helper functions # + Collapsed="false" #collapse-hide def set_seaborn_plot_style(ax, xlabel, ylabel, font_size=14): ax.legend(loc='upper right', fontsize=14); ax.tick_params(axis="x", labelsize=font_size); ax.tick_params(axis="y", labelsize=font_size); ax.set_xlabel(xlabel, fontsize=font_size); ax.set_ylabel(ylabel, fontsize=font_size); # + [markdown] Collapsed="false" # Then we download training/testing data from Kaggle using [kaggle-api](https://github.com/Kaggle/kaggle-api). # + Collapsed="false" #collapse-hide # !kaggle competitions download -c titanic --force # !unzip -o titanic.zip -d ./datasets # + [markdown] Collapsed="false" # > Note: If you see an error like this, `IOError: Could not find kaggle.json. Make sure it is located in /root/.kaggle. Or use the environment method.`, please upload your `kagggle.json` file which has api username and api key inside. # + [markdown] Collapsed="false" # Let's check what are inside the datasets directory # + Collapsed="false" #collapse-hide # !ls ./datasets # + [markdown] Collapsed="false" # ## EDA # + [markdown] Collapsed="false" # ### A Sneak Peek # + [markdown] Collapsed="false" # Let's load data into pandas DataFrame format, since it's much easier for computation and anlysis. Then we can have a sneak peek at the datasets. # + Collapsed="false" #collapse-hide train_data_path = './datasets/train.csv' test_data_path = './datasets/test.csv' train_df = pd.read_csv(train_data_path) test_df = pd.read_csv(test_data_path) print("Training dataset") train_df print("Testing dataset") test_df # + [markdown] Collapsed="false" # So, for the training dataset, there are 891 instances; while for the testing dataset, there are only 418 instances. Besides that, we can see that the training dataset has 12 columns(11 feature columns), since it has a label(Survived) for each instance. Now let's understand the training dataset further, such as inspect data type of each column, check if there is any null value, etc. # + [markdown] Collapsed="false" # ### More Information on Data: Data Types, Missing Values, etc. # + Collapsed="false" #collapse-hide print('training data') train_df.info() print() print('tesitng data') test_df.info() # + [markdown] Collapsed="false" # Since there are 891 entries in total, so `Age`, `Cabin` and `Embarked` columns have missing values(`null`). Besides, since this is a binary classification task, we need to make sure the labels really have two values only.(`0 or 1`) # + Collapsed="false" #collapse-hide train_df['Survived'].value_counts() # + [markdown] Collapsed="false" # Now let's understand the meaning of each column in traning data deeper. # # **Data Dictionary** # # |Variable|Definition|Key| # |---|---|---| # |survival|Survivial|0 = No, 1 = Yes| # |pclass|Ticket class|1 = 1st, 2 = 2nd, 3 = 3rd| # |sex|Sex|| # |Age|Age in years|| # |sibsp|# of siblings/spouses aboard the Titanic|| # |parch|# of parents/children aboard the Titanic|| # |ticket|Ticket number|| # |fare|Passenger fare|| # |cabin|Cabin number|| # |embarked|Port of Embarkation|C = Cherbourg, Q = Queenstown, S = Southampton| # # **Variable Notes** # # > Tip: Since the numbers below all have true meanings, so it's better to simply use regular numbers for encoding the features. If a feature doesn't have ordinal characteristics, then we can transfer numbers to the one-hot encoding format. # # # **pclass**: A proxy for socio-economic status (SES) # - 1st = Upper # - 2nd = Middle # - 3rd = Lower # # **age**: Age is fractional if less than 1. If the age is estimated, is it in the form of xx.5 # # **sibsp**: The dataset defines family relations in this way... # - Sibling = brother, sister, stepbrother, stepsister # - Spouse = husband, wife (mistresses and fiancés were ignored) # # **parch**: The dataset defines family relations in this way... # - Parent = mother, father # - Child = daughter, son, stepdaughter, stepson # - Some children travelled only with a nanny, therefore parch=0 for them. # + [markdown] Collapsed="false" # ### Plotting for Easier Exploration # + [markdown] Collapsed="false" # **PassengerId** # + Collapsed="false" #collapse-hide plt.title('PassengerId'); plt.xlabel('Instance Entry'); plt.ylabel('PassengerId'); plt.plot(train_df['PassengerId']); # + [markdown] Collapsed="false" # It should be a straight line # + Collapsed="false" #collapse-hide assert collections.Counter(train_df['PassengerId']) == collections.Counter(range(1, 892)), 'PassengerId is not a straight line' # + [markdown] Collapsed="false" # **Pclass** # + Collapsed="false" #collapse-hide plt.title('Ticket class(1=1st, 2=2nd, 3=3rd)'); plt.xlabel('Pclass'); plt.ylabel('Number'); plt.hist(train_df['Pclass']); # + [markdown] Collapsed="false" # **Sex** # + Collapsed="false" #collapse-hide plt.title('Sex'); plt.xlabel('Sex'); plt.ylabel('Number'); plt.hist(train_df['Sex']); # + [markdown] Collapsed="false" # **Age** # + Collapsed="false" #collapse-hide plt.title('Age in years'); plt.xlabel('Age'); plt.ylabel('Number'); plt.hist(train_df['Age'].dropna(), bins=30); # + [markdown] Collapsed="false" # > Note: The `Age` column has 177(891-714) null values. # + [markdown] Collapsed="false" # **Sibsp** # + Collapsed="false" #collapse-hide plt.title('# of siblings/spouses aboard the Titanic'); plt.xlabel('SibSp'); plt.ylabel('Number'); plt.hist(train_df['SibSp']); # + [markdown] Collapsed="false" # **Parch** # + Collapsed="false" #collapse-hide plt.title('# of parents/children aboard the Titanic'); plt.xlabel('Parch'); plt.ylabel('Number'); plt.hist(train_df['Parch']); # + [markdown] Collapsed="false" # **Fare** # + Collapsed="false" #collapse-hide plt.title('Passenger fare'); plt.xlabel('Fare'); plt.ylabel('Number'); plt.hist(train_df['Fare'], bins=30); # + [markdown] Collapsed="false" # **Embarked** # + Collapsed="false" #collapse-hide plt.title('Port of Embarkation(C=Cherbourg, Q=Queenstown, S=Southampton)'); plt.xlabel('Embarked'); plt.ylabel('Number'); plt.hist(train_df['Embarked'].dropna()); # + [markdown] Collapsed="false" # **Correlation Plot** # + [markdown] Collapsed="false" # After we inspect distributions of each feature column, now we are going to see the correlation plot among features and the label. # + Collapsed="false" #collapse-hide plt.figure(figsize=(12, 12)); ax = sns.heatmap(train_df.corr(), annot=True, center=0); ax.set_yticklabels(ax.get_yticklabels(), rotation = 0); # + [markdown] Collapsed="false" # > Note: The Survived column is the label of the training dataset. # + [markdown] Collapsed="false" # We can easily observe that no two columns are highly correlated with each other(like >0.7). Also, it makes sense that some features are a lot more correlated with some other features. For example, Pclass and Survived and Pclass and Fare. Now I want to further draw plots to understand more among these "highly-correlated" columns. # + [markdown] Collapsed="false" # **Scatter Plots** # + [markdown] Collapsed="false" # Pclass vs. Fare with Survived as hue # + Collapsed="false" #collapse-hide xlabel = 'Pclass' ylabel = 'Fare with log10 scale' plt.figure(figsize=(12, 8)); ax = sns.scatterplot(x=train_df['Pclass'], y=np.log10(train_df['Fare']), hue=train_df['Survived']); set_seaborn_plot_style(ax, xlabel, ylabel); # + [markdown] Collapsed="false" # We can easily see that fare is much higher in the 1st class. However, there seems like an outlier point(blue) in the bottom left that we may need to investigate further. # + [markdown] Collapsed="false" # Pclass vs. Age with Survived as hue # + Collapsed="false" #collapse-hide xlabel = 'Pclass' ylabel = 'Age' plt.figure(figsize=(12, 8)); ax = sns.scatterplot(x='Pclass', y='Age', hue='Survived', data=train_df); set_seaborn_plot_style(ax, xlabel, ylabel); # + [markdown] Collapsed="false" # We can at least reach these conclusions: # - Most people who were survived are in the 1st class, and least people in the 3rd class were survived # - For survivers among all classes, the 1st class has the biggest spread # - For the 2nd and 3rd classes, most survivers are in the younger ages, especially for the 2nd class # + [markdown] Collapsed="false" # Age vs. Fare with Survived as hue # + Collapsed="false" #collapse-hide xlabel='Age' ylabel='Fare' plt.figure(figsize=(12, 8)); ax = sns.scatterplot(x='Age', y='Fare', hue='Survived', data=train_df); set_seaborn_plot_style(ax, xlabel, ylabel); # + [markdown] Collapsed="false" # We can easily observe that more adults who had higher fare were survived than those who had lower fare. # - # ## Feature Engineering # Since machine learning algorithms only take numerical data, so we need to make sure every feature has appropriate form before modeling. This process is called feature engineering, including some common operations like dealing with missing values, choosing appropriate features to feed into models, some mathematical transformation on features, encoding categorical feature columns, etc. # > Tip: What operations to take in feature engineering plays an important role in influencing model performance, so we need to choose appropriate operations based on what model we decide to use. # #### Choose appropriate features # There are two columns(Name, Ticket) with text features, and it's more complicated. So, I'll discard them first for the baseline model. The PassengerId column doesn't correlate with other featrues, I'll discard them as they look like doesn't have the true meaning. # + #collapse-hide label = ['Survived'] features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked'] train_df_ = train_df[features+label] test_df_ = test_df[features] print("training data") train_df_ print("testing data") test_df_ # - # #### Dealing with missing values # - Discard the column(if there are too many null values, or some critical information is missing) # - Discard the row(but this would make less training data) # - Imputation(with the mean, median, min, max, etc. value of other instances) # We inspect what columns have missing values first # + #collapse-hide print("training data") train_df_.info() print() print("testing data") test_df_.info() # - # We can see that the Cabin column has too many null values both in training and testing data, so I'll discard it. # + #collapse-hide label = ['Survived'] features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked'] train_df_ = train_df[features+label] test_df_ = test_df[features] print("training data") train_df_ print("testing data") test_df_ # - # For other columns that have missing values, We can use choose appropriate imputation techniques based on their distribution. Now, we simply choose the `most_frequent` strategy for brevity. Finally we make sure they don't have any missing values. # + #collapse-hide label = ['Survived'] features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked'] imp = SimpleImputer(missing_values=np.nan, strategy='most_frequent') dtypes = train_df_.dtypes train_df_impute = pd.DataFrame(imp.fit_transform(train_df_[features]), columns=features) train_df_ = pd.concat([train_df_impute, train_df_[label]], axis=1) test_df_ = pd.DataFrame(imp.transform(test_df_), columns=features) for i, feature in enumerate(features): train_df_[feature] = train_df_[feature].astype(dtypes[i]) test_df_[feature] = test_df_[feature].astype(dtypes[i]) print("training data") train_df_.info() print() print("testing data") test_df_.info() # - # #### Some mathematical transformations on features # - Normalization(make feature values between [0, 1]) # - Standardization(with mean 0, vairance 1) # - Log-transformation(make feature values with smaller variation) # This part plays an important role in model performance, and it really depends on what model you choose. For example, if your model uses the "distance-based" algorithm like SGD or ridge regression, then standardization may help. # #### Encoding categorical feature columns # - One-hot encoding(if the column values don't have meaningful ordinal information) # - Ordinal numbers # It seems like the Embarked column should also take one-hot encoding format; however, I am curious if they actually correlate with other columns. If they are correlated with other columns, it makes more sense to have ordinal characteristics. # + #collapse_hide embarked_mapping = { 'S': 1, 'C': 2, 'Q': 3 } train_df_['Embarked'] = train_df_['Embarked'].map(embarked_mapping) test_df_['Embarked'] = test_df_['Embarked'].map(embarked_mapping) # - # Let's plot a correlation plot and pay attention to relation between the Embarked column with other columns! #collapse_hide plt.figure(figsize=(12, 12)); ax = sns.heatmap(train_df_.corr(), annot=True, center=0); ax.set_yticklabels(ax.get_yticklabels(), rotation = 0); # Since the Embarked column is somewhat correlated with the Survived column, so I think it makes sense that I transform the column as ordinal format. # Finally, the Sex column should't have ordinal characteristics, so we'll transform it to be one-hot encoding format. # + #collapse_hide train_df_ = pd.get_dummies(train_df_) test_df_ = pd.get_dummies(test_df_) print('training data') train_df_ print() print('testing data') test_df_ # - # Now we finally have data ready to reach to the most exciting modeling part! # + [markdown] Collapsed="false" # ## Modeling and Evaluation # - # After having some time doing data exploration, we are now going to build a model and evaluate its performance. The goal of building the first model is not to have the best performance; rather, we would want a baseline that can help us understand where and how we can do better. After all, we need to understand the problem-solving process should be iterative! # # In order to build a machine learning model, we need several things prepared: # 1. Model-ready data: it should be data after doing feature engineering, not just raw data # 2. Machine learning algorithm: this is what modeling stands for # 3. Evaluation Metric: in this problem, we are given to use "accuracy" as the metric # # Since it's a binary classification task, I'll choose logistic regression as the baseline model! # ### Baseline Model # **Model training using cross-validation** # We use [StratifiedShuffleSplit](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) by preserving the percentage of samples for each class through random folds. Also, we use [cross_validate](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html) for cross-validation. # + #collapse-hide features = test_df_.columns label = ['Survived'] X = train_df_[features] y = train_df_[label] # random_state is for reproducibility sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5, random_state=0) clf = LogisticRegression() validation_results = cross_validate(clf, X, y, cv=sss, return_estimator=True) estimators, test_scores = validation_results['estimator'], validation_results['test_score'] estimators test_scores print(f"mean accuracy: {np.mean(test_scores)}") # - # Our baseline model reaches ~79.7% accuracy! Not bad! Then we can submit prediction results on real testing data to Kaggle to check the true performance. # **Model prediction** #collapse-hide survived = estimators[2].predict(test_df_) # we choose the estimator with the best training performance for prediction # **Save the Model Prediction Results to CSV** # + #collapse-hide submission_dict = { 'PassengerId': [*range(892, 1310)], 'Survived': survived } # sanity check assert len(submission_dict['PassengerId']) == len(submission_dict['Survived']), \ f"PasserngerId values({len(submission_dict['PassengerId'])}) length should equal to Survived values({len(submission_dict['Survived'])}) length" # save the dict to csv file with open('submission.csv', 'w') as csvfile: fieldnames = submission_dict.keys() writer = csv.DictWriter(csvfile, fieldnames=fieldnames) writer.writeheader() for passenger_id, survived in zip(submission_dict['PassengerId'], submission_dict['Survived']): _ = writer.writerow({'PassengerId': passenger_id, 'Survived': survived}) #depress output # - # **Submit to Kaggle** #collapse-hide # !kaggle competitions submit -f submission.csv -m "submit baseline model" titanic # ![baseline](images/baseline.png) # Although the training accuracy reaches ~79.7%, the testing accuracy only has 75.6%. There might be overfitting or we need to use a better model! # ## The Pragmatic Approaches # Now I am going to show you some pragmatic approaches to solve machine learning problems. # # A pipeline allows you to chain multiple transformations of the data before you apply a final model. # # Why we need to use the pipeline approach? Before I answer this question, I would like to ask you a question first. Did you notice that there is a problem in the cross-validation process above? OK, let me tell you the truth. In the cross-validation process, the input data comes from the output of feature engineering process. During the cross-validation process, we split the input data into 5 splits, and take one of the splits as the validation data. In theory, validation data shouldn't know any information about training data; however, before we run the cross-validation, we apply all training data to feature engineering process, thus we leak information to validation data later. This condition would make us too optimistic. So maybe this is one of the reasons for overfitting. # # Now I am going to answer the first question I asked. There are several reasons that we use the pipeline approach: # 1. It makes code cleaner and more configurable. # 2. It prevents you from making mistakes like what I just mensioned above.(Because all processing happen inside the pipeline loop) # ### ColumnTransformer and Pipeline # + #collapse-hide label = ['Survived'] numeric_features = ['Age', 'SibSp', 'Parch', 'Fare'] categorical_features = ['Pclass', 'Sex', 'Embarked'] X_train = train_df[numeric_features + categorical_features] y_train = train_df[label] X_test = test_df[numeric_features + categorical_features] for cat in categorical_features: X_train[cat].fillna('missing', inplace=True) X_test[cat].fillna('missing', inplace=True) numeric_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(missing_values=np.nan, strategy='median')) ]) col_transformer = ColumnTransformer( transformers=[ ('onehot', OneHotEncoder(handle_unknown='ignore'), ['Embarked']), ('ordinal', OrdinalEncoder(), ['Sex']) ], remainder='passthrough' ) categorical_transformer = Pipeline(steps=[ ('col', col_transformer) ]) preprocessor = ColumnTransformer(transformers=[ ('num', numeric_transformer, numeric_features), ('cat', categorical_transformer, categorical_features), ]) model = Pipeline(steps=[ ('preprocessor', preprocessor), ('clf', LogisticRegression()) ]) sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5, random_state=0) validation_results = cross_validate(model, X_train, y_train, cv=sss, return_estimator=True) estimators, test_scores = validation_results['estimator'], validation_results['test_score'] test_scores print(f"mean accuracy: {np.mean(test_scores)}") # - # Now you've seen how pipeline looks like! Remember what I said earlier that the machine learning process is iterative, and we always need to figure out ways to make prediction performance better based on the evaluation metric. Now we've already implemented the baseline model; however, it shouldn't be the end of the journey. Since we can and should try different parameters or models to make the better performance, next problem is how can we do it the right way? As it turns out, there is some good functionalities in sklearn, so now I am going to show this feature to you. # ### GridSearch # GridSearch makes us define the grid of (hyper)parameters to help us navigate through these parameters and find out the set of parameters that make the best model performance. # + [markdown] Collapsed="false" # # Reference # + [markdown] Collapsed="false" # - <a href="https://www.kaggle.com/c/titanic/" target="_blank">Titanic: Machine Learning from Disaster</a> # + [markdown] Collapsed="false" # # Comments # + Collapsed="false"
_notebooks/2020-03-31-kaggle-titanic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Kubeflow Pipelines - Retail Product Stockouts Prediction using AutoML Tables # # + # Configuration PROJECT_ID = "<project-id-123456>" COMPUTE_REGION = "us-central1" # Currently "us-central1" is the only region supported by AutoML tables. # The bucket must be Regional (not multi-regional) and the region should be us-central1. This is a limitation of the batch prediction service. batch_predict_gcs_output_uri_prefix = 'gs://<gcs-bucket-regional-us-central1>/<subpath>/' # + # AutoML Tables components from kfp.components import load_component_from_url automl_create_dataset_for_tables_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/b3179d86b239a08bf4884b50dbf3a9151da96d66/components/gcp/automl/create_dataset_for_tables/component.yaml') automl_import_data_from_bigquery_source_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/b3179d86b239a08bf4884b50dbf3a9151da96d66/components/gcp/automl/import_data_from_bigquery/component.yaml') automl_create_model_for_tables_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/b3179d86b239a08bf4884b50dbf3a9151da96d66/components/gcp/automl/create_model_for_tables/component.yaml') automl_prediction_service_batch_predict_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/b3179d86b239a08bf4884b50dbf3a9151da96d66/components/gcp/automl/prediction_service_batch_predict/component.yaml') automl_split_dataset_table_column_names_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/b3179d86b239a08bf4884b50dbf3a9151da96d66/components/gcp/automl/split_dataset_table_column_names/component.yaml') # + # Define the pipeline import kfp def retail_product_stockout_prediction_pipeline( gcp_project_id: str, gcp_region: str, batch_predict_gcs_output_uri_prefix: str, dataset_bq_input_uri: str = 'bq://product-stockout.product_stockout.stockout', dataset_display_name: str = 'stockout_data', target_column_name: str = 'Stockout', model_display_name: str = 'stockout_model', batch_predict_bq_input_uri: str = 'bq://product-stockout.product_stockout.batch_prediction_inputs', train_budget_milli_node_hours: 'Integer' = 1000, ): # Create dataset create_dataset_task = automl_create_dataset_for_tables_op( gcp_project_id=gcp_project_id, gcp_region=gcp_region, display_name=dataset_display_name, ) # Import data import_data_task = automl_import_data_from_bigquery_source_op( dataset_path=create_dataset_task.outputs['dataset_path'], input_uri=dataset_bq_input_uri, ) # Prepare column schemas split_column_specs = automl_split_dataset_table_column_names_op( dataset_path=import_data_task.outputs['dataset_path'], table_index=0, target_column_name=target_column_name, ) # Train a model create_model_task = automl_create_model_for_tables_op( gcp_project_id=gcp_project_id, gcp_region=gcp_region, display_name=model_display_name, dataset_id=create_dataset_task.outputs['dataset_id'], target_column_path=split_column_specs.outputs['target_column_path'], #input_feature_column_paths=None, # All non-target columns will be used if None is passed input_feature_column_paths=split_column_specs.outputs['feature_column_paths'], optimization_objective='MAXIMIZE_AU_PRC', train_budget_milli_node_hours=train_budget_milli_node_hours, ).after(import_data_task) # Batch prediction batch_predict_task = automl_prediction_service_batch_predict_op( model_path=create_model_task.outputs['model_path'], bq_input_uri=batch_predict_bq_input_uri, gcs_output_uri_prefix=batch_predict_gcs_output_uri_prefix, ) # The pipeline should be able to authenticate to GCP. # Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details. # # For example, you may uncomment the following lines to use GSA keys. # from kfp.gcp import use_gcp_secret # kfp.dsl.get_pipeline_conf().add_op_transformer(use_gcp_secret('user-gcp-sa')) # + # Run the pipeline # Get the GCP location of your project. from google.cloud import automl location_path = automl.AutoMlClient().location_path(PROJECT_ID, COMPUTE_REGION) kfp.run_pipeline_func_on_cluster( retail_product_stockout_prediction_pipeline, arguments=dict( gcp_project_id=PROJECT_ID, gcp_region=COMPUTE_REGION, dataset_bq_input_uri='bq://product-stockout.product_stockout.stockout', batch_predict_bq_input_uri='bq://product-stockout.product_stockout.batch_prediction_inputs', batch_predict_gcs_output_uri_prefix=batch_predict_gcs_output_uri_prefix, ) )
samples/core/AutoML tables/AutoML Tables - Retail product stockout prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Statistical Debugging # # In this chapter, we introduce _statistical debugging_ – the idea that specific events during execution could be _statistically correlated_ with failures. We start with coverage of individual lines and then proceed towards further execution features. # - from bookutils import YouTubeVideo YouTubeVideo("UNuso00zYiI") # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Prerequisites** # # * You should have read the [chapter on tracing executions](Tracer.ipynb). # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} import bookutils # + [markdown] slideshow={"slide_type": "skip"} # ## Synopsis # <!-- Automatically generated. Do not edit. --> # # To [use the code provided in this chapter](Importing.ipynb), write # # ```python # >>> from debuggingbook.StatisticalDebugger import <identifier> # ``` # # and then make use of the following features. # # # This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes. # # To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. # # Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default `CoverageCollector`, collecting line coverage. # # ### Collecting Events from Calls # # To collect events from calls that are labeled manually, use # # ```python # >>> debugger = TarantulaDebugger() # >>> with debugger.collect_pass(): # >>> remove_html_markup("abc") # >>> with debugger.collect_pass(): # >>> remove_html_markup('<b>abc</b>') # >>> with debugger.collect_fail(): # >>> remove_html_markup('"abc"') # ``` # Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.) # # ### Collecting Events from Tests # # To collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form: # # ```python # >>> debugger = TarantulaDebugger() # >>> with debugger: # >>> remove_html_markup("abc") # >>> with debugger: # >>> remove_html_markup('<b>abc</b>') # >>> with debugger: # >>> remove_html_markup('"abc"') # >>> assert False # raise an exception # ``` # `with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger. # # ### Visualizing Events as a Table # # After collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs. # # ```python # >>> debugger.event_table(args=True, color=True) # ``` # # | `remove_html_markup` | `s='abc'` | `s='<b>abc</b>'` | `s='"abc"'` | # | --------------------- | ---- | ---- | ---- | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:1</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:2</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:3</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:4</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:6</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:7</samp> | X | X | X | # | <samp style="background-color: hsl(120.0, 50.0%, 80%)" title=" 0%"> remove_html_markup:8</samp> | - | X | - | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:9</samp> | X | X | X | # | <samp style="background-color: hsl(120.0, 50.0%, 80%)" title=" 0%">remove_html_markup:10</samp> | - | X | - | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%">remove_html_markup:11</samp> | X | X | X | # | <samp style="background-color: hsl(0.0, 100.0%, 80%)" title="100%">remove_html_markup:12</samp> | - | - | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%">remove_html_markup:13</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%">remove_html_markup:14</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%">remove_html_markup:16</samp> | X | X | X | # # # ### Visualizing Suspicious Code # # If you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines: # # ```python # >>> debugger # ``` # # | `remove_html_markup` | `s='abc'` | `s='<b>abc</b>'` | `s='"abc"'` | # | --------------------- | ---- | ---- | ---- | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:1</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:2</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:3</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:4</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:6</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:7</samp> | X | X | X | # | <samp style="background-color: hsl(120.0, 50.0%, 80%)" title=" 0%"> remove_html_markup:8</samp> | - | X | - | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%"> remove_html_markup:9</samp> | X | X | X | # | <samp style="background-color: hsl(120.0, 50.0%, 80%)" title=" 0%">remove_html_markup:10</samp> | - | X | - | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%">remove_html_markup:11</samp> | X | X | X | # | <samp style="background-color: hsl(0.0, 100.0%, 80%)" title="100%">remove_html_markup:12</samp> | - | - | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%">remove_html_markup:13</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%">remove_html_markup:14</samp> | X | X | X | # | <samp style="background-color: hsl(60.0, 100.0%, 80%)" title=" 50%">remove_html_markup:16</samp> | X | X | X | # # # ### Ranking Events # # The method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations. # # ```python # >>> debugger.rank() # # [('remove_html_markup', 12), # ('remove_html_markup', 14), # ('remove_html_markup', 2), # ('remove_html_markup', 9), # ('remove_html_markup', 4), # ('remove_html_markup', 16), # ('remove_html_markup', 11), # ('remove_html_markup', 6), # ('remove_html_markup', 13), # ('remove_html_markup', 1), # ('remove_html_markup', 3), # ('remove_html_markup', 7), # ('remove_html_markup', 8), # ('remove_html_markup', 10)] # ``` # ### Classes and Methods # # Here are all classes defined in this chapter: # # # ![](PICS/StatisticalDebugger-synopsis-1.svg) # # # ![](PICS/StatisticalDebugger-synopsis-2.svg) # # # - # ## Introduction # # The idea behind _statistical debugging_ is fairly simple. We have a program that sometimes passes and sometimes fails. This outcome can be _correlated_ with events that precede it – properties of the input, properties of the execution, properties of the program state. If we, for instance, can find that "the program always fails when Line 123 is executed, and it always passes when Line 123 is _not_ executed", then we have a strong correlation between Line 123 being executed and failure. # # Such _correlation_ does not necessarily mean _causation_. For this, we would have to prove that executing Line 123 _always_ leads to failure, and that _not_ executing it does not lead to (this) failure. Also, a correlation (or even a causation) does not mean that Line 123 contains the defect – for this, we would have to show that it actually is an error. Still, correlations make excellent hints as it comes to search for failure causes – in all generality, if you let your search be guided by _events that correlate with failures_, you are more likely to find _important hints on how the failure comes to be_. # + [markdown] button=false new_sheet=true run_control={"read_only": false} # ## Collecting Events # # How can we determine events that correlate with failure? We start with a general mechanism to actually _collect_ events during execution. The abstract `Collector` class provides # # * a `collect()` method made for collecting events, called from the `traceit()` tracer; and # * an `events()` method made for retrieving these events. # # Both of these are _abstract_ and will be defined further in subclasses. # - from Tracer import Tracer from typing import Sequence, Any, Callable, Optional, Type, Tuple, Any from typing import Dict, Union, Set, List, cast, TypeVar from types import FrameType, TracebackType class Collector(Tracer): """A class to record events during execution.""" def collect(self, frame: FrameType, event: str, arg: Any) -> None: """Collecting function. To be overridden in subclasses.""" pass def events(self) -> Set: """Return a collection of events. To be overridden in subclasses.""" return set() def traceit(self, frame: FrameType, event: str, arg: Any) -> None: self.collect(frame, event, arg) # A `Collector` class is used like `Tracer`, using a `with` statement. Let us apply it on the buggy variant of `remove_html_markup()` from the [Introduction to Debugging](Intro_Debugging.ipynb): def remove_html_markup(s): # type: ignore tag = False quote = False out = "" for c in s: if c == '<' and not quote: tag = True elif c == '>' and not quote: tag = False elif c == '"' or c == "'" and tag: quote = not quote elif not tag: out = out + c return out with Collector() as c: out = remove_html_markup('"abc"') out # There's not much we can do with our collector, as the `collect()` and `events()` methods are yet empty. However, we can introduce an `id()` method which returns a string identifying the collector. This string is defined from the _first function call_ encountered. from types import FunctionType Coverage = Set[Tuple[Callable, int]] class Collector(Collector): def __init__(self) -> None: """Constructor.""" self._function: Optional[Callable] = None self._args: Optional[Dict[str, Any]] = None self._argstring: Optional[str] = None self._exception: Optional[Type] = None self.classes_to_ignore = [self.__class__] def traceit(self, frame: FrameType, event: str, arg: Any) -> None: """Tracing function. Saves the first function and calls collect().""" if 'self' in frame.f_locals: for cls in self.classes_to_ignore: if isinstance(frame.f_locals['self'], cls): # Do not collect our own methods return if self._function is None and event == 'call': # Save function self._function = self.create_function(frame) self._args = frame.f_locals.copy() self._argstring = ", ".join([f"{var}={repr(self._args[var])}" for var in self._args]) self.collect(frame, event, arg) def collect(self, frame: FrameType, event: str, arg: Any) -> None: """Collector function. To be overloaded in subclasses.""" pass def id(self) -> str: """Return an identifier for the collector, created from the first call""" return f"{self.function().__name__}({self.argstring()})" def function(self) -> Callable: """Return the function from the first call, as a function object""" if not self._function: raise ValueError("No call collected") return self._function def argstring(self) -> str: """Return the list of arguments from the first call, as a printable string""" if not self._argstring: raise ValueError("No call collected") return self._argstring def args(self) -> Dict[str, Any]: """Return a dict of argument names and values from the first call""" if not self._args: raise ValueError("No call collected") return self._args def exception(self) -> Optional[Type]: """Return the exception class from the first call, or None if no exception was raised.""" return self._exception def __repr__(self) -> str: """Return a string representation of the collector""" # We use the ID as default representation when printed return self.id() def covered_functions(self) -> Set[Callable]: """Set of covered functions. To be overloaded in subclasses.""" return set() def coverage(self) -> Coverage: """ Return a set (function, lineno) with locations covered. To be overloaded in subclasses. """ return set() # Here's how the collector works: with Collector() as c: remove_html_markup('abc') c.function(), c.id() # ### Error Prevention # While collecting, we'd like to avoid collecting events in the collection infrastructure. The `classes_to_ignore` attribute takes care of this. class Collector(Collector): def add_classes_to_ignore(self, classes_to_ignore: List[Type]) -> None: """ Define additional classes to ignore during collection (typically `Debugger` classes using these collectors). """ self.classes_to_ignore += classes_to_ignore # If we exit a block without having collected anything, that's likely an error. class Collector(Collector): def __exit__(self, exc_tp: Type, exc_value: BaseException, exc_traceback: TracebackType) -> Optional[bool]: """Exit the `with` block.""" ret = super().__exit__(exc_tp, exc_value, exc_traceback) if not self._function: if exc_tp: return False # re-raise exception else: raise ValueError("No call collected") return ret # ## Collecting Coverage # # So far, our `Collector` class does not collect any events. Let us extend it such that it collects _coverage_ information – that is, the set of locations executed. To this end, we introduce a `CoverageCollector` subclass which saves the coverage in a set containing functions and line numbers. from types import FunctionType, FrameType from StackInspector import StackInspector class CoverageCollector(Collector, StackInspector): """A class to record covered locations during execution.""" def __init__(self) -> None: """Constructor.""" super().__init__() self._coverage: Coverage = set() def collect(self, frame: FrameType, event: str, arg: Any) -> None: """Save coverage for an observed event.""" name = frame.f_code.co_name function = self.search_func(name, frame) if function is None: function = self.create_function(frame) location = (function, frame.f_lineno) self._coverage.add(location) # We also override `events()` such that it returns the set of covered locations. class CoverageCollector(CoverageCollector): def events(self) -> Set[Tuple[str, int]]: """ Return the set of locations covered. Each location comes as a pair (`function_name`, `lineno`). """ return {(func.__name__, lineno) for func, lineno in self._coverage} # The methods `coverage()` and `covered_functions()` allow precise access to the coverage obtained. class CoverageCollector(CoverageCollector): def covered_functions(self) -> Set[Callable]: """Return a set with all functions covered.""" return {func for func, lineno in self._coverage} def coverage(self) -> Coverage: """Return a set (function, lineno) with all locations covered.""" return self._coverage # Here is how we can use `CoverageCollector` to determine the lines executed during a run of `remove_html_markup()`: with CoverageCollector() as c: remove_html_markup('abc') c.events() # Sets of line numbers alone are not too revealing. They provide more insights if we actually list the code, highlighting these numbers: import inspect from bookutils import getsourcelines # like inspect.getsourcelines(), but in color def code_with_coverage(function: Callable, coverage: Coverage) -> None: source_lines, starting_line_number = \ getsourcelines(function) line_number = starting_line_number for line in source_lines: marker = '*' if (function, line_number) in coverage else ' ' print(f"{line_number:4} {marker} {line}", end='') line_number += 1 code_with_coverage(remove_html_markup, c.coverage()) # Remember that the input `s` was `"abc"`? In this listing, we can see which lines were covered and which lines were not. From the listing already, we can see that `s` has neither tags nor quotes. # Such coverage computation plays a big role in _testing_, as one wants tests to cover as many different aspects of program execution (and notably code) as possible. But also during debugging, code coverage is essential: If some code was not even executed in the failing run, then any change to it will have no effect. from bookutils import quiz quiz('Let the input be `"<b>Don\'t do this!</b>"`. ' "Which of these lines are executed? Use the code to find out!", [ "`tag = True`", "`tag = False`", "`quote = not quote`", "`out = out + c`" ], "[ord(c) - ord('a') - 1 for c in 'cdf']") # To find the solution, try this out yourself: with CoverageCollector() as c: remove_html_markup("<b>Don't do this!</b>") # code_with_coverage(remove_html_markup, c.coverage) # ## Computing Differences # # Let us get back to the idea that we want to _correlate_ events with passing and failing outcomes. For this, we need to examine events in both _passing_ and _failing_ runs, and determine their _differences_ – since it is these differences we want to associate with their respective outcome. # ### A Base Class for Statistical Debugging # # The `StatisticalDebugger` base class takes a collector class (such as `CoverageCollector`). Its `collect()` method creates a new collector of that very class, which will be maintained by the debugger. As argument, `collect()` takes a string characterizing the outcome (such as `'PASS'` or `'FAIL'`). This is how one would use it: # # ```python # debugger = StatisticalDebugger() # with debugger.collect('PASS'): # some_passing_run() # with debugger.collect('PASS'): # another_passing_run() # with debugger.collect('FAIL'): # some_failing_run() # ``` # Let us implement `StatisticalDebugger`. The base class gets a collector class as argument: class StatisticalDebugger: """A class to collect events for multiple outcomes.""" def __init__(self, collector_class: Type = CoverageCollector, log: bool = False): """Constructor. Use instances of `collector_class` to collect events.""" self.collector_class = collector_class self.collectors: Dict[str, List[Collector]] = {} self.log = log # The `collect()` method creates (and stores) a collector for the given outcome, using the given outcome to characterize the run. Any additional arguments are passed to the collector. class StatisticalDebugger(StatisticalDebugger): def collect(self, outcome: str, *args: Any, **kwargs: Any) -> Collector: """Return a collector for the given outcome. Additional args are passed to the collector.""" collector = self.collector_class(*args, **kwargs) collector.add_classes_to_ignore([self.__class__]) return self.add_collector(outcome, collector) def add_collector(self, outcome: str, collector: Collector) -> Collector: if outcome not in self.collectors: self.collectors[outcome] = [] self.collectors[outcome].append(collector) return collector # The `all_events()` method produces a union of all events observed. If an outcome is given, it produces a union of all events with that outcome: class StatisticalDebugger(StatisticalDebugger): def all_events(self, outcome: Optional[str] = None) -> Set[Any]: """Return a set of all events observed.""" all_events = set() if outcome: for collector in self.collectors[outcome]: all_events.update(collector.events()) else: for outcome in self.collectors: for collector in self.collectors[outcome]: all_events.update(collector.events()) return all_events # Here's a simple example of `StatisticalDebugger` in action: s = StatisticalDebugger() with s.collect('PASS'): remove_html_markup("abc") with s.collect('PASS'): remove_html_markup('<b>abc</b>') with s.collect('FAIL'): remove_html_markup('"abc"') # The method `all_events()` returns all events collected: s.all_events() # If given an outcome as argument, we obtain all events with the given outcome. s.all_events('FAIL') # The attribute `collectors` maps outcomes to lists of collectors: s.collectors # Here's the collector of the one (and first) passing run: s.collectors['PASS'][0].id() s.collectors['PASS'][0].events() # To better highlight the differences between the collected events, we introduce a method `event_table()` that prints out whether an event took place in a run. # ### Excursion: Printing an Event Table from IPython.display import display, Markdown, HTML import html class StatisticalDebugger(StatisticalDebugger): def function(self) -> Optional[Callable]: """ Return the entry function from the events observed, or None if ambiguous. """ names_seen = set() functions = [] for outcome in self.collectors: for collector in self.collectors[outcome]: # We may have multiple copies of the function, # but sharing the same name func = collector.function() if func.__name__ not in names_seen: functions.append(func) names_seen.add(func.__name__) if len(functions) != 1: return None # ambiguous return functions[0] def covered_functions(self) -> Set[Callable]: """Return a set of all functions observed.""" functions = set() for outcome in self.collectors: for collector in self.collectors[outcome]: functions |= collector.covered_functions() return functions def coverage(self) -> Coverage: """Return a set of all (functions, line_numbers) observed""" coverage = set() for outcome in self.collectors: for collector in self.collectors[outcome]: coverage |= collector.coverage() return coverage def color(self, event: Any) -> Optional[str]: """ Return a color for the given event, or None. To be overloaded in subclasses. """ return None def tooltip(self, event: Any) -> Optional[str]: """ Return a tooltip string for the given event, or None. To be overloaded in subclasses. """ return None def event_str(self, event: Any) -> str: """Format the given event. To be overloaded in subclasses.""" if isinstance(event, str): return event if isinstance(event, tuple): return ":".join(self.event_str(elem) for elem in event) return str(event) def event_table_text(self, *, args: bool = False, color: bool = False) -> str: """ Print out a table of events observed. If `args` is True, use arguments as headers. If `color` is True, use colors. """ sep = ' | ' all_events = self.all_events() longest_event = max(len(f"{self.event_str(event)}") for event in all_events) out = "" # Header if args: out += '| ' func = self.function() if func: out += '`' + func.__name__ + '`' out += sep for name in self.collectors: for collector in self.collectors[name]: out += '`' + collector.argstring() + '`' + sep out += '\n' else: out += '| ' + ' ' * longest_event + sep for name in self.collectors: for i in range(len(self.collectors[name])): out += name + sep out += '\n' out += '| ' + '-' * longest_event + sep for name in self.collectors: for i in range(len(self.collectors[name])): out += '-' * len(name) + sep out += '\n' # Data for event in sorted(all_events): event_name = self.event_str(event).rjust(longest_event) tooltip = self.tooltip(event) if tooltip: title = f' title="{tooltip}"' else: title = '' if color: color_name = self.color(event) if color_name: event_name = \ f'<samp style="background-color: {color_name}"{title}>' \ f'{html.escape(event_name)}' \ f'</samp>' out += f"| {event_name}" + sep for name in self.collectors: for collector in self.collectors[name]: out += ' ' * (len(name) - 1) if event in collector.events(): out += "X" else: out += "-" out += sep out += '\n' return out def event_table(self, **_args: Any) -> Any: """Print out event table in Markdown format.""" return Markdown(self.event_table_text(**_args)) def __repr__(self) -> str: return self.event_table_text() def _repr_markdown_(self) -> str: return self.event_table_text(args=True, color=True) # ### End of Excursion s = StatisticalDebugger() with s.collect('PASS'): remove_html_markup("abc") with s.collect('PASS'): remove_html_markup('<b>abc</b>') with s.collect('FAIL'): remove_html_markup('"abc"') s.event_table(args=True) quiz("How many lines are executed in the failing run only?", [ "One", "Two", "Three" ], 'int(chr(50))') # These lines only executed in the failing run would be a correlation to look for. # ### Collecting Passing and Failing Runs # # While our `StatisticalDebugger` class allows arbitrary outcomes, we are typically only interested in two outcomes, namely _passing_ vs. _failing_ runs. We therefore introduce a specialized `DifferenceDebugger` class that provides customized methods to collect and access passing and failing runs. class DifferenceDebugger(StatisticalDebugger): """A class to collect events for passing and failing outcomes.""" PASS = 'PASS' FAIL = 'FAIL' def collect_pass(self, *args: Any, **kwargs: Any) -> Collector: """Return a collector for passing runs.""" return self.collect(self.PASS, *args, **kwargs) def collect_fail(self, *args: Any, **kwargs: Any) -> Collector: """Return a collector for failing runs.""" return self.collect(self.FAIL, *args, **kwargs) def pass_collectors(self) -> List[Collector]: return self.collectors[self.PASS] def fail_collectors(self) -> List[Collector]: return self.collectors[self.FAIL] def all_fail_events(self) -> Set[Any]: """Return all events observed in failing runs.""" return self.all_events(self.FAIL) def all_pass_events(self) -> Set[Any]: """Return all events observed in passing runs.""" return self.all_events(self.PASS) def only_fail_events(self) -> Set[Any]: """Return all events observed only in failing runs.""" return self.all_fail_events() - self.all_pass_events() def only_pass_events(self) -> Set[Any]: """Return all events observed only in passing runs.""" return self.all_pass_events() - self.all_fail_events() # We can use `DifferenceDebugger` just as a `StatisticalDebugger`: T1 = TypeVar('T1', bound='DifferenceDebugger') def test_debugger_html_simple(debugger: T1) -> T1: with debugger.collect_pass(): remove_html_markup('abc') with debugger.collect_pass(): remove_html_markup('<b>abc</b>') with debugger.collect_fail(): remove_html_markup('"abc"') return debugger # However, since the outcome of tests may not always be predetermined, we provide a simpler interface for tests that can fail (= raise an exception) or pass (not raise an exception). import traceback class DifferenceDebugger(DifferenceDebugger): def __enter__(self) -> Any: """Enter a `with` block. Collect coverage and outcome; classify as FAIL if the block raises an exception, and PASS if it does not. """ self.collector = self.collector_class() self.collector.add_classes_to_ignore([self.__class__]) self.collector.__enter__() return self def __exit__(self, exc_tp: Type, exc_value: BaseException, exc_traceback: TracebackType) -> Optional[bool]: """Exit the `with` block.""" status = self.collector.__exit__(exc_tp, exc_value, exc_traceback) if status is None: pass else: return False # Internal error; re-raise exception if exc_tp is None: outcome = self.PASS else: outcome = self.FAIL self.add_collector(outcome, self.collector) return True # Ignore exception, if any # Using this interface, we can rewrite `test_debugger_html()`: T2 = TypeVar('T2', bound='DifferenceDebugger') def test_debugger_html(debugger: T2) -> T2: with debugger: remove_html_markup('abc') with debugger: remove_html_markup('<b>abc</b>') with debugger: remove_html_markup('"abc"') assert False # Mark test as failing return debugger test_debugger_html(DifferenceDebugger()) # ### Analyzing Events # # Let us now focus on _analyzing_ events collected. Since events come back as _sets_, we can compute _unions_ and _differences_ between these sets. For instance, we can compute which lines were executed in _any_ of the passing runs of `test_debugger_html()`, above: debugger = test_debugger_html(DifferenceDebugger()) pass_1_events = debugger.pass_collectors()[0].events() pass_2_events = debugger.pass_collectors()[1].events() in_any_pass = pass_1_events | pass_2_events in_any_pass # Likewise, we can determine which lines were _only_ executed in the failing run: fail_events = debugger.fail_collectors()[0].events() only_in_fail = fail_events - in_any_pass only_in_fail # And we see that the "failing" run is characterized by processing quotes: code_with_coverage(remove_html_markup, only_in_fail) debugger = test_debugger_html(DifferenceDebugger()) debugger.all_events() # These are the lines executed only in the failing run: debugger.only_fail_events() # These are the lines executed only in the passing runs: debugger.only_pass_events() # Again, having these lines individually is neat, but things become much more interesting if we can see the associated code lines just as well. That's what we will do in the next section. # ## Visualizing Differences # # To show correlations of line coverage in context, we introduce a number of _visualization_ techniques that _highlight_ code with different colors. # ### Discrete Spectrum # # The first idea is to use a _discrete_ spectrum of three colors: # # * _red_ for code executed in failing runs only # * _green_ for code executed in passing runs only # * _yellow_ for code executed in both passing and failing runs. # # Code that is not executed stays unhighlighted. # Our `DiscreteSpectrumDebugger` subclass provides a `color()` method that returns one of these three colors depending on the line number: class DiscreteSpectrumDebugger(DifferenceDebugger): """Visualize differences between executions using three discrete colors""" def suspiciousness(self, event: Any) -> Optional[float]: """Return a suspiciousness value [0, 1.0] for the given event, or `None` if unknown""" passing = self.all_pass_events() failing = self.all_fail_events() if event in passing and event in failing: return 0.5 elif event in failing: return 1.0 elif event in passing: return 0.0 else: return None def color(self, event: Any) -> Optional[str]: """Return a color for the given event.""" suspiciousness = self.suspiciousness(event) if suspiciousness is None: return None if suspiciousness > 0.8: return 'mistyrose' if suspiciousness >= 0.5: return 'lightyellow' return 'honeydew' def tooltip(self, event: Any) -> str: """Return a tooltip for the given event.""" passing = self.all_pass_events() failing = self.all_fail_events() if event in passing and event in failing: return "in passing and failing runs" elif event in failing: return "only in failing runs" elif event in passing: return "only in passing runs" else: return "never" def percentage(self, event: Any) -> str: """Return the suspiciousness for the given event as percentage string""" suspiciousness = self.suspiciousness(event) if suspiciousness is not None: return str(int(suspiciousness * 100)).rjust(3) + '%' else: return ' ' * len('100%') # The `code()` method takes a function and shows each of its source code lines using the given spectrum, using HTML markup: class DiscreteSpectrumDebugger(DiscreteSpectrumDebugger): def code(self, functions: Optional[Set[Callable]] = None, *, color: bool = False, suspiciousness: bool = False, line_numbers: bool = True) -> str: """ Return a listing of `functions` (default: covered functions). If `color` is True, render as HTML, using suspiciousness colors. If `suspiciousness` is True, include suspiciousness values. If `line_numbers` is True (default), include line numbers. """ if not functions: functions = self.covered_functions() out = "" seen = set() for function in functions: source_lines, starting_line_number = \ inspect.getsourcelines(function) if (function.__name__, starting_line_number) in seen: continue seen.add((function.__name__, starting_line_number)) if out: out += '\n' if color: out += '<p/>' line_number = starting_line_number for line in source_lines: if color: line = html.escape(line) if line.strip() == '': line = '&nbsp;' location = (function.__name__, line_number) location_suspiciousness = self.suspiciousness(location) if location_suspiciousness is not None: tooltip = f"Line {line_number}: {self.tooltip(location)}" else: tooltip = f"Line {line_number}: not executed" if suspiciousness: line = self.percentage(location) + ' ' + line if line_numbers: line = str(line_number).rjust(4) + ' ' + line line_color = self.color(location) if color and line_color: line = f'''<pre style="background-color:{line_color}" title="{tooltip}">{line.rstrip()}</pre>''' elif color: line = f'<pre title="{tooltip}">{line}</pre>' else: line = line.rstrip() out += line + '\n' line_number += 1 return out def _repr_html_(self) -> str: """When output in Jupyter, visualize as HTML""" return self.code(color=True) def __str__(self) -> str: """Show code as string""" return self.code(color=False, suspiciousness=True) def __repr__(self) -> str: """Show code as string""" return self.code(color=False, suspiciousness=True) # This is how the `only_pass_events()` and `only_fail_events()` sets look like when visualized with code. The "culprit" line is well highlighted: debugger = test_debugger_html(DiscreteSpectrumDebugger()) debugger # We can clearly see that the failure is correlated with the presence of quotes in the input string (which is an important hint!). But does this also show us _immediately_ where the defect to be fixed is? quiz("Does the line `quote = not quote` actually contain the defect?", [ "Yes, it should be fixed", "No, the defect is elsewhere" ], '164 * 2 % 326') # Indeed, it is the preceding condition that is wrong. In order to fix a program, we have to find a location that # # 1. _causes_ the failure (i.e., it can be changed to make the failure go away); and # 2. is a _defect_ (i.e., contains an error). # # In our example above, the highlighted code line is a _symptom_ for the error. To some extent, it is also a _cause_, since, say, commenting it out would also resolve the given failure, at the cost of causing other failures. However, the preceding condition also is a cause, as is the presence of quotes in the input. # # Only one of these also is a _defect_, though, and that is the preceding condition. Hence, while correlations can provide important hints, they do not necessarily locate defects. # For those of us who may not have color HTML output ready, simply printing the debugger lists suspiciousness values as percentages. print(debugger) # ### Continuous Spectrum # # The criterion that an event should _only_ occur in failing runs (and not in passing runs) can be too aggressive. In particular, if we have another run that executes the "culprit" lines, but does _not_ fail, our "only in fail" criterion will no longer be helpful. # Here is an example. The input # # ```html # <b color="blue">text</b> # ``` # # will trigger the "culprit" line # # ```python # quote = not quote # ``` # # but actually produce an output where the tags are properly stripped: remove_html_markup('<b color="blue">text</b>') # As a consequence, we no longer have lines that are being executed only in failing runs: debugger = test_debugger_html(DiscreteSpectrumDebugger()) with debugger.collect_pass(): remove_html_markup('<b link="blue"></b>') debugger.only_fail_events() # In our spectrum output, the effect now is that the "culprit" line is as yellow as all others. debugger # We therefore introduce a different method for highlighting lines, based on their _relative_ occurrence with respect to all runs: If a line has been _mostly_ executed in failing runs, its color should shift towards red; if a line has been _mostly_ executed in passing runs, its color should shift towards green. # This _continuous spectrum_ has been introduced by the seminal _Tarantula_ tool \cite{Jones2002}. In Tarantula, the color _hue_ for each line is defined as follows: # $$\textit{color hue}(\textit{line}) = \textit{low color(red)} + \frac{\%\textit{passed}(\textit{line})}{\%\textit{passed}(\textit{line}) + \%\textit{failed}(\textit{line})} \times \textit{color range}$$ # Here, `%passed` and `%failed` denote the percentage at which a line has been executed in passing and failing runs, respectively. A hue of 0.0 stands for red, a hue of 1.0 stands for green, and a hue of 0.5 stands for equal fractions of red and green, yielding yellow. # We can implement these measures right away as methods in a new `ContinuousSpectrumDebugger` class: class ContinuousSpectrumDebugger(DiscreteSpectrumDebugger): """Visualize differences between executions using a color spectrum""" def collectors_with_event(self, event: Any, category: str) -> Set[Collector]: """ Return all collectors in a category that observed the given event. """ all_runs = self.collectors[category] collectors_with_event = set(collector for collector in all_runs if event in collector.events()) return collectors_with_event def collectors_without_event(self, event: Any, category: str) -> Set[Collector]: """ Return all collectors in a category that did not observe the given event. """ all_runs = self.collectors[category] collectors_without_event = set(collector for collector in all_runs if event not in collector.events()) return collectors_without_event def event_fraction(self, event: Any, category: str) -> float: all_collectors = self.collectors[category] collectors_with_event = self.collectors_with_event(event, category) fraction = len(collectors_with_event) / len(all_collectors) # print(f"%{category}({event}) = {fraction}") return fraction def passed_fraction(self, event: Any) -> float: return self.event_fraction(event, self.PASS) def failed_fraction(self, event: Any) -> float: return self.event_fraction(event, self.FAIL) def hue(self, event: Any) -> Optional[float]: """Return a color hue from 0.0 (red) to 1.0 (green).""" passed = self.passed_fraction(event) failed = self.failed_fraction(event) if passed + failed > 0: return passed / (passed + failed) else: return None # Having a continuous hue also implies a continuous suspiciousness and associated tooltips: class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger): def suspiciousness(self, event: Any) -> Optional[float]: hue = self.hue(event) if hue is None: return None return 1 - hue def tooltip(self, event: Any) -> str: return self.percentage(event) # The hue for lines executed only in failing runs is (deep) red, as expected: debugger = test_debugger_html(ContinuousSpectrumDebugger()) for location in debugger.only_fail_events(): print(location, debugger.hue(location)) # Likewise, the hue for lines executed in passing runs is (deep) green: for location in debugger.only_pass_events(): print(location, debugger.hue(location)) # The Tarantula tool not only sets the hue for a line, but also uses _brightness_ as measure for support – that is, how often was the line executed at all. The brighter a line, the stronger the correlation with a passing or failing outcome. # The brightness is defined as follows: # $$\textit{brightness}(line) = \max(\%\textit{passed}(\textit{line}), \%\textit{failed}(\textit{line}))$$ # and it is easily implemented, too: class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger): def brightness(self, event: Any) -> float: return max(self.passed_fraction(event), self.failed_fraction(event)) # Our single "only in fail" line has a brightness of 1.0 (the maximum). debugger = test_debugger_html(ContinuousSpectrumDebugger()) for location in debugger.only_fail_events(): print(location, debugger.brightness(location)) # With this, we can now define a color for each line. To this end, we override the (previously discrete) `color()` method such that it returns a color specification giving hue and brightness. We use the HTML format `hsl(hue, saturation, lightness)` where the hue is given as a value between 0 and 360 (0 is red, 120 is green) and saturation and lightness are provided as percentages. class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger): def color(self, event: Any) -> Optional[str]: hue = self.hue(event) if hue is None: return None saturation = self.brightness(event) # HSL color values are specified with: # hsl(hue, saturation, lightness). return f"hsl({hue * 120}, {saturation * 100}%, 80%)" debugger = test_debugger_html(ContinuousSpectrumDebugger()) # Lines executed only in failing runs are still shown in red: for location in debugger.only_fail_events(): print(location, debugger.color(location)) # ... whereas lines executed only in passing runs are still shown in green: for location in debugger.only_pass_events(): print(location, debugger.color(location)) debugger # What happens with our `quote = not quote` "culprit" line if it is executed in passing runs, too? with debugger.collect_pass(): out = remove_html_markup('<b link="blue"></b>') quiz('In which color will the `quote = not quote` "culprit" line ' 'be shown after executing the above code?', [ '<span style="background-color: hsl(120.0, 50.0%, 80%)">Green</span>', '<span style="background-color: hsl(60.0, 100.0%, 80%)">Yellow</span>', '<span style="background-color: hsl(30.0, 100.0%, 80%)">Orange</span>', '<span style="background-color: hsl(0.0, 100.0%, 80%)">Red</span>' ], '999 // 333') # We see that it still is shown with an orange-red tint. debugger # TODO: Having tag attributes in single quotes will spoil all fault localization # Here's another example, coming right from the Tarantula paper. The `middle()` function takes three numbers `x`, `y`, and `z`, and returns the one that is neither the minimum nor the maximum of the three: def middle(x, y, z): # type: ignore if y < z: if x < y: return y elif x < z: return y else: if x > y: return y elif x > z: return x return z middle(1, 2, 3) # Unfortunately, `middle()` can fail: middle(2, 1, 3) # Let is see whether we can find the bug with a few additional test cases: T3 = TypeVar('T3', bound='DifferenceDebugger') def test_debugger_middle(debugger: T3) -> T3: with debugger.collect_pass(): middle(3, 3, 5) with debugger.collect_pass(): middle(1, 2, 3) with debugger.collect_pass(): middle(3, 2, 1) with debugger.collect_pass(): middle(5, 5, 5) with debugger.collect_pass(): middle(5, 3, 4) with debugger.collect_fail(): middle(2, 1, 3) return debugger # Note that in order to collect data from multiple function invocations, you need to have a separate `with` clause for every invocation. The following will _not_ work correctly: # # ```python # with debugger.collect_pass(): # middle(3, 3, 5) # middle(1, 2, 3) # ... # ``` debugger = test_debugger_middle(ContinuousSpectrumDebugger()) debugger.event_table(args=True) # Here comes the visualization. We see that the `return y` line is the culprit here – and actually also the one to be fixed. debugger quiz("Which of the above lines should be fixed?", [ '<span style="background-color: hsl(45.0, 100%, 80%)">Line 3: `elif x < y`</span>', '<span style="background-color: hsl(34.28571428571429, 100.0%, 80%)">Line 5: `elif x < z`</span>', '<span style="background-color: hsl(20.000000000000004, 100.0%, 80%)">Line 6: `return y`</span>', '<span style="background-color: hsl(120.0, 20.0%, 80%)">Line 9: `return y`</span>', ], r'len(" middle ".strip()[:3])') # Indeed, in the `middle()` example, the "reddest" line is also the one to be fixed. Here is the fixed version: def middle_fixed(x, y, z): # type: ignore if y < z: if x < y: return y elif x < z: return x else: if x > y: return y elif x > z: return x return z middle_fixed(2, 1, 3) # ## Ranking Lines by Suspiciousness # # In a large program, there can be several locations (and events) that could be flagged as suspicious. It suffices that some large code block of say, 1,000 lines, is mostly executed in failing runs, and then all of this code block will be visualized in some shade of red. # # To further highlight the "most suspicious" events, one idea is to use a _ranking_ – that is, coming up with a list of events where those events most correlated with failures would be shown at the top. The programmer would then examine these events one by one and proceed down the list. We will show how this works for two "correlation" metrics – first the _Tarantula_ metric, as introduced above, and then the _Ochiai_ metric, which has shown to be one of the best "ranking" metrics. # We introduce a base class `RankingDebugger` with an abstract method `suspiciousness()` to be overloaded in subclasses. The method `rank()` returns a list of all events observed, sorted by suspiciousness, highest first. class RankingDebugger(DiscreteSpectrumDebugger): """Rank events by their suspiciousness""" def rank(self) -> List[Any]: """Return a list of events, sorted by suspiciousness, highest first.""" def susp(event: Any) -> float: suspiciousness = self.suspiciousness(event) assert suspiciousness is not None return suspiciousness events = list(self.all_events()) events.sort(key=susp, reverse=True) return events def __repr__(self) -> str: return repr(self.rank()) # ### The Tarantula Metric # # We can use the Tarantula metric to sort lines according to their suspiciousness. The "redder" a line (a hue of 0.0), the more suspicious it is. We can simply define # $$ # \textit{suspiciousness}_\textit{tarantula}(\textit{event}) = 1 - \textit{color hue}(\textit{event}) # $$ # where $\textit{color hue}$ is as defined above. # This is exactly the `suspiciousness()` function as already implemented in our `ContinuousSpectrumDebugger`. # We introduce the `TarantulaDebugger` class, inheriting visualization capabilities from the `ContinuousSpectrumDebugger` class as well as the suspiciousness features from the `RankingDebugger` class. class TarantulaDebugger(ContinuousSpectrumDebugger, RankingDebugger): """Spectrum-based Debugger using the Tarantula metric for suspiciousness""" pass # Let us list `remove_html_markup()` with highlighted lines again: tarantula_html = test_debugger_html(TarantulaDebugger()) tarantula_html # Here's our ranking of lines, from most suspicious to least suspicious: tarantula_html.rank() tarantula_html.suspiciousness(tarantula_html.rank()[0]) # We see that the first line in the list is indeed the most suspicious; the two "green" lines come at the very end. # For the `middle()` function, we also obtain a ranking from "reddest" to "greenest". tarantula_middle = test_debugger_middle(TarantulaDebugger()) tarantula_middle tarantula_middle.rank() tarantula_middle.suspiciousness(tarantula_middle.rank()[0]) # ### The Ochiai Metric # # The _Ochiai_ Metric \cite{Ochiai1957} first introduced in the biology domain \cite{daSilvaMeyer2004} and later applied for fault localization by Abreu et al. \cite{Abreu2009}, is defined as follows: # $$ # \textit{suspiciousness}_\textit{ochiai} = \frac # {\textit{failed}(\textit{event})} # {\sqrt{ # \bigl(\textit{failed}(\textit{event}) + \textit{not-in-failed}(\textit{event})\bigr) # \times # \bigl(\textit{failed}(\textit{event}) + \textit{passed}(\textit{event})\bigr) # }} # $$ # where # # * $\textit{failed}(\textit{event})$ is the number of times the event occurred in _failing_ runs # * $\textit{not-in-failed}(\textit{event})$ is the number of times the event did _not_ occur in failing runs # * $\textit{passed}(\textit{event})$ is the number of times the event occurred in _passing_ runs. # # We can easily implement this formula: import math class OchiaiDebugger(ContinuousSpectrumDebugger, RankingDebugger): """Spectrum-based Debugger using the Ochiai metric for suspiciousness""" def suspiciousness(self, event: Any) -> Optional[float]: failed = len(self.collectors_with_event(event, self.FAIL)) not_in_failed = len(self.collectors_without_event(event, self.FAIL)) passed = len(self.collectors_with_event(event, self.PASS)) try: return failed / math.sqrt((failed + not_in_failed) * (failed + passed)) except ZeroDivisionError: return None def hue(self, event: Any) -> Optional[float]: suspiciousness = self.suspiciousness(event) if suspiciousness is None: return None return 1 - suspiciousness # Applied on the `remove_html_markup()` function, the individual suspiciousness scores differ from Tarantula. However, we obtain a very similar visualization, and the same ranking. ochiai_html = test_debugger_html(OchiaiDebugger()) ochiai_html ochiai_html.rank() ochiai_html.suspiciousness(ochiai_html.rank()[0]) # The same observations also apply for the `middle()` function. ochiai_middle = test_debugger_middle(OchiaiDebugger()) ochiai_middle ochiai_middle.rank() ochiai_middle.suspiciousness(ochiai_middle.rank()[0]) # ### How Useful is Ranking? # # So, which metric is better? The standard method to evaluate such rankings is to determine a _ground truth_ – that is, the set of locations that eventually are fixed – and to check at which point in the ranking any such location occurs – the earlier, the better. In our `remove_html_markup()` and `middle()` examples, both the Tarantula and the Ochiai metric perform flawlessly, as the "culprit" line is always ranked at the top. However, this need not always be the case; the exact performance depends on the nature of the code and the observed runs. (Also, the question of whether there always is exactly one possible location where the program can be fixed is open for discussion.) # You will be surprised that over time, _several dozen_ metrics have been proposed \cite{Wong2016}, each performing somewhat better or somewhat worse depending on which benchmark they were applied on. The two metrics discussed above each have their merits – the Tarantula metric was among the first such metrics, and the Ochiai metric is generally shown to be among the most effective ones \cite{Abreu2009}. # While rankings can be easily _evaluated_, it is not necessarily clear whether and how much they serve programmers. As stated above, the assumption of rankings is that developers examine one potentially defective statement after another until they find the actually defective one. However, in a series of human studies with developers, Parnin and Orso \cite{Parnin2011} found that this assumption may not hold: # # > It is unclear whether developers can actually determine the faulty nature of a statement by simply looking at it, without any additional information (e.g., the state of the program when the statement was executed or the statements that were executed before or after that one). # # In their study, they found that rankings could help completing a task faster, but this effect was limited to experienced developers and simpler code. Artificially changing the rank of faulty statements had little to no effect, implying that developers would not strictly follow the ranked list of statements, but rather search through the code to understand it. At this point, a _visualization_ as in the Tarantula tool can be helpful to programmers as it _guides_ the search, but a _ranking_ that _defines_ where to search may be less useful. # Having said that, ranking has its merits – notably as it comes to informing _automated_ debugging techniques. In the [chapter on program repair](Repairer.ipynb), we will see how ranked lists of potentially faulty statements tell automated repair techniques where to try to repair the program first. And once such a repair is successful, we have a very strong indication on where and how the program could be fixed! # ## Using Large Test Suites # In fault localization, the larger and the more thorough the test suite, the higher the precision. Let us try out what happens if we extend the `middle()` test suite with additional test cases. # The function `middle_testcase()` returns a random input for `middle()`: import random def middle_testcase() -> Tuple[int, int, int]: x = random.randrange(10) y = random.randrange(10) z = random.randrange(10) return x, y, z [middle_testcase() for i in range(5)] # The function `middle_test()` simply checks if `middle()` operates correctly – by placing `x`, `y`, and `z` in a list, sorting it, and checking the middle argument. If `middle()` fails, `middle_test()` raises an exception. def middle_test(x: int, y: int, z: int) -> None: m = middle(x, y, z) assert m == sorted([x, y, z])[1] middle_test(4, 5, 6) from ExpectError import ExpectError with ExpectError(): middle_test(2, 1, 3) # The function `middle_passing_testcase()` searches and returns a triple `x`, `y`, `z` that causes `middle_test()` to pass. def middle_passing_testcase() -> Tuple[int, int, int]: while True: try: x, y, z = middle_testcase() middle_test(x, y, z) return x, y, z except AssertionError: pass (x, y, z) = middle_passing_testcase() m = middle(x, y, z) print(f"middle({x}, {y}, {z}) = {m}") # The function `middle_failing_testcase()` does the same; but its triple `x`, `y`, `z` causes `middle_test()` to fail. def middle_failing_testcase() -> Tuple[int, int, int]: while True: try: x, y, z = middle_testcase() middle_test(x, y, z) except AssertionError: return x, y, z (x, y, z) = middle_failing_testcase() m = middle(x, y, z) print(f"middle({x}, {y}, {z}) = {m}") # With these, we can define two sets of test cases, each with 100 inputs. MIDDLE_TESTS = 100 MIDDLE_PASSING_TESTCASES = [middle_passing_testcase() for i in range(MIDDLE_TESTS)] MIDDLE_FAILING_TESTCASES = [middle_failing_testcase() for i in range(MIDDLE_TESTS)] # Let us run the `OchiaiDebugger` with these two test sets # + ochiai_middle = OchiaiDebugger() for x, y, z in MIDDLE_PASSING_TESTCASES: with ochiai_middle.collect_pass(): middle(x, y, z) for x, y, z in MIDDLE_FAILING_TESTCASES: with ochiai_middle.collect_fail(): middle(x, y, z) # - ochiai_middle # We see that the "culprit" line is still the most likely to be fixed, but the two conditions leading to the error (`x < y` and `x < z`) are also listed as potentially faulty. That is because the error might also be fixed be changing these conditions – although this would result in a more complex fix. # ## Other Events besides Coverage # # We close this chapter with two directions for further thought. If you wondered why in the above code, we were mostly talking about `events` rather than lines covered, that is because our framework allows for tracking arbitrary events, not just coverage. In fact, any data item a collector can extract from the execution can be used for correlation analysis. (It may not be so easily visualized, though.) # Here's an example. We define a `ValueCollector` class that collects pairs of (local) variables and their values during execution. Its `events()` method then returns the set of all these pairs. class ValueCollector(Collector): """"A class to collect local variables and their values.""" def __init__(self) -> None: """Constructor.""" super().__init__() self.vars: Set[str] = set() def collect(self, frame: FrameType, event: str, arg: Any) -> None: local_vars = frame.f_locals for var in local_vars: value = local_vars[var] self.vars.add(f"{var} = {repr(value)}") def events(self) -> Set[str]: """A set of (variable, value) pairs observed""" return self.vars # If we apply this collector on our set of HTML test cases, these are all the events that we obtain – essentially all variables and all values ever seen: debugger = test_debugger_html(ContinuousSpectrumDebugger(ValueCollector)) for event in debugger.all_events(): print(event) # However, some of these events only occur in the failing run: for event in debugger.only_fail_events(): print(event) # Some of these differences are spurious – the string `"abc"` (with quotes) only occurs in the failing run – but others, such as `quote` being True and `c` containing a single quote are actually relevant for explaining when the failure comes to be. # We can even visualize the suspiciousness of the individual events, setting the (so far undiscussed) `color` flag for producing an event table: debugger.event_table(color=True, args=True) # There are many ways one can continue from here. # # * Rather than checking for concrete values, one could check for more _abstract properties_, for instance – what is the sign of the value? What is the length of the string? # * One could check for specifics of the _control flow_ – is the loop taken? How many times? # * One could check for specifics of the _information flow_ – which values flow from one variable to another? # # There are lots of properties that all could be related to failures – and if we happen to check for the right one, we may obtain a much crisper definition of what causes the failure. We will come up with more ideas on properties to check as it comes to [mining specifications](SpecificationMining,ipynb). # ## Training Classifiers # # The metrics we have discussed so far are pretty _generic_ – that is, they are fixed no matter how the actual event space is structured. The field of _machine learning_ has come up with techniques that learn _classifiers_ from a given set of data – classifiers that are trained from labeled data and then can predict labels for new data sets. In our case, the labels are test outcomes (PASS and FAIL), whereas the data would be features of the events observed. # A classifier by itself is not immediately useful for debugging (although it could predict whether future inputs will fail or not). Some classifiers, however, have great _diagnostic_ quality; that is, they can _explain_ how their classification comes to be. [Decision trees](https://scikit-learn.org/stable/modules/tree.html) fall into this very category. # A decision tree contains a number of _nodes_, each one associated with a predicate. Depending on whether the predicate is true or false, we follow the given "true" or "false" branch to end up in the next node, which again contains a predicate. Eventually, we end up in the outcome predicted by the tree. The neat thing is that the node predicates actually give important hints on the circumstances that are _most relevant_ for deciding the outcome. # Let us illustrate this with an example. We build a class `ClassifyingDebugger` that trains a decision tree from the events collected. To this end, we need to set up our input data such that it can be fed into a classifier. # We start with identifying our _samples_ (runs) and the respective _labels_ (outcomes). All values have to be encoded into numerical values. class ClassifyingDebugger(DifferenceDebugger): """A debugger implementing a decision tree for events""" PASS_VALUE = +1.0 FAIL_VALUE = -1.0 def samples(self) -> Dict[str, float]: samples = {} for collector in self.pass_collectors(): samples[collector.id()] = self.PASS_VALUE for collector in debugger.fail_collectors(): samples[collector.id()] = self.FAIL_VALUE return samples debugger = test_debugger_html(ClassifyingDebugger()) debugger.samples() # Next, we identify the _features_, which in our case is the set of lines executed in each sample: class ClassifyingDebugger(ClassifyingDebugger): def features(self) -> Dict[str, Any]: features = {} for collector in debugger.pass_collectors(): features[collector.id()] = collector.events() for collector in debugger.fail_collectors(): features[collector.id()] = collector.events() return features debugger = test_debugger_html(ClassifyingDebugger()) debugger.features() # All our features have names, which must be strings. class ClassifyingDebugger(ClassifyingDebugger): def feature_names(self) -> List[str]: return [repr(feature) for feature in self.all_events()] debugger = test_debugger_html(ClassifyingDebugger()) debugger.feature_names() # Next, we define the _shape_ for an individual sample, which is a value of +1 or -1 for each feature seen (i.e., +1 if the line was covered, -1 if not). class ClassifyingDebugger(ClassifyingDebugger): def shape(self, sample: str) -> List[float]: x = [] features = self.features() for f in self.all_events(): if f in features[sample]: x += [+1.0] else: x += [-1.0] return x debugger = test_debugger_html(ClassifyingDebugger()) debugger.shape("remove_html_markup(s='abc')") # Our input X for the classifier now is a list of such shapes, one for each sample. class ClassifyingDebugger(ClassifyingDebugger): def X(self) -> List[List[float]]: X = [] samples = self.samples() for key in samples: X += [self.shape(key)] return X debugger = test_debugger_html(ClassifyingDebugger()) debugger.X() # Our input Y for the classifier, in contrast, is the list of labels, again indexed by sample. class ClassifyingDebugger(ClassifyingDebugger): def Y(self) -> List[float]: Y = [] samples = self.samples() for key in samples: Y += [samples[key]] return Y debugger = test_debugger_html(ClassifyingDebugger()) debugger.Y() # We now have all our data ready to be fit into a tree classifier. The method `classifier()` creates and returns the (tree) classifier for the observed runs. from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz class ClassifyingDebugger(ClassifyingDebugger): def classifier(self) -> DecisionTreeClassifier: classifier = DecisionTreeClassifier() classifier = classifier.fit(self.X(), self.Y()) return classifier # We define a special method to show classifiers: import graphviz class ClassifyingDebugger(ClassifyingDebugger): def show_classifier(self, classifier: DecisionTreeClassifier) -> Any: dot_data = export_graphviz(classifier, out_file=None, filled=False, rounded=True, feature_names=self.feature_names(), class_names=["FAIL", "PASS"], label='none', node_ids=False, impurity=False, proportion=True, special_characters=True) return graphviz.Source(dot_data) # This is the tree we get for our `remove_html_markup()` tests. The top predicate is whether the "culprit" line was executed (-1 means no, +1 means yes). If not (-1), the outcome is PASS. Otherwise, the outcome is TRUE. debugger = test_debugger_html(ClassifyingDebugger()) classifier = debugger.classifier() debugger.show_classifier(classifier) # We can even use our classifier to predict the outcome of additional runs. If, for instance, we execute all lines except for, say, Line 7, 9, and 11, our tree classifier would predict failure – because the "culprit" line 12 is executed. classifier.predict([[1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1]]) # Again, there are many ways to continue from here. Which events should we train the classifier from? How do classifiers compare in their performance and diagnostic quality? There are lots of possibilities left to explore, and we only begin to realize the potential for automated debugging. # ## Synopsis # # This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes. # # To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. # # Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default `CoverageCollector`, collecting line coverage. # ### Collecting Events from Calls # # To collect events from calls that are labeled manually, use debugger = TarantulaDebugger() with debugger.collect_pass(): remove_html_markup("abc") with debugger.collect_pass(): remove_html_markup('<b>abc</b>') with debugger.collect_fail(): remove_html_markup('"abc"') # Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.) # ### Collecting Events from Tests # # To collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form: debugger = TarantulaDebugger() with debugger: remove_html_markup("abc") with debugger: remove_html_markup('<b>abc</b>') with debugger: remove_html_markup('"abc"') assert False # raise an exception # `with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger. # ### Visualizing Events as a Table # # After collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs. debugger.event_table(args=True, color=True) # ### Visualizing Suspicious Code # # If you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines: debugger # ### Ranking Events # # The method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations. debugger.rank() # ### Classes and Methods # # Here are all classes defined in this chapter: # ignore from ClassDiagram import display_class_hierarchy # ignore display_class_hierarchy([TarantulaDebugger, OchiaiDebugger], abstract_classes=[ StatisticalDebugger, DifferenceDebugger, RankingDebugger ], public_methods=[ StatisticalDebugger.__init__, StatisticalDebugger.all_events, StatisticalDebugger.event_table, StatisticalDebugger.function, StatisticalDebugger.coverage, StatisticalDebugger.covered_functions, DifferenceDebugger.__enter__, DifferenceDebugger.__exit__, DifferenceDebugger.all_pass_events, DifferenceDebugger.all_fail_events, DifferenceDebugger.collect_pass, DifferenceDebugger.collect_fail, DifferenceDebugger.only_pass_events, DifferenceDebugger.only_fail_events, DiscreteSpectrumDebugger.code, DiscreteSpectrumDebugger.__repr__, DiscreteSpectrumDebugger._repr_html_, ContinuousSpectrumDebugger.code, ContinuousSpectrumDebugger.__repr__, RankingDebugger.rank ], project='debuggingbook') # ignore display_class_hierarchy([CoverageCollector, ValueCollector], public_methods=[ Tracer.__init__, Tracer.__enter__, Tracer.__exit__, Tracer.changed_vars, # type: ignore Collector.__init__, Collector.__repr__, Collector.function, Collector.args, Collector.argstring, Collector.exception, Collector.id, Collector.collect, CoverageCollector.coverage, CoverageCollector.covered_functions, CoverageCollector.events, ValueCollector.__init__, ValueCollector.events ], project='debuggingbook') # + [markdown] button=false new_sheet=true run_control={"read_only": false} # ## Lessons Learned # # * _Correlations_ between execution events and outcomes (pass/fail) can make important hints for debugging # * Events occurring only (or mostly) during failing runs can be _highlighted_ and _ranked_ to guide the search # * Important hints include whether the _execution of specific code locations_ correlates with failure # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## Next Steps # # Chapters that build on this one include # # * [how to determine invariants that correlate with failures](DynamicInvariants.ipynb) # * [how to automatically repair programs](Repairer.ipynb) # - # ## Background # # The seminal works on statistical debugging are two papers: # # * "Visualization of Test Information to Assist Fault Localization" \cite{Jones2002} by <NAME>, <NAME>, and <NAME> introducing Tarantula and its visualization. The paper won an ACM SIGSOFT 10-year impact award. # * "Bug Isolation via Remote Program Sampling" by <NAME>, <NAME>, <NAME>, and <NAME>, introducing the term "Statistical debugging". Liblit won the ACM Doctoral Dissertation Award for this work. # # The Ochiai metric for fault localization was introduced by \cite{Abreu2009}. The overview by Wong et al. \cite{Wong2016} gives a comprehensive overview on the field of statistical fault localization. # # The study by Parnin and Orso \cite{Parnin2011} is a must to understand the limitations of the technique. # + [markdown] button=false new_sheet=true run_control={"read_only": false} # ## Exercises # - # ### Exercise 1: A Postcondition for Middle # # What would be a postcondition for `middle()`? How can you check it? # **Solution.** A simple postcondition for `middle()` would be # # ```python # assert m == sorted([x, y, z])[1] # ``` # # where `m` is the value returned by `middle()`. `sorted()` sorts the given list, and the index `[1]` returns, well, the middle element. (This might also be a much shorter, but possibly slightly more expensive implementation for `middle()`) # Since `middle()` has several `return` statements, the easiest way to check the result is to create a wrapper around `middle()`: def middle_checked(x, y, z): # type: ignore m = middle(x, y, z) assert m == sorted([x, y, z])[1] return m # `middle_checked()` catches the error: from ExpectError import ExpectError with ExpectError(): m = middle_checked(2, 1, 3) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Exercise 2: Statistical Dependencies # # Using the dependencies from [the chapter on slicing](Slicer.ipynb), can you determine which specific data or control dependencies correlate with failure?
notebooks/StatisticalDebugger.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Classes class Sample: def __init__(new_object that was just created,) class Rectangle: def __init__(self, width, height): self.width = width self.height = height r1 = Rectangle(10, 20)
my_classes/basic/.ipynb_checkpoints/classes-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import pandas as pd import numpy as np from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten,Reshape from keras.layers import Conv1D, MaxPooling1D from keras.utils import np_utils from keras.layers import LSTM, LeakyReLU, CuDNNLSTM, CuDNNGRU from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping import h5py import os import tensorflow as tf from keras.backend.tensorflow_backend import set_session from keras import regularizers import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler # + os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' os.environ['CUDA_VISIBLE_DEVICES'] = '1' os.environ['TF_CPP_MIN_LOG_LEVEL']='2' config = tf.ConfigProto() config.gpu_options.allow_growth = True set_session(tf.Session(config=config)) # - with h5py.File(''.join(['data/allcoin2015to2017_wf.h5']), 'r') as hf: test_inputs = hf['test_inputs'].value test_outputs = hf['test_outputs'].value test_input_times = hf['test_input_times'].value test_output_times = hf['test_output_times'].value original_test_inputs = hf['original_test_inputs'].value original_test_outputs = hf['original_test_outputs'].value original_datas= hf['original_datas'].value step_size = test_inputs.shape[1] units= 50 second_units = 30 batch_size = 8 nb_features = test_inputs.shape[2] epochs = 50 output_size=1 # + result_file = 'allcoin2015to2017_WF_GRU_tanh_leaky.csv' df = pd.read_csv(result_file) zero_indexes = [i for i,e in enumerate(df.epoch) if e==0] zero_indexes df['partition'] = -1 for z in zero_indexes: df.partition += [0]*(z) + [1] * (len(df)-z ) df.loc[df.val_loss == df.val_loss.min(), :] df.groupby('partition').describe()['val_loss'] # - model = Sequential() model.add(CuDNNGRU(units=units, input_shape=(step_size,nb_features),return_sequences=True)) model.add(Activation('tanh')) model.add(Dropout(0.2)) model.add(MaxPooling1D(pool_size=16)) model.add(Dense(output_size)) model.add(LeakyReLU()) model.load_weights('weights/allcoin2015to2017_WF_GRU_tanh_leakypartition_21.hdf5') model.compile(loss='mse', optimizer='adam') predicted = model.predict(test_inputs) scaler = StandardScaler() scaler.fit(original_datas) predicted_inverted = [] for i in range(len(predicted)): predicted_inverted.append(scaler.inverse_transform(predicted[i,:])) predicted_inverted = np.array(predicted_inverted) ground_true_df = pd.DataFrame() ground_true_df['times'] = pd.to_datetime(test_input_times[:,:,0].reshape(-1),unit='s') ground_true_df['value'] = original_test_inputs[:,:,0].reshape(-1) ground_true_df.set_index('times') ground_true_df = ground_true_df.drop_duplicates(subset=['times','value']) ground_true_df.head() # + plt.figure(figsize=(20,10)) plt.plot(ground_true_df.times,ground_true_df.value, label = 'Actual') for i in range(len(test_output_times)): if i%16 == 0: prediction_df = pd.DataFrame() prediction_df['times']= pd.to_datetime(test_output_times[i,:,:].reshape(-1),unit='s') prediction_df['value']= predicted_inverted[i,:,:].reshape(-1) plt.plot(prediction_df.times,prediction_df.value,'r', label='Predicted') # plt.legend(loc='upper left') plt.savefig('result/allcoin2015to2017_WF_GRU_tanh_leaky_scale_on_whole.png') plt.show() # - pd.to_datetime(test_output_times[i,:,:].reshape(-1),unit='s')
.ipynb_checkpoints/PlotGRUWF-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/baker371/k8-data-visualization/blob/lwasampijja-baker/upwork-devs/lwasampijja-baker/Introduction_to_Google_Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="olglENk1mC3e" colab_type="text" # ## **Introduction to Google colaboratory (Colab)** # # + [markdown] id="k1VQnhrtmpG0" colab_type="text" # Google released its internal research tool “Colaboratory” which is a tool for machine learning education and research. It’s a **Jupyter notebook** environment that requires no setup to use. In simpler terms, it’s a jupyter notebook with all the collaboration abilities of Google docs, meaning more than one person can work on the same code at the same time. But, the real attraction is the free computing power that this tool offers. Google colab currently offers the computing services of a Tesla K80 GPU for free. # + [markdown] id="JVYyVswLthyN" colab_type="text" # ### 1. Getting Started # + [markdown] id="UNN9NXtsBBhv" colab_type="text" # ![](http://drive.google.com/uc?export=view&id=1ZTSHY6fMu4HF8opXkTXKlhXHMJmfNFbD) # + [markdown] id="F7pCCu2Qt4bm" colab_type="text" # Go to www.google.com and type in Google colab. Click on Google Colab as shown in the image above. You must have and logged into your Gmail account. # + [markdown] id="Krk5OTOiuMBa" colab_type="text" # ### 2. Openning Google Colab # + [markdown] id="_lj3I9E-DDJj" colab_type="text" # ![](http://drive.google.com/uc?export=view&id=1wlGSQOuNYXExmeHYkR9MjXrE5LXiGFfF) # + [markdown] id="cBxmPcRJu5w8" colab_type="text" # To select Jupyter notebooks already on your github, select Github as indicated in (1) above and copy your Github link as shown in (2). # Select the appropriate repository and branch as you see fit (3). Existing notebooks will be highlighted and you can choose to open any (4) or go ahead to create a new notebook (5). # + [markdown] id="tkXoKqP6wo1Z" colab_type="text" # ### 3. Inside the Notebook # + [markdown] id="i3WsFSGZDZQN" colab_type="text" # ![](http://drive.google.com/uc?export=view&id=1J5m4MkbPH2sm5882Wn2u2cUFLeL614QT) # + [markdown] id="laqhnJq_xI-2" colab_type="text" # Just like any Google Doc, the notebook looks similar with some key features including; # 1. To add a code block. # 2. To add a text block. # 3. Upload files from the local drive or mount your Google drive. # 4. The ability to add comments. # 5. Share the notebook with specific individuals. # 6. etc # + [markdown] id="-YpfbtXJyYd4" colab_type="text" # ### 4. The Code Blocks # + [markdown] id="OSgHbP0xDvAn" colab_type="text" # ![](http://drive.google.com/uc?export=view&id=1DRwICaPfUreMV9fK8vmtCF5sIuQV1Hr9) # + [markdown] id="uAM6rgCO0e-x" colab_type="text" # As you will soon realise, most of the python packages you will need are already installed and up to date. However if you need a package that is not installed, you can always go ahead and install it. # # To run the code, click on the "play" button. Happy coding! # + [markdown] id="W46h06dL1qCc" colab_type="text" # # + [markdown] id="bYbkbqqM1p_2" colab_type="text" # ### 5. The Text Blocks # # These work even better than the normal jupyter notebooks with normal text editor menu and buttons to bold text, create hyper links, add images, e.t.c. # # **Note:** The usual notebook markdown still works here and feel free to use it. # + [markdown] id="IgItzLH_1p8p" colab_type="text" # ### 6. Commiting your work to Github. # + [markdown] id="Q7Z29ELV3-5r" colab_type="text" # 1. Sign into your Github account because you will be asked to authenticate. # 2. Go to File -> Save a copy in Github, the following pop up will show up. # + [markdown] id="uND4IENMD_So" colab_type="text" # ![](http://drive.google.com/uc?export=view&id=1nxtXxaOv8_vEbTna0dzCtbbRb5XxPpmi) # + [markdown] id="lKddyxw04mKb" colab_type="text" # 3. Specifiy your repository, and select the appropriate branch if any. # 4. Specify the path where you would like your notebook to be saved. # 5. Write your commit message and click ok. # + [markdown] id="7g3cJCWN5MPB" colab_type="text" # ### Advanced Use # + [markdown] id="Mbj_YKS_7bXm" colab_type="text" # 1. The notebook can be kept private, shared with rights to view only, edit, amongest multiple individuals or to be viewed by anyone with a link. # 2. Just like anyother Google Doc, the notebook has its own version control. # 3. API can be imported directly or as a file for usage. # + [markdown] id="Rgu3YEcz8obp" colab_type="text" # ## <center> THE END </center>
upwork-devs/lwasampijja-baker/Introduction_to_Google_Colab.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .fs // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: .NET (F#) // language: F# // name: .net-fsharp // --- // # Taco Truck Problem // ## Import Flips Library // // Import the Flips library by typing `#r "nuget: Flips, Version=2.0.0"` // // This exercise will rely on `System`, `Flips.Domain`, and `Flips.Solve` // + //import Flips below open System open Flips.Domain open Flips.Solve // - // ## Step 1: Create Decision Variables // // Create decision variables using the `Decision` module, namely the `Decision.createContinuous` function. This function takes a `variable name`, `lower bound`, and `upper bound` as parameters. // // ```fsharp // let numberOfHotdogs = Decision.createContinuous "NumberofHotdogs" 0.0 infinity // ``` // + // Create a decision variable for the number of burgers below // Create a decision variable for the number of tacos below // - // ## Step 2: Create the Objective // // Create an objective expression using your variables using your decision variables and the parameters given. Then create an objective based on that using `Objective.create`. This function takes the `objective name`, `goal` (Maximize or Minimize), and `objective expression` as parameters. // // ```fsharp // let objectiveExpression = 1.50*numberOfHamburgers+1.0*numberOfTacos // let objective = Objective.create "MaximizeRevenue" Maximize objectiveExpression // ``` // // *or* // // ```fsharp // let objective = Objective.create "MaximizeRevenue" Maximize (1.50*numberOfHamburgers+1.0*numberOfTacos) // ``` // + // Create an objective expression here // Create an objective here // - // ## Step 3: Create Constraints // // Create constraints based on those given in the problem using `Constraint.Create`. This function takes the `constraint name` and `constraint expression` as parameters. // // *Note*: Due to .NET standards regarding the use of `>=`, `<=`, and `=`, this library uses `>==`, `<==`, and `==` as the operators for comparison. // // ```fsharp // let maxHotDogs = Constraint.Create "MaxHotDogs" (numberOfHotDogs <== hotdogBuns) // ``` // + // Create a constraint for the max number of Burgers // Create a constraint for the max number of Tacos // Create a constraint for the total weight // - // ## Step 4: Create the Model // // Using the `Model` module, you can create a model using `Model.create` (which takes the `objective` as a parameter). Then add the constraints using `Model.addConstraint`, passing in the `constraint`. This is all done using the pipe (`|>`) operator. // // ```fsharp // let model = // Model.create objective // |> Model.addConstraint maxHotDogs // |> Model.addConstraint maxWeight // ``` // Create the model below // ## Step 5: Create Settings and Solve // // To solve the model, create the settings needed to run the model using `SolverSettings`. For this problem we will use the CBC solver and allow 10 seconds (10,000 ms) for solving. There is an option to write to an LP file, but for now, specify it as `None`. // // ```fsharp // let settings = { // SolverType = SolverType.CBC // MaxDuration = 10_000L // WriteLPFile = None // } // ``` // // From there, run the `solve` function using the model and the solver settings. // // ```fsharp // let result = solve settings model // ``` // + // Create solver settings below // Store result of solved model below // - // ## Step 6: Print Results // // The value of `result` will either be `Optimal` if it found the optimal solution or `Suboptimal` if it ran out of time. // // ```fsharp // match result with // | Suboptimal msg -> printfn "Unable to solve. Error: %s" msg // | Optimal solution -> // printfn "Objective Value: %f" solution.ObjectiveResult // // for (decision, value) in solution.DecisionResults |> Map.toSeq do // let (DecisionName name) = decision.Name // printfn "Decision: %s\tValue: %f" name value // ``` // + printfn "--Result--" // Print the results of the solver below
code/fsharp-workshop/Solutions/.ipynb_checkpoints/Ex01_TacoTruck-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Reading image files SEED=42 # + import glob import os import pathlib import matplotlib.pyplot as plt import numpy as np import pandas as pd from PIL import Image # + TRAINING_CSV_PATH = '../data/raw/Final_Training/Images/*/*.csv' IMAGE_WIDTH = 30 IMAGE_HEIGHT = IMAGE_WIDTH # - os.path.dirname(TRAINING_CSV_PATH) # + def build_images_database(path): """Build a pandas Dataframe with all images information. Parameters ---------- path: path pattern to read csv files containing images information Return ------ A pandas DataFrame with one line per image """ df_list = [] for filename in glob.glob(path): df = pd.read_csv(filename, sep=';') df['path'] = os.path.dirname(filename) + '/' + df['Filename'] df_list.append(df) return pd.concat(df_list, ignore_index=True) def build_images_list(filename_list): """Build a list of images as np.array Parameters ------- filename_list : list of images filenames Returns ------ An list of np.array images """ images_list = [] for filename in filename_list: image = Image.open(filename) image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT)) image = image.getdata(band=0) image = np.array(image) images_list.append(image) return images_list def build_classes_array(df, col_name="ClassId"): """" Build a vector numpy array of classes Parameters --------- df : pd DataFrame with a column containing classes id Returns ---------- """ return df[col_name].values # - images_df = build_images_database(TRAINING_CSV_PATH) images_df.head() images_df["Width"].value_counts() plt.imshow(Image.open(images_df.query('Width == 162')['path'].values[0]).resize((30, 30))) images_list = build_images_list(images_df['path']) images_list[1].shape # ## Training the model # + from sklearn import svm, metrics, datasets from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.metrics import accuracy_score from sklearn.metrics import roc_curve, auc # - # ### Take a sample for the code images_df.shape # + N = None if N is None: images_list = build_images_list(images_df['path'].values) data = np.array(images_list) labels = build_classes_array(images_df) else : images_list = build_images_list(images_df.head(N)['path'].values) data = np.array(images_list) labels = build_classes_array(images_df)[:N] print(data.shape, labels.shape) # + #Splitting training and testing dataset X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=SEED) print(X_train.shape, X_test.shape, y_train.shape, y_test.shape) # + #after several tests in Google collab : optimal parameters : param_grid = [ {'C': [10], 'kernel': ['rbf']}, ] svc = svm.SVC(random_state=SEED) clf = GridSearchCV(svc, param_grid) model = clf.fit(X_train, y_train) # - # ### Prediction & Score model # + # prédire sur le jeu de test y_pred = clf.predict(X_test) # calculate accuracy accuracy = accuracy_score(y_test, y_pred) print('Model accuracy is: ', accuracy) # - accuracy_df = pd.DataFrame() accuracy_df['actual'] = y_test accuracy_df['predicted']= y_pred # + import seaborn as sns sns.heatmap(pd.crosstab(accuracy_df['actual'], accuracy_df['predicted'])) # - # ## Save the classifier # + import pickle from joblib import dump, load # - s = pickle.dumps(clf) dump(clf, '../models/SVM-traffic.joblib') # ## Testing model def classify_image(path, model): """Classify image by model Parameters ---------- path: filepath to image model: tf/keras classifier Returns ------- class id returned by model classifier """ images_list = [] image = Image.open(path) image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT)) image = image.getdata(band=0) image = np.array(image) images_list.append(image) return clf.predict(np.array(images_list)) # Tirage aléatoire d'une image image_sample = images_df.sample(1) classify_image(image_sample['path'].values[0], clf.predict) # Affichage de l'image plt.imshow(Image.open(image_sample['path'].values[0])) # ## Reloading model test_model = load('../models/SVM-traffic.joblib') # Tirage aléatoire d'une image image_sample = images_df.sample(1) classify_image(image_sample['path'].values[0], test_model) # ## Evaluating performance # ### Load test data test_images_df = pd.read_csv('../data/raw/GT-final_test.csv', sep=';') test_images_df['path'] = test_images_df['Filename'].apply(lambda x:'../data/raw/Final_Test/Images/' + x) test_images_df.tail() # + test_list = build_images_list(test_images_df['path'].values) test_data = np.array(test_list) test_labels = build_classes_array(test_images_df) # - print(test_data.shape, test_labels.shape) # prédire sur le jeu de données Test : y_pred_test = test_model.predict(test_data) test_labels[:3] test_labels == y_pred_test np.transpose(y_pred_test) # ### Compare actual to predicted accuracy_df = pd.DataFrame() accuracy_df['actual'] = test_labels accuracy_df['predicted']= y_pred_test accuracy_df.head() # + import seaborn as sns sns.heatmap(pd.crosstab(accuracy_df['actual'], accuracy_df['predicted'])) # - pd.crosstab(accuracy_df['actual'], accuracy_df['predicted']) # + import seaborn as sns fig,ax = plt.subplots(figsize=(12,10)) sns.heatmap(pd.crosstab(accuracy_df['actual'], accuracy_df['predicted'],normalize="index"), cmap="RdGy", ax = ax); # - # ### Report print("Classification report for - \n{}:\n{}\n".format( clf, metrics.classification_report(test_labels, y_pred_test))) # On constate que l'accuracy de ce modèle (SVM) est plus faible que le neural network. # - L'accuracy de la base train, du validation set du neural network est égal à 0,98 contre 0,94 pour le SVM modèle # - De plus, on trouve une accuracy de 0,73 pour la base test pour le SVM, tandis que pour le neural network on trouve une accuracy de 0,96 pour le neural network. # # Concernant les erreurs de classification, on remarque que : # ##### Le neural network classe mal : # - la classe 3 (24 mauvaises classification) : qui correspond aux panneaux 'Speed limit (60km/h)', le neural network les confond avec les panneaux 'Speed limit (80km/h)' # - la classe 12 (40 mauvaises classification), qui sont les panneaux 'Priority road', confondus avec 'No vehicles' # - la classe 30 (30 mauvaises classification), qui sont les panneaux 'Beware of ice/snow', confondus avec 'Slippery road' # # ##### Le SVM classe mal (moins de la moitié) : # - la classe 23,'Slippery road' confondu avec la 9, la 10 ("No passing', "No passing veh over 3.5 tons') # - la classe 19,'Dangerous curve left', majoritairement confondu avec la classe 4 ('Speed limit (70km/h)') # - la classe 32,''End speed + passing limits', majoritairement confondu avec la classe 6 ('End of speed limit (80km/h)') # - la classe 28 # - la classe 27, etc.. # # #### On voit très bien, que le neural network est pour cette classification, beaucoup plus performant que le modèle SVM.
notebook/SVM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Python doesn't need "getters" or "setters" # ...because all attributes and methods are public, and you're expected to behave yourself. The pythonic version... if you want to prevent direct access to attributes is to use properties. # + # hidden_name is meant to be "private" # don't want people to access this directly, so we'll use a getter and setter. class Duck(): def __init__(self, input_name): self.hidden_name = input_name # get_name is the "getter" def get_name(self): print("inside the getter") return self.hidden_name # set_name is the "setter" def set_name(self, input_name): print("inside the setter") self.hidden_name = input_name # property is pythonic! # property defines two METHODS as properties of the attribute called "name" name = property(get_name, set_name) # + # create an object using class Duck fowl = Duck('Howard') # when you refer to the "name" of any Duck object.. # it calls the get_name() method fowl.name # - # direct call as well fowl.get_name() # when you ASSIGN a value to the name attribute.. # the set_name() method will be called... fowl.name = "Daffy" fowl.name # ## another way to define properties is with *decorators* # In this example... # - @proeprty goes before the getter method # - @name.setter goes before the setter method # # The point of this is to show that you can have no visible get_name() or set_name() methods! # # **Decorators** are functions that take another function and extends the behavior of the latter function without explicitly modifying it. See link: https://realpython.com/primer-on-python-decorators/ class Duck(): def __init__(self, input_name): self.hidden_name = input_name # note there's no get_name() method @property def name(self): print("inside the getter") return self.hidden_name # why is name.setter not highlighted correctly? # there is no longer a set_name() method either! @name.setter def name(self, input_name): print("inside the setter") self.hidden_name = input_name # now we don't need a ... # name = property(get_name, set_name) fowl = Duck("Howard") fowl.name fowl.name = "Donald"
Luba Python/pythonic_duck_classes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This file shows some of the plots of the row-by-row time-shifted Pioneer data in more detail, including plots with expanded x-axes as well as some snapshots at different time periods. It also compares the row-by-row time-shift to the monthly time-shift. # This is a plot of the overall comparison between OMNI and Pioneer solar wind speed. # + import numpy as np import pandas as pd import datetime import matplotlib.pyplot as plt fig = plt.figure() fig.suptitle("Data From Pioneer 05/01/1972 to 05/01/1973") mF0 = pd.read_csv('Pioneer1Year.csv') mF0['Time'] = pd.to_datetime(mF0['Time']) fig1 = plt.figure(figsize=(16, 8)) fig1.suptitle("Pioneer speed compared with OMNI speed") ax = plt.gca() mF0.plot(kind='line', x='Time', y='OMNI_SPEED_kms', color='red', ax=ax) mF0.plot(kind='line', x='Time', y='PIONEER_SPEED_kms', ax=ax) plt.ylabel("Km/s") # - # This is a plot of the solar wind speed in a place where the time-shift is working well. fig1 = plt.figure(figsize=(16, 8)) fig1.suptitle("Pioneer speed compared with OMNI speed 1972-05-16 to 1972-05-18") ax = plt.gca() mF0.plot(kind='line', x='Time', y='OMNI_SPEED_kms', color='red', ax=ax) mF0.plot(kind='line', x='Time', y='PIONEER_SPEED_kms', ax=ax) plt.ylabel("Km/s") plt.xlim([datetime.date(1972, 5, 16), datetime.date(1972, 5, 18)]) # This is a snapshot of the solar wind speed in a place where the timshift is not working well. fig1 = plt.figure(figsize=(16, 8)) fig1.suptitle("Pioneer speed compared with OMNI speed 1972-12-01 to 1973-01-01") ax = plt.gca() mF0.plot(kind='line', x='Time', y='OMNI_SPEED_kms', color='red', ax=ax) mF0.plot(kind='line', x='Time', y='PIONEER_SPEED_kms', ax=ax) plt.ylabel("Km/s") plt.xlim([datetime.date(1972, 12, 1), datetime.date(1973, 1, 1)]) # This is a plot of the overall scaled density between Pioneer and OMNI. fig2 = plt.figure(figsize=(16, 8)) fig2.suptitle("Scaled Pioneer density compared with OMNI density") ax = plt.gca() mF0.plot(kind='line', x='Time', y='OMNI_DENSITY_Ncm3', color='red', ax=ax) mF0.plot(kind='line', x='Time', y='SCALED_PIONEER_DENSITY', ax=ax) plt.ylabel("N/cc") # This is a snapshot of the density plot where the timeshift is working well. fig2 = plt.figure(figsize=(16, 8)) fig2.suptitle("Scaled Pioneer density compared with OMNI density 1972-05-16 to 1972-05-18") ax = plt.gca() mF0.plot(kind='line', x='Time', y='OMNI_DENSITY_Ncm3', color='red', ax=ax) mF0.plot(kind='line', x='Time', y='SCALED_PIONEER_DENSITY', ax=ax) plt.ylabel("N/cc") plt.xlim([datetime.date(1972, 5, 16), datetime.date(1972, 5, 18)]) # This is a snapshot of the density plot where the timeshift is not working well. fig2 = plt.figure(figsize=(16, 8)) fig2.suptitle("Scaled Pioneer density compared with OMNI density 1972-12-01 to 1973-01-01") ax = plt.gca() mF0.plot(kind='line', x='Time', y='OMNI_DENSITY_Ncm3', color='red', ax=ax) mF0.plot(kind='line', x='Time', y='SCALED_PIONEER_DENSITY', ax=ax) plt.ylabel("N/cc") plt.xlim([datetime.date(1972, 12, 1), datetime.date(1973, 1, 1)]) # This is a plot of the overall scaled magnetic field average. fig3 = plt.figure(figsize=(16, 8)) fig3.suptitle("Scaled Pioneer magnetic field average compared with OMNI magnetic field average") ax = plt.gca() mF0.plot(kind='line', x='Time', y='OMNI_MAG_AVG_nT', color='red', ax=ax) mF0.plot(kind='line', x='Time', y='SCALED_PIONEER_MAG_AVG', ax=ax) plt.ylabel("nT") # This is an shapshot of the scaled magnetic field average where the timeshift is working well. fig3 = plt.figure(figsize=(16, 8)) fig3.suptitle("Scaled Pioneer magnetic field average compared with OMNI magnetic field average 1972-05-16 to 1972-05-18") ax = plt.gca() mF0.plot(kind='line', x='Time', y='OMNI_MAG_AVG_nT', color='red', ax=ax) mF0.plot(kind='line', x='Time', y='SCALED_PIONEER_MAG_AVG', ax=ax) plt.ylabel("nT") plt.xlim([datetime.date(1972, 5, 16), datetime.date(1972, 5, 18)]) # This is an shapshot of the scaled magnetic field average where the timeshift is not working well. fig3 = plt.figure(figsize=(16, 8)) fig3.suptitle("Scaled Pioneer magnetic field average compared with OMNI magnetic field average 1972-12-01 to 1973-01-01") ax = plt.gca() mF0.plot(kind='line', x='Time', y='OMNI_MAG_AVG_nT', color='red', ax=ax) mF0.plot(kind='line', x='Time', y='SCALED_PIONEER_MAG_AVG', ax=ax) plt.ylabel("nT") plt.xlim([datetime.date(1972, 12, 1), datetime.date(1973, 1, 1)]) # I think that the reason why the row-by-row timeshift appears to be worse than the month-by-month is that the month-by-month used the calculation of theoretical arrival time based on distance and speed as a starting point. Each month was adjusted further so that the maxima and minimums were closer between the data sets, additionally the exact time-shift was determined by the comparisons all three solar wind variables between OMNI and Pioneer rather than upon only one. Looking at the density graph for when the time-shift was not working (1792-12-01 to 1973-01-01) it appears that the time-shift is being overestimated by the speed alone. # Here is a plot of the number of hours that the data is being time-shifted, looking at these values I think that it's possible that some of the inaccuracy is due to the fact that Pioneer could be hit by slow wind that would be at a larger time difference and fast wind at a smaller time difference, and the row-by-row time-shift does not account for this. fig3 = plt.figure(figsize=(16, 8)) fig3.suptitle("Pioneer time-shift values over time") ax = plt.gca() mF0.plot(kind='line', x='Time', y='Time_offset_hours', color='red', ax=ax) plt.ylabel("Hours") plt.xlim([datetime.date(1972, 5, 1), datetime.date(1973, 1, 1)]) fig3 = plt.figure(figsize=(16, 8)) fig3.suptitle("Pioneer time-shift values by Distance") ax = plt.gca() mF0.plot(kind='line', x='PIONEER_DIST_Au', y='Time_offset_hours', color='red', ax=ax) plt.ylabel("Hours") plt.xlim([1.35, 3.35]) # As you can see, the time-shift follows a generally linear relationship with time and distance, however, beyond 3 AU there seems to be a lot of variation between distance values that are close to eachother, which might account for the difference in accuracy. # Here are the time-shift values for the monthly time-shifted Pioneer data. # + mF1 = pd.read_csv('Pioneer2.csv') mF1['EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ'] = pd.to_datetime(mF1['EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ']) mF1['EPOCH_yyyy-mm-ddThh:mm:ss.sssZ'] = pd.to_datetime(mF1['EPOCH_yyyy-mm-ddThh:mm:ss.sssZ']) mF1.rename(columns = {'EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ':'OMNI Time', 'EPOCH_yyyy-mm-ddThh:mm:ss.sssZ':'Time'}, inplace = True) fig1 = plt.figure(figsize=(16, 8)) fig3.suptitle("Pioneer time-shift values over time") ax = plt.gca() mF1.plot(kind='line', x='Time', y='Time_offset_hours', color='red', ax=ax) plt.ylabel("Hours") #plt.xlim([datetime.date(1972, 5, 1), datetime.date(1973, 3, 1)]) # - fig3 = plt.figure(figsize=(16, 8)) fig3.suptitle("Pioneer time-shift values by Distance") ax = plt.gca() mF1.plot(kind='line', x='PIONEER_DIST_Au', y='Time_offset_hours', color='red', ax=ax) plt.ylabel("Hours") #plt.xlim([datetime.date(1972, 12, 1), datetime.date(1973, 1, 1)]) # These time-shift values are faily similar for values below 1 AU, but they diverge as distance increases. Looking at the average values for the time-shift this observation is also true. # In order to calculate which technique should be used in the future, we should consider 1) which is backed-up by physical interactions and 2) which produces a better result. Obviously, using the equation for a row-by-row timeshift produces a more re-producable and unbiased than by hand-tuning a monthly-timeshift. If I had to hypothesize why there is such a large difference between the two methods, it would be due the cyclical nature the solar wind fluxuations, i.e. that the larger signatures that occur once every week or so were offset at some point during the time-shifting process, causing the data to be off by a number of days. It could have also been caused by using the solar wind speed at OMNI instead of Pioneer to estimate the time-shift, if somehow the speed that reached Pioneer would have been at a higher speed that the solar wind speed measured at OMNI. Technically this could be possible if there was enough of a difference between the Longitudes or Latitudes of OMNI and Pioneer. mF0.describe() mF1['LAT_DIFF_deg'] = mF1.apply(lambda row: abs(row.OMNI_LAT_deg - row.PIONEER_LAT_deg), axis = 1) mF1['LONG_DIFF_deg'] = mF1.apply(lambda row: abs(row.OMNI_LONG_deg - row.PIONEER_LONG_deg), axis = 1) mF1['SCALED_PIONEER_DENSITY'] = mF1.apply(lambda row: row.PIONEER_DENSITY_Ncm3 * row.PIONEER_DIST_Au * row.PIONEER_DIST_Au, axis = 1) mF1['SCALED_PIONEER_MAG_AVG'] = mF1.apply(lambda row: row.PIONEER_MAG_AVG_nT * row.PIONEER_DIST_Au, axis = 1) mF1['Speed_DIFF_Kms'] = mF1.apply(lambda row: abs(row.OMNI_SPEED_kms - row.PIONEER_SPEED_kms), axis = 1) mF1['Density_DIFF_Ncc'] = mF1.apply(lambda row: abs(row.OMNI_DENSITY_Ncm3 - row.SCALED_PIONEER_DENSITY), axis = 1) mF1['Mag_DIFF_nT'] = mF1.apply(lambda row: abs(row.OMNI_MAG_AVG_nT - row.SCALED_PIONEER_MAG_AVG), axis = 1) mF1.describe() # In answering the second question, we see that the difference values for the three main solar wind variables for the row-by-row timeshift are slightly worse than the ones for the ones for the monthly-average timeshift. This could be caused by a number of things. First, the row-by-row dataset has data extending further into the future, so its possible that something else could be effecting its accuracy. Additionally, shifting the values to minimize the differences between all three solar wind variables, which was done for the monthly time-shift, may have resulted in better performance for the density and magnetic field average, as these may not propagate directly from the solar wind speed. # In conclusion, I think that the row-by-row timeshift may be a better choice for future research into this issue, as it is easier to gather more data and more replecatable for future data-gathering efforts. However, I would also keep in mind that basing the entire timeshift off of the solar wind speed may not fully capture how the other solar wind variables propagate over time (looking at the second plot for the scaled density, this certainly seems to be the case). Additionally, the arrival times for the row-by-row shifted data don't seem to be as accurate as a result of the "hands-off" approach. If I were to move forward with this work, I would try and filter out values with too large of a longitude or latitude difference, and contintue to try and refine the timeshift, perhaps even creating a ML model to calculate the timeshift from well-performing data.
Pioneer Time-Shift Comparison.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### imports # + import numpy as np import hypertools as hyp import matplotlib.pyplot as plt import seaborn as sns import re import joblib import cortex import tables import pickle from sklearn.feature_extraction.text import CountVectorizer from sklearn.decomposition import LatentDirichletAllocation from scipy.spatial.distance import cdist from stimulus_utils import load_grids_for_stories, load_generic_trfiles from dsutils import make_word_ds, make_semantic_model from npp import zscore from util import make_delayed from ridge import bootstrap_ridge from nltk.corpus import stopwords import logging logging.basicConfig(level=logging.DEBUG) % matplotlib inline # - sns.set_context('talk') from SemanticModel import SemanticModel eng1000 = SemanticModel.load("../data/english1000sm.hf5") # ### load data # + # stories for fitting regression mdoels Rstories = ['alternateithicatom', 'avatar', 'howtodraw', 'legacy', 'life', 'myfirstdaywiththeyankees', 'naked', 'odetostepfather', 'souls', 'undertheinfluence'] # story for testing regression models/predicting activity Pstories = ['wheretheressmoke'] allstories = Rstories + Pstories # + # Load TextGrids grids = load_grids_for_stories(allstories) # Load TRfiles trfiles = load_generic_trfiles(allstories) # Make word datasequences wordseqs = make_word_ds(grids, trfiles) # dictionary of {storyname : word DataSequence} # + # load wikipedia articles wiki_corpus = hyp.load('wiki').data[0] # fix encoding, remove newlines wiki_corpus = np.array([doc[0].decode('utf8').replace('\n', ' ') for doc in wiki_corpus]) # remove odd characters for i, doc in enumerate(wiki_corpus): wiki_corpus[i] = re.sub("[^\w\s'-]+", '', doc) # - # combine wikipedia pages and stories into full corpus training_corpus = np.append(wiki_corpus, np.array([' '.join(wordseqs[story].data) for story in allstories]), 0) # fMRI data resptf = tables.open_file("../data/fmri-responses.hf5") zRresp = resptf.root.zRresp.read() zPresp = resptf.root.zPresp.read() mask = resptf.root.mask.read() # + # regression model params alphas = np.logspace(1, 3, 10) nboots = 1 chunklen = 40 nchunks = 20 # param for concatenating stories trim = 5 # - # # functions def lanczosinterp2D(data, oldtime, newtime, window=3, cutoff_mult=1.0): """Interpolates the columns of [data], assuming that the i'th row of data corresponds to oldtime(i). A new matrix with the same number of columns and a number of rows given by the length of [newtime] is returned. The time points in [newtime] are assumed to be evenly spaced, and their frequency will be used to calculate the low-pass cutoff of the interpolation filter. [window] lobes of the sinc function will be used. [window] should be an integer. """ # Find the cutoff frequency cutoff = 1/np.mean(np.diff(newtime)) * cutoff_mult print ("Doing lanczos interpolation with cutoff=%0.3f and %d lobes." % (cutoff, window)) # Build up sinc matrix lancmat = np.zeros((len(newtime), len(oldtime))) for ndi in range(len(newtime)): lancmat[ndi,:] = lanczosfun(cutoff, newtime[ndi]-oldtime, window) # Construct new signal by multiplying the sinc matrix by the data newdata = np.dot(lancmat, data) return newdata def lanczosfun(cutoff, t, window=3): """Compute the lanczos function with some cutoff frequency [B] at some time [t]. If given a [window], only the lowest-order [window] lobes of the sinc function will be non-zero. """ t = t * cutoff val = window * np.sin(np.pi*t) * np.sin(np.pi*t/window) / (np.pi**2 * t**2) val[t==0] = 1.0 val[np.abs(t)>window] = 0.0 return val # ### grid search function def search_params(topic_opts, window_opts, training_corpus): output_dict = dict() for n_topics in topic_opts: for wsize in window_opts: print(f'topics: {n_topics}\twindow size: {wsize}') # get windows windows = [] for doc in training_corpus: text = doc.split() for i in range(0, len(text), wsize): windows.append(' '.join(text[i:i+wsize])) # fit model print('fitting model') cv = CountVectorizer(stop_words='english') fit_cv = cv.fit_transform(windows) LDA_wiki = LatentDirichletAllocation(n_components=n_topics, learning_method='batch', random_state=0).fit(fit_cv) # transform words print('projecting stimulus words') ldaseqs = dict() for story in allstories: vecs = np.empty((len(wordseqs[story].data), n_topics)) for ix, word in enumerate(wordseqs[story].data): vecs[ix] = LDA_wiki.transform(cv.transform([word])) ldaseqs[story] = vecs # downsample to TR scale print('downsampling topic vectors') huthseqs = dict() for story in allstories: huthseqs[story] = make_semantic_model(wordseqs[story], eng1000) downsampled_ldaseqs = dict() for story in allstories: downsampled_ldaseqs[story] = lanczosinterp2D(ldaseqs[story], huthseqs[story].data_times, huthseqs[story].tr_times, window=3) # concatenate across stories LDARstim = np.vstack([zscore(downsampled_ldaseqs[story][5+trim:-trim]) for story in Rstories]) LDAPstim = np.vstack([zscore(downsampled_ldaseqs[story][5+trim:-trim]) for story in Pstories]) # concatenate delays for FIR model delLDARstim = make_delayed(LDARstim, range(1, 5)) delLDAPstim = make_delayed(LDAPstim, range(1, 5)) # run regression analysis print('computing alpha') Lwt, Lcorr, Lalphas, Lbscorrs, Lvalinds = bootstrap_ridge(delLDARstim, zRresp, delLDAPstim, zPresp, alphas, nboots, chunklen, nchunks, singcutoff=1e-10, single_alpha=True) # get weights LDApred = np.dot(delLDAPstim, Lwt) # get voxelwise correlations print('computing response correlations') LDA_voxcorrs = np.zeros((zPresp.shape[1],)) for vi in range(zPresp.shape[1]): LDA_voxcorrs[vi] = np.corrcoef(zPresp[:,vi], LDApred[:,vi])[0,1] print(f'median correlation: {str(np.median(LDA_voxcorrs))[:5]}') output_dict[f't{n_topics}_w{wsize}'] = np.median(LDA_voxcorrs) return output_dict # define range to search over topic_opts = [5, 10, 15, 25, 50, 100, 150, 200] window_opts = [50, 100, 200, 500] # run function (takes about 9 hrs to run) output_dict = search_params(topic_opts, window_opts, training_corpus) # + # # save out results # with open('param_search.p', 'wb') as f: # pickle.dump(output_dict, f) # - # load in results with open('param_search.p', 'rb') as f: output_dict = pickle.load(f) output_dict # format results for plotting t5 = np.mean([v for k, v in output_dict.items() if 't5' in k]) t10 = np.mean([v for k, v in output_dict.items() if 't10' in k]) t15 = np.mean([v for k, v in output_dict.items() if 't15' in k]) t25 = np.mean([v for k, v in output_dict.items() if 't25' in k]) t50 = np.mean([v for k, v in output_dict.items() if 't50' in k]) t100 = np.mean([v for k, v in output_dict.items() if 't100' in k]) t150 = np.mean([v for k, v in output_dict.items() if 't150' in k]) t200 = np.mean([v for k, v in output_dict.items() if 't200' in k]) w50 = np.mean([v for k, v in output_dict.items() if 'w50' in k]) w100 = np.mean([v for k, v in output_dict.items() if 'w100' in k]) w200 = np.mean([v for k, v in output_dict.items() if 'w200' in k]) w500 = np.mean([v for k, v in output_dict.items() if 'w500' in k]) # median correlation of Huth model predictions (copied from other notebook) Huth_pred_acc = 0.06684583347295449 plt.plot([5, 10, 15, 25, 50, 100, 150, 200], [t5, t10, t15, t25, t50, t100, t150, t200]) # un-comment this line to compare to Huth model accuracy # plt.axhline(Huth_pred_acc, color='r') plt.xlabel('Number of topics') plt.ylabel('Mean voxel response\nprediction accuracy') plt.title('Effect of changing number of topics') plt.tight_layout() plt.plot([50, 100, 200, 500], [w50, w100, w200, w500]) # un-comment this line to compare to Huth model accuracy # plt.axhline(Huth_pred_acc, color='r') plt.xlabel('Window size') plt.ylabel('Mean voxel response\nprediction accuracy') plt.title('Effect of changing window size') plt.tight_layout() # + params = list(output_dict.keys()) corrs = list(output_dict.values()) # best combination of parameters best_param = params[np.argmax(corrs)] print(f'best combination of n_topics and window size:\n\t{best_param}') print(f'highest correlation between real and predicted voxel response:\n\t{output_dict[best_param]}') # - print(f'number of words in Huth corpus vocabulary :\n\t{len(eng1000.vocab)}') print(f'number of words in Wikipedia model corpus vocabulary :\n\t{len(set(" ".join(training_corpus).split(" ")))}')
data-stories/semantic-mapping/notebooks/search_params.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SimPy: Treatment Centre # # `simpy` uses process based model worldview. Given its simplicity it is a highly flexible discrete-event simulation package. # # One of the benefits of a package like `simpy` is that it is written in standard python and is free and open for others to use. # * For research this is highly beneficial: # * models and methods tested against them can be shared without concerns for commerical licensing. # * experimental results (either from model or method) can be recreated by other research teams. # * The version of `simpy` in use can also be controlled. This avoids backwards compatibility problems if models are returned to after several years. # # Here we will take a look at code that implements a full `simpy` model including, time dependent arrivals, results collection, control of random numbers and multiple replications. # # > The full scope of what is possible in `simpy` it out of scope. Detailed documentation for `simpy` and additional models can be found here: https://simpy.readthedocs.io/en/latest/ # # --- # ## Imports # # It is recommended that you use the provided conda virtual environment `os-sim`. # # >If you are running this code in **Google Colab** then `simpy` can be pip installed. # + # if running in Google Colab. # #!pip install simpy==4.0.1 # - import simpy simpy.__version__ import numpy as np import pandas as pd import itertools import math import matplotlib.pyplot as plt # need numpy > v1.18 np.__version__ # --- # # ## FirstTreatment: A health clinic based in the US. # # **This example is based on exercise 13 from Nelson (2013) page 170.** # # > *<NAME>. (2013). [Foundations and methods of stochastic simulation](https://www.amazon.co.uk/Foundations-Methods-Stochastic-Simulation-International/dp/1461461596/ref=sr_1_1?dchild=1&keywords=foundations+and+methods+of+stochastic+simulation&qid=1617050801&sr=8-1). Springer.* # # Patients arrive to the health clinic between 6am and 12am following a non-stationary poisson process. After 12am arriving patients are diverted elsewhere and remaining WIP is completed. On arrival, all patients quickly sign-in and are **triaged**. # # The health clinic expects two types of patient arrivals: # # **Trauma arrivals:** # * patients with severe illness and trauma that must first be stablised in a **trauma room**. # * these patients then undergo **treatment** in a cubicle before being discharged. # # **Non-trauma arrivals** # * patients with minor illness and no trauma go through **registration** and **examination** activities # * a proportion of non-trauma patients require **treatment** in a cubicle before being dicharged. # # > In this model treatment of trauma and non-trauma patients is modelled seperately. # ## Constants and defaults for modelling **as-is** # ### Distribution parameters # + # sign-in/triage parameters DEFAULT_TRIAGE_MEAN = 3.0 # registration parameters DEFAULT_REG_MEAN = 5.0 DEFAULT_REG_VAR= 2.0 # examination parameters DEFAULT_EXAM_MEAN = 16.0 DEFAULT_EXAM_VAR = 3.0 # trauma/stabilisation DEFAULT_TRAUMA_MEAN = 90.0 # Trauma treatment DEFAULT_TRAUMA_TREAT_MEAN = 30.0 DEFAULT_TRAUMA_TREAT_VAR = 4.0 # Non trauma treatment DEFAULT_NON_TRAUMA_TREAT_MEAN = 13.3 DEFAULT_NON_TRAUMA_TREAT_VAR = 2.0 # prob patient requires treatment given trauma DEFAULT_NON_TRAUMA_TREAT_P = 0.60 # proportion of patients triaged as trauma DEFAULT_PROB_TRAUMA = 0.12 # - # ### Time dependent arrival rates data # # The data for arrival rates varies between clinic opening at 6am and closure at 12am. # + NSPP_PATH = 'https://raw.githubusercontent.com/TomMonks/' \ + 'open-science-for-sim/main/src/notebooks/01_foss_sim/data/ed_arrivals.csv' # visualise ax = pd.read_csv(NSPP_PATH).plot(y='arrival_rate', x='period', rot=45, kind='bar',figsize=(12,5), legend=False) ax.set_xlabel('hour of day') ax.set_ylabel('mean arrivals'); # - # ### Resource counts # # > Inter count variables representing the number of resources at each activity in the processes. # + DEFAULT_N_TRIAGE = 1 DEFAULT_N_REG = 1 DEFAULT_N_EXAM = 3 DEFAULT_N_TRAUMA = 2 # Non-trauma cubicles DEFAULT_N_CUBICLES_1 = 1 # trauma pathway cubicles DEFAULT_N_CUBICLES_2 = 1 # - # ### Simulation model run settings # + # default random number SET DEFAULT_RNG_SET = None N_STREAMS = 20 # default results collection period DEFAULT_RESULTS_COLLECTION_PERIOD = 60 * 19 # number of replications. DEFAULT_N_REPS = 5 # Show the a trace of simulated events # not recommended when running multiple replications TRACE = True # - # ## Utility functions def trace(msg): ''' Utility function for printing a trace as the simulation model executes. Set the TRACE constant to False, to turn tracing off. Params: ------- msg: str string to print to screen. ''' if TRACE: print(msg) # ## Distribution classes # # To help with controlling sampling `numpy` distributions are packaged up into classes that allow easy control of random numbers. # # **Distributions included:** # * Exponential # * Log Normal # * Bernoulli # * Normal # * Uniform # + class Exponential: ''' Convenience class for the exponential distribution. packages up distribution parameters, seed and random generator. ''' def __init__(self, mean, random_seed=None): ''' Constructor Params: ------ mean: float The mean of the exponential distribution random_seed: int, optional (default=None) A random seed to reproduce samples. If set to none then a unique sample is created. ''' self.rng = np.random.default_rng(seed=random_seed) self.mean = mean def sample(self, size=None): ''' Generate a sample from the exponential distribution Params: ------- size: int, optional (default=None) the number of samples to return. If size=None then a single sample is returned. ''' return self.rng.exponential(self.mean, size=size) class Bernoulli: ''' Convenience class for the Bernoulli distribution. packages up distribution parameters, seed and random generator. ''' def __init__(self, p, random_seed=None): ''' Constructor Params: ------ p: float probability of drawing a 1 random_seed: int, optional (default=None) A random seed to reproduce samples. If set to none then a unique sample is created. ''' self.rng = np.random.default_rng(seed=random_seed) self.p = p def sample(self, size=None): ''' Generate a sample from the exponential distribution Params: ------- size: int, optional (default=None) the number of samples to return. If size=None then a single sample is returned. ''' return self.rng.binomial(n=1, p=self.p, size=size) class Lognormal: """ Encapsulates a lognormal distirbution """ def __init__(self, mean, stdev, random_seed=None): """ Params: ------- mean: float mean of the lognormal distribution stdev: float standard dev of the lognormal distribution random_seed: int, optional (default=None) Random seed to control sampling """ self.rng = np.random.default_rng(seed=random_seed) mu, sigma = self.normal_moments_from_lognormal(mean, stdev**2) self.mu = mu self.sigma = sigma def normal_moments_from_lognormal(self, m, v): ''' Returns mu and sigma of normal distribution underlying a lognormal with mean m and variance v source: https://blogs.sas.com/content/iml/2014/06/04/simulate-lognormal -data-with-specified-mean-and-variance.html Params: ------- m: float mean of lognormal distribution v: float variance of lognormal distribution Returns: ------- (float, float) ''' phi = math.sqrt(v + m**2) mu = math.log(m**2/phi) sigma = math.sqrt(math.log(phi**2/m**2)) return mu, sigma def sample(self): """ Sample from the normal distribution """ return self.rng.lognormal(self.mu, self.sigma) # + class Normal: ''' Convenience class for the normal distribution. packages up distribution parameters, seed and random generator. ''' def __init__(self, mean, sigma, random_seed=None): ''' Constructor Params: ------ mean: float The mean of the normal distribution sigma: float The stdev of the normal distribution random_seed: int, optional (default=None) A random seed to reproduce samples. If set to none then a unique sample is created. ''' self.rng = np.random.default_rng(seed=random_seed) self.mean = mean self.sigma = sigma def sample(self, size=None): ''' Generate a sample from the normal distribution Params: ------- size: int, optional (default=None) the number of samples to return. If size=None then a single sample is returned. ''' return self.rng.normal(self.mean, self.sigma, size=size) class Uniform(): ''' Convenience class for the Uniform distribution. packages up distribution parameters, seed and random generator. ''' def __init__(self, low, high, random_seed=None): ''' Constructor Params: ------ low: float lower range of the uniform high: float upper range of the uniform random_seed: int, optional (default=None) A random seed to reproduce samples. If set to none then a unique sample is created. ''' self.rand = np.random.default_rng(seed=random_seed) self.low = low self.high = high def sample(self, size=None): ''' Generate a sample from the uniform distribution Params: ------- size: int, optional (default=None) the number of samples to return. If size=None then a single sample is returned. ''' return self.rand.uniform(low=self.low, high=self.high, size=size) # - # ## Model parameterisation # # For convienience a container class is used to hold the large number of model parameters. The `Scenario` class includes defaults these can easily be changed and at runtime to experiments with different designs. class Scenario: ''' Container class for scenario parameters/arguments Passed to a model and its process classes ''' def __init__(self, random_number_set=DEFAULT_RNG_SET): ''' The init method sets up our defaults. Parameters: ----------- random_number_set: int, optional (default=DEFAULT_RNG_SET) Set to control the initial seeds of each stream of pseudo random numbers used in the model. ''' # sampling self.random_number_set = random_number_set self.init_sampling() # count of each type of resource self.init_resourse_counts() def set_random_no_set(self, random_number_set): ''' Controls the random sampling Parameters: ---------- random_number_set: int Used to control the set of psuedo random numbers used by the distributions in the simulation. ''' self.random_number_set = random_number_set self.init_sampling() def init_resourse_counts(self): ''' Init the counts of resources to default values... ''' self.n_triage = DEFAULT_N_TRIAGE self.n_reg = DEFAULT_N_REG self.n_exam = DEFAULT_N_EXAM self.n_trauma = DEFAULT_N_TRAUMA # non-trauma (1), trauma (2) treatment cubicles self.n_cubicles_1 = DEFAULT_N_CUBICLES_1 self.n_cubicles_2 = DEFAULT_N_CUBICLES_2 def init_sampling(self): ''' Create the distributions used by the model and initialise the random seeds of each. ''' # create random number streams rng_streams = np.random.default_rng(self.random_number_set) self.seeds = rng_streams.integers(0, 999999999, size=N_STREAMS) # create distributions # Triage duration self.triage_dist = Exponential(DEFAULT_TRIAGE_MEAN, random_seed=self.seeds[0]) # Registration duration (non-trauma only) self.reg_dist = Lognormal(DEFAULT_REG_MEAN, np.sqrt(DEFAULT_REG_VAR), random_seed=self.seeds[1]) # Evaluation (non-trauma only) self.exam_dist = Normal(DEFAULT_EXAM_MEAN, np.sqrt(DEFAULT_EXAM_VAR), random_seed=self.seeds[2]) # Trauma/stablisation duration (trauma only) self.trauma_dist = Exponential(DEFAULT_TRAUMA_MEAN, random_seed=self.seeds[3]) # Non-trauma treatment self.nt_treat_dist = Lognormal(DEFAULT_NON_TRAUMA_TREAT_MEAN, np.sqrt(DEFAULT_NON_TRAUMA_TREAT_VAR), random_seed=self.seeds[4]) # treatment of trauma patients self.treat_dist = Lognormal(DEFAULT_TRAUMA_TREAT_MEAN, np.sqrt(DEFAULT_TRAUMA_TREAT_VAR), random_seed=self.seeds[5]) # probability of non-trauma patient requiring treatment self.nt_p_treat_dist = Bernoulli(DEFAULT_NON_TRAUMA_TREAT_P, random_seed=self.seeds[6]) # probability of non-trauma versus trauma patient self.p_trauma_dist = Bernoulli(DEFAULT_PROB_TRAUMA, random_seed=self.seeds[7]) # init sampling for non-stationary poisson process self.init_nspp() def init_nspp(self): # read arrival profile self.arrivals = pd.read_csv(NSPP_PATH) self.arrivals['mean_iat'] = 60 / self.arrivals['arrival_rate'] # maximum arrival rate (smallest time between arrivals) self.lambda_max = self.arrivals['arrival_rate'].max() # thinning exponential self.arrival_dist = Exponential(60.0 / self.lambda_max, random_seed=self.seeds[8]) # thinning uniform rng self.thinning_rng = Uniform(low=0.0, high=1.0, random_seed=self.seeds[9]) # ## Patient Pathways Process Logic # # `simpy` uses a process based worldview. We can easily create whatever logic - simple or complex for the model. Here the process logic for trauma and non-trauma patients is seperated into two classes `TraumaPathway` and `NonTraumaPathway`. class TraumaPathway: ''' Encapsulates the process a patient with severe injuries or illness. These patients are signed into the ED and triaged as having severe injuries or illness. Patients are stabilised in resus (trauma) and then sent to Treatment. Following treatment they are discharged. ''' def __init__(self, identifier, env, args): ''' Constructor method Params: ----- identifier: int a numeric identifier for the patient. env: simpy.Environment the simulation environment args: Scenario Container class for the simulation parameters ''' self.identifier = identifier self.env = env self.args = args # metrics self.arrival = -np.inf self.wait_triage = -np.inf self.wait_trauma = -np.inf self.wait_treat = -np.inf self.total_time = -np.inf self.triage_duration = -np.inf self.trauma_duration = -np.inf self.treat_duration = -np.inf def execute(self): ''' simulates the major treatment process for a patient 1. request and wait for sign-in/triage 2. trauma 3. treatment ''' # record the time of arrival and entered the triage queue self.arrival = self.env.now # request sign-in/triage with self.args.triage.request() as req: yield req # record the waiting time for triage self.wait_triage = self.env.now - self.arrival trace(f'patient {self.identifier} triaged to trauma ' f'{self.env.now:.3f}') # sample triage duration. self.triage_duration = args.triage_dist.sample() yield self.env.timeout(self.triage_duration) self.triage_complete() # record the time that entered the trauma queue start_wait = self.env.now # request trauma room with self.args.trauma.request() as req: yield req # record the waiting time for trauma self.wait_trauma = self.env.now - start_wait # sample stablisation duration. self.trauma_duration = args.trauma_dist.sample() yield self.env.timeout(self.trauma_duration) self.trauma_complete() # record the time that entered the treatment queue start_wait = self.env.now # request treatment cubicle with self.args.cubicle_2.request() as req: yield req # record the waiting time for trauma self.wait_treat = self.env.now - start_wait trace(f'treatment of patient {self.identifier} at ' f'{self.env.now:.3f}') # sample treatment duration. self.treat_duration = args.trauma_dist.sample() yield self.env.timeout(self.treat_duration) self.treatment_complete() # total time in system self.total_time = self.env.now - self.arrival def triage_complete(self): ''' Triage complete event ''' trace(f'triage {self.identifier} complete {self.env.now:.3f}; ' f'waiting time was {self.wait_triage:.3f}') def trauma_complete(self): ''' Patient stay in trauma is complete. ''' trace(f'stabilisation of patient {self.identifier} at ' f'{self.env.now:.3f}') def treatment_complete(self): ''' Treatment complete event ''' trace(f'patient {self.identifier} treatment complete {self.env.now:.3f}; ' f'waiting time was {self.wait_treat:.3f}') class NonTraumaPathway(object): ''' Encapsulates the process a patient with minor injuries and illness. These patients are signed into the ED and triaged as having minor complaints and streamed to registration and then examination. Post examination 40% are discharged while 60% proceed to treatment. Following treatment they are discharged. ''' def __init__(self, identifier, env, args): ''' Constructor method Params: ----- identifier: int a numeric identifier for the patient. env: simpy.Environment the simulation environment args: Scenario Container class for the simulation parameters ''' self.identifier = identifier self.env = env self.args = args # triage resource self.triage = args.triage # metrics self.arrival = -np.inf self.wait_triage = -np.inf self.wait_reg = -np.inf self.wait_exam = -np.inf self.wait_treat = -np.inf self.total_time = -np.inf self.triage_duration = -np.inf self.reg_duration = -np.inf self.exam_duration = -np.inf self.treat_duration = -np.inf def execute(self): ''' simulates the non-trauma/minor treatment process for a patient 1. request and wait for sign-in/triage 2. patient registration 3. examination 4.1 40% discharged 4.2 60% treatment then discharge ''' # record the time of arrival and entered the triage queue self.arrival = self.env.now # request sign-in/triage with self.triage.request() as req: yield req # record the waiting time for triage self.wait_triage = self.env.now - self.arrival trace(f'patient {self.identifier} triaged to minors ' f'{self.env.now:.3f}') # sample triage duration. self.triage_duration = args.triage_dist.sample() yield self.env.timeout(self.triage_duration) trace(f'triage {self.identifier} complete {self.env.now:.3f}; ' f'waiting time was {self.wait_triage:.3f}') # record the time that entered the registration queue start_wait = self.env.now # request registration clert with self.args.registration.request() as req: yield req # record the waiting time for registration self.wait_reg = self.env.now - start_wait trace(f'registration of patient {self.identifier} at ' f'{self.env.now:.3f}') # sample registration duration. self.reg_duration = args.reg_dist.sample() yield self.env.timeout(self.reg_duration) trace(f'patient {self.identifier} registered at' f'{self.env.now:.3f}; ' f'waiting time was {self.wait_reg:.3f}') # record the time that entered the evaluation queue start_wait = self.env.now # request examination resource with self.args.exam.request() as req: yield req # record the waiting time for registration self.wait_exam = self.env.now - start_wait trace(f'examination of patient {self.identifier} begins ' f'{self.env.now:.3f}') # sample examination duration. self.exam_duration = args.exam_dist.sample() yield self.env.timeout(self.exam_duration) trace(f'patient {self.identifier} examination complete ' f'at {self.env.now:.3f};' f'waiting time was {self.wait_exam:.3f}') # sample if patient requires treatment? self.require_treat = self.args.nt_p_treat_dist.sample() if self.require_treat: # record the time that entered the treatment queue start_wait = self.env.now # request treatment cubicle with self.args.cubicle_1.request() as req: yield req # record the waiting time for treatment self.wait_treat = self.env.now - start_wait trace(f'treatment of patient {self.identifier} begins ' f'{self.env.now:.3f}') # sample treatment duration. self.treat_duration = args.nt_treat_dist.sample() yield self.env.timeout(self.treat_duration) trace(f'patient {self.identifier} treatment complete ' f'at {self.env.now:.3f};' f'waiting time was {self.wait_treat:.3f}') # total time in system self.total_time = self.env.now - self.arrival # ## Main model class # # The main class that a user interacts with to run the model is `TreatmentCentreModel`. This implements a `.run()` method, contains a simple algorithm for the non-stationary poission process for patients arrivals and inits instances of `TraumaPathway` or `NonTraumaPathway` depending on the arrival type. class TreatmentCentreModel: ''' The treatment centre model Patients arrive at random to a treatment centre, are triaged and then processed in either a trauma or non-trauma pathway. ''' def __init__(self, args): self.env = simpy.Environment() self.args = args self.init_resources() self.patients = [] self.trauma_patients = [] self.non_trauma_patients = [] self.rc_period = None self.results = None def init_resources(self): ''' Init the number of resources and store in the arguments container object Resource list: 1. Sign-in/triage bays 2. registration clerks 3. examination bays 4. trauma bays 5. non-trauma cubicles (1) 6. trauma cubicles (2) ''' # sign/in triage self.args.triage = simpy.Resource(self.env, capacity=self.args.n_triage) # registration self.args.registration = simpy.Resource(self.env, capacity=self.args.n_reg) # examination self.args.exam = simpy.Resource(self.env, capacity=self.args.n_exam) # trauma self.args.trauma = simpy.Resource(self.env, capacity=self.args.n_trauma) # non-trauma treatment self.args.cubicle_1 = simpy.Resource(self.env, capacity=self.args.n_cubicles_1) # trauma treatment self.args.cubicle_2 = simpy.Resource(self.env, capacity=self.args.n_cubicles_2) def run(self, results_collection_period=DEFAULT_RESULTS_COLLECTION_PERIOD): ''' Conduct a single run of the model in its current configuration Parameters: ---------- results_collection_period, float, optional default = DEFAULT_RESULTS_COLLECTION_PERIOD warm_up, float, optional (default=0) length of initial transient period to truncate from results. Returns: -------- None ''' # setup the arrival generator process self.env.process(self.arrivals_generator()) # store rc perio self.rc_period = results_collection_period # run self.env.run(until=results_collection_period) def arrivals_generator(self): ''' Simulate the arrival of patients to the model Patients either follow a TraumaPathway or NonTraumaPathway simpy process. Non stationary arrivals implemented via Thinning acceptance-rejection algorithm. ''' for patient_count in itertools.count(): # this give us the index of dataframe to use t = int(self.env.now // 60) % self.args.arrivals.shape[0] lambda_t = self.args.arrivals['arrival_rate'].iloc[t] #set to a large number so that at least 1 sample taken! u = np.Inf interarrival_time = 0.0 # reject samples if u >= lambda_t / lambda_max while u >= (lambda_t / self.args.lambda_max): interarrival_time += self.args.arrival_dist.sample() u = self.args.thinning_rng.sample() # iat yield self.env.timeout(interarrival_time) trace(f'patient {patient_count} arrives at: {self.env.now:.3f}') # sample if the patient is trauma or non-trauma trauma = self.args.p_trauma_dist.sample() if trauma: # create and store a trauma patient to update KPIs. new_patient = TraumaPathway(patient_count, self.env, self.args) self.trauma_patients.append(new_patient) else: # create and store a non-trauma patient to update KPIs. new_patient = NonTraumaPathway(patient_count, self.env, self.args) self.non_trauma_patients.append(new_patient) # start the pathway process for the patient self.env.process(new_patient.execute()) # ### Logic to process end of run results. # # the class `SimulationSummary` accepts a `TraumaCentreModel`. At the end of a run it can be used calculate mean queuing times and the percentage of the total run that a resource was in use. class SimulationSummary: ''' End of run result processing logic of the simulation model ''' def __init__(self, model): ''' Constructor Params: ------ model: TraumaCentreModel The model. ''' self.model = model self.args = model.args self.results = None def process_run_results(self): ''' Calculates statistics at end of run. ''' self.results = {} # list of all patients patients = self.model.non_trauma_patients + self.model.trauma_patients # mean triage times (both types of patient) mean_triage_wait = self.get_mean_metric('wait_triage', patients) # triage utilisation (both types of patient) triage_util = self.get_resource_util('triage_duration', self.args.n_triage, patients) # mean waiting time for registration (non_trauma) mean_reg_wait = self.get_mean_metric('wait_reg', self.model.non_trauma_patients) # registration utilisation (trauma) reg_util = self.get_resource_util('reg_duration', self.args.n_reg, self.model.non_trauma_patients) # mean waiting time for examination (non_trauma) mean_wait_exam = self.get_mean_metric('wait_exam', self.model.non_trauma_patients) # examination utilisation (non-trauma) exam_util = self.get_resource_util('exam_duration', self.args.n_exam, self.model.non_trauma_patients) # mean waiting time for treatment (non-trauma) mean_treat_wait = self.get_mean_metric('wait_treat', self.model.non_trauma_patients) # treatment utilisation (non_trauma) treat_util1 = self.get_resource_util('treat_duration', self.args.n_cubicles_1, self.model.non_trauma_patients) # mean total time (non_trauma) mean_total = self.get_mean_metric('total_time', self.model.non_trauma_patients) # mean waiting time for trauma mean_trauma_wait = self.get_mean_metric('wait_trauma', self.model.trauma_patients) # trauma utilisation (trauma) trauma_util = self.get_resource_util('trauma_duration', self.args.n_trauma, self.model.trauma_patients) # mean waiting time for treatment (rauma) mean_treat_wait2 = self.get_mean_metric('wait_treat', self.model.trauma_patients) # treatment utilisation (trauma) treat_util2 = self.get_resource_util('treat_duration', self.args.n_cubicles_2, self.model.trauma_patients) # mean total time (trauma) mean_total2 = self.get_mean_metric('total_time', self.model.trauma_patients) self.results = {'00_arrivals':len(patients), '01a_triage_wait': mean_triage_wait, '01b_triage_util': triage_util, '02a_registration_wait':mean_reg_wait, '02b_registration_util': reg_util, '03a_examination_wait':mean_wait_exam, '03b_examination_util': exam_util, '04a_treatment_wait(non_trauma)':mean_treat_wait, '04b_treatment_util(non_trauma)':treat_util1, '05_total_time(non-trauma)':mean_total, '06a_trauma_wait':mean_trauma_wait, '06b_trauma_util':trauma_util, '07a_treatment_wait(trauma)':mean_treat_wait2, '07b_treatment_util(trauma)':treat_util2, '08_total_time(trauma)':mean_total2, '09_throughput': self.get_throughput(patients)} def get_mean_metric(self, metric, patients): ''' Calculate mean of the performance measure for the select cohort of patients, Only calculates metrics for patients where it has been measured. Params: ------- metric: str The name of the metric e.g. 'wait_treat' patients: list A list of patients ''' mean = np.array([getattr(p, metric) for p in patients if getattr(p, metric) > -np.inf]).mean() return mean def get_resource_util(self, metric, n_resources, patients): ''' Calculate proportion of the results collection period where a resource was in use. Done by tracking the duration by patient. Only calculates metrics for patients where it has been measured. Params: ------- metric: str The name of the metric e.g. 'treatment_duration' patients: list A list of patients ''' total = np.array([getattr(p, metric) for p in patients if getattr(p, metric) > -np.inf]).sum() return total / (self.model.rc_period * n_resources) def get_throughput(self, patients): ''' Returns the total number of patients that have successfully been processed and discharged in the treatment centre (they have a total time record) Params: ------- patients: list list of all patient objects simulated. Returns: ------ float ''' return len([p for p in patients if p.total_time > -np.inf]) def summary_frame(self): ''' Returns run results as a pandas.DataFrame Returns: ------- pd.DataFrame ''' #append to results df if self.results is None: self.process_run_results() df = pd.DataFrame({'1':self.results}) df = df.T df.index.name = 'rep' return df # ## Executing a model # # We note that there are **many ways** to setup a `simpy` model and execute it (that is part of its fantastic flexibility). The organisation of code we show below is based on our experience of using the package in practice. The approach also allows for easy parallisation over multiple CPU cores using `joblib`. # # We include two functions. `single_run()` and `multiple_replications`. The latter is used to repeatedly call and process the results from `single_run`. def single_run(scenario, rc_period=DEFAULT_RESULTS_COLLECTION_PERIOD, random_no_set=DEFAULT_RNG_SET): ''' Perform a single run of the model and return the results Parameters: ----------- scenario: Scenario object The scenario/paramaters to run rc_period: int The length of the simulation run that collects results random_no_set: int or None, optional (default=DEFAULT_RNG_SET) Controls the set of random seeds used by the stochastic parts of the model. Set to different ints to get different results. Set to None for a random set of seeds. Returns: -------- pandas.DataFrame: results from single run. ''' # set random number set - this controls sampling for the run. scenario.set_random_no_set(random_no_set) # create an instance of the model model = TreatmentCentreModel(scenario) # run the model model.run(results_collection_period=rc_period) # run results summary = SimulationSummary(model) summary_df = summary.summary_frame() return summary_df def multiple_replications(scenario, rc_period=DEFAULT_RESULTS_COLLECTION_PERIOD, n_reps=5): ''' Perform multiple replications of the model. Params: ------ scenario: Scenario Parameters/arguments to configurethe model rc_period: float, optional (default=DEFAULT_RESULTS_COLLECTION_PERIOD) results collection period. the number of minutes to run the model to collect results n_reps: int, optional (default=DEFAULT_N_REPS) Number of independent replications to run. Returns: -------- pandas.DataFrame ''' results = [single_run(scenario, rc_period, random_no_set=rep) for rep in range(n_reps)] #format and return results in a dataframe df_results = pd.concat(results) df_results.index = np.arange(1, len(df_results)+1) df_results.index.name = 'rep' return df_results # ### Single run of the model # # The script below performs a single replication of the simulation model. # # **Try:** # # * Changing the `random_no_set` of the `single_run` call. # * Assigning the value `True` to `TRACE` # + # Change this to True to see a trace... TRACE = False # create the default scenario args = Scenario() # use the single_run() func # try changing `random_no_set` to see different run results print('Running simulation ...', end=' => ') results = single_run(args, random_no_set=42) print('simulation complete.') # show results (transpose replication for easier view) results.T # - # ### Multiple independent replications # # Given the set up it is now easy to perform multiple replications of the model. # # **Try**: # * Changing `n_reps` # + # %%time args = Scenario() #run multiple replications. #by default it runs 5 replications. print('Running multiple replications', end=' => ') results = multiple_replications(args, n_reps=50) print('done.\n') results.head(3) # - # summarise the results (2.dp) results.mean().round(2) # ### Visualise replications fig, ax = plt.subplots(2, 1, figsize=(12,4)) ax[0].hist(results['01a_triage_wait']); ax[0].set_ylabel('wait for triage') ax[1].hist(results['02a_registration_wait']); ax[1].set_ylabel('wait for registration'); # ## Scenario Analysis # # The structured approach we took to organising our `simpy` model allows us to easily experiment with alternative scenarios. We could employ a formal experimental design if needed. For simplicity here we will limit ourselves by running user chosen competing scenarios and compare their mean performance to the base case. # # > Note that we have our `simpy` model includes an implementation of **Common Random Numbers** across scenarios. def get_scenarios(): ''' Creates a dictionary object containing objects of type `Scenario` to run. Returns: -------- dict Contains the scenarios for the model ''' scenarios = {} scenarios['base'] = Scenario() # extra triage capacity scenarios['triage+1'] = Scenario() scenarios['triage+1'].n_triage += 1 # extra examination capacity scenarios['exam+1'] = Scenario() scenarios['exam+1'].n_exam += 1 # extra non-trauma treatment capacity scenarios['treat+1'] = Scenario() scenarios['treat+1'].n_cubicles_1 += 1 scenarios['triage+exam'] = Scenario() scenarios['triage+exam'].n_triage += 1 scenarios['triage+exam'].n_exam += 1 return scenarios def run_scenario_analysis(scenarios, rc_period, n_reps): ''' Run each of the scenarios for a specified results collection period and replications. Params: ------ scenarios: dict dictionary of Scenario objects rc_period: float model run length n_rep: int Number of replications ''' print('Scenario Analysis') print(f'No. Scenario: {len(scenarios)}') print(f'Replications: {n_reps}') scenario_results = {} for sc_name, scenario in scenarios.items(): print(f'Running {sc_name}', end=' => ') replications = multiple_replications(scenario, rc_period=rc_period, n_reps=n_reps) print('done.\n') #save the results scenario_results[sc_name] = replications print('Scenario analysis complete.') return scenario_results # ### Script to run scenario analysis # + #number of replications N_REPS = 20 #get the scenarios scenarios = get_scenarios() #run the scenario analysis scenario_results = run_scenario_analysis(scenarios, DEFAULT_RESULTS_COLLECTION_PERIOD, N_REPS) # - def scenario_summary_frame(scenario_results): ''' Mean results for each performance measure by scenario Parameters: ---------- scenario_results: dict dictionary of replications. Key identifies the performance measure Returns: ------- pd.DataFrame ''' columns = [] summary = pd.DataFrame() for sc_name, replications in scenario_results.items(): summary = pd.concat([summary, replications.mean()], axis=1) columns.append(sc_name) summary.columns = columns return summary # as well as rounding you may want to rename the cols/rows to # more readable alternatives. summary_frame = scenario_summary_frame(scenario_results) summary_frame.round(2) # ## End
src/notebooks/01_foss_sim/01_simpy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="ZfbRXE-Z16oB" # # Chapter3 ニューラルネットワークの基本 # ## 2. アヤメの分類【サンプルコード】 # + id="Wb2fGwco1iq6" # 必要なパッケージのインストール import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split import torch from torch.utils.data import TensorDataset, DataLoader from torch import nn import torch.nn.functional as F from torch import optim # + [markdown] id="5wx2kgfW3KK2" # ## 2.1. アヤメ(Iris)データセット # + id="mNIB9efo2m-g" # データセットのロード iris = load_iris() # + id="Qg7vOpDT20F5" executionInfo={"status": "ok", "timestamp": 1603101516044, "user_tz": -540, "elapsed": 22609, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="05666252-a6a4-4945-e067-a88c7950ee21" colab={"base_uri": "https://localhost:8080/", "height": 1000} # データセットの説明 print(iris.DESCR) # + id="BxWg8ORg23TN" executionInfo={"status": "ok", "timestamp": 1603101516044, "user_tz": -540, "elapsed": 22604, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="1a0fc3fc-0b0b-482f-a986-e8bab2a357ef" colab={"base_uri": "https://localhost:8080/", "height": 123} # データフレームに変換 df = pd.DataFrame(iris.data, columns=iris.feature_names) print(df.head()) # + id="JzO-xRgw2_-P" executionInfo={"status": "ok", "timestamp": 1603101516044, "user_tz": -540, "elapsed": 22598, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="93653659-ae50-400d-9ebb-10056c9bf902" colab={"base_uri": "https://localhost:8080/", "height": 158} # 品種の追加 df['Variety'] = iris.target df.loc[df['Variety'] == 0, 'Variety'] = 'setosa' df.loc[df['Variety'] == 1, 'Variety'] = 'versicolor' df.loc[df['Variety'] == 2, 'Variety'] = 'virginica' print(df.head()) # + id="hFVWLfGR3CPT" executionInfo={"status": "ok", "timestamp": 1603101516045, "user_tz": -540, "elapsed": 22594, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="97d6ab1c-7c70-4469-caf3-9f213ed9c745" colab={"base_uri": "https://localhost:8080/", "height": 176} # 基本統計量の確認 print(df.describe()) # + id="GXjzxj263EDb" executionInfo={"status": "ok", "timestamp": 1603101523000, "user_tz": -540, "elapsed": 29544, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="03cb1878-f5af-4f67-b28c-c86dab7d8b6d" colab={"base_uri": "https://localhost:8080/", "height": 725} # データセットの可視化 sns.pairplot(df, hue='Variety') plt.show() # + [markdown] id="8IAqpxjG3NYS" # ## 2.2. 前準備 # + id="S_b0Suxh3GPH" # データセットの読み込み iris = load_iris() data = iris.data # 特徴量 label = iris.target # ラベル(品種) # + [markdown] id="QI10VTuz3WQv" # ## 2.3. 訓練データとテストデータの用意 # + id="cfzT7Hrw3Yb4" executionInfo={"status": "ok", "timestamp": 1603101523000, "user_tz": -540, "elapsed": 29525, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="aeacefbc-f56f-4b38-9719-71ae8ea738c9" colab={"base_uri": "https://localhost:8080/", "height": 87} # 学習データとテストデータを分割 train_data, test_data, train_label, test_label = train_test_split( data, label, test_size=0.2) # 学習データとテストデータのサイズの確認 print("train_data size: {}".format(len(train_data))) print("test_data size: {}".format(len(test_data))) print("train_label size: {}".format(len(train_label))) print("test_label size: {}".format(len(test_label))) # + id="WDefxCZi3aFd" # ndarrayをPyTorchのTensorに変換 train_x = torch.Tensor(train_data) test_x = torch.Tensor(test_data) train_y = torch.LongTensor(train_label) # torch.int64のデータ型に test_y = torch.LongTensor(test_label) # torch.int64のデータ型に # + id="c0HE-4Hl3cVQ" # 特徴量とラベルを結合したデータセットを作成 train_dataset = TensorDataset(train_x, train_y) test_dataset = TensorDataset(test_x, test_y) # + id="LDKHxoUK3dhN" executionInfo={"status": "ok", "timestamp": 1603101523002, "user_tz": -540, "elapsed": 29504, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="e808029f-3011-4500-dd05-0e974777f671" colab={"base_uri": "https://localhost:8080/", "height": 52} # ミニバッチサイズを指定したデータローダーを作成 train_batch = DataLoader( dataset=train_dataset, # データセットの指定 batch_size=5, # バッチサイズの指定 shuffle=True, # シャッフルするかどうかの指定 num_workers=2) # コアの数 test_batch = DataLoader( dataset=test_dataset, batch_size=5, shuffle=False, num_workers=2) # ミニバッチデータセットの確認 for data, label in train_batch: print("batch data size: {}".format(data.size())) # バッチの入力データサイズ print("batch label size: {}".format(label.size())) # バッチのラベルサイズ break # + [markdown] id="JmkhKtNC3f2n" # ## 2.4. ニューラルネットワークの定義 # + id="caL2VyjF3ehM" # ニューラルネットワークの定義 class Net(nn.Module): def __init__(self, D_in, H, D_out): super(Net, self).__init__() self.linear1 = torch.nn.Linear(D_in, H) self.linear2 = torch.nn.Linear(H, D_out) def forward(self, x): x = F.relu(self.linear1(x)) x = self.linear2(x) return x # + id="PA4UaoKi3jFO" # ハイパーパラメータの定義 D_in = 4 # 入力次元: 4 H = 100 # 隠れ層次元: 100 D_out = 3 # 出力次元: 3 epoch = 100 # 学習回数 # + id="b54ZDXnd3j6x" executionInfo={"status": "ok", "timestamp": 1603101532069, "user_tz": -540, "elapsed": 38548, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="e34028b8-898c-4dff-b5f0-a6aa07d7a67c" colab={"base_uri": "https://localhost:8080/", "height": 34} # ネットワークのロード # CPUとGPUどちらを使うかを指定 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') net = Net(D_in, H, D_out).to(device) # デバイスの確認 print("Device: {}".format(device)) # + [markdown] id="SCi0ojdj3lw6" # ## 2.5. 損失関数と最適化関数の定義 # + id="IK-7MsVT3k6n" # 損失関数の定義 criterion = nn.CrossEntropyLoss() # 最適化関数の定義 optimizer = optim.Adam(net.parameters()) # + [markdown] id="zr3gcqPV3pvp" # ## 2.6. 学習 # + id="5cdxJ6Rq3o4G" executionInfo={"status": "ok", "timestamp": 1603101557116, "user_tz": -540, "elapsed": 63577, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="6acc02cc-e112-4c8f-97e4-9f88c886ae6c" colab={"base_uri": "https://localhost:8080/", "height": 1000} # 損失と正解率を保存するリストを作成 train_loss_list = [] # 学習損失 train_accuracy_list = [] # 学習データの正答率 test_loss_list = [] # 評価損失 test_accuracy_list = [] # テストデータの正答率 # 学習(エポック)の実行 for i in range(epoch): # エポックの進行状況を表示 print('---------------------------------------------') print("Epoch: {}/{}".format(i+1, epoch)) # 損失と正解率の初期化 train_loss = 0 # 学習損失 train_accuracy = 0 # 学習データの正答数 test_loss = 0 # 評価損失 test_accuracy = 0 # テストデータの正答数 # ---------学習パート--------- # # ニューラルネットワークを学習モードに設定 net.train() # ミニバッチごとにデータをロードし学習 for data, label in train_batch: # GPUにTensorを転送 data = data.to(device) label = label.to(device) # 勾配を初期化 optimizer.zero_grad() # データを入力して予測値を計算(順伝播) y_pred_prob = net(data) # 損失(誤差)を計算 loss = criterion(y_pred_prob, label) # 勾配の計算(逆伝搬) loss.backward() # パラメータ(重み)の更新 optimizer.step() # ミニバッチごとの損失を蓄積 train_loss += loss.item() # 予測したラベルを予測確率y_pred_probから計算 y_pred_label = torch.max(y_pred_prob, 1)[1] # ミニバッチごとに正解したラベル数をカウント train_accuracy += torch.sum(y_pred_label == label).item() / len(label) # ミニバッチの平均の損失と正解率を計算 batch_train_loss = train_loss / len(train_batch) batch_train_accuracy = train_accuracy / len(train_batch) # ---------学習パートはここまで--------- # # ---------評価パート--------- # # ニューラルネットワークを評価モードに設定 net.eval() # 評価時の計算で自動微分機能をオフにする with torch.no_grad(): for data, label in test_batch: # GPUにTensorを転送 data = data.to(device) label = label.to(device) # データを入力して予測値を計算(順伝播) y_pred_prob = net(data) # 損失(誤差)を計算 loss = criterion(y_pred_prob, label) # ミニバッチごとの損失を蓄積 test_loss += loss.item() # 予測したラベルを予測確率y_pred_probから計算 y_pred_label = torch.max(y_pred_prob, 1)[1] # ミニバッチごとに正解したラベル数をカウント test_accuracy += torch.sum(y_pred_label == label).item() / len(label) # ミニバッチの平均の損失と正解率を計算 batch_test_loss = test_loss / len(test_batch) batch_test_accuracy = test_accuracy / len(test_batch) # ---------評価パートはここまで--------- # # エポックごとに損失と正解率を表示 print("Train_Loss: {:.4f} Train_Accuracy: {:.4f}".format( batch_train_loss, batch_train_accuracy)) print("Test_Loss: {:.4f} Test_Accuracy: {:.4f}".format( batch_test_loss, batch_test_accuracy)) # 損失と正解率をリスト化して保存 train_loss_list.append(batch_train_loss) train_accuracy_list.append(batch_train_accuracy) test_loss_list.append(batch_test_loss) test_accuracy_list.append(batch_test_accuracy) # + [markdown] id="ELW8Rkph3vFw" # ## 2.7. 結果の可視化 # + id="hcm4QGVq3wSY" executionInfo={"status": "ok", "timestamp": 1603101557702, "user_tz": -540, "elapsed": 64155, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="9fa22a53-0f84-473f-bfc1-333f07ebde8d" colab={"base_uri": "https://localhost:8080/", "height": 573} # 損失 plt.figure() plt.title('Train and Test Loss') plt.xlabel('Epoch') plt.ylabel('Loss') plt.plot(range(1, epoch+1), train_loss_list, color='blue', linestyle='-', label='Train_Loss') plt.plot(range(1, epoch+1), test_loss_list, color='red', linestyle='--', label='Test_Loss') plt.legend() # 凡例 # 正解率 plt.figure() plt.title('Train and Test Accuracy') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.plot(range(1, epoch+1), train_accuracy_list, color='blue', linestyle='-', label='Train_Accuracy') plt.plot(range(1, epoch+1), test_accuracy_list, color='red', linestyle='--', label='Test_Accuracy') plt.legend() # 表示 plt.show() # + [markdown] id="5GY7Kiaq39Pb" # ## 2.8. 新たにテスト用のデータセットを用意して推定したい場合 # + id="6LL7SWB738RY" # CPUとGPUどちらを使うかを指定 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 学習パラメータを保存 torch.save(net.to(device).state_dict(), '3-2_iris_net.pth') # + id="J2lGB1j04A04" executionInfo={"status": "ok", "timestamp": 1603101557703, "user_tz": -540, "elapsed": 64141, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="7edda885-e2de-485f-93e3-8b4ed6f2e5b0" colab={"base_uri": "https://localhost:8080/", "height": 34} # ハイパーパラメータの定義 D_in = 4 # 入力次元: 4 H = 100 # 隠れ層次元: 100 D_out = 3 # 出力次元: 3 # 保存した学習パラメータを読み込む net2 = Net(D_in, H, D_out).to(device) net2.load_state_dict(torch.load('3-2_iris_net.pth', map_location=device)) # + id="UquHHIrh4CMn" executionInfo={"status": "ok", "timestamp": 1603101557704, "user_tz": -540, "elapsed": 64134, "user": {"displayName": "\u658e\u85e4\u52c7\u54c9", "photoUrl": "", "userId": "04901706568829922240"}} outputId="8e5041ac-d998-4488-ad12-d5f30a339b8d" colab={"base_uri": "https://localhost:8080/", "height": 34} # ニューラルネットワークを評価モードに設定 net2.eval() # 推定時の計算で自動微分機能をオフにする with torch.no_grad(): # 初期化 test_accuracy = 0 for data, label in test_batch: # GPUにTensorを転送 data = data.to(device) label = label.to(device) # データを入力して予測値を計算(順伝播) y_pred_prob = net(data) # 予測したラベルを予測確率y_pred_probから計算 y_pred_label = torch.max(y_pred_prob, 1)[1] # ミニバッチごとに正解したラベル数をカウント test_accuracy += torch.sum(y_pred_label == label).item() / len(label) # ミニバッチの平均の損失と正解率を計算 batch_test_accuracy = test_accuracy / len(test_batch) # 正解率を表示 print("Accuracy: {:.3f}".format(batch_test_accuracy))
Ubuntu/Chapter3/Section3-2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from sklearn.datasets import load_iris from sklearn.tree import DecisionTreeClassifier iris=load_iris() X=iris["data"][:,2:] y=iris["target"] tree_clf=DecisionTreeClassifier(max_depth=2) tree_clf.fit(X,y) # + from sklearn.tree import export_graphviz import os export_graphviz( tree_clf, out_file=os.getcwd()+"\\iris_tree_clf.dot", feature_names=iris.feature_names[2:], class_names=iris.target_names, rounded=True, filled=True, ) # + import graphviz with open("iris_tree_clf.dot") as f: dot_file=f.read() graphviz.Source(dot_file) # - tree_clf.predict_proba([[5, 1.5]]) tree_clf.predict([[5, 1.5]]) # ## Regression from sklearn.tree import DecisionTreeRegressor tree_reg = DecisionTreeRegressor(max_depth=2) tree_reg.fit(X, y) # + from sklearn.tree import export_graphviz import os export_graphviz( tree_reg, out_file=os.getcwd()+"\\iris_tree_reg.dot", feature_names=iris.feature_names[2:], class_names=iris.target_names, rounded=True, filled=True, ) # + import graphviz with open("iris_tree_reg.dot") as f: dot_file=f.read() graphviz.Source(dot_file)
Decision Trees/Decision Tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import psychrnn from psychrnn.tasks import rdm as rd from psychrnn.backend.models.basic import Basic import tensorflow as tf from matplotlib import pyplot as plt # %matplotlib inline # - # define params globally first before passing to RDM, mess w/ model params and explain what can do. (can turn on and off dale's law). some masking, input output connectivity. train_params variables stuff dt = 10 # time-step tau = 100 # intrinsic time constant of neural state decay T = 2000 # time to run for (number of steps is T/dt) N_batch = 50 # number of trials per training step N_rec = 50 # number of recurrent units name = 'basicModel' rdm = rd.RDM(dt = dt, tau = tau, T = T, N_batch = N_batch) # RDM puts the params passed in as well as other generated params into a dict we can then use to create our model. params = rdm.__dict__ print(params) # Generate *N_batch* trials to be used in training gen = rdm.batch_generator() # We add in a few params that Basic(RNN) needs but that RDM doesn't generate for us. params['name'] = name #Used to scope out a namespace for global variables. params['N_rec'] = N_rec # There are some other optional parameters we can add in: params['dale_ratio'] = None # Default: None -- when the dale_ratio is set, dale's law is applied params['rec_noise'] = 0.0 # Default: 0.0 -- how much noise to add to the new_state calculation params['W_in_train'] = True # Indicates whether W_in is trainable. Default: True params['W_rec_train'] = True # Indicates whether W_rec is trainable. Default: True params['W_out_train'] = True # Indicates whether W_out is trainable. Default: True params['b_rec_train'] = True # Indicates whether b_rec is trainable. Default: True params['b_out_train'] = True # Indicates whether b_out is trainable. Default: True params['init_state_train'] = True # Indicates whether init_state is trainable. Default: True params['load_weights_path'] = None # When given a path, loads weights from file in that path. Default: None # Instantiate our model basicModel = Basic(params) # Start a tensorflow session with loss, regularization, predictions, and regularized loss defined. basicModel.build() # Set the training parameters for our model. All of the parameters below are optional. train_params = {} train_params['save_weights_path'] = '../weights/refactor_weights.npz' # Where to save the model after training. Default: None train_params['training_iters'] = 100000 # number of iterations to train for Default: 10000 train_params['learning_rate'] = .001 # Sets learning rate if use default optimizer Default: .001 train_params['loss_epoch'] = 10 # Compute and record loss every 'loss_epoch' epochs. Default: 10 train_params['verbosity'] = True # If true, prints information as training progresses. Default: True train_params['save_training_weights_epoch'] = 100 # save training weights every 'save_training_weights_epoch' epochs. Default: 100 train_params['training_weights_path'] = None # where to save training weights as training progresses. Default: None train_params['generator_function'] = None # replaces trial_batch_generator with the generator_function when not none. Default: None train_params['optimizer'] = tf.train.AdamOptimizer(learning_rate=train_params['learning_rate']) # What optimizer to use to compute gradients. Default: tf.train.AdamOptimizer(learning_rate=train_params['learning_rate']) train_params['clip_grads'] = True # If true, clip gradients by norm 1. Default: True # Train our model. basicModel.train(gen, train_params) # ### Plot results # Extract the next trial from the generator x,y,m = next(gen) # Plot the x value of the trial -- for the RDM, this includes two input neurons with different coherence. plt.plot(range(0, len(x[0,:,:])*dt,dt), x[0,:,:]) plt.ylabel("Input Magnitude") plt.xlabel("Time (ms)") plt.title("Input Data") # Run the trained model on this trial (not included in the training set). results = basicModel.test(x) output = results[0] state_var = results[1] plt.plot(range(0, len(output[0,:,:])*dt,dt),output[0,:,:]) plt.ylabel("Activity of Output Unit") plt.xlabel("Time (ms)") plt.title("Output on New Sample") plt.plot(range(0, len(state_var[0,:,:])*dt,dt),state_var[0,:,:]) plt.ylabel("State Variable Value") plt.xlabel("Time (ms)") plt.title("Evolution of State Variables over Time") # Clean up the model to clear out the tensorflow namespace basicModel.destruct()
psychrnn/notebooks/RDM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # REINFORCE in TensorFlow # # This notebook implements a basic reinforce algorithm a.k.a. policy gradient for CartPole env. # # It has been deliberately written to be as simple and human-readable. # # The notebook assumes that you have [openai gym](https://github.com/openai/gym) installed. # # In case you're running on a server, [use xvfb](https://github.com/openai/gym#rendering-on-a-server) # + import gym import numpy as np, pandas as pd import matplotlib.pyplot as plt # %matplotlib inline env = gym.make("CartPole-v0") #gym compatibility: unwrap TimeLimit if hasattr(env,'env'): env=env.env env.reset() n_actions = env.action_space.n state_dim = env.observation_space.shape[0] plt.imshow(env.render("rgb_array")) # + import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.distributions import Categorical import torchvision.transforms as T from torchsummary import summary device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') device # - import pixiedust # # Building the policy network # For REINFORCE algorithm, we'll need a model that predicts action probabilities given states. # # For numerical stability, please __do not include the softmax layer into your network architecture__. # # We'll use softmax or log-softmax where appropriate. class policy_estimator(): def __init__(self, env): self.n_inputs = env.observation_space.shape[0] self.n_outputs = env.action_space.n # Define network self.network = nn.Sequential( nn.Linear(self.n_inputs, 16), nn.ReLU(), nn.Linear(16, self.n_outputs), nn.Softmax(dim=-1)) def predict(self, state): action_probs = self.network(torch.FloatTensor(state)) return action_probs def discount_rewards(rewards, gamma=0.99): r = np.array([gamma**i * rewards[i] for i in range(len(rewards))]) # Reverse the array direction for cumsum and then # revert back to the original order r = r.cumsum()[::-1].copy() return r def generate_session(_agent, t_max=1000): s_0 = env.reset() states, actions, rewards = [], [], [] done = False action_space = np.arange(env.action_space.n) for t in range(t_max): # Get actions and convert to numpy array action_probs = _agent.predict(s_0).detach().numpy() action = np.random.choice(action_space, p=action_probs) s_1, r, done, _ = env.step(action) states.append(s_0) rewards.append(r) actions.append(action) s_0 = s_1 if done: break return states, actions, rewards def train_on_session(_agent, _optimizer, states, actions, rewards, gamma=0.99): _optimizer.zero_grad() state_tensor = torch.FloatTensor(states) reward_tensor = torch.FloatTensor(discount_rewards(rewards)) # Actions are used as indices, must be LongTensor action_tensor = torch.LongTensor(actions) # Calculate loss prob = _agent.predict(state_tensor) logprob = torch.log( _agent.predict(state_tensor)) selected_logprobs = logprob[np.arange(len(action_tensor)), action_tensor] selected_probs = prob[np.arange(len(action_tensor)), action_tensor] entropy = - torch.sum(selected_probs * selected_logprobs) loss = -(reward_tensor * selected_logprobs).mean() - 0.001*entropy # Calculate gradients loss.backward() # Apply gradients _optimizer.step() return np.sum(rewards) pe = policy_estimator(env) optimizer = optim.Adam(pe.network.parameters(), lr=0.01) # + pixiedust={"displayParams": {}} for i in range(100): rewards = [train_on_session(pe, optimizer, *generate_session(pe)) for _ in range(100)] # generate new sessions print("mean reward:%.3f" % (np.mean(rewards))) if np.mean(rewards) > 500: print("You Win!") # but you can train even further break # - # ### Results & video def generate_session(_agent, t_max=1000): s_0 = env.reset() states, actions, rewards = [], [], [] done = False action_space = np.arange(env.action_space.n) for t in range(t_max): # Get actions and convert to numpy array action_probs = _agent.predict(s_0).detach().numpy() action = np.random.choice(action_space, p=action_probs) s_1, r, done, _ = env.step(action) states.append(s_0) rewards.append(r) actions.append(action) s_0 = s_1 if done: break return sum(rewards) #record sessions import gym.wrappers env = gym.wrappers.Monitor(gym.make("CartPole-v0"),directory="videos",force=True) sessions = [generate_session(pe) for _ in range(100)] env.close() # + #show video from IPython.display import HTML import os video_names = list(filter(lambda s:s.endswith(".mp4"),os.listdir("./videos/"))) HTML(""" <video width="640" height="480" controls> <source src="{}" type="video/mp4"> </video> """.format("./videos/"+video_names[-1])) #this may or may not be _last_ video. Try other indices # - from submit import submit_cartpole submit_cartpole(generate_session, pe, "<EMAIL>", "mygnPZCi4gI87AfJ") # + # That's all, thank you for your attention! # Not having enough? There's an actor-critic waiting for you in the honor section. # But make sure you've seen the videos first.
week5_policy_based/practice_reinforce.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/JohnAngeloDazo/CPEN-21A-ECE-2-1/blob/main/Control_Structure.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="9Aw0Yqq8Be7i" # ##If Statement # + colab={"base_uri": "https://localhost:8080/"} id="XoSAudEUBiMS" outputId="4fc4e272-94bf-4d00-bf32-27b7d0c41297" a=12 b=100 if b>a: print("b is greater than a") # + [markdown] id="lIPCIIMQBt5L" # ##Elif Statement # + colab={"base_uri": "https://localhost:8080/"} id="iF2AnVxrBx5j" outputId="19183f10-fdc3-4e21-86fa-ad52f7f52204" a=12 b=13 if b>a: print("b is greater than a") elif b==a: print("b is equal to a") # + [markdown] id="uyDEPGjrCrna" # ##Else Statement # + colab={"base_uri": "https://localhost:8080/"} id="gMCX9XnhCtqa" outputId="a6337232-d3d5-4513-9175-4f0c6273ab9a" a=30 b=30 if a>b: print("a is greater than b") elif b>a: print("b is greater than b") else: print("a is equal to b") # + [markdown] id="E1KPEauVDBDJ" # ##Short Hand if Statement # + colab={"base_uri": "https://localhost:8080/"} id="xiYjptEQDDii" outputId="d83b24a3-02a3-4746-911a-e0ee9cc197ce" a=12 b=6 if a>b: print("a is greater than") # + [markdown] id="fJm9PGz9DNkJ" # ##Short Hand If... Else Statement # + colab={"base_uri": "https://localhost:8080/"} id="1hNQ8g_4DeEk" outputId="393b10f6-4fb3-4cb1-834e-bd97101cf9f2" a=7 b=14 print("a is greater than b")if a>b else print("b is greater than a") # + [markdown] id="NliPNL5nD7ti" # And logical condition # + colab={"base_uri": "https://localhost:8080/"} id="QfLL02LbEFCL" outputId="674c2ea1-c746-4a20-caeb-63b26a511810" a=200 b=300 c=500 if a>b and c>a: print("Both conditions are True") else: print("Evaluated as False") # + [markdown] id="BPSdyoW1Eo9a" # Or logical condition # + colab={"base_uri": "https://localhost:8080/"} id="x-nATVZZEqra" outputId="1c26d61f-7b36-4ce6-9ff2-527bba925097" a=200 b=300 c=500 if a>b or c>a: print("Evaluated as True") else: print("Evaluated as False") # + [markdown] id="WKf9h26EqmBn" # ##Nested If.. Else Statement # + colab={"base_uri": "https://localhost:8080/"} id="o2QiO4ytqosX" outputId="801dcf89-7a11-46f2-8aa4-f74b24e4f229" x=20 if x>10: print("Above ten") if x>20: print("Above twenty") else: print("Above ten but Not above twenty") else: print("Not above ten") # + [markdown] id="Tsj73EKEsQK-" # ##Example 1 # + colab={"base_uri": "https://localhost:8080/"} id="yrtquksBtVpt" outputId="3aeb765c-a256-4a2f-e921-33e68aecb58a" # The qualifying age to vote age=int(input("Enter your age:")) if age>=18: print("You are qualified to vote") else: print("You are not qualified to vote") # + [markdown] id="RBzZmDRfszc9" # ##Example 2 # + colab={"base_uri": "https://localhost:8080/"} id="oT8aisv5s1Nm" outputId="ac05b75f-db36-411e-ecf2-51cf2eb73f7e" num=int(input("Enter the number:")) if num==0: print("Zero") elif num>0: print("Positive") else: print("Negative") # + [markdown] id="u_u4qSEQuxAF" # ##Example 3 # + colab={"base_uri": "https://localhost:8080/"} id="fT5Dll0UuyJH" outputId="1dd1f44b-5c98-40da-d459-247e8a0ccc40" grade=float(input("Enter your grade:")) if grade>=75: print("Passed") elif grade<74: print("Failed") else: print("Remedial") # + colab={"base_uri": "https://localhost:8080/"} id="vmEWGvhsvjrK" outputId="2d5cbae9-4cdc-4110-eee0-d40e7fd92521" grade=float(input("Enter your grade:")) if grade>=75: print("Passed") elif grade<74: print("Failed") else: print("Remedial") # + colab={"base_uri": "https://localhost:8080/"} id="leepqcbjv8Wd" outputId="74b09b75-b14c-4b4e-ffb8-a838961679d5" grade=float(input("Enter your grade:")) if grade>=75: print("Passed") elif grade<74: print("Failed") else: print("Remedial")
Control_Structure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # How to extract bottleneck features # Modern CNNs can take weeks to train on multiple GPUs on ImageNet, but fortunately, many researchers share their final weights. Keras, e.g., contains pre-trained models for several of the reference architectures discussed above, namely VGG16 and 19, ResNet50, InceptionV3 and InceptionResNetV2, MobileNet, DenseNet, NASNet and MobileNetV2 # # This notebook illustrates how to download pre-trained VGG16 model, either with the final layers to generate predictions or without the final layers as illustrated in the figure below to extract the outputs produced by the bottleneck features. # ## Imports # + import numpy as np from pathlib import Path import tensorflow as tf from tensorflow.keras.applications.vgg19 import VGG19, preprocess_input from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.preprocessing import image import tensorflow.keras.backend as K # - gpu_devices = tf.config.experimental.list_physical_devices('GPU') if gpu_devices: print('Using GPU') tf.config.experimental.set_memory_growth(gpu_devices[0], True) else: print('Using CPU') # ## Load and Preprocess Sample Images # # Before supplying an image to a pre-trained network in Keras, there are some required preprocessing steps. # # We have imported a very small dataset of 7 # images and stored the preprocessed image input as `img_input`. Note that the dimensionality of this array is `(8, 224, 224, 3)`. In this case, each of the 8 images is a 3D tensor, with shape `(224, 224, 3)`. img_paths = Path('images/img_input').glob('*.jpg') def path_to_tensor(img_path): # loads RGB image as PIL.Image.Image type img = image.load_img(img_path, target_size=(224, 224)) # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3) x = image.img_to_array(img) # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor return np.expand_dims(x, axis=0) def paths_to_tensor(img_paths): list_of_tensors = [path_to_tensor(img_path) for img_path in img_paths] return np.vstack(list_of_tensors) # + # calculate the image input img_input = preprocess_input(paths_to_tensor(img_paths)) img_input.shape # - # ## Import Pre-Trained VGG-16 # # Import the VGG-16 network (including the final classification layer) that has been pre-trained on ImageNet. # Keras makes it very straightforward to download and use pre-trained models: vgg16 = VGG16() vgg16.summary() # For this network, `model.predict` returns a 1000-dimensional probability vector containing the predicted probability that an image returns each of the 1000 ImageNet categories. The dimensionality of the obtained output from passing `img_input` through the model is `(8, 1000)`. The first value of `7` merely denotes that 7 images were passed through the network. y_pred = vgg16.predict(img_input) y_pred.shape np.argmax(y_pred, axis=1) # ## Import the VGG-16 Model, with the Final Fully-Connected Layers Removed # # When performing transfer learning, we need to remove the final layers of the network, as they are too specific to the ImageNet database. This is accomplished in the code cell below. # # ![VGG-16 model for transfer learning](images/vgg19_transfer.png) # You can use this model like any other Keras model for predictions. To exclude the fully-connected layers, just add the keyword `include_top=False` to obtain the output of the final convolutional layer when passing an image to the CNN. vgg16 = VGG16(include_top=False) vgg16.summary() # By omitting the fully-connected layers, we are no longer forced to use a fixed input size for the model (224x224, the original ImageNet format). By only keeping the convolutional modules, our model can be adapted to arbitrary input sizes. # ### Extract Output of Final Max Pooling Layer # # Now, the network stored in `model` is a truncated version of the VGG-16 network, where the final three fully-connected layers have been removed. In this case, `model.predict` returns a 3D array (with dimensions $7\times 7\times 512$) corresponding to the final max pooling layer of VGG-16. The dimensionality of the obtained output from passing `img_input` through the model is `(8, 7, 7, 512)`. The first value of `8` merely denotes that 8 images were passed through the network. vgg16.predict(img_input).shape # This is exactly how we calculate the bottleneck features for your project! # ## Import ResNet50 # ### With final layer resnet = ResNet50() resnet.summary() # ### Without final layer resnet = ResNet50(include_top=False) resnet.summary() # ## Import Inception V3 # ### With final layer inception = InceptionV3() inception.summary() # ### Without final layer inception = InceptionV3(include_top=False) inception.summary()
18_convolutional_neural_nets/09_bottleneck_features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: chineseocr # language: python # name: chineseocr # --- import os os.chdir('../../') # ## GPU 设置 GPUID='0'##调用GPU序号 os.environ["CUDA_VISIBLE_DEVICES"] = GPUID # + import numpy as np import tensorflow as tf from glob import glob from PIL import Image import cv2 Input =tf.keras.layers.Input Lambda = tf.keras.layers.Lambda load_model = tf.keras.models.load_model Model = tf.keras.models.Model from apphelper.image import get_box_spilt,read_voc_xml,resize_im,read_singLine_for_yolo from text.keras_yolo3 import preprocess_true_boxes, yolo_text from train.text.utils import get_random_data_ as get_random_data def data_generator(roots, anchors, num_classes,splitW): '''data generator for fit_generator @@roots:jpg/png ''' n = len(roots) np.random.shuffle(roots) scales = [416,608,608,608]##多尺度训练 i = 0 j = 0 m = len(scales) while True: root = roots[i] i+=1 if i>=n: i=0 scale = scales[j] j+=1 if j>=m: j=0 xmlP = os.path.splitext(root)[0]+'.xml' boxes = read_voc_xml(xmlP) im = Image.open(root) w,h = resize_im(im.size[0],im.size[1], scale=scale, max_scale=None) if max(w,h)>2048: w,h = resize_im(im.size[0],im.size[1], scale=scale, max_scale=2048) input_shape = (h,w) isRoate=True rorateDegree=np.random.uniform(-5,5) rorateDegree = 0 newBoxes,newIm = get_box_spilt(boxes, im, w,h,splitW=splitW, isRoate=isRoate, rorateDegree=rorateDegree) newBoxes = np.array(newBoxes) if len(newBoxes)==0: continue if np.random.randint(0,100)>70: if np.random.randint(0,100)>50: ##图像水平翻转 newBoxes[:,[0,2]] = w-newBoxes[:,[2,0]] im = Image.fromarray(cv2.flip(np.array(im),1)) else: ##垂直翻转 newBoxes[:,[1,3]] = h-newBoxes[:,[3,1]] im = Image.fromarray(cv2.flip(np.array(im),0)) maxN = 128##随机选取128个box用于训练 image_data = [] box_data = [] image, box = get_random_data(newIm,newBoxes, input_shape,max_boxes=maxN) image_data = np.array([image]) box_data = np.array([box]) y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes) yield [image_data, *y_true], [np.zeros(1)]*4 # - # ## 加载训练数据集,标注XML软件参考https://github.com/cgvict/roLabelImg.git # + val_split = 0.1 root='train/data/text/*/*.[j|p|J]*' jpgPath = glob(root) ##剔除为标记的图像 delPaths = [] for p in jpgPath: xmlP = os.path.splitext(p)[0]+'.xml' if not os.path.exists(xmlP): delPaths.append(p) print('total:',len(jpgPath)) jpgPath = list(set(jpgPath) - set(delPaths)) print('total:',len(jpgPath)) np.random.shuffle(jpgPath) num_val = int(len(jpgPath)*val_split) num_train = len(jpgPath) - num_val # - # ## 定义anchors及加载训练模型 ## 计算训练集anchors from train.text.gen_anchors import YOLO_Kmeans## anchors生产 splitW = 8##文本分割最小宽度 #cluster = YOLO_Kmeans(cluster_number=9, root=root, scales=[416, 512, 608, 608, 608, 768, 960, 1024], splitW=splitW) #8,9, 8,18, 8,31, 8,59, 8,124, 8,351, 8,509, 8,605, 8,800 #print(cluster.anchors) # + ## 数据事例 from apphelper.image import xy_rotate_box,box_rotate def plot_boxes(img,angle, result,color=(0,0,0)): tmp = np.array(img) c = color w,h = img.size thick = int((h + w) / 300) i = 0 if angle in [90,270]: imgW,imgH = img.size[::-1] else: imgW,imgH = img.size for line in result: cx =line['cx'] cy = line['cy'] degree =line['angle'] w = line['w'] h = line['h'] x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx, cy, w, h, degree) x1,y1,x2,y2,x3,y3,x4,y4 = box_rotate([x1,y1,x2,y2,x3,y3,x4,y4],angle=(360-angle)%360,imgH=imgH,imgW=imgW) cx =np.mean([x1,x2,x3,x4]) cy = np.mean([y1,y2,y3,y4]) cv2.line(tmp,(int(x1),int(y1)),(int(x2),int(y2)),c,1) cv2.line(tmp,(int(x2),int(y2)),(int(x3),int(y3)),c,1) cv2.line(tmp,(int(x3),int(y3)),(int(x4),int(y4)),c,1) cv2.line(tmp,(int(x4),int(y4)),(int(x1),int(y1)),c,1) mess=str(i) cv2.putText(tmp, mess, (int(cx), int(cy)),0, 1e-3 * h, c, thick // 2) i+=1 return Image.fromarray(tmp) def plot_box(img,boxes): blue = (0, 0, 0) #18 tmp = np.copy(img) for box in boxes: cv2.rectangle(tmp, (int(box[0]),int(box[1])), (int(box[2]), int(box[3])), blue, 1) #19 return Image.fromarray(tmp) def show(p,scale=608): im = Image.open(p) xmlP = p.replace('.jpg','.xml').replace('.png','.xml') boxes = read_voc_xml(xmlP) im = Image.open(p) w,h = resize_im(im.size[0],im.size[1], scale=scale, max_scale=4096) input_shape = (h,w) isRoate=True rorateDegree=np.random.uniform(-5,5) rorateDegree=0 newBoxes,newIm = get_box_spilt(boxes, im, sizeW=w, SizeH=h, splitW=splitW, isRoate=isRoate, rorateDegree=rorateDegree) return plot_boxes(im,0, boxes,color=(0,0,0)),plot_box(newIm,newBoxes),newBoxes # - a,b,newBoxes = show(jpgPath[9]) b #anchors = cluster.anchors anchors = '8,9, 8,18, 8,31, 8,59, 8,124, 8,351, 8,509, 8,605, 8,800' anchors = [float(x) for x in anchors.split(',')] anchors = np.array(anchors).reshape(-1, 2) num_anchors = len(anchors) class_names = ['none','text',]##text num_classes = len(class_names) textModel = yolo_text(num_classes,anchors,train=True) #textModel.load_weights('models/text.h5')##加载预训练模型权重 textModel.load_weights('models/text.h5')##加载预训练模型权重 trainLoad = data_generator(jpgPath[:num_train], anchors, num_classes,splitW) testLoad = data_generator(jpgPath[num_train:], anchors, num_classes,splitW) adam = tf.keras.optimizers.Adam(lr=0.0005) textModel.compile(optimizer=adam, loss={'xy_loss':lambda y_true, y_pred:y_pred, 'wh_loss':lambda y_true, y_pred:y_pred, 'confidence_loss':lambda y_true, y_pred:y_pred, 'class_loss':lambda y_true, y_pred:y_pred, } ) textModel.fit_generator(generator=trainLoad, steps_per_epoch=num_train, epochs=2, verbose=2, callbacks=None, validation_data=testLoad, validation_steps=num_val) # + from text.keras_yolo3 import yolo_text,box_layer,K from config import kerasTextModel,IMGSIZE,keras_anchors,class_names from apphelper.image import resize_im,letterbox_image from PIL import Image import numpy as np import tensorflow as tf graph = tf.get_default_graph()##解决web.py 相关报错问题 anchors = [float(x) for x in keras_anchors.split(',')] anchors = np.array(anchors).reshape(-1, 2) num_anchors = len(anchors) num_classes = len(class_names) textModelTest = yolo_text(num_classes,anchors) kerasTextModel = '/tmp/textModel.h5' textModelTest.load_weights(kerasTextModel) sess = K.get_session() image_shape = K.placeholder(shape=(2, ))##图像原尺寸:h,w input_shape = K.placeholder(shape=(2, ))##图像resize尺寸:h,w box_score = box_layer([*textModelTest.output,image_shape,input_shape],anchors, num_classes) def text_detect(img,prob = 0.05): im = Image.fromarray(img) scale = IMGSIZE[0] w,h = im.size w_,h_ = resize_im(w,h, scale=scale, max_scale=2048)##短边固定为608,长边max_scale<4000 #boxed_image,f = letterbox_image(im, (w_,h_)) boxed_image = im.resize((w_,h_), Image.BICUBIC) image_data = np.array(boxed_image, dtype='float32') image_data /= 255. image_data = np.expand_dims(image_data, 0) # Add batch dimension. imgShape = np.array([[h,w]]) inputShape = np.array([[h_,w_]]) global graph with graph.as_default(): ##定义 graph变量 解决web.py 相关报错问题 """ pred = textModel.predict_on_batch([image_data,imgShape,inputShape]) box,scores = pred[:,:4],pred[:,-1] """ box,scores = sess.run( [box_score], feed_dict={ textModelTest.input: image_data, input_shape: [h_, w_], image_shape: [h, w], K.learning_phase(): 0 })[0] keep = np.where(scores>prob) box[:, 0:4][box[:, 0:4]<0] = 0 box[:, 0][box[:, 0]>=w] = w-1 box[:, 1][box[:, 1]>=h] = h-1 box[:, 2][box[:, 2]>=w] = w-1 box[:, 3][box[:, 3]>=h] = h-1 box = box[keep[0]] scores = scores[keep[0]] return box,scores # - p='./train/text/26BB94CA21C11AB38BC5FC2E08D140CD.jpg' IMGSIZE=416,416 img = np.array(Image.open(p)) box,scores = text_detect(img,prob = 0.01) plot_box(img,box)
train/text/text-train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Sinopia Knowledge Graph # This notebook creates a Sinopia [kglab](https://derwen.ai/docs/kgl/) Knowledge Graph. # + jupyter={"outputs_hidden": true} tags=[] # ! pip install kglab # + import json from datetime import datetime from typing import Dict, List, Optional import pandas as pd import kglab import rdflib import requests # - # Defines Sinopia-specific namespaces in a dictionary and create a SINOPIA rdflib.Namespace used in each resource's graph. # + namespaces = { "bf": "http://id.loc.gov/ontologies/bibframe/", "bflc": "http://id.loc.gov/ontologies/bflc/", "mads": "http://www.loc.gov/mads/rdf/v1#", "skos": "http://www.w3.org/2004/02/skos/core#", "sinopia": "http://sinopia.io/vocabulary/" } SINOPIA = rdflib.Namespace("http://sinopia.io/vocabulary/") # - # **TODO:** These functions need to be moved to a local Python module for use in other notebooks. def from_api(api_url: str) -> Dict: """Takes a Sinopia API endpoint URI, extracts each resource and template, and returns a dictionary with two lists, a resources and a templates, and the total number of resources harvested from the api. @param api_url -- URI to Sinopia API endpoint @param group -- optional Group name """ def add_resource(resource): if not 'data' in resource: print(f"\n{resource.get('uri')} missing data") return output["total"] += 1 graph = rdflib.Graph() for key, url in namespaces.items(): graph.namespace_manager.bind(key, url) jsonld = json.dumps(resource.pop("data")).encode() try: graph.parse(data=jsonld, format="json-ld") except Exception as error: print(f"Failed to parse {resource}\n{error}") return payload = {"graph": graph, "meta": resource} if "sinopia:template:resource" in resource.get("templateId"): output["templates"].append(payload) else: output["resources"].append(payload) output = {"resources": [], "templates": [], "total": 0} start = datetime.utcnow() print(f"Started harvest of resources at {start} for {api_url}") initial = requests.get(f"{api_url}") print("0", end="") for row in initial.json().get("data"): add_resource(row) next_link = initial.json().get("links").get("next") while 1: result = requests.get(next_link) if result.status_code > 300: break payload = result.json() new_next = payload.get("links").get("next") if new_next is None: new_text = payload.get("links").get("first") if new_next == next_link or new_next is None: break for row in payload.get("data"): add_resource(row) next_link = new_next print(".", end="") if not output["total"] % 250: print(f"{output['total']}", end="") end = datetime.utcnow() print(f"\nFinished total time {(end-start).seconds / 60.}") return output # + # from_api? # + tags=[] stage_rdf = from_api("https://api.stage.sinopia.io/resource") # + # kglab.KnowledgeGraph? # - # ## Creating a Knowledge Graph # Here we iterate through the Stage resources, retrieve the RDF graph, and then we create an instance of the kglab # Knowledge Graph. # + stage = rdflib.ConjunctiveGraph() for row in stage_rdf['resources']: stage += row.get('graph') kg = kglab.KnowledgeGraph( name = "Sinopia Stage KG", base_uri = "https://api.stage.sinopia.io/resource/", namespaces = namespaces, import_graph=stage ) # - # ### Graph Measuresments measure = kglab.Measure() measure.measure_graph(kg) print("edges: {:,}\n".format(measure.get_edge_count())) print("nodes: {:,}\n".format(measure.get_node_count())) # ## SPARQL Query as a Pandas Data # One of the exciting capabilities of our new Stage Knowledge graph is that we can query our Stage datastore with SPARQL and runturn the result as a [Pandas](https://pandas.pydata.org/) dataframe. The dataframe can now be fead into a [FastAI](), [Pytorch](), or [Tensorflow]() models for further analysis. all_triples_query = """ SELECT ?subject ?predicate ?object WHERE { ?subject ?predicate ?object . }""" df = kg.query_as_df(all_triples_query) With each triple in our df.info() df.head() df.tail() df.sample(10) # ## Visualizations # # + tags=[] subgraph = kglab.SubgraphTensor(kg) pyvis_graph = subgraph.build_pyvis_graph(notebook=True) pyvis_graph.force_atlas_2based() pyvis_graph.show("tmp.fig03.html") # - second_kg = kglab.KnowledgeGraph( name = "Sinopia Stage Second KG", base_uri = "https://api.stage.sinopia.io/resource/", namespaces = namespaces, import_graph=dev_rdf['resources'][1]['graph'] ) # + tags=[] subgraph = kglab.SubgraphTensor(second_kg) pyvis_graph = subgraph.build_pyvis_graph(notebook=True) pyvis_graph.force_atlas_2based() pyvis_graph.show("tmp.fig03.html") # - # ## Stanford Knowledge Base # stanford_url = "https://api.stage.sinopia.io/resource?group=stanford" stanford_rdf = from_api(stanford_url) len(stanford_rdf['resources']) # + tags=[] len(stanford_rdf['resources']) # - stanford_graph = rdflib.ConjunctiveGraph() for row in stanford_rdf['resources']: stanford_graph += row['graph'] stanford_kb = kglab.KnowledgeGraph( name = "Sinopia Stage Stanford KG", base_uri = "https://api.stage.sinopia.io/resource/", namespaces = namespaces, import_graph=stanford_graph ) measure.measure_graph(stanford_kb) print("edges: {:,}\n".format(measure.get_edge_count())) print("nodes: {:,}\n".format(measure.get_node_count())) # ## Work-Instance-Item Visualization from rdflib.plugins.stores.sparqlconnector import SPARQLConnector qa_pcc_endpoint = SPARQLConnector('http://services.ld4l.org/fuseki/PCC/sparql', returnFormat='json') query = qa_pcc_endpoint.query("SELECT ?s ?p ?o WHERE { ?s ?p ?o . } LIMIT 10") for row in query: print(row) def bf_entities(uri: str) -> rdflib.Graph: entity_ mla_url = "https://api.stage.sinopia.io/resource?group=mla" mla_rdf = from_api(mla_url) # + jupyter={"outputs_hidden": true} tags=[] mla_rdf # - # ## Extract MLA Resources and Save as Turtle namespaces['pmo'] = "http://performedmusicontology.org/ontology/" namespaces['rdaw'] = "http://rdaregistry.info/Elements/w/" mla_result = from_api("https://api.stage.sinopia.io/resource?group=mla") len(mla_result['resources']) # + tags=[] print(mla_result['resources'][0]['graph'].serialize(format='turtle').decode()) # - # ### Saves Serialized Resources as Turtle into Zip file import zipfile with zipfile.ZipFile("mla-resources.zip", "w") as zip_file: for resource in mla_result['resources']: zip_file.writestr(f"{resource['meta'].get('id')}.ttl", resource['graph'].serialize(format='turtle'))
doc/stage-kg.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] graffitiCellId="id_1myd4mr" # ### Problem statement # # In an encryption system where ASCII lower case letters represent numbers in the pattern `a=1, b=2, c=3...` and so on, find out all the codes that are possible for a given input number. # # **Example 1** # # * `number = 123` # * `codes_possible = ["aw", "abc", "lc"]` # # Explanation: The codes are for the following number: # # * 1 . 23 = "aw" # * 1 . 2 . 3 = "abc" # * 12 . 3 = "lc" # # # **Example 2** # # * `number = 145` # * `codes_possible = ["ade", "ne"]` # # Return the codes in a list. The order of codes in the list is not important. # # *Note: you can assume that the input number will not contain any 0s* # + graffitiCellId="id_v2stglr" def all_codes(number): """ :param: number - input integer Return - list() of all codes possible for this number TODO: complete this method and return a list with all possible codes for the input number """ pass # + [markdown] graffitiCellId="id_q8i2zj9" # <span class="graffiti-highlight graffiti-id_q8i2zj9-id_yrg0ir2"><i></i><button>Show Solution</button></span> # + graffitiCellId="id_byos91q" def test_function(test_case): number = test_case[0] solution = test_case[1] output = all_codes(number) output.sort() solution.sort() if output == solution: print("Pass") else: print("Fail") # + graffitiCellId="id_yrg0ir2" # Solution def get_alphabet(number): """ Helper function to figure out alphabet of a particular number Remember: * ASCII for lower case 'a' = 97 * chr(num) returns ASCII character for a number e.g. chr(65) ==> 'A' """ return chr(number + 96) def all_codes(number): if number == 0: return [""] # calculation for two right-most digits e.g. if number = 1123, this calculation is meant for 23 remainder = number % 100 output_100 = list() if remainder <= 26 and number > 9 : # get all codes for the remaining number output_100 = all_codes(number // 100) alphabet = get_alphabet(remainder) for index, element in enumerate(output_100): output_100[index] = element + alphabet # calculation for right-most digit e.g. if number = 1123, this calculation is meant for 3 remainder = number % 10 # get all codes for the remaining number output_10 = all_codes(number // 10) alphabet = get_alphabet(remainder) for index, element in enumerate(output_10): output_10[index] = element + alphabet output = list() output.extend(output_100) output.extend(output_10) return output # + graffitiCellId="id_l3o11vy" number = 123 solution = ['abc', 'aw', 'lc'] test_case = [number, solution] test_function(test_case) # + graffitiCellId="id_a5y5trj" number = 145 solution = ['ade', 'ne'] test_case = [number, solution] test_function(test_case) # + graffitiCellId="id_ujafvah" number = 1145 solution = ['aade', 'ane', 'kde'] test_case = [number, solution] test_function(test_case) # + graffitiCellId="id_6d1dxt9" number = 4545 solution = ['dede'] test_case = [number, solution] test_function(test_case)
data structure/recursion/Return-Codes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Itto-ryu/CPEN-21A-CPE-1-2/blob/main/OOP_Concepts.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="hM-QjeHwSAoK" # ##Python Classes and Objects # + [markdown] id="QFvri0OAR9EZ" # ###Create classes # + id="oKR8kSGbSWUS" class MyClass: pass # + colab={"base_uri": "https://localhost:8080/"} id="oljh4dsySozL" outputId="f673225e-059f-45a5-97fe-385c61fecbaa" class OOP1_2: x=5 print(x) # + [markdown] id="3Dc9AKjgUEYq" # ###Create Objects # + colab={"base_uri": "https://localhost:8080/"} id="uzlKmoj6SpEJ" outputId="f43e6e3f-cd68-4c2c-d493-ceb2b183fd5c" class OOP1_2: def __init__(self,name,age): #__init__() self.name = name #attributes self.age = age def identity(self): print(self.name,self.age) person = OOP1_2("Ryu", 19) #create objects print(person.identity) print(person.name) print(person.age) # + colab={"base_uri": "https://localhost:8080/"} id="qVT6sHQ0SpUB" outputId="1a7225f4-17c9-4e63-b410-59a8d5740769" #Modify the Object Name person.name = "Yong" person.age = 24 print(person.name) print(person.age) # + colab={"base_uri": "https://localhost:8080/", "height": 200} id="8h7_96AMW1lf" outputId="1814018c-7814-45bc-ef9f-bf000a35a621" #Delete the Object del person.name print(person.name) # + [markdown] id="62V8i7MXXiVQ" # ###Application 1 - Write a Python program that computes the area of a square, and name its class as Square, side as attribute. # + colab={"base_uri": "https://localhost:8080/"} id="LS51BIfKXdZP" outputId="da7a1f69-d934-4b25-9e0d-a44f916ca02a" class Square: def __init__(self,sides): self.sides = sides def area(self): return self.sides*self.sides #formula to compute the area of the square def display(self): print("the area of the square is:", self.area()) square = Square(4) print(square.sides) square.display() # + [markdown] id="TYJPIYMkbYPO" # ###Application 2 - Write a Python program that displays your full name, age, course, school. Create a class names MyClass, and name, age, school as attributes # + colab={"base_uri": "https://localhost:8080/"} id="xKR1qM0Cbh-N" outputId="23ee7d63-79fb-40d9-bfc4-8665d930aa22" class MyClass: def __init__(self,name,age,course,school): self.name = name self.age = age self.course = course self.school = school def identity(self): print(self.name,self.age,self.course,self.school) collegestudent = MyClass("<NAME>",19,"Bachelor of Science in Computer Engineering","Cavite State Univeristy") print(collegestudent.identity) print(collegestudent.name) print(collegestudent.age) print(collegestudent.course) print(collegestudent.school)
OOP_Concepts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np df = pd.read_csv("https://raw.githubusercontent.com/chirudukuru/DMDW/main/student-mat.csv") df.head() # Proximity measures of binary attributes . df1 = df[['schoolsup','famsup','paid','activities','nursery','higher','internet','romantic']] df1.head() df1 = df1.replace("no",0) df1 = df1.replace("yes",1) df1.head() n = np.array(df1[['schoolsup','famsup']]) n=n.reshape(-1,2) n.shape m = np.array(df1[['internet','romantic']]) map=m.reshape(-1,2) m.shape from scipy.spatial import distance dist_matrix = distance.cdist(n,m) print(dist_matrix) import seaborn as sns import matplotlib.pyplot as plt sns.heatmap(dist_matrix) plt.show() nominal = df[['Mjob','Fjob','reason','guardian']] nominal = nominal.replace('at_home','home') nominal = (nominal.astype('category')) from sklearn.preprocessing import LabelEncoder lb = LabelEncoder() nominal['Mjob'] = lb.fit_transform(nominal['Mjob']) nominal['Fjob'] = lb.fit_transform(nominal['Fjob']) nominal['reason'] = lb.fit_transform(nominal['reason']) nominal['guardian'] = lb.fit_transform(nominal['guardian']) nominal.head() nominal1=np.array(nominal) nominal1.reshape(-1,2) nominal2=np.array(nominal) nominal2.reshape(-1,2) from scipy.spatial import distance dist_matrix = distance.cdist(nominal1,nominal2) print(dist_matrix) sns.heatmap(dist_matrix) plt.show()
18CSE109_DMDWLAB_Assignment_4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sparameters # # gdsfactory provides you with a Lumerical FDTD interface to calculate Sparameters # # by default another repo [gdslib](https://gdslib.readthedocs.io/en/latest/index.html) stores the Sparameters # # You can chain the Sparameters to calculate solve of larger # circuits using a circuit solver such as: # # - Lumerical interconnect # - [simphony (open source)](https://simphonyphotonics.readthedocs.io/en/latest/) # # # If the Sparameters exists in `gdslib` you can access them really fast. # + # NBVAL_SKIP import pp pp.sp.plot(pp.c.mmi1x2(), keys=['S23m', 'S13m'], logscale=True) # - pp.sp.write(pp.c.mmi1x2(), layer2nm={(1,0): 210}) # gdsfactory can also compute the Sparameters of a component that have not been simulated before. # + # NBVAL_SKIP import pp cs = [pp.c.coupler_ring(gap=gap, bend_radius=bend_radius) for gap in [0.15, 0.2, 0.3] for bend_radius in [5, 10]] for c in cs: pp.show(c) print(c) pp.sp.write(c) # - # To debug a simulation you can create a Lumerical session outside the simulator, pass it to the simulator, and use `run=False` flag # NBVAL_SKIP import lumapi s = lumapi.FDTD() c = pp.c.waveguide() pp.sp.write(c, run=False, session=s) # By default gdsfactory uses the generic layermap for 220nm height silicon layer. # # You can also define your components with a different material, thicknes or GDS layermap # + # NBVAL_SKIP layer2material = { (2, 0): "sin", } layer2nm = {(2, 0): 400} c = pp.c.waveguide(layer=(2,0)) sp = pp.sp.write(c, layer2nm=layer2nm, layer2material=layer2material, session=s) pp.qp(c) pp.sp.plot(c)
notebooks/30_sparameters.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/paulcodrea/dissertation/blob/main/5b_LSTM_prediction_BTC_price_and_sentiment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="x4mCQvIev9T9" outputId="fcdb3d66-7ec9-4368-d9e5-ea6eb3de7b4c" # !pip install wandb --quiet # + colab={"base_uri": "https://localhost:8080/"} id="ndQZJsg6v-u0" outputId="089ae395-3e05-46ba-b328-3b4852af987e" # !wandb login # + id="2V9Z9wsTF-3L" import os import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt import wandb from wandb.keras import WandbCallback from time import time from keras.models import Sequential, load_model from keras.layers.core import Dense, Dropout from keras.layers import LSTM from time import time from keras.callbacks import EarlyStopping from sklearn.preprocessing import MinMaxScaler sc = MinMaxScaler() # + colab={"base_uri": "https://localhost:8080/", "height": 110} id="KEC03ClHwLFe" outputId="f6de2278-bf74-43b7-ce3d-252c9049ad66" wandb.init(project="5b_LSTM-precition_BTC_price-and-sentiment", entity="paulcodrea") # + id="Min6QDbawRuU" wandb.config = { "learning_rate": 0.001, "epochs": 40, "batch_size": 4, "train_p": 0.55, "val_p": 0.05, "LSTM_layer": [50, 100], "Dropout_layer": [0.15, 0.2], "activation": 'tanh', "timesteps": 1, "n_features": 2, # numbers of columns } # Construct the metrics to store the results metrics_df = pd.DataFrame(columns=['script','epoch', 'batch_size','timesteps', 'train_p', 'val_p', 'test_p', 'loss', 'val_loss', 'mse', 'rmse', 'mae', 'mape', 'runtime', 'cross_correlation']) # + colab={"base_uri": "https://localhost:8080/"} id="f-bHnQa4IvU2" outputId="540b0f44-904b-40cc-cb91-0826f3046d2b" # df = pd.read_csv('/content/drive/MyDrive/COMP30030_Dissertation_paul.codrea/Market-prediction/final_price-and-compund.csv', parse_dates=True, index_col="date") df = pd.read_csv('/content/drive/MyDrive/COMP30030_Dissertation_paul.codrea/Market-prediction/final_price-and-score.csv', parse_dates=True, index_col="date") # df = pd.read_csv('/content/drive/MyDrive/COMP30030_Dissertation_paul.codrea/Market-prediction/final_sentiment_price.csv', parse_dates=True, index_col="date") # df = df[['compund', 'positive', 'negative', 'neutral', 'score', 'close']] df = df[['score', 'close']] # df = df[['compund', 'close']] print(df.head()) # + id="MQ3M45WdIz-g" # Data normalization. This is one of the first steps to normalize the values. # The goal is to change the values of numeric columns in the data set to a common scale, wihtout distorting differeces in the randes of values. df_values = df.values sc = MinMaxScaler() data_scaled = sc.fit_transform(df_values) # + id="oZzFVdPwI9MV" # Splitting data into training and testing data train_index = int(wandb.config['train_p'] * len(data_scaled)) val_index = train_index + int(wandb.config['val_p'] * len(data_scaled)) train = data_scaled[:train_index] val = data_scaled[train_index:val_index] test = data_scaled[val_index:] # + colab={"base_uri": "https://localhost:8080/"} id="qdXLzbN-JB1F" outputId="d63e34b2-b1f1-4675-a249-bddc74cc781a" print("train,test,val",train.shape, test.shape, val.shape) # + colab={"base_uri": "https://localhost:8080/"} id="IxyaiDzbJRRU" outputId="0a7e7458-5b89-47da-fbeb-09b51ef10787" xtrain, ytrain = train[:,:2], train[:,1] xtest, ytest = test[:,:2], test[:,1] xval, yval = val[:,:2], val[:,1] print(len(xtrain), len(ytrain)) print(xtrain.shape, ytrain.shape) ####################################################################### # xtrain, ytrain = train[:, :6], train[:, 5] # xtest, ytest = test[:, :6], test[:, 5] # xval, yval = val[:, :6], val[:, 5] # print(len(xtrain), len(ytrain)) # print(xtrain.shape, ytrain.shape) # + colab={"base_uri": "https://localhost:8080/"} id="3QRrWdk9OArm" outputId="c5c1db39-95d0-4531-ddab-d704591e6850" # Samples -> these are the rows in the data. # Number of hours in the future? # Timesteps -> these are the past observations for a feature (such as lag variable). # input_shape is the shape of the training dataset. timesteps = wandb.config["timesteps"] n_features = wandb.config["n_features"] train_len = len(xtrain) - timesteps test_len = len(xtest) - timesteps val_len = len(xval) - timesteps print("Train len:", train_len) print("Test len:", test_len) x_train = np.zeros((train_len, timesteps, n_features)) y_train = np.zeros((train_len)) for i in range(train_len): ytemp = i+timesteps x_train[i] = xtrain[i:ytemp] y_train[i] = ytrain[ytemp] print("x_train", x_train.shape) print("y_train", y_train.shape) x_test = np.zeros((test_len, timesteps, n_features)) y_test = np.zeros((test_len)) for i in range(test_len): ytemp = i+timesteps x_test[i] = xtest[i:ytemp] y_test[i] = ytest[ytemp] print("x_test", x_test.shape) print("y_test", y_test.shape) x_val = np.zeros((val_len, timesteps, n_features)) y_val = np.zeros((val_len)) for i in range(val_len): ytemp = i+timesteps x_val[i] = xval[i:ytemp] y_val[i] = yval[ytemp] print("x_val", x_val.shape) print("y_val", y_val.shape) ################################################################################ # 2 hidden layers with 50 neurons each and a dropout between every one of them. # Start with Sequencial class. model = Sequential() # return_sequence will return a sequence rather than a single value for each input. # Sequential model -> as apipeline with raw data fed in model.add(LSTM(wandb.config['LSTM_layer'][0], input_shape = (timesteps, n_features), return_sequences=True, activation=wandb.config['activation'])) # Dropout Regularisation - method of ignoring and dropping random units during training. # This is essential to prevent overfitting. e.g. Dropout of 15% model.add(Dropout(wandb.config['Dropout_layer'][0])) model.add(LSTM(wandb.config['LSTM_layer'][1], activation=wandb.config['activation'])) model.add(Dropout(wandb.config['Dropout_layer'][1])) model.add(Dense(1)) # This layer is at the end of the architecture and it is used for outputting a prediction. print(model.summary()) # mean-squared-error loss function and Adam optimiser. MSE is a standard loss function for a regression model. # adam -> optimiser algorithm model.compile(loss = 'mean_squared_error', optimizer = 'adam') # Too many epochs can lead to overfitting of the training dataset, whereas too few may result in an underfit model. # Early stopping is a method that allows you to specify an arbitrary large number of training epochs and stop training # once the model performance stops improving on a hold out validation dataset. earlystop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=80, verbose=1, mode='min') start = time() print("start:",0) history = model.fit(x_train, y_train, epochs = wandb.config['epochs'], batch_size=wandb.config['batch_size'], validation_data=(x_val, y_val), verbose = 1, shuffle = False, callbacks=[WandbCallback(), earlystop]) # Print the time it took to run the code runtime = time()-start print("Time: %.4f" % runtime) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="tr0ci0PUQ9r8" outputId="9afe123a-3cf1-45f6-c760-b990593e650a" # Plotting data loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(loss)) plt.figure(figsize=(10, 8)) plt.plot(epochs, loss, 'b', label='Training loss') plt.plot(epochs, val_loss, 'orange', label='Validation loss') plt.title("Training and Validation loss") plt.legend() plt.show() # Predict the model # print(x_test) y_pred = model.predict(x_test) # print(y_pred) # Print out Mean Squared Error (MSE) mse = np.mean((y_pred - y_test)**2) print("MSE: %.4f" % mse) # Print out Root Mean Squared Error (RMSE) rmse = np.sqrt(mse) print("RMSE: %.4f" % rmse) # Print out Mean Absolute Error (MAE) mae = np.mean(np.abs(y_pred - y_test)) print("MAE: %.4f" % mae) # Print out Mean Absolute Percentage Error (MAPE) mape = np.mean(np.abs((y_pred - y_test) / y_test)) * 100 print("MAPE (percentage): %.4f" % mape) # Invers Scaling # actual_price = sc.inverse_transform([y_test]) # actual_price = np.reshape(actual_price, (actual_price.shape[1], 1)) # predicted_price = sc.inverse_transform(y_pred) # Plotting the prediction plt.figure(figsize=(18,8)) plt.plot(y_test, '.-', color='red', label='Real market values', alpha=0.5) plt.plot(y_pred, '.-', color='blue', label='Predicted values', alpha=1) plt.title("Bitcoin Price Prediction using RNN-LSTM") plt.xlabel("Time") plt.ylabel("Price") plt.legend() plt.show() # + [markdown] id="eVAIgP14Eng7" # ## Cross-correlation analysis # The correlation coefficient is measured on a scale from -1 to 1. A correlation coefficient of 1 indicates a perfect positive correlation between the prices of two stocks, meaning the stocks always move in the same direction by the same amount. A coefficient of -1 indicates a perfect negative correlation, meaning that the stocks have historically always moved in the opposite direction. If two stocks have a correlation coefficient of 0, it means there is no correlation and, therefore, no relationship between the stocks. It is unusual to have either a perfect positive or negative correlation. # + id="6agMSLjJIic1" actual_price_df = pd.DataFrame(y_test, columns=['price']) predicted_price_df = pd.DataFrame(y_pred, columns=['price']) # + id="9bAaguJte4Pa" def crosscorr(datax, datay, lag=0, method="pearson"): """ Lag-N cross correlation. Parameters —------— lag : int, default 0 datax, datay : pandas.Series objects of equal length Returns —------— crosscorr : float """ return datax.corr(datay.shift(lag), method=method) # + colab={"base_uri": "https://localhost:8080/", "height": 690} id="rOC8FEMFI_tA" outputId="a3c67c8e-9412-4f48-eaae-9564d1a7cc3b" # Calculate the Pearson Cross Correlation for lag 0 curr_corr = crosscorr(predicted_price_df['price'], actual_price_df['price'], method="pearson") # Print out the Correlation for lag 0 print(f"Pearson CC for lag 0: {curr_corr}") # Process of improving the CC value xcov = [crosscorr(predicted_price_df['price'], actual_price_df['price'], lag=i, method="pearson") for i in range(-len(actual_price_df), len(actual_price_df))] # Identify the lag that maximizes the correlation lag_max = np.argmax(xcov) print(f"Lag that maximizes the correlation {lag_max}\n\n") plt.figure(figsize=(18,8)) plt.plot(xcov, '.-', color='blue', label='Cross-correlation', alpha=1) plt.title("Cross-correlation between actual and predicted values") plt.xlabel("Units of time") plt.ylabel("Cross-correlation") plt.legend() plt.show() # If the Lag is different from 0 then shift the predicted price df and plot again if lag_max != 0: # Use the lag value to shift the predicted values to align with the actual values predicted_price_df['price'] = predicted_price_df['price'].shift(lag_max) plt.figure(figsize=(18,8)) plt.plot(predicted_price_df, '.-', color='blue', label='Predicted values', alpha=1) plt.plot(actual_price_df, '.-', color='red', label='Real market values', alpha=0.5) plt.title("Predicted values") plt.xlabel("Units of time") plt.ylabel("Price") plt.legend() else: print("\n\n No changes to current plotting") # + id="qA0eRlXiJK1M" # add values to metrics_df metrics_df = metrics_df.append({'script': 'LSTM_price_score', 'epoch': wandb.config['epochs'], 'batch_size': wandb.config['batch_size'], 'timesteps': wandb.config['timesteps'], 'train_p': wandb.config['train_p'] * 100, 'val_p': wandb.config['val_p'] * 100, 'test_p': (1 - wandb.config['train_p'] - wandb.config['val_p']) * 100, 'loss': min(loss), 'val_loss': min(val_loss), 'mse': mse, 'rmse': rmse, 'mae': mae, 'mape': mape, 'runtime': runtime, 'cross_correlation':curr_corr}, ignore_index=True) # + [markdown] id="AfjSms7UEHol" # ## Forecast upcoming 'n_hours' # + id="Hx5cRlqDx1An" n_hours = 4 # Prepare dataset based on xtest x_test_copy = x_test.copy() # Apend n_hours more np.zero to the end of x_test_copy x_test_copy = np.append(x_test_copy, np.zeros((n_hours, timesteps, n_features)), axis=0) # + id="H2nvFbf8G8VM" # Forecast the price based on the model build y_pred_forecast = model.predict(x_test_copy) # forecast_price = sc.inverse_transform(y_pred_forecast) # + colab={"base_uri": "https://localhost:8080/", "height": 491} id="PFvtP2iXJ2H0" outputId="75696468-544c-4fe4-9042-6f3aac7dd278" plt.figure(figsize=(18,8)) plt.plot(y_test, '.-', color='red', label='Real market values', alpha=0.5) plt.plot(y_pred_forecast, '.-', color='blue', label='Predicted values', alpha=1) plt.title("Bitcoin Price Prediction using RNN-LSTM") plt.xlabel("Time") plt.ylabel("Price") plt.legend() plt.show() # + id="JPs4A42UKami" # Print out statistics of the forecasted price # print(f"Forecasted price: {forecast_price[-1]}") # # Compare the price based on the n_hours before the actual price # print(f"Actual price: {actual_price[-1]}") # # Print out if the price will go down or up # if forecast_price[-1] > actual_price[-1]: # print(f"Price will go up in the following {n_hours} hours") # else: # print(f"Price will go down in the following {n_hours} hours")
5b_LSTM_prediction_BTC_price_and_sentiment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="mug_MpPEExpa" # # Plotting a trajectory of a particle # # # # [Here](http://hyperphysics.phy-astr.gsu.edu/hbase/mot.html) is a link to the Hyperphysics section of kinematics, we will be coding up these equations. # # [Here](https://www.khanacademy.org/science/physics/one-dimensional-motion/kinematic-formulas/a/what-are-the-kinematic-formulas) is a link to the derivation of those equations, also worth a read to brush up on your kinematics. # # # + [markdown] colab_type="text" id="Ve9a2hazbIU7" # First, we will import the two libraries we used earlier, Numpy and Matplotlib. These will let us do some basic maths, and set up our plot. # + colab={} colab_type="code" id="h2ucdCa8D3J0" import numpy as np import matplotlib.pyplot as plt # + [markdown] colab_type="text" id="pBPGb_4GbtuP" # We will then set up some initial parameters. We are going to simulate a projectile launched from the ground at an angle and plot the trajectory. # # We will define everything in separate 'x' and 'y' dimensions, and simply sum the results. # # Firstly, we define the acceleration due to gravity in the x and y direction in units of m/s^2: # + colab={} colab_type="code" id="0ig0QMObcTyM" a_x = 0 a_y = -9.81 # note the sign of acceleration is downwards # + [markdown] colab_type="text" id="e0pxp-TDcmOS" # Now we will choose an initial velocity in m/s, and an angle we want to launch the projectile from in degrees: # + colab={} colab_type="code" id="gm7fXlrecueD" v_i = 100 angle = 60 # + [markdown] colab_type="text" id="wr3EkLtudKz0" # Next, we break down this velocity into an x and y component using trigonometry. Take a look at the figure below and check that you can see the equations in Python matching those in the plot: # # ![alt text](https://i.imgur.com/uks7IpN.png) # # Here is where we call `Numpy`, note that `np.sin(angle)` will assume the angle is in radians but we defined *our* angle in degrees. Luckily `Numpy` *also* has a function to convert degrees into radians called `np.deg2rad(angle)`. # # Putting these two functions together: # # **Note:** When presented with nested brackets, Python will start on the inside and work outwards. # + colab={} colab_type="code" id="sXdmydA76iQy" # + colab={} colab_type="code" id="0KHaItNrdpUg" v_ix = v_i * np.cos(np.deg2rad(angle)) v_iy = v_i * np.sin(np.deg2rad(angle)) # + [markdown] colab_type="text" id="T2fZgB8oeU3H" # We now need to create a list of points, or timestamps, at which to calculate the x/y position of the projectile as time passes. # # We work out how long the projectile will fly for in seconds before it hits the ground (`t_max`), and create a list of interval size `t_step` between 0 and that time. # # Using the time-of-flight equation: # # ![alt text](https://i.imgur.com/4ocn4aU.png) # + colab={} colab_type="code" id="MgV8uHhyevJF" t_max = 2 * v_i * np.sin(np.deg2rad(angle)) / -a_y t_step = 0.01 timestamps = np.arange(0, t_max, t_step) # + [markdown] colab_type="text" id="NrTYvbIxfVHm" # Now, we take those time stamps and plug them into the equation below (once for x and once for y) along with our inital conditions: # # ![alt text](https://i.imgur.com/krbihbA.png) # + colab={} colab_type="code" id="keA4zv1AfqdV" x = v_ix * timestamps + 0.5 * a_x * timestamps**2 y = v_iy * timestamps + 0.5 * a_y * timestamps**2 # + [markdown] colab_type="text" id="HcooVQrKgCda" # Lastly, we set up a plot like in the introductory Notebook and plot our results! # # # + colab={} colab_type="code" id="7Uq53KtggJb2" fig, ax = plt.subplots() ax.plot(x, y) plt.show() # + [markdown] colab_type="text" id="XlPfHer4gY7y" # The entire script together is shown below. I've added extra comments to remind us what is going on, this is good practice for when we start writing more complicated code! I've also added a few extra lines in the plotting part to make our plot look nicer. # # The curly brackets `{}` in the `plt.title()` line is a way to tell Python to expect some formatting after the string, in this case we want to insert the value of `angle` in the title. # + colab={} colab_type="code" id="g9THFF8Agcqn" import numpy as np import matplotlib.pyplot as plt # set up intial conditions, all SI units a_x = 0 a_y = -9.81 # note the sign of acceleration is downwards v_i = 100 angle = 45 # work out initial velocities in x/y v_ix = v_i * np.cos(np.deg2rad(angle)) v_iy = v_i * np.sin(np.deg2rad(angle)) # set up a list of timestamps to evaluate the velocity t_max = 2 * v_i * np.sin(np.deg2rad(angle)) / -a_y t_step = 0.01 timestamps = np.arange(0, t_max, t_step) # use kinematic equation to work out the x and y velocities x = v_ix * timestamps + (0.5 * a_x * timestamps**2) y = v_iy * timestamps + (0.5 * a_y * timestamps**2) # plot the results! fig, ax = plt.subplots() ax.plot(x, y) plt.xlabel('Distance / m') plt.ylabel('Height / m') plt.title('The trajectory of a particle launched at {} degrees'.format(angle)) plt.show() # + [markdown] colab_type="text" id="dC8cx8WvJy3Z" # # Multiple Projectiles # # Lets say we wanted to measure the distance our projectile travelled for a variety of starting conditions such as various angles, heights, gravity, or initial velocities. # # We *could* simply run the code over and over and manually change the code each time ... but we can do better than that. Plus it would be nice to plot all the results at once for easy comparison. # # Here's how we can adapt the above code to plot a few different angles at once. I'll also save the maximum distance for each angle, and print the results. # # As before I'll go through the bits of code one at a time, and then put it all together. It's very similar to the previous example but we're adding a loop so we can plot a few trajectories on one graph. # + [markdown] colab_type="text" id="q-vFfNk5j58T" # As before, we'll start with some imports: # + colab={} colab_type="code" id="nZLYzD3aj_Nq" import numpy as np import matplotlib.pyplot as plt # + [markdown] colab_type="text" id="UrvNyAZNkD-y" # We then define some physical values for our initial conditions: # + colab={} colab_type="code" id="kxbo3RLWkWVB" v_i = 100 a_x = 0 a_y = -9.81 # + [markdown] colab_type="text" id="kxh5DW9okdxR" # Now for the angles. Before we had a single value, this time we're going to create a list of angles to use. We will use `np.arange()` which creates evenly spaced values between two numbers at a given interval. In this case we start at 0 degrees and end at 90 degrees in intervals of 10. See [here](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.arange.html) for more information. # # + colab={} colab_type="code" id="jFvE3__Eki1p" angles = np.arange(0,90,10) print (angles) # + [markdown] colab_type="text" id="Gg-qe5ZAk9py" # Now for the loop. Last time we simply called the equations for our initial conditions along with the timestamps and created the single plot. # # *This* time we're going to loop through each value of angle, and create a plot for each one and add it to the graph. # # Because this is a loop, we can't break it up into multiple cells, but I've added a lot of comments to help understand each line. # + colab={} colab_type="code" id="Eb1py-Vplu5m" # lets loop through each of those angle values and work out the trajectory for i in range(len(angles)): # each projectile gets a unique set of timestamps, so that each trajectory curve has 100 samples t_max = 2 * v_i * np.sin(np.deg2rad(angles[i])) / -a_y t_step = 0.01 timestamps = np.arange(0, t_max, t_step) # we then work out the x/y coordinates of our projectile at each timestamp # this is identical as before, but this time we're just picking the i-th angle from the list we made earlier x = v_i * timestamps * np.cos(np.deg2rad(angles[i])) + (0.5 * a_x * timestamps**2) y = v_i * timestamps * np.sin(np.deg2rad(angles[i])) + (0.5 * a_y * timestamps**2) # add that data to the plot, give it a label plt.plot(x, y, label = angles[i]) # the loop will now repeat for the next angle until we reach the last angle plt.legend() plt.xlabel('Distance / m') plt.ylabel('Height / m') plt.title('The trajectory of a particle launched at various angles') plt.xlim((0,1200)) plt.show() # + [markdown] colab_type="text" id="RUJqy0OYpr6n" # You might think that the last line in the loop above will show a new plot each time in the loop, but it simply means 'add this data to the plot'. # # We then overwrite the values of `x` and `y` in the next loop iteration for the next angle, and add *that* new data to the plot. Then in the end we plot all the data together. # # You may also have noticed the extra bit of code `label = angles[i]`. This gives each line a label, in this case the angle, and we can add these labels to the plot later. # # Once the loop is finished we can finally show the plot on the screen with `plt.show()`. I've also added a few extra formatting things like axis labels and a title like before. The `matplotlib` library is clever and auto asigns a new colour to each plot for us. # # The line `plt.legend()` simply turns on the legend, and it will display the labels we assigned earlier. # + [markdown] colab_type="text" id="GiUAyU75q_De" # Also as before, I've put everything together in one cell and deleted some of the comments: # + colab={} colab_type="code" id="i3JH1_lFKq1e" v_i = 100 a_y = -9.81 a_x = 0 angles = np.arange(0,90,10) # create an empty array, we'll save a value here for each projectile max_distance = [] # lets loop through each of those angle values and work out the trajectory for i in range(len(angles)): # each projectile gets a unique set of timestamps, so that each one is sampled 100 times t_max = 2 * v_i * np.sin(np.deg2rad(angles[i])) / -a_y t_step = 0.01 timestamps = np.arange(0, t_max, t_step) # we then work out the x/y coordinates of our projectile at each timestamp for that angle x = v_i * timestamps * np.cos(np.deg2rad(angles[i])) + (0.5 * a_x * timestamps ** 2) y = v_i * timestamps * np.sin(np.deg2rad(angles[i])) + (0.5 * a_y * timestamps ** 2) # this little 'if' loop will take the last x=position of each projectile so we can compare how far they landed # BUT, the 1st angle is 0 degrees and doesn't go anyway, so I need to explcity put a '0' as the 1st value in 'max_distances' # for all the rest I can just take the last item in the list of x-distances with x[-1] # 'append' just means add the end of the list if angles[i] == 0: max_distance.append(0) else: max_distance.append(x[-1]) # add that data to the plot, give it a label plt.plot(x, y, label = angles[i]) plt.legend() plt.xlabel('Distance / m') plt.ylabel('Height / m') plt.title('The trajectory of a particle launched at various angles') plt.xlim((0,1200)) plt.show() # + [markdown] colab_type="text" id="c6VDcUB8rcww" # Lastly, remember we saved how far each projectile went? We can print these results here by looping through that array: # + colab={} colab_type="code" id="G8GG3pKrPwvW" # print a statement by looping through the list of angles and max_distances # we also use another Numpy function to round the distance value to 2 dp for neatness for i in range(len(angles)): print ('Projectile launched at {a} degrees reached {b} meters'.format(a=angles[i], b=np.round(max_distance[i],2)))
particle-physcis'/plotting a trajectory of a particle.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Checking installation. import numpy as np from scipy import special as sp import matplotlib.pyplot as plt import pandas as pd # %matplotlib inline # #### Parabola # + x = np.array([0,1,2,3,4,5,15,25,35,50]) y = np.sqrt(4*x) plt.plot(x, y, label='up') plt.plot(x, -y, label='down') plt.xlabel('X') plt.ylabel('Y') plt.legend() # - # #### Factorial square # + x = np.array([0,1,2,3,4,5,15,25,35,50]) y = (x/sp.factorial(5))**2 plt.plot(x, y, label='up') plt.plot(x, -y, label='down') plt.xlabel('X') plt.ylabel('Y') plt.legend() # - # #### Use of pandas pd.DataFrame.from_dict({'students':['a','b', 'c'], 'marks':[95,30,40]})
trial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import import_ipynb # + from ElektronImFeld import * from EmptySmarti import * from Smartis import * from Buttons import * from Chart2 import * from ipywidgets import HBox, VBox, Layout from math import pi, sin, cos from IPython.display import clear_output class Replay(Smartis): def __init__(self): super().__init__() self.stopSendInformation() self.updateTime = 1 self.replayIndex = 0 self.dataArray = [] def createButtons(self): #Erzeuge das Object, welches alle Buttons managt self.buttonsObject = Buttons(self) #Erzeuge verschiedene Buttons button_play = self.buttonsObject.newTogglePlayPause("b_play", 40,40) self.buttonsObject.disabled("b_play",True) #Aktualisiere ein Dictionarie mit den Stati der Buttons self.buttonStatesDict = self.buttonsObject.getStates() def createChart(self): pass def createSmartiObjects(self): self.currentSmarti = ElektronImFeld() self.currentSmarti.stopSendInformation() self.currentButtonObject = self.currentSmarti.getButtonObject() self.currentButtons = self.currentButtonObject.getButtons() #self.empty = EmptySmarti() #self.empty.stopSendInformation() #self.empty.visualize() def update(self): self.updating_example_layer_1() def visualize(self): #Holen der einzelnen Buttons und Charts zur Darstellung self.buttonsDict = self.buttonsObject.getButtons() #Darstellung clear_output(wait=True) display(HBox([self.buttonsDict["b_play"]])) self.currentSmarti.visualize() self.currentSmarti.start() for key in self.currentButtons: #print(self.currentButtons[key]) self.currentButtonObject.disabled(key,True) def updating_example_layer_1(self): pass def getData(self): self.dataArray = self.server.getInformation() #print(self.dataArray) def startReplay(self): #Array Positions: smartiPosition = 1 timePosition = 2 buttonPosition = 3 valuePosition = 4 dataLength = len(self.dataArray) i = self.replayIndex currentData = self.dataArray[i] self.currentButtonObject.changeStates(currentData[buttonPosition],self.castingValue(currentData[valuePosition])) #print(currentData[buttonPosition],self.castingValue(currentData[valuePosition])) currentTime = datetime.strptime(currentData[timePosition], "%m/%d/%Y, %H:%M:%S.%f")-timedelta(seconds=0) if i < dataLength-1: nextTime = datetime.strptime(self.dataArray[i+1][timePosition], "%m/%d/%Y, %H:%M:%S.%f")-timedelta(seconds=0) timeDiff = nextTime-currentTime print(timeDiff.total_seconds()) threading.Timer(round(timeDiff.total_seconds(),3), self.startReplay).start() self.replayIndex+=1 else: self.replayIndex = 0 def castingValue(self,value): if value == "True" or value == "False": return bool(value) else: try: a = float(value) b = int(value) if a != b: return a else: return b except: #raise ValueError("failed to coerce str to int or float") return value # + e = Replay() e.createSmartiObjects() e.visualize() # - e.getData() e.startReplay()
cz_beta/2021-08-01/Replay.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 # %matplotlib inline import random random.seed(1100038344) import survivalstan import numpy as np import pandas as pd from stancache import stancache from matplotlib import pyplot as plt print(survivalstan.models.pem_survival_model_gamma) d = stancache.cached( survivalstan.sim.sim_data_exp_correlated, N=100, censor_time=20, rate_form='1 + sex', rate_coefs=[-3, 0.5], ) d['age_centered'] = d['age'] - d['age'].mean() d.head() survivalstan.utils.plot_observed_survival(df=d[d['sex']=='female'], event_col='event', time_col='t', label='female') survivalstan.utils.plot_observed_survival(df=d[d['sex']=='male'], event_col='event', time_col='t', label='male') plt.legend() dlong = stancache.cached( survivalstan.prep_data_long_surv, df=d, event_col='event', time_col='t' ) dlong.head() testfit = survivalstan.fit_stan_survival_model( model_cohort = 'test model', model_code = survivalstan.models.pem_survival_model_gamma, df = dlong, sample_col = 'index', timepoint_end_col = 'end_time', event_col = 'end_failure', formula = '~ age_centered + sex', iter = 5000, chains = 4, seed = 9001, FIT_FUN = stancache.cached_stan_fit, ) survivalstan.utils.print_stan_summary([testfit], pars='lp__') survivalstan.utils.print_stan_summary([testfit], pars='log_baseline') survivalstan.utils.plot_stan_summary([testfit], pars='baseline') survivalstan.utils.plot_coefs([testfit], element='baseline') survivalstan.utils.plot_coefs([testfit]) survivalstan.utils.plot_pp_survival([testfit], fill=False) survivalstan.utils.plot_observed_survival(df=d, event_col='event', time_col='t', color='green', label='observed') plt.legend() survivalstan.utils.plot_pp_survival([testfit], by='sex') ppsurv = survivalstan.utils.prep_pp_survival_data([testfit], by='sex') subplot = plt.subplots(1, 1) survivalstan.utils._plot_pp_survival_data(ppsurv.query('sex == "male"').copy(), subplot=subplot, color='blue', alpha=0.3) survivalstan.utils._plot_pp_survival_data(ppsurv.query('sex == "female"').copy(), subplot=subplot, color='red', alpha=0.3) survivalstan.utils.plot_observed_survival(df=d[d['sex']=='female'], event_col='event', time_col='t', color='red', label='female') survivalstan.utils.plot_observed_survival(df=d[d['sex']=='male'], event_col='event', time_col='t', color='blue', label='male') plt.legend()
Test pem_survival_model_gamma with simulated data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Passo 1. Configuração do projeto # + import joblib import os import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import sklearn from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV from sklearn.metrics import confusion_matrix, classification_report # pacotes incluidos. from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression print('Matplot version: {}'.format(matplotlib.__version__)) print('Numpy version : {}'.format(np.__version__)) print('Pandas version : {}'.format(pd.__version__)) print('Sklearn version: {}'.format(sklearn.__version__)) print('Seaborn version: {}'.format(sns.__version__)) DATA_PATH = os.path.join('..', 'data', 'raw') DATA_FILE = 'diabetes.csv' DIABETES_DATA = os.path.join(DATA_PATH, DATA_FILE) DEPLOY_PATH = os.path.join('..', 'model') DEPLOY_FILE = 'finalized_model.sav' RANDOM_STATE = 42 print('Configuração completa') # - # # Passo 2. Carga de dados def load_data(data_path, data_file): csv_path = os.path.join(data_path, data_file) return pd.read_csv(csv_path) diabetes_data = load_data(DATA_PATH, DATA_FILE) # # Passo 3. Análise exploratória de dados print('Diabetes dataset tem {} linhas e {} colunas'.format(diabetes_data.shape[0], diabetes_data.shape[1])) diabetes_data.head() plt.figure(figsize=(10, 5)) _ = sns.countplot(x='Outcome', data=diabetes_data) # ## Verificando valores nulos (NA) plt.figure(figsize=(10,5)) _ = sns.heatmap(diabetes_data.isnull(), yticklabels=False, cbar=False) # ## Correlação e distribuição dos dados X = diabetes_data.drop(['Outcome'], axis=1) y = diabetes_data['Outcome'] _ = pd.plotting.scatter_matrix(X, c=y, figsize=[10,10], marker='D') # # Passo 4. Preparação de dados # ## 4.1 Amostragem aleatória # diabetes_amostra = diabetes_data.groupby('Outcome', group_keys=False).apply( # lambda x: x.sample(min(len(x), 4), random_state=42) # ) # diabetes_amostra = diabetes_amostra.reset_index(drop=True) # diabetes_amostra # ## 4.2 Tratamento de dados ausentes # ### Identificação de dados ausentes diabetes_data.tail(10) num_ausentes = (diabetes_data[['Glucose', 'BloodPressure', 'SkinThickness' , 'Insulin', 'BMI']] == 0).sum() print('Quantidade de dados ausentes:\n{}'.format(num_ausentes)) diabetes_data[['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']] =diabetes_data[ ['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']].replace(0, np.nan) diabetes_data.isnull().sum() for c in range(diabetes_data.shape[1]): ausentes = diabetes_data[[diabetes_data.columns[c]]].isnull().sum() percentual = ausentes / diabetes_data.shape[0] * 100 print ("> %s, ausentes: %d (%.2f%%)" % (diabetes_data.columns[c], ausentes, percentual) ) plt.figure(figsize=(10,5)) _ = sns.heatmap(diabetes_data.isnull(), yticklabels=False, cbar=False) # ### Remocao e Imputação de dados ausentes # diabetes_data.drop(['Insulin'], axis=1, inplace=True) from sklearn.impute import SimpleImputer imputer = SimpleImputer(missing_values=np.nan, strategy='mean') diabetes_data[['Glucose', 'BloodPressure', 'SkinThickness', 'BMI']] = imputer.fit_transform( diabetes_data[['Glucose', 'BloodPressure', 'SkinThickness', 'BMI']].values ) diabetes_data.describe() # ## Passo 5. Transformação de dados # + from sklearn.preprocessing import MinMaxScaler #from sklearn.preprocessing import StandardScaler X = diabetes_data.drop(['Outcome'], axis=1) y = diabetes_data['Outcome'] scaler = MinMaxScaler() X_scaled = pd.DataFrame(scaler.fit_transform(X)) X_scaled.columns = X.columns #scaler = StandardScaler() #X_scaled = pd.DataFrame(scaler.fit_transform(X)) #X_scaled.columns = X.columns # - X_scaled.head() # # Passo 6. Particionamento do dataset X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, stratify=y, test_size=.3, random_state=RANDOM_STATE) # # Passo 7. Treinamento do modelo def build_classifiers(): """Retorna uma lista com os classificadores que serão avaliados""" classifiers=[] classifiers.append( ('knn', # nome do classificador. KNeighborsClassifier(), # instancia do classificador. {'n_neighbors': range(1, 33, 2)} # hyperparametros. ) ) classifiers.append( ('lr', # nome do classificador. LogisticRegression(max_iter=1000), # instancia do classificador. {'penalty': ['l2'], 'C': [100, 10, 1, 0.1, 0.01]} # hyperparametros. ) ) classifiers.append( ('lr', # nome do classificador. LogisticRegression(max_iter=1000), # instancia do classificador. {'penalty': ['l2'], 'C': [100, 10, 1, 0.1, 0.01]} # hyperparametros. ) )
aprendizado-de-maquina-i/analise-de-classificadores/.ipynb_checkpoints/modelagem_v2-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Backtesting example # =========== # This notebook assumes you have the bitfinex library installed import sys sys.path.append('..') from bitfinex.backtest import data # %pylab inline # fetching data from Quandl # ----------------------------- # For better access to Quandl data it is nice to have an API key. Let's see what kind of data Quandl keeps for bitfinex. with open('quandl.key', 'r') as f: key = f.read().strip() data.Quandl.search('bitfinex') # Apparently Quandl is offering us only daily prices. For backtesting it is nice to have higher frequency data. For that, another possible source is bitcoincharts, which provides some nice CSVs: Trade data is delayed by approx. 15 minutes. It will return the 2000 most recent trades.. The CSV to be loaded below is about 50MB in size: http://api.bitcoincharts.com/v1/csv/bitfinexUSD.csv.gz history = data.CSVDataSource('bitfinexUSD.csv.gz',fields=['unix_time', 'price', 'ammount']) history.parse_timestamp_column('unix_time',unit='s') #history = data.pd.read_csv('bitfinexUSD.csv.gz',names=['unix_time', 'price', 'amount']) #history['time'] = data.pd.to_datetime(history['unix_time'],unit='s') #history = history.set_index('time') # Let's check how many points we have. history.data.info() history.data[-10:] history.data[-500:].plot(y='price',figsize=(10,6), style='-o', grid=True); history.data[-50:].plot(y='ammount', kind='bar',figsize=(10,6), grid=True);
examples/Backtest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import pandas as pd import numpy as np import os from sklearn.svm import SVC athlete_events = pd.read_csv('../CSV for ML models/athlete_events.csv') athlete_events.head() filter_data = athlete_events[["Sex", "Age", "Height", "Weight", "Team", "Year", "Season", "Sport", "Event", "Medal"]] winter_data = filter_data[filter_data["Season"] == "Winter"] winter_data.head() summer_data = filter_data[(filter_data["Season"] == "Summer")] summer_data.head() summer_sports = summer_data["Sport"].unique() winter_sports = winter_data['Sport'].unique() df1 = summer_data[(summer_data["Sport"] == "Gymnastics") & (summer_data["Sex"] == "M")] df1 = df1[["Age", "Height", "Weight", "Medal"]] df1 = df1.dropna() df1 = df1.reset_index(drop = True) df1.head() target = df1["Medal"] target_names = ["Gold", "Silver", "Bronze"] data = df1.drop("Medal", axis=1) feature_names = data.columns data.head() X_train, X_test, y_train, y_test = train_test_split(data, target, random_state=42) model = SVC(kernel='rbf') model.fit(X_train, y_train) print('Test Acc: %.3f' % model.score(X_test, y_test)) from sklearn.metrics import classification_report predictions = model.predict(X_test) print(classification_report(y_test, predictions, target_names=target_names))
ML Models/SVM_olympic_medalists.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### _Speech Processing Labs 2020: Signals: Module 2_ # + ## Run this first! # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import cmath from math import floor from matplotlib.animation import FuncAnimation from IPython.display import HTML plt.style.use('ggplot') from dspMisc import * # - # # 1 Building the Source # # ### Learning Outcomes # * Be able to describe what an impulse train is # * Be able to explain why an impulse train is used to model the voice source # * Be able to describe the frequency response of single impulse and and impulse train # # # ### Need to Know # * Topic Videos: Harmonics, Impulse Train, Frequency Domain # * [Interpreting the Discrete Fourier Transform](./sp-m1-5-interpreting-the-dft.ipynb) # # # # # ## 1.1 A Single Impulse Response # # The previous notebooks looked at the [Discrete Fourier Transform](sp-m1-4-discrete-fourier-transform.ipynb) does and how to [interpret the DFT's outputs](sp-m1-5-interpreting-the-dft.ipynb). Now we've got a grip on that, # we can start thinking about how this ability to go from the time domain to the frequency domain (and back again) can help us build up a model of speech. # # Let's start simple: What happens when the input is just a single **impulse**? What can this tell us? # # The following code cells generate a single impulse in an input sequence of length `N=64`, given a specific sampling rate `f_s`. As we saw previously, the DFT frequency resolution is completely determined by these two parameters. # # + ## Set the number of samples N, sampling rate f_s ## As usual all our interpretation of the DFT outputs will depend on the values of these parameters N=64 #sampling rate: f_s = 64 ## sample time t_s = 1/f_s ## Check our parameters print("Number of samples: N = %d" % N) print("sampling rate: f_s = %f\nsampling time: t_s: %f" % (f_s, t_s)) # + ## indices of input sequence of size N nsteps = np.array(range(N)) ## the sequence of time steps given the sampling rate time_steps = t_s * nsteps ## Now let's create an impulse response # First, we create a sequence of length N but all zeros x_impulse = np.zeros(N) # And then set a single element to be 1, i.e. a single impulse x_impulse[1]=1 ## Now, we plot it: fig, timedom = plt.subplots(figsize=(16, 4)) timedom.scatter(time_steps, x_impulse, color='magenta') timedom.plot(time_steps, x_impulse, color='magenta') timedom.set_ylabel("Amplitude") timedom.set_xlabel("Time (s)") timedom.set_title("A single impulse as input") # - # The plot above shows an time vs amplitude graph of input $x[n]$, where all but 1 of the $N=64$ input points are zero, and $x[1]=1$. # # Now let's look at the DFT of this single impulse. # + ## Now let's look at the DFT outputs of the impulse: mag_impulse, phase_impulse = get_dft_mag_phase(x_impulse, N) ## Note: in this case N=f_s so the DFT output frequencies are the same as the DFT output indices ## We'll look at cases where this differs later dft_freqs = get_dft_freqs_all(f_s, N) ## plot the magnitudes, but this time we're going to need to zoom in a bit on the y-axis: fig, timedom = plt.subplots(figsize=(16, 4)) timedom.set(ylim=(-1, 4)) timedom.plot([0,np.max(dft_freqs)], [0,0], color='grey') timedom.scatter(dft_freqs, mag_impulse) ## Plot the phases fig, timedom = plt.subplots(figsize=(16, 4)) timedom.plot([0,np.max(dft_freqs)], [0,0], color='grey') timedom.scatter(dft_freqs, phase_impulse) ## You should see that magnitudes for all the bins is one (you might need to change the y axis limit) # - # ### Exercise: # **Question** # # * What does the magnitude spectrum show? # * What does the phase spectrum show? # * How might this be useful for modelling the vocal source? # # ### Notes # ## 1.2 From Impulse to Impulse Train # # The DFT analysis above showed us that a single impulse can potentially be linked to any frequency! # This might not seem very useful at first, but actually we can use this to start making a model of the voice source that we can shape in the way we want. The first thing is to add a **periodic** element. To do this we'll make an **impulse train**: a sequence `x` with value 1 every `n_period` samples, and zero otherwise. # # We should note though that not all speech sounds are periodic. For example, fricatives like /s/ and /sh/ are more like white noise. We'll have to model these in other ways. # # Now let's make an impulse train with `N=64` samples, a sampling rate of `f_s=64` samples per second, and an impulse period `n_period=4`: # + ## Let's keep the number of samples and the sampling rate the same as above N=64 f_s = 64 t_s = 1/f_s nsteps = np.array(range(N)) time_steps = t_s * nsteps ## Now let's create an impulse response: # create a sequence of length N but all zeros x_impulse_train = np.zeros(N) # set the impulse period to be 1 impulse every n_period samples n_period = 4 # Find the indices which will carry the impulses, i.e. every n_period-th one starting from 0 to N impulse_indices = np.arange(0, N, n_period) ## Set the impulses x_impulse_train[impulse_indices] = 1 ## Plot it! fig, timedom = plt.subplots(figsize=(16, 4)) timedom.scatter(time_steps, x_impulse_train, color='magenta') timedom.plot(time_steps, x_impulse_train, color='magenta') timedom.set_ylabel("Amplitude") timedom.set_xlabel("Time (s)") timedom.set_title("An impulse train: an impulse every %d samples" % n_period) # - # You should see a repeated sequence over 1 second where every 4th sample has amplitude 1, and all the rest have value 0. # # ### DFT of an impulse train # # Now let's look at the DFT of this impulse train. # + ## Get the DFT outputs: magnitude and phase mag_impulse_train, phase_impulse_train = get_dft_mag_phase(x_impulse_train, N) ## Get the DFT output frequencies, for plotting dft_freqs = get_dft_freqs_all(f_s, N) ## plot the magnitudes, but this time we're going to need to zoom in a bit on the y-axis: fig, fdom = plt.subplots(figsize=(16, 4)) fdom.set(ylim=(-1, N), xlim=(-1, N/2)) fdom.scatter(dft_freqs, mag_impulse_train) fdom.set_xlabel("Frequency (Hz)") fdom.set_ylabel("Magnitude") fdom.set_title("Impulse Train Magnitude Response (First N/2 DFT outputs)") ## Plot the phases fig, fdom = plt.subplots(figsize=(16, 4)) fdom.set(ylim=(-4,4), xlim=(-1, N/2)) fdom.scatter(dft_freqs, phase_impulse_train) fdom.set_xlabel("Frequency (Hz)") fdom.set_ylabel("Phase (radians)") fdom.set_title("Impulse Train Phase Response (First N/2 DFT outputs)") # - # The magnitude (top) plot indicates that the impulse train has frequency components at multiples of 8 Hz. # The phase plot (bottom) doesn't show a phase shift. This also makes sense since our input sequence started with a 1, so acts like cosine with no phase shift. # # **Note** We only plotted the first $N/2$ DFT outputs since we saw previously that DFT outputs are symmetrical around $N/2$. # # ### Exercise # # * What the relationship between the non-zero magnitudes in the example above? # * What's the fundamental frequency of the impulse train? # * What DFT output frequencies have non-zero magnitudes if you you change `n_period` to `8`? # * What happens when the frequency doesn't exactly match one of the DFT outputs? # * e.g. try `n_period = 5` # ### Notes # ## 1.3 Impulse train fundamental frequency # # Since we eventually want to model the vocal source, we want to be able to create impulse trains with specific fundamental frequencies ($F_0$). As usual for digital signal processing, the actual sequence we generate to represent this will depend on the sample rate. # # The following cell defines a function to create impulse trains varying the sample rate, desired frequency, and number of samples. We'll use this later to see how this interacts with different types of filters. # def make_impulse_train(sample_rate, frequency, n_samples): # make an arrange of n_samples, all zeros to start x = np.zeros(n_samples) # Determine where the impulses go based on the sample rate # The time between samples: sample_time = 1/sample_rate #A frequency of f cycles/second means the wavelength=1/f # So samples_per_cycle = wavelength/t_s = 1/frequency / 1/sample_rate = sample_rate/frequency ## We need to round to the nearest integer samples_per_cycle = round(sample_rate/frequency) # Set indices for impulses impulse_positions = np.arange(0, n_samples, samples_per_cycle) #print("impulse_positions:", impulse_positions) # set the impulses x[impulse_positions] = 1 ## return the time steps associated with the impulse train samples nsteps = np.array(range(n_samples)) time_steps = (1/sample_rate) * nsteps return x, time_steps # + ## Set the number of samples and sampling rate N = 64 f_s = 64 ## set our desired impulse train frequency freq = 65 x_impulse_train, time_steps = make_impulse_train(sample_rate=f_s, frequency=freq, n_samples=N) fig, timedom = plt.subplots(figsize=(16, 4)) timedom.scatter(time_steps, x_impulse_train, color='magenta') timedom.plot(time_steps, x_impulse_train, color='magenta') # - # ### Exercise # # Try changing the frequency of the impulse train. # * What's the highest frequency you can actually generate if the sample rate equals 64? # # ### Notes # ### Next: Filters # # Now that we've made a (sort of) source, we want to create a filter that can alter input (impulse train) so that the output looks the way we want it it. In class you've seen two types of filters: # # * Finite Impulse Response (FIR) # * Infinite Impulse Response (IIR) # # Both perform a transform on an input sequence $x[n]$ to give us some desired output sequence $y[n]$. The difference between the two types of filters is basically whether we only use the inputs to derive each output $y[n]$ (FIR), or whether we also use previous outputs (IIR). # # The following notebooks illustrate some of the properties of FIR and IIR filters.
signals/sp-m2-1-impulse-as-source.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Chapter 3 – Classification** # # _This notebook contains all the sample code and solutions to the exercises in chapter 3._ # # Setup # First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures: # + # To support both python 2 and python 3 from __future__ import division, print_function, unicode_literals # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) # To plot pretty figures # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # Where to save the figures PROJECT_ROOT_DIR = "." CHAPTER_ID = "classification" def save_fig(fig_id, tight_layout=True): path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png") print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format='png', dpi=300) # - # # MNIST # **Warning**: `fetch_mldata()` is deprecated since Scikit-Learn 0.20. You should use `fetch_openml()` instead. However, it returns the unsorted MNIST dataset, whereas `fetch_mldata()` returned the dataset sorted by target (the training set and the test test were sorted separately). In general, this is fine, but if you want to get the exact same results as before, you need to sort the dataset using the following function: def sort_by_target(mnist): reorder_train = np.array(sorted([(target, i) for i, target in enumerate(mnist.target[:60000])]))[:, 1] reorder_test = np.array(sorted([(target, i) for i, target in enumerate(mnist.target[60000:])]))[:, 1] mnist.data[:60000] = mnist.data[reorder_train] mnist.target[:60000] = mnist.target[reorder_train] mnist.data[60000:] = mnist.data[reorder_test + 60000] mnist.target[60000:] = mnist.target[reorder_test + 60000] import sklearn print('The scikit-learn version is {}.'.format(sklearn.__version__)) try: from sklearn.datasets import fetch_openml mnist = fetch_openml('mnist_784', version=1, cache=True) mnist.target = mnist.target.astype(np.int8) # fetch_openml() returns targets as strings sort_by_target(mnist) # fetch_openml() returns an unsorted dataset except ImportError: from sklearn.datasets import fetch_mldata mnist = fetch_mldata('MNIST original') mnist["data"], mnist["target"] mnist.data.shape X, y = mnist["data"], mnist["target"] X.shape y.shape 28*28 # + some_digit = X[36000] some_digit_image = some_digit.reshape(28, 28) plt.imshow(some_digit_image, cmap = mpl.cm.binary, interpolation="nearest") plt.axis("off") save_fig("some_digit_plot") plt.show() # - def plot_digit(data): image = data.reshape(28, 28) plt.imshow(image, cmap = mpl.cm.binary, interpolation="nearest") plt.axis("off") # EXTRA def plot_digits(instances, images_per_row=10, **options): size = 28 images_per_row = min(len(instances), images_per_row) images = [instance.reshape(size,size) for instance in instances] n_rows = (len(instances) - 1) // images_per_row + 1 row_images = [] n_empty = n_rows * images_per_row - len(instances) images.append(np.zeros((size, size * n_empty))) for row in range(n_rows): rimages = images[row * images_per_row : (row + 1) * images_per_row] row_images.append(np.concatenate(rimages, axis=1)) image = np.concatenate(row_images, axis=0) plt.imshow(image, cmap = mpl.cm.binary, **options) plt.axis("off") plt.figure(figsize=(9,9)) example_images = np.r_[X[:12000:600], X[13000:30600:600], X[30600:60000:590]] plot_digits(example_images, images_per_row=10) save_fig("more_digits_plot") plt.show() y[36000] X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:] # + import numpy as np shuffle_index = np.random.permutation(60000) X_train, y_train = X_train[shuffle_index], y_train[shuffle_index] # - # # Binary classifier y_train_5 = (y_train == 5) y_test_5 = (y_test == 5) y_train_5 # **Note**: a few hyperparameters will have a different default value in future versions of Scikit-Learn, so a warning is issued if you do not set them explicitly. This is why we set `max_iter=5` and `tol=-np.infty`, to get the same results as in the book, while avoiding the warnings. # + from sklearn.linear_model import SGDClassifier sgd_clf = SGDClassifier(max_iter=5, tol=-np.infty, random_state=42) sgd_clf.fit(X_train, y_train_5) # - some_digit sgd_clf.predict([some_digit]) from sklearn.model_selection import cross_val_score cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy") # + from sklearn.model_selection import StratifiedKFold from sklearn.base import clone skfolds = StratifiedKFold(n_splits=3, random_state=42) for train_index, test_index in skfolds.split(X_train, y_train_5): clone_clf = clone(sgd_clf) X_train_folds = X_train[train_index] y_train_folds = (y_train_5[train_index]) X_test_fold = X_train[test_index] y_test_fold = (y_train_5[test_index]) clone_clf.fit(X_train_folds, y_train_folds) y_pred = clone_clf.predict(X_test_fold) n_correct = sum(y_pred == y_test_fold) print(n_correct / len(y_pred)) # - from sklearn.base import BaseEstimator class Never5Classifier(BaseEstimator): def fit(self, X, y=None): pass def predict(self, X): return np.zeros((len(X), 1), dtype=bool) never_5_clf = Never5Classifier() cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring="accuracy") # + from sklearn.model_selection import cross_val_predict y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3) # + from sklearn.metrics import confusion_matrix confusion_matrix(y_train_5, y_train_pred) # - y_train_perfect_predictions = y_train_5 confusion_matrix(y_train_5, y_train_perfect_predictions) # + from sklearn.metrics import precision_score, recall_score precision_score(y_train_5, y_train_pred) # - 4344 / (4344 + 1307) recall_score(y_train_5, y_train_pred) 4344 / (4344 + 1077) from sklearn.metrics import f1_score f1_score(y_train_5, y_train_pred) 4344 / (4344 + (1077 + 1307)/2) plot_digit(some_digit) y_scores = sgd_clf.decision_function([some_digit]) y_scores threshold = 0 y_some_digit_pred = (y_scores > threshold) y_some_digit_pred threshold = 200000 y_some_digit_pred = (y_scores > threshold) y_some_digit_pred y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method="decision_function") # Note: there was an [issue](https://github.com/scikit-learn/scikit-learn/issues/9589) in Scikit-Learn 0.19.0 (fixed in 0.19.1) where the result of `cross_val_predict()` was incorrect in the binary classification case when using `method="decision_function"`, as in the code above. The resulting array had an extra first dimension full of 0s. Just in case you are using 0.19.0, we need to add this small hack to work around this issue: y_scores.shape # hack to work around issue #9589 in Scikit-Learn 0.19.0 if y_scores.ndim == 2: y_scores = y_scores[:, 1] # + from sklearn.metrics import precision_recall_curve precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores) # + def plot_precision_recall_vs_threshold(precisions, recalls, thresholds): plt.plot(thresholds, precisions[:-1], "b--", label="Precision", linewidth=2) plt.plot(thresholds, recalls[:-1], "g-", label="Recall", linewidth=2) plt.xlabel("Threshold", fontsize=16) plt.legend(loc="upper left", fontsize=16) plt.ylim([0, 1]) plt.figure(figsize=(8, 4)) plot_precision_recall_vs_threshold(precisions, recalls, thresholds) plt.xlim([-700000, 700000]) save_fig("precision_recall_vs_threshold_plot") plt.show() # - (y_train_pred == (y_scores > 0)).all() y_train_pred_90 = (y_scores > 70000) precision_score(y_train_5, y_train_pred_90) recall_score(y_train_5, y_train_pred_90) # + def plot_precision_vs_recall(precisions, recalls): plt.plot(recalls, precisions, "b-", linewidth=2) plt.xlabel("Recall", fontsize=16) plt.ylabel("Precision", fontsize=16) plt.axis([0, 1, 0, 1]) plt.figure(figsize=(8, 6)) plot_precision_vs_recall(precisions, recalls) save_fig("precision_vs_recall_plot") plt.show() # - # # ROC curves # + from sklearn.metrics import roc_curve fpr, tpr, thresholds = roc_curve(y_train_5, y_scores) # + def plot_roc_curve(fpr, tpr, label=None): plt.plot(fpr, tpr, linewidth=2, label=label) plt.plot([0, 1], [0, 1], 'k--') plt.axis([0, 1, 0, 1]) plt.xlabel('False Positive Rate', fontsize=16) plt.ylabel('True Positive Rate', fontsize=16) plt.figure(figsize=(8, 6)) plot_roc_curve(fpr, tpr) save_fig("roc_curve_plot") plt.show() # + from sklearn.metrics import roc_auc_score roc_auc_score(y_train_5, y_scores) # - # **Note**: we set `n_estimators=10` to avoid a warning about the fact that its default value will be set to 100 in Scikit-Learn 0.22. from sklearn.ensemble import RandomForestClassifier forest_clf = RandomForestClassifier(n_estimators=10, random_state=42) y_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3, method="predict_proba") y_scores_forest = y_probas_forest[:, 1] # score = proba of positive class fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5,y_scores_forest) plt.figure(figsize=(8, 6)) plt.plot(fpr, tpr, "b:", linewidth=2, label="SGD") plot_roc_curve(fpr_forest, tpr_forest, "Random Forest") plt.legend(loc="lower right", fontsize=16) save_fig("roc_curve_comparison_plot") plt.show() roc_auc_score(y_train_5, y_scores_forest) y_train_pred_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3) precision_score(y_train_5, y_train_pred_forest) recall_score(y_train_5, y_train_pred_forest) # # Multiclass classification sgd_clf.fit(X_train, y_train) sgd_clf.predict([some_digit]) some_digit_scores = sgd_clf.decision_function([some_digit]) some_digit_scores np.argmax(some_digit_scores) sgd_clf.classes_ sgd_clf.classes_[5] from sklearn.multiclass import OneVsOneClassifier ovo_clf = OneVsOneClassifier(SGDClassifier(max_iter=5, tol=-np.infty, random_state=42)) ovo_clf.fit(X_train, y_train) ovo_clf.predict([some_digit]) len(ovo_clf.estimators_) forest_clf.fit(X_train, y_train) forest_clf.predict([some_digit]) forest_clf.predict_proba([some_digit]) cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring="accuracy") from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train.astype(np.float64)) cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring="accuracy") y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3) conf_mx = confusion_matrix(y_train, y_train_pred) conf_mx def plot_confusion_matrix(matrix): """If you prefer color and a colorbar""" fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111) cax = ax.matshow(matrix) fig.colorbar(cax) plt.matshow(conf_mx, cmap=plt.cm.gray) save_fig("confusion_matrix_plot", tight_layout=False) plt.show() row_sums = conf_mx.sum(axis=1, keepdims=True) norm_conf_mx = conf_mx / row_sums norm_conf_mx np.fill_diagonal(norm_conf_mx, 0) plt.matshow(norm_conf_mx, cmap=plt.cm.gray) save_fig("confusion_matrix_errors_plot", tight_layout=False) plt.show() # + cl_a, cl_b = 3, 5 X_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)] X_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)] X_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)] X_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)] plt.figure(figsize=(8,8)) plt.subplot(221); plot_digits(X_aa[:25], images_per_row=5) plt.subplot(222); plot_digits(X_ab[:25], images_per_row=5) plt.subplot(223); plot_digits(X_ba[:25], images_per_row=5) plt.subplot(224); plot_digits(X_bb[:25], images_per_row=5) save_fig("error_analysis_digits_plot") plt.show() # - # # Multilabel classification # + from sklearn.neighbors import KNeighborsClassifier y_train_large = (y_train >= 7) y_train_odd = (y_train % 2 == 1) y_multilabel = np.c_[y_train_large, y_train_odd] knn_clf = KNeighborsClassifier() knn_clf.fit(X_train, y_multilabel) # - knn_clf.predict([some_digit]) # **Warning**: the following cell may take a very long time (possibly hours depending on your hardware). y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3, n_jobs=-1) f1_score(y_multilabel, y_train_knn_pred, average="macro") # # Multioutput classification np.random.randint(0,10,(3,2)) noise = np.random.randint(0, 100, (len(X_train), 784)) X_train_mod = X_train + noise noise = np.random.randint(0, 100, (len(X_test), 784)) X_test_mod = X_test + noise y_train_mod = X_train y_test_mod = X_test len(X_train_mod) some_index = 5500 plt.subplot(121); plot_digit(X_test_mod[some_index]) plt.subplot(122); plot_digit(y_test_mod[some_index]) save_fig("noisy_digit_example_plot") plt.show() knn_clf.fit(X_train_mod, y_train_mod) clean_digit = knn_clf.predict([X_test_mod[some_index]]) plot_digit(clean_digit) save_fig("cleaned_digit_example_plot") # # Extra material # ## Dummy (ie. random) classifier from sklearn.dummy import DummyClassifier dmy_clf = DummyClassifier() y_probas_dmy = cross_val_predict(dmy_clf, X_train, y_train_5, cv=3, method="predict_proba") y_scores_dmy = y_probas_dmy[:, 1] fprr, tprr, thresholdsr = roc_curve(y_train_5, y_scores_dmy) plot_roc_curve(fprr, tprr) # ## KNN classifier from sklearn.neighbors import KNeighborsClassifier knn_clf = KNeighborsClassifier(n_jobs=-1, weights='distance', n_neighbors=4) knn_clf.fit(X_train, y_train) y_knn_pred = knn_clf.predict(X_test) from sklearn.metrics import accuracy_score accuracy_score(y_test, y_knn_pred) # + from scipy.ndimage.interpolation import shift def shift_digit(digit_array, dx, dy, new=0): return shift(digit_array.reshape(28, 28), [dy, dx], cval=new).reshape(784) plot_digit(shift_digit(some_digit, 5, 1, new=100)) # + X_train_expanded = [X_train] y_train_expanded = [y_train] for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)): shifted_images = np.apply_along_axis(shift_digit, axis=1, arr=X_train, dx=dx, dy=dy) X_train_expanded.append(shifted_images) y_train_expanded.append(y_train) X_train_expanded = np.concatenate(X_train_expanded) y_train_expanded = np.concatenate(y_train_expanded) X_train_expanded.shape, y_train_expanded.shape # - knn_clf.fit(X_train_expanded, y_train_expanded) y_knn_expanded_pred = knn_clf.predict(X_test) accuracy_score(y_test, y_knn_expanded_pred) ambiguous_digit = X_test[2589] knn_clf.predict_proba([ambiguous_digit]) plot_digit(ambiguous_digit) # # Exercise solutions # ## 1. An MNIST Classifier With Over 97% Accuracy # **Warning**: the next cell may take hours to run, depending on your hardware. # + from sklearn.model_selection import GridSearchCV param_grid = [{'weights': ["uniform", "distance"], 'n_neighbors': [3, 4, 5]}] knn_clf = KNeighborsClassifier() grid_search = GridSearchCV(knn_clf, param_grid, cv=5, verbose=3, n_jobs=-1) grid_search.fit(X_train, y_train) # - grid_search.best_params_ grid_search.best_score_ # + from sklearn.metrics import accuracy_score y_pred = grid_search.predict(X_test) accuracy_score(y_test, y_pred) # - # ## 2. Data Augmentation from scipy.ndimage.interpolation import shift def shift_image(image, dx, dy): image = image.reshape((28, 28)) shifted_image = shift(image, [dy, dx], cval=0, mode="constant") return shifted_image.reshape([-1]) # + image = X_train[1000] shifted_image_down = shift_image(image, 0, 5) shifted_image_left = shift_image(image, -5, 0) plt.figure(figsize=(12,3)) plt.subplot(131) plt.title("Original", fontsize=14) plt.imshow(image.reshape(28, 28), interpolation="nearest", cmap="Greys") plt.subplot(132) plt.title("Shifted down", fontsize=14) plt.imshow(shifted_image_down.reshape(28, 28), interpolation="nearest", cmap="Greys") plt.subplot(133) plt.title("Shifted left", fontsize=14) plt.imshow(shifted_image_left.reshape(28, 28), interpolation="nearest", cmap="Greys") plt.show() # + X_train_augmented = [image for image in X_train] y_train_augmented = [label for label in y_train] for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)): for image, label in zip(X_train, y_train): X_train_augmented.append(shift_image(image, dx, dy)) y_train_augmented.append(label) X_train_augmented = np.array(X_train_augmented) y_train_augmented = np.array(y_train_augmented) # - shuffle_idx = np.random.permutation(len(X_train_augmented)) X_train_augmented = X_train_augmented[shuffle_idx] y_train_augmented = y_train_augmented[shuffle_idx] knn_clf = KNeighborsClassifier(**grid_search.best_params_) knn_clf.fit(X_train_augmented, y_train_augmented) y_pred = knn_clf.predict(X_test) accuracy_score(y_test, y_pred) # By simply augmenting the data, we got a 0.5% accuracy boost. :) # ## 3. Tackle the Titanic dataset # The goal is to predict whether or not a passenger survived based on attributes such as their age, sex, passenger class, where they embarked and so on. # First, login to [Kaggle](https://www.kaggle.com/) and go to the [Titanic challenge](https://www.kaggle.com/c/titanic) to download `train.csv` and `test.csv`. Save them to the `datasets/titanic` directory. # Next, let's load the data: # + import os TITANIC_PATH = os.path.join("datasets", "titanic") # + import pandas as pd def load_titanic_data(filename, titanic_path=TITANIC_PATH): csv_path = os.path.join(titanic_path, filename) return pd.read_csv(csv_path) # - train_data = load_titanic_data("train.csv") test_data = load_titanic_data("test.csv") # The data is already split into a training set and a test set. However, the test data does *not* contain the labels: your goal is to train the best model you can using the training data, then make your predictions on the test data and upload them to Kaggle to see your final score. # Let's take a peek at the top few rows of the training set: train_data.head() # The attributes have the following meaning: # * **Survived**: that's the target, 0 means the passenger did not survive, while 1 means he/she survived. # * **Pclass**: passenger class. # * **Name**, **Sex**, **Age**: self-explanatory # * **SibSp**: how many siblings & spouses of the passenger aboard the Titanic. # * **Parch**: how many children & parents of the passenger aboard the Titanic. # * **Ticket**: ticket id # * **Fare**: price paid (in pounds) # * **Cabin**: passenger's cabin number # * **Embarked**: where the passenger embarked the Titanic # Let's get more info to see how much data is missing: train_data.info() # Okay, the **Age**, **Cabin** and **Embarked** attributes are sometimes null (less than 891 non-null), especially the **Cabin** (77% are null). We will ignore the **Cabin** for now and focus on the rest. The **Age** attribute has about 19% null values, so we will need to decide what to do with them. Replacing null values with the median age seems reasonable. # The **Name** and **Ticket** attributes may have some value, but they will be a bit tricky to convert into useful numbers that a model can consume. So for now, we will ignore them. # Let's take a look at the numerical attributes: train_data.describe() # * Yikes, only 38% **Survived**. :( That's close enough to 40%, so accuracy will be a reasonable metric to evaluate our model. # * The mean **Fare** was £32.20, which does not seem so expensive (but it was probably a lot of money back then). # * The mean **Age** was less than 30 years old. # Let's check that the target is indeed 0 or 1: train_data["Survived"].value_counts() # Now let's take a quick look at all the categorical attributes: train_data["Pclass"].value_counts() train_data["Sex"].value_counts() train_data["Embarked"].value_counts() # The Embarked attribute tells us where the passenger embarked: C=Cherbourg, Q=Queenstown, S=Southampton. # Now let's build our preprocessing pipelines. We will reuse the `DataframeSelector` we built in the previous chapter to select specific attributes from the `DataFrame`: # + from sklearn.base import BaseEstimator, TransformerMixin # A class to select numerical or categorical columns # since Scikit-Learn doesn't handle DataFrames yet class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): return X[self.attribute_names] # - # Let's build the pipeline for the numerical attributes: # # **Warning**: Since Scikit-Learn 0.20, the `sklearn.preprocessing.Imputer` class was replaced by the `sklearn.impute.SimpleImputer` class. # + from sklearn.pipeline import Pipeline try: from sklearn.impute import SimpleImputer # Scikit-Learn 0.20+ except ImportError: from sklearn.preprocessing import Imputer as SimpleImputer num_pipeline = Pipeline([ ("select_numeric", DataFrameSelector(["Age", "SibSp", "Parch", "Fare"])), ("imputer", SimpleImputer(strategy="median")), ]) # - num_pipeline.fit_transform(train_data) # We will also need an imputer for the string categorical columns (the regular `SimpleImputer` does not work on those): # Inspired from stackoverflow.com/questions/25239958 class MostFrequentImputer(BaseEstimator, TransformerMixin): def fit(self, X, y=None): self.most_frequent_ = pd.Series([X[c].value_counts().index[0] for c in X], index=X.columns) return self def transform(self, X, y=None): return X.fillna(self.most_frequent_) # **Warning**: earlier versions of the book used the `LabelBinarizer` or `CategoricalEncoder` classes to convert each categorical value to a one-hot vector. It is now preferable to use the `OneHotEncoder` class. Since Scikit-Learn 0.20 it can handle string categorical inputs (see [PR #10521](https://github.com/scikit-learn/scikit-learn/issues/10521)), not just integer categorical inputs. If you are using an older version of Scikit-Learn, you can import the new version from `future_encoders.py`: try: from sklearn.preprocessing import OrdinalEncoder # just to raise an ImportError if Scikit-Learn < 0.20 from sklearn.preprocessing import OneHotEncoder except ImportError: from future_encoders import OneHotEncoder # Scikit-Learn < 0.20 # Now we can build the pipeline for the categorical attributes: cat_pipeline = Pipeline([ ("select_cat", DataFrameSelector(["Pclass", "Sex", "Embarked"])), ("imputer", MostFrequentImputer()), ("cat_encoder", OneHotEncoder(sparse=False)), ]) cat_pipeline.fit_transform(train_data) # Finally, let's join the numerical and categorical pipelines: from sklearn.pipeline import FeatureUnion preprocess_pipeline = FeatureUnion(transformer_list=[ ("num_pipeline", num_pipeline), ("cat_pipeline", cat_pipeline), ]) # Cool! Now we have a nice preprocessing pipeline that takes the raw data and outputs numerical input features that we can feed to any Machine Learning model we want. X_train = preprocess_pipeline.fit_transform(train_data) X_train # Let's not forget to get the labels: y_train = train_data["Survived"] # We are now ready to train a classifier. Let's start with an `SVC`: # + from sklearn.svm import SVC svm_clf = SVC(gamma="auto") svm_clf.fit(X_train, y_train) # - # Great, our model is trained, let's use it to make predictions on the test set: X_test = preprocess_pipeline.transform(test_data) y_pred = svm_clf.predict(X_test) # And now we could just build a CSV file with these predictions (respecting the format excepted by Kaggle), then upload it and hope for the best. But wait! We can do better than hope. Why don't we use cross-validation to have an idea of how good our model is? # + from sklearn.model_selection import cross_val_score svm_scores = cross_val_score(svm_clf, X_train, y_train, cv=10) svm_scores.mean() # - # Okay, over 73% accuracy, clearly better than random chance, but it's not a great score. Looking at the [leaderboard](https://www.kaggle.com/c/titanic/leaderboard) for the Titanic competition on Kaggle, you can see that you need to reach above 80% accuracy to be within the top 10% Kagglers. Some reached 100%, but since you can easily find the [list of victims](https://www.encyclopedia-titanica.org/titanic-victims/) of the Titanic, it seems likely that there was little Machine Learning involved in their performance! ;-) So let's try to build a model that reaches 80% accuracy. # Let's try a `RandomForestClassifier`: # + from sklearn.ensemble import RandomForestClassifier forest_clf = RandomForestClassifier(n_estimators=100, random_state=42) forest_scores = cross_val_score(forest_clf, X_train, y_train, cv=10) forest_scores.mean() # - # That's much better! # Instead of just looking at the mean accuracy across the 10 cross-validation folds, let's plot all 10 scores for each model, along with a box plot highlighting the lower and upper quartiles, and "whiskers" showing the extent of the scores (thanks to <NAME> for suggesting this visualization). Note that the `boxplot()` function detects outliers (called "fliers") and does not include them within the whiskers. Specifically, if the lower quartile is $Q_1$ and the upper quartile is $Q_3$, then the interquartile range $IQR = Q_3 - Q_1$ (this is the box's height), and any score lower than $Q_1 - 1.5 \times IQR$ is a flier, and so is any score greater than $Q3 + 1.5 \times IQR$. plt.figure(figsize=(8, 4)) plt.plot([1]*10, svm_scores, ".") plt.plot([2]*10, forest_scores, ".") plt.boxplot([svm_scores, forest_scores], labels=("SVM","Random Forest")) plt.ylabel("Accuracy", fontsize=14) plt.show() # To improve this result further, you could: # * Compare many more models and tune hyperparameters using cross validation and grid search, # * Do more feature engineering, for example: # * replace **SibSp** and **Parch** with their sum, # * try to identify parts of names that correlate well with the **Survived** attribute (e.g. if the name contains "Countess", then survival seems more likely), # * try to convert numerical attributes to categorical attributes: for example, different age groups had very different survival rates (see below), so it may help to create an age bucket category and use it instead of the age. Similarly, it may be useful to have a special category for people traveling alone since only 30% of them survived (see below). train_data["AgeBucket"] = train_data["Age"] // 15 * 15 train_data[["AgeBucket", "Survived"]].groupby(['AgeBucket']).mean() train_data["RelativesOnboard"] = train_data["SibSp"] + train_data["Parch"] train_data[["RelativesOnboard", "Survived"]].groupby(['RelativesOnboard']).mean() # ## 4. Spam classifier # First, let's fetch the data: # + import os import tarfile from six.moves import urllib DOWNLOAD_ROOT = "http://spamassassin.apache.org/old/publiccorpus/" HAM_URL = DOWNLOAD_ROOT + "20030228_easy_ham.tar.bz2" SPAM_URL = DOWNLOAD_ROOT + "20030228_spam.tar.bz2" SPAM_PATH = os.path.join("datasets", "spam") def fetch_spam_data(spam_url=SPAM_URL, spam_path=SPAM_PATH): if not os.path.isdir(spam_path): os.makedirs(spam_path) for filename, url in (("ham.tar.bz2", HAM_URL), ("spam.tar.bz2", SPAM_URL)): path = os.path.join(spam_path, filename) if not os.path.isfile(path): urllib.request.urlretrieve(url, path) tar_bz2_file = tarfile.open(path) tar_bz2_file.extractall(path=SPAM_PATH) tar_bz2_file.close() # - fetch_spam_data() # Next, let's load all the emails: HAM_DIR = os.path.join(SPAM_PATH, "easy_ham") SPAM_DIR = os.path.join(SPAM_PATH, "spam") ham_filenames = [name for name in sorted(os.listdir(HAM_DIR)) if len(name) > 20] spam_filenames = [name for name in sorted(os.listdir(SPAM_DIR)) if len(name) > 20] len(ham_filenames) len(spam_filenames) # We can use Python's `email` module to parse these emails (this handles headers, encoding, and so on): # + import email import email.policy def load_email(is_spam, filename, spam_path=SPAM_PATH): directory = "spam" if is_spam else "easy_ham" with open(os.path.join(spam_path, directory, filename), "rb") as f: return email.parser.BytesParser(policy=email.policy.default).parse(f) # - ham_emails = [load_email(is_spam=False, filename=name) for name in ham_filenames] spam_emails = [load_email(is_spam=True, filename=name) for name in spam_filenames] # Let's look at one example of ham and one example of spam, to get a feel of what the data looks like: print(ham_emails[1].get_content().strip()) print(spam_emails[6].get_content().strip()) # Some emails are actually multipart, with images and attachments (which can have their own attachments). Let's look at the various types of structures we have: def get_email_structure(email): if isinstance(email, str): return email payload = email.get_payload() if isinstance(payload, list): return "multipart({})".format(", ".join([ get_email_structure(sub_email) for sub_email in payload ])) else: return email.get_content_type() # + from collections import Counter def structures_counter(emails): structures = Counter() for email in emails: structure = get_email_structure(email) structures[structure] += 1 return structures # - structures_counter(ham_emails).most_common() structures_counter(spam_emails).most_common() # It seems that the ham emails are more often plain text, while spam has quite a lot of HTML. Moreover, quite a few ham emails are signed using PGP, while no spam is. In short, it seems that the email structure is useful information to have. # Now let's take a look at the email headers: for header, value in spam_emails[0].items(): print(header,":",value) # There's probably a lot of useful information in there, such as the sender's email address (<EMAIL> looks fishy), but we will just focus on the `Subject` header: spam_emails[0]["Subject"] # Okay, before we learn too much about the data, let's not forget to split it into a training set and a test set: # + import numpy as np from sklearn.model_selection import train_test_split X = np.array(ham_emails + spam_emails) y = np.array([0] * len(ham_emails) + [1] * len(spam_emails)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # - # Okay, let's start writing the preprocessing functions. First, we will need a function to convert HTML to plain text. Arguably the best way to do this would be to use the great [BeautifulSoup](https://www.crummy.com/software/BeautifulSoup/) library, but I would like to avoid adding another dependency to this project, so let's hack a quick & dirty solution using regular expressions (at the risk of [un̨ho͞ly radiańcé destro҉ying all enli̍̈́̂̈́ghtenment](https://stackoverflow.com/a/1732454/38626)). The following function first drops the `<head>` section, then converts all `<a>` tags to the word HYPERLINK, then it gets rid of all HTML tags, leaving only the plain text. For readability, it also replaces multiple newlines with single newlines, and finally it unescapes html entities (such as `&gt;` or `&nbsp;`): # + import re from html import unescape def html_to_plain_text(html): text = re.sub('<head.*?>.*?</head>', '', html, flags=re.M | re.S | re.I) text = re.sub('<a\s.*?>', ' HYPERLINK ', text, flags=re.M | re.S | re.I) text = re.sub('<.*?>', '', text, flags=re.M | re.S) text = re.sub(r'(\s*\n)+', '\n', text, flags=re.M | re.S) return unescape(text) # - # Let's see if it works. This is HTML spam: html_spam_emails = [email for email in X_train[y_train==1] if get_email_structure(email) == "text/html"] sample_html_spam = html_spam_emails[7] print(sample_html_spam.get_content().strip()[:1000], "...") # And this is the resulting plain text: print(html_to_plain_text(sample_html_spam.get_content())[:1000], "...") # Great! Now let's write a function that takes an email as input and returns its content as plain text, whatever its format is: def email_to_text(email): html = None for part in email.walk(): ctype = part.get_content_type() if not ctype in ("text/plain", "text/html"): continue try: content = part.get_content() except: # in case of encoding issues content = str(part.get_payload()) if ctype == "text/plain": return content else: html = content if html: return html_to_plain_text(html) print(email_to_text(sample_html_spam)[:100], "...") # Let's throw in some stemming! For this to work, you need to install the Natural Language Toolkit ([NLTK](http://www.nltk.org/)). It's as simple as running the following command (don't forget to activate your virtualenv first; if you don't have one, you will likely need administrator rights, or use the `--user` option): # # `$ pip3 install nltk` try: import nltk stemmer = nltk.PorterStemmer() for word in ("Computations", "Computation", "Computing", "Computed", "Compute", "Compulsive"): print(word, "=>", stemmer.stem(word)) except ImportError: print("Error: stemming requires the NLTK module.") stemmer = None # We will also need a way to replace URLs with the word "URL". For this, we could use hard core [regular expressions](https://mathiasbynens.be/demo/url-regex) but we will just use the [urlextract](https://github.com/lipoja/URLExtract) library. You can install it with the following command (don't forget to activate your virtualenv first; if you don't have one, you will likely need administrator rights, or use the `--user` option): # # `$ pip3 install urlextract` try: import urlextract # may require an Internet connection to download root domain names url_extractor = urlextract.URLExtract() print(url_extractor.find_urls("Will it detect github.com and https://youtu.be/7Pq-S557XQU?t=3m32s")) except ImportError: print("Error: replacing URLs requires the urlextract module.") url_extractor = None # We are ready to put all this together into a transformer that we will use to convert emails to word counters. Note that we split sentences into words using Python's `split()` method, which uses whitespaces for word boundaries. This works for many written languages, but not all. For example, Chinese and Japanese scripts generally don't use spaces between words, and Vietnamese often uses spaces even between syllables. It's okay in this exercise, because the dataset is (mostly) in English. # + from sklearn.base import BaseEstimator, TransformerMixin class EmailToWordCounterTransformer(BaseEstimator, TransformerMixin): def __init__(self, strip_headers=True, lower_case=True, remove_punctuation=True, replace_urls=True, replace_numbers=True, stemming=True): self.strip_headers = strip_headers self.lower_case = lower_case self.remove_punctuation = remove_punctuation self.replace_urls = replace_urls self.replace_numbers = replace_numbers self.stemming = stemming def fit(self, X, y=None): return self def transform(self, X, y=None): X_transformed = [] for email in X: text = email_to_text(email) or "" if self.lower_case: text = text.lower() if self.replace_urls and url_extractor is not None: urls = list(set(url_extractor.find_urls(text))) urls.sort(key=lambda url: len(url), reverse=True) for url in urls: text = text.replace(url, " URL ") if self.replace_numbers: text = re.sub(r'\d+(?:\.\d*(?:[eE]\d+))?', 'NUMBER', text) if self.remove_punctuation: text = re.sub(r'\W+', ' ', text, flags=re.M) word_counts = Counter(text.split()) if self.stemming and stemmer is not None: stemmed_word_counts = Counter() for word, count in word_counts.items(): stemmed_word = stemmer.stem(word) stemmed_word_counts[stemmed_word] += count word_counts = stemmed_word_counts X_transformed.append(word_counts) return np.array(X_transformed) # - # Let's try this transformer on a few emails: X_few = X_train[:3] X_few_wordcounts = EmailToWordCounterTransformer().fit_transform(X_few) X_few_wordcounts # This looks about right! # Now we have the word counts, and we need to convert them to vectors. For this, we will build another transformer whose `fit()` method will build the vocabulary (an ordered list of the most common words) and whose `transform()` method will use the vocabulary to convert word counts to vectors. The output is a sparse matrix. # + from scipy.sparse import csr_matrix class WordCounterToVectorTransformer(BaseEstimator, TransformerMixin): def __init__(self, vocabulary_size=1000): self.vocabulary_size = vocabulary_size def fit(self, X, y=None): total_count = Counter() for word_count in X: for word, count in word_count.items(): total_count[word] += min(count, 10) most_common = total_count.most_common()[:self.vocabulary_size] self.most_common_ = most_common self.vocabulary_ = {word: index + 1 for index, (word, count) in enumerate(most_common)} return self def transform(self, X, y=None): rows = [] cols = [] data = [] for row, word_count in enumerate(X): for word, count in word_count.items(): rows.append(row) cols.append(self.vocabulary_.get(word, 0)) data.append(count) return csr_matrix((data, (rows, cols)), shape=(len(X), self.vocabulary_size + 1)) # - vocab_transformer = WordCounterToVectorTransformer(vocabulary_size=10) X_few_vectors = vocab_transformer.fit_transform(X_few_wordcounts) X_few_vectors X_few_vectors.toarray() # What does this matrix mean? Well, the 64 in the third row, first column, means that the third email contains 64 words that are not part of the vocabulary. The 1 next to it means that the first word in the vocabulary is present once in this email. The 2 next to it means that the second word is present twice, and so on. You can look at the vocabulary to know which words we are talking about. The first word is "of", the second word is "and", etc. vocab_transformer.vocabulary_ # We are now ready to train our first spam classifier! Let's transform the whole dataset: # + from sklearn.pipeline import Pipeline preprocess_pipeline = Pipeline([ ("email_to_wordcount", EmailToWordCounterTransformer()), ("wordcount_to_vector", WordCounterToVectorTransformer()), ]) X_train_transformed = preprocess_pipeline.fit_transform(X_train) # + from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score log_clf = LogisticRegression(solver="liblinear", random_state=42) score = cross_val_score(log_clf, X_train_transformed, y_train, cv=3, verbose=3) score.mean() # - # Over 98.7%, not bad for a first try! :) However, remember that we are using the "easy" dataset. You can try with the harder datasets, the results won't be so amazing. You would have to try multiple models, select the best ones and fine-tune them using cross-validation, and so on. # # But you get the picture, so let's stop now, and just print out the precision/recall we get on the test set: # + from sklearn.metrics import precision_score, recall_score X_test_transformed = preprocess_pipeline.transform(X_test) log_clf = LogisticRegression(solver="liblinear", random_state=42) log_clf.fit(X_train_transformed, y_train) y_pred = log_clf.predict(X_test_transformed) print("Precision: {:.2f}%".format(100 * precision_score(y_test, y_pred))) print("Recall: {:.2f}%".format(100 * recall_score(y_test, y_pred))) # -
03_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="zX4Kg8DUTKWO" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="Za8-Nr5k11fh" # ##### Copyright 2018 The TensorFlow Authors. # + cellView="form" id="Eq10uEbw0E4l" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="oYM61xrTsP5d" # # Transfer Learning with TensorFlow Hub for TFLite # + [markdown] id="aFNhz34Svuhe" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://colab.research.google.com/github/lmoroney/dlaicourse/blob/master/TensorFlow%20Deployment/Course%202%20-%20TensorFlow%20Lite/Week%201/Examples/TFLite_Week1_Transfer_Learning.ipynb"> # <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> # Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/lmoroney/dlaicourse/blob/master/TensorFlow%20Deployment/Course%202%20-%20TensorFlow%20Lite/Week%201/Examples/TFLite_Week1_Transfer_Learning.ipynb"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> # View source on GitHub</a> # </td> # </table> # + [markdown] id="bL54LWCHt5q5" # ## Setup # + id="110fGB18UNJn" try: # %tensorflow_version 2.x except: pass # + id="dlauq-4FWGZM" import numpy as np import matplotlib.pylab as plt import tensorflow as tf import tensorflow_hub as hub import tensorflow_datasets as tfds tfds.disable_progress_bar() from tqdm import tqdm print("\u2022 Using TensorFlow Version:", tf.__version__) print("\u2022 Using TensorFlow Hub Version: ", hub.__version__) print('\u2022 GPU Device Found.' if tf.test.is_gpu_available() else '\u2022 GPU Device Not Found. Running on CPU') # + [markdown] id="mmaHHH7Pvmth" # ## Select the Hub/TF2 Module to Use # # Hub modules for TF 1.x won't work here, please use one of the selections provided. # + id="FlsEcKVeuCnf" module_selection = ("mobilenet_v2", 224, 1280) #@param ["(\"mobilenet_v2\", 224, 1280)", "(\"inception_v3\", 299, 2048)"] {type:"raw", allow-input: true} handle_base, pixels, FV_SIZE = module_selection MODULE_HANDLE ="https://tfhub.dev/google/tf2-preview/{}/feature_vector/4".format(handle_base) IMAGE_SIZE = (pixels, pixels) print("Using {} with input size {} and output dimension {}".format(MODULE_HANDLE, IMAGE_SIZE, FV_SIZE)) # + id="eDg-yd8fatdQ" # + [markdown] id="sYUsgwCBv87A" # ## Data Preprocessing # + [markdown] id="8nqVX3KYwGPh" # Use [TensorFlow Datasets](http://tensorflow.org/datasets) to load the cats and dogs dataset. # # This `tfds` package is the easiest way to load pre-defined data. If you have your own data, and are interested in importing using it with TensorFlow see [loading image data](../load_data/images.ipynb) # # + [markdown] id="YkF4Boe5wN7N" # The `tfds.load` method downloads and caches the data, and returns a `tf.data.Dataset` object. These objects provide powerful, efficient methods for manipulating data and piping it into your model. # # Since `"cats_vs_dog"` doesn't define standard splits, use the subsplit feature to divide it into (train, validation, test) with 80%, 10%, 10% of the data respectively. # + id="xYXcGT_7b_Rr" # splits = tfds.Split.ALL.subsplit(weighted=(80,10,10)) # splits, info = tfds.load('cats_vs_dogs', with_info=True, as_supervised=True, split=splits) # (train_examples, validation_examples, test_examples) = splits # num_examples = info.splits['train'].num_examples # num_classes = info.features['label'].num_classes # + id="SQ9xK9F2wGD8" # splits = tfds.Split.ALL.subsplit(weighted=(80, 10, 10)) # splits, info = tfds.load('cats_vs_dogs', with_info=True, as_supervised=True, split = splits) ri = (tfds.core.ReadInstruction('train', to=10, unit='%') + tfds.core.ReadInstruction('train', from_=-80, unit='%')) splits, info = tfds.load('cats_vs_dogs', with_info=True, as_supervised=True, split=[ 'train[:80%]', 'train[80%:90%]','train[90%:100%]' ] ) (train_examples, validation_examples, test_examples) = splits num_examples = info.splits['train'].num_examples num_classes = info.features['label'].num_classes # + id="Yu2hgXSVmikc" num_classes # + [markdown] id="pmXQYXNWwf19" # ### Format the Data # # Use the `tf.image` module to format the images for the task. # # Resize the images to a fixes input size, and rescale the input channels # + id="y7UyXblSwkUS" def format_image(image, label): image = tf.image.resize(image, IMAGE_SIZE) / 255.0 return image, label # + [markdown] id="1nrDR8CnwrVk" # Now shuffle and batch the data # # + id="zAEUG7vawxLm" BATCH_SIZE = 32 #@param {type:"integer"} # + id="fHEC9mbswxvM" train_batches = train_examples.shuffle(num_examples // 4).map(format_image).batch(BATCH_SIZE).prefetch(1) validation_batches = validation_examples.map(format_image).batch(BATCH_SIZE).prefetch(1) test_batches = test_examples.map(format_image).batch(1) # + [markdown] id="ghQhZjgEw1cK" # Inspect a batch # + id="gz0xsMCjwx54" for image_batch, label_batch in train_batches.take(1): pass image_batch.shape # + [markdown] id="FS_gVStowW3G" # ## Defining the Model # # All it takes is to put a linear classifier on top of the `feature_extractor_layer` with the Hub module. # # For speed, we start out with a non-trainable `feature_extractor_layer`, but you can also enable fine-tuning for greater accuracy. # + cellView="form" id="RaJW3XrPyFiF" do_fine_tuning = True #@param {type:"boolean"} # + [markdown] id="wd0KfstqaUmE" # Load TFHub Module # + id="svvDrt3WUrrm" feature_extractor = hub.KerasLayer(MODULE_HANDLE, input_shape=IMAGE_SIZE + (3,), output_shape=[FV_SIZE], trainable=do_fine_tuning) # + id="50FYNIb1dmJH" print("Building model with", MODULE_HANDLE) model = tf.keras.Sequential([ feature_extractor, tf.keras.layers.Dense(num_classes, activation='softmax') ]) model.summary() # + id="y5DkJxH3ZpQG" #@title (Optional) Unfreeze some layers NUM_LAYERS = 9 #@param {type:"slider", min:1, max:50, step:1} if do_fine_tuning: feature_extractor.trainable = True for layer in model.layers[-NUM_LAYERS:]: layer.trainable = True else: feature_extractor.trainable = False # + [markdown] id="u2e5WupIw2N2" # ## Training the Model # + id="9f3yBUvkd_VJ" if do_fine_tuning: model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.002, momentum=0.9), loss=tf.keras.losses.SparseCategoricalCrossentropy(), metrics=['accuracy']) else: model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) # + id="w_YKX2Qnfg6x" EPOCHS = 5 hist = model.fit(train_batches, epochs=EPOCHS, validation_data=validation_batches) # + [markdown] id="u_psFoTeLpHU" # ## Export the Model # + id="XaSb5nVzHcVv" CATS_VS_DOGS_SAVED_MODEL = "exp_saved_model" # + [markdown] id="fZqRAg1uz1Nu" # Export the SavedModel # + id="yJMue5YgnwtN" tf.saved_model.save(model, CATS_VS_DOGS_SAVED_MODEL) # + id="SOQF4cOan0SY" magic_args="-s $CATS_VS_DOGS_SAVED_MODEL" language="bash" # saved_model_cli show --dir $1 --tag_set serve --signature_def serving_default # + id="FY7QGBgBytwX" loaded = tf.saved_model.load(CATS_VS_DOGS_SAVED_MODEL) # + id="tIhPyMISz952" print(list(loaded.signatures.keys())) infer = loaded.signatures["serving_default"] print(infer.structured_input_signature) print(infer.structured_outputs) # + [markdown] id="XxLiLC8n0H16" # ## Convert Using TFLite's Converter # + [markdown] id="1aUYvCpfWmrQ" # Load the TFLiteConverter with the SavedModel # + id="dqJRyIg8Wl1n" converter = tf.lite.TFLiteConverter.from_saved_model(CATS_VS_DOGS_SAVED_MODEL) # + [markdown] id="AudcNjT0UtfF" # ### Post-Training Quantization # The simplest form of post-training quantization quantizes weights from floating point to 8-bits of precision. This technique is enabled as an option in the TensorFlow Lite converter. At inference, weights are converted from 8-bits of precision to floating point and computed using floating-point kernels. This conversion is done once and cached to reduce latency. # # To further improve latency, hybrid operators dynamically quantize activations to 8-bits and perform computations with 8-bit weights and activations. This optimization provides latencies close to fully fixed-point inference. However, the outputs are still stored using floating point, so that the speedup with hybrid ops is less than a full fixed-point computation. # + id="WmSr2-yZoUhz" converter.optimizations = [tf.lite.Optimize.DEFAULT] # + [markdown] id="YpCijI08UxP0" # ### Post-Training Integer Quantization # We can get further latency improvements, reductions in peak memory usage, and access to integer only hardware accelerators by making sure all model math is quantized. To do this, we need to measure the dynamic range of activations and inputs with a representative data set. You can simply create an input data generator and provide it to our converter. # + id="clM_dTIkWdIa" def representative_data_gen(): for input_value, _ in test_batches.take(100): yield [input_value] # + id="0oPkAxDvUias" converter.representative_dataset = representative_data_gen # + [markdown] id="IGUAVTqXVfnu" # The resulting model will be fully quantized but still take float input and output for convenience. # # Ops that do not have quantized implementations will automatically be left in floating point. This allows conversion to occur smoothly but may restrict deployment to accelerators that support float. # + [markdown] id="cPVdjaEJVkHy" # ### Full Integer Quantization # # To require the converter to only output integer operations, one can specify: # + id="eQi1aO2cVhoL" converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] # + [markdown] id="snwssESbVtFw" # ### Finally convert the model # + id="tUEgr46WVsqd" tflite_model = converter.convert() tflite_model_file = 'converted_model.tflite' with open(tflite_model_file, "wb") as f: f.write(tflite_model) # + [markdown] id="BbTF6nd1KG2o" # ## Test the TFLite Model Using the Python Interpreter # + id="dg2NkVTmLUdJ" # Load TFLite model and allocate tensors. interpreter = tf.lite.Interpreter(model_path=tflite_model_file) interpreter.allocate_tensors() input_index = interpreter.get_input_details()[0]["index"] output_index = interpreter.get_output_details()[0]["index"] # + id="snJQVs9JNglv" # Gather results for the randomly sampled test images predictions = [] test_labels, test_imgs = [], [] for img, label in tqdm(test_batches.take(10)): interpreter.set_tensor(input_index, img) interpreter.invoke() predictions.append(interpreter.get_tensor(output_index)) test_labels.append(label.numpy()[0]) test_imgs.append(img) # + cellView="form" id="YMTWNqPpNiAI" #@title Utility functions for plotting # Utilities for plotting class_names = ['cat', 'dog'] def plot_image(i, predictions_array, true_label, img): predictions_array, true_label, img = predictions_array[i], true_label[i], img[i] plt.grid(False) plt.xticks([]) plt.yticks([]) img = np.squeeze(img) plt.imshow(img, cmap=plt.cm.binary) predicted_label = np.argmax(predictions_array) if predicted_label == true_label: color = 'green' else: color = 'red' plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100*np.max(predictions_array), class_names[true_label]), color=color) # + [markdown] id="fK_CTyL3XQt1" # NOTE: Colab runs on server CPUs. At the time of writing this, TensorFlow Lite doesn't have super optimized server CPU kernels. For this reason post-training full-integer quantized models may be slower here than the other kinds of optimized models. But for mobile CPUs, considerable speedup can be observed. # + cellView="form" id="1-lbnicPNkZs" #@title Visualize the outputs { run: "auto" } index = 3 #@param {type:"slider", min:0, max:9, step:1} plt.figure(figsize=(6,3)) plt.subplot(1,2,1) plot_image(index, predictions, test_labels, test_imgs) plt.show() # + [markdown] id="PmZRieHmKLY5" # Create a file to save the labels. # + id="H92yi-vbZpQb" labels = ['cat', 'dog'] with open('labels.txt', 'w') as f: f.write('\n'.join(labels)) # + [markdown] id="7PxItDgvZpQb" # If you are running this notebook in a Colab, you can run the cell below to download the model and labels to your local disk. # # **Note**: If the files do not download when you run the cell, try running the cell a second time. Your browser might prompt you to allow multiple files to be downloaded. # + id="0jJAxrQB2VFw" try: from google.colab import files files.download('converted_model.tflite') files.download('labels.txt') except: pass # + [markdown] id="BDlmpjC6VnFZ" # # Prepare the Test Images for Download (Optional) # + [markdown] id="_1ja_WA0WZOH" # This part involves downloading additional test images for the Mobile Apps only in case you need to try out more samples # + id="fzLKEBrfTREA" # !mkdir -p test_images # + id="Qn7ukNQCSewb" from PIL import Image for index, (image, label) in enumerate(test_batches.take(50)): image = tf.cast(image * 255.0, tf.uint8) image = tf.squeeze(image).numpy() pil_image = Image.fromarray(image) pil_image.save('test_images/{}_{}.jpg'.format(class_names[label[0]], index)) # + id="xVKKWUG8UMO5" # !ls test_images # + id="l_w_-UdlS9Vi" # !zip -qq cats_vs_dogs_test_images.zip -r test_images/ # + [markdown] id="OHujfDG5ZpQl" # If you are running this notebook in a Colab, you can run the cell below to download the Zip file with the images to your local disk. # # **Note**: If the Zip file does not download when you run the cell, try running the cell a second time. # + id="Giva6EHwWm6Y" try: files.download('cats_vs_dogs_test_images.zip') except: pass
TensorFlow Deployment/Course 2 - TensorFlow Lite/Week 1/Examples/TFLite_Week1_Transfer_Learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="O5WVrOdzC3K1" # #Finetuning Data Generation Script # + [markdown] id="Nq31-sx6C-eM" # This notebook processes tsv data and uploads the processed data to GCS to be used for finetuning MutFormer. # + [markdown] id="E2XB_l-Hgzq_" # # Configure settings/Mount Drive if needed # + id="ozmx1LCLw3SQ" #@markdown ## General Config #@markdown Whether or not this script is being run in a GCP runtime (if more memory is required for large databases) GCP_RUNTIME = False #@param {type:"boolean"} #@markdown Which mode to use (a different mode means a different finetuning task): options are: #@markdown * "MRPC" - paired sequence method #@markdown * "MRPC_w_ex_data" - paired sequence method with external data #@markdown * "RE" - single sequence method #@markdown * "NER" - single sequence per residue prediction #@markdown #@markdown You can add more modes by creating a new processor and/or a new model_fn inside of the "mutformer_model_code" folder downloaded from github, then changing the corresponding code snippets in the code segment named "Authorize for GCS, Imports, and General Setup" (also edit the dropdown below). MODE = "MRPC" #@param ["MRPC_w_ex_data", "MRPC", "RE", "NER"] {type:"string"} #### ^^^^^ dropdown list for all modes ^^^^^ #@markdown Name of the GCS bucket to use: BUCKET_NAME = "theodore_jiang" #@param {type:"string"} BUCKET_PATH = "gs://"+BUCKET_NAME #@markdown \ #@markdown #@markdown #@markdown ## IO Config #@markdown Input finetuning data folder: data will be read from here to be processed and uploaded to GCS (can be a drive path, or a GCS path if needed for large databases; must be a GCS path if using GCP_RUNTIME): #@markdown #@markdown * For processing multiple sets i.e. for multiple sequence lengths, simply store these sets into separate subfolders inside of the folder listed below, with each subfolder being named as specified in the following section. #@markdown #@markdown * For processing a single set, this folder should directly contain one dataset. #@markdown INPUT_DATA_DIR = "gs://theodore_jiang/finetune_updated_data/MRPC_wo_preds" #@param {type: "string"} if not GCP_RUNTIME: ##if INPUT_DATA_DIR is a drive path, if "/content/drive" in INPUT_DATA_DIR: ##mount google drive from google.colab import drive if GCP_RUNTIME: raise Exception("if GCP_RUNTIME, a GCS path must be used, since Google's cloud TPUs can only communicate with GCS and not drive") # !fusermount -u /content/drive drive.flush_and_unmount() drive.mount('/content/drive', force_remount=True) #@markdown Name of the folder in GCS to put processed data into: #@markdown * For generating multiple datasets i.e. for different sequence lengths, they will be written as individual subfolders inside of this folder. OUTPUT_DATA_DIR = "MRPC_finetune_update_loaded" #@param {type:"string"} DATA_INFO = { ##dictionary that will be uploaded alongside "mode":MODE ##each dataset to indicate its parameters } #### Vocabulary for the model (MutFormer uses the vocabulary below) ([PAD] #### [UNK],[CLS],[SEP], and [MASK] are necessary default tokens; B and J #### are markers for the beginning and ending of a protein sequence, #### respectively; the rest are all amino acids possible, ranked #### approximately by frequency of occurence in human population) #### vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv vocab = \ '''[PAD] [UNK] [CLS] [SEP] [MASK] L S B J E A P T G V K R D Q I N F H Y C M W''' with open("vocab.txt", "w") as fo: for token in vocab.split("\n"): fo.write(token+"\n") # + [markdown] id="HA4ieYajd-Ht" # #If running on a GCP runtime, follow these instructions to set it up: # + [markdown] id="YSUONYA5id9M" # ###1) Create a VM from the GCP website # ###2) Open a command prompt on your computer and perform the following steps" # To ssh into the VM, run: # # ``` # gcloud beta compute ssh --zone <COMPUTE ZONE> <VM NAME> --project <PROJECT NAME> -- -L 8888:localhost:8888 # ``` # # Note: Make sure the port above matches the port below (in this case it's 8888) # \ # \ # In the new command prompt that popped out, either run each of the commands below individually, or copy and paste the one liner below: # ``` # sudo apt-get update # sudo apt-get -y install python3 python3-pip # sudo apt-get install pkg-config # sudo apt-get install libhdf5-serial-dev # sudo apt-get install libffi6 libffi-dev # sudo -H pip3 install jupyter tensorflow google-api-python-client tqdm # sudo -H pip3 install jupyter_http_over_ws # jupyter serverextension enable --py jupyter_http_over_ws # jupyter notebook --NotebookApp.allow_origin='https://colab.research.google.com' --port=8888 --NotebookApp.port_retries=0 --no-browser # ``` # One command: # ``` # sudo apt-get update ; sudo apt-get -y install python3 python3-pip ; sudo apt-get install pkg-config ; sudo apt-get -y install libhdf5-serial-dev ; sudo apt-get install libffi6 libffi-dev; sudo -H pip3 install jupyter tensorflow google-api-python-client tqdm ; sudo -H pip3 install jupyter_http_over_ws ; jupyter serverextension enable --py jupyter_http_over_ws ; jupyter notebook --NotebookApp.allow_origin='https://colab.research.google.com' --port=8888 --NotebookApp.port_retries=0 --no-browser # ``` # ###3) In this notebook, click the "connect to local runtime" option under the connect button, and copy and paste the link outputted by command prompt with "locahost: ..." # + [markdown] id="Z1PvmBO8eR00" # #Clone the MutFormer repo # + id="2vKz_tKFeO0s" colab={"base_uri": "https://localhost:8080/"} outputId="b39dc47e-251b-49fc-9d97-94640b59be09" if GCP_RUNTIME: # !sudo apt-get -y install git-all #@markdown Where to clone the repo into: REPO_DESTINATION_PATH = "mutformer" #@param {type:"string"} import os,shutil if not os.path.exists(REPO_DESTINATION_PATH): os.makedirs(REPO_DESTINATION_PATH) else: shutil.rmtree(REPO_DESTINATION_PATH) os.makedirs(REPO_DESTINATION_PATH) cmd = "git clone https://github.com/WGLab/mutformer.git \"" + REPO_DESTINATION_PATH + "\"" # !{cmd} # + [markdown] id="Yj1mClhQQE_n" # #Authorize for GCS, Imports, and General Setup # + id="9S4CiOh3RzFW" colab={"base_uri": "https://localhost:8080/"} outputId="7605c017-0569-40e2-8d4f-9c2418ac03f0" if not GCP_RUNTIME: from google.colab import auth print("Authorize for GCS:") auth.authenticate_user() print("Authorize done") import sys import json import random import logging import tensorflow as tf import time import os import shutil import importlib if REPO_DESTINATION_PATH == "mutformer": if os.path.exists("mutformer_code"): shutil.rmtree("mutformer_code") shutil.copytree(REPO_DESTINATION_PATH,"mutformer_code") REPO_DESTINATION_PATH = "mutformer_code" if not os.path.exists("mutformer"): shutil.copytree(REPO_DESTINATION_PATH+"/mutformer_model_code","mutformer") else: shutil.rmtree("mutformer") shutil.copytree(REPO_DESTINATION_PATH+"/mutformer_model_code","mutformer") if "mutformer" in sys.path: sys.path.remove("mutformer") sys.path.append("mutformer") from mutformer import modeling, optimization, tokenization,run_classifier,run_ner_for_pathogenic #### <<<<< if you added more modes, change these imports to import the correct processors from mutformer.run_classifier import MrpcProcessor,REProcessor,MrpcWithExDataProcessor #### <<<<< and correct training scripts (i.e. run_classifier and run_ner_for_pathogenic) from mutformer.run_ner_for_pathogenic import NERProcessor ##reload modules so that you don't need to restart the runtime to reload modules in case that's needed modules2reload = [modeling, optimization, tokenization, run_classifier, run_ner_for_pathogenic] for module in modules2reload: importlib.reload(module) # configure logging log = logging.getLogger('tensorflow') log.setLevel(logging.INFO) log.handlers = [] formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch = logging.StreamHandler() ch.setLevel(logging.INFO) ch.setFormatter(formatter) log.addHandler(ch) if MODE=="MRPC": #### vvvvv if you added more modes, change this part to set the processors and training scripts correctly vvvvv processor = MrpcProcessor() script = run_classifier USE_EX_DATA = False elif MODE=="MRPC_w_ex_data": processor = MrpcWithExDataProcessor() script = run_classifier USE_EX_DATA = True elif MODE=="RE": processor = REProcessor() script = run_classifier USE_EX_DATA = False elif MODE=="NER": processor = NERProcessor() script = run_ner_for_pathogenic USE_EX_DATA = False else: raise Exception("The mode specified was not one of the available modes: [\"MRPC\", \"RE\",\"NER\"].") label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=False) #### ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # + [markdown] id="rkFC96e0cK6n" # # Data Generation # + [markdown] id="4lXDH9WeQWGw" # ###General setup and definitions # + id="nUXoN_qYQZOA" #@markdown Maximum batch size the finetuning_benchmark script can handle without OOM (must be divisible by NUM_TPU_CORES_WHEN_TESTING): MAX_BATCH_SIZE = 1024 #@param {type:"integer"} #@markdown How many tpu cores will be used during evaluation and prediction (for colab runtimes, it's 8): NUM_TPU_CORES_WHEN_TESTING = 8 #@param {type:"integer"} def generate_data(MAX_SEQ_LENGTH, data_folder_current, DATA_GCS_DIR, PRECISE_TESTING, USING_SHARDS, SHARD_SIZE): try: print("\nUpdating and uploading data info json...\n") DATA_INFO["sequence_length"] = MAX_SEQ_LENGTH ##update data info with sequence length if USE_EX_DATA: ##if using external data, update data def get_ex_data_num(file): ##info with the # of external datapoints being used ex_data = tf.gfile.Open(file).read().split("\n")[0].split("\t")[3].split() return len(ex_data) DATA_INFO["ex_data_num"] = get_ex_data_num(data_folder_current+"/"+\ [file for file in tf.io.gfile.listdir(data_folder_current) if file.endswith(".tsv")][0]) ##just get the first tsv in the folder with tf.gfile.Open(DATA_GCS_DIR+"/info.json","w+") as out: ##writes out a dictionary containing json.dump(DATA_INFO,out,indent=2) ##the dataset's parameters print("Data info json uploaded successfully") except Exception as e: print("could not update and upload data info json. Error:",e) try: print("\nGenerating train set...\n") if USING_SHARDS: rd_rg = [0,SHARD_SIZE] i=0 else: rd_rg = None while True: train_examples = processor.get_train_examples(data_folder_current,read_range=rd_rg) if len(train_examples) == 0: break train_file = os.path.join(DATA_GCS_DIR, "train.tf_record") if USING_SHARDS: train_file+="_"+str(i) script.file_based_convert_examples_to_features( train_examples, label_list, MAX_SEQ_LENGTH, tokenizer, train_file) if not USING_SHARDS: break else: rd_rg = [pt+SHARD_SIZE for pt in rd_rg] i+=1 except Exception as e: print("training data generation failed. Error:",e) try: print("\nGenerating eval set...\n") if USING_SHARDS: rd_rg = [0,SHARD_SIZE] i=0 else: rd_rg = None while True: eval_examples = processor.get_dev_examples(data_folder_current,read_range=rd_rg) if len(eval_examples) == 0: break eval_file = os.path.join(DATA_GCS_DIR, "eval.tf_record") if USING_SHARDS: eval_file+="_"+str(i) script.file_based_convert_examples_to_features( eval_examples, label_list, MAX_SEQ_LENGTH, tokenizer, eval_file) if not USING_SHARDS: break else: rd_rg = [pt+SHARD_SIZE for pt in rd_rg] i+=1 except Exception as e: print("eval data generation failed. Error:",e) try: print("\nGenerating test set...\n") if USING_SHARDS: rd_rg = [0,SHARD_SIZE] i=0 else: rd_rg = None while True: test_examples = processor.get_test_examples(data_folder_current,read_range=rd_rg) if len(test_examples) == 0: break test_file = os.path.join(DATA_GCS_DIR, "test.tf_record") if USING_SHARDS: test_file+="_"+str(i) ## if using precise testing, the data will be split into two sets: ## one set will be able to be predicted on the maximum possible batch ## size, while the other will be predicted on a batch size of one, to ##ensure the fastest prediction without leaving out any datapoints if PRECISE_TESTING and len(test_examples)<SHARD_SIZE: test_file_trailing = os.path.join(DATA_GCS_DIR, "test_trailing.tf_record") def largest_mutiple_under_max(max,multiple_base): return int(max/multiple_base)*multiple_base split = largest_mutiple_under_max(len(test_examples),MAX_BATCH_SIZE) test_examples_head = test_examples[:split] test_examples_trailing = test_examples[split:] script.file_based_convert_examples_to_features( test_examples_head, label_list, MAX_SEQ_LENGTH, tokenizer, test_file) script.file_based_convert_examples_to_features( test_examples_trailing, label_list, MAX_SEQ_LENGTH, tokenizer, test_file_trailing) else: script.file_based_convert_examples_to_features( test_examples, label_list, MAX_SEQ_LENGTH, tokenizer, test_file) if not USING_SHARDS: break else: rd_rg = [pt+SHARD_SIZE for pt in rd_rg] i+=1 except Exception as e: print("testing data generation failed. Error:",e) # + [markdown] id="ED2rhitTCdjm" # ###Generation ops # + [markdown] id="rhUajg5kClz2" # There are currently two data generations ops (more can be added): # 1. Varying sequence lengths: multiple sets of different sequence lengths will be generated # * Store multiple individual datasets as subfolders inside of Input finetuning data folder, with each folder named its corresponding sequence length. # 2. Only one dataset: a single dataset with a specified set of parameters will be generated # * Directly store only the files train.tsv, dev.tsv, and test.tsv for one dataset inside Input finetuning data folder # + [markdown] id="_TWmbWT5SJqg" # ####Varying sequence lengths # + id="JrCuEbr6dv8U" #@markdown List of maximum sequence lengths to generate data for MAX_SEQ_LENGTHS = [1024,512,256,128,64] #@param #@markdown Whether or not to ensure all datapoints are used during prediction by using an extra trailing test dataset so no datapoints will be skipped due to the batch size. (This option should be used unless an extra trailing test dataset is a large problem) PRECISE_TESTING = True #@param {type:"boolean"} #@markdown Whether or not to split the data processing into (for really large databases, since finetuning data typically isn't that large) USING_SHARDS = False #@param {type:"boolean"} #@markdown If USING_SHARDS, what shard size to use (how many lines/datapoints should be in each shard) (MUST BE DIVISIBLE BY "MAX_BATCH_SIZE") SHARD_SIZE = 1024000 #@param {type:"integer"} for MAX_SEQ_LENGTH in MAX_SEQ_LENGTHS: print("\n\nGenerating data for seq length:",MAX_SEQ_LENGTH,"\n\n") DATA_GCS_DIR = BUCKET_PATH+"/"+OUTPUT_DATA_DIR +"/"+ str(MAX_SEQ_LENGTH) data_folder_current= INPUT_DATA_DIR+"/"+str(MAX_SEQ_LENGTH) generate_data(MAX_SEQ_LENGTH, data_folder_current, DATA_GCS_DIR, PRECISE_TESTING, USING_SHARDS, SHARD_SIZE) # + [markdown] id="zEOfXa4WiB2N" # ###Only one dataset # + id="IytLW0VbgOZz" #@markdown Maximum output data length (when using paired method, actual protein sequence length is about half of this value): MAX_SEQ_LENGTH = 512 #@param {type:"integer"} #@markdown Whether or not to ensure all datapoints are used during prediction by using an extra trailing test dataset so no datapoints will be skipped due to the batch size. (This option should be used most of the time unless an extra trailing test dataset is a large problem) PRECISE_TESTING = True #@param {type:"boolean"} #@markdown Whether or not to split the data processing into (for really large databases, since finetuning data typically isn't that large) USING_SHARDS = False #@param {type:"boolean"} #@markdown If USING_SHARDS, what shard size to use (how many lines/datapoints should be in each shard) (MUST BE DIVISIBLE BY "MAX_BATCH_SIZE") SHARD_SIZE = 1024000 #@param {type:"integer"} DATA_GCS_DIR = BUCKET_PATH+"/"+OUTPUT_DATA_DIR+"/"+str(MAX_SEQ_LENGTH) data_folder_current = INPUT_DATA_DIR generate_data(MAX_SEQ_LENGTH, data_folder_current, DATA_GCS_DIR, PRECISE_TESTING, USING_SHARDS, SHARD_SIZE)
mutformer_finetuning/mutformer_finetuning_data_generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Homework 3 - Tryout pandas and numpy # # Copy this notebook. Rename it as: YOURNAME-Homework-panda-numpy-Sept-2017 # # with your name replacing yourname and replaced with the date you submit or to the last part. # # Do the homeworks problems below. # # Upload your completed jupyter notebook to your github site and send me the url via the elearning site as your homework submission. Please note that you need to do your own 3.4, and 3.5 if you share data and analysis from others then you cannot get more than a 3. # ## Homework 3.1 # # ### 3.1.a # Load the data from: http://opendata.dc.gov/datasets that I have include in this github # into a dataframe. ( The file has been is available in directory ./data/ccp_current_csv.csv ) # ### 3.1.a what is its shape and what does that mean? # ## Homework 3.2 # What are the number of rows in each 'QUADRANT' ? # ## Homework 3.3 - Array math demonstration # For two arrarys # # a= [1,2,3,4] type=float # # b= [5,6,7,8] type=float # # Peform the following array operations using numpy # ( show both operational use of numpy and functional (example addition operation => + vs addition function => numbpy.add() ) # # ### 3.3.1 addition a+b # ### 3.3.2 subtraction a-b # ### 3.3.3 multiplication a*b # ### 3.3.4 divsion a/b # ### 3.3.5 modulo a%b # ### 3.3.6 power a^b # ## Homework 3.4 # Find your own data and load it into a dataframe # ## Homework 3.5 # Provide an interesting analysis of the data columns ( frequency or averages )
Homeworks/Homework3/Homework3-pandas-numpy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Disciplina - DQF10441 Física Experimental II" # ## Aula em 06/08/2021 - Semestre 2021/1 EARTE # ### [DQF - CCENS](http://alegre.ufes.br/ccens/departamento-de-quimica-e-fisica) - [UFES/Alegre](http://alegre.ufes.br/) # #### Professor : [<NAME>.](https://github.com/rcolistete) # + [markdown] slideshow={"slide_type": "slide"} # # Softwares de Desenho/Projeto e Simulação de Circuitos Eletrônicos # + [markdown] slideshow={"slide_type": "fragment"} # ### [MultiSimLive (www.multisim.com)](https://www.multisim.com/) # # [MultiSimLive](https://www.multisim.com/) é mais uma opção de web-app (acessível via navegador web, sem precisar instalar no computador) de software de desenho/projeto (EDA) e de simulação (usando SPICE) de circuitos eletrônicos. # # Tem : # - [versão gratuita](https://www.multisim.com/pricing/); # - [dezenas de exemplos de circuitos eletrônicos oficiais](https://www.multisim.com/reference/) e [milhares da comunidade](https://www.multisim.com/discover/); # - [documentação oficial](https://www.multisim.com/help/) e [tutoriais](https://www.multisim.com/get-started/); # # + [markdown] slideshow={"slide_type": "subslide"} # ### Importante ! # + [markdown] slideshow={"slide_type": "fragment"} # Parte dos softwares de simulação apresentados também podem ser utilizados para criar desenhos de circuitos eletrônicos (mas sem fazer diagramação em placa de circuito impresso), são digamos EDA's simplificados, por terem simbologia padrão de componentes eletrônicos. # + [markdown] slideshow={"slide_type": "slide"} # # Multímetros # + [markdown] slideshow={"slide_type": "fragment"} # ![image.png](attachment:image.png) # + [markdown] slideshow={"slide_type": "subslide"} # ## Lendo medidas de um Multímetro # + [markdown] slideshow={"slide_type": "fragment"} # Em parte das medidas usaremos o multímetro Minipa [ET-2042E](https://www.minipa.com.br/multimetros/multimetros-digitais/349-et-2042e) # # ![image.png](attachment:image.png) # + [markdown] slideshow={"slide_type": "subslide"} # ### Como ler medidas de tensão $V$ de um voltímetro (ou multímetro nesse modo) # + [markdown] slideshow={"slide_type": "fragment"} # Para medir : # - $V_{bat} = 1,5$ V (teórico); # # qual escala do multímetro Minipa ET-2042E deve ser usada para ter menor erro em tal medida ? # + [markdown] slideshow={"slide_type": "fragment"} # R.: vide tabela B do manual do multímetro Minipa ET-2042E, **escala de 6V, que tem resolução de 0,001V e precisão de $\pm(0,5\%+2D)$**. # + [markdown] slideshow={"slide_type": "fragment"} # Se em tal escala foi medida a tensão (elétrica) de uma pilha recarregável : # # $$V_{bat} = 1,329\,\text{V}$$ # # então calcule a incerteza da $V_{bat}$ medida, escrevendo a medida no formato (valor nominal +/- incerteza) com o número correto de algarismos significativos. # + [markdown] slideshow={"slide_type": "subslide"} # R.: $(1,329 \pm 0,009)$ V # - (1.329*0.005)+2*0.001 # + [markdown] slideshow={"slide_type": "subslide"} # ### Como ler medidas de corrente elétrica $i$ de um amperímetro (ou multímetro nesse modo) # + [markdown] slideshow={"slide_type": "fragment"} # Para medir : # - $I_R = 12$ mA (teórico); # # qual escala do multímetro Minipa ET-2042E deve ser usada para ter menor erro em tal medida ? # + [markdown] slideshow={"slide_type": "fragment"} # R.: vide tabela C do manual do multímetro Minipa ET-2042E, escala de 60 mA, que tem resolução de 0,01 mA e precisão de $\pm(0,8\%+8D)$. # + [markdown] slideshow={"slide_type": "fragment"} # Se em tal escala foi medida a corrente (elétrica) : # # $$I_{R} = 11,58\,\text{mA}$$ # # então calcule a incerteza da $I_R$, escrevendo a medida no formato (valor nominal +/- incerteza) com o número correto de algarismos significativos. # + [markdown] slideshow={"slide_type": "subslide"} # R.: $(11,58 \pm 0,17)$ mA # + slideshow={"slide_type": "fragment"} (11.58*0.008)+8*0.01 # + [markdown] slideshow={"slide_type": "subslide"} # ### Como ler medidas de resistência elétrica $R$ de um ohmímetro (ou multímetro nesse modo) # + [markdown] slideshow={"slide_type": "fragment"} # Para medir : # - $R = 4,7 k\Omega$ (valor nominal de um resistor); # # qual escala do multímetro Minipa ET-2042E deve ser usada para ter menor erro em tal medida ? # + [markdown] slideshow={"slide_type": "fragment"} # R.: vide tabela ? do manual do multímetro Minipa ET-2042E, escala de $6\,k\Omega$, que tem resolução de $0,001\,k\Omega$ e precisão de $\pm(0,8\% + 3D)$. # + [markdown] slideshow={"slide_type": "fragment"} # Se em tal escala foi medida a resistência (elétrica) : # # $$ R = 4,813\,k\Omega $$ # # então calcule a incerteza de $R$, escrevendo a medida no formato (valor nominal +/- incerteza) com o número correto de algarismos significativos. # + [markdown] slideshow={"slide_type": "subslide"} # R.: $(4,81 \pm 0,04)\,k\Omega$, pois adotamos a dezena limite como sendo 30. # + slideshow={"slide_type": "fragment"} (4.813*0.008) + 3*0.001 # + [markdown] slideshow={"slide_type": "slide"} # # Grupos & kits de material # + [markdown] slideshow={"slide_type": "fragment"} # Faltam : # + [markdown] slideshow={"slide_type": "fragment"} # - 3 alunos preencherem o formulário; # + [markdown] slideshow={"slide_type": "fragment"} # - definição da composição de um dos grupos; # + [markdown] slideshow={"slide_type": "fragment"} # - endereço de 1 dos 'líderes' (temporários) de grupos; # + [markdown] slideshow={"slide_type": "fragment"} # para o professor definir os grupos e líderes que receberão kit de material. # + [markdown] slideshow={"slide_type": "slide"} # # Experimentos # + [markdown] slideshow={"slide_type": "fragment"} # - Cronograma atualizado : # * (13/08) 1o roteiro "Caracterização de dispositivos eletrônicos via multímetros"; # * (20/08) 2o roteiro, "Ligação série e paralelo"; # * (27/08) 3o roteiro, "Circuitos RC, carga e descarga de capacitor"; # * (03/09) 4o roteiro, "Filtros RC e espectro de sinal usando osciloscópio"; # * (17/09) 5o roteiro, "Medição de campo magnético"; # * (24/09) 6o roteiro, "Medição de luz e espectro luminoso". # + [markdown] slideshow={"slide_type": "slide"} # # (TAREFA) Python + Uncertainties # + [markdown] slideshow={"slide_type": "fragment"} # Pratique calculando $P = V \times i$ da tabela da 4a questão do 1o trabalho. Fazer em grupo e enviar por email para o professor até 4a-feira, 11/08/2021, em Jupyter Notebook. # + [markdown] slideshow={"slide_type": "fragment"} # Mas use Uncertainties com arrays (NumPy), vide aula de 25/06/2021. # + [markdown] slideshow={"slide_type": "fragment"} # Motivação para usar arrays com incertezas : # + [markdown] slideshow={"slide_type": "fragment"} # - 1o relatório tem vários cálculos de incertezas ao fazer caracterização $V \times i$ de 4 componentes, com 11 pares de medidas em cada um; # - todos os outros relatórios vão ter cálculos de incerteza. # + [markdown] slideshow={"slide_type": "slide"} # # Desenho/Projeto de Circuitos Eletrônicos # + [markdown] slideshow={"slide_type": "fragment"} # Fazer em grupo e mostrar diagrama de circuito com : # + [markdown] slideshow={"slide_type": "fragment"} # - fonte de tensão + 2 resistores (em série); # + [markdown] slideshow={"slide_type": "fragment"} # - fonte de tensão + 1 resistor e 1 diodo (em série); # + [markdown] slideshow={"slide_type": "fragment"} # - fonte de tensão + 1 resistor e 1 LED (em série).
Aulas/Aula_20210806/Aula_FisExpII_prev_20210806.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import requests from bs4 import BeautifulSoup import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from scipy import stats import numpy as np import statsmodels.api as sm import hockey_scraper import pickle import time import random pd.set_option('display.max_columns', None) # ### Define Helper Functions # The below functions are created to help dynamically scrape Natural Stat Trick, the NHL API, create features, and merge the dataframes #scrape NST def get_and_format_nst_team_stats(season, sit, rate): #dict to convert team names from NST to team abbreviations from NHL API nst_to_sched = {'<NAME>': 'ANA', 'Arizona Coyotes': 'ARI', 'Boston Bruins': 'BOS', 'Buffalo Sabres': 'BUF', 'Calgary Flames': 'CGY', 'Carolina Hurricanes': 'CAR', 'Chicago Blackhawks': 'CHI', 'Colorado Avalanche': 'COL', 'Columbus Blue Jackets': 'CBJ', 'Dallas Stars': 'DAL', 'Detroit Red Wings': 'DET', 'Edmonton Oilers': 'EDM', 'Florida Panthers': 'FLA', 'Los Angeles Kings': 'L.A', 'Minnesota Wild': 'MIN', 'Montreal Canadiens': 'MTL', 'Nashville Predators': 'NSH', 'New Jersey Devils': 'N.J', 'New York Islanders': 'NYI', 'New York Rangers': 'NYR', 'Ottawa Senators': 'OTT', 'Philadelphia Flyers': 'PHI', 'Pittsburgh Penguins': 'PIT', 'San Jose Sharks': 'S.J', 'St Louis Blues': 'STL', 'Tampa Bay Lightning': 'T.B', 'Toronto Maple Leafs': 'TOR', 'Vancouver Canucks': 'VAN', 'Vegas Golden Knights': 'VGK', 'Washington Capitals': 'WSH', 'Winnipeg Jets': 'WPG'} #dyanmic URL url = 'https://www.naturalstattrick.com/games.php?fromseason={}&thruseason={}&stype=2&sit={}&loc=B&team=All&rate={}'.format( season, season, sit, rate) #scrape html table from webpage df = pd.read_html(url, header=0, index_col = 0, na_values=["-"])[0] #reset index df.reset_index(inplace = True) #format date df['Date'] = df['Game'].apply(lambda x: pd.to_datetime(x[0:10])) #add team game number df['Game_Number'] = df.groupby('Team').cumcount() + 1 #replcate Team name with team abbreviation df = df.replace({'Team': nst_to_sched}) #add team key to merge with game results df df['Team_Key'] = df['Team'].astype(str)+'_'+df['Date'].astype(str) return df #merge 5v5, PP, and PK team game logs from NST def merge_team_stats(primary_df, pp_df, pk_df): primary_df = primary_df.merge(pk_df[['Team_Key', 'TOI', 'xGA', 'GA']], on = 'Team_Key', how = 'left', suffixes = ('','_pk') ) primary_df = primary_df.merge(pp_df[['Team_Key', 'TOI', 'xGF', 'GF']], on = 'Team_Key', how = 'left', suffixes = ('','_pp') ) return primary_df # Feature Engineering # See Exploratory Data Analysis Notebook for more commentary on the Features #calculate team features. Number of rolling games used is dynamic to test if different numbers of game work better. def calculate_team_features(df, rolling_games = 20): df[f'sum_rolling{rolling_games}_TOI_5v5'] = df.groupby('Team')['TOI'].transform(lambda x: x.rolling(rolling_games, rolling_games).sum().shift()) df[f'sum_rolling{rolling_games}_FF_5v5'] = df.groupby('Team')['FF'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'sum_rolling{rolling_games}_FA_5v5'] = df.groupby('Team')['FA'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'sum_rolling{rolling_games}_GF_5v5'] = df.groupby('Team')['GF'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'sum_rolling{rolling_games}_GA_5v5'] = df.groupby('Team')['GA'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'sum_rolling{rolling_games}_xGF_5v5'] = df.groupby('Team')['xGF'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'sum_rolling{rolling_games}_xGA_5v5'] = df.groupby('Team')['xGA'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'sum_rolling{rolling_games}_SF_5v5'] = df.groupby('Team')['SF'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'last_{rolling_games}_FF%_5v5'] = df[f'sum_rolling{rolling_games}_FF_5v5']*100/ (df[f'sum_rolling{rolling_games}_FF_5v5']+df[f'sum_rolling{rolling_games}_FA_5v5']) df[f'last_{rolling_games}_GF%_5v5'] = df[f'sum_rolling{rolling_games}_GF_5v5']*100/ (df[f'sum_rolling{rolling_games}_GF_5v5']+df[f'sum_rolling{rolling_games}_GA_5v5']) df[f'last_{rolling_games}_xGF%_5v5'] = df[f'sum_rolling{rolling_games}_xGF_5v5']*100/ (df[f'sum_rolling{rolling_games}_xGF_5v5']+df[f'sum_rolling{rolling_games}_xGA_5v5']) df[f'last_{rolling_games}_SH%'] = df[f'sum_rolling{rolling_games}_GF_5v5']*100 / df[f'sum_rolling{rolling_games}_SF_5v5'] #fix NaNs in pp and pk features. If team wasnt on PP or PK in a game that game is missing from dataframe. df['TOI_pp'] = np.where(df['TOI_pp'].isna(), 0, df['TOI_pp']) df['TOI_pk'] = np.where(df['TOI_pk'].isna(), 0, df['TOI_pk']) df['xGF_pp'] = np.where(df['xGF_pp'].isna(), 0, df['xGF_pp']) df['GF_pp'] = np.where(df['GF_pp'].isna(), 0, df['GF_pp']) df['xGA_pk'] = np.where(df['xGA_pk'].isna(), 0, df['xGA_pk']) df['GA_pk'] = np.where(df['GA_pk'].isna(), 0, df['GA_pk']) #pp features df[f'sum_rolling{rolling_games}_TOI_pp'] = df.groupby('Team')['TOI_pp'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'sum_rolling{rolling_games}_xGF_pp'] = df.groupby('Team')['xGF_pp'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'sum_rolling{rolling_games}_GF_pp'] = df.groupby('Team')['GF_pp'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'last{rolling_games}_pp_TOI_per_game'] = df.groupby('Team')['TOI_pp'].transform(lambda x: x.rolling(rolling_games, rolling_games ).mean().shift()) df[f'last{rolling_games}_xGF_per_min_pp'] = df[f'sum_rolling{rolling_games}_xGF_pp'] / df[f'sum_rolling{rolling_games}_TOI_pp'] df[f'last{rolling_games}_GF_per_min_pp'] = df[f'sum_rolling{rolling_games}_GF_pp'] / df[f'sum_rolling{rolling_games}_TOI_pp'] #pk features df[f'sum_rolling{rolling_games}_TOI_pk'] = df.groupby('Team')['TOI_pk'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'sum_rolling{rolling_games}_xGA_pk'] = df.groupby('Team')['xGA_pk'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'sum_rolling{rolling_games}_GA_pk'] = df.groupby('Team')['GA_pk'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) df[f'last{rolling_games}_pk_TOI_per_game'] = df.groupby('Team')['TOI_pk'].transform(lambda x: x.rolling(rolling_games, rolling_games ).mean().shift()) df[f'last{rolling_games}_xGA_per_min_pk'] = df[f'sum_rolling{rolling_games}_xGA_pk'] / df[f'sum_rolling{rolling_games}_TOI_pk'] df[f'last{rolling_games}_GA_per_min_pk'] = df[f'sum_rolling{rolling_games}_GA_pk'] / df[f'sum_rolling{rolling_games}_TOI_pk'] #to get back to back category df['Last_Game_Date'] = df.groupby('Team')['Date'].shift() df['Days_Since_Last_Game'] = df['Date'] - df['Last_Game_Date'] df['B2B'] = np.where(df['Days_Since_Last_Game'] == '1 days', 1, 0) return df #scrape data from NHL API via hockey_scraper to get official results. def get_game_results(season_start, season_end): sched_df = hockey_scraper.scrape_schedule(season_start, season_end) sched_df['Home_Team_Won'] = np.where(sched_df['home_score'] > sched_df['away_score'], 1, 0) #create keys for home and away team in order to import their features sched_df['Home_Team_Key'] = sched_df['home_team'].astype(str)+'_'+sched_df['date'].astype(str) sched_df['Away_Team_Key'] = sched_df['away_team'].astype(str)+'_'+sched_df['date'].astype(str) return sched_df #merge team features, goalies feature, and Elo feature to the schedule DF. This DF will be used for modeling. def merge_starters_and_features(game_results_df, goalies_df, features_df, elo, feature_columns, goalie_feature_columns): goalies_df = goalies_df[goalies_df['TOI'] >=28.5] df = game_results_df.merge(goalies_df[goalie_feature_columns].add_prefix('home_'), left_on = 'Home_Team_Key', right_on = 'home_Team_Key', how = 'left').rename(columns ={'home_Name':'home_goalie'}).drop(columns = 'home_Team_Key') df = df.merge(goalies_df[goalie_feature_columns].add_prefix('away_'), left_on = 'Away_Team_Key', right_on = 'away_Team_Key', how = 'left').rename(columns ={'away_Name':'away_goalie'}).drop(columns = 'away_Team_Key') df = df.merge(features_df[feature_columns].add_prefix('home_'), left_on = 'Home_Team_Key', right_on = 'home_Team_Key', how = 'left') df = df.merge(features_df[feature_columns].add_prefix('away_'), left_on = 'Away_Team_Key', right_on = 'away_Team_Key', how = 'left') df = df.merge(elo[['elo_Team_Key', 'Rating.A.Pre']].add_prefix('home_'), left_on='Home_Team_Key', right_on='home_elo_Team_Key', how = 'left').drop(columns = 'home_elo_Team_Key') df = df.merge(elo[['elo_Team_Key', 'Rating.A.Pre']].add_prefix('away_'), left_on='Away_Team_Key', right_on='away_elo_Team_Key', how= 'left').drop(columns = 'away_elo_Team_Key') #categorize B2B conditions = [((df['home_B2B'] == 0) & (df['away_B2B'] == 0)), ((df['home_B2B'] == 1) & (df['away_B2B'] == 0)), ((df['home_B2B'] == 0) & (df['away_B2B'] == 1)), ((df['home_B2B'] == 1) & (df['away_B2B'] == 1)) ] choices = ['Neither', 'Home_only', 'Away_only', 'Both'] df['B2B_Status'] = np.select(conditions, choices) #season conditions = [((df['date'] >= '2017-10-04') & (df['date'] <= '2018-04-08')), ((df['date'] >= '2018-10-03') & (df['date'] <= '2019-04-06')), ((df['date'] >= '2019-10-02') & (df['date'] <= '2020-03-12')), ((df['date'] >= '2021-01-13') & (df['date'] <= '2021-06-29')) ] choices = ['2017-2018', '2018-2019', '2019-2020', '2020-2021'] df['Season'] = np.select(conditions, choices) return df #goalie feature columns goalie_feature_columns = ['Team_Key', 'Name', 'Goalie_FenwickSV%', 'Goalie_GSAx/60', 'Goalie_HDCSV%'] # ### Get ELO Data # # Credit to [<NAME>](https://github.com/NeilPaine538/NHL-Player-And-Team-Ratings) for calculating and providing the data. elo = pd.read_csv('https://raw.githubusercontent.com/NeilPaine538/NHL-Player-And-Team-Ratings/master/nhl_elo_historical.csv') elo = elo[elo['Date'] > '2013'] elo.head() elo_conversion = {'VEG' : 'VGK', 'NJD': 'N.J', 'SJS': 'S.J', 'TBL' : 'T.B', 'LAK' : 'L.A'} elo = elo.replace({'Franch.A': elo_conversion}) elo.head() elo['Franch.A'].value_counts() elo['elo_Team_Key'] = elo['Franch.A'].astype(str)+'_'+elo['Date'].astype(str) # ### Get Goalie Data #import dictionary with goalie names and IDs from NHL API infile = open("data/goalie_ids.pickle",'rb') goalie_ids = pickle.load(infile) infile.close() ## scrape season long stats to get name of all goalies who played in time frame goalie_list = pd.read_html('https://www.naturalstattrick.com/playerteams.php?fromseason=20182019&thruseason=20202021&stype=2&sit=5v5&score=all&stdoi=g&rate=n&team=ALL&pos=S&loc=B&toi=0&gpfilt=none&fd=&td=&tgp=410&lines=single&draftteam=ALL')[0] #find which goalies are missing from goalie_ids dictionary missing_goalies2 = [g for g in list(goalie_list['Player']) if g not in goalie_ids.keys() ] ## <NAME> already in Data Dictionary as <NAME> missing_goalies2.remove('<NAME>') missing_goalies2 def goalie_features(df, rolling_games = 40): rolling_games = rolling_games min_games = 10 df['Date'] = df['Game'].apply(lambda x: pd.to_datetime(x[0:10])) df['Team_Key'] = df['Team'].astype(str)+'_'+df['Date'].astype(str) df['Rolling_TOI'] = df.groupby('ID')['TOI'].transform(lambda x: x.rolling(rolling_games, min_games).sum().shift()) df['Rolling_FA'] = df.groupby('ID')['FA'].transform(lambda x: x.rolling(rolling_games, min_games).sum().shift()) df['Rolling_SA'] = df.groupby('ID')['SA'].transform(lambda x: x.rolling(rolling_games, min_games).sum().shift()) df['Rolling_GA'] = df.groupby('ID')['GA'].transform(lambda x: x.rolling(rolling_games, min_games).sum().shift()) df['Rolling_xGA'] = df.groupby('ID')['xGA'].transform(lambda x: x.rolling(rolling_games, min_games).sum().shift()) df['Rolling_HDCA'] = df.groupby('ID')['HDCA'].transform(lambda x: x.rolling(rolling_games, min_games).sum().shift()) df['Rolling_HDGA'] = df.groupby('ID')['HDGA'].transform(lambda x: x.rolling(rolling_games, min_games).sum().shift()) df['Goalie_FenwickSV%'] = (df['Rolling_FA'] - df['Rolling_GA']) / df['Rolling_FA'] df['Goalie_GSAx'] = df['Rolling_xGA'] - df['Rolling_GA'] df['Goalie_GSAx/60'] = df['Goalie_GSAx']*60 / df['Rolling_TOI'] df['Goalie_HDCSV%'] = (df['Rolling_HDCA'] - df['Rolling_HDGA'] ) / df['Rolling_HDCA'] return df # only scrape at most 2 seasons at a time def get_goalie_data(goalie_ids, start_year, end_year): counter = 0 for name, gid in goalie_ids.items(): sequence = [x/10 for x in range(60, 120)] time.sleep(random.choice(sequence)) url = 'https://www.naturalstattrick.com/playerreport.php?fromseason={}&thruseason={}&playerid={}&sit=all&stype=2&stdoi=oi&rate=n&v=g'.format(start_year, end_year, gid) #due to number of http requests, NST may ban your IP before the loop finishes. I needed to use a VPN to get around this. If IP gets banned, this function will still return the current DF and you can call the function again and pass in an updated goalie dictionary to get the rest try: individual_df = pd.read_html(url)[0] individual_df['Name'] = name individual_df['ID'] = gid except: print(f'Ended before {name}') return all_goalies4 if counter == 0: all_goalies4 = individual_df print(name) print(counter) elif counter != 0: all_goalies4 = pd.concat([all_goalies4, individual_df]) print(name) print(counter) counter +=1 return all_goalies4 goalies_161718 = get_goalie_data(goalie_ids, 20162017, 20172018) goalies_161718.to_csv('data/goalie_logs_1617_1718') goalies_181920 = get_goalie_data(goalie_ids, 20182019, 20192020) goalies_181920.to_csv('data/goalie_logs_1819_1920') goalies_2021 = get_goalie_data(goalie_ids, 20202021, 20202021) goalies_2021.to_csv('data/goalie_logs_2021') goalies_all_C = pd.concat([goalies_161718, goalies_181920, goalies_2021]) goalie_features_dfC = goalie_features(goalies_all_C) goalie_features_dfC pickle_out = open("goalie_game_log_df.pickle","wb") pickle.dump(goalie_features_dfC, pickle_out) pickle_out.close() goalie_features_dfC.to_csv('data/goalie_game_logs_C.csv') # ### Determine Goalie Stats to Impute for Goalies Having Player Less Than 10 Games goalie_features_dfC.isna().sum() #set df for goalies who hadnt played 10 games at that point ig_df = goalie_features_dfC[(goalie_features_dfC['Goalie_FenwickSV%'].isna()) & (goalie_features_dfC['Date'] >= '2017-10-04') & (goalie_features_dfC['Date']<'2021')] ig_df[ig_df['Date'].apply(lambda x: x.year) == 2021] ig_TOI = ig_df['TOI'].sum() ig_FA = ig_df['FA'].sum() ig_GA = ig_df['GA'].sum() ig_xGA = ig_df['xGA'].sum() ig_HDCA = ig_df['HDCA'].sum() ig_HDGA = ig_df['HDGA'].sum() ig_FenwickSV = (ig_FA - ig_GA) /ig_FA ig_GSAx = ig_xGA - ig_GA ig_GSAx60 = (ig_GSAx*60) / ig_TOI ig_HDCSV = (ig_HDCA - ig_HDGA )/ ig_HDCA #experience goalie df for comparison eg_df = goalie_features_dfC[(~goalie_features_dfC['Goalie_FenwickSV%'].isna()) & (goalie_features_dfC['Date'] >= '2017-10-04') & (goalie_features_dfC['Date']<'2021')] eg_TOI = eg_df['TOI'].sum() eg_FA = eg_df['FA'].sum() eg_GA = eg_df['GA'].sum() eg_xGA = eg_df['xGA'].sum() eg_HDCA = eg_df['HDCA'].sum() eg_HDGA = eg_df['HDGA'].sum() eg_FenwickSV = (eg_FA - eg_GA) /eg_FA eg_GSAx = eg_xGA - eg_GA eg_GSAx60 = (eg_GSAx*60) / eg_TOI eg_HDCSV = (eg_HDCA - eg_HDGA )/ eg_HDCA display(ig_FenwickSV) display(eg_FenwickSV) display(ig_GSAx60) display(eg_GSAx60) goalie_features_dfB['Goalie_GSAx/60'].std() display(ig_HDCSV ) display(eg_HDCSV) ig = [ig_FenwickSV, ig_GSAx , ig_GSAx60 , ig_HDCSV ] # ### Calculating Weighted Rolling Features # This is a work in progress # + # def calculate_weighted_team_features(df, rolling_games = 20): # weights = np.arange(1,rolling_games) # df[f'sum_rolling{rolling_games}_TOI_5v5'] = df.groupby('Team')['TOI'].transform(lambda x: x.rolling(rolling_games, rolling_games).sum().shift()) # df[f'sum_rolling{rolling_games}_FF_5v5'] = df.groupby('Team')['FF'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) # df[f'sum_rolling{rolling_games}_FA_5v5'] = df.groupby('Team')['FA'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) # df[f'sum_rolling{rolling_games}_GF_5v5'] = df.groupby('Team')['GF'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) # df[f'sum_rolling{rolling_games}_GA_5v5'] = df.groupby('Team')['GA'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) # df[f'sum_rolling{rolling_games}_xGF_5v5'] = df.groupby('Team')['xGF'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) # df[f'sum_rolling{rolling_games}_xGA_5v5'] = df.groupby('Team')['xGA'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) # df[f'sum_rolling{rolling_games}_SF_5v5'] = df.groupby('Team')['SF'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) # df[f'last_{rolling_games}_FF%_5v5'] = df[f'sum_rolling{rolling_games}_FF_5v5']*100/ (df[f'sum_rolling{rolling_games}_FF_5v5']+df[f'sum_rolling{rolling_games}_FA_5v5']) # df[f'last_{rolling_games}_GF%_5v5'] = df[f'sum_rolling{rolling_games}_GF_5v5']*100/ (df[f'sum_rolling{rolling_games}_GF_5v5']+df['sum_rolling20_GA_5v5']) # df[f'last_{rolling_games}_xGF%_5v5'] = df['sum_rolling20_xGF_5v5']*100/ (df['sum_rolling20_xGF_5v5']+df[f'sum_rolling{rolling_games}_GA_5v5']) # df[f'last_{rolling_games}_SH%'] = df[f'sum_rolling{rolling_games}_GF_5v5']*100 / df[f'sum_rolling{rolling_games}_SF_5v5'] # #fix NaNs in pp and pk features # df['TOI_pp'] = np.where(df['TOI_pp'].isna(), 0, df['TOI_pp']) # df['TOI_pk'] = np.where(df['TOI_pk'].isna(), 0, df['TOI_pk']) # df['xGF_pp'] = np.where(df['xGF_pp'].isna(), 0, df['xGF_pp']) # df['xGA_pk'] = np.where(df['xGA_pk'].isna(), 0, df['xGA_pk']) # #pp features # df[f'sum_rolling{rolling_games}_TOI_pp'] = df.groupby('Team')['TOI_pp'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) # df[f'sum_rolling{rolling_games}_xGF_pp'] = df.groupby('Team')['xGF_pp'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) # df[f'last{rolling_games}_pp_TOI_per_game'] = df.groupby('Team')['TOI_pp'].transform(lambda x: x.rolling(rolling_games, rolling_games ).mean().shift()) # df[f'last{rolling_games}_xGF_per_min_pp'] = df[f'sum_rolling{rolling_games}_xGF_pp'] / df[f'sum_rolling{rolling_games}_TOI_pp'] # #pk features # df[f'sum_rolling{rolling_games}_TOI_pk'] = df.groupby('Team')['TOI_pk'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) # df[f'sum_rolling{rolling_games}_xGA_pk'] = df.groupby('Team')['xGA_pk'].transform(lambda x: x.rolling(rolling_games, rolling_games ).sum().shift()) # df[f'last{rolling_games}_pk_TOI_per_game'] = df.groupby('Team')['TOI_pk'].transform(lambda x: x.rolling(rolling_games, rolling_games ).mean().shift()) # df[f'last{rolling_games}_xGA_per_min_pk'] = df[f'sum_rolling{rolling_games}_xGA_pk'] / df[f'sum_rolling{rolling_games}_TOI_pk'] # #to get back to back category # df['Last_Game_Date'] = df.groupby('Team')['Date'].shift() # df['Days_Since_Last_Game'] = df['Date'] - df['Last_Game_Date'] # df['B2B'] = np.where(df['Days_Since_Last_Game'] == '1 days', 1, 0) # return df # - # df = primary # w = np.arange(1, 21) # rolling_games = 20 # df[f'sum_rolling{rolling_games}_FF_5v5'] = df.groupby('Team')['FF'].transform(lambda x: x.rolling(rolling_games, rolling_games ).apply(lambda x: (x * w).sum()).shift()) # df[f'sum_rolling{rolling_games}_FA_5v5'] = df.groupby('Team')['FA'].transform(lambda x: x.rolling(rolling_games, rolling_games ).apply(lambda x: (x * w).sum()).shift()) # df[f'last_{rolling_games}_FF%_5v5'] = df[f'sum_rolling{rolling_games}_FF_5v5']*100/ (df[f'sum_rolling{rolling_games}_FF_5v5']+df[f'sum_rolling{rolling_games}_FA_5v5']) # ### Get Data With No Scoring and Venue Adjustments primary1617 = get_and_format_nst_team_stats('20162017','5v5', 'n') pp1617 = get_and_format_nst_team_stats('20162017','pp', 'n') pk1617 = get_and_format_nst_team_stats('20162017','pk', 'n') primary1718 = get_and_format_nst_team_stats('20172018','5v5', 'n') pp1718 = get_and_format_nst_team_stats('20172018','pp', 'n') pk1718 = get_and_format_nst_team_stats('20172018','pk', 'n') features1617 = merge_team_stats(primary1617, pp1617, pk1617) features1718 = merge_team_stats(primary1718, pp1718, pk1718) features1819 = merge_team_stats(primary1819, pp1819, pk1819) features1920 = merge_team_stats(primary1920, pp1920, pk1920) features2021 = merge_team_stats(primary2021, pp2021, pk2021) team_stats_all_seasons = pd.concat([features1617, features1718, features1819, features1920, features2021]).sort_values('Date') pd.options.display.max_rows = 100 team_stats_all_seasons.isna().sum() for games in [3,5,10,20,30]: team_stats_all_seasons = calculate_team_features(team_stats_all_seasons, games) team_stats_all_seasons.tail() feature_columns_all_seasons = ['Team_Key', 'last_3_FF%_5v5', 'last_3_GF%_5v5', 'last_3_xGF%_5v5', 'last_3_SH%', 'last3_pp_TOI_per_game', 'last3_xGF_per_min_pp', 'last3_pk_TOI_per_game', 'last3_xGA_per_min_pk', 'B2B', 'last_5_FF%_5v5', 'last_5_GF%_5v5', 'last_5_xGF%_5v5', 'last_5_SH%', 'last5_pp_TOI_per_game', 'last5_xGF_per_min_pp', 'last5_pk_TOI_per_game', 'last5_xGA_per_min_pk', 'last_10_FF%_5v5', 'last_10_GF%_5v5', 'last_10_xGF%_5v5', 'last_10_SH%', 'last10_pp_TOI_per_game', 'last10_xGF_per_min_pp', 'last10_pk_TOI_per_game', 'last10_xGA_per_min_pk', 'last_20_FF%_5v5', 'last_20_GF%_5v5', 'last_20_xGF%_5v5', 'last_20_SH%', 'last20_pp_TOI_per_game', 'last20_xGF_per_min_pp', 'last20_pk_TOI_per_game', 'last20_xGA_per_min_pk', 'last_30_FF%_5v5', 'last_30_GF%_5v5', 'last_30_xGF%_5v5', 'last_30_SH%', 'last30_pp_TOI_per_game', 'last30_xGF_per_min_pp', 'last30_pk_TOI_per_game', 'last30_xGA_per_min_pk'] df_20172018_B = merge_starters_and_features(results, goalie_features_dfB, team_stats_all_seasons, feature_columns_all_seasons, goalie_feature_columns) df_20182019_B = merge_starters_and_features(results1819, goalie_features_dfB, team_stats_all_seasons, feature_columns_all_seasons, goalie_feature_columns) df_20192020_B = merge_starters_and_features(results1920, goalie_features_dfB, team_stats_all_seasons, feature_columns_all_seasons, goalie_feature_columns) df_20202021_B = merge_starters_and_features(results2021, goalie_features_dfB, team_stats_all_seasons, feature_columns_all_seasons, goalie_feature_columns) all_games_multirolling_noSVA = pd.concat([df_20172018_B, df_20182019_B, df_20192020_B, df_20202021_B]) #impute goalie stats where lack of games causing NaN all_games_multirolling_noSVA['away_Last_20_FenwickSV%'] = np.where(all_games_multirolling_noSVA['away_Last_20_FenwickSV%'].isna(), ig_FenwickSV,all_games_multirolling_noSVA['away_Last_20_FenwickSV%']) all_games_multirolling_noSVA['away_Last_20_GSAx/60'] = np.where(all_games_multirolling_noSVA['away_Last_20_GSAx/60'].isna(), ig_GSAx60, all_games_multirolling_noSVA['away_Last_20_GSAx/60']) all_games_multirolling_noSVA['away_Last_20_HDCSV%'] = np.where(all_games_multirolling_noSVA['away_Last_20_HDCSV%'].isna(), ig_HDCSV, all_games_multirolling_noSVA['away_Last_20_HDCSV%']) all_games_multirolling_noSVA['home_Last_20_FenwickSV%'] = np.where(all_games_multirolling_noSVA['home_Last_20_FenwickSV%'].isna(), ig_FenwickSV,all_games_multirolling_noSVA['home_Last_20_FenwickSV%']) all_games_multirolling_noSVA['home_Last_20_GSAx/60'] = np.where(all_games_multirolling_noSVA['home_Last_20_GSAx/60'].isna(), ig_GSAx60, all_games_multirolling_noSVA['home_Last_20_GSAx/60']) all_games_multirolling_noSVA['home_Last_20_HDCSV%'] = np.where(all_games_multirolling_noSVA['home_Last_20_HDCSV%'].isna(), ig_HDCSV, all_games_multirolling_noSVA['home_Last_20_HDCSV%']) all_games_multirolling_noSVA.to_csv('data/all_games_multirolling_noSVA.csv') pd.options.display.max_rows = 104 all_games_multirolling_noSVA.isna().sum() # ### Get Data With Scoring and Venue Adjustments # + #scraping team stats from NST sequence = [x/10 for x in range(60, 120)] time.sleep(random.choice(sequence)) primarysva1617 = get_and_format_nst_team_stats('20162017', 'sva', 'n') primarysva1718 = get_and_format_nst_team_stats('20172018','sva', 'n') primarysva1819 = get_and_format_nst_team_stats('20182019','sva', 'n') primarysva1920 = get_and_format_nst_team_stats('20192020','sva', 'n') primarysva2021 = get_and_format_nst_team_stats('20202021','sva', 'n') time.sleep(random.choice(sequence)) pp1617 = get_and_format_nst_team_stats('20162017','pp', 'n') pp1718 = get_and_format_nst_team_stats('20172018','pp', 'n') pp1819 = get_and_format_nst_team_stats('20182019','pp', 'n') pp1920 = get_and_format_nst_team_stats('20192020','pp', 'n') pp2021 = get_and_format_nst_team_stats('20202021','pp', 'n') time.sleep(random.choice(sequence)) pk1617 = get_and_format_nst_team_stats('20162017','pk', 'n') pk1718 = get_and_format_nst_team_stats('20172018','pk', 'n') pk1819 = get_and_format_nst_team_stats('20182019','pk', 'n') pk1920 = get_and_format_nst_team_stats('20192020','pk', 'n') pk2021 = get_and_format_nst_team_stats('20202021','pk', 'n') # - #merge features for each season featuressva1617 = merge_team_stats(primarysva1617, pp1617, pk1617) featuressva1718 = merge_team_stats(primarysva1718, pp1718, pk1718) featuressva1819 = merge_team_stats(primarysva1819, pp1819, pk1819) featuressva1920 = merge_team_stats(primarysva1920, pp1920, pk1920) featuressva2021 = merge_team_stats(primarysva2021, pp2021, pk2021) #concat each season into one df team_stats_all_seasons_sva = pd.concat([featuressva1617, featuressva1718, featuressva1819, featuressva1920, featuressva2021]).sort_values('Date') #calculate different rolling game features for games in [1,3,5,10,20,30,40,50]: team_stats_all_seasons_sva = calculate_team_features(team_stats_all_seasons_sva, games) list(team_stats_all_seasons_sva.columns) feature_columns_all_seasons = [ 'Game_Number', 'Team_Key', 'last_1_FF%_5v5', 'last_1_GF%_5v5', 'last_1_xGF%_5v5', 'last_1_SH%', 'last1_pp_TOI_per_game', 'last1_xGF_per_min_pp', 'last1_GF_per_min_pp', 'last1_pk_TOI_per_game', 'last1_xGA_per_min_pk', 'last1_GA_per_min_pk', 'last_3_FF%_5v5', 'last_3_GF%_5v5', 'last_3_xGF%_5v5', 'last_3_SH%', 'last3_pp_TOI_per_game', 'last3_xGF_per_min_pp', 'last3_GF_per_min_pp', 'last3_pk_TOI_per_game', 'last3_xGA_per_min_pk', 'last3_GA_per_min_pk', 'Last_Game_Date', 'Days_Since_Last_Game', 'B2B', 'last_5_FF%_5v5', 'last_5_GF%_5v5', 'last_5_xGF%_5v5', 'last_5_SH%', 'last5_pp_TOI_per_game', 'last5_xGF_per_min_pp', 'last5_GF_per_min_pp', 'last5_pk_TOI_per_game', 'last5_xGA_per_min_pk', 'last5_GA_per_min_pk', 'last_10_FF%_5v5', 'last_10_GF%_5v5', 'last_10_xGF%_5v5', 'last_10_SH%', 'last10_pp_TOI_per_game', 'last10_xGF_per_min_pp', 'last10_GF_per_min_pp', 'last10_pk_TOI_per_game', 'last10_xGA_per_min_pk', 'last10_GA_per_min_pk', 'last_20_FF%_5v5', 'last_20_GF%_5v5', 'last_20_xGF%_5v5', 'last_20_SH%', 'last20_pp_TOI_per_game', 'last20_xGF_per_min_pp', 'last20_GF_per_min_pp', 'last20_pk_TOI_per_game', 'last20_xGA_per_min_pk', 'last20_GA_per_min_pk', 'last_30_FF%_5v5', 'last_30_GF%_5v5', 'last_30_xGF%_5v5', 'last_30_SH%', 'last30_pp_TOI_per_game', 'last30_xGF_per_min_pp', 'last30_GF_per_min_pp', 'last30_pk_TOI_per_game', 'last30_xGA_per_min_pk', 'last30_GA_per_min_pk', 'last_40_FF%_5v5', 'last_40_GF%_5v5', 'last_40_xGF%_5v5', 'last_40_SH%', 'last40_pp_TOI_per_game', 'last40_xGF_per_min_pp', 'last40_GF_per_min_pp', 'last40_pk_TOI_per_game', 'last40_xGA_per_min_pk', 'last40_GA_per_min_pk', 'last40_pp_TOI_per_game', 'last40_xGF_per_min_pp', 'last40_GF_per_min_pp', 'last40_pk_TOI_per_game', 'last40_xGA_per_min_pk', 'last40_GA_per_min_pk', 'last_50_FF%_5v5', 'last_50_GF%_5v5', 'last_50_xGF%_5v5', 'last_50_SH%', 'last50_pp_TOI_per_game', 'last50_xGF_per_min_pp', 'last50_GF_per_min_pp', 'last50_pk_TOI_per_game', 'last50_xGA_per_min_pk', 'last50_GA_per_min_pk'] #get official game results results1718 = get_game_results('2017-10-04', '2018-04-08') results1819 = get_game_results('2018-10-03', '2019-04-06') results1920 = get_game_results('2019-10-02', '2020-03-12') results2021 = get_game_results('2021-01-13', '2021-05-06') display(results1718.shape) display(results1819.shape) display(results1920.shape) display(results2021.shape) display(results1718.shape[0] + results1819.shape[0] +results1920.shape[0] +results2021.shape[0]) df_20172018_C = merge_starters_and_features(results1718, goalie_features_dfC, team_stats_all_seasons_sva, elo, feature_columns_all_seasons, goalie_feature_columns) df_20182019_C = merge_starters_and_features(results1819, goalie_features_dfC, team_stats_all_seasons_sva, elo, feature_columns_all_seasons, goalie_feature_columns) df_20192020_C = merge_starters_and_features(results1920, goalie_features_dfC, team_stats_all_seasons_sva, elo, feature_columns_all_seasons, goalie_feature_columns) df_20202021_C = merge_starters_and_features(results2021, goalie_features_dfC, team_stats_all_seasons_sva, elo, feature_columns_all_seasons, goalie_feature_columns) all_games_multirolling_SVA_2 = pd.concat([df_20172018_C, df_20182019_C, df_20192020_C, df_20202021_C]) # some duplicates due to 2 goalies playsing > 28.5 minutes in a game, dropping first all_games_multirolling_SVA_2 = all_games_multirolling_SVA_2[~all_games_multirolling_SVA_2.duplicated(subset='game_id')] all_games_multirolling_SVA_2.shape #all star game is only missing game from Elo ratings, will be dropped all_games_multirolling_SVA_2[all_games_multirolling_SVA_2['home_Rating.A.Pre'].isna()]['home_team'].value_counts() #impute goalie stats where lack of games causing NaN all_games_multirolling_SVA_2['away_Goalie_FenwickSV%'] = np.where(all_games_multirolling_SVA_2['away_Goalie_FenwickSV%'].isna(), ig_FenwickSV,all_games_multirolling_SVA_2['away_Goalie_FenwickSV%']) all_games_multirolling_SVA_2['away_Goalie_GSAx/60'] = np.where(all_games_multirolling_SVA_2['away_Goalie_GSAx/60'].isna(), ig_GSAx60, all_games_multirolling_SVA_2['away_Goalie_GSAx/60']) all_games_multirolling_SVA_2['away_Goalie_HDCSV%'] = np.where(all_games_multirolling_SVA_2['away_Goalie_HDCSV%'].isna(), ig_HDCSV, all_games_multirolling_SVA_2['away_Goalie_HDCSV%']) all_games_multirolling_SVA_2['home_Goalie_FenwickSV%'] = np.where(all_games_multirolling_SVA_2['home_Goalie_FenwickSV%'].isna(), ig_FenwickSV,all_games_multirolling_SVA_2['home_Goalie_FenwickSV%']) all_games_multirolling_SVA_2['home_Goalie_GSAx/60'] = np.where(all_games_multirolling_SVA_2['home_Goalie_GSAx/60'].isna(), ig_GSAx60, all_games_multirolling_SVA_2['home_Goalie_GSAx/60']) all_games_multirolling_SVA_2['home_Goalie_HDCSV%'] = np.where(all_games_multirolling_SVA_2['home_Goalie_HDCSV%'].isna(), ig_HDCSV, all_games_multirolling_SVA_2['home_Goalie_HDCSV%']) all_games_multirolling_SVA_2.to_csv('data/all_games_multirolling_SVA_3.csv') #not significant amount of games missing pd.options.display.max_rows = 120 all_games_multirolling_SVA_2.isna().sum()[all_games_multirolling_SVA_2.isna().sum() >25]
Data_Collection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + from sklearn.linear_model import SGDClassifier X = [[0, 0], [1, 1]] y = [0, 1] clf = SGDClassifier(loss='hinge', penalty='l2') clf.fit(X, y) # - clf.predict([[2, 2]]) clf.coef_ clf.intercept_ # To get the signed distance to the hyperplane use SGDClassifier.decision_function: clf.decision_function([[2, 2]])
SGD-learn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Markdown Cells # Text can be added to Jupyter Notebooks using Markdown cells. Markdown is a popular markup language that is a superset of HTML. Its specification can be found here: # # <http://daringfireball.net/projects/markdown/> # ## Markdown basics # You can make text *italic* or **bold**. # You can build nested itemized or enumerated lists: # # * One # - Sublist # - This # - Sublist # - That # - The other thing # * Two # - Sublist # * Three # - Sublist # # Now another list: # # 1. Here we go # 1. Sublist # 2. Sublist # 2. There we go # 3. Now this # You can add horizontal rules: # # --- # Here is a blockquote: # # > Beautiful is better than ugly. # > Explicit is better than implicit. # > Simple is better than complex. # > Complex is better than complicated. # > Flat is better than nested. # > Sparse is better than dense. # > Readability counts. # > Special cases aren't special enough to break the rules. # > Although practicality beats purity. # > Errors should never pass silently. # > Unless explicitly silenced. # > In the face of ambiguity, refuse the temptation to guess. # > There should be one-- and preferably only one --obvious way to do it. # > Although that way may not be obvious at first unless you're Dutch. # > Now is better than never. # > Although never is often better than *right* now. # > If the implementation is hard to explain, it's a bad idea. # > If the implementation is easy to explain, it may be a good idea. # > Namespaces are one honking great idea -- let's do more of those! # And shorthand for links: # # [Jupyter's website](http://jupyter.org) # ## Headings # You can add headings by starting a line with one (or multiple) `#` followed by a space, as in the following example: # # ``` # # Heading 1 # # Heading 2 # ## Heading 2.1 # ## Heading 2.2 # ``` # ## Embedded code # You can embed code meant for illustration instead of execution in Python: # # def f(x): # """a docstring""" # return x**2 # # or other languages: # # if (i=0; i<n; i++) { # printf("hello %d\n", i); # x += 4; # } # ## LaTeX equations # Courtesy of MathJax, you can include mathematical expressions both inline: # $e^{i\pi} + 1 = 0$ and displayed: # # $$e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$$ # Inline expressions can be added by surrounding the latex code with `$`: # ``` # $e^{i\pi} + 1 = 0$ # ``` # # Expressions on their own line are surrounded by `$$`: # ```latex # $$e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$$ # ``` # ## GitHub flavored markdown # The Notebook webapp supports Github flavored markdown meaning that you can use triple backticks for code blocks: # # <pre> # ```python # print "Hello World" # ``` # </pre> # # <pre> # ```javascript # console.log("Hello World") # ``` # </pre> # # Gives: # # ```python # print "Hello World" # ``` # # ```javascript # console.log("Hello World") # ``` # # And a table like this: # # <pre> # ``` # # | This | is | # |------|------| # | a | table| # # ``` # </pre> # # A nice HTML Table: # # | This | is | # |------|------| # | a | table| # # ## General HTML # Because Markdown is a superset of HTML you can even add things like HTML tables: # # <table> # <tr> # <th>Header 1</th> # <th>Header 2</th> # </tr> # <tr> # <td>row 1, cell 1</td> # <td>row 1, cell 2</td> # </tr> # <tr> # <td>row 2, cell 1</td> # <td>row 2, cell 2</td> # </tr> # </table> # ## Local files # If you have local files in your Notebook directory, you can refer to these files in Markdown cells directly: # # [subdirectory/]<filename> # # For example, in the images folder, we have the Python logo: # # <img src="../images/python_logo.svg" /> # # <img src="images/python_logo.svg" /> # # and a video with the HTML5 video tag: # # <video controls src="../images/animation.m4v" /> # # <video controls src="../images/animation.m4v" /> # # These do not embed the data into the notebook file, and require that the files exist when you are viewing the notebook. # ### Security of local files # Note that this means that the Jupyter notebook server also acts as a generic file server # for files inside the same tree as your notebooks. Access is not granted outside the # notebook folder so you have strict control over what files are visible, but for this # reason it is highly recommended that you do not run the notebook server with a notebook # directory at a high level in your filesystem (e.g. your home directory). # # When you run the notebook in a password-protected manner, local file access is restricted # to authenticated users unless read-only views are active.
jupyter-notebooks/Working With Markdown Cells.ipynb
# # Fast tokenizers special powers # Install the Transformers and Datasets libraries to run this notebook. # !pip install datasets transformers[sentencepiece] # + from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") example = "My name is Sylvain and I work at Hugging Face in Brooklyn." encoding = tokenizer(example) print(type(encoding)) # - tokenizer.is_fast encoding.is_fast encoding.tokens() encoding.word_ids() start, end = encoding.word_to_chars(3) example[start:end] # + from transformers import pipeline token_classifier = pipeline("token-classification") token_classifier("My name is Sylvain and I work at Hugging Face in Brooklyn.") # + from transformers import pipeline token_classifier = pipeline("token-classification", aggregation_strategy="simple") token_classifier("My name is Sylvain and I work at Hugging Face in Brooklyn.") # + from transformers import AutoTokenizer, AutoModelForTokenClassification model_checkpoint = "dbmdz/bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) model = AutoModelForTokenClassification.from_pretrained(model_checkpoint) example = "My name is Sylvain and I work at Hugging Face in Brooklyn." inputs = tokenizer(example, return_tensors="pt") outputs = model(**inputs) # - print(inputs["input_ids"].shape) print(outputs.logits.shape) # + import torch probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)[0].tolist() predictions = outputs.logits.argmax(dim=-1)[0].tolist() print(predictions) # - model.config.id2label # + results = [] tokens = inputs.tokens() for idx, pred in enumerate(predictions): label = model.config.id2label[pred] if label != "O": results.append({"entity": label, "score": probabilities[idx][pred], "word": tokens[idx]}) print(results) # - inputs_with_offsets = tokenizer(example, return_offsets_mapping=True) inputs_with_offsets["offset_mapping"] example[12:14] # + results = [] inputs_with_offsets = tokenizer(example, return_offsets_mapping=True) tokens = inputs_with_offsets.tokens() offsets = inputs_with_offsets["offset_mapping"] for idx, pred in enumerate(predictions): label = model.config.id2label[pred] if label != "O": start, end = offsets[idx] results.append( {"entity": label, "score": probabilities[idx][pred], "word": tokens[idx], "start": start, "end": end} ) print(results) # - example[33:45] # + import numpy as np results = [] inputs_with_offsets = tokenizer(example, return_offsets_mapping=True) tokens = inputs_with_offsets.tokens() offsets = inputs_with_offsets["offset_mapping"] idx = 0 while idx < len(predictions): pred = predictions[idx] label = model.config.id2label[pred] if label != "O": # Remove the B- or I- label = label[2:] start, _ = offsets[idx] # Grab all the tokens labeled with I-label all_scores = [] while idx < len(predictions) and model.config.id2label[predictions[idx]] == f"I-{label}": all_scores.append(probabilities[idx][pred]) _, end = offsets[idx] idx += 1 # The score is the mean of all the scores of the token in that grouped entity. score = np.mean(all_scores).item() word = example[start:end] results.append( {"entity_group": label, "score": score, "word": word, "start": start, "end": end} ) idx += 1 print(results) # + from transformers import pipeline question_answerer = pipeline("question-answering") context = """ 🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other. """ question = "Which deep learning libraries back 🤗 Transformers?" question_answerer(question=question, context=context) # - long_context = """ 🤗 Transformers: State of the Art NLP 🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation and more in over 100 languages. Its aim is to make cutting-edge NLP easier to use for everyone. 🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our model hub. At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments. Why should I use transformers? 1. Easy-to-use state-of-the-art models: - High performance on NLU and NLG tasks. - Low barrier to entry for educators and practitioners. - Few user-facing abstractions with just three classes to learn. - A unified API for using all our pretrained models. - Lower compute costs, smaller carbon footprint: 2. Researchers can share trained models instead of always retraining. - Practitioners can reduce compute time and production costs. - Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages. 3. Choose the right framework for every part of a model's lifetime: - Train state-of-the-art models in 3 lines of code. - Move a single model between TF2.0/PyTorch frameworks at will. - Seamlessly pick the right framework for training, evaluation and production. 4. Easily customize a model or an example to your needs: - We provide examples for each architecture to reproduce the results published by its original authors. - Model internals are exposed as consistently as possible. - Model files can be used independently of the library for quick experiments. 🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other. """ question_answerer(question=question, context=long_context) # + from transformers import AutoTokenizer, AutoModelForQuestionAnswering model_checkpoint = "distilbert-base-cased-distilled-squad" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint) inputs = tokenizer(question, context, return_tensors="pt") outputs = model(**inputs) # - start_logits = outputs.start_logits end_logits = outputs.end_logits print(start_logits.shape, end_logits.shape) # + import torch sequence_ids = inputs.sequence_ids() # Mask everything apart the tokens of the context mask = [i != 1 for i in sequence_ids] # Unmask the [CLS] token mask[0] = False mask = torch.tensor(mask)[None] start_logits[mask] = -10000 end_logits[mask] = -10000 # - start_probabilities = torch.nn.functional.softmax(start_logits, dim=-1)[0] end_probabilities = torch.nn.functional.softmax(end_logits, dim=-1)[0] scores = start_probabilities[None, :] * end_probabilities[:, None] scores = torch.triu(scores) max_index = scores.argmax().item() start_index = max_index // scores.shape[1] end_index = max_index % scores.shape[1] print(scores[start_index, end_index]) # + inputs_with_offsets = tokenizer(question, context, return_offsets_mapping=True) offsets = inputs_with_offsets["offset_mapping"] start_char, _ = offsets[start_index] _, end_char = offsets[end_index] answer = context[start_char: end_char] # - result = {"answer": answer, "start": start_char, "end": end_char, "score": scores[start_index, end_index]} print(result) inputs = tokenizer(question, long_context) print(len(inputs["input_ids"])) inputs = tokenizer(question, long_context, max_length=384, truncation="only_second") print(tokenizer.decode(inputs["input_ids"])) # + sentence = "This sentence is not too long but we are going to split it anyway." inputs = tokenizer( sentence, truncation=True, return_overflowing_tokens=True, max_length=6, stride=2 ) for ids in inputs["input_ids"]: print(tokenizer.decode(ids)) # - print(inputs.keys()) print(inputs['overflow_to_sample_mapping']) # + sentences = [ "This sentence is not too long but we are going to split it anyway.", "This sentence is shorter but will still get split.", ] inputs = tokenizer( sentences, truncation=True, return_overflowing_tokens=True, max_length=6, stride=2 ) print(inputs['overflow_to_sample_mapping']) # - inputs = tokenizer( question, long_context, stride=128, max_length=384, padding="longest", truncation="only_second", return_overflowing_tokens=True, return_offsets_mapping=True ) # + _ = inputs.pop("overflow_to_sample_mapping") offsets = inputs.pop("offset_mapping") inputs = inputs.convert_to_tensors("pt") print(inputs["input_ids"].shape) # + outputs = model(**inputs) start_logits = outputs.start_logits end_logits = outputs.end_logits print(start_logits.shape, end_logits.shape) # + sequence_ids = inputs.sequence_ids() # Mask everything apart the tokens of the context mask = [i != 1 for i in sequence_ids] # Unmask the [CLS] token mask[0] = False # Mask all the [PAD] tokens mask = torch.logical_or(torch.tensor(mask)[None], (inputs["attention_mask"] == 0)) start_logits[mask] = -10000 end_logits[mask] = -10000 # - start_probabilities = torch.nn.functional.softmax(start_logits, dim=-1) end_probabilities = torch.nn.functional.softmax(end_logits, dim=-1) # + candidates = [] for start_probs, end_probs in zip(start_probabilities, end_probabilities): scores = start_probs[:, None] * end_probs[None, :] idx = torch.triu(scores).argmax().item() start_idx = idx // scores.shape[0] end_idx = idx % scores.shape[0] score = scores[start_idx, end_idx].item() candidates.append((start_idx, end_idx, score)) print(candidates) # - for candidate, offset in zip(candidates, offsets): start_token, end_token, score = candidate start_char, _ = offset[start_token] _, end_char = offset[end_token] answer = long_context[start_char: end_char] result = {"answer": answer, "start": start_char, "end": end_char, "score": score} print(result)
course/chapter6/section3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Resampling and Cleaning # This file is composed of data resampling from the combined sensor data and cleaning the outcomes dataset. # __INPUT: .csv files containing the combined sensor data and the outcomes time dataset__(These files are output from 01_sensor_concat. (10_ACC_Combined.csv, 10_Temp_Combined.csv, 10_EDA_Combined.csv, 10_BVP_Combined.csv, 10_Outcomes.csv) # __OUTPUT: Datasets for Individuals and Outcomes Dataset w/ End-Times__ (19-0_PATIENT_ID_HERE_aggregated.csv, 20_Outcomes_w_end.csv) # ## Imports # + import pandas as pd import datetime from datetime import timedelta import os import matplotlib.pyplot as plt import seaborn as sns import re import warnings warnings.simplefilter("ignore") # - # ## Directory and Read in Data # + ACC1 = pd.read_csv("10_ACC_Combined.csv") TEMP1 = pd.read_csv("10_Temp_Combined.csv") EDA1 = pd.read_csv("10_EDA_Combined.csv") BVP1 = pd.read_csv("10_BVP_Combined.csv") HR1 = pd.read_csv("10_HR_Combined.csv") outcomes1 = pd.read_csv("10_Outcomes.csv") # - ACC = ACC1.copy() TEMP = TEMP1.copy() EDA = EDA1.copy() BVP = BVP1.copy() HR = HR1.copy() outcomes = outcomes1.copy() # ## Pre-Processing (Convert to Date-Time, Set Index, Drop Subject ID) # + # Convert to date time and set it as the index so interpolation can work ACC['Time'] = pd.to_datetime(ACC['Time']) ACC = ACC.set_index('Time') TEMP['Time'] = pd.to_datetime(TEMP['Time']) TEMP = TEMP.set_index('Time') EDA['Time'] = pd.to_datetime(EDA['Time']) EDA = EDA.set_index('Time') BVP['Time'] = pd.to_datetime(BVP['Time']) BVP = BVP.set_index('Time') HR['Time'] = pd.to_datetime(HR['Time']) HR = HR.set_index('Time') # - ids = EDA['Subject_ID'].copy() ids.isnull().values.any() EDA = EDA.drop(['Subject_ID'], axis = 1) ACC = ACC.drop(['Subject_ID'], axis = 1) TEMP = TEMP.drop(['Subject_ID'], axis = 1) BVP = BVP.drop(['Subject_ID'], axis = 1) HR = HR.drop(['Subject_ID'], axis = 1) # ## Resampling & Interpolation # Interpolation works now that the index is DateTime # Multiple cells incase errors are present as these take a while to run EDA = EDA.resample('250L').interpolate() TEMP = TEMP.resample('250L').interpolate() BVP = BVP.resample('250L').interpolate() HR = HR.resample('250L').interpolate() ACC = ACC.drop_duplicates() ACC = ACC.resample('250L').interpolate() ids = ids.resample('250L').ffill() # ## Concatenate Resampled Data From Individual Sensors comb = pd.concat([ACC, TEMP, EDA, BVP, HR, ids], axis = 1) # ## Cleaning Outcomes Dataframe # Experimental Procedure for the 56 Participants # 1. seated rest to measure baseline __(4  min)__ <br> # 2. paced deep breathing __(1  min)__ <br> # 3. physical activity (walking to increase HR up to 50% of the recommended maximum) __(5  min)__ <br> # 4. seated rest (washout from physical activity) __(~2  min)__ <br> # 5. a typing task __(1  min)__ <br> # ### Convert all cols to datetime for i in list(outcomes.columns[1:9]): outcomes[i] = pd.to_datetime(outcomes[i]) # ### Fix Extra Space outcomes = outcomes.rename(columns = {'Activity Start 1 ': 'Activity Start 1'}) # ### Activity Segmentation outcomes['Baseline End 1'] = outcomes['Baseline Start 1'] + timedelta(minutes = 4) outcomes['Baseline End 2'] = outcomes['Baseline Start 2'] + timedelta(minutes = 4) outcomes['DB End 1'] = outcomes['DB Start 1'] + timedelta(minutes = 1) outcomes['DB End 2'] = outcomes['DB Start 2'] + timedelta(minutes = 1) outcomes['Type End 1'] = outcomes['Type Start 1'] + timedelta(minutes = 1) outcomes['Type End 2'] = outcomes['Type Start 2'] + timedelta(minutes = 1) outcomes['Activity End 1'] = outcomes['Activity Start 1'] + timedelta(minutes = 5) outcomes['Activity End 2'] = outcomes['Activity Start 2'] + timedelta(minutes = 5) # ### Updated Outcomes Dataset # ### Read Out Updated Outcomes to CSV outcomes.to_csv('20_Outcomes_w_end.csv', index = False) # ## Clean & Segment Combined Sensor Dataset # ### Reset Index comb = comb.reset_index() # ### Evaluate Existing Sensor Dataset # The Value Counts are Very Disparate Among Subjects comb['Subject_ID'].value_counts() # This is because our resampling took second by second measures over multiple days for some participants, as shown in subject 19-015 below: comb.loc[comb['Subject_ID'] =='19-015'] # ### Filtering Combined Dataset for Activity Time Segments # descript comb_filter = pd.DataFrame(columns = comb.columns) for subject in outcomes['Subject ID']: #Baseline 1 baseline_start1 = outcomes.loc[outcomes['Subject ID'] == subject, 'Baseline Start 1'].item() baseline_end1 = outcomes.loc[outcomes['Subject ID'] == subject, 'Baseline End 1'].item() keep = comb[comb['Subject_ID'] == subject] keep = keep.loc[(keep['Time']>=baseline_start1) & (keep['Time']<=baseline_end1)] keep['Activity'] = 'Baseline 1' comb_filter = comb_filter.append(keep) #Baseline 2 baseline_start2 = outcomes.loc[outcomes['Subject ID'] == subject, 'Baseline Start 2'].item() baseline_end2 = outcomes.loc[outcomes['Subject ID'] == subject, 'Baseline End 2'].item() keep = comb[comb['Subject_ID'] == subject] keep = keep.loc[(keep['Time']>=baseline_start2) & (keep['Time']<=baseline_end2)] keep['Activity'] = 'Baseline 2' comb_filter = comb_filter.append(keep) #DB 1 db_start1 = outcomes.loc[outcomes['Subject ID'] == subject, 'DB Start 1'].item() db_end1 = outcomes.loc[outcomes['Subject ID'] == subject, 'DB End 1'].item() keep = comb[comb['Subject_ID'] == subject] keep = keep.loc[(keep['Time']>=db_start1) & (keep['Time']<=db_end1)] keep['Activity'] = 'DB 1' comb_filter = comb_filter.append(keep) #DB 2 db_start2 = outcomes.loc[outcomes['Subject ID'] == subject, 'DB Start 2'].item() db_end2 = outcomes.loc[outcomes['Subject ID'] == subject, 'DB End 2'].item() keep = comb[comb['Subject_ID'] == subject] keep = keep.loc[(keep['Time']>=db_start2) & (keep['Time']<=db_end2)] keep['Activity'] = 'DB 2' comb_filter = comb_filter.append(keep) #Activity 1 activity_start1 = outcomes.loc[outcomes['Subject ID'] == subject, 'Activity Start 1'].item() activity_end1 = outcomes.loc[outcomes['Subject ID'] == subject, 'Activity End 1'].item() keep = comb[comb['Subject_ID'] == subject] keep = keep.loc[(keep['Time']>=activity_start1) & (keep['Time']<=activity_end1)] keep['Activity'] = 'Activity 1' comb_filter = comb_filter.append(keep) #Activity 2 activity_start2 = outcomes.loc[outcomes['Subject ID'] == subject, 'Activity Start 2'].item() activity_end2 = outcomes.loc[outcomes['Subject ID'] == subject, 'Activity End 2'].item() keep = comb[comb['Subject_ID'] == subject] keep = keep.loc[(keep['Time']>=activity_start2) & (keep['Time']<=activity_end2)] keep['Activity'] = 'Activity 2' comb_filter = comb_filter.append(keep) #Type 1 type_start1 = outcomes.loc[outcomes['Subject ID'] == subject, 'Type Start 1'].item() type_end1 = outcomes.loc[outcomes['Subject ID'] == subject, 'Type End 1'].item() keep = comb[comb['Subject_ID'] == subject] keep = keep.loc[(keep['Time']>=type_start1) & (keep['Time']<=type_end1)] keep['Activity'] = 'Type 1' comb_filter = comb_filter.append(keep) #Type 2 type_start2 = outcomes.loc[outcomes['Subject ID'] == subject, 'Type Start 2'].item() type_end2 = outcomes.loc[outcomes['Subject ID'] == subject, 'Type End 2'].item() keep = comb[comb['Subject_ID'] == subject] keep = keep.loc[(keep['Time']>=type_start2) & (keep['Time']<=type_end2)] keep['Activity'] = 'Type 2' comb_filter = comb_filter.append(keep) comb_filter['Subject_ID'].value_counts() # Much better, we now have 5288 data values for each subject, meaning that all of the sensors are sampled at 4 Hz. # ## Create Individual Datasets from Filtered & Combined Sensor Data sub_name = list(comb_filter['Subject_ID'].unique()) # descrpt """ for i in range(len(sub_name)): df = comb_filter[comb_filter['Subject_ID'] == sub_name[i]].to_csv(f'./10_Individual Subjects Activity/{sub_name[i]}_aggregated_with_activity.csv', index = False) """ #All participants with activity comb_filter.to_csv("../20_exploratory_data_analysis/30_all_partic_aggregated_with_activity.csv", index = False) comb_filter.to_csv("../10_code/30_end_pre_processing/31_remove_outliers/30_all_partic_aggregated_with_activity.csv", index = False)
DigitalBiomarkers-HumanActivityRecognition/10_code/10_pre_outlier_removal_processing/.ipynb_checkpoints/10_data_preprocessing-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + pycharm={"name": "#%%\n"} import os import pickle import re import shutil import time from itertools import chain from pathlib import Path from cytoolz import groupby, merge from cytoolz.curried import get, get_in from killscreen import subutils, shortcuts as ks from killscreen.aws import ec2 import pyarrow as pa import pyarrow.csv from gPhoton.pretty import make_monitors from more_itertools import distribute from pyarrow import parquet from s3_fuse.mount_s3 import mount_bucket from s3_fuse.ps1_utils import prune_ps1_catalog from s3_fuse.utilz import parse_topline, sample_table key = "/home/ubuntu/galex_swarm.pem" uname = "ubuntu" DUMP_PATH = '/home/ubuntu/.slice_test/' os.makedirs(DUMP_PATH, exist_ok=True) S3_ROOT = "/mnt/s3" BUCKET="nishapur" # mount bucket to fetch metadata mount_bucket(backend="goofys", mount_path=S3_ROOT, bucket=BUCKET) # + pycharm={"name": "#%%\n"} # initialize a killscreen Cluster descriptions = ec2.describe( tag_filters={'Name': 'fornax-slice'}, states=("running", "stopped") ) # ...either from already-running EC2 instances... if len(descriptions) == 0: cluster = ec2.Cluster.launch( count=4, template="fornax-slice", key=key, uname=uname, use_private_ip=True ) # ...or from a new fleet request. else: cluster = ec2.Cluster.from_descriptions( descriptions, key=key, uname=uname, use_private_ip=True ) cluster.start() [instance.wait_until_running() for instance in cluster.instances] cluster.add_keys() print("\n".join([str(i) for i in cluster.instances])) # + pycharm={"name": "#%%\n"} # freshen these instances def git_update(*repo_names): return ks.chain( [f"cd {repo}; git clean -d -fx; git pull & cd ~" for repo in repo_names], "and" ) update = git_update("fornax-s3-subsets", "killscreen", "gphoton_working") updaters = cluster.command(update, _bg=True) # + pycharm={"name": "#%%\n"} # set up metadata objects in order to pick targets for slicing # catalog of PS1 extragalactic extended objects, including explicit # assignments to PS1 stack image projection / sky cells and GALEX # eclipse numbers (not used here) catalog_fn = "ps1_extragalactic_skycells_eclipses.parquet" if not Path(catalog_fn).exists(): shutil.copy( Path(S3_ROOT, "ps1/metadata", catalog_fn), Path(catalog_fn) ) catalog = parquet.read_table(catalog_fn) # for this demo, we only staged a subset of those PS1 stack images # (all of them at all 5 bands would be > 80 TB). this is a list of # the (randomly selected) projection and sky cells we staged. test_cell_fn = "ps1_extragalactic_skycells_eclipses_1k_cell_subset.csv" arbitrary_test_cells = ( pa.csv.read_csv(Path(S3_ROOT, "ps1/metadata", test_cell_fn)) .cast(pa.schema([("proj_cell", pa.uint16()), ("sky_cell", pa.uint8())])) ) small_catalog = prune_ps1_catalog(catalog, arbitrary_test_cells) del catalog # + pycharm={"name": "#%%\n"} # various settings for the test # how many objects shall we collect slices for? (785510 are available in this test set) TARGET_COUNT = 2000 # optional parameter -- restrict the total number of PS1 source cells to test the # performance effects of denser sampling. # (1000 total PS1 cells are available in this test set). # note that the total number of images accessed is number of cells * number of bands. MAX_CELL_COUNT = 100 if MAX_CELL_COUNT is not None: test_catalog = prune_ps1_catalog( small_catalog, sample_table(arbitrary_test_cells, k=MAX_CELL_COUNT) ) else: test_catalog = small_catalog targets = sample_table(test_catalog, k=TARGET_COUNT).to_pylist() # split these into chunks of work, making sure that all targets within # a single cell / image are assigned to the same instance -- target_groups = groupby(get(['proj_cell', 'sky_cell']), targets) # this is a simple heuristic to distribute work evenly, given the above constraint: groups = sorted(target_groups.values(), key=lambda v: 1 / len(v)) work_chunks = [ tuple(chain.from_iterable(chunk)) for chunk in distribute(len(cluster.instances), groups) ] # + pycharm={"name": "#%%\n"} # what script / interpreter are we actually using on the remote instances env = cluster.instances[0].conda_env("fornax-slice-testing") python = f"{env}/bin/python" endpoint = "/home/ubuntu/fornax-s3-subsets/s3_fuse_testing/ps1_cutout_endpoint.py" # + pycharm={"name": "#%%\n"} # simple process join function def wait_on(processes, polling_delay=0.1): while any([p.is_alive() for p in processes]): time.sleep(polling_delay) # when a remote process is done, grab the files from that instance # this could be done more concurrently, but synchronizing is a pain. # maybe scp from remotes? ideally inside the dump loop. getters = [] def grab_when_done(process, *_): print(f"{process.host.ip} done; getting files") getter = process.host.get(f"{DUMP_PATH}*", DUMP_PATH, _bg=True) getters.append(getter) # delete everything local so as to avoid confusion subutils.run(f"rm {DUMP_PATH}/* &") # set up some basic benchmarking... stat, note = make_monitors(silent=True) # ...and initiate the remote processes remote_processes = [] for chunk, instance in zip(work_chunks, cluster.instances): command = f"{python} {endpoint} '{chunk}'" viewer = instance.command( command, _bg=True, _viewer=True, _done=grab_when_done ) remote_processes.append(viewer) wait_on(remote_processes) note(f"remote processes completed,{stat()}", True) wait_on(getters) note(f"cleaned up files from remotes,{stat()}", True) retrieved_dumps = os.listdir(DUMP_PATH) cutfiles = tuple(filter(lambda f: f.endswith("pkl"), retrieved_dumps)) note(f"got {len(targets) * 2} cuts,{stat(total=True)}", True) log = note(None, eject=True) rate, weight = parse_topline(log) print(f"{rate} cutouts/s, {weight} MB / cutout (local only)") # cleanup cached arrays on remotes deletions = cluster.command(f"rm {DUMP_PATH}/* &", _bg=True) # + pycharm={"name": "#%%\n"} # should you like: examine logs from remotes... import pandas as pd logs = [] for logfile in filter(lambda f: f.endswith("csv"), retrieved_dumps): remote_log = pd.read_csv(Path(DUMP_PATH, logfile)) remote_log["host"] = re.search( r"(?<=ip_)(\d+_){4}", logfile ).group(0)[:-1] logs.append(remote_log) logs = pd.concat(logs) logs.columns = ["timestamp", "event", "duration", "volume", "host"] logs.sort_values(by=["host", "timestamp"]) # + pycharm={"name": "#%%\n"} # ...or your winnings cuts = [] for file in cutfiles: with open(Path(DUMP_PATH, file), "rb") as stream: cuts.append(pickle.load(stream)) cuts = merge(cuts) arrays = tuple(map(get_in(['arrays', 0]), cuts.values())) # + pycharm={"name": "#%%\n"} from random import choice import matplotlib.pyplot as plt import numpy as np fig, grid = plt.subplot_mosaic(np.arange(9).reshape(3,3)) plt.close() for ax in grid.values(): ax.set_axis_off() for ix in grid.keys(): array = arrays[choice(range(len(arrays)))] clipped = np.clip(array, *np.percentile(array, (1, 99))) grid[ix].imshow(clipped, cmap='autumn') fig # + pycharm={"name": "#%%\n"} # destroy the cluster if you are done with it cluster.terminate()
s3_fuse_testing/scaling_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %pylab inline import pandas as pd from mpl_toolkits.axes_grid.inset_locator import inset_axes # + rcParams['axes.spines.right'] = False rcParams['axes.spines.top'] = False def xpercent_scale(): gca().set_xticklabels(['{:.0f}%'.format(x*100) for x in gca().get_xticks()]) # - sample_info = pd.read_csv('middle_sample_info.csv', index_col=0) results = pd.read_csv('middle_final_results.csv', index_col=0) ms_results = pd.read_csv('middle_MS_results.csv', index_col=0) # + df = pd.read_csv('data/rep6/middle_exp_mat.csv', index_col=0) df = df.loc[sample_info.index] df = df.T[df.sum(0) >= 3].T # Filter practically unobserved genes dfm = np.log10(df + 1) # - figsize(6, 4) plt.scatter(sample_info.abs_X, sample_info.abs_Y); results['pval'] = results['pval'].clip_lower(results.query('pval > 0')['pval'].min() / 2) results['qval'] = results['qval'].clip_lower(results.query('qval > 0')['qval'].min() / 2) ms_results['pval'] = ms_results['pval'].clip_lower(ms_results.query('pval > 0')['pval'].min() / 2) ms_results['qval'] = ms_results['qval'].clip_lower(ms_results.query('qval > 0')['qval'].min() / 2) plt.loglog() plt.scatter(results.l, results.pval); plt.gca().invert_yaxis(); plt.xlabel('Lengthscale / Period') plt.ylabel('P-value'); plt.loglog() plt.scatter(results.max_s2_t_hat, results.pval); plt.gca().invert_yaxis(); plt.xlabel('Spatial variance component') plt.ylabel('P-value'); plt.loglog() plt.scatter(results.max_s2_t_hat, results.qval); plt.gca().invert_yaxis(); plt.xlabel('Spatial variance component') plt.ylabel('Q-value'); plt.loglog() plt.scatter(1./results.max_delta, results.qval); plt.gca().invert_yaxis(); plt.xlabel('SNR') plt.ylabel('Q-value'); results.query('qval < 0.05').shape figsize(6, 2) plt.hist(results.query('pval < 1.')['pval'], bins=100, color='w', ec='k'); plt.title('P-Value Histogram - MERFISH') plt.ylabel('# Genes') plt.xlabel('P-value'); # plt.ylim(ymax=400) ms_results.model.value_counts() # + N = 5 top_per_ls = ms_results.query('qval < 0.05') \ .sort_values('qval', ascending=True) \ .groupby(['model', 'l']) \ .head(N) \ .sort_values(['l', 'qval'], ascending=[True, True]) top_per_ls # - top_per_ls.shape # + import NaiveDE dfm = NaiveDE.stabilize(df.T).T res = NaiveDE.regress_out(sample_info, dfm.T, 'np.log(cytoplasmArea)').T # - from sklearn.preprocessing import scale model_colors = {'SE': 'C0', 'PER': 'C1'} # + plt.figure(figsize=(8, 12), dpi=80) N = 5 N_rows = top_per_ls['l'].unique().shape[0] for j, T in enumerate(top_per_ls.groupby(['model', 'l'])): _, group = T for i, idx in enumerate(group.index): r = top_per_ls.loc[idx] ax = plt.subplot(N_rows, N, j * N + i + 1) plt.scatter(sample_info.abs_X, sample_info.abs_Y, c=scale(res[r['g']], with_std=True), s=1, vmin=0, vmax=2, rasterized=True); s = ' *' if r['qval'] < 0.01: s = ' **' if r['qval'] < 0.001: s = ' ***' plt.title(r['g'] + s) plt.xticks([]) plt.yticks([]) plt.hlines(700, -400, -200, lw=2) if -400 + r['l'] < 600: plt.hlines(650, -400, -400 + r['l'], lw=2, color=model_colors[r['model']], zorder=0) else: plt.hlines(650, -400, 380, lw=2, color=model_colors[r['model']], zorder=0) plt.plot([380 - 10, 380 + 10], [620, 680], lw=2, color=model_colors[r['model']], zorder=0) plt.plot([450 - 10, 450 + 10], [620, 680], lw=2, color=model_colors[r['model']], zorder=0) plt.hlines(650, 450, 600, lw=2, color=model_colors[r['model']], zorder=0) for spine in ax.spines.values(): spine.set_edgecolor(model_colors[r['model']]) plt.axis('equal') # Make insert with model probabilities ins_ax = inset_axes(ax, width='20%', height='10%', loc=3) plt.bar((1, 2, 3), top_per_ls.loc[idx][['SE_prob', 'PER_prob', 'linear_prob']], color=['C0', 'C1', 'C2']) plt.ylim(0, 1); plt.xticks([], []) plt.xlim(-0.0, 4) plt.yticks([1], ['']); plt.tight_layout() plt.savefig('top_MF_genes_per_ls.pdf') # - blank_genes = results[results.g.str.startswith('Blank-')]['g'].tolist() len(blank_genes) N = 3 top_per_ls = ms_results.query('g not in @blank_genes and qval < 1e-4') \ .sort_values('qval', ascending=True) \ .groupby(['model', 'l']) \ .head(N) \ .sort_values(['l', 'qval'], ascending=[True, True]) # + plt.figure(figsize=(6, 4), dpi=80) plt.xscale('log') plt.yscale('log') tmp = results.query('qval > 0.05') plt.scatter(1./ tmp['max_delta'], tmp['qval'], alpha=0.9, rasterized=True, label='Genes (Not spatial)', marker='o', color='k'); tmp = ms_results.query('model == "SE"') plt.scatter(1./ tmp['max_delta'], tmp['qval'], alpha=0.5, rasterized=True, label='Genes (Spatial function)', marker='o', color='C0'); tmp = ms_results.query('model == "PER"') tmp = tmp.drop('max_delta', 1).merge(results[['g', 'max_delta']], on='g') plt.scatter(1./ tmp['max_delta'], tmp['qval'], alpha=0.5, rasterized=True, label='Genes (Periodic function)', marker='o', color='C1'); tmp = ms_results.query('model == "linear"') tmp = tmp.drop('max_delta', 1).merge(results[['g', 'max_delta']], on='g') plt.scatter(1./ tmp['max_delta'], tmp['qval'], alpha=0.5, rasterized=True, label='Genes (Linear function)', marker='o', color='C2'); # Annotate negative controls tmp = results.query('g in @blank_genes') x_offsets = {'Blank-3': 3e-1, 'Blank-10': 1.5e0, 'Blank-1': 9e-1} y_offsets = {'Blank-3': 2e-1, 'Blank-5': 1e1, 'Blank-1': 1e-1, 'Blank-4': 1e0, 'Blank-10': 1, 'Blank-7': 1} for i, r in tmp.iterrows(): xy = (1./r['max_delta'], r['qval']) xyt = (xy[0] / x_offsets.get(r['g'], 2), xy[1] / y_offsets.get(r['g'], 3),) plt.annotate(r['g'], xy, xyt, color='tab:red', horizontalalignment='right', arrowprops={'arrowstyle': '-', 'color': 'tab:red'}, size=9) # Annotate top genes x_offsets = {'FASN': 4e-1, 'BSN': 6e-1, 'GTF3C4': 6e-1} y_offsets = {'THBS1': 1e-1, 'CDYL2': 1e-1, 'GTF3C4': 5e0, 'BRCA2': 3e-1} for i, r in top_per_ls.iterrows(): xy = (1./r['max_delta'], r['qval']) xyt = (xy[0] * x_offsets.get(r['g'], 1.2), xy[1] / y_offsets.get(r['g'], 1),) plt.annotate(r['g'], xy, xyt, color='k', arrowprops={'arrowstyle': '-'}, size=9) plt.xlim(4e-3, 3e0) plt.axhline(0.05, ls='--', c='k', lw=1) plt.xlabel('SNR'); plt.ylabel('Adj. P-value'); plt.gca().invert_yaxis() plt.legend(scatterpoints=3, loc='upper left'); plt.savefig('sigma_pval_MF.pdf', bbox_inches='tight'); # - import SpatialDE.plot # + plt.figure(figsize=(6, 4), dpi=80) SpatialDE.plot.FSV_sig(results, ms_results) # - ', '.join(results.g.tolist()) moffit_genes_plus = ['CENPF', 'CKAP5', 'POLQ', 'BUB3'] moffit_genes_minus = ['THBS1', 'FBN2', 'TSPAN3'] # + plt.figure(figsize=(6, 4), dpi=80) SpatialDE.plot.FSV_sig(results, ms_results) size_stats = results.query('g == "log_total_count"') # plt.scatter(size_stats['FSV'], size_stats['qval'], marker='x', c='k', s=50) # Annotate negative controls tmp = results.query('g in @blank_genes') x_offsets = {'Blank-3': 0.1, 'Blank-10': 0.1, 'Blank-1': 0.1, 'Blank-9': 0.03, 'Blank-4': 0.1, 'Blank-8': 0.07, 'Blank-1': 0.13, 'Blank-7': 0.11} y_offsets = {'Blank-3': 2e-1, 'Blank-5': 1e2, 'Blank-1': 4e0, 'Blank-4': 1e0, 'Blank-10': 3e1, 'Blank-7': 1} for i, r in tmp.iterrows(): xy = (r['FSV'], r['pval']) xyt = (xy[0] + x_offsets.get(r['g'], 0.01), xy[1] / y_offsets.get(r['g'], 1e1),) plt.annotate(r['g'], xy, xyt, color='tab:red', horizontalalignment='right', arrowprops={'arrowstyle': '-', 'color': 'tab:red'}, size=9) # Annotate highly dividing genes x_offsets = {'THBS1': -0.08, 'BSN': -0.06, 'GTF3C4': 0.1, 'C17orf51': -0.09, 'GTF3C4': -0.09, 'RBM20': -0.09, 'BRCA2': -0.09, 'BUB3': -0.09} y_offsets = {'THBS1': 1e1, 'CDYL2': 4e-1, 'GTF3C4': 5e0, 'BRCA2': 1e1, 'FASN': 1e-1} tmp = results.query('g in @moffit_genes_plus') for i, r in tmp.iterrows(): xy = (r['FSV'], r['pval']) xyt = (xy[0] + x_offsets.get(r['g'], 0.01), xy[1] / y_offsets.get(r['g'], 1e1),) plt.annotate(r['g'], xy, xyt, color='tab:green', arrowprops={'arrowstyle': '-', 'color': 'tab:green'}, size=9) # Annotate lowly dividing genes x_offsets = {'THBS1': -0.08, 'BSN': -0.06, 'GTF3C4': 0.1, 'C17orf51': -0.09, 'GTF3C4': -0.09, 'RBM20': -0.09, 'BRCA2': -0.09} y_offsets = {'THBS1': 1e1, 'CDYL2': 4e-1, 'GTF3C4': 5e0, 'BRCA2': 1e1, 'FASN': 1e-1} tmp = results.query('g in @moffit_genes_minus') for i, r in tmp.iterrows(): xy = (r['FSV'], r['pval']) xyt = (xy[0] + x_offsets.get(r['g'], 0.01), xy[1] / y_offsets.get(r['g'], 1e1),) plt.annotate(r['g'], xy, xyt, color='tab:blue', arrowprops={'arrowstyle': '-', 'color': 'tab:blue'}, size=9) # Annotate top genes x_offsets = {'THBS1': -0.08, 'BSN': -0.06, 'GTF3C4': 0.1, 'C17orf51': -0.09, 'GTF3C4': -0.09, 'RBM20': -0.09, 'BRCA2': -0.09} y_offsets = {'THBS1': 1e1, 'CDYL2': 4e-1, 'GTF3C4': 5e0, 'BRCA2': 1e1, 'FASN': 1e-1, 'SLC38A1': 5e-1} tmp = results.query('g in @top_per_ls.g and g not in @moffit_genes_minus and g not in @moffit_genes_plus') for i, r in tmp.iterrows(): xy = (r['FSV'], r['pval']) xyt = (xy[0] + x_offsets.get(r['g'], 0.01), xy[1] / y_offsets.get(r['g'], 1e1),) plt.annotate(r['g'], xy, xyt, color='k', arrowprops={'arrowstyle': '-', }, size=9) plt.savefig('FSV_pval_MF.pdf', bbox_inches='tight', dpi=150); # - results.query('qval < 0.05').shape results.query('qval < 0.05').shape[0] / results.shape[0] results.query('qval < 0.05 & g in @blank_genes').shape results.query('qval < 0.05 & g in @blank_genes').shape[0] / results.query('g in @blank_genes').shape[0] fdrs = 10 ** np.linspace(0, -8) total_pct = [] blank_pct = [] for fdr in fdrs: total_pct.append(results.query('qval < @fdr').shape[0] / results.shape[0]) blank_pct.append(results.query('qval < @fdr & g in @blank_genes').shape[0] / results.query('g in @blank_genes').shape[0]) figsize(5, 3) plt.xscale('log') plt.scatter(fdrs, total_pct); plt.scatter(fdrs, blank_pct); results.shape results['bpval'] = results.pval * results.shape[0] fdrs = 10 ** np.linspace(0, -8) total_pct = [] blank_pct = [] for fdr in fdrs: total_pct.append(results.query('bpval < @fdr').shape[0] / results.shape[0]) blank_pct.append(results.query('bpval < @fdr & g in @blank_genes').shape[0] / results.query('g in @blank_genes').shape[0]) figsize(5, 3) plt.xscale('log') plt.scatter(fdrs, total_pct); plt.scatter(fdrs, blank_pct); results['bp_blanks'] = results.pval * 10 results['bp_rna'] = results.pval * (results.shape[0] - 10) fdrs = 10 ** np.linspace(2, -10) total_pct = [] blank_pct = [] for fdr in fdrs: total_pct.append(results.query('bp_rna < @fdr').shape[0] / results.shape[0]) blank_pct.append(results.query('bp_blanks < @fdr & g in @blank_genes').shape[0] / results.query('g in @blank_genes').shape[0]) results.shape[0] - 10 # + figsize(5, 3) plt.xscale('log') plt.scatter(fdrs, total_pct, label='Genes (N=130)'); plt.scatter(fdrs, blank_pct, label='Blank controls (N=10)'); plt.axvline(0.05, lw=1, ls='--', c='k', zorder=0, label='FWER = 0.05'); plt.legend(scatterpoints=3); plt.ylabel('Fraction significant'); plt.xlabel('Family-Wise Error Rate'); plt.savefig('MF-FWER.pdf', bbox_inches='tight'); # - results.query('bp_blanks < 0.05 & g in @blank_genes')
Analysis/MERFISH/MERFISH Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + """ SIMPLE DATA VISUALIZATION WITH PANDAS ON STACKOVERFLOW'S SERVEY DATA """ #first we have to import pandas library import pandas as pd # + #read the csv file as a dataframe using pandas read_csv method #data frames are simply rows and columns of data # assigning which column must be used as column, that is 'Respondent' df = pd.read_csv('survey_results_public.csv', index_col='Respondent') # - #display the dataframe df # + #shape attribute gives the size of the dataframe in (rows, columns) #'this is an attribute not a method so no braces at the end ' df.shape # + #the info() method also gives number of rows and columns but also # data type of each columns df.info() """ 1 MainBranch 88331 non-null object 2 Hobbyist 88883 non-null object 3 OpenSourcer 88883 non-null object # mostly object means string here """ # - # to display all of the columns pd.set_option('display.max_columns', 85) #check here or go back and rerun df df #lets load another csv file that tells what every columns mean in this specific data schema_df = pd.read_csv('survey_results_schema.csv') #displaly it schema_df # + # to display all the rows of the schema_df because it is truncated # set the number to 85 pd.set_option('display.max_rows', 85) # - schema_df # if the dataframe is too large we only display the first 5 rows # df.head() # but any number of rows can be displayed df.head(3) # to display the last rows df.tail() # now lets try to get all the responses for Hobbyist column df['Hobbyist'] # + # lets see from these how many are answered yes and how many no df['Hobbyist'].value_counts() # + # we can use slicing like a list but the last index is included # and no brackets is used on the slicing part # it is inclusive as well df.loc[1:2, 'Hobbyist':'Employment'] # + # lets set an index to dataframe that makes a better for filtering #df.set_index('Respondent').head(3) # because Respondent is used as an id in the servey # but if we knew what the index will be we can assign the index when we first read the file # and if we want to use like this in the procces of manipulation later better to use this one # df = pd.read_csv('survey_results_public.csv', index_col='Respondent') # - schema_df.head(4) # + # lets look the meaning of each Column in the file by making Column an index schema_df.set_index('Column', inplace=True) # - schema_df schema_df.loc['Country'] # + # to see the whole truncated string we can pass both row and colulmn of that specific value schema_df.loc['WelcomeChange', 'QuestionText'] # + # to sort the index in alphabetical order # and make it stay like this for a while schema_df.sort_index(inplace=True) # and to sort it in descending order that is from last to first just pass ascending=False #schema_df.sort_index(ascending=False) # - schema_df # + # just create a filter that checkes the salary of American programmers who includes python in their work countries = ['United States'] filter2 =(df['Country'].isin(countries)) & (df['ConvertedComp'] >= 800000) & (df['LanguageWorkedWith'].str.contains('Python')) df.loc[filter2, ['Country', 'ConvertedComp', 'LanguageWorkedWith']] # + # lets rename some columns df.rename(columns={'ConvertedComp':'Salary in usd'}).head(2) # - df['Hobbyist'] # + # to change all the yes's and no's with yep and nope respectively # if there is another value other than yes and no in the series it will be NaN value # but if we dont want other values in the series not to be touched we use replace() method in place of map() df['Hobbyist'].map({'Yes': 'Yep','No': 'Nope'}).head() # -
Pandas 1 .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] tags=["preface-cell", "preface-title"] # <title>Learn Quantum Computation using Qiskit</title> # <div class="preface-top"> # <div class="preface-checker-pattern"></div> # <div class="preface-summary"> # <aside class="preface-summary-image"><img src="images/preface_illustration_2.svg"></aside> # <div class="preface-summary-text"> # <p> # Greetings from the Qiskit Community team! This textbook is a university quantum algorithms/computation course supplement based on Qiskit to help learn:</p> # <ol> # <li>The mathematics behind quantum algorithms</li> # <li>Details about today's non-fault-tolerant quantum devices</li> # <li>Writing code in Qiskit to implement quantum algorithms on IBM's cloud quantum systems</li> # </ol> # </div> # <a href="https://qiskit.org/textbook/ch-states/introduction.html"><button class="preface-button read-textbook">Read the textbook <span class="rangle"><img src="/textbook/assets/images/rightarrow.svg"></span></button></a> # </div> # + [markdown] tags=["preface-cell"] # # About the Textbook # # <p>This is a free digital textbook that will teach you the concepts of quantum computing while you learn to use the Qiskit SDK.</p> # # ## Run the Code Inline # # <p>This textbook is built on a jupyter notebook framework that allows for easy reading, but it also allows readers to edit and run the code right in the textbook. The chapters can also be opened as Jupyter notebooks in the <a href="https://quantum-computing.ibm.com/jupyter">IBM Quantum Experience</a>, no installs required!</p> # + tags=["preface-code-cell"] # Click 'try', then 'run' to see the output, # you can change the code and run it again. print("This code works!") from qiskit import QuantumCircuit qc = QuantumCircuit(2) # Create circuit with 2 qubits qc.h(0) # Do H-gate on q0 qc.cx(0,1) # Do CNOT on q1 controlled by q0 qc.measure_all() qc.draw() # + [markdown] tags=["preface-cell"] # <a href="https://qiskit.org/textbook/widgets-index.html"><button class="preface-button">Interactivity Tour<span class="rangle"><img src="/textbook/assets/images/rightarrow.svg"></span></button></a> # # ## Learn with Real Quantum Systems # # <p>The best way to learn is by doing. Qiskit allows users to run experiments on state-of-the-art quantum devices from the comfort of their homes. The textbook teaches not only theoretical quantum computing, but the experimental quantum physics that realises it.</p> # # <img src="images/preface-hw-example.png" class="preface-image"> # # <a href="https://qiskit.org/textbook/ch-quantum-hardware/accessing_higher_energy_states.html"><button class="preface-button">See Example: Accessing Higher Level States<span class="rangle"><img src="/textbook/assets/images/rightarrow.svg"></span></button></a> # # # Using the Textbook # # <p>If you're reading the textbook independently, you don't have to read it all in order, but we recommend you read chapters 1-3 first.</p> # # ## Curriculum Integration # # <p>The textbook can be followed as an independent course, however it has been designed to accompany a traditional university course. The textbook shows students how to use Qiskit to experiment with quantum algorithms and hardware, and uses this to reinforce their understanding. # </p> # # <img src="images/curriculum.svg" class="preface-image"> # # ## Use the Textbook in Your Course # # If you are using the Qiskit Textbook in your course, you can join the IBM Quantum Educators Program. The Program provides: # # <ul class="preface-list"> # <li> The ability to reserve time for priority access to our open systems for in-class demonstrations </li> # <li> Access to additional premium systems beyond our open systems</li> # <li> Access to a 5-qubit system with full microwave control using Qiskit Pulse</li> # </ul> # # <a href="https://quantum-computing.ibm.com/programs/educators"><button class="preface-button">Sign Up for the IBM Quantum Educators Program<span class="rangle"><img src="/textbook/assets/images/rightarrow.svg"></span></button></a> # # # Contact # # <p> If you have any questions or suggestions about the textbook or would like to incorporate it into your curriculum, please contact <NAME> <a href="mailto:<EMAIL>">(<EMAIL>)</a>. In the true spirit of open-source, any chapter contributions are welcome in this GitHub repository.</p> # # # Contributors # # <p> Learn Quantum Computation using Qiskit is the work of several individuals. If you use it in your work, cite it using <a href="https://github.com/qiskit-community/qiskit-textbook/blob/master/content/qiskit-textbook.bib">this bib file</a> or directly as:</p> # <p><i> # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.</i></p> # -
content/preface.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Problem definition # Being $D$ the number of days and $S$ the list of sellers, each seller with the properties $price \in \mathbb{N}$ and $when \in \mathbb{N}$, the list $days$ have to each day $d\in \{d\in \mathbb{N} : d\leq D\}$ the seller $s \in S$ such that $s.price$ is minimum and $s.when \in (d - 30, d]$. # # The list $days$ provides an easy interpretation of the purchasing plan, and it will have the following format: # # `days = [[when_1, price_1],[when_2, price_2], ... , [when_D, price_D]]` # # The first 10 loaves have zero price and do not need to be bought from any seller and to solve the problem we should count the number of times we bought from each seller. The intuition behind this reasoning is very simple: we'll have to buy 1 loaf of bread every day, and we can "buy" from any seller that visited us in the last 30 days. # # # Complexity # The time complexity [1] of this algorithm is $O(|S| + |S|*D)$ because it first builts the $days$ list iterating through the list $S$ and then iterates it again using the count method, which has $O(n)$ complexity [2]. # That is, quadratic time $O(n^2)$ # # The spatial complexity is $O(D)$, because the list $days$ have $d$ element. That is, linear storage $O(n)$. # # # References # [1] - <NAME>, <NAME>, <NAME>, and <NAME>. 2009. Introduction to Algorithms, Third Edition (3rd ed.). The MIT Press. # [2] - <NAME>. Stack Overflow answer. Available in: https://stackoverflow.com/a/44813154/12555523 def calculate_purchasing_plan(total_days, sellers): days = [[None,0]]*10 + [[None,float("inf")]]*(total_days-10) expiracy = 30 for seller in sellers: days[seller[0]:seller[0]+expiracy] = list( map(lambda x: seller if seller[1]<x[1] else x, days[seller[0]:seller[0]+expiracy])) #print('\n'.join('{}: {}'.format(*k) for k in enumerate(days))) if [None,float("inf")] in days: return None return [days.count(seller) for seller in sellers] print(calculate_purchasing_plan(60, [(10,200), (15,100), (35,500), (50,30)])) #Expected answer: [5, 30, 5, 10] print(calculate_purchasing_plan(600, [(10,200), (15,100), (35,500), (50,30)])) #Expected answer: none (no possible solution) print(calculate_purchasing_plan(61, [(0,10),(30,200)])) #Expected answer: none (no possible solution) print(calculate_purchasing_plan(60, [(0,10),(30,-10),(3,10)])) #Expected answer: [20, 30, 0] print(calculate_purchasing_plan(30, [(-10,10),(20,-10)])) #Expected answer: none (because seller.when should be a natural number)
solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="ZZc-hhpItgMH" import numpy as np import commpy.modulation as commpy_modulation from ModulationPy import PSKModem, QAMModem # + colab={} colab_type="code" id="y77TcvujtgML" msg = np.random.randint(0, 2, int(1e4)) # + [markdown] colab_type="text" id="FjrlnawKtgMO" # ## ModulationPy # + colab={} colab_type="code" id="SQ0OoxBgtgMP" modem = PSKModem(4) # our class initialization m = modem.modulate(msg) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="XC4av2DXtgMT" outputId="3b6ecef5-fb6e-4ef6-d8c0-cfe117ab4477" # %%timeit m = modem.modulate(msg) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="3ukIC8DStgMW" outputId="079e7758-52be-4bc6-95d1-fe246c880a18" # %%timeit modem.demodulate(m) # demodulation # demodulation # - modem = QAMModem(256) # our class initialization m = modem.modulate(msg) # %%timeit m = modem.modulate(msg) # %%timeit modem.demodulate(m) # demodulation # demodulation # + [markdown] colab_type="text" id="zLQNPPjatgMZ" # ## CommPy # + colab={} colab_type="code" id="qxnSclywtgMZ" modem = commpy_modulation.PSKModem(4) m = modem.modulate(msg) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="wtTOnlX5tgMd" outputId="c7d610c6-eaa8-4738-a00f-c6faeffd82c0" # %%timeit m = modem.modulate(msg) # + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="NfhCU81EtgMi" outputId="0b970e72-9e62-445a-ec7c-1ab2a3aa7187" # %%timeit modem.demodulate(m, demod_type='soft') # - modem = commpy_modulation.QAMModem(256) m = modem.modulate(msg) # %%timeit m = modem.modulate(msg) # %%timeit modem.demodulate(m, demod_type='soft')
docs/CommPy_vs_ModulationPy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # code for loading the format for the notebook import os # path : store the current path to convert back to it later path = os.getcwd() os.chdir(os.path.join('..', 'notebook_format')) from formats import load_style load_style(css_style = 'custom2.css') # + os.chdir(path) # 1. magic for inline plot # 2. magic to print version # 3. magic so that the notebook will reload external python modules # 4. a ipython magic to enable retina (high resolution) plots # https://gist.github.com/minrk/3301035 # %matplotlib inline # %load_ext watermark # %load_ext autoreload # %autoreload 2 # %config InlineBackend.figure_format = 'retina' import numpy as np import pandas as pd import matplotlib.pyplot as plt from graphviz import Source from sklearn.datasets import load_iris from sklearn.metrics import accuracy_score from sklearn.tree import export_graphviz from sklearn.tree import DecisionTreeClassifier # %watermark -a 'Ethen' -d -t -v -p numpy,pandas,matplotlib,sklearn # - # # Decision Tree (Classification) # # Lets first create a scenario: say I want to buy a house and wish to borrow a loan from the bank. Now before giving me the loan, the bank is going to look at my history record like my credit, what has it been like in the past? How much money do I make? (and maybe some other information) and use them to determine whether loaning me money is a risky thing or not (whether I'm going to default). So how can the bank do this with decision trees? # # Decision trees are formed by a collection of rules (**if-then** statements that partition the data) based on variables available from the data set. So in the example above, a very simple decision tree model could look like this: # # ``` # if Credit = excellent then # Outcome = Yes # else if Credit = poor then # if Income = high then # Outcome = Yes # else if Income = low then # Outcome = No # ``` # # The algorithm works by starting at the top of the tree (the root node), then it will traverse down the branches of this decision tree and ask a series of questions. In the end it will reach the bottom of the tree (the leaf node) that contains the final outcome. For example, if somebody has a credit that's poor and his/her income is high, then the bank will say a Yes, we will give him/her the loan. # # Our task now is to learn how to generate the tree to create these decision rules for us. Thankfully, the core method for learning a decision tree can be viewed as a recursive algorithm. A decision tree can be "learned" by splitting the dataset into subsets based on the input features/attributes' value. This process is repeated on each derived subset in a recursive manner called recursive partitioning: # # 1. Start at the tree's root node # 2. Select the best rule/feature that splits the data into two subsets (child node) for the current node # 3. Repeated step 2 on each of the derived subset until the tree can't be further splitted. As we'll later see, we can set restrictions to decide when the tree should stop growing. # # There are a few additional details that we need to make more concrete. Including how to pick the rule/feature to split on and because it is a recursive algorithm, we have to figure out when to stop the recursion, in other words, when to not go and split another node in the tree. # ## Splitting criteria for classification trees # # The first question is what is the best rule/feature to split on and how do we measure that? One way to determine this is by choosing the one that maximizes the **Information Gain (IG)** at each split. # # $$IG(D_{p}, a) = I(D_{p}) - p_{left} I(D_{left}) - p_{right} I(D_{right})$$ # # - $IG$: Information Gain # - $a$: feature to perform the split # - $I$: Some impurity measure that we'll look at in the subsequent section # - $D_{p}$: training subset of the parent node # - $D_{left}$, $D_{right}$ :training subset of the left/right child node # - $p_{left}$, $p_{right}$: proportion of parent node samples that ended up in the left/right child node after the split. $\frac{N_{left}}{N_p}$ or $\frac{N_{right}}{N_p}$. Where: # - $N_p$: number of samples in the parent node # - $N_{left}$: number of samples in the left child node # - $N_{right}$: number of samples in the right child node # ## Impurity # # The two most common impurity measure are entropy and gini index. # # ### Entropy # # Entropy is defined as: # # $$I_E(t) = - \sum_{i =1}^{C} p(i \mid t) \;log_2 \,p(i \mid t)$$ # # for all non-empty classes, $p(i \mid t) \neq 0$, where: # # - $p(i \mid t)$ is the proportion (or frequency or probability) of the samples that belong to class $i$ for a particular node $t$ # - $C$ is the number of unique class labels # # The entropy is therefore 0 if all samples at a node belong to the same class, and the entropy is maximal if we have an uniform class distribution. For example, in a binary class setting, the entropy is 0 if $p(i =1 \mid t) =1$ or $p(i =0 \mid t) =1$. And if the classes are distributed uniformly with $p(i =1 \mid t) = 0.5$ and $p(i =0 \mid t) =0.5$ the entropy is 1, which we can visualize by plotting the entropy for binary class setting below. # + def entropy(p): return - p * np.log2(p) - (1 - p) * np.log2(1 - p) # change default figure and font size plt.rcParams['figure.figsize'] = 8, 6 plt.rcParams['font.size'] = 12 x = np.arange(0.0, 1.0, 0.01) ent = [entropy(p) if p != 0 else None for p in x] plt.plot(x, ent) plt.axhline(y = 1.0, linewidth = 1, color = 'k', linestyle = '--') plt.ylim([ 0, 1.1 ]) plt.xlabel('p(i=1)') plt.ylabel('Entropy') plt.show() # - # ### Gini Index # # Gini Index is defined as: # # \begin{align*} # I_G(t) &= \sum_{i =1}^{C} p(i \mid t) \big(1-p(i \mid t)\big) \nonumber \\ # &= \sum_{i =1}^{C} p(i \mid t) - p(i \mid t)^2 \nonumber \\ # &= \sum_{i =1}^{C} p(i \mid t) - \sum_{i =1}^{C} p(i \mid t)^2 \nonumber \\ # &= 1 - \sum_{i =1}^{C} p(i \mid t)^2 # \end{align*} # # Compared to Entropy, the maximum value of the Gini index is 0.5, which occurs when the classes are perfectly balanced in a node. On the other hand, the minimum value of the Gini index is 0 and occurs when there is only one class represented in a node (A node with a lower Gini index is said to be more "pure"). # # This time we plot Entropy and Gini index together to compare them against each other. # + def gini(p): return p * (1 - p) + (1 - p) * ( 1 - (1 - p) ) gi = gini(x) # plot for i, lab in zip([ent, gi], ['Entropy', 'Gini Index']): plt.plot(x, i, label = lab) plt.legend(loc = 'upper center', bbox_to_anchor = (0.5, 1.15), ncol = 3, fancybox = True, shadow = False) plt.axhline(y = 0.5, linewidth = 1, color = 'k', linestyle = '--') plt.axhline(y = 1.0, linewidth = 1, color = 'k', linestyle = '--') plt.ylim([ 0, 1.1 ]) plt.xlabel('p(i=1)') plt.ylabel('Impurity') plt.tight_layout() plt.show() # - # As we can see from the plot, there is not much differences (as in they both increase and decrease at similar range). In practice, Gini Index and Entropy typically yield very similar results and it is often not worth spending much time on evaluating decision tree models using different impurity criteria. As for which one to use, maybe consider Gini Index, because this way, we don’t need to compute the log, which can make it a bit computationly faster. # # Decision trees can also be used on regression task. It's just instead of using gini index or entropy as the impurity function, we use criteria such as MSE (mean square error): # # $$I_{MSE}(t) = \frac{1}{N_t} \sum_i^{N_t}(y_i - \bar{y})^2$$ # # Where $\bar{y}$ is the averages of the response at node $t$, and $N_t$ is the number of observations that reached node $t$. This is simply saying, we compute the differences between all $N_t$ observation's reponse to the average response, square it and take the average. # ## Concrete Example # # Here we'll calculate the Entropy score by hand to hopefully make things a bit more concrete. Using the bank loan example again, suppose at a particular node, there are 80 observations, of whom 40 were classified as Yes (the bank will issue the loan) and 40 were classified as No. # # We can first calculate the Entropy before making a split: # # $$I_E(D_{p}) = - \left( \frac{40}{80} log_2(\frac{40}{80}) + \frac{40}{80} log_2(\frac{40}{80}) \right) = 1$$ # # Suppose we try splitting on Income and the child nodes turn out to be. # # - Left (Income = high): 30 Yes and 10 No # - Right (Income = low): 10 Yes and 30 No # # $$I_E(D_{left}) = - \left( \frac{30}{40} log_2(\frac{30}{40}) + \frac{10}{40} log_2(\frac{10}{40}) \right) = 0.81$$ # $$I_E(D_{right}) = - \left( \frac{10}{40} log_2(\frac{10}{40}) + \frac{30}{40} log_2(\frac{30}{40}) \right) = 0.81$$ # $$IG(D_{p}, Income) = 1 - \frac{40}{80} (0.81) - \frac{40}{80} (0.81) = 0.19$$ # # Next we repeat the same process and evaluate the split based on splitting by Credit. # # - Left (Credit = excellent): 20 Yes and 0 No # - Right (Credit = poot): 20 Yes and 40 No # # $$I_E(D_{left}) = - \left( \frac{20}{20} log_2(\frac{20}{20}) + \frac{0}{20} log_2(\frac{0}{20}) \right) = 0$$ # $$I_E(D_{right}) = - \left( \frac{20}{60} log_2(\frac{20}{60}) + \frac{40}{60} log_2(\frac{40}{60}) \right) = 0.92$$ # $$IG(D_{p}, Credit) = 1 - \frac{20}{80} (0) - \frac{60}{80} (0.92) = 0.31$$ # # In this case, it will choose Credit as the feature to split upon. # # If we were to have more features, the decision tree algorithm will simply try every possible split, and will choose the split that maximizes the information gain. If the feature is a continuous variable, then we can simply get the unique values of that feature in a sorted order, then try all possible split values (threshold) by using cutoff point (average) between every two values (e.g. a unique value of 1, 2, 3 will result in trying the split on the value 1.5 and 2.5). Or to speed up computations, we can bin the unqiue values into buckets, and split on the buckets. # ## When To Stop Recursing # # The other question that we need to address is when to stop the tree from growing. There are some early stopping criteria that is commonly used to prevent the tree from overfitting. # # - **Maximum depth** The length of the longest path from a root node to a leaf node will not exceed this value. This is the most commonly tuned hyperparameter for tree-based method # - **Minimum sample split:** The minimum number of samples required to split a node should be greater than this number # - **Minimum information gain** The minimum information gain required for splitting on the best feature # # And that's pretty much it for classification trees! For a more visual appealing explanation, the following link this a website that uses interactive visualization to demonstrate how decision trees work. [A Visual Introduction to Machine Learning](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/) # # ## Implementation # # With all of that in mind, the following section implements a toy classification tree algorithm. class Tree: """ Classification tree using information gain with entropy as impurity Parameters ---------- max_features : int or None, default None The number of features to consider when looking for the best split, None uses all features min_samples_split : int, default 10 The minimum number of samples required to split an internal node max_depth : int, default 3 Maximum depth of the tree minimum_gain : float, default 1e-7 Minimum information gain required for splitting """ def __init__(self, max_depth = 3, max_features = None, minimum_gain = 1e-7, min_samples_split = 10): self.max_depth = max_depth self.max_features = max_features self.minimum_gain = minimum_gain self.min_samples_split = min_samples_split def fit(self, X, y): """pass in the 2d-array dataset and the response column""" self.n_class = np.unique(y).shape[0] # in the case you're wondering why we have this implementation of # choosing the number of features to consider when looking # for the best split, it will become much clearer when we # start discussing Random Forest algorithm if self.max_features is None or self.max_features > X.shape[1]: self.max_features = X.shape[1] self.feature_importance = np.zeros(X.shape[1]) self.tree = _create_decision_tree(X, y, self.max_depth, self.minimum_gain, self.max_features, self.min_samples_split, self.n_class, self.feature_importance, X.shape[0]) self.feature_importance /= np.sum(self.feature_importance) return self def predict(self, X): proba = self.predict_proba(X) pred = np.argmax(proba, axis = 1) return pred def predict_proba(self, X): proba = np.empty((X.shape[0], self.n_class)) for i in range(X.shape[0]): proba[i] = self._predict_row(X[i, :], self.tree) return proba def _predict_row(self, row, tree): """Predict single row""" if tree['is_leaf']: return tree['prob'] else: if row[tree['split_col']] <= tree['threshold']: return self._predict_row(row, tree['left']) else: return self._predict_row(row, tree['right']) # + def _create_decision_tree(X, y, max_depth, minimum_gain, max_features, min_samples_split, n_class, feature_importance, n_row): """recursively grow the decision tree until it reaches the stopping criteria""" try: assert max_depth > 0 assert X.shape[0] > min_samples_split column, value, gain = _find_best_split(X, y, max_features) assert gain > minimum_gain feature_importance[column] += (X.shape[0] / n_row) * gain # split the dataset and grow left and right child left_X, right_X, left_y, right_y = _split(X, y, column, value) left_child = _create_decision_tree(left_X, left_y, max_depth - 1, minimum_gain, max_features, min_samples_split, n_class, feature_importance, n_row) right_child = _create_decision_tree(right_X, right_y, max_depth - 1, minimum_gain, max_features, min_samples_split, n_class, feature_importance, n_row) except AssertionError: # if criteria reached, compute the classification # probability and return it as a leaf node # note that some leaf node may only contain partial classes, # thus specify the minlength so class that don't appear will # still get assign a probability of 0 counts = np.bincount(y, minlength = n_class) prob = counts / y.shape[0] leaf = {'is_leaf': True, 'prob': prob} return leaf node = {'is_leaf': False, 'left': left_child, 'right': right_child, 'split_col': column, 'threshold': value} return node def _find_best_split(X, y, max_features): """Greedy algorithm to find the best feature and value for a split""" subset = np.random.choice(X.shape[1], max_features, replace = False) max_col, max_val, max_gain = None, None, None parent_entropy = _compute_entropy(y) for column in subset: split_values = _find_splits(X, column) for value in split_values: splits = _split(X, y, column, value, return_X = False) gain = parent_entropy - _compute_splits_entropy(y, splits) if max_gain is None or gain > max_gain: max_col, max_val, max_gain = column, value, gain return max_col, max_val, max_gain def _compute_entropy(split): """entropy score using a fix log base 2""" _, counts = np.unique(split, return_counts = True) p = counts / split.shape[0] entropy = -np.sum(p * np.log2(p)) return entropy def _find_splits(X, column): """ find all possible split values (threshold), by getting unique values in a sorted order and finding cutoff point (average) between every two values """ X_unique = np.unique(X[:, column]) split_values = np.empty(X_unique.shape[0] - 1) for i in range(1, X_unique.shape[0]): average = (X_unique[i - 1] + X_unique[i]) / 2 split_values[i - 1] = average return split_values def _compute_splits_entropy(y, splits): """compute the entropy for the splits (the two child nodes)""" splits_entropy = 0 for split in splits: splits_entropy += (split.shape[0] / y.shape[0]) * _compute_entropy(split) return splits_entropy def _split(X, y, column, value, return_X = True): """split the response column using the cutoff threshold""" left_mask = X[:, column] <= value right_mask = X[:, column] > value left_y, right_y = y[left_mask], y[right_mask] if not return_X: return left_y, right_y else: left_X, right_X = X[left_mask], X[right_mask] return left_X, right_X, left_y, right_y # - # We will load the [Iris dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_iris.html), and use it as a sample dataset to test our algorithm. This data sets consists of 3 different types of irises’ (Setosa, Versicolour, and Virginica). It is stored as a 150x4 numpy.ndarray, where the rows are the samples and the columns being Sepal Length, Sepal Width, Petal Length and Petal Width. # + # load a sample dataset iris = load_iris() X = iris.data y = iris.target # train model and print the accuracy score tree = Tree() tree.fit(X, y) y_pred = tree.predict(X) print('classification distribution: ', np.bincount(y_pred)) print('accuracy score: ', accuracy_score(y, y_pred)) # - # use library to confirm results are comparable clf = DecisionTreeClassifier(criterion = 'entropy', min_samples_split = 10, max_depth = 3) clf.fit(X, y) y_pred = clf.predict(X) print('classification distribution: ', np.bincount(y_pred)) print('accuracy score: ', accuracy_score(y, y_pred)) # **Advantages of decision trees:** # # # - Features don't require scaling or normalization # - Great at dealing with data that have lots of categorical features # - Can be displayed graphically, thus making it highly interpretable (in the next code chunk) # - It is non-parametric, thus it will outperform linear models if relationship between features and response is highly non-linear # # For visualizing the decision tree, you might need to have graphviz installed. For the mac user, try doing `brew install graphviz` or follow the instructions in this [link](http://macappstore.org/graphviz-2/). # + # visualize the decision tree # export it as .dot file, other common parameters include # `rounded` (boolean to round the score on each node) export_graphviz(clf, feature_names = iris.feature_names, filled = True, class_names = iris.target_names, out_file = 'tree.dot') # read it in and visualize it, or if we wish to # convert the .dot file into other formats, we can do: # import os # os.system('dot -Tpng tree.dot -o tree.jpeg') with open('tree.dot') as f: dot_graph = f.read() Source(dot_graph) # - # # Reference # # - [scikit-learn Documentation: Decision Trees](http://scikit-learn.org/stable/modules/tree.html) # - [Blog: The advantages of the different impurity metrics](http://sebastianraschka.com/faq/docs/decision-tree-binary.html) # - [Notebook: Cheatsheet for Decision Tree Classification](http://nbviewer.jupyter.org/github/rasbt/pattern_classification/blob/master/machine_learning/decision_trees/decision-tree-cheatsheet.ipynb) # - [Github: Minimal and clean examples of machine learning algorithms](https://github.com/rushter/MLAlgorithms/blob/master/mla/ensemble/tree.py)
trees/decision_tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Exploring different Legend options in Magics # # This notebook will help you discover lots of posibilities for customizing legend on your maps in Magics. # + [markdown] slideshow={"slide_type": "slide"} # ### Install Magics # If you don't have Magics installed, run the next cell to install Magics using conda. # - # Install Magics in the current Jupyter kernel import sys # !conda install --yes --prefix {sys.prefix} Magics # + [markdown] slideshow={"slide_type": "subslide"} # **Mlegend** controls how legend looks on our maps. Here we can set things like display type, position, width, height, title and many more things. # List of all **mlegend** parameters you can find [in Magics documentation](https://confluence.ecmwf.int/display/MAGP/Legend "Legend parameters") # # One **mlegend** setting controlls legends of **ALL** the fields on a map. # # + [markdown] slideshow={"slide_type": "subslide"} # ### Import Magics and define non Lengend paramters # # For start let's import Magics and define some **none legend** parameters. We will try not to change these much in the rest of the notebook. # + slideshow={"slide_type": "slide"} import Magics.macro as magics projection = magics.mmap( subpage_map_library_area = "on", subpage_map_area_name = "europe", page_id_line = "off" ) coast = magics.mcoast() ecmwf_cont = magics.mcont( contour_automatic_setting = "ecmwf", legend = "on") # Different meteorological parameters we will plot in this notebook temperature = magics.mgrib(grib_input_file_name = "../../data/t850.grib") geopotential = magics.mgrib(grib_input_file_name = "../../data/z500.grib") precipitation = magics.mgrib(grib_input_file_name = "../../data/total_precipitation.grib") sat_ir = magics.mgrib(grib_input_file_name = "../../data/ssd.grib") # + [markdown] slideshow={"slide_type": "fragment"} # ### Default legend # # As with everything in Magics, default is something you can start with. # # Default display type is disjoint. Default text colour is blue, and position is horizontal on top. All of this can be changed. # + slideshow={"slide_type": "slide"} legend = magics.mlegend() magics.plot(projection, temperature, ecmwf_cont, coast, legend) # + [markdown] slideshow={"slide_type": "slide"} # ### Vertical legend with user defined title # # Legend default position is at the top, but another automatic position is on the right side. # Display type can be disjoint, histogram and continuous. # # If we have many values shaded, we can choose to plot only some of them by setting **legend_label_frequency**. # # Let's customize the title too. We can choose text font, size, colour, style etc. # + legend = magics.mlegend( legend_display_type = "continuous", legend_automatic_position = "right", legend_title = "on", legend_title_text = "Temperature at 850 hPa", legend_text_font_size = "0.45", legend_text_colour = "#2b619e", legend_label_frequency = 2) magics.plot(projection, temperature, ecmwf_cont, coast, legend) # - # ### Positional disjoint legend with user defined labels # # Lables on the legend can be numbers as well as user defined text (and both). # If we have too much text we might want to separate legend in columns, instead putting all in one row. # # Let's make highly customized positional, disjoint legend, with title and text labels. # + slideshow={"slide_type": "subslide"} t850_contour = magics.mcont( legend = "on", contour_shade = "on", contour_hilo = "off", contour = "off", contour_label = "off", contour_shade_method = "area_fill", contour_shade_max_level = 48., contour_shade_min_level = -48., contour_level_selection_type = "level_list", contour_level_list = [-48.0,-10.0,0.0,10.0,20.0,30.0, 48.0], contour_shade_colour_method = "list", contour_shade_colour_list = [ "blue_purple", "greenish_blue", "blue_green", "yellow_green", "yellow", "orange"]) legend = magics.mlegend( legend_box_mode = "positional", legend_box_x_position = 1.00, legend_box_y_position = 17.00, legend_box_x_length = 20.00, legend_box_y_length = 2.00, legend_column_count = 3, legend_display_type = "disjoint", legend_text_composition = "user_text_only", legend_title = "on", legend_user_lines = ["extremely cold", "very cold", "cold", "temperate", "hot", "very hot"], legend_text_font_size = "0.5", legend_text_colour = "#2b619e", legend_title_text = "Temperature at 850 hPa") magics.plot(projection, temperature, t850_contour, coast, legend) # - # ### Positional and histogram legend # # Legend can be in a form of histogram too. # + legend = magics.mlegend( legend_display_type = "histogram", legend_box_mode = "positional", legend_box_x_position = 1.00, legend_box_y_position = 17.00, legend_box_x_length = 18.00, legend_box_y_length = 3.00, legend_title = "on", legend_title_text = "Total precipitation", legend_text_font_size = "0.45", legend_text_colour = "#2b619e") magics.plot(projection, precipitation, ecmwf_cont, coast, legend) # - # ### User defined minimum and maximum # # If we don't want to show the actual minimum and maximum of the field, or list of levels, we can write something special only on first and last point at legend using **mlegend**'s maximum and minimum parameters. # # Also, for cases when we have many many levels we can remove the lines between colours on legend. # + legend = magics.mlegend( legend_display_type = "continuous", legend_automatic_position = "right", legend_title = "on", legend_title_text = "Brightness temperature", legend_text_font_size = "0.45", legend_text_colour = "#2b619e", legend_label_frequency = 4, legend_user_minimum = "on", legend_user_minimum_text = "< -100", legend_user_maximum = "on", legend_user_maximum_text = "> 50", legend_entry_border = "off") magics.plot(projection, sat_ir, ecmwf_cont, coast, legend) # - # ### Gradients' waypoints labels on Legend # # In cases we are using gradients as shading colour method, and have a large number of colours, we may want to label only gradients' waypoints. The way to do it is by supplying list of values for labels. # # A little tip for the case we don't want any lines on legend. Setting **legend_entry_border** to "off" will remove lines between the colours, but the line around the legend is still there as we can see in previous example. The trick to get rid of that line too is to set **legend_entry_border** to "on" and **legend_entry_border_colour** to "none". # # + t_cont = magics.mcont( legend = "on", contour = "off", contour_level_selection_type = "level_list", contour_level_list = [-30.,-20.,0.,20.,30.], contour_gradients_step_list = [5,10,10,5], contour_label = "off", contour_shade = "on", contour_shade_colour_method = "gradients", contour_gradients_technique = "rgb", contour_shade_method = "area_fill", contour_gradients_colour_list = ["RGB(0.01961,0.251,0.4157)","greenish_blue","white", "orangish_red","RGB(0.3756,0.06648,0.05582)"], contour_gradients_waypoint_method = "ignore") legend = magics.mlegend( legend_display_type = "continuous", legend_box_mode = "automatic", legend_text_composition = "user_text_only", legend_values_list = [-30.,-20.,0.,20.,30.], legend_text_font_size = "0.45", legend_text_colour = "#2b619e", legend_entry_border = "on", legend_entry_border_colour = "none") magics.plot(projection, temperature, t_cont, coast, legend)
visualisation/tutorials/Legend.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Assignment - Find All Nodes Distance K in Binary Tree # Time: O(n) -> Two-pass approach # Space: O(n) from collections import deque class Node: def __init__(self, val): self.val = val self.left = self.right = None def print_k_leaf(root, k): def dfs(root, parent = None): if not root: return root root.parent = parent if not root.left and not root.right: queue.append(root) dfs(root.left, root) dfs(root.right, root) queue = [] dfs(root) res = [] seen = set() for node in queue: K = k while K and node: node = node.parent K -= 1 if not K and node and node not in seen: res.append(node.val) seen.add(node) return res if __name__=='__main__': root = Node(3) root.left = Node(8) root.right = Node(9) root.left.left = Node(11) root.left.right = Node(7) root.left.right.left = Node(6) root.left.right.right = Node(12) root.right.left = Node(8) root.right.right = Node(3) print(print_k_leaf(root, 2)) # + # Time: O(n) -> One-pass approach # Space: O(n) class Node: def __init__(self, val): self.val = val self.left = self.right = None def print_k_leaf(node, k, res): def dfs(root, i = -1, arr = [], seen = []): if not root: return root arr.append(root.val) seen.append(False) i += 1 if not root.left and not root.right and i >= k: if not seen[i - k]: res.append(arr[i - k]) seen[i - k] = True dfs(root.left, i, arr, seen) dfs(root.right, i, arr, seen) arr.pop() seen.pop() dfs(root) if __name__=='__main__': root = Node(3) root.left = Node(8) root.right = Node(9) root.left.left = Node(11) root.left.right = Node(7) root.left.right.left = Node(6) root.left.right.right = Node(12) root.right.left = Node(8) root.right.right = Node(3) res = [] print_k_leaf(root, 2, res) print(res) # -
assignments/binary_tree/Print All Nodes K Distance from Leaf Node in Binary Tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Reading TREx Spectrograph raw image data # # Below we'll retrieve TREx Spectrograph data, read it, and do a bit of processing. An internet connection is required since we'll need to download data. # ## Retrieve a minute of data import requests # 2020-03-21 06:30 UTC in Rabbit Lake, Manitoba, was a particularly good night url = "https://data.phys.ucalgary.ca/sort_by_project/TREx/spectrograph/stream0/2020/03/03/rabb_spect-01/ut06/20200303_0630_rabb_spect-01_spectra.pgm.gz" r = requests.get(url) # save data to a local file filename = "20200303_0630_rabb_spect-01_spectra.pgm.gz" with open(filename, 'wb') as f: f.write(r.content) # ## Read the data file import trex_imager_readfile img, meta, problematic_files = trex_imager_readfile.read_spectrograph(filename) # the imager takes a picture every 15 seconds, so a 1-minute file usually contains 4 images print("Image dimensions: %d x %d" % (img.shape[0], img.shape[1])) print("Number of images: %d" % (img.shape[2])) # data for the first image img[:,:,0] # metadata for first image meta[0] # ## Visualize the image # %matplotlib inline from matplotlib import pyplot as plt plt.imshow(img[:,:,0], cmap="gray") plt.axis("off") plt.show() # # Let's scale the image so it's a bit brighter # Pulled from SciPy. This function was depreated in # scipy v1.2.0, but it's quite useful for us. import numpy as np def bytescale(data, cmin=None, cmax=None, high=65535, low=0): if high > 65535: raise ValueError("`high` should be less than or equal to 65535.") if low < 0: raise ValueError("`low` should be greater than or equal to 0.") if high < low: raise ValueError("`high` should be greater than or equal to `low`.") if cmin is None: cmin = data.min() if cmax is None: cmax = data.max() cscale = cmax - cmin if cscale < 0: raise ValueError("`cmax` should be larger than `cmin`.") elif cscale == 0: cscale = 1 scale = float(high - low) / cscale bytedata = (data - cmin) * scale + low return (bytedata.clip(low, high) + 0.5).astype(np.uint16) # scale the image with 200 as the floor and 2000 as the ceiling, then display im_scaled = bytescale(img[:,:,0], cmin=100, cmax=500) plt.imshow(im_scaled, cmap="gray") plt.axis("off") plt.show() # The x-axis is the meridional axis of the image we collect (which really is a magnetic North/South slice of the sky), and the y-axis is the wavelength. Each visible horizontal line you see in the image is a different wavelength. From top to bottom in the above image, the wavelength is ordered with red (630.0nm) towards the top and blue near the bottom (427.8nm) # ## Cleanup downloaded file # remove the downloaded files import os os.remove(filename)
python/examples/read_spectrograph.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import torch.utils.data import torch from trochvision import transforms # torch.util.data: 子类化自定义的数据 # # transforms: 对数据预处理 # # [官方文档](https://pytorch-cn.readthedocs.io/zh/latest/) # ``` # class DataPrep(torch.utils.data.Dataset) # ``` # 所有其他数据集都应该进行子类化。 # 所有子类应该override__len__和__getitem__,前者提供了数据集的大小,后者支持整数索引,范围从0到len(self)。 # 当然还有个初始化__init__()类:属性+方法,__init__()就是定义自己的属性 # + class DataPrep(torch.utils.data.Dataset) #子类化 def __init__(self, root, transform=None, train=True): #第一步初始化各个变量 self.root = root self.train = train def __getitem__(self, idx): #第二步装载数据,返回[img,label],idx就是一张一张地读取 # get item 获取 数据 img = imread(img_path) #img_path根据自己的数据自定义,灵活性很高 img = torch.from_numpy(img).float() #需要转成float gt = imread(gt_path) #读取gt,如果是分类问题,可以根据文件夹或命名赋值 0 1 gt = torch.from_numpy(gt).float() return img, gt #返回 一一对应 def __len__(self): # + #encoding:utf-8 import torch.utils.data as data import torch from scipy.ndimage import imread import os import os.path import glob from torchvision import transforms def make_dataset(root, train=True): #读取自己的数据的函数 dataset = [] if train: dirgt = os.path.join(root, 'train_data/groundtruth') dirimg = os.path.join(root, 'train_data/imgs') for fGT in glob.glob(os.path.join(dirgt, '*.jpg')): # for k in range(45) fName = os.path.basename(fGT) fImg = 'train_ori'+fName[8:] dataset.append( [os.path.join(dirimg, fImg), os.path.join(dirgt, fName)] ) return dataset #自定義dataset的框架 class MyTrainData(data.Dataset): #需要繼承data.Dataset def __init__(self, root, transform=None, train=True): #初始化文件路進或文件名 self.train = train if self.train: self.train_set_path = make_dataset(root, train) def __getitem__(self, idx): if self.train: img_path, gt_path = self.train_set_path[idx] img = imread(img_path) img = np.atleast_3d(img).transpose(2, 0, 1).astype(np.float32) img = (img - img.min()) / (img.max() - img.min()) img = torch.from_numpy(img).float() gt = imread(gt_path) gt = np.atleast_3d(gt).transpose(2, 0, 1) gt = gt / 255.0 gt = torch.from_numpy(gt).float() return img, gt def __len__(self): return len(self.train_set_path) # -
project2/preparedata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=true editable=true slideshow={"slide_type": "skip"} language="html" # <style> # .output_wrapper, .output { # height:auto !important; # max-height:300px; /* your desired max-height here */ # } # .output_scroll { # box-shadow:none !important; # webkit-box-shadow:none !important; # } # </style> # + deletable=true editable=true slideshow={"slide_type": "skip"} from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"} # ## Import Pandas # + deletable=true editable=true slideshow={"slide_type": "fragment"} import pandas as pd # + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"} # ## Dataset # + deletable=true editable=true slideshow={"slide_type": "fragment"} data = pd.read_csv('data-titanic.csv', index_col=3) data.head() # + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"} # ## Remove column(s) # + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"} # ### Remove one column # + deletable=true editable=true slideshow={"slide_type": "fragment"} data.drop('Ticket', axis=1, inplace=True) # + deletable=true editable=true slideshow={"slide_type": "fragment"} data.head() # + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"} # ### Remove more than one column # + deletable=true editable=true slideshow={"slide_type": "fragment"} data.drop(['Parch', 'Fare'], axis=1, inplace=True) # + deletable=true editable=true slideshow={"slide_type": "fragment"} data.head() # + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"} # ## Remove row(s) # + deletable=true editable=true slideshow={"slide_type": "fragment"} data.drop(['Braund, Mr. <NAME>', 'Heikkinen, <NAME>'], inplace=True) # + deletable=true editable=true slideshow={"slide_type": "fragment"} data.head() # + deletable=true editable=true slideshow={"slide_type": "skip"}
Chapter03/Removing columns from a pandas DataFrame.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd from glob import glob import os from shutil import copyfile from torch.utils.data import Dataset from PIL import Image import numpy as np from numpy.random import permutation import matplotlib.pyplot as plt from torchvision import transforms from torchvision.datasets import ImageFolder from torchvision.models import resnet18,resnet34 from torchvision.models.inception import inception_v3 from torchvision.models import densenet121 from torch.utils.data import DataLoader from torch.autograd import Variable import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F # %matplotlib inline # - is_cuda = torch.cuda.is_available() is_cuda # ## 유틸리티 함수 # + def imshow(inp,cmap=None): """Imshow for Tensor.""" inp = inp.numpy().transpose((1, 2, 0)) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.imshow(inp,cmap) class FeaturesDataset(Dataset): def __init__(self,featlst,labellst): self.featlst = featlst self.labellst = labellst def __getitem__(self,index): return (self.featlst[index],self.labellst[index]) def __len__(self): return len(self.labellst) def fit(epoch,model,data_loader,phase='training',volatile=False): if phase == 'training': model.train() if phase == 'validation': model.eval() volatile=True running_loss = 0.0 running_correct = 0 for batch_idx , (data,target) in enumerate(data_loader): if is_cuda: data,target = data.cuda(),target.cuda() data , target = Variable(data,volatile),Variable(target) if phase == 'training': optimizer.zero_grad() output = model(data) loss = F.cross_entropy(output,target) running_loss += F.cross_entropy(output,target,size_average=False).data preds = output.data.max(dim=1,keepdim=True)[1] running_correct += preds.eq(target.data.view_as(preds)).cpu().sum() if phase == 'training': loss.backward() optimizer.step() loss = running_loss/len(data_loader.dataset) accuracy = 100. * running_correct.item()/len(data_loader.dataset) print(f'{phase} loss is {loss:{5}.{2}} and {phase} accuracy is {running_correct}/{len(data_loader.dataset)}{accuracy:{10}.{4}}') return loss,accuracy # - # ## PyTorch 데이터셋 생성 data_transform = transforms.Compose([ transforms.Resize((299,299)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) # For Dogs & Cats dataset train_dset = ImageFolder('../Chapter03/dogsandcats/train/',transform=data_transform) val_dset = ImageFolder('../Chapter03/dogsandcats/valid/',transform=data_transform) classes=2 imshow(train_dset[150][0]) # ## 학습과 검증 데이터셋을 위한 데이터 로더 생성 train_loader = DataLoader(train_dset,batch_size=32,shuffle=False,num_workers=3) val_loader = DataLoader(val_dset,batch_size=32,shuffle=False,num_workers=3) # ## Densenet 121 모델 생성 # + my_densenet = densenet121(pretrained=True).features if is_cuda: my_densenet = my_densenet.cuda() for p in my_densenet.parameters(): p.requires_grad = False # - # ## 컨볼루션 피처 추출 # + #For training data trn_labels = [] trn_features = [] #code to store densenet features for train dataset. for d,la in train_loader: o = my_densenet(Variable(d.cuda())) o = o.view(o.size(0),-1) trn_labels.extend(la) trn_features.extend(o.cpu().data) #For validation data val_labels = [] val_features = [] #Code to store densenet features for validation dataset. for d,la in val_loader: o = my_densenet(Variable(d.cuda())) o = o.view(o.size(0),-1) val_labels.extend(la) val_features.extend(o.cpu().data) # - # ## 학습과 검증 피처 데이터셋 생성 # + # Create dataset for train and validation convolution features trn_feat_dset = FeaturesDataset(trn_features,trn_labels) val_feat_dset = FeaturesDataset(val_features,val_labels) # Create data loaders for batching the train and validation datasets trn_feat_loader = DataLoader(trn_feat_dset,batch_size=64,shuffle=True,drop_last=True) val_feat_loader = DataLoader(val_feat_dset,batch_size=64) # - # ## 전연결 네트워크 class FullyConnectedModel(nn.Module): def __init__(self,in_size,out_size): super().__init__() self.fc = nn.Linear(in_size,out_size) def forward(self,inp): out = self.fc(inp) return out trn_features[0].size(0) fc_in_size = trn_features[0].size(0) fc = FullyConnectedModel(fc_in_size,classes) if is_cuda: fc = fc.cuda() optimizer = optim.Adam(fc.parameters(),lr=0.0001) # ## 학습과 검증 모델 # + train_losses , train_accuracy = [],[] val_losses , val_accuracy = [],[] for epoch in range(1,10): epoch_loss, epoch_accuracy = fit(epoch,fc,trn_feat_loader,phase='training') val_epoch_loss , val_epoch_accuracy = fit(epoch,fc,val_feat_loader,phase='validation') train_losses.append(epoch_loss) train_accuracy.append(epoch_accuracy) val_losses.append(val_epoch_loss) val_accuracy.append(val_epoch_accuracy) # -
study08/DogsandCatsUsingDenseNet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Approximating functions on $R^2$ # # <b><NAME>, PhD</b> # # This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by <NAME> and <NAME>. # # Original (Matlab) CompEcon file: **demapp02.m** # # Running this file requires the Python version of CompEcon. This can be installed with pip by running # # # !pip install compecon --upgrade # # <i>Last updated: 2021-Oct-01</i> # <hr> # ## About # This notebook illustrates how to use CompEcon Toolbox routines to construct and operate with an approximant for a function defined on a rectangle in $R^2$. # # In particular, we construct an approximant for $f(x_1,x_2) = \frac{\cos(x_1)}{\exp(x_2)}$ on $[-1,1]\times[-1,1]$. The function used in this illustration posseses a closed-form, which will allow us to measure approximation error precisely. Of course, in practical applications, the function to be approximated will not possess a known closed-form. # # In order to carry out the exercise, one must first code the function to be approximated at arbitrary points. # Let's begin: # ## Initial tasks import numpy as np import pandas as pd import matplotlib.pyplot as plt from compecon import BasisChebyshev, BasisSpline, nodeunif, demo from matplotlib import cm # ### Defining some functions # Function to be approximated and analytic partial derivatives # + exp, cos, sin = np.exp, np.cos, np.sin f = lambda x: cos(x[0]) / exp(x[1]) d1 = lambda x: -sin(x[0]) / exp(x[1]) d2 = lambda x: -cos(x[0]) / exp(x[1]) d11 = lambda x: -cos(x[0]) / exp(x[1]) d12 = lambda x: sin(x[0]) / exp(x[1]) d22 = lambda x: cos(x[0]) / exp(x[1]) # - # Set the points of approximation interval: a, b = 0, 1 # ## Choose an approximation scheme. # # In this case, let us use an 6 by 6 Chebychev approximation scheme: n = 6 # order of approximation basis = BasisChebyshev([n, n], a, b) # write n twice to indicate the two dimensions. # a and b are broadcast. # ### Compute the basis coefficients c. # There are various way to do this: # * One may compute the standard approximation nodes `x` and corresponding interpolation matrix `Phi` and function values `y` and use: x = basis.nodes Phi = basis.Phi(x) # input x may be omitted if evaluating at the basis nodes y = f(x) c = np.linalg.solve(Phi, y) # * Alternatively, one may compute the standard approximation nodes `x` and corresponding function values `y` and use these values to create a `BasisChebyshev` object with keyword argument `y`: x = basis.nodes y = f(x) fa = BasisChebyshev([n, n], a, b, y=y) # coefficients can be retrieved by typing fa.c # * ... or one may simply pass the function directly to BasisChebyshev using keyword `f`, which by default will evaluate it at the basis nodes F = BasisChebyshev([n, n], a, b, f=f) # coefficients can be retrieved by typing F.c # ### Evaluate the basis # Having created a `BasisChebyshev` object, one may now evaluate the approximant at any point `x` by calling the object: x = np.array([[0.5],[0.5]]) # first dimension should match the basis dimension F(x) # ... one may also evaluate the approximant's first partial derivatives at `x`: dfit1 = F(x, [1, 0]) dfit2 = F(x, [0, 1]) # ... one may also evaluate the approximant's second own partial and cross partial derivatives at `x`: dfit11 = F(x, [2, 0]) dfit22 = F(x, [0, 2]) dfit12 = F(x, [1, 1]) # ### Compare analytic and numerical computations # + pycharm={"name": "#%%\n"} print('Function Values and Derivatives of cos(x_1)/exp(x_2) at x=(0.5,0.5)') results = pd.DataFrame({ 'Numerical': [F(x),dfit1,dfit2,dfit11, dfit12,dfit22], 'Analytic': np.r_[f(x), d1(x), d2(x), d11(x), d12(x), d22(x)]}, index=['Function', 'Partial 1','Partial 2','Partial 11','Partial 12', 'Partial 22'] ) results.round(5) # - # The cell below shows how the preceeding table could be generated in a single loop, using the `zip` function and computing all partial derivatives at once. # + labels = ['Function', 'Partial 1','Partial 2','Partial 11','Partial 12','Partial 22'] analytics =[func(x) for func in [f,d1,d2,d11,d12,d22]] deriv = [[0, 1, 0, 2, 0, 1], [0, 0, 1, 0, 2, 1]] ff = '%-11s %12.5f %12.5f' print('Function Values and Derivatives of cos(x_1)/exp(x_2) at x=(0.5,0.5)') print('%-11s %12s %12s\n' % ('','Numerical', 'Analytic'), '_'*40) for lab,appr,an in zip(labels, F(x,order=deriv),analytics): print(f'{lab:11s} {an[0]:12.5f} {appr:12.5f}') # - # ### Approximation accuracy # One may evaluate the accuracy of the Chebychev polynomial approximant by computing the approximation error on a highly refined grid of points: # + nplot = [101, 101] # chose grid discretization X = nodeunif(nplot, [a, a], [b, b]) # generate refined grid for plotting yapp = F(X) # approximant values at grid nodes yact = f(X) # actual function values at grid points error = (yapp - yact).reshape(nplot) X1, X2 = X X1.shape = nplot X2.shape = nplot fig1 = plt.figure(figsize=[12, 6]) ax = fig1.add_subplot(1, 1, 1, projection='3d') ax.plot_surface(X1, X2, error, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False) ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_zlabel('error') plt.title('Chebychev Approximation Error') plt.ticklabel_format(style='sci', axis='z', scilimits=(-1,1)) # - # The plot indicates that an order 11 by 11 Chebychev approximation scheme produces approximation errors no bigger in magnitude than $10^{-10}$. # Let us repeat the approximation exercise, this time constructing an order 21 by 21 cubic spline approximation scheme: # + n = [21, 21] # order of approximation S = BasisSpline(n, a, b, f=f) yapp = S(X) # approximant values at grid nodes error = (yapp - yact).reshape(nplot) fig2 = plt.figure(figsize=[12, 6]) ax = fig2.add_subplot(1, 1, 1, projection='3d') ax.plot_surface(X1, X2, error, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False) ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_zlabel('error') plt.title('Cubic Spline Approximation Error'); # - # The plot indicates that an order 21 by 21 cubic spline approximation scheme produces approximation errors no bigger in magnitude than $10^{-6}$. # ### Save all figures to disc # + #demo.savefig([fig1,fig2], name='demapp02')
_build/jupyter_execute/notebooks/app/02 Approximating functions on R2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Inheritance # ### What is Inheritance? # # The basic idea of inheritance in object-oriented programming is that a class can be created which can inherit the attributes and methods of another class. The class which inherits another class is called the **child class or derived class**, and the class which is inherited by another class is called **parent or base class**. # # This means that inheritance supports code reusability. # ### Objectives # # - Refresh our knowledge of inheritance and its advantages. # - Understand the use of `super` keyword. # - Introduce us to basic typing annotations. # - Know the differences between `issubclass`, `isinstance` and `type`. # ![inheritance image](../images/vehicles_classification.png "inheritance") # <small>Photo credit: https://www.python-course.eu/</small> from typing import List import datetime import csv class Employee: raise_amt = 1.04 def __init__(self, first: str, last:str, pay:int): self.first = first self.last = last self.pay = pay self.email = first.lower() + '.' + last.lower() + '@company.com' @property def fullname(self)-> str: return f"{self.first} {self.last}" def __repr__(self)-> str: return f"{self.__class__.__name__}({self.first}, {self.last}, {self.pay})" def __str__(self)-> str: return f"{self.fullname} - {self.email} makes €{self.pay}" def apply_raise(self)-> float: return self.pay * self.raise_amt @classmethod def set_raise_amt(cls, amount): cls.raise_amt = amount @staticmethod def is_weekday(day): if day.weekday() == 5 or day.weekday() == 6: return False return True @classmethod def from_csv(cls, row): first, last, pay = row return cls(first, last, pay) emp1 = Employee('Chidinma', 'Kalu', 85000) emp1.__repr__() Employee.set_raise_amt(1.05) Employee.raise_amt day = datetime.date(2019, 12, 25) Employee.is_weekday(day) # read csv file of employees with open('../data/employee_file.csv', 'r') as employee_file: csv_reader = csv.reader(employee_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: line_count += 1 else: emp = Employee.from_csv(row) line_count += 1 print(f"emp_{line_count} = {emp.__repr__()}") # __Note That__: In OOP, Inheritance signifies an ***IS-A*** relation. # # For Example: # - A manager is an Employee, # - A dog is an Animal, # - A Car is a Vehicle. # Manager Class inherits from Employee class Manager(Employee): raise_amt = 1.11 def __init__(self, first: str, last:str, pay:int, dept:str, employees:List[str]=None): super().__init__(first, last, pay) self.dept = dept self.employees = list(employees) if employees else [] def add_emps(self, emp:str)-> List[str]: if emp not in self.employees: self.employees.append(emp) return self.employees def remove_emps(self, emp:str)-> List[str]: if emp in self.employees: self.employees.remove(emp) return self.employees def print_emps(self)-> str: for emp in self.employees: print('--->', emp) # read manager file marketing_team = ['Sue', 'Tina', 'James', 'Diana', 'Pat', 'John'] man1 = Manager("Jane", "Doe", 94000, "Marketing", marketing_team) man1 man1.remove_emps("Tina") # ### Overwriting # If a function is overwritten, the original function will be gone. The function will be redefined. This process has nothing to do with object orientation or inheritance. # + def func(x): return x + 2 print(f"First function: {func(3)}") # f will be overwritten (or redefined) in the following: def func(x): return x + 8 print(f"Second function: {func(3)}") # - # ### Overloading # # This refers to the ability to define a function with the same name multiple times and seeing different results depending on the number or types of the parameters. # + def start(a, b=None): if b is not None: return a + b else: return a print(start(a=2)) print(start(a=2, b=10)) # - # __The * operator can be used as a more general approach for a family of functions with 1, 2, 3 or even more parameters__ # + def start(*a): if len(a) == 1: return a[0] elif len(a) == 2: return a[0] + a[1] else: return a[0] + a[1] + a[2] print(start(2)) print(start(2, 10)) print(start(2, 10, 13)) # - # ### Overriding # # Overriding refers to having a method with the same name in the child class as in the parent class. The definition of the method differs in parent and child classes but the name remains the same. # # Overriding means that the first definition will not be available anymore. # + def func(n): return n + 10 def func(n,m): return n + m + 10 print(func(3, 4)) print(func(2)) #This will throw an error. # + class Vehicle: def print_details(self): print("This is parent Vehicle class method") class Car(Vehicle): def print_details(self): print("This is child Car class method") class Bike(Vehicle): def print_details(self): print("This is child Bike class method") # + car1 = Vehicle() car1.print_details() car2 = Car() car2.print_details() car3 = Bike() car3.print_details() # - # ### Difference between `issubclass` and `isinstance`? # # * __issubclass__ is used to check if a class is a subclass of another class. # * __isinstance__ is used to check if an object is an instance of another class or any of its subclass(es). # issubclass print(issubclass(Manager, Employee)) print(issubclass(Employee, Manager)) # isinstance print(isinstance(man1, Employee)) print(isinstance(emp1, Manager)) # ### Difference between `isinstance` and `type`? # # We see that `isinstance` returns True if we compare an object either with the class it belongs to or with the superclass. Whereas the equality operator only returns True, if we compare an object with its own class. # # People make the mistake of using `type()` where `isinstance()` would have been more appropriate. print(isinstance(emp1, Employee), isinstance(man1, Employee)) print(isinstance(emp1, Manager)) print(isinstance(man1, Manager)) print(type(man1) == Employee, type(man1) == Manager) # #### => Create a `Developer` subclass which inherits from the `Employee` parent class. # - Programming Language(s) # ### Takeaways
workshop/01-inheritance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + from scipy.io import loadmat import plotly.offline as py import plotly.graph_objs as go import numpy as np import random from sklearn.model_selection import train_test_split py.init_notebook_mode() #importar funcion "plot" definida en "utils.py" from utils import plot # tensorflow import tensorflow as tf from tensorflow import layers # - #cargar archivo mat = loadmat("data/mnist.mat") mat # + #assignar data x = mat['X'] y = mat['y'] % 10 x.shape, y.shape # - #visualizar archivos algunas muestras for _ in range(1): n = random.randint(0, len(x) - 1) title = "Digit: {}".format(y[n][0]) plot(x[n], title, width=400, height=400) # + class Model(object): def __init__(self, learning_rate=0.01): self.graph = tf.Graph() with self.graph.as_default(): self.x = tf.placeholder(tf.float32, [None, 400]) self.y = tf.placeholder(tf.int64, [None, 1]) net = layers.dense(self.x, 256, activation=tf.nn.relu) net = layers.dense(net, 128, activation=tf.nn.relu) net = layers.dense(net, 64, activation=tf.nn.relu) logits = layers.dense(net, 10, activation=None) self.h = tf.nn.softmax(logits) self.prediction = tf.argmax(self.h, axis=1) labels = tf.one_hot(self.y, 10) self.loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits) self.loss = tf.reduce_mean(self.loss) self.update = tf.train.AdamOptimizer(learning_rate).minimize(self.loss) y1 = tf.expand_dims(tf.argmax(self.h, axis=1), 1) self.accuracy = tf.equal(y1, self.y) self.accuracy = tf.cast(self.accuracy, tf.float32) self.accuracy = tf.reduce_mean(self.accuracy) self.sess = tf.Session(graph=self.graph) self.sess.run(tf.global_variables_initializer()) def fit(self, x, y, epochs=2000, print_interval=100, batch_size=64): x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8) for step in xrange(epochs): idx = random.sample(range(len(x_train)), batch_size) x_batch, y_batch = x_train[idx], y_train[idx] _, loss = self.sess.run([self.update, self.loss], feed_dict={self.x: x_batch, self.y: y_batch}) if step % print_interval == 0: accuracy = self.sess.run(self.accuracy, feed_dict={self.x: x_test, self.y: y_test}) print "loss: {0}, accuracy: {1}".format(loss, accuracy) def predict(self, x): return self.sess.run(self.prediction, feed_dict={self.x: x}) model = Model() # - model.fit(x, y) # + n = random.randint(0, len(x) - 1) x_sample, y_sample = x[n:n+1, :], y[n][0] prediction = model.predict(x_sample)[0] title = "Digit: {0}, prediction: {1}".format(y_sample,prediction) plot(x_sample, title, width=400, height=400) # + language="bash" # #cat /run_jupyter.sh
notebooks/ep3/mnist-solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Porto Seguro’s Safe Driver Prediction # I got <NAME> code as a startpack: https://github.com/felipeeeantunes/udacity_live # ## Initializing import pandas as pd import numpy as np import seaborn as sns import missingno as msno import gc from time import time from multiprocessing import * # + import matplotlib.pyplot as plt # %matplotlib inline from IPython.display import set_matplotlib_formats set_matplotlib_formats('pdf', 'png') pd.options.display.float_format = '{:.2f}'.format rc={'savefig.dpi': 75, 'figure.autolayout': False, 'figure.figsize': [12, 8], 'axes.labelsize': 18,\ 'axes.titlesize': 18, 'font.size': 18, 'lines.linewidth': 2.0, 'lines.markersize': 8, 'legend.fontsize': 16,\ 'xtick.labelsize': 16, 'ytick.labelsize': 16} sns.set(style='dark',rc=rc) # - default_color = '#56B4E9' colormap = plt.cm.cool # Setting working directory path = '../data/raw/' # ## Loading Files train = pd.read_csv(path + 'train.csv') test = pd.read_csv(path + 'test.csv') y = train['target'] del train['target'] y.head(5) id_train = train['id'].values id_test = test['id'].values columns_original = list(train.columns) columns_original train.head(5) # + from sklearn.ensemble import RandomForestClassifier from xgboost import XGBClassifier from lightgbm import LGBMClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import cross_val_score # - def cross_val_model(X, y, model, n_splits=5): X = np.array(X) y = np.array(y) folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=15).split(X, y)) cross_score_mean = 0.0 t0 = time() for j, (train_idx, test_idx) in enumerate(folds): X_train = X[train_idx] y_train = y[train_idx] X_holdout = X[test_idx] y_holdout = y[test_idx] print ("Fit %s fold %d" % (str(model).split('(')[0], j+1)) model.fit(X_train, y_train) cross_score = cross_val_score(model, X_holdout, y_holdout, cv=3, scoring='roc_auc') print(" cross_score: %.5f (%.5f)" % (cross_score.mean(), cross_score.mean()*2-1)) print(" [%10d secs elapsed]: cross_score: %.5f (%.5f)" % (time()-t0, cross_score.mean(), cross_score.mean()*2-1)) cross_score_mean += cross_score.mean() cross_score_mean /= n_splits print("cross_score_mean: %.5f (%.5f)" % (cross_score_mean, cross_score_mean*2-1)) # ## Feature Engineering & Selection # Selected features from https://www.kaggle.com/ogrellier/xgb-classifier-upsampling-lb-0-283/code selected_features = [ "ps_car_13", # : 1571.65 / shadow 609.23 "ps_reg_03", # : 1408.42 / shadow 511.15 "ps_ind_05_cat", # : 1387.87 / shadow 84.72 "ps_ind_03", # : 1219.47 / shadow 230.55 "ps_ind_15", # : 922.18 / shadow 242.00 "ps_reg_02", # : 920.65 / shadow 267.50 "ps_car_14", # : 798.48 / shadow 549.58 "ps_car_12", # : 731.93 / shadow 293.62 "ps_car_01_cat", # : 698.07 / shadow 178.72 "ps_car_07_cat", # : 694.53 / shadow 36.35 "ps_ind_17_bin", # : 620.77 / shadow 23.15 "ps_car_03_cat", # : 611.73 / shadow 50.67 "ps_reg_01", # : 598.60 / shadow 178.57 "ps_car_15", # : 593.35 / shadow 226.43 "ps_ind_01", # : 547.32 / shadow 154.58 "ps_ind_16_bin", # : 475.37 / shadow 34.17 "ps_ind_07_bin", # : 435.28 / shadow 28.92 "ps_car_06_cat", # : 398.02 / shadow 212.43 "ps_car_04_cat", # : 376.87 / shadow 76.98 "ps_ind_06_bin", # : 370.97 / shadow 36.13 "ps_car_09_cat", # : 214.12 / shadow 81.38 "ps_car_02_cat", # : 203.03 / shadow 26.67 "ps_ind_02_cat", # : 189.47 / shadow 65.68 "ps_car_11", # : 173.28 / shadow 76.45 "ps_car_05_cat", # : 172.75 / shadow 62.92 "ps_calc_09", # : 169.13 / shadow 129.72 "ps_calc_05", # : 148.83 / shadow 120.68 "ps_ind_08_bin", # : 140.73 / shadow 27.63 "ps_car_08_cat", # : 120.87 / shadow 28.82 "ps_ind_09_bin", # : 113.92 / shadow 27.05 "ps_ind_04_cat", # : 107.27 / shadow 37.43 "ps_ind_18_bin", # : 77.42 / shadow 25.97 "ps_ind_12_bin", # : 39.67 / shadow 15.52 "ps_ind_14", # : 37.37 / shadow 16.65 "ps_car_11_cat" # Very nice spot from Tilii : https://www.kaggle.com/tilii7 ] # ### Adding Combs train.head(5) from sklearn.preprocessing import LabelEncoder # add combinations from https://www.kaggle.com/ogrellier/xgb-classifier-upsampling-lb-0-283/code combs = [ ('ps_reg_01', 'ps_car_02_cat'), ('ps_reg_01', 'ps_car_04_cat'), ] start = time() for n_c, (f1, f2) in enumerate(combs): name1 = f1 + "_plus_" + f2 print('current feature %60s %4d in %5.1f' % (name1, n_c + 1, (time() - start) / 60), end='') print('\r' * 75, end='') train[name1] = train[f1].apply(lambda x: str(x)) + "_" + train[f2].apply(lambda x: str(x)) test[name1] = test[f1].apply(lambda x: str(x)) + "_" + test[f2].apply(lambda x: str(x)) # Label Encode lbl = LabelEncoder() lbl.fit(list(train[name1].values) + list(test[name1].values)) train[name1] = lbl.transform(list(train[name1].values)) test[name1] = lbl.transform(list(test[name1].values)) len(selected_features) new_features = [f1 + '_plus_' + f2 for (f1, f2) in combs] selected_features.extend(new_features) new_features len(selected_features) # ### Categorical Target Encoding for categorical variables # from https://www.kaggle.com/ogrellier/xgb-classifier-upsampling-lb-0-283/code def add_noise(series, noise_level): return series * (1 + noise_level * np.random.randn(len(series))) # adapted from https://www.kaggle.com/ogrellier/xgb-classifier-upsampling-lb-0-283/code def target_encode(trn_series=None, tst_series=None, target=None, min_samples_leaf=1, smoothing=1, noise_level=0): """ Smoothing is computed like in the following paper by <NAME> https://kaggle2.blob.core.windows.net/forum-message-attachments/225952/7441/high%20cardinality%20categoricals.pdf trn_series : training categorical feature as a pd.Series tst_series : test categorical feature as a pd.Series target : target data as a pd.Series min_samples_leaf (int) : minimum samples to take category average into account smoothing (int) : smoothing effect to balance categorical average vs prior """ assert len(trn_series) == len(target) assert trn_series.name == tst_series.name temp = pd.concat([trn_series, target], axis=1) # Compute target mean averages = temp.groupby(by=trn_series.name)[target.name].agg(["mean", "count"]) # Compute smoothing smoothing = 1 / (1 + np.exp(-(averages["count"] - min_samples_leaf) / smoothing)) # Apply average function to all target data prior = target.mean() # The bigger the count the less full_avg is taken into account averages[target.name] = prior * (1 - smoothing) + averages["mean"] * smoothing averages.drop(["mean", "count"], axis=1, inplace=True) # Apply averages to trn and tst series ft_trn_series = pd.merge( trn_series.to_frame(trn_series.name), averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}), on=trn_series.name, how='left')['average'].rename(trn_series.name + '_mean').fillna(prior) # pd.merge does not keep the index so restore it ft_trn_series.index = trn_series.index ft_tst_series = pd.merge( tst_series.to_frame(tst_series.name), averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}), on=tst_series.name, how='left')['average'].rename(trn_series.name + '_mean').fillna(prior) # pd.merge does not keep the index so restore it ft_tst_series.index = tst_series.index return add_noise(ft_trn_series, noise_level), add_noise(ft_tst_series, noise_level) f_cats = [x for x in selected_features if "_cat" in x] for f in f_cats: train[f + "_avg"], test[f + "_avg"] = target_encode(trn_series=train[f], tst_series=test[f], target=y, min_samples_leaf=200, smoothing=10, noise_level=0) # verify transformation train.head(3) new_te_columns = [c for c in train.columns if '_avg' in c] new_te_columns selected_features.extend(new_te_columns) ### VERIFY: for x in new_te_columns: selected_features_te.remove(x[:-4]) selected_features positive_cases = len(y[y == 1]) negative_cases = len(y) - positive_cases positive_cases, negative_cases, len(y) # parameters from https://www.kaggle.com/ogrellier/xgb-classifier-upsampling-lb-0-283/code conf_xgb_model = { 'n_estimators': 200, 'max_depth': 4, 'objective': 'binary:logistic', 'learning_rate': 0.1, 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 1, 'reg_alpha': 0, 'reg_lambda': 1, 'nthread': -1, 'min_child_weight': 100, } xgb_model = XGBClassifier(**conf_xgb_model) cross_val_model(train[selected_features], y, xgb_model) conf_lgb_model = { 'boosting_type': 'gbdt', 'n_estimators': 200, 'max_depth': 4, 'objective': 'binary', 'metric': 'binary_logloss', 'learning_rate': 0.05, 'sub_feature': 0.8, 'num_leaves': 20, 'feature_fraction': 0.8, 'bagging_fraction': 0.7, 'bagging_freq': 3, } lgb_model = LGBMClassifier(**conf_lgb_model) cross_val_model(train[selected_features], y, lgb_model) # ## Ensembling # + increase = False class Ensemble(object): def __init__(self, n_splits, stacker, base_models): self.n_splits = n_splits self.stacker = stacker self.base_models = base_models self.S_train = None self.S_test = None def fit_predict(self, X, y, T): t0 = time() X = np.array(X) y = np.array(y) T = np.array(T) folds = list(StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=15).split(X, y)) S_train = np.zeros((X.shape[0], len(self.base_models))) S_test = np.zeros((T.shape[0], len(self.base_models))) for i, clf in enumerate(self.base_models): S_test_i = np.zeros((T.shape[0], self.n_splits)) for j, (train_idx, test_idx) in enumerate(folds): X_train = X[train_idx] y_train = y[train_idx] X_holdout = X[test_idx] y_holdout = y[test_idx] # Upsample during cross validation to avoid having the same samples # in both train and validation sets # Validation set is not up-sampled to monitor overfitting if increase: # Get positive examples pos = pd.Series(y_train == 1) # Add positive examples X_train = pd.concat([X_train, X_train.loc[pos]], axis=0) y_train = pd.concat([y_train, y_train.loc[pos]], axis=0) # Shuffle data idx = np.arange(len(X_train)) np.random.shuffle(idx) X_train = X_train.iloc[idx] y_train = y_train.iloc[idx] print ("Fitting %s fold %d" % (str(clf).split('(')[0], j+1)) clf.fit(X_train, y_train) y_pred = clf.predict_proba(X_holdout)[:,1] cross_score = cross_val_score(clf, X_holdout, y_holdout, cv=3, scoring='roc_auc') print(" [%10d secs elapsed]: cross_score: %.5f (%.5f)" % (time()-t0, cross_score.mean(), cross_score.mean()*2-1)) S_train[test_idx, i] = y_pred S_test_i[:, j] = clf.predict_proba(T)[:,1] S_test[:, i] = S_test_i.mean(axis=1) #print(" [%10d secs elapsed]: Stacker Score: %.5f (%.5f)" % (time()-t0, results.mean(), results.mean()*2-1)) self.S_train = S_train self.S_test = S_test if False: #cross validating stacker cross_val_model(S_train, y, self.stacker, n_splits=5) #Training with all training set (including validation) self.stacker.fit(S_train, y) res = self.stacker.predict_proba(S_test)[:,1] else: res = S_test.mean(axis=1) print(res) return res # - conf_log = { 'penalty':'l2', 'dual':False, 'tol':0.0001, 'C':1.0, 'fit_intercept':True, 'intercept_scaling':1, 'class_weight':None, 'random_state':None, 'solver':'liblinear', 'max_iter':100, 'multi_class':'ovr', 'verbose':0, 'warm_start':False, 'n_jobs':1 } log_model = LogisticRegression(**conf_log) stack = Ensemble(n_splits=5, stacker = log_model, base_models = (xgb_model, lgb_model)) y_pred = stack.fit_predict(train[selected_features], y, test) y_pred.S_test y_pred = stack.fit_predict(train[selected_features], y, test) # # Making a submission sub = pd.DataFrame() sub['id'] = id_test sub['target'] = y_pred sub.to_csv('stacked_xgb_lgb_v4.csv', index=False)
Peter_PortoSeguro_v4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # Single-subject example of SVM classification based on entire brain's voxels for CIMAQ memory encoding task (fMRI data). # # Trials (conditions) are classifierd according to either condition or memory performance (hit vs miss, correct vs incorrect source) # # + import os import sys import glob import numpy as np import pandas as pd import nilearn import scipy import nibabel as nb import sklearn import seaborn as sns import itertools from numpy import nan as NaN from matplotlib import pyplot as plt from nilearn import image, plotting from nilearn import datasets from nilearn.plotting import plot_stat_map, plot_roi, plot_anat, plot_img, show from nilearn.input_data import NiftiMasker from sklearn.model_selection import train_test_split from sklearn.svm import SVC from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score, f1_score from sklearn.model_selection import cross_val_predict, cross_val_score from sklearn.preprocessing import MinMaxScaler #libraries need to be installed in conda environment with pip install # - # Step 1: import brain imaging data # # **Note: I contrasted two different models: encoding trials modelled as separate regressors into one first-level model (MANY regressors...) # OR # a different model is created for each trial, where trials of no interest are modelled into two separate conditions (encoding or control); 2nd option was superior** # # I also contrasted a single model per trial, with other trials modelled as one ('other') condition, two conditions (encoding vs control task), and 4 conditions (control, miss, correct and wrong source). # # CONCLUSION: The two conditions model gave the best results, now used to create all betas in all subjects. # # + # subject dccid (identifier used in Loris-cimaq database) id = '122922' # Subject's anatomical scan (for display) and fMRI mask (normalized, non-linear) # directory where subject's functional mask and anatomical scan both reside anat_dir = '/Users/mombot/Documents/Simexp/CIMAQ/Data/anat/122922' # load subject's anatomical scan as nibabel image anat = nb.load(os.path.join(anat_dir, 'anat_sub122922_nuc_stereonl.nii')) # load mask of subject's functional MRI data as nibabel image mask = nb.load(os.path.join(anat_dir, 'func_sub122922_mask_stereonl.nii')) # visualize functional mask superimposed on subjet's anatomical image plot_roi(roi_img=mask, bg_img=anat, cmap='Paired') # sanity check: # verify that the functional mask from the NIAK preprocessing output directory (anat) # overlaps completely with the fMRI voxels (task epi scans) tscores = '/Users/mombot/Documents/Simexp/CIMAQ/Data/test/Output/122922/MultiModels/EncMinCTL_tscores_sub122922.nii' plot_stat_map(stat_map_img=tscores, bg_img=mask, cut_coords=(0, 0, 0), threshold=0.2, colorbar=True) # beta maps for ENCODING trials only, concatenated in temporal order (4D file) # each trial is a 3D .nii file # Note: a single model (design matrix) was used with a beta derived for each encoding trial (many regressors) # betas_enc4D = glob.glob('/Users/mombot/Documents/Simexp/CIMAQ/Data/Nistats/Betas/122922/SingleModel/concat*.nii')[0] # beta maps for ALL trials, # uploaded and concatenate a series of 3D images (1 per trial) in nilearn using the wild card # Note1: temporal order MUST be preserved when scans are ordered alphabetically (use left-padding for trial numbers) # Note2: separate models (design matrix) were used for each trial (1 beta for trial of interest, other trials bunched together) # Encoding and control trials of no interest were modelled as two separate conditions (2 regressors) betas_all3D_A = nilearn.image.load_img(img='/Users/mombot/Documents/Simexp/CIMAQ/Data/Nistats/Betas/122922/TrialContrasts/betas*nii', wildcards=True) # betas_all_4condi = nilearn.image.load_img(img='/Users/mombot/Documents/Simexp/CIMAQ/Data/Nistats/Betas/122922/OneModelPerTrial_4junkCondi/betas*nii', wildcards=True) #https://nilearn.github.io/modules/generated/nilearn.image.load_img.html#nilearn.image.load_img # - # Step 2: vectorize beta maps with nilearn's NiftiMasker to derive features for classification # # The NiftiMasker converts 4D beta-images into a 2D a vectorized data matrix (each 3D beta map becomes a 1D vector; rows = trials, columns = voxels) as input for machine learning. # # Masking: a normalized functional MRI data mask (outputted by NIAK) to determine which voxels to include in the data matrix # + #use NiftiMasker class to convert images into data matrices for decoding #create 2D array (numpy) as input for scikit-learn for decoding masker = NiftiMasker(mask_img=mask, standardize=True) # give the masker a filename and convert series of 3D beta maps into a 2D array #78 rows = encoding trials, 69924 columns = brain voxels, value = beta value # X_encTrials = masker.fit_transform(betas_enc4D) #177 rows = all trials, 69924 columns = brain voxels, value = beta value X_allTrials3D_A = masker.fit_transform(betas_all3D_A) # 4 junk conditions # X_allTrials_4condi = masker.fit_transform(betas_all_4condi) # print(X_encTrials.shape) #78 rows = trials, 69924 cols = voxels, val = beta weights print(X_allTrials3D_A.shape) #117 rows = trials, 69924 cols = voxels, val = beta weights # print(X_allTrials_4condi.shape) # - # Include only a subset of trials (e.g., to contrast hit vs missed trials, or correct vs wrong source) # Step 3: import the behavioural labels # + #All trial labels (different label sets) label_dir = '/Users/mombot/Documents/Simexp/CIMAQ/Data/Nistats/Events' labels_enco_ctl = glob.glob(os.path.join(label_dir, 'sub-'+id+'_enco_ctl.tsv'))[0] labels_hit_miss_ctl = glob.glob(os.path.join(label_dir, 'sub-'+id+'_ctl_miss_hit.tsv'))[0] labels_cs_ws_miss_ctl = glob.glob(os.path.join(label_dir, 'sub-'+id+'_ctl_miss_ws_cs.tsv'))[0] #Labels: Encoding and Control trials (all 117 trials) enco_ctl_labels = pd.read_csv(labels_enco_ctl, sep='\t') y_enco_ctl = enco_ctl_labels['condition'] print('Labels: Encoding and Control Trials') print(y_enco_ctl.head()) print(enco_ctl_labels.condition.value_counts()) #plot number of trials per label (enco, ctl) #Labels: Hit, Miss and Control trials (all 117 trials) hit_miss_ctl_labels = pd.read_csv(labels_hit_miss_ctl, sep='\t') y_hit_miss_ctl = hit_miss_ctl_labels['ctl_miss_hit'] print('\nLabels: Hit, Miss and Control Trials') print(y_hit_miss_ctl.head()) print(hit_miss_ctl_labels.ctl_miss_hit.value_counts()) #plot number of trials per label (hit, miss, ctl) #Labels: Correct Source, Wrong Source, Miss and Control trials (all 117 trials) cs_ws_miss_ctl_labels = pd.read_csv(labels_cs_ws_miss_ctl, sep='\t') y_cs_ws_miss_ctl = cs_ws_miss_ctl_labels['ctl_miss_ws_cs'] print('\nLabels: Correct Source, Wrong Source, Miss and Control Trials') print(y_cs_ws_miss_ctl.head()) print(cs_ws_miss_ctl_labels.ctl_miss_ws_cs.value_counts()) #plot number of trials per label (cs, ws, miss, ctl) # + #encoding trials modelled with single model #Encoding trial labels (78 labels): miss, wrong source, correct source # enclabel_dir = '/Users/mombot/Documents/Simexp/CIMAQ/Data/test/Output/Events' # label_enc = glob.glob(os.path.join(enclabel_dir, 'sub-*EncTrialTypes.tsv'))[0] # enc_labels = pd.read_csv(label_enc, sep='\t') # y_enc = enc_labels['enctrial_type'] #transform DataFrame into 1D array by extracting column # print('Labels: Correct Source, Wrong Source and Miss Trials (Encoding trials only)') # print(y_enc.head()) # enc_labels.enctrial_type.value_counts() # - # Step 4. Select a subset of trials of interest for classification (exclude other categories with a mask) # # + ## Create a mask from the labels (keep only labels of interest) ## Apply the mask to 2D fMRI data matrix: keep only rows (trials) of interest ## Apply the same mask to labels (exclude labels of no interest) #From model with all trials hit_miss_mask = y_hit_miss_ctl.isin(['hit', 'missed']) cs_miss_mask = y_cs_ws_miss_ctl.isin(['correctsource', 'missed']) cs_ws_mask_all = y_cs_ws_miss_ctl.isin(['wrongsource', 'correctsource']) X_HM_allTrials3D_A = X_allTrials3D_A[hit_miss_mask] print(X_HM_allTrials3D_A.shape) X_CsM_allTrials3D_A = X_allTrials3D_A[cs_miss_mask] print(X_CsM_allTrials3D_A.shape) X_CsWs_allTrials3D_A = X_allTrials3D_A[cs_ws_mask_all] print(X_CsWs_allTrials3D_A.shape) # X_HM_allTrials_4condi = X_allTrials_4condi[hit_miss_mask] # print(X_HM_allTrials_4condi.shape) # X_CsM_allTrials_4condi = X_allTrials_4condi[cs_miss_mask] # print(X_CsM_allTrials_4condi.shape) # X_CsWs_allTrials_4condi = X_allTrials_4condi[cs_ws_mask_all] # print(X_CsWs_allTrials_4condi.shape) y_hit_miss = y_hit_miss_ctl[hit_miss_mask] print(y_hit_miss.shape) y_cs_miss = y_cs_ws_miss_ctl[cs_miss_mask] print(y_cs_miss.shape) y_cs_ws_all = y_cs_ws_miss_ctl[cs_ws_mask_all] print(y_cs_ws_all.shape) # From model with only encoding trials # cs_ws_mask_enc= y_enc.isin(['wrongsource', 'correctsource']) # X_CsWs_encTrials = X_encTrials[cs_ws_mask_enc] # print(X_CsWs_encTrials.shape) # y_cs_ws_enc = y_enc[cs_ws_mask_enc] # print(y_cs_ws_enc.shape) # - # Step 5: stratify the data into training and testing sets # # See scikit-learn documentation here: # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html # # Define a training and a testing sample # Split the sample to training/test with a 60/40 ratio, stratify trials by condition, and shuffle the data # + ##Encoding vs control condition: #X_allTrials3D_A, y_enco_ctl #X_allTrials_4condi, y_enco_ctl ##Hit vs Miss: #X_HM_allTrials3D_A, y_hit_miss (58% correct; test_size: 0.3, cv:7) #X_HM_allTrials_4condi, y_hit_miss ##Correct Source vs Miss: #X_CsM_allTrials3D_A, y_cs_miss (66% correct; test_size: 0.3, cv: 7) #X_CsM_allTrials_4condi, y_cs_miss ##Correct vs Wrong Source: # one model (enc trials only): X_CsWs_encTrials, y_cs_ws_enc (35% correct) # one model per trial (all trials): X_CsWs_allTrials3D_A, y_cs_ws_all (58% correct) # one model, all trials, 4 junk condi: X_CsWs_allTrials_4condi, y_cs_ws_all X_train, X_test, y_train, y_test = train_test_split( X_CsM_allTrials3D_A, # x y_cs_miss, # y test_size = 0.4, # 60%/40% split shuffle = True, # shuffle dataset before splitting stratify = y_cs_miss, # keep distribution of conditions consistent betw. train & test sets random_state = 123) # same shuffle each time print('training:', len(X_train), 'testing:', len(X_test)) print(y_train.value_counts(), y_test.value_counts()) #fig,(ax1,ax2) = plt.subplots(2) #sns.countplot(y_train, ax=ax1, order=['Enc','CTL']) #ax1.set_title('Train') #sns.countplot(y_test, ax=ax2, order=['Enc','CTL']) #ax2.set_title('Test') # - # Step 6: train an SVM model # + my_first_svc = SVC(kernel='linear', class_weight='balanced') #define the model my_first_svc.fit(X_train, y_train) #train the model # MAKE SURE: mettre poids egal par categorie. # class-weight: balanced!!! # predict the training data based on the model y_pred = my_first_svc.predict(X_train) # calculate the model accuracy acc = my_first_svc.score(X_train, y_train) # calculate the model precision, recall and f1 in one report cr = classification_report(y_true=y_train, y_pred = y_pred) # get a table to help us break down these scores cm = confusion_matrix(y_true=y_train, y_pred = y_pred) # print results print('accuracy:', acc) print(cr) print(cm) # plot confusion matrix (training data) cmdf = pd.DataFrame(cm, index = ['Control','Encoding'], columns = ['Control','Encoding']) sns.heatmap(cmdf, cmap = 'RdBu_r') plt.xlabel('Predicted') plt.ylabel('Observed') # label cells in matrix for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j+0.5, i+0.5, format(cm[i, j], 'd'), horizontalalignment="center", color="white") # + #set up cross-validation to evaluate model performance #within 10 folds of training set # predict y_pred = cross_val_predict(my_first_svc, X_train, y_train, groups=y_train, cv=10) # scores acc = cross_val_score(my_first_svc, X_train, y_train, groups=y_train, cv=10) #Look at accuracy of prediction for each fold of the cross-validation for i in range(10): print('Fold %s -- Acc = %s'%(i, acc[i])) # + #look at the overall accuracy of the model overall_acc = accuracy_score(y_pred = y_pred, y_true = y_train) overall_cr = classification_report(y_pred = y_pred, y_true = y_train) overall_cm = confusion_matrix(y_pred = y_pred, y_true = y_train) print('Accuracy:',overall_acc) print(overall_cr) thresh = overall_cm.max() / 2 cmdf = pd.DataFrame(overall_cm, index = ['CTL','Enc'], columns = ['CTL','Enc']) sns.heatmap(cmdf, cmap='copper') plt.xlabel('Predicted') plt.ylabel('Observed') for i, j in itertools.product(range(overall_cm.shape[0]), range(overall_cm.shape[1])): plt.text(j+0.5, i+0.5, format(overall_cm[i, j], 'd'), horizontalalignment="center", color="white") # + #Scale the training data scaler = MinMaxScaler().fit(X_train) X_train_scl = scaler.transform(X_train) plt.imshow(X_train, aspect='auto') plt.colorbar() plt.title('Training Data') plt.xlabel('features') plt.ylabel('subjects') # - plt.imshow(X_train_scl, aspect='auto') plt.colorbar() plt.title('Scaled Training Data') plt.xlabel('features') plt.ylabel('subjects') # + #Repeat steps with scaled data # predict y_pred = cross_val_predict(my_first_svc, X_train_scl, y_train, groups=y_train, cv=10) # get scores overall_acc = accuracy_score(y_pred = y_pred, y_true = y_train) overall_cr = classification_report(y_pred = y_pred, y_true = y_train) overall_cm = confusion_matrix(y_pred = y_pred, y_true = y_train) print('Accuracy:',overall_acc) print(overall_cr) # plot thresh = overall_cm.max() / 2 cmdf = pd.DataFrame(overall_cm, index = ['Control','Encoding'], columns = ['Control','Encoding']) sns.heatmap(cmdf, cmap='copper') plt.xlabel('Predicted') plt.ylabel('Observed') for i, j in itertools.product(range(overall_cm.shape[0]), range(overall_cm.shape[1])): plt.text(j+0.5, i+0.5, format(overall_cm[i, j], 'd'), horizontalalignment="center", color="white") # + # Test model on unseen data from the test set # Scaled # Use the Scaler that was fit to X_train and apply to X_test, # rather than creating a new Scaler for X_test # X_test_scl = scaler.transform(X_test) # my_first_svc.fit(X_train_scl, y_train) # fit to training data # y_pred = my_first_svc.predict(X_test_scl) # classify age class using testing data # acc = my_first_svc.score(X_test_scl, y_test) # get accuracy # Unscaled my_first_svc.fit(X_train, y_train) y_pred = my_first_svc.predict(X_test) # classify age class using testing data acc = my_first_svc.score(X_test, y_test) # get accuracy cr = classification_report(y_pred=y_pred, y_true=y_test) # get prec., recall & f1 cm = confusion_matrix(y_pred=y_pred, y_true=y_test) # get confusion matrix # print results print('accuracy =', acc) print(cr) # plot results thresh = cm.max() / 2 cmdf = pd.DataFrame(cm, index = ['Control','Encoding'], columns = ['Control','Encoding']) sns.heatmap(cmdf, cmap='RdBu_r') plt.xlabel('Predicted') plt.ylabel('Observed') for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j+0.5, i+0.5, format(cm[i, j], 'd'), horizontalalignment="center", color="white") # + #Visualize model weights coef_ = my_first_svc.coef_ print(coef_.shape) #Return voxel weights into a nifti image using the NiftiMasker coef_img = masker.inverse_transform(coef_) print(coef_img) #Save .nii to file outdir = '/Users/mombot/Documents/Simexp/CIMAQ/Data/Nilearn/coefficient_maps' coef_img.to_filename(os.path.join(outdir, 'svm_coeff.nii')) #Plot on anatomical template plot_stat_map(stat_map_img=coef_img, bg_img=anat, cut_coords=(-2, -7, -7), threshold=0.0001, colorbar=True) # -
models/.ipynb_checkpoints/CIMAQ_withinSub_wholeBrain_SVM-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="breeding-extra" import PIL.Image import io from io import StringIO, BytesIO import IPython.display import numpy import ipywidgets from tha2.util import extract_PIL_image_from_filelike, resize_PIL_image, extract_pytorch_image_from_PIL_image, convert_output_image_from_torch_to_numpy import tha2.poser.modes.mode_20 import time import threading import torch FRAME_RATE = 30.0 DEVICE_NAME = 'cuda' device = torch.device(DEVICE_NAME) last_torch_input_image = None torch_input_image = None def show_pytorch_image(pytorch_image, output_widget=None): output_image = pytorch_image.detach().cpu() numpy_image = numpy.uint8(numpy.rint(convert_output_image_from_torch_to_numpy(output_image) * 255.0)) pil_image = PIL.Image.fromarray(numpy_image, mode='RGBA') IPython.display.display(pil_image) input_image_widget = ipywidgets.Output( layout={ 'border': '1px solid black', 'width': '256px', 'height': '256px' }) upload_input_image_button = ipywidgets.FileUpload( accept='.png', multiple=False, layout={ 'width': '256px' } ) output_image_widget = ipywidgets.Output( layout={ 'border': '1px solid black', 'width': '256px', 'height': '256px' } ) eyebrow_dropdown = ipywidgets.Dropdown( options=["troubled", "angry", "lowered", "raised", "happy", "serious"], value="troubled", description="Eyebrow:", ) eyebrow_left_slider = ipywidgets.FloatSlider( value=0.0, min=0.0, max=1.0, step=0.01, description="Left:", readout=True, readout_format=".2f" ) eyebrow_right_slider = ipywidgets.FloatSlider( value=0.0, min=0.0, max=1.0, step=0.01, description="Right:", readout=True, readout_format=".2f" ) eye_dropdown = ipywidgets.Dropdown( options=["wink", "happy_wink", "surprised", "relaxed", "unimpressed", "raised_lower_eyelid"], value="wink", description="Eye:", ) eye_left_slider = ipywidgets.FloatSlider( value=0.0, min=0.0, max=1.0, step=0.01, description="Left:", readout=True, readout_format=".2f" ) eye_right_slider = ipywidgets.FloatSlider( value=0.0, min=0.0, max=1.0, step=0.01, description="Right:", readout=True, readout_format=".2f" ) mouth_dropdown = ipywidgets.Dropdown( options=["aaa", "iii", "uuu", "eee", "ooo", "delta", "lowered_corner", "raised_corner", "smirk"], value="aaa", description="Mouth:", ) mouth_left_slider = ipywidgets.FloatSlider( value=0.0, min=0.0, max=1.0, step=0.01, description="Value:", readout=True, readout_format=".2f" ) mouth_right_slider = ipywidgets.FloatSlider( value=0.0, min=0.0, max=1.0, step=0.01, description=" ", readout=True, readout_format=".2f", disabled=True, ) def update_mouth_sliders(change): if mouth_dropdown.value == "lowered_corner" or mouth_dropdown.value == "raised_corner": mouth_left_slider.description = "Left:" mouth_right_slider.description = "Right:" mouth_right_slider.disabled = False else: mouth_left_slider.description = "Value:" mouth_right_slider.description = " " mouth_right_slider.disabled = True mouth_dropdown.observe(update_mouth_sliders, names='value') iris_small_left_slider = ipywidgets.FloatSlider( value=0.0, min=0.0, max=1.0, step=0.01, description="Left:", readout=True, readout_format=".2f" ) iris_small_right_slider = ipywidgets.FloatSlider( value=0.0, min=0.0, max=1.0, step=0.01, description="Right:", readout=True, readout_format=".2f", ) iris_rotation_x_slider = ipywidgets.FloatSlider( value=0.0, min=-1.0, max=1.0, step=0.01, description="X-axis:", readout=True, readout_format=".2f" ) iris_rotation_y_slider = ipywidgets.FloatSlider( value=0.0, min=-1.0, max=1.0, step=0.01, description="Y-axis:", readout=True, readout_format=".2f", ) head_x_slider = ipywidgets.FloatSlider( value=0.0, min=-1.0, max=1.0, step=0.01, description="X-axis:", readout=True, readout_format=".2f" ) head_y_slider = ipywidgets.FloatSlider( value=0.0, min=-1.0, max=1.0, step=0.01, description="Y-axis:", readout=True, readout_format=".2f", ) neck_z_slider = ipywidgets.FloatSlider( value=0.0, min=-1.0, max=1.0, step=0.01, description="Z-axis:", readout=True, readout_format=".2f", ) control_panel = ipywidgets.VBox([ eyebrow_dropdown, eyebrow_left_slider, eyebrow_right_slider, ipywidgets.HTML(value="<hr>"), eye_dropdown, eye_left_slider, eye_right_slider, ipywidgets.HTML(value="<hr>"), mouth_dropdown, mouth_left_slider, mouth_right_slider, ipywidgets.HTML(value="<hr>"), ipywidgets.HTML(value="<center><b>Iris Shrinkage</b></center>"), iris_small_left_slider, iris_small_right_slider, ipywidgets.HTML(value="<center><b>Iris Rotation</b></center>"), iris_rotation_x_slider, iris_rotation_y_slider, ipywidgets.HTML(value="<hr>"), ipywidgets.HTML(value="<center><b>Head Rotation</b></center>"), head_x_slider, head_y_slider, neck_z_slider, ]) controls = ipywidgets.HBox([ ipywidgets.VBox([ input_image_widget, upload_input_image_button ]), control_panel, ipywidgets.HTML(value="&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"), output_image_widget, ]) poser = tha2.poser.modes.mode_20.create_poser(device) pose_parameters = tha2.poser.modes.mode_20.get_pose_parameters() pose_size = poser.get_num_parameters() last_pose = torch.zeros(1, pose_size).to(device) iris_small_left_index = pose_parameters.get_parameter_index("iris_small_left") iris_small_right_index = pose_parameters.get_parameter_index("iris_small_right") iris_rotation_x_index = pose_parameters.get_parameter_index("iris_rotation_x") iris_rotation_y_index = pose_parameters.get_parameter_index("iris_rotation_y") head_x_index = pose_parameters.get_parameter_index("head_x") head_y_index = pose_parameters.get_parameter_index("head_y") neck_z_index = pose_parameters.get_parameter_index("neck_z") def get_pose(): pose = torch.zeros(1, pose_size) eyebrow_name = f"eyebrow_{eyebrow_dropdown.value}" eyebrow_left_index = pose_parameters.get_parameter_index(f"{eyebrow_name}_left") eyebrow_right_index = pose_parameters.get_parameter_index(f"{eyebrow_name}_right") pose[0, eyebrow_left_index] = eyebrow_left_slider.value pose[0, eyebrow_right_index] = eyebrow_right_slider.value eye_name = f"eye_{eye_dropdown.value}" eye_left_index = pose_parameters.get_parameter_index(f"{eye_name}_left") eye_right_index = pose_parameters.get_parameter_index(f"{eye_name}_right") pose[0, eye_left_index] = eye_left_slider.value pose[0, eye_right_index] = eye_right_slider.value mouth_name = f"mouth_{mouth_dropdown.value}" if mouth_name == "mouth_lowered_cornered" or mouth_name == "mouth_raised_corner": mouth_left_index = pose_parameters.get_parameter_index(f"{mouth_name}_left") mouth_right_index = pose_parameters.get_parameter_index(f"{mouth_name}_right") pose[0, mouth_left_index] = mouth_left_slider.value pose[0, mouth_right_index] = mouth_right_slider.value else: mouth_index = pose_parameters.get_parameter_index(mouth_name) pose[0, mouth_index] = mouth_left_slider.value pose[0, iris_small_left_index] = iris_small_left_slider.value pose[0, iris_small_right_index] = iris_small_right_slider.value pose[0, iris_rotation_x_index] = iris_rotation_x_slider.value pose[0, iris_rotation_y_index] = iris_rotation_y_slider.value pose[0, head_x_index] = head_x_slider.value pose[0, head_y_index] = head_y_slider.value pose[0, neck_z_index] = neck_z_slider.value return pose.to(device) display(controls) def update(change): global last_pose global last_torch_input_image if torch_input_image is None: return needs_update = False if last_torch_input_image is None: needs_update = True else: if (torch_input_image - last_torch_input_image).abs().max().item() > 0: needs_update = True pose = get_pose() if (pose - last_pose).abs().max().item() > 0: needs_update = True if not needs_update: return output_image = poser.pose(torch_input_image, pose)[0] with output_image_widget: output_image_widget.clear_output(wait=True) show_pytorch_image(output_image, output_image_widget) last_torch_input_image = torch_input_image last_pose = pose def upload_image(change): global torch_input_image for name, file_info in upload_input_image_button.value.items(): content = io.BytesIO(file_info['content']) if content is not None: pil_image = resize_PIL_image(extract_PIL_image_from_filelike(content)) w, h = pil_image.size if pil_image.mode != 'RGBA': with input_image_widget: input_image_widget.clear_output(wait=True) display(ipywidgets.HTML("Image must have an alpha channel!!!")) else: torch_input_image = extract_pytorch_image_from_PIL_image(pil_image).to(device) with input_image_widget: input_image_widget.clear_output(wait=True) show_pytorch_image(torch_input_image, input_image_widget) update(None) upload_input_image_button.observe(upload_image, names='value') eyebrow_dropdown.observe(update, 'value') eyebrow_left_slider.observe(update, 'value') eyebrow_right_slider.observe(update, 'value') eye_dropdown.observe(update, 'value') eye_left_slider.observe(update, 'value') eye_right_slider.observe(update, 'value') mouth_dropdown.observe(update, 'value') mouth_left_slider.observe(update, 'value') mouth_right_slider.observe(update, 'value') iris_small_left_slider.observe(update, 'value') iris_small_right_slider.observe(update, 'value') iris_rotation_x_slider.observe(update, 'value') iris_rotation_y_slider.observe(update, 'value') head_x_slider.observe(update, 'value') head_y_slider.observe(update, 'value') neck_z_slider.observe(update, 'value')
tha2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + def model_hyperparam_search(layers, activation_functions=['tanh', 'softmax', 'relu']): iterations = len(activation_functions)**layers af_combs = make_pairwise_list(max_depth=layers, options=activation_functions) print(f'{layers}\t{activation_functions}\t{iterations} iterations required') for iteration in range(iterations): print(f"running interation {iteration}") print("create input layer") for layer in range(layers): print(f"create hidden layer {layer} of type {af_combs[iteration][layer]}") print("create output layer") print("") def layer_search(hidden_layers=[1,3,5], activation_functions=None): for layer_count in hidden_layers: if not activation_functions: model_hyperparam_search(layer_count) else: model_hyperparam_search(layer_count, activation_functions) # - layer_search() def make_pairwise_list(max_depth=2, options=['tanh', 'softmax', 'relu']): combinations = [] for i in range(len(options)**max_depth): state = [] for depth in range(max_depth): if depth == 0: #print(f"{i:4}: {options[i // len(options)**(max_depth-1)]}", end=' ') state.append(options[i // len(options)**(max_depth-1)]) elif depth == max_depth - 1: #print(f"{options[i % len(options)]}", end=' ') state.append(options[i % len(options)]) else: #print(f"{options[i // len(options)**(depth) % len(options)]}", end=' ') state.append(options[i // len(options)**(depth) % len(options)]) #print("") combinations.append(state) return combinations make_pairwise_list(max_depth=5) 3**5 l = [] l.append(3) l.append('foo') l[3] for i in range(8): print(i) def do_not_use_example(max_depth=2, options=['tanh', 'softmax', 'relu']): state = [] for i in range(max_depth): state.append(0) # first entry in options for i in range(len(options)**max_depth): print(f"{i}") #for depth in range(max_depth): #print(f"{depth} : {options[i % (max_depth+1) * depth]}") #print(f"{depth} : {options[i % len(options)**(depth+1)]}") depth = 1 print(f" {i // len(options)**(max_depth-1)}") print(f" {i // len(options)**(depth) % len(options)}") print(f" {i % len(options)}") def make_pairwise_list_old(max_depth=2, options=['tanh', 'softmax', 'relu']): state = [] for i in range(max_depth): state.append(0) # first entry in options for i in range(len(options)**max_depth): for depth in range(max_depth): if depth == 0: print(f"{i:4}: {i // len(options)**(max_depth-1)}", end=' ') elif depth == max_depth - 1: print(f"{i % len(options)}", end=' ') else: print(f"{i // len(options)**(depth) % len(options)}", end=' ') print("")
prototype/synthetic_data/.ipynb_checkpoints/hyper parameter search playtest-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # [🥭 Mango Markets](https://mango.markets/) support is available at: [Docs](https://docs.mango.markets/) | [Discord](https://discord.gg/67jySBhxrg) | [Twitter](https://twitter.com/mangomarkets) | [Github](https://github.com/blockworks-foundation) | [Email](mailto:<EMAIL>) # # [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/blockworks-foundation/mango-explorer-examples/HEAD?labpath=PracticalRunMarketmakerSpot.ipynb) [Run this code](https://mybinder.org/v2/gh/blockworks-foundation/mango-explorer-examples/HEAD?labpath=PracticalRunMarketmakerSpot.ipynb) on Binder. # # _🏃‍♀️ To run this notebook press the ⏩ icon in the toolbar above._ # # 🥭 Practical: Run Marketmaker On Spot # # This notebook shows is very similar to the regular [Practical: Run Marketmaker example](https://mybinder.org/v2/gh/blockworks-foundation/mango-explorer-examples/HEAD?labpath=PracticalRunMarketmaker.ipynb). The only difference is this one runs on a Spot market instead of a Perp market. # # It contains the following sections: # 1. This section contains all the code that is identical to the [Practical: Run Marketmaker example](https://mybinder.org/v2/gh/blockworks-foundation/mango-explorer-examples/HEAD?labpath=PracticalRunMarketmaker.ipynb). # 2. This section contains the code that is new to this notebook, specifically for working with a Spot market. It contains one method - `build_spot_model_state()` - which loads Spot market data. # 3. This final section runs the marketmaker. # ## 1. Identical Code # # This code is all taken from the [Practical: Run Marketmaker example](https://mybinder.org/v2/gh/blockworks-foundation/mango-explorer-examples/HEAD?labpath=PracticalRunMarketmaker.ipynb) with no changes. If you followed along with that Practical then you have already seen all this code. # + import logging import mango import mango.marketmaking import time from datetime import timedelta from decimal import Decimal from mango.marketmaking.orderchain.chain import Chain from mango.marketmaking.orderchain.biasquoteonpositionelement import BiasQuoteOnPositionElement from mango.marketmaking.orderchain.ratioselement import RatiosElement from mango.marketmaking.orderchain.roundtolotsizeelement import RoundToLotSizeElement # Set logging so messages show up. logging.getLogger().setLevel(logging.INFO) def build_context(): return mango.ContextBuilder.build(cluster_name="devnet") def build_order_chain(): order_type = mango.OrderType.POST_ONLY spread = Decimal("0.005") # 0.5% position_size = Decimal("0.05") # 5% bias = [Decimal("0.0001")] ratios_element = RatiosElement(order_type, None, 20, [spread], [position_size], False) bias_element = BiasQuoteOnPositionElement(bias) round_to_lot_size_element = RoundToLotSizeElement() chain = Chain([ratios_element, bias_element, round_to_lot_size_element]) return chain def build_order_reconciler(): return mango.marketmaking.ToleranceOrderReconciler(Decimal("0.001"), Decimal("0.001"), timedelta(seconds=5)) def build_oracle(context, market): oracle_source_name = "pyth" oracle_provider = mango.create_oracle_provider(context, oracle_source_name) oracle = oracle_provider.oracle_for_market(context, market) if oracle is None: raise Exception(f"Could not find oracle for market {market.symbol} from provider {oracle_source_name}.") return oracle def build_marketmaker(context, wallet, account, market): chain = build_order_chain() order_reconciler = build_order_reconciler() instruction_builder = mango.instruction_builder(context, wallet, account, market.symbol, False) market_maker = mango.marketmaking.MarketMaker(wallet, market, instruction_builder, chain, order_reconciler, Decimal(0)) return market_maker # - # ## 2. Build the Spot `ModelState` # # In the previous [Practical: Run Marketmaker example](https://mybinder.org/v2/gh/blockworks-foundation/mango-explorer-examples/HEAD?labpath=PracticalRunMarketmaker.ipynb) the `build_model_state()` function handled building the `ModelState` for a Perp market. # # There are some important differences between Spot markets and Perp markets, so this section defines a `build_spot_model_state()` function to perform the equivalent actions to build the `ModelState` for a Spot market. def build_spot_model_state(context, account, market, oracle, group_address, cache_address): # Build a list of addresses and use `load_multiple()` to fetch them all in one go. addresses = [ group_address, cache_address, account.address, market.bids_address, market.asks_address, market.event_queue_address, *list([oo for oo in account.spot_open_orders if oo is not None]) ] account_infos = mango.AccountInfo.load_multiple(context, addresses) group = mango.Group.parse_with_context(context, account_infos[0]) cache = mango.Cache.parse(account_infos[1]) account = mango.Account.parse(account_infos[2], group, cache) spot_open_orders_account_infos_by_address = { str(account_info.address): account_info for account_info in account_infos[6:]} all_open_orders = {} for slot in account.slots: if slot.spot_open_orders is not None and str(slot.spot_open_orders) in spot_open_orders_account_infos_by_address: account_info: mango.AccountInfo = spot_open_orders_account_infos_by_address[str(slot.spot_open_orders)] open_orders: mango.OpenOrders = mango.OpenOrders.parse( account_info, slot.base_token_bank.token, account.shared_quote_token) all_open_orders[str(slot.spot_open_orders)] = open_orders group_slot = group.slot_by_spot_market_address(market.address) open_orders_address = account.spot_open_orders_by_index[group_slot.index] placed_orders_container: mango.PlacedOrdersContainer = all_open_orders[str(open_orders_address)] # Spot markets don't accrue MNGO liquidity incentives mngo = mango.token(context, "MNGO") mngo_accrued = mango.InstrumentValue(mngo, Decimal(0)) base_value = mango.InstrumentValue.find_by_symbol(account.net_values, market.base.symbol) quote_value = account.shared_quote.net_value event_queue = mango.SerumEventQueue.parse(account_infos[5]) frame = account.to_dataframe(group, all_open_orders, cache) available_collateral = account.init_health(frame) inventory = mango.Inventory(mango.InventorySource.ACCOUNT, mngo_accrued, available_collateral, base_value, quote_value) orderbook = market.parse_account_infos_to_orderbook(account_infos[3], account_infos[4]) price = oracle.fetch_price(context) return mango.ModelState(open_orders_address, market, mango.ManualUpdateWatcher(group), mango.ManualUpdateWatcher(account), mango.ManualUpdateWatcher(price), mango.ManualUpdateWatcher(placed_orders_container), mango.ManualUpdateWatcher(inventory), mango.ManualUpdateWatcher(orderbook), mango.ManualUpdateWatcher(event_queue)) # ## 8. Run the `MarketMaker` # # This code is also _nearly_ identical to the [Practical: Run Marketmaker example](https://mybinder.org/v2/gh/blockworks-foundation/mango-explorer-examples/HEAD?labpath=PracticalRunMarketmaker.ipynb). The differences are: # # * It loads market SOL/USDC instead of SOL-PERP # * It uses a `SpotCollateralCalculator` instead of a `PerpCollateralCalculator` # * It calls our new `build_spot_model_state()` to build the `ModelState` # # ### Caveat # # Please bear in mind that the code below uses a shared Solana `Keypair` and a shared Mango `Account`. If you're going to run this example more than once or twice, it might be better for you to set up a fresh devnet `Keypair` and `Account` for you to use - it's easy and free, and it means other people running this example won't accidentally cancel your orders or cause confusion. # + # Use our hard-coded devnet wallet for DeekipCw5jz7UgQbtUbHQckTYGKXWaPQV4xY93DaiM6h. # For real-world use you'd load the bytes from the environment or a file. wallet = mango.Wallet(bytes([67,218,68,118,140,171,228,222,8,29,48,61,255,114,49,226,239,89,151,110,29,136,149,118,97,189,163,8,23,88,246,35,187,241,107,226,47,155,40,162,3,222,98,203,176,230,34,49,45,8,253,77,136,241,34,4,80,227,234,174,103,11,124,146])) # Specify the market we're going to use market_symbol = "SOL/USDC" # Configure how long to pause between pulses pause_seconds = 30 # Create a 'devnet' Context context = build_context() # Load the wallet's account group = mango.Group.load(context) accounts = mango.Account.load_all_for_owner(context, wallet.address, group) account = accounts[0] # Load the market market = mango.market(context, market_symbol) oracle = build_oracle(context, market) marketmaker = build_marketmaker(context, wallet, account, market) stop_requested = False while not stop_requested: try: model_state = build_spot_model_state(context, account, market, oracle, group.address, group.cache) marketmaker.pulse(context, model_state) # Wait and hope for fills. print(f"Pausing for {pause_seconds} seconds.\n") time.sleep(pause_seconds) except KeyboardInterrupt: stop_requested = True except Exception as exception: print(f"Continuing after problem running market-making iteration: {exception}") market_operations = mango.operations(context, wallet, account, market.symbol, dry_run=False) for order in market_operations.load_my_orders(): market_operations.cancel_order(order, ok_if_missing=True) context.dispose() print("Example complete")
PracticalRunMarketmakerSpot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="T38hrerpErjZ" # ## Imports # + colab={} colab_type="code" id="ZXanaT_NErjc" outputId="52d11e59-a07a-4f87-f1d2-aff667eab719" # %matplotlib inline import matplotlib.pyplot as plt import tensorflow as tf import numpy as np from scipy.spatial.distance import cdist # + from keras.models import Sequential from keras.layers import Dense, GRU, Embedding,LSTM from keras.optimizers import Adam from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences import imdb # - imdb.maybe_download_and_extract() # + colab={} colab_type="code" id="qjkKqD0ZErkr" x_train_text, y_train = imdb.load_data(train=True) x_test_text, y_test = imdb.load_data(train=False) # + colab={} colab_type="code" id="oGnmAL9KErkw" outputId="e5e4654c-3d63-45a8-ecb6-05540190c9f9" print("Train-set size: ", len(x_train_text)) print("Test-set size: ", len(x_test_text)) # + colab={} colab_type="code" id="CPfric20Erk8" data_text = x_train_text + x_test_text # + colab={} colab_type="code" id="OkmXdmwNErlF" outputId="a3826def-e216-4da2-a965-f13f09482bfd" x_train_text[1] # + colab={} colab_type="code" id="aWZwW7VTErlV" outputId="52b1032a-6ab1-4374-a76a-8935a22d1eb3" y_train[1] # + colab={} colab_type="code" id="NESSYIENErlh" num_words = 10000 # + colab={} colab_type="code" id="fdW8mPjHErln" tokenizer = Tokenizer(num_words=num_words) # + colab={} colab_type="code" id="JK1PKKFqErlu" outputId="edd91066-8264-443c-c840-4f622b69d820" # %%time tokenizer.fit_on_texts(data_text) # + colab={} colab_type="code" id="fVGgdsFUErmA" outputId="a3cac7af-42bb-4798-aa51-27f9a21869c5" tokenizer.word_index # + colab={} colab_type="code" id="Mztk1-WhErmN" x_train_tokens = tokenizer.texts_to_sequences(x_train_text) # + colab={} colab_type="code" id="CoG5RqTOErmX" outputId="827ff97f-144f-476a-b4c3-757a81184c4c" x_train_text[1] # + colab={} colab_type="code" id="RsAn07c5Ermi" outputId="2e80c095-67f3-4dd0-f205-1b0afceb112d" np.array(x_train_tokens[1]) # + colab={} colab_type="code" id="HfJq6TlkErms" x_test_tokens = tokenizer.texts_to_sequences(x_test_text) # + colab={} colab_type="code" id="8M6qOOF6Ermx" num_tokens = [len(tokens) for tokens in x_train_tokens + x_test_tokens] num_tokens = np.array(num_tokens) # + colab={} colab_type="code" id="LDO-7jrYErm3" outputId="2d42f001-e38b-443f-d56a-0b392e35dd94" np.mean(num_tokens) # + colab={} colab_type="code" id="YSv5fZdvErm-" outputId="1565e3c0-f7dd-4812-b0d6-b69fe356965a" np.max(num_tokens) # + colab={} colab_type="code" id="LReoSdCpErnE" outputId="1308f52d-7861-4d3b-98d8-1ba04b8f9a02" max_tokens = np.mean(num_tokens) + 2 * np.std(num_tokens) max_tokens = int(max_tokens) max_tokens # + colab={} colab_type="code" id="_Tmr1gp2ErnM" outputId="c4699271-ed30-42d4-cf6c-8389c3685946" np.sum(num_tokens < max_tokens) / len(num_tokens) # + colab={} colab_type="code" id="pGmUKDAcErnW" pad = 'pre' # + colab={} colab_type="code" id="Vntz5mtdErng" x_train_pad = pad_sequences(x_train_tokens, maxlen=max_tokens, padding=pad, truncating=pad) # + colab={} colab_type="code" id="uDDMC0o3Ernl" x_test_pad = pad_sequences(x_test_tokens, maxlen=max_tokens, padding=pad, truncating=pad) # + colab={} colab_type="code" id="tdzEJH4HErnt" outputId="95079dfe-8493-4d23-fc33-a03936d77c12" x_train_pad.shape # + colab={} colab_type="code" id="lWs9qmFrErn1" outputId="9b2d910e-cae9-488f-9488-cb913a9befbf" x_test_pad.shape # + colab={} colab_type="code" id="p8quYteKErn_" outputId="48bfbcff-9862-4c4c-e7a5-746db00a8fbf" np.array(x_train_tokens[1]) # + colab={} colab_type="code" id="3BmbpeLuEroH" outputId="97c40ed2-1bad-433b-9598-ba8d56dba91d" x_train_pad[1] # + colab={} colab_type="code" id="-ZUM3gBnEroR" idx = tokenizer.word_index inverse_map = dict(zip(idx.values(), idx.keys())) # + colab={} colab_type="code" id="of7ufqzUEroY" def tokens_to_string(tokens): words = [inverse_map[token] for token in tokens if token != 0] text = " ".join(words) return text # + colab={} colab_type="code" id="hERApKXuErol" outputId="c6c7cb52-10f7-4a03-d587-d14bf15705db" x_train_text[1] # + colab={} colab_type="code" id="jDMxEB0PErot" outputId="e9f07cf4-bd72-4412-8949-e8504c217a2a" tokens_to_string(x_train_tokens[1]) # + colab={} colab_type="code" id="tADzVVW7Ero2" model = Sequential() # + colab={} colab_type="code" id="QDc9llG_ErpA" embedding_size = 8 # + colab={} colab_type="code" id="G_DkycySErpH" model.add(Embedding(input_dim=num_words, output_dim=embedding_size, input_length=max_tokens, name='layer_embedding')) # + colab={} colab_type="code" id="oB-SQXzwErpK" outputId="ce78d14e-92a2-4b52-bc45-95c16eef68c1" model.add(LSTM(units=16, return_sequences=True)) # + colab={} colab_type="code" id="Ag454EJIErpR" model.add(LSTM(units=8, return_sequences=True)) # + colab={} colab_type="code" id="ObyTYgH2ErpY" model.add(LSTM(units=4)) # + colab={} colab_type="code" id="zdpuajPLErpa" model.add(Dense(1, activation='sigmoid')) # + colab={} colab_type="code" id="dJCcLWuMErpe" optimizer = Adam(lr=1e-3) # + colab={} colab_type="code" id="edclRAJyErpn" outputId="d121b9d8-3fd8-4a01-9dcc-4aa20aa96cc0" model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) # + colab={} colab_type="code" id="Juk3qPOhErpu" outputId="7d74b15a-9a4d-45dd-aefc-6d974a5f9e60" model.summary() # + colab={} colab_type="code" id="sQIWIvfCErpz" outputId="26897b0a-4843-4d00-fa14-c719be65c6c8" # %%time model.fit(x_train_pad, y_train, validation_split=0.05, epochs=1, batch_size=64) # + colab={} colab_type="code" id="jN3E9TOGErp2" outputId="c07fbd7a-7f89-4c63-a634-f6601b53b91c" # %%time result = model.evaluate(x_test_pad, y_test) # + colab={} colab_type="code" id="dMUrGwYdErp8" outputId="264f7de7-f9b2-4dd5-97d0-6ab5d185bcb1" print("Accuracy: {0:.2%}".format(result[1])) # + [markdown] colab_type="text" id="leteUZaSErqC" # ## Example of Mis-Classified Text # + colab={} colab_type="code" id="0wzSrjt5ErqC" outputId="8121bf5a-f4c0-4978-ed05-46a9cd9e71e1" # %%time y_pred = model.predict(x=x_test_pad[0:1000]) y_pred = y_pred.T[0] # + [markdown] colab_type="text" id="f_-i3_XHErqG" # These predicted numbers would be between 0.0 and 1.0. So we set a threshold value of 0.5,and say that all values above 0.5 are taken to be 1.0 and all values below 0.5 are taken to be 0.0. This gives us predicted "class" of either 0.0 or 1.0. # + colab={} colab_type="code" id="w8knCkGzErqK" cls_pred = np.array([1.0 if p>0.5 else 0.0 for p in y_pred]) # + colab={} colab_type="code" id="D5tDGWatErqO" cls_true = np.array(y_test[0:1000]) # + colab={} colab_type="code" id="BZwta8cAErqS" incorrect = np.where(cls_pred != cls_true) incorrect = incorrect[0] # + colab={} colab_type="code" id="otik73fIErqX" outputId="c6824166-4bd4-441e-e290-4dad388285f2" len(incorrect) # + colab={} colab_type="code" id="G-Ap_4k5Erqe" outputId="a76502c4-cab4-4c05-d1f2-010a6b12a668" idx = incorrect[0] idx # + [markdown] colab_type="text" id="Q3eLtei9Erqi" # The mis-classified text is: # + colab={} colab_type="code" id="qByrEz5zErqk" outputId="eb20d174-0d2b-4fee-bc7e-d5599108ee49" text = x_test_text[idx] text # + colab={} colab_type="code" id="WbP0aeHTErqr" outputId="5421a5b1-36be-47a9-aad6-9160a0228536" y_pred[idx] # + colab={} colab_type="code" id="gGVHbIUmErq0" outputId="7b9ca1e3-6668-4705-e4ac-7d1d0823e81e" cls_true[idx] # - # ## I have used LSTM layers,but you experiment with GRU layers too.With training for more epochs and increasing the dimensions of the recurrent layers accuracy can be improved # + [markdown] colab_type="text" id="uWnyKL_3ErrJ" # ## To gain more insights explore the embedding matrix # ## Don't forget to try with your own reviews
Sentiment Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np # ## zeros((row, column)) x = np.zeros((3,2), dtype=int) print(x) print(x.dtype) # ## ones((row, column)) x = np.ones((3,2)) print(x) # ## Identity matrix # np.eye(n) x = np.eye(4) print(x) # ## arange(start, end, step) # if no end or step arugment is specified then first arugment will be end parameter and step will be 1. Though `arange` be used to floating point value but better solution is `linespace` x = np.arange(10) print(x) # ## linespace(start, end, step) x = np.linspace(4, 10.5, 25, endpoint=False) print(x) # ## reshape(ndarray, (row, column)) x = np.arange(20) x = np.reshape(x, (4,5)) print(x) x = np.arange(25).reshape(5,5) print(x) # ## random.random(row, column) # return a matrix with random value between 0 and 1 x = np.random.random((3,3)) print(x) # ## random.randint(start, end, (row, column)) # return a matrix with random integer value x = np.random.randint(4,10, (4,4)) print(x) # hello world
Jupyter-Notebook/Numpy/Using Built-in Functions to Create ndarrays.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from bs4 import BeautifulSoup as soup from urllib.request import urlopen as ureq from selenium import webdriver import time import re url = 'https://ucalgary.ca/pubs/calendar/current/en-4-1.html' # + chrome_options = webdriver.ChromeOptions() chrome_options.add_argument('--ignore-certificate-errors') chrome_options.add_argument('--incognito') chrome_options.add_argument('--headless') driver = webdriver.Chrome("C:\\Users\\jerry\\Downloads\\chromedriver", options=chrome_options) # - driver.get(url) time.sleep(3) # # 1. Collect Course links for driver to click on page_soup = soup(driver.page_source, 'lxml') container = page_soup.find("span", {"id": "ctl00_ctl00_pageContent_ctl00_ctl02_cnBody"}) container link_texts = container.findAll("a", {"class": "link-text"}) link_texts # # 2. Test run - try to scrape the first course pattern = re.compile("[A-Za-z ]*[0-9]{3}") # + link = driver.find_element_by_partial_link_text(link_text[0].text) link.click() driver.current_url # - # ### will need to extract the table id from the link to identify the target course's container type(driver.current_url) table_id = re.search("[0-9]{5}", driver.current_url) table_id = table_id.group() table_id page_soup = soup(driver.page_source, 'lxml') container = page_soup.find("a", {"name": table_id}) container.parent # + container = container.parent course_info = container.findAll("span", {"class": "course-code"}) course_code = course_info[0].text + " " + course_info[1].text course_name = course_info[2].text print(course_code) print(course_name) # - course_desc = container.find("span", {"class": "course-desc"}).text course_desc driver.back() driver.current_url # + driver.get("https://ucalgary.ca/pubs/calendar/current/en-4-1.html") link = driver.find_element_by_link_text(link_texts[1].text) link.click() page_soup = soup(driver.page_source, 'lxml') table_id = driver.current_url[-5:] container = page_soup.find("a", {"name": table_id}) driver.back() driver.current_url # - # # 3. Test run complete. Implement automation script to scrape all courses # + from selenium.common.exceptions import NoSuchElementException course_codes = [] course_names = [] course_descs = [] counter = 0 driver.get("https://ucalgary.ca/pubs/calendar/current/en-4-1.html") for link_text in link_texts: if bool(re.match(pattern, link_text.text)): #go to course page try: link = driver.find_element_by_link_text(link_text.text) except NoSuchElementException: print("no link for {}".format(link_text.text)) continue time.sleep(2) link.click() time.sleep(2) #scrape course info page_soup = soup(driver.page_source, 'lxml') #get the id of the table containing the specific course information starting_index = driver.current_url.index("#")+1 table_id = driver.current_url[starting_index:] #locate the container/table using the id container = page_soup.find("a", {"name": table_id}) container = container.parent course_info = container.findAll("span", {"class": "course-code"}) course_codes.append(course_info[0].text.strip() + " " + course_info[1].text.strip()) course_names.append(course_info[2].text.strip()) course_descs.append(container.find("span", {"class": "course-desc"}).text) print("Scraped ", course_codes[-1]) counter += 1 #go to course list page driver.back() time.sleep(2) print("Finished scraping {} courses".format(counter)) # - # # 4. Inspect and write to CSV course_codes course_names course_descs # + import pandas as pd df = pd.DataFrame({ "Course Number": course_codes, "Course Name": course_names, "Course Description": course_descs }) df.to_csv('UCalgary_Engineering_Common_First_Year_Courses.csv', index = False) # - driver.quit()
Web-Scraping Scripts and Data/Accredited Canadian English Undergrad MechEng Programs/UCalgary/WS_UCalgary_Engineering_Common_First_Year.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] id="b9918d88-edd6-44b5-8919-e7c24f94005a" tags=[] # # (study) 파이썬 입문 (13주차) 5월23일 # > 클래스 공부 6단계 # # - toc:true # - branch: master # - badges: true # - comments: false # - author: 최서연 # - categories: [Python, class, 사용자정의함수] # + [markdown] id="cc2db806-d941-4a46-a0d5-46433621532c" # ## 강의영상 # + [markdown] id="eee74c77-a438-4b4f-9216-38b1c7016437" # > youtube: https://youtube.com/playlist?list=PLQqh36zP38-xw_UHDXp5ZpYywkCxpN4SE # + [markdown] id="6e059796-2fac-4016-8f4d-140665866c91" # `-` (1/4) 인사관리 예제 (1) # # `-` (2/4) 인사관리 예제 (2) # # `-` (3/4) 리스트의 상속 # # `-` (4/4) 사용자정의 자료형의 유용함 # + [markdown] id="842d2d65-c3b6-495a-aca4-8c680c1fd8f1" # ## 클래스공부 6단계 # + [markdown] id="3efb395a-9882-4030-a00d-d246e8389240" # `-` 상속 # + [markdown] id="dbdcdb38-ce76-4e11-b961-d72cd89deb2a" # ### 인사관리 예제 # + [markdown] id="6f5ba495-4616-4961-86d5-795937591fd1" # `-` 아래와 같은 클래스를 만들자. # - 이름, 직급, 연봉에 대한 정보가 있다. # - 연봉을 올려주는 메소드가 존재함. # + id="a0c75b30-d56e-4009-9d5a-32ea3d6d2da4" class Employee: def __init__(self,name,position=None,pay=0): self.name = name self.position = position self.pay = pay def _repr_html_(self): html_str = """ 이름: {} <br/> 직급: {} <br/> 연봉: {} <br/> """.format(self.name,self.position,self.pay) return html_str def giveraise(self,pct): self.pay = self.pay * (1+pct) # + [markdown] id="dfbffc83-9a8e-4c30-ade5-a3da3290aa08" # `-` 확인 # + id="d95d671b-79d9-413d-b5d0-b87951df35b7" iu=Employee('iu',position='staff',pay=5000) hynn=Employee('hynn',position='staff',pay=4000) hd=Employee('hodong',position='mgr',pay=8000) # + id="09209c32-5c7d-414f-aa60-7dc6167b062d" outputId="d4bb0d33-3377-4b8f-c0d9-9a2e2e77a116" iu # + id="f5ae380c-d1be-419d-b24e-0d17d72d72d9" outputId="0477dd32-ac9a-4c68-91b8-0a2e4f3506e2" iu.giveraise(0.1) iu # + id="46b0d5f5-5da0-41b6-9dd3-b1963c38e0b1" outputId="9a955922-eddc-4a6b-96f9-ea51ecfba47b" hynn.giveraise(0.2) hynn # + [markdown] id="c0f2bb0d-b759-497f-8964-d5398b845dff" # `-` 회사의 모든 직원의 연봉을 10%씩 올려보자. # + id="a830ac20-a629-43b7-97e0-1f256f6a28b8" iu=Employee('iu',position='staff',pay=5000) hynn=Employee('hynn',position='staff',pay=4000) hd=Employee('hodong',position='mgr',pay=8000) # + id="bcd8303b-101a-48cc-a952-8b7c47f4dc38" for i in [iu, hynn, hd]: i.giveraise(0.1) # + id="83af4bca-785d-4251-8428-fa33d939d9dd" outputId="454b4dbc-9722-4f3c-87d6-50cc3a86f9ea" iu # + id="2c707924-d946-4bca-b91c-eff4c9b3d6fd" outputId="dd3c22dc-a8fc-4602-d152-a15b36b4473f" hynn # + id="b55e423d-3ecb-42d5-9eba-cbfa3a731265" outputId="cdd14c4c-a14e-4950-fdc7-05fbfdc591d2" hd # + [markdown] id="6f88fd31-6465-4805-9a3f-68081af1ec86" # `-` 매니저직급은 일반직원들의 상승분에서 5%의 보너스가 추가되어 상승한다고 가정하고 모든 직원의 연봉을 10%씩 올리는 코드를 구현해보자. # + [markdown] id="d271c537-317c-4a63-9884-e9ed5bd127a5" # (구현1) # + id="d01cf1cb-a271-4a6a-84a9-b40f66ca5e9d" iu=Employee('iu',position='staff',pay=5000) hynn=Employee('hynn',position='staff',pay=4000) hd=Employee('hodong',position='mgr',pay=8000) # + id="0e842e11-4e25-4dee-9be2-9b86241c3118" for i in [iu,hynn,hd]: if i.position == 'mgr': i.giveraise(0.1 + 0.05) else: i.giveraise(0.1) # + id="976e0669-8001-4207-82ea-7a5c1db58a9c" outputId="b660ffe6-d3e3-4c67-cb28-79a99f7fc289" iu # + id="1fbf37ef-20f4-47f7-a0bd-da6d8f40cb8d" outputId="3603a7d1-ff13-4265-d3e5-0274611f6c6c" hynn # + id="41c78669-4a41-48b3-96b2-6a93aeb7c3ba" outputId="0855a424-7dcf-4d21-cdc5-0fedcd0dd6a4" hd # + [markdown] id="3503967a-0601-41c3-87af-7e9c22dfb931" # (구현2) 새로운 클래스를 만들자 # + id="0601c97f-71f4-41d1-8ab5-5a17a12d8d7c" class Manager: def __init__(self,name,position=None,pay=0): self.name = name self.position = position self.pay = pay def _repr_html_(self): html_str = """ 이름: {} <br/> 직급: {} <br/> 연봉: {} <br/> """.format(self.name,self.position,self.pay) return html_str def giveraise(self,pct): self.pay = self.pay * (1+pct+0.05) # + id="9b3c460e-412f-446e-a7cc-627a166d6ab0" iu=Employee('iu',position='staff',pay=5000) hynn=Employee('hynn',position='staff',pay=4000) hd=Manager('hodong',position='mgr',pay=8000) # + id="77699b3f-94eb-435f-8c4a-19a646969ebd" for i in [iu,hynn,hd]: i.giveraise(0.1) # + id="2e625f58-e686-4af5-8646-eea6a6fe8f90" outputId="3dbd43a4-6273-4073-b460-6a3d82810ee0" iu # + id="6c18b268-1cea-48b9-818e-e5182e6aa353" outputId="3b4772d7-e530-42f4-cdba-0e4b2a765eef" hynn # + id="d9aa0577-ee2d-4f53-9dde-326ce92928e0" outputId="58ba4355-63bd-4065-93be-7882b50d0734" hd # + [markdown] id="63696539-febb-4b64-8663-4492002be013" # (구현3) 상속이용! # + id="ec845610-0b5b-4bc1-bc3d-b645896adab5" class Manager(Employee): def giveraise(self,pct): self.pay = self.pay * (1+pct+0.05) # + id="e2dc364e-ab0c-483d-80bf-9ba473c18842" iu=Employee('iu',position='staff',pay=5000) hynn=Employee('hynn',position='staff',pay=4000) hd=Manager('hodong',position='mgr',pay=8000) # + id="36aff4c6-a922-44e4-9d22-8b0bd719b548" for i in [iu,hynn,hd]: i.giveraise(0.1) # + id="871f48c2-9795-465c-bf10-952a2649deed" outputId="15e309d2-f2e6-40cb-932a-6178d07607b1" iu # + id="d6621ea0-6119-4c4b-8997-9ca226d95c1e" outputId="825f6711-e236-4e04-c2c5-2389291e34e1" hynn # + id="6659ddf9-a27c-432a-a286-6f88e314b7d0" outputId="bb3b2dbc-f477-4b05-baa3-0c9f03401918" hd # + [markdown] id="f9dd6bec-7d2c-476f-9d42-561d8fb50412" # `-` 요약: 이미 만들어진 클래스에서 대부분의 기능은 그대로 쓰지만 일부기능만 변경 혹은 추가하고 싶다면 클래스를 상속하면 된다! # + [markdown] id="cce1e030-7eef-49ae-9455-5c800828abd6" # ### 리스트의 상속 # + [markdown] id="da4e9e2f-b35b-473a-b40f-d180a06efefe" # - ref: http://www.kyobobook.co.kr/product/detailViewKor.laf?mallGb=KOR&ejkGb=KOR&barcode=9791165213190 # + [markdown] id="6b01fd93-fae1-4885-8ae3-33afcbd1734d" # `-` list와 비슷한데 멤버들의 빈도가 계산되는 메소드를 포함하는 새로운 나만의 list를 만들고 싶다. # + id="5ae7032a-cb96-41b2-892e-587eacaf826c" outputId="7bc59178-4a06-465e-b3ec-28ecbd36b8fa" lst = ['a','b','a','c','b','a','d'] lst # + [markdown] id="daa0d71c-95be-4917-b7b0-e1ae4767f687" # `-` 아래와 같은 딕셔너리를 만들고 싶다. # + id="d3f4be91-172b-4fbb-b210-fe874f5e2a56" outputId="48538748-7e3f-4337-979e-f66c2cf16d66" freq = {'a':3, 'b':2, 'c':1, 'd':1} freq # + [markdown] id="bb5a0891-6351-4d93-8792-99973fef15d8" # - `lst.frequency()`를 입력하면 위의 기능이 수행되도록 변형된 list를 쓰고 싶다. # + [markdown] id="36956582-5cc7-4e39-ba63-c96a462746be" # `-` 구현 # + [markdown] id="e8a8df8f-aee1-4b7e-bf63-26c805a4165c" # (시도1) 반쯤 성공? # + id="70c6221a-e2b5-4482-a031-0bd279e55736" outputId="ab839fa3-2076-4ce2-8fcf-97c5ca5dacbb" lst # + id="49b1c694-74e6-477e-9e23-eb661b421b21" outputId="a8ee1c03-c31b-47ca-9a52-b148e032e6ec" freq = {'a':0, 'b':0, 'c':0, 'd':0} freq # + id="cd2d2d5c-360f-4c24-a92c-fa112c712534" for item in lst: freq[item] = freq[item] + 1 # + id="4bd872e5-fff7-426c-98bb-68278f54ca75" outputId="2481b695-48fe-4fdb-f5f3-442b576f4d8e" freq # + [markdown] id="4f18d762-705f-4fd8-8daf-e32ecf0331bb" # (시도2) 실패 # + id="507ab8fb-25c2-40a8-8ee2-e52a1b324c74" outputId="fd114be9-8bb2-4c83-e713-b3aee5bdeec9" lst # + id="c855398b-eedf-4261-84f1-e6c0a18f755b" outputId="c6ec5f9d-82e8-49fb-d647-1dd64e3a5477" freq = dict() freq # + id="24986dc3-5216-4046-a4af-4b900a7863e1" outputId="c6d3f2ff-87b1-4a64-9fd5-5fb1f833d67e" for item in lst: freq[item] = freq[item] + 1 # + [markdown] id="ea1b55fd-8b1f-4ee5-a85a-1bc281e2d3b5" # 에러이유? `freq['a']`를 호출할 수 없다 -> freq.get('a',0) 이용 # + id="c124deaa-ec5d-4975-8580-86435af7477a" outputId="2a300c1b-7aaf-4c02-c121-0b7d850f8eee" freq['a'] # + id="aa44ea61-c767-4274-b7ec-ae4dd7d9a106" outputId="13ee692b-8176-47b7-c0ca-28671060f37d" # freq.get? # + [markdown] id="f2a32b70-055e-46c2-b73b-64d65f9a053f" # - key에 대응하는 값이 있으면 그 값을 리턴하고 없으면 default를 리턴 # + id="eff76469-8809-4665-8880-5af9e9cf2605" freq.get('a') # freq['a']에 해당하는 자료가 없어도 에러가 나지 않음 # + id="4ef1deff-307b-4f88-84da-f0e5165e68b1" outputId="ba7d3f82-d921-4e12-9f3c-241aaf9ea4f6" freq.get('a',0) # freq['a']에 해당하는 자료가 없어도 에러가 나지 않음 + freq['a']에 해당하는 자료가 없으면 0을 리턴 # + [markdown] id="60461141-6ffa-45d3-a564-220b08b293f9" # (시도3) # + id="791d95b6-2e51-4ff4-95c3-12c567c330f7" outputId="e0e369bf-241e-45b8-e036-02a27a6b4368" lst # + id="547b9217-9dbd-4450-b80b-802b53cc0a16" outputId="0a22e929-871b-4b94-ce58-94dfa2e144c9" freq = dict() freq # + id="2099c412-ea01-414a-ab1d-b4d1131d3651" for item in lst: freq[item] = freq.get(item,0) + 1 # + id="acf2f050-d826-4f79-950d-1893146eab8b" outputId="965a594a-b761-471e-b12e-6ea06e9fe18d" freq # + [markdown] id="a008c0fe-cf26-49f6-bcc7-7649730de252" # `-` 이것을 내가 정의하는 새로운 list의 메소드로 넣고 싶다. # + id="7507971e-97d9-44e0-93ae-2dd80879008e" class L(list): def frequency(self): freq = dict() for item in self: freq[item] = freq.get(item,0) + 1 return freq # + id="ed359a47-79b0-4575-8a3f-3ee73bb8fe06" lst = L([1,1,1,2,2,3]) # + id="c8982725-0fe8-40f6-85ad-138fa500ca8e" outputId="d1ccf8ce-880c-4f89-f996-be0615377be5" lst # 원래 list에 있는 repr 기능을 상속받아서 이루어지는 결과 # + # lst? # + id="6c0673df-80f2-4de1-8da3-56a3b0b4568a" outputId="b6dc6c6d-654f-4b66-9ab5-118dc0fd6a0d" _lst = L([4,5,6]) lst + _lst # L자료형끼리의 덧셈 # + id="84c848f4-df06-4441-89c5-aac60b26557d" outputId="4e321aeb-ec16-4b54-993b-0e51e531e420" lst + [4,5,6] # L자료형과 list자료형의 덧셈도 가능 # + [markdown] id="8fa629ac-34d1-4a10-9f9d-c182af00cdd4" # - L자료형의 덧셈은 list의 덧셈과 완전히 같음 # + id="81e99f37-09fe-4c7f-8780-0d56b091ac79" outputId="d0ab18f8-91bd-41d5-8ca2-67622d049b8b" lst.append(10) # append함수도 그대로 쓸 수 있음 lst # + [markdown] id="ad6112d2-ddb8-4200-89d0-245c2447e988" # `-` 기존리스트에서 추가로 frequency() 메소드가 존재함. # + id="2f311d63-f44d-4451-9412-7477cf0d2d2d" outputId="3717a659-85f5-4b92-bd8b-74b4ee7ac86e" lst.frequency() # + [markdown] id="993c4786-ecf4-4e37-b75f-cc0f2ad1c39d" # ## Appendix: 사용자정의 자료형의 유용함 # + [markdown] id="f4cc6218-0994-4f4b-8686-25837ba102f8" # `-` 사용자정의 자료형이 어떤 경우에는 유용할 수 있다. # + id="bb7ee8c2-a197-4c1c-a5a3-90df12ce6aeb" import pandas as pd import numpy as np import matplotlib.pyplot as plt # + [markdown] id="759a9cda-f60d-4360-8b77-63bd25a79c82" # `-` 예제1 # + id="5bed1172-7c4d-42f1-80de-516ac71385a6" year = ['2016','2017','2017','2017',2017,2018,2018,2019,2019] value = np.random.randn(9) # + id="ca9391d1-5bfe-464b-a709-18adc1112200" outputId="ac534d42-ea00-4379-f3d7-5835d674ff9a" df= pd.DataFrame({'year':year,'value':value}) df # + id="dbc4952e-cda0-4ed9-8d1a-0d45d5e56378" outputId="66081bf0-2651-46ea-9007-7fd12f87169d" plt.plot(df.year,df.value) # + [markdown] id="805662f9-7347-4771-97b8-de92778a58d2" # 에러의 이유: df.year에 str, int가 동시에 있음 # + id="095742d9-9169-462a-b02d-df5a430eaad3" outputId="ca83a9d5-fc3a-4d41-e71d-6313683a80b1" np.array(df.year) # + [markdown] id="11a60608-b71b-4bee-a4a7-e7cdabefdad8" # 자료형을 바꿔주면 해결할 수 있다. # + id="31c7ff35-7b86-4a8c-8362-24c480b04bac" outputId="b5f0cd51-cf45-4439-bda8-34a82f63b847" np.array(df.year, dtype=np.float64) #np.array(df.year).astype(np.float64) #df.year.astype(np.float64) # + id="e437d307-bc70-4192-b365-e3192b192e8d" outputId="90a6265f-113c-4896-c630-1ec6c117ac15" plt.plot(df.year.astype(np.float64),df.value,'.') # + [markdown] id="01774303-10e2-45f7-ab4e-6d809d4debfd" # `-` 예제2 # + id="56f4f949-7d05-4a47-bb0e-99c14616a082" year = ['2016','2017','2017','2017년','2017년',2018,2018,2019,2019] value = np.random.randn(9) # + id="6c4d4b0d-2d82-4925-ae5a-f4d8cdf3b613" outputId="70016df5-6a9d-4fa2-c460-e37284ff8da2" df= pd.DataFrame({'year':year,'value':value}) df # + id="31024b10-7afd-4d87-b630-9c39f4805118" outputId="2b420c15-3b70-48ec-be8e-f2946bb856b7" np.array(df.year,dtype=np.float64) # 타입을 일괄적으로 바꾸기 어렵다. # + id="6a0d10cd-e0f2-46a7-b20b-27a4d39decbd" outputId="7398576a-ef3b-42e6-abec-54cf63b1b6f5" L(df.year).frequency() # + [markdown] id="7e183ff6-7818-444e-823a-c1cc414b3b59" # - '2016'와 같은 형태, '2017년'와 같은 형태, 숫자형이 혼합 -> 맞춤형 변환이 필요함 # + id="37f10682-cbf5-488b-9a50-d71af183ea89" outputId="dc21f3fc-a0ca-4e52-ca8e-ff64facf0dff" '2017년'.replace("년","") # + id="f583eaec-eb3e-403f-9a70-2d8063d533d1" outputId="b319ae4e-d730-4087-d4cd-71d376372a23" L(df.year) # + id="7153275b-5c27-4863-9104-e2bb89c0e49e" def f(a): ## 사실 데이터의 구조를 모르면 이런 함수를 짤 수 없음 --> 자료의 구조를 확인해준다는 의미에서 freq가 있다면 편리하다. if type(a) is str: if "년" in a: return int(a.replace("년","")) else: return int(a) else: return a # + id="0199d944-300b-47bf-b47d-f48b2d4280f9" outputId="379e6668-0154-4302-fceb-ed4152b328bb" [f(a) for a in df.year] # + id="b31f561e-922f-4878-8392-b3c494f44b3e" df.year= [f(a) for a in df.year] # + id="e8f6cd2e-4460-4543-8a98-974728ec52cd" outputId="fb5401b7-3ef2-4bf5-c304-46b289c0c2b3" df # + id="c8ac2416-c3cc-41b0-baff-bfd150115a43" outputId="4a1f0245-99bd-49ed-c48a-7f99c45fca25" plt.plot(df.year, df.value, '.')
_notebooks/2022-05-30-python_class5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Class 5A - Data Wrangling with Pandas I # # We will begin soon! Until then, feel free to use the chat to socialize, and enjoy the music! # # <img src="images/pipelines.jpg" width=50% style="margin-left:auto; margin-right:auto"> # # # <div align = "left"> # <br> # <br> # Image by <a href="https://pixabay.com/photos/lost-places-factory-old-abandoned-2178884/">Hands off my tags! <NAME></a> from Pixabay. # </div> # # <div align = "right"> # October 4, 2021 <br> # <NAME> # </div> # + [markdown] slideshow={"slide_type": "slide"} # ## Class Outline # + [markdown] slideshow={"slide_type": "fragment"} # In this lecture we will talk about: # # - How to do Data Analysis # - Installing and Using Pandas # - Data Manipulation with Pandas # - Introducing DataFrames # - Creating a DataFrame # - Operating on Data in Pandas # + [markdown] slideshow={"slide_type": "slide"} # ## How to do Data Analysis # + [markdown] slideshow={"slide_type": "subslide"} # Here are some common steps of an analysis pipeline (the order isn't set, and not all elements are necessary): # + [markdown] slideshow={"slide_type": "fragment"} # 1. Load Data # - Check file types and encodings. # - Check delimiters (space, comma, tab). # - Skip rows and columns as needed. # + [markdown] slideshow={"slide_type": "fragment"} # 2. Clean Data # - Remove columns not being used. # - Deal with "incorrect" data. # - Deal with missing data. # # + [markdown] slideshow={"slide_type": "fragment"} # 3. Process Data # - Create any new columns needed that are combinations or aggregates of other columns (examples include weighted averages, categorizations, groups, etc...). # - Find and replace operations (examples inlcude replacing the string 'Strongly Agree' with the number 5). # - Other substitutions as needed. # - Deal with outliers. # # + [markdown] slideshow={"slide_type": "fragment"} # 4. Wrangle Data # - Restructure data format (columns and rows). # - Merge other data sources into your dataset. # # + [markdown] slideshow={"slide_type": "fragment"} # 5. Exploratory Data Analysis # - More about this later this week # + [markdown] slideshow={"slide_type": "fragment"} # 6. Data Analysis (not required until Task 3). # - In this course we will some data analysis, but the possibilities are endless here! # + [markdown] slideshow={"slide_type": "fragment"} # 7. Export reports/data analyses and visualizations. # + [markdown] slideshow={"slide_type": "slide"} # ## Data Manipulation with Pandas # + [markdown] slideshow={"slide_type": "fragment"} # ### Attribution # + [markdown] slideshow={"slide_type": "fragment"} # <!--BOOK_INFORMATION--> # <img align="left" style="padding-right:10px;" src="images/PDSH-cover-small.png"> # # *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by <NAME>anderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).* # # *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* # + [markdown] slideshow={"slide_type": "subslide"} # In this chapter, we will be looking in detail at the data structures provided by the Pandas library. # Pandas is a newer package built on top of NumPy, and provides an efficient implementation of a ``DataFrame``. # ``DataFrame``s are essentially multidimensional arrays with attached row and column labels, and often with heterogeneous types and/or missing data. # As well as offering a convenient storage interface for labeled data, Pandas implements a number of powerful data operations familiar to users of both database frameworks and spreadsheet programs. # # NumPy's ``ndarray`` data structure provides essential features for the type of clean, well-organized data typically seen in numerical computing tasks. # While it serves this purpose very well, its limitations become clear when we need more flexibility (e.g., attaching labels to data, working with missing data, etc.) and when attempting operations that do not map well to element-wise broadcasting (e.g., groupings, pivots, etc.), each of which is an important piece of analyzing the less structured data available in many forms in the world around us. # Pandas, and in particular its ``DataFrame`` object, building on the NumPy array structure and provides efficient access to these sorts of "data munging" tasks that occupy much of a data scientist's time. # # In this chapter, we will focus on the mechanics of using the ``DataFrame`` and related structures effectively. # We will use examples drawn from real datasets where appropriate, but these examples are not necessarily the focus. # + [markdown] slideshow={"slide_type": "subslide"} # ## Installing and Using Pandas # # Installation of Pandas on your system requires NumPy to be installed, and if building the library from source, requires the appropriate tools to compile the C and Cython sources on which Pandas is built. # Details on this installation can be found in the [Pandas documentation](http://pandas.pydata.org/). # # # To install `pandas`, open up a Terminal and install it using conda: # # `conda install pandas` # + [markdown] slideshow={"slide_type": "fragment"} # Just as we generally import NumPy under the alias ``np``, we will import Pandas under the alias ``pd``: # + slideshow={"slide_type": "fragment"} tags=[] import numpy as np import pandas as pd # + [markdown] slideshow={"slide_type": "fragment"} # This import convention will be used throughout the remainder of this book. # + [markdown] slideshow={"slide_type": "fragment"} # Once Pandas is installed, and imported you can check the version: # + jupyter={"outputs_hidden": false} slideshow={"slide_type": "fragment"} pd.__version__ # + [markdown] slideshow={"slide_type": "subslide"} # ## Reminder about Built-In Documentation # # As you read through this chapter, don't forget that IPython gives you the ability to quickly explore the contents of a package (by using the tab-completion feature) as well as the documentation of various functions (using the ``?`` character). (Refer back to [Help and Documentation in IPython](https://jakevdp.github.io/PythonDataScienceHandbook/01.01-help-and-documentation.html) if you need a refresher on this.) # # For example, to display all the contents of the pandas namespace, you can type # # ```ipython # In [3]: pd.<TAB> # ``` # # And to display Pandas's built-in documentation, you can use this: # # ```ipython # In [4]: pd? # ``` # # More detailed documentation, along with tutorials and other resources, can be found at http://pandas.pydata.org/. # + [markdown] slideshow={"slide_type": "subslide"} # ## Introducing DataFrames # + [markdown] slideshow={"slide_type": "fragment"} # At the very basic level, Pandas objects can be thought of as enhanced versions of NumPy structured arrays in which the rows and columns are identified with labels rather than simple integer indices. # As we will see during the course of this chapter, Pandas provides a host of useful tools, methods, and functionality on top of the basic data structures, but nearly everything that follows will require an understanding of what these structures are. # Thus, before we go any further, let's introduce these three fundamental Pandas data structures: the ``Series``, ``DataFrame``, and ``Index``. # # We will start our code sessions with the standard NumPy and Pandas imports: # # ``` # import numpy as np # import pandas as pd # ``` # + [markdown] slideshow={"slide_type": "fragment"} # The fundamental structure in Pandas is the ``DataFrame``. # Like the ``Series`` object discussed in the previous section, the ``DataFrame`` can be thought of either as a generalization of a NumPy array, or as a specialization of a Python dictionary. # We'll now take a look at each of these perspectives. # + [markdown] slideshow={"slide_type": "subslide"} # ## Creating a DataFrame # # There are several ways of creating DataFrames in Python using Pandas and though it can end up with the same end product, the method of creating them can depend on the format of your data. # # Let's look at a few examples: # + [markdown] slideshow={"slide_type": "subslide"} # ### DataFrame from URL # # If your dataset exists on the web as a publicly accessible file, you can create a DataFrame directly from the URL to the CSV. # # - `read_csv(path)` is a function from the Pandas package that creates a DataFrame from a CSV file. # - The argument `path` can be a URL or a reference to a local file. # + slideshow={"slide_type": "fragment"} import pandas as pd pd.read_csv("https://github.com/firasm/bits/raw/master/fruits.csv") # + [markdown] slideshow={"slide_type": "fragment"} # I can store the dataframe as an object like this: # + slideshow={"slide_type": "fragment"} fruits = pd.read_csv("https://github.com/firasm/bits/raw/master/fruits.csv") fruits # + [markdown] slideshow={"slide_type": "fragment"} # You can print the first 5 lines of the dataset using the `head()` function (this data set only has 3 lines): # + slideshow={"slide_type": "fragment"} fruits.head() # + slideshow={"slide_type": "fragment"} fruits['Colour'] # + slideshow={"slide_type": "fragment"} fruits['Rating'] # + slideshow={"slide_type": "fragment"} fruits['Fruit Name'] # + slideshow={"slide_type": "fragment"} fruits['Rating'].sum() # + [markdown] slideshow={"slide_type": "subslide"} # ### DataFrame from List of Dictionaries # + slideshow={"slide_type": "fragment"} fruit1 = {'Fruit Name': 'Apple', 'Mass(g)': 200, 'Colour': 'Red', 'Rating': 8} fruit2 = {'Fruit Name': 'Banana', 'Mass(g)': 250, 'Colour': 'Yellow', 'Rating': 9} fruit3 = {'Fruit Name': 'Cantoloupe', 'Mass(g)': 600, 'Colour': 'Orange', 'Rating': 10} # + slideshow={"slide_type": "fragment"} pd.DataFrame([fruit1, fruit2, fruit3]) # + [markdown] slideshow={"slide_type": "subslide"} # ### DataFrame from List of Tuples # + slideshow={"slide_type": "fragment"} fruit_tuples = [('Apple','Red', 200, 8), ('Banana', 250, 'Yellow', 9), ('Cantoloupe', 600, 'Orange', 10)] labels = ['Fruit Name', 'Mass(g)','Color','Rating'] # + slideshow={"slide_type": "fragment"} pd.DataFrame(fruit_tuples,columns=labels) # + [markdown] slideshow={"slide_type": "subslide"} # ### DataFrame from a Dictionary # + slideshow={"slide_type": "fragment"} fruit_dict = {'Fruit Name': {0: 'Apple', 1: 'Banana', 2: 'Cantoloupe'}, 'Mass(g)': {0: 200, 1: 250, 2: 600}, 'Colour': {0: 'Red', 1: 'Yellow', 2: 'Orange'}, 'Rating': {0: 8, 1: 9, 2: 10} } # + slideshow={"slide_type": "fragment"} pd.DataFrame.from_dict(fruit_dict) # + [markdown] slideshow={"slide_type": "subslide"} # Here's a great cheat sheet from "[Practical Business Python (PBP)](https://pbpython.com/pandas-list-dict.html)": # # <img src="https://pbpython.com/images/pandas-dataframe-shadow.png"> # + [markdown] slideshow={"slide_type": "subslide"} # Additionally, the ``DataFrame`` has a ``columns`` attribute, which is an ``Index`` object holding the column labels: # + jupyter={"outputs_hidden": false} slideshow={"slide_type": "fragment"} list(fruits.columns) # + [markdown] slideshow={"slide_type": "fragment"} # Thus the ``DataFrame`` can be thought of as a generalization of a two-dimensional NumPy array, where both the rows and columns have a generalized index for accessing the data. # + [markdown] slideshow={"slide_type": "subslide"} # ### Other ways of creating a DataFrame # + [markdown] slideshow={"slide_type": "subslide"} # #### From a list of dicts # # Any list of dictionaries can be made into a ``DataFrame``. # We'll use a simple list comprehension to create some data: # + jupyter={"outputs_hidden": false} slideshow={"slide_type": "fragment"} data = [{'a': i, 'b': 2 * i} for i in range(3)] # list comprehension! pd.DataFrame(data) # + [markdown] slideshow={"slide_type": "fragment"} # Even if some keys in the dictionary are missing, Pandas will fill them in with ``NaN`` (i.e., "not a number") values: # + jupyter={"outputs_hidden": false} slideshow={"slide_type": "fragment"} pd.DataFrame([{'a': 1, 'b': 2}, {'b': 3, 'c': 4}]) # + [markdown] slideshow={"slide_type": "subslide"} # #### From a two-dimensional NumPy array # # Given a two-dimensional array of data, we can create a ``DataFrame`` with any specified column and index names. # If omitted, an integer index will be used for each: # + jupyter={"outputs_hidden": false} slideshow={"slide_type": "fragment"} pd.DataFrame(np.random.rand(3, 2), columns=['foo', 'bar'], index=['Jack', 'Ali', 'Nusrat'] ) # + [markdown] slideshow={"slide_type": "subslide"} # #### From a NumPy structured array # # We covered structured arrays in [Structured Data: NumPy's Structured Arrays](02.09-Structured-Data-NumPy.ipynb). # A Pandas ``DataFrame`` operates much like a structured array, and can be created directly from one: # + slideshow={"slide_type": "fragment"} tags=[] A = np.zeros(3, dtype=[('A', 'i8'), ('B', 'f8')]) A # + slideshow={"slide_type": "fragment"} pd.DataFrame(A) # + [markdown] slideshow={"slide_type": "subslide"} # #### How to Access elements in a DataFrame # # Similarly, we can also think of a ``DataFrame`` as a specialization of a dictionary. # Where a dictionary maps a key to a value, a ``DataFrame`` maps a column name to a ``Series`` of column data. # For example, asking for the ``'Mass(g)'`` attribute returns a ``Series`` object: # + slideshow={"slide_type": "fragment"} fruits # + slideshow={"slide_type": "fragment"} fruits['Mass(g)'] # - fruits.loc[1,'Mass(g)'] fruits[fruits['Fruit Name'] == 'Banana'] fruits[fruits['Fruit Name'] == 'Banana']['Colour'] # + [markdown] slideshow={"slide_type": "fragment"} # For a ``DataFrame``, ``data['col0']`` will return the first **column**. # # Because of this, it is probably better to think about ``DataFrame``s as generalized dictionaries rather than generalized arrays, though both ways of looking at the situation can be useful. # We'll explore more flexible means of indexing ``DataFrame``s in [Data Indexing and Selection](https://jakevdp.github.io/PythonDataScienceHandbook/03.02-data-indexing-and-selection.html). # + [markdown] slideshow={"slide_type": "slide"} # ## Operating on Data in Pandas # # This is where the magic of Pandas really starts to shine - adding, and processing your dataset. # # Be back in 5 minutes at 1:05! # # # + [markdown] slideshow={"slide_type": "subslide"} # ### Summary Statistics on a Pandas DataFrame # + slideshow={"slide_type": "fragment"} fruits # + slideshow={"slide_type": "fragment"} fruits.describe() # + [markdown] slideshow={"slide_type": "subslide"} # ### You can compute other stats of numeric columns # + slideshow={"slide_type": "fragment"} fruits.select_dtypes(include='number').mean() # + slideshow={"slide_type": "fragment"} fruits.select_dtypes(include='number').median() # + slideshow={"slide_type": "fragment"} fruits.select_dtypes(include='number').mode() # + [markdown] slideshow={"slide_type": "subslide"} # ### Create a calculated column (from one other column) # + [markdown] slideshow={"slide_type": "fragment"} # Let's say we want to create a new column in our fruits DataFrame that is a calculated column. # # First let's print the DataFrame: # + slideshow={"slide_type": "fragment"} fruits # + [markdown] slideshow={"slide_type": "fragment"} # Now, let's add a column called `score` which takes the rating and multiplies it by 50. # # We can actually do all of this in just one line: # + slideshow={"slide_type": "fragment"} fruits['Score'] = fruits['Rating']*50 fruits # + [markdown] slideshow={"slide_type": "subslide"} # ### Create a calculated column (from multiple other columns) # + slideshow={"slide_type": "fragment"} fruits['Score2'] = fruits['Rating']*50 + fruits['Mass(g)']*0.25 fruits # + [markdown] slideshow={"slide_type": "subslide"} # ### Create and apply a custom function to your DataFrame # + [markdown] slideshow={"slide_type": "fragment"} # Let's say we wanted to do a complex custom operation on our DataFrame. # We have already learned how to write Python functions # + slideshow={"slide_type": "fragment"} def keep_or_discard(x): """Decides whether to keep or discard a piece of fruit. Takes in a Pandas row, and returns either 'keep' or 'discard' depending on the criteria. """ if (x['Mass(g)'] > 220) and (x['Rating'] > 5): return 'keep' else: return 'discard' # + slideshow={"slide_type": "fragment"} fruits.apply(keep_or_discard,axis='columns') # + [markdown] slideshow={"slide_type": "fragment"} # We can now assign the result of this to a new column in our DataFrame. # + slideshow={"slide_type": "fragment"} fruits['Status'] = fruits.apply(keep_or_discard,axis='columns') fruits # + [markdown] slideshow={"slide_type": "fragment"} # One important feature that we haven't yet talked about is the "axis" parameter. # # - `axis='rows'`, or `axis=0`, means apply this operation "row-wise". # - `axis='columns'`, or `axis=1`, means apply this operation "column-wise". # + [markdown] slideshow={"slide_type": "subslide"} # ## How to export data # + slideshow={"slide_type": "fragment"} fruits.to_csv('fruits_processed.csv',index=None) # + slideshow={"slide_type": "fragment"} pd.read_csv('fruits_processed.csv') # + [markdown] slideshow={"slide_type": "fragment"} # I think this amount of functionality should keep you busy for a while! # # More next time!
notes/week05/Class5A/Class5A.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Perform Illumination correction on an Image # The notebook takes an image as a parameter and uses [scikit-image](https://scikit-image.org/) to perform some illumination correction # ### Install dependencies if required # The cell below will install dependencies if you choose to run the notebook in [Google Colab](https://colab.research.google.com/notebooks/intro.ipynb#recent=true). # %pip install omero-py # ### Import Packages from IPython import get_ipython from omero.gateway import BlitzGateway import matplotlib.pyplot as plt from skimage.morphology import disk, white_tophat from getpass import getpass # ### OMERO Credentials HOST = 'wss://workshop.openmicroscopy.org/omero-ws' conn = BlitzGateway(input("Username: "), getpass("OMERO Password: "), host=HOST, secure=True) conn.connect() # ### OMERO Image ID # To be modified # ex: Select an Image from the dataset named 'PTRE' and enter its Id image_id = 11270 # ### Print Image Name image = conn.getObject("Image", image_id) print(image.getName(), image.getDescription()) # ### Split channel view for an individual plane get_ipython().run_line_magic('matplotlib', 'inline') pixels = image.getPrimaryPixels() channels = image.getChannels() plt.figure(figsize=(25, 20)) size_c = image.getSizeC() for idx in range(0, size_c): plt.subplot(1, 5, idx+1) image_plane = pixels.getPlane(0, idx, 0) plt.imshow(image_plane, cmap='gray') plt.axis('off') plt.title('Channel' + str(idx)) # ### Tophat Filter and display the images # + get_ipython().run_line_magic('matplotlib', 'inline') image_plane = pixels.getPlane(0, 1, 0) selem = disk(25) w_tophat = white_tophat(image_plane, selem) plt.figure(figsize=(25, 20)) plt.subplot(1, 5, 1) plt.imshow(image_plane, cmap='gray') plt.axis('off') plt.title('Raw Image') plt.subplot(1, 5, 2) plt.imshow(w_tophat, cmap='gray') plt.axis('off') plt.title('Top-Hat Filtered Image') # - # ### Close the connection to the OMERO server conn.close() # ### License (BSD 2-Clause) # Copyright (C) 2019-2021 University of Dundee. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
notebooks/IlluminationCorrectionNotebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import io import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn # - teams = """Team Name Division Seed Ceiling Ceiling value Floor Floor value Brute Women's 1 Champion 13 Semis 5 Fury Women's 2 Champion 13 Semis 5 Riot Women's 3 Champion 13 Quarters 2 <NAME> Women's 4 Champion 13 Quarters 2 Scandal Women's 5 Semis 5 Quarters 2 Ozone Women's 6 Semis 5 Prequarters 1 6ixers Women's 7 Quarters 2 Prequarters 1 Nightlock Women's 8 Quarters 2 Prequarters 1 Nemesis Women's 9 Quarters 2 No Bracket 0 Rival Women's 10 Quarters 2 No Bracket 0 Phoenix Women's 11 Quarters 2 No Bracket 0 Heist Women's 12 Prequarters 1 No Bracket 0 Schwa Women's 13 Prequarters 1 No Bracket 0 Traffic Women's 14 Prequarters 1 No Bracket 0 Pop Women's 15 Prequarters 1 No Bracket 0 Wildfire Women's 16 Prequarters 1 No Bracket 0 Revolver Men's 1 Champion 13 Semis 5 Sockeye Men's 2 Champion 13 No Bracket 0 PoNY Men's 3 Champion 13 Quarters 2 Ring Men's 4 Champion 13 Quarters 2 Furious Men's 5 Quarters 2 No Bracket 0 Truck Men's 6 Champion 13 Quarters 2 DiG Men's 7 Finalist 9 Quarters 2 Madison Men's 8 Quarters 2 No Bracket 0 Machine Men's 9 Quarters 2 No Bracket 0 Rhino Men's 10 Quarters 2 No Bracket 0 Doublewide Men's 11 Semis 5 Prequarters 1 Sub Zero Men's 12 Quarters 2 Prequarters 1 High Five Men's 13 Prequarters 1 No Bracket 0 Temper Men's 14 Quarters 2 No Bracket 0 Bravo Men's 15 Prequarters 1 No Bracket 0 Chain Men's 16 Prequarters 1 No Bracket 0 AMP Mixed 1 Champion 13 Semis 5 Drag'n Mixed 2 Champion 13 Semis 5 Mixtape Mixed 3 Champion 13 Quarters 2 BFG Mixed 4 Finalist 9 Quarters 2 Snake Mixed 5 Semis 5 Prequarters 1 Space Heater Mixed 6 Semis 5 Prequarters 1 shame. Mixed 7 Semis 5 Prequarters 1 Blackbird Mixed 8 Quarters 2 Prequarters 1 Slow White Mixed 9 Semis 5 Prequarters 1 Mischief Mixed 10 Semis 5 No Bracket 0 Polar Bears Mixed 11 Quarters 2 No Bracket 0 No Touching! Mixed 12 Quarters 2 No Bracket 0 Toro Mixed 13 Prequarters 1 No Bracket 0 XIST Mixed 14 Prequarters 1 No Bracket 0 Jughandle Mixed 15 Quarters 2 No Bracket 0 Columbus Cocktails Mixed 16 Prequarters 1 No Bracket 0 """ full_df = pd.read_csv(io.StringIO(teams), sep='\t') full_df.style.bar(subset=["Ceiling value", "Floor value"], color="lightgreen") # + def make_line(x1, y1, x2, y2, linspace): slope = (y1-y2)/(x1-x2) y_intercept = (x1*y2 - x2*y1) / (x1-x2) # import pdb; pdb.set_trace() return slope * linspace + y_intercept fig, axes = plt.subplots(3, figsize=(14, 12)) plt.subplots_adjust(hspace=0.4) for i, division in enumerate(("Mixed", "Men's", "Women's")): df = full_df.loc[full_df["Division"] == division] ax = axes[i] ax.errorbar(df["Seed"], (df["Ceiling value"] + df["Floor value"]) / 2.0, yerr=(df["Ceiling value"] - df["Floor value"]) / 2.0, fmt='o', capsize=8) for _, row in df.iterrows(): linspace = np.linspace(0.1, 17, num=100) countour = make_line(17, 0, row["Seed"], ((row["Ceiling value"] + row["Floor value"]) / 2.0), linspace) ax.plot(linspace, countour, '-', color="k", linestyle='dotted', alpha=0.3) ax.set_xticks(np.arange(0, len(df) + 1)) xticks = ["0"] + ["{i}\n{t}".format(i=i+1, t=team) for i, team in enumerate(df["Team Name"])] ax.set_xticklabels(xticks, rotation=20, ha="right") ax.grid(linestyle='dotted', color="lightgray") ax.set_ylim(-1, 14) ax.set_xlabel(division) ax.set_ylabel("EV of #thegame") # plt.setp(ax.get_xticklabels(), visible=True) # - # Dotted lines are countour lines of seed vs "expected value" according to ultiworld's projections.
notebooks/2018 #thegame.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## NEWS DATA FETCHING # * We are going to fetch news based on category from a news api using the `requests` python module. # > [API](https://newsapi.org/) import requests import json import pandas as pd import numpy as np import csv class News: def __init__(self, urlToImage, author, title, description, url, publishedAt, content): self.urlToImage = urlToImage self.author = author self.title = title self.description = description self.url = url self.content = content # ## Get all countries # > get the list of countries in the `countries.csv` file. df = pd.read_csv('countries.csv') countries =[ 'ae','ar','at','au','be','bg', 'br', 'ca','ch','cn','co','cu','cz', 'de', 'eg', 'fr', 'gb', 'gr','hk', 'hu', 'id','ie','il','in','it','jp','kr','lt','lv', 'ma','mx','my','ng','nl','no','nz','ph','pl','pt','ro','rs','ru','sa','se','sg','si','sk', 'th','tr','tw','ua','us','ve','za' ] countries[:2] countries = ['za', 'us', 'gb', 'eg', 'fr', 'rs', 'gr', 'ca', 'ng', 'ru','sa','cn','co','no'] len(countries) API_KEY = "<KEY>" categories = ["business","entertainment","general","health","science","sports","technology"] END_POINT = "https://newsapi.org/v2/top-headlines?country=us&category=business&apiKey=150acdfbe4964f5b94b9c5fab701191b&pageSize=100" # ## Now let's fetch data and create # + news = [] for category in categories: for country in countries: END_POINT = f"https://newsapi.org/v2/top-headlines?country={country}&category={category}&apiKey={API_KEY}&pageSize=100" res = requests.get(END_POINT) data = json.loads(res.content)["articles"] for new in data: news_dict ={ "category": category.upper(), "country_code": country, 'urlToImage': new['urlToImage'], 'author': new['author'], 'title': new['title'], 'description':new['description'], 'url':new['url'], 'publishedAt':new['publishedAt'], 'content':new['content'] } news.append(news_dict) # - len(news) # > Now we have `5279` news from 14 different countries and 7 different categories. news[0] # ### Save News # > We are going to save our news as `news_categories.csv` path_name = "news_categories.csv" keys = news[0].keys() keys # + with open(path_name, 'w', newline='', encoding="utf-8") as writter: dict_writer = csv.DictWriter(writter, keys) dict_writer.writeheader() dict_writer.writerows(news) print("A NEWS csv FILE HAS BEEN CREATED!!") # - # > Done fetching data, now we need to do some cleaning on the data
Projects/news/Data Fetching.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="7765UFHoyGx6" # ##### Copyright 2020 The TensorFlow Authors. # + cellView="form" id="KsOkK8O69PyT" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="ZS8z-_KeywY9" # # TF Lattice Canned Estimators # + [markdown] id="r61fkA2i9Y3_" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/lattice/tutorials/canned_estimators"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/lattice/blob/master/docs/tutorials/canned_estimators.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/lattice/blob/master/docs/tutorials/canned_estimators.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/lattice/docs/tutorials/canned_estimators.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="WCpl-9WDVq9d" # ## Overview # # Canned estimators are quick and easy ways to train TFL models for typical use cases. This guide outlines the steps needed to create a TFL canned estimator. # + [markdown] id="x769lI12IZXB" # ## Setup # + [markdown] id="fbBVAR6UeRN5" # Installing TF Lattice package: # + id="bpXjJKpSd3j4" #@test {"skip": true} # !pip install tensorflow-lattice # + [markdown] id="jSVl9SHTeSGX" # Importing required packages: # + cellView="both" id="FbZDk8bIx8ig" import tensorflow as tf import copy import logging import numpy as np import pandas as pd import sys import tensorflow_lattice as tfl from tensorflow import feature_column as fc logging.disable(sys.maxsize) # + [markdown] id="svPuM6QNxlrH" # Downloading the UCI Statlog (Heart) dataset: # + cellView="both" id="j-k1qTR_yvBl" csv_file = tf.keras.utils.get_file( 'heart.csv', 'http://storage.googleapis.com/applied-dl/heart.csv') df = pd.read_csv(csv_file) target = df.pop('target') train_size = int(len(df) * 0.8) train_x = df[:train_size] train_y = target[:train_size] test_x = df[train_size:] test_y = target[train_size:] df.head() # + [markdown] id="nKkAw12SxvGG" # Setting the default values used for training in this guide: # + cellView="both" id="1T6GFI9F6mcG" LEARNING_RATE = 0.01 BATCH_SIZE = 128 NUM_EPOCHS = 500 PREFITTING_NUM_EPOCHS = 10 # + [markdown] id="0TGfzhPHzpix" # ## Feature Columns # # As for any other TF estimator, data needs to be passed to the estimator, which is typically via an input_fn and parsed using [FeatureColumns](https://www.tensorflow.org/guide/feature_columns). # + id="DCIUz8apzs0l" # Feature columns. # - age # - sex # - cp chest pain type (4 values) # - trestbps resting blood pressure # - chol serum cholestoral in mg/dl # - fbs fasting blood sugar > 120 mg/dl # - restecg resting electrocardiographic results (values 0,1,2) # - thalach maximum heart rate achieved # - exang exercise induced angina # - oldpeak ST depression induced by exercise relative to rest # - slope the slope of the peak exercise ST segment # - ca number of major vessels (0-3) colored by flourosopy # - thal 3 = normal; 6 = fixed defect; 7 = reversable defect feature_columns = [ fc.numeric_column('age', default_value=-1), fc.categorical_column_with_vocabulary_list('sex', [0, 1]), fc.numeric_column('cp'), fc.numeric_column('trestbps', default_value=-1), fc.numeric_column('chol'), fc.categorical_column_with_vocabulary_list('fbs', [0, 1]), fc.categorical_column_with_vocabulary_list('restecg', [0, 1, 2]), fc.numeric_column('thalach'), fc.categorical_column_with_vocabulary_list('exang', [0, 1]), fc.numeric_column('oldpeak'), fc.categorical_column_with_vocabulary_list('slope', [0, 1, 2]), fc.numeric_column('ca'), fc.categorical_column_with_vocabulary_list( 'thal', ['normal', 'fixed', 'reversible']), ] # + [markdown] id="hEZstmtT2CA3" # TFL canned estimators use the type of the feature column to decide what type of calibration layer to use. We use a `tfl.layers.PWLCalibration` layer for numeric feature columns and a `tfl.layers.CategoricalCalibration` layer for categorical feature columns. # # Note that categorical feature columns are not wrapped by an embedding feature column. They are directly fed into the estimator. # + [markdown] id="H_LoW_9m5OFL" # ## Creating input_fn # # As for any other estimator, you can use an input_fn to feed data to the model for training and evaluation. TFL estimators can automatically calculate quantiles of the features and use them as input keypoints for the PWL calibration layer. To do so, they require passing a `feature_analysis_input_fn`, which is similar to the training input_fn but with a single epoch or a subsample of the data. # + id="lFVy1Efy5NKD" train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( x=train_x, y=train_y, shuffle=False, batch_size=BATCH_SIZE, num_epochs=NUM_EPOCHS, num_threads=1) # feature_analysis_input_fn is used to collect statistics about the input. feature_analysis_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( x=train_x, y=train_y, shuffle=False, batch_size=BATCH_SIZE, # Note that we only need one pass over the data. num_epochs=1, num_threads=1) test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( x=test_x, y=test_y, shuffle=False, batch_size=BATCH_SIZE, num_epochs=1, num_threads=1) # Serving input fn is used to create saved models. serving_input_fn = ( tf.estimator.export.build_parsing_serving_input_receiver_fn( feature_spec=fc.make_parse_example_spec(feature_columns))) # + [markdown] id="uQlzREcm2Wbj" # ## Feature Configs # # Feature calibration and per-feature configurations are set using `tfl.configs.FeatureConfig`. Feature configurations include monotonicity constraints, per-feature regularization (see `tfl.configs.RegularizerConfig`), and lattice sizes for lattice models. # # If no configuration is defined for an input feature, the default configuration in `tfl.config.FeatureConfig` is used. # + id="vD0tNpiO3p9c" # Feature configs are used to specify how each feature is calibrated and used. feature_configs = [ tfl.configs.FeatureConfig( name='age', lattice_size=3, # By default, input keypoints of pwl are quantiles of the feature. pwl_calibration_num_keypoints=5, monotonicity='increasing', pwl_calibration_clip_max=100, # Per feature regularization. regularizer_configs=[ tfl.configs.RegularizerConfig(name='calib_wrinkle', l2=0.1), ], ), tfl.configs.FeatureConfig( name='cp', pwl_calibration_num_keypoints=4, # Keypoints can be uniformly spaced. pwl_calibration_input_keypoints='uniform', monotonicity='increasing', ), tfl.configs.FeatureConfig( name='chol', # Explicit input keypoint initialization. pwl_calibration_input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0], monotonicity='increasing', # Calibration can be forced to span the full output range by clamping. pwl_calibration_clamp_min=True, pwl_calibration_clamp_max=True, # Per feature regularization. regularizer_configs=[ tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4), ], ), tfl.configs.FeatureConfig( name='fbs', # Partial monotonicity: output(0) <= output(1) monotonicity=[(0, 1)], ), tfl.configs.FeatureConfig( name='trestbps', pwl_calibration_num_keypoints=5, monotonicity='decreasing', ), tfl.configs.FeatureConfig( name='thalach', pwl_calibration_num_keypoints=5, monotonicity='decreasing', ), tfl.configs.FeatureConfig( name='restecg', # Partial monotonicity: output(0) <= output(1), output(0) <= output(2) monotonicity=[(0, 1), (0, 2)], ), tfl.configs.FeatureConfig( name='exang', # Partial monotonicity: output(0) <= output(1) monotonicity=[(0, 1)], ), tfl.configs.FeatureConfig( name='oldpeak', pwl_calibration_num_keypoints=5, monotonicity='increasing', ), tfl.configs.FeatureConfig( name='slope', # Partial monotonicity: output(0) <= output(1), output(1) <= output(2) monotonicity=[(0, 1), (1, 2)], ), tfl.configs.FeatureConfig( name='ca', pwl_calibration_num_keypoints=4, monotonicity='increasing', ), tfl.configs.FeatureConfig( name='thal', # Partial monotonicity: # output(normal) <= output(fixed) # output(normal) <= output(reversible) monotonicity=[('normal', 'fixed'), ('normal', 'reversible')], ), ] # + [markdown] id="LKBULveZ4mr3" # ## Calibrated Linear Model # # To construct a TFL canned estimator, construct a model configuration from `tfl.configs`. A calibrated linear model is constructed using `tfl.configs.CalibratedLinearConfig`. It applies piecewise-linear and categorical calibration on the input features, followed by a linear combination and an optional output piecewise-linear calibration. When using output calibration or when output bounds are specified, the linear layer will apply weighted averaging on calibrated inputs. # # This example creates a calibrated linear model on the first 5 features. We use # `tfl.visualization` to plot the model graph with the calibrator plots. # + id="diRRozio4sAL" # Model config defines the model structure for the estimator. model_config = tfl.configs.CalibratedLinearConfig( feature_configs=feature_configs, use_bias=True, output_calibration=True, regularizer_configs=[ # Regularizer for the output calibrator. tfl.configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4), ]) # A CannedClassifier is constructed from the given model config. estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns[:5], model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn, optimizer=tf.keras.optimizers.Adam(LEARNING_RATE), config=tf.estimator.RunConfig(tf_random_seed=42)) estimator.train(input_fn=train_input_fn) results = estimator.evaluate(input_fn=test_input_fn) print('Calibrated linear test AUC: {}'.format(results['auc'])) saved_model_path = estimator.export_saved_model(estimator.model_dir, serving_input_fn) model_graph = tfl.estimators.get_model_graph(saved_model_path) tfl.visualization.draw_model_graph(model_graph) # + [markdown] id="zWzPM2_p977t" # ## Calibrated Lattice Model # # A calibrated lattice model is constructed using `tfl.configs.CalibratedLatticeConfig`. A calibrated lattice model applies piecewise-linear and categorical calibration on the input features, followed by a lattice model and an optional output piecewise-linear calibration. # # This example creates a calibrated lattice model on the first 5 features. # # + id="C6EvVpKW4BbC" # This is calibrated lattice model: Inputs are calibrated, then combined # non-linearly using a lattice layer. model_config = tfl.configs.CalibratedLatticeConfig( feature_configs=feature_configs, regularizer_configs=[ # Torsion regularizer applied to the lattice to make it more linear. tfl.configs.RegularizerConfig(name='torsion', l2=1e-4), # Globally defined calibration regularizer is applied to all features. tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4), ]) # A CannedClassifier is constructed from the given model config. estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns[:5], model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn, optimizer=tf.keras.optimizers.Adam(LEARNING_RATE), config=tf.estimator.RunConfig(tf_random_seed=42)) estimator.train(input_fn=train_input_fn) results = estimator.evaluate(input_fn=test_input_fn) print('Calibrated lattice test AUC: {}'.format(results['auc'])) saved_model_path = estimator.export_saved_model(estimator.model_dir, serving_input_fn) model_graph = tfl.estimators.get_model_graph(saved_model_path) tfl.visualization.draw_model_graph(model_graph) # + [markdown] id="9494K_ZBKFcm" # ## Calibrated Lattice Ensemble # # When the number of features is large, you can use an ensemble model, which creates multiple smaller lattices for subsets of the features and averages their output instead of creating just a single huge lattice. Ensemble lattice models are constructed using `tfl.configs.CalibratedLatticeEnsembleConfig`. A calibrated lattice ensemble model applies piecewise-linear and categorical calibration on the input feature, followed by an ensemble of lattice models and an optional output piecewise-linear calibration. # # + [markdown] id="KjrzziMFKuCB" # ### Random Lattice Ensemble # # The following model config uses a random subset of features for each lattice. # + id="YBSS7dLjKExq" # This is random lattice ensemble model with separate calibration: # model output is the average output of separately calibrated lattices. model_config = tfl.configs.CalibratedLatticeEnsembleConfig( feature_configs=feature_configs, num_lattices=5, lattice_rank=3) # A CannedClassifier is constructed from the given model config. estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns, model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn, optimizer=tf.keras.optimizers.Adam(LEARNING_RATE), config=tf.estimator.RunConfig(tf_random_seed=42)) estimator.train(input_fn=train_input_fn) results = estimator.evaluate(input_fn=test_input_fn) print('Random ensemble test AUC: {}'.format(results['auc'])) saved_model_path = estimator.export_saved_model(estimator.model_dir, serving_input_fn) model_graph = tfl.estimators.get_model_graph(saved_model_path) tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15) # + [markdown] id="7uyO8s97FGJM" # ### RTL Layer Random Lattice Ensemble # # The following model config uses a `tfl.layers.RTL` layer that uses a random subset of features for each lattice. We note that `tfl.layers.RTL` only supports monotonicity constraints and must have the same lattice size for all features and no per-feature regularization. Note that using a `tfl.layers.RTL` layer lets you scale to much larger ensembles than using separate `tfl.layers.Lattice` instances. # + id="8v7dKg-FF7iz" # Make sure our feature configs have the same lattice size, no per-feature # regularization, and only monotonicity constraints. rtl_layer_feature_configs = copy.deepcopy(feature_configs) for feature_config in rtl_layer_feature_configs: feature_config.lattice_size = 2 feature_config.unimodality = 'none' feature_config.reflects_trust_in = None feature_config.dominates = None feature_config.regularizer_configs = None # This is RTL layer ensemble model with separate calibration: # model output is the average output of separately calibrated lattices. model_config = tfl.configs.CalibratedLatticeEnsembleConfig( lattices='rtl_layer', feature_configs=rtl_layer_feature_configs, num_lattices=5, lattice_rank=3) # A CannedClassifier is constructed from the given model config. estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns, model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn, optimizer=tf.keras.optimizers.Adam(LEARNING_RATE), config=tf.estimator.RunConfig(tf_random_seed=42)) estimator.train(input_fn=train_input_fn) results = estimator.evaluate(input_fn=test_input_fn) print('Random ensemble test AUC: {}'.format(results['auc'])) saved_model_path = estimator.export_saved_model(estimator.model_dir, serving_input_fn) model_graph = tfl.estimators.get_model_graph(saved_model_path) tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15) # + [markdown] id="LSXEaYAULRvf" # ### Crystals Lattice Ensemble # # TFL also provides a heuristic feature arrangement algorithm, called [Crystals](https://papers.nips.cc/paper/6377-fast-and-flexible-monotonic-functions-with-ensembles-of-lattices). The Crystals algorithm first trains a *prefitting model* that estimates pairwise feature interactions. It then arranges the final ensemble such that features with more non-linear interactions are in the same lattices. # # For Crystals models, you will also need to provide a `prefitting_input_fn` that is used to train the prefitting model, as described above. The prefitting model does not need to be fully trained, so a few epochs should be enough. # # + id="FjQKh9saMaFu" prefitting_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( x=train_x, y=train_y, shuffle=False, batch_size=BATCH_SIZE, num_epochs=PREFITTING_NUM_EPOCHS, num_threads=1) # + [markdown] id="fVnZpwX8MtPi" # You can then create a Crystal model by setting `lattice='crystals'` in the model config. # + id="f4awRMDe-eMv" # This is Crystals ensemble model with separate calibration: model output is # the average output of separately calibrated lattices. model_config = tfl.configs.CalibratedLatticeEnsembleConfig( feature_configs=feature_configs, lattices='crystals', num_lattices=5, lattice_rank=3) # A CannedClassifier is constructed from the given model config. estimator = tfl.estimators.CannedClassifier( feature_columns=feature_columns, model_config=model_config, feature_analysis_input_fn=feature_analysis_input_fn, # prefitting_input_fn is required to train the prefitting model. prefitting_input_fn=prefitting_input_fn, optimizer=tf.keras.optimizers.Adam(LEARNING_RATE), prefitting_optimizer=tf.keras.optimizers.Adam(LEARNING_RATE), config=tf.estimator.RunConfig(tf_random_seed=42)) estimator.train(input_fn=train_input_fn) results = estimator.evaluate(input_fn=test_input_fn) print('Crystals ensemble test AUC: {}'.format(results['auc'])) saved_model_path = estimator.export_saved_model(estimator.model_dir, serving_input_fn) model_graph = tfl.estimators.get_model_graph(saved_model_path) tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15) # + [markdown] id="Isb2vyLAVBM1" # You can plot feature calibrators with more details using the `tfl.visualization` module. # + id="DJPaREuWS2sg" _ = tfl.visualization.plot_feature_calibrator(model_graph, "age") _ = tfl.visualization.plot_feature_calibrator(model_graph, "restecg")
site/en-snapshot/lattice/tutorials/canned_estimators.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="gateB2MinsYB" import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.keras.layers import Conv2D, Input, Flatten, Reshape from tensorflow.keras.layers import Dense, Conv2DTranspose, BatchNormalization, Activation from tensorflow.keras.models import Model from tensorflow.keras.datasets import cifar10 from tensorflow.keras.layers import concatenate from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint from tensorflow.keras.utils import plot_model from tensorflow.keras import backend as K from tensorflow.keras.datasets import mnist import os import math # + colab={} colab_type="code" id="FiZNuaWyn0Ah" def rgb2gray(rgb): return np.dot(rgb[...,:3], [0.299, .587, 0.114]) # + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" id="lhcGJJv6oSWj" outputId="57335ea2-d889-4cc8-ab4d-4249cbb20b9e" (X_train, _), (X_test, _) = cifar10.load_data() img_rows = X_train.shape[1] img_cols = X_train.shape[2] channels = X_train.shape[3] imgs_dir = "saved_images" save_dir = os.path.join(os.getcwd(), imgs_dir) if not os.path.isdir(save_dir): os.makedirs(save_dir) # + colab={} colab_type="code" id="-xFR3DdRoew2" input_shape = (img_rows, img_cols, 1) batch_size = 32 kernel_size = 3 latent_dim = 256 layer_filters = [64, 128, 256] # + colab={"base_uri": "https://localhost:8080/", "height": 511} colab_type="code" id="8DGTLETypAA3" outputId="836defc7-1591-4a6f-8895-41ed4272b106" imgs = X_test[:100] imgs = imgs.reshape((10, 10, img_rows, img_cols, channels)) imgs = np.vstack([np.hstack(i) for i in imgs]) plt.figure() plt.axis('off') plt.title('Test color images (Ground Truth)') plt.imshow(imgs, interpolation='none') plt.savefig('%s/test_color.png' % imgs_dir) plt.show() X_train_gray = rgb2gray(X_train) X_test_gray = rgb2gray(X_test) imgs = X_test_gray[:100] imgs = imgs.reshape((10, 10, img_rows, img_cols)) imgs = np.vstack([np.hstack(i) for i in imgs]) plt.figure() plt.axis('off') plt.title('Test gray images (Input)') plt.imshow(imgs, interpolation='none', cmap='gray') plt.savefig('%s/test_gray.png' % imgs_dir) plt.show() # + colab={} colab_type="code" id="hCfKaguMpDx0" # normalize output train and test color images X_train = X_train.astype('float32') / 255 X_test = X_test.astype('float32') / 255 # normalize input train and test grayscale images X_train_gray = X_train_gray.astype('float32') / 255 X_test_gray = X_test_gray.astype('float32') / 255 X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols,channels) X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, channels) X_train_gray = X_train_gray.reshape(X_train_gray.shape[0], img_rows,img_cols, 1) X_test_gray = X_test_gray.reshape(X_test_gray.shape[0], img_rows, img_cols, 1) # + colab={"base_uri": "https://localhost:8080/", "height": 353} colab_type="code" id="8hoClNMUpgX8" outputId="5cdb3819-3319-49e6-f93e-5d81c1be819f" inputs = Input(shape=input_shape, name='encoder_input') x = inputs # Stack of Conv2D(32)-Conv2D(64) for filters in layer_filters: x = Conv2D(filters=filters, kernel_size=kernel_size, activation='relu', strides=2, padding='same')(x) shape = K.int_shape(x) # Latent Vector x = Flatten()(x) latent = Dense(latent_dim, name='latent_vector')(x) encoder = Model(inputs, latent, name='encoder') encoder.summary() # + colab={"base_uri": "https://localhost:8080/", "height": 386} colab_type="code" id="uTYXy5U3pkxJ" outputId="0aae95dd-3b5c-4c86-b588-b89239b75c2e" latent_inputs = Input(shape=(latent_dim,), name='decoder_input') x = Dense((shape[1]*shape[2]*shape[3]))(latent_inputs) x = Reshape((shape[1], shape[2], shape[3]))(x) for filters in layer_filters[::-1]: x = Conv2DTranspose(filters=filters, kernel_size=kernel_size, activation='relu', strides=2, padding='same')(x) # reconstruct the input outputs = Conv2DTranspose(filters=channels, kernel_size=kernel_size, activation='sigmoid', padding='same', name='decoder_output')(x) # instantiate decoder model decoder = Model(latent_inputs, outputs, name='decoder') decoder.summary() # + colab={"base_uri": "https://localhost:8080/", "height": 252} colab_type="code" id="72txI88FpqTa" outputId="2eefa43e-735c-4618-c12a-63ab01f6ca78" autoencoder = Model(inputs, decoder(encoder(inputs)), name='autoencoder') autoencoder.summary() # + colab={} colab_type="code" id="lW7qgm6Hp7mX" save_model_dir = os.path.join(os.getcwd(), "saved_models") model_name = 'colourizarion_autoencoder.{epoch:03d}.h5' if not os.path.isdir(save_model_dir): os.makedirs(save_model_dir) filepath = os.path.join(save_model_dir, model_name) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="WwQbg2wop-5y" outputId="fd365300-9c25-40bd-be72-f1e105e6b12c" lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, verbose=1, min_lr=0.5e-6) checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_loss', verbose=1, save_best_only=True) autoencoder.compile(loss='mse', optimizer='adam') callbacks = [lr_reducer, checkpoint] autoencoder.fit(X_train_gray, X_train, validation_data=(X_test_gray, X_test), epochs=50, batch_size=batch_size, callbacks=callbacks) # + colab={"base_uri": "https://localhost:8080/", "height": 264} colab_type="code" id="xirsBz7_qDiM" outputId="bfbf5593-40e1-41eb-c5c7-f8295e9e4602" x_decoded = autoencoder.predict(X_test_gray) # display the 1st 100 colorized images imgs = x_decoded[:100] imgs = imgs.reshape((10, 10, img_rows, img_cols, channels)) imgs = np.vstack([np.hstack(i) for i in imgs]) plt.figure() plt.axis('off') plt.title('Colorized test images (Predicted)') plt.imshow(imgs, interpolation='none') plt.savefig('%s/colorized.png' % imgs_dir) plt.show() # + colab={} colab_type="code" id="mNNHq0rzxsCw"
TensorFlow2/8-Autoencoder/.ipynb_checkpoints/Colourization Autoencoder-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from __future__ import absolute_import, print_function import time import argparse import os import sys import torch.utils.data from torch.backends import cudnn from torch.autograd import Variable import models import losses from utils import FastRandomIdentitySampler, mkdir_if_missing, logging, display from utils.serialization import save_checkpoint, load_checkpoint from trainer import train from utils import orth_reg import DataSet import numpy as np import os.path as osp use_gpu = torch.cuda.is_available() # - # Batch Norm Freezer : bring 2% improvement on CUB def set_bn_eval(m): classname = m.__class__.__name__ if classname.find('BatchNorm') != -1: m.eval() def main(args): # s_ = time.time() save_dir = args.save_dir mkdir_if_missing(save_dir) sys.stdout = logging.Logger(os.path.join(save_dir, 'log.txt')) display(args) start = 0 model = models.create(args.net, pretrained=True, dim=args.dim) # for vgg and densenet if args.resume is None: model_dict = model.state_dict() else: # resume model print('load model from {}'.format(args.resume)) chk_pt = load_checkpoint(args.resume) weight = chk_pt['state_dict'] start = chk_pt['epoch'] model.load_state_dict(weight) model = torch.nn.DataParallel(model) if use_gpu: model = model.cuda() # freeze BN if args.freeze_BN is True: print(40 * '#', '\n BatchNorm frozen') model.apply(set_bn_eval) else: print(40 * '#', 'BatchNorm NOT frozen') # Fine-tune the model: the learning rate for pre-trained parameter is 1/10 new_param_ids = set(map(id, model.module.classifier.parameters())) new_params = [p for p in model.module.parameters() if id(p) in new_param_ids] base_params = [p for p in model.module.parameters() if id(p) not in new_param_ids] param_groups = [ {'params': base_params, 'lr_mult': 0.0}, {'params': new_params, 'lr_mult': 1.0}] print('initial model is save at %s' % save_dir) optimizer = torch.optim.Adam(param_groups, lr=args.lr, weight_decay=args.weight_decay) criterion = losses.create(args.loss, margin=args.margin, alpha=args.alpha, base=args.loss_base).cuda() # Decor_loss = losses.create('decor').cuda() data = DataSet.create(args.data, ratio=args.ratio, width=args.width, origin_width=args.origin_width, root=args.data_root) train_loader = torch.utils.data.DataLoader( data.train, batch_size=args.batch_size, sampler=FastRandomIdentitySampler(data.train, num_instances=args.num_instances), drop_last=True, pin_memory=True, num_workers=args.nThreads) # save the train information for epoch in range(start, args.epochs): train(epoch=epoch, model=model, criterion=criterion, optimizer=optimizer, train_loader=train_loader, args=args) if epoch == 1: optimizer.param_groups[0]['lr_mul'] = 0.1 if (epoch + 1) % args.save_step == 0 or epoch == 0: if use_gpu: state_dict = model.module.state_dict() else: state_dict = model.state_dict() save_checkpoint({ 'state_dict': state_dict, 'epoch': (epoch + 1), }, is_best=False, fpath=osp.join(args.save_dir, 'ckp_ep' + str(epoch + 1) + '.pth.tar')) from types import SimpleNamespace args = SimpleNamespace( lr=1e-05, batch_size=80, num_instances=5, dim=512, width=227, origin_width=256, ratio=0.16, alpha=40, beta=0.1, orth_reg=0, k=16, margin=0.5, init='random', freeze_BN=True, data='cub', data_root='/data/81ac6083fc2442619b54e05eab96c148/', net='BN-Inception', loss='LiftedStructure', epochs=600, save_step=50, resume=None, print_freq=20, save_dir='ckps\LiftedStructure\cub\BN-Inception-dim512-lr0.000010-ratio0.160000-batch80', nThreads=8, momentum=0.9, weight_decay=5e-4, loss_base=0.75) print(args) main(args)
train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Note # * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. # + # Dependencies and Setup import pandas as pd # File to Load (Remember to Change These) file_to_load = "Resources/purchase_data.csv" # Read Purchasing File and store into Pandas data frame purchase_data = pd.read_csv(file_to_load) purchase_data.head() # - print(purchase_data.info()) # ## Player Count # * Display the total number of players # player = len(purchase_data["SN"]. value_counts()) player_count= pd.DataFrame([player], columns = ["Total Players"]) player_count # ## Purchasing Analysis (Total) # * Run basic calculations to obtain number of unique items, average price, etc. # # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # # + # Unique Items unique_items = len(purchase_data["Item ID"].unique()) unique_items # Average Purchase Price Average_price = purchase_data["Price"].count() Average_price Total_purchases = len(purchase_data["Item Name"]) # Total Revenue Revenue = purchase_data["Price"].sum() # New DataFrame Total_Purchasing_Analysis = pd.DataFrame({"Number of Unique Items": ["Unique Items"], "Average Price": [Average_price], "Number of Purchases": ["Total_Purchases"], "Total Revenue": [Revenue]}) # Formatting DataFrame Total_Purchasing_Analysis["Average Price"] = Total_Purchasing_Analysis["Average Price"].map("${:.2f}".format) Total_Purchasing_Analysis["Total Revenue"] = Total_Purchasing_Analysis["Total Revenue"].map("${:.2f}".format) Total_Purchasing_Analysis = Total_Purchasing_Analysis[["Number of Unique Items", "Average Price", "Number of Purchases", "Total Revenue"]] Total_Purchasing_Analysis # - # ## Gender Demographics # * Percentage and Count of Male Players # # # * Percentage and Count of Female Players # # # * Percentage and Count of Other / Non-Disclosed # # # # # ## Purchasing Analysis (Gender) # * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender # # # # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # ## Age Demographics # * Establish bins for ages # # # * Categorize the existing players using the age bins. Hint: use pd.cut() # # # * Calculate the numbers and percentages by age group # # # * Create a summary data frame to hold the results # # # * Optional: round the percentage column to two decimal points # # # * Display Age Demographics Table # # ## Purchasing Analysis (Age) # * Bin the purchase_data data frame by age # # # * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below # # # * Create a summary data frame to hold the results # # # * Optional: give the displayed data cleaner formatting # # # * Display the summary data frame # ## Top Spenders # * Run basic calculations to obtain the results in the table below # # # * Create a summary data frame to hold the results # # # * Sort the total purchase value column in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the summary data frame # # # ## Most Popular Items # * Retrieve the Item ID, Item Name, and Item Price columns # # # * Group by Item ID and Item Name. Perform calculations to obtain purchase count, average item price, and total purchase value # # # * Create a summary data frame to hold the results # # # * Sort the purchase count column in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the summary data frame # # # ## Most Profitable Items # * Sort the above table by total purchase value in descending order # # # * Optional: give the displayed data cleaner formatting # # # * Display a preview of the data frame # #
HeroesOfPymoli_starter-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.5 64-bit (''.venv'': poetry)' # language: python # name: python3 # --- # + import librosa as lb from librosa import display import matplotlib.pyplot as plt def visualizing_sound(file): ''' Argument: a path for a (.wav) file return: 1. spectogram of the choosen file 2. waveform of the choosen file ''' x, fs = lb.load(file) lb.display.waveplot(x, sr=fs) X = lb.stft(x) Xdb = lb.amplitude_to_db(abs(X)) plt.title('Waveform') plt.figure(figsize=(14, 5)) plt.title('Spectogram') lb.display.specshow(Xdb, sr=fs, x_axis='time', y_axis='hz') plt.colorbar() visualizing_sound("happy/female2_happy_1a_1.wav")
main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # # Enabling Data Collection for Models in Production # With this notebook, you can learn how to collect input model data from your Azure Machine Learning service in an Azure Blob storage. Once enabled, this data collected gives you the opportunity: # # * Monitor data drifts as production data enters your model # * Make better decisions on when to retrain or optimize your model # * Retrain your model with the data collected # # ## What data is collected? # * Model input data (voice, images, and video are not supported) from services deployed in Azure Kubernetes Cluster (AKS) # * Model predictions using production input data. # # **Note:** pre-aggregation or pre-calculations on this data are done by user and not included in this version of the product. # # ## What is different compared to standard production deployment process? # 1. Update scoring file. # 2. Update yml file with new dependency. # 3. Update aks configuration. # 4. Build new image and deploy it. # ## 1. Import your dependencies # + from azureml.core import Workspace, Run from azureml.core.compute import AksCompute, ComputeTarget from azureml.core.webservice import Webservice, AksWebservice from azureml.core.image import Image from azureml.core.model import Model import azureml.core print(azureml.core.VERSION) # - # ## 2. Set up your configuration and create a workspace # Follow Notebook 00 instructions to do this. # ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n') # ## 3. Register Model # Register an existing trained model, add descirption and tags. # + #Register the model from azureml.core.model import Model model = Model.register(model_path = "sklearn_regression_model.pkl", # this points to a local file model_name = "sklearn_regression_model.pkl", # this is the name the model is registered as tags = {'area': "diabetes", 'type': "regression"}, description = "Ridge regression model to predict diabetes", workspace = ws) print(model.name, model.description, model.version) # - # ## 4. *Update your scoring file with Data Collection* # The file below, compared to the file used in notebook 11, has the following changes: # ### a. Import the module # ```python # from azureml.monitoring import ModelDataCollector``` # ### b. In your init function add: # ```python # global inputs_dc, prediction_d # inputs_dc = ModelDataCollector("best_model", identifier="inputs", feature_names=["feat1", "feat2", "feat3", "feat4", "feat5", "Feat6"]) # prediction_dc = ModelDataCollector("best_model", identifier="predictions", feature_names=["prediction1", "prediction2"])``` # # * Identifier: Identifier is later used for building the folder structure in your Blob, it can be used to divide "raw" data versus "processed". # * CorrelationId: is an optional parameter, you do not need to set it up if your model doesn't require it. Having a correlationId in place does help you for easier mapping with other data. (Examples include: LoanNumber, CustomerId, etc.) # * Feature Names: These need to be set up in the order of your features in order for them to have column names when the .csv is created. # # ### c. In your run function add: # ```python # inputs_dc.collect(data) # prediction_dc.collect(result)``` # + # %%writefile score.py import pickle import json import numpy from sklearn.externals import joblib from sklearn.linear_model import Ridge from azureml.core.model import Model from azureml.monitoring import ModelDataCollector import time def init(): global model print ("model initialized" + time.strftime("%H:%M:%S")) # note here "sklearn_regression_model.pkl" is the name of the model registered under the workspace # this call should return the path to the model.pkl file on the local disk. model_path = Model.get_model_path(model_name = 'sklearn_regression_model.pkl') # deserialize the model file back into a sklearn model model = joblib.load(model_path) global inputs_dc, prediction_dc # this setup will help us save our inputs under the "inputs" path in our Azure Blob inputs_dc = ModelDataCollector(model_name="sklearn_regression_model", identifier="inputs", feature_names=["feat1", "feat2"]) # this setup will help us save our ipredictions under the "predictions" path in our Azure Blob prediction_dc = ModelDataCollector("sklearn_regression_model", identifier="predictions", feature_names=["prediction1", "prediction2"]) # note you can pass in multiple rows for scoring def run(raw_data): global inputs_dc, prediction_dc try: data = json.loads(raw_data)['data'] data = numpy.array(data) result = model.predict(data) print ("saving input data" + time.strftime("%H:%M:%S")) inputs_dc.collect(data) #this call is saving our input data into our blob prediction_dc.collect(result)#this call is saving our prediction data into our blob print ("saving prediction data" + time.strftime("%H:%M:%S")) # you can return any data type as long as it is JSON-serializable return result.tolist() except Exception as e: error = str(e) print (error + time.strftime("%H:%M:%S")) return error # - # ## 5. *Update your myenv.yml file with the required module* # + from azureml.core.conda_dependencies import CondaDependencies myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn']) myenv.add_pip_package("azureml-monitoring") with open("myenv.yml","w") as f: f.write(myenv.serialize_to_string()) # - # ## 6. Create your new Image # + from azureml.core.image import ContainerImage image_config = ContainerImage.image_configuration(execution_script = "score.py", runtime = "python", conda_file = "myenv.yml", description = "Image with ridge regression model", tags = {'area': "diabetes", 'type': "regression"} ) image = ContainerImage.create(name = "myimage1", # this is the model object models = [model], image_config = image_config, workspace = ws) image.wait_for_creation(show_output = True) # - print(model.name, model.description, model.version) # ## 7. Deploy to AKS service # ### Create AKS compute if you haven't done so. # + # Use the default configuration (can also provide parameters to customize) prov_config = AksCompute.provisioning_configuration() aks_name = 'my-aks-test1' # Create the cluster aks_target = ComputeTarget.create(workspace = ws, name = aks_name, provisioning_configuration = prov_config) # - # %%time aks_target.wait_for_completion(show_output = True) print(aks_target.provisioning_state) print(aks_target.provisioning_errors) # If you already have a cluster you can attach the service to it: # ```python # # %%time # resource_id = '/subscriptions/<subscriptionid>/resourcegroups/<resourcegroupname>/providers/Microsoft.ContainerService/managedClusters/<aksservername>' # create_name= 'myaks4' # attach_config = AksCompute.attach_configuration(resource_id=resource_id) # aks_target = ComputeTarget.attach(workspace = ws, # name = create_name, # attach_configuration=attach_config) # ## Wait for the operation to complete # aks_target.wait_for_provisioning(True)``` # ### a. *Activate Data Collection and App Insights through updating AKS Webservice configuration* # In order to enable Data Collection and App Insights in your service you will need to update your AKS configuration file: #Set the web service configuration aks_config = AksWebservice.deploy_configuration(collect_model_data=True, enable_app_insights=True) # ### b. Deploy your service if aks_target.provisioning_state== "Succeeded": aks_service_name ='aks-w-dc0' aks_service = Webservice.deploy_from_image(workspace = ws, name = aks_service_name, image = image, deployment_config = aks_config, deployment_target = aks_target ) aks_service.wait_for_deployment(show_output = True) print(aks_service.state) else: raise ValueError("aks provisioning failed, can't deploy service") # ## 8. Test your service and send some data # **Note**: It will take around 15 mins for your data to appear in your blob. # The data will appear in your Azure Blob following this format: # # /modeldata/subscriptionid/resourcegroupname/workspacename/webservicename/modelname/modelversion/identifier/year/month/day/data.csv # + # %%time import json test_sample = json.dumps({'data': [ [1,2,3,4,54,6,7,8,88,10], [10,9,8,37,36,45,4,33,2,1] ]}) test_sample = bytes(test_sample,encoding = 'utf8') if aks_service.state == "Healthy": prediction = aks_service.run(input_data=test_sample) print(prediction) else: raise ValueError("Service deployment isn't healthy, can't call the service") # - # ## 9. Validate you data and analyze it # You can look into your data following this path format in your Azure Blob (it takes up to 15 minutes for the data to appear): # # /modeldata/**subscriptionid>**/**resourcegroupname>**/**workspacename>**/**webservicename>**/**modelname>**/**modelversion>>**/**identifier>**/*year/month/day*/data.csv # # For doing further analysis you have multiple options: # ### a. Create DataBricks cluter and connect it to your blob # https://docs.microsoft.com/en-us/azure/azure-databricks/quickstart-create-databricks-workspace-portal or in your databricks workspace you can look for the template "Azure Blob Storage Import Example Notebook". # # # Here is an example for setting up the file location to extract the relevant data: # # <code> file_location = "wasbs://mycontainer@storageaccountname.blob.core.windows.net/unknown/unknown/unknown-bigdataset-unknown/my_iterate_parking_inputs/2018/&deg;/&deg;/data.csv" # file_type = "csv"</code> # # ### b. Connect Blob to Power Bi (Small Data only) # 1. Download and Open PowerBi Desktop # 2. Select “Get Data” and click on “Azure Blob Storage” >> Connect # 3. Add your storage account and enter your storage key. # 4. Select the container where your Data Collection is stored and click on Edit. # 5. In the query editor, click under “Name” column and add your Storage account Model path into the filter. Note: if you want to only look into files from a specific year or month, just expand the filter path. For example, just look into March data: /modeldata/subscriptionid>/resourcegroupname>/workspacename>/webservicename>/modelname>/modelversion>/identifier>/year>/3 # 6. Click on the double arrow aside the “Content” column to combine the files. # 7. Click OK and the data will preload. # 8. You can now click Close and Apply and start building your custom reports on your Model Input data. # # Disable Data Collection aks_service.update(collect_model_data=False) # ## Clean up # %%time aks_service.delete() image.delete() model.delete()
how-to-use-azureml/deployment/enable-data-collection-for-models-in-aks/enable-data-collection-for-models-in-aks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Machine Learning with H2O - Tutorial 2: Basic Data Manipulation # # <hr> # # **Objective**: # # - This tutorial demonstrates basic data manipulation with H2O. # # <hr> # # **Titanic Dataset:** # # - Source: https://www.kaggle.com/c/titanic/data # # <hr> # # **Full Technical Reference:** # # - http://docs.h2o.ai/h2o/latest-stable/h2o-py/docs/frame.html # # <br> # # Start and connect to a local H2O cluster import h2o h2o.init(nthreads = -1) # <br> # Import Titanic data (local CSV) titanic = h2o.import_file("kaggle_titanic.csv") # Explore the dataset using various functions titanic.head(10) # <br> # # Explain why we need to transform # # <br> # Explore the column 'Survived' titanic['Survived'].summary() # Use hist() to create a histogram titanic['Survived'].hist() # Use table() to summarize 0s and 1s titanic['Survived'].table() # Convert 'Survived' to categorical variable titanic['Survived'] = titanic['Survived'].asfactor() # Look at the summary of 'Survived' again # The feature is now an 'enum' (enum is the name of categorical variable in Java) titanic['Survived'].summary() # <br> # # Doing the same for 'Pclass' # # <br> # Explore the column 'Pclass' titanic['Pclass'].summary() # Use hist() to create a histogram titanic['Pclass'].hist() # Use table() to summarize 1s, 2s and 3s titanic['Pclass'].table() # Convert 'Pclass' to categorical variable titanic['Pclass'] = titanic['Pclass'].asfactor() # Explore the column 'Pclass' again titanic['Pclass'].summary()
introduction_to_machine_learning/py_02_data_manipulation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8 - AzureML # language: python # name: python38-azureml # --- # # Log metrics with MLflow in PyTorch Lightning # # description: log mlflow metrics in pytorch lightning with azureml as the backend tracking store # # Lightning supports many popular [logging frameworks](https://pytorch-lightning.readthedocs.io/en/stable/loggers.html). [MLflow](https://mlflow.org/) is a popular open-source library for managing the lifecycle of your ML projects. Azure ML offers integration with MLflow, including for training. Specifically, Azure ML integrates as a backend tracking store for MLflow's [Tracking](https://mlflow.org/docs/latest/tracking.html#) component for logging metrics and managing runs. This tutorial will cover using the MLflow logger and leveraging the Azure ML MLflow integration. # + from azureml.core import Workspace ws = Workspace.from_config() ws # + # training script source_dir = "src" script_name = "train-with-mlflow-logging.py" # environment file environment_file = "environment.yml" # azure ml settings environment_name = "pt-lightning" experiment_name = "pt-lightning-mlflow-tutorial" compute_name = "gpu-K80-2" # - # ## Create environment # # Define a conda environment YAML file with your training script dependencies and create an Azure ML environment. This notebook will use the same environment definition that was used for part 1 of the tutorial. The dependencies include **mlflow** and **azureml-mlflow**, which are needed for logging with MLflow. # + from azureml.core import Environment env = Environment.from_conda_specification(environment_name, environment_file) # specify a GPU base image env.docker.enabled = True env.docker.base_image = ( "mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.2-cudnn8-ubuntu18.04" ) # - # ## Enable logging in training script # # In *train_with_mlfow_logging.py*: # # ### 1. Create an MLFlowLogger # To configure the MLFlowLogger, you will need to provide the following: # # * **Tracking URI**: Specify the tracking URI to point to your Azure ML Workspace in order to use Azure ML as the backend tracking store for MLflow. You can get the URI with mlflow.get_tracking_uri(). # * **Experiment name**: Use the same name as the name of your Azure ML experiment. # * **Run ID**: You will need to link the MLFlowLogger's run ID to the ID of the Azure ML run. # # To get the MLflow Run object of the training run, use the mlflow `mlflow.start_run()` method. Once you have the Run object, you can then access the experiment information. # # ```python # import mlflow # import os # # with mlflow.start_run() as run: # mlflow_uri = mlflow.get_tracking_uri() # exp_id = run.info.experiment_id # exp_name = mlflow.get_experiment(exp_id).name # # mlf_logger = MLFlowLogger(experiment_name=exp_name, tracking_uri=mlflow_uri) # mlf_logger._run_id = run.info.run_id # # trainer = pl.Trainer.from_argparse_args(args, logger=mlf_logger) # ``` # # Lightning will then take care of setting the tracking URI, creating the MLflow experiment, starting the MLflow run, and creating the underlying `MlflowClient` object. # # # ### 2. Log metrics # You can then log metrics and other objects in your script. This tutorial's training script leverages Lightning's automatic log functionalities to log the loss metric by calling `self.log()` inside the `training_step()` method. Since logging too frequently can slow down training, the tutorial logs at the end of every epoch. # # ```python # self.log('loss', loss, on_epoch=True, on_step=False) # ``` # # For more information on logging and the configurable options, see Lightning's [Logging](https://pytorch-lightning.readthedocs.io/en/stable/logging.html) documentation and the [MLFlowLogger](https://pytorch-lightning.readthedocs.io/en/stable/logging.html#mlflow) reference documentation. # ### Configure and run training job # # Create a ScriptRunConfig to specify the training script & arguments, environment, and cluster to run on. # + from azureml.core import ScriptRunConfig, Experiment src = ScriptRunConfig( source_directory=source_dir, script=script_name, arguments=["--max_epochs", 25, "--gpus", 2, "--accelerator", "ddp"], compute_target=compute_name, environment=env, ) run = Experiment(ws, experiment_name).submit(src) run # - # If you navigate to the Azure ML studio UI, you can see the logged metrics visualized under the Experiment view and the "Metrics" tab of the individual Run view. run.wait_for_completion(show_output=True)
python-sdk/experimental/using-pytorch-lightning/3.log-with-mlflow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/JairAlonso0/daa_2021_1/blob/master/13Enero.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="2y7MoXN7ZTzE" class NodoArbol: def __init__( self , dato , hijo_izq=None , hijo_der=None): self.dato = dato self.left = hijo_izq self.right = hijo_der # + id="OYwIwkroS2Sd" class BinarySearchTree: def __init__( self ): self.__root = None def insert( self, value ): if self.__root == None: self.__root = NodoArbol(value,None,None) else: # preguntar si value es menor que root, de serl el caso # insertar a la izq, PERO... Puede ser el caso que el # sub árbol izq ya tenga muchos elementos self.__insert_nodo__( self.__root , value) def __insert_nodo__( self, nodo , value ): if nodo.data == value: pass elif value < nodo.data: # true va a la IZQ if nodo.left == None: # si hay espacio en la izq, nodo.left = NodoArbol(value, None,None) else: self.__insert_nodo__( nodo.left , value ) else: if noodo.right == None: nodo.right = NodoArbol(value , None, None) else: self.__insert_nodo__( nodo.right, value) def buscar( self, value): if self.__root == None: return False else: return self.__busca_nodo( self.__root , value ) def __busca_nodo( self , nodo , value ): if nodo == None: return None elif nodo.dato == value: return nodo elif value < nodo.dato: return self.__busca_nodo( nodo.left ,value ) else: return self.__busca_nodo( nodo.right ,value ) def transversal( self, format="inorden" ): if formart == "inorden": self.__Recorrido_in( self.root ) elif format == "preorden" : self.__recorrido_pre( self.__root ) elif format == "posorden": self.__recorrido_pos( self.__roo ) else: print( "Formato de recorrrido no válido" ) def __recorrido_pre( self , nodo ): if nodo != None: print(nodo.dato) self.__recorrido_pre( nodo.left ) self.__recorrido_pre( nodo.right ) def __recorrido_in( self, nodo ): if nodo != None: self.__recorrido_in(nodo.left) print(nodo.dato,end=",") self.__recorrido_in(nodo.right) def __recorrido_pos( self, nodo): if nodo != None: self.__recorrido_pos(nodo.left) self.__recorrido_pos(nodo.right) print(nodo.dato,end=",") # + id="JaBTe70sVU-e" bst = BinarySearchTree() bst.insert(50) bst.insert(30) bst.insert(20) res = bst.buscar(30) print(" Dato : "+str(res)) print( bst.buscar(40)) bst.transversal(formart = "preorden")
13Enero.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Correlation function of DR72 SDSS VAGC Catalog # First import all the modules such as healpy and astropy needed for analyzing the structure import healpix_util as hu import astropy as ap import numpy as np from astropy.io import fits from astropy.table import Table import astropy.io.ascii as ascii from astropy.io import fits from astropy.constants import c import matplotlib.pyplot as plt import math as m from math import pi #from scipy.constants import c import scipy.special as sp from astroML.decorators import pickle_results from scipy import integrate import warnings from sklearn.neighbors import BallTree import pickle import multiprocessing as mp import time from lccmetric import * from progressbar import * from tqdm import * from functools import partial import pymangle #from astroML.datasets import fetch_sdss_specgals #from astroML.correlation import bootstrap_two_point_angular # %matplotlib inline # Getting back the objects: with open('datsLCf.pkl') as f: # Python 3: open(..., 'rb') dat = pickle.load(f) dat bins=np.arange(0.,0.08,0.005) print bins Nbins=len(bins) Nbins binsq=(bins*0.007)**2 binsq LCcmetric(dat[0],dat[1]) # + # %%time BT_DLCc = BallTree(dat,metric='pyfunc',func=LCcmetric,leaf_size=5) with open('BTDdatsLCc.pkl', 'w') as f: pickle.dump(BT_DLCc,f) # + with open('BTDdatsLCc.pkl') as f: BTDLCc = pickle.load(f) BTDLCc # + # %%time start_time=time.time() counts_DD=BTDLCc.two_point_correlation(dat,binsq) print counts_DD end_time=time.time() tottime=end_time-start_time print "Total run time:" print tottime with open('BTDcDDLCc.pkl', 'w') as f: pickle.dump(counts_DD,f) # + with open('BTDcDDLCc.pkl') as f: counts_DD = pickle.load(f) counts_DD # - DD=np.diff(counts_DD) DD plt.plot(bins[1:len(bins)],DD,'ro-') # BallTree.two_point_correlation works almost 10 times faster! with leaf_size=5 Going with it to the random catalog # Getting back the objects: with open('rDR7200kLCsrarf.pkl') as f: # Python 3: open(..., 'rb') datR = pickle.load(f) datR # + # %%time BT_RLCc = BallTree(datR,metric='pyfunc',func=LCcmetric,leaf_size=5) with open('BTR200kdatsLCc.pkl', 'w') as f: pickle.dump(BT_RLCc,f) # + with open('BTR200kdatsLCc.pkl') as f: BTRLCc = pickle.load(f) BTRLCc # + # %%time start_time=time.time() counts_RR=BTRLCc.two_point_correlation(datR,binsq) print counts_RR end_time=time.time() tottime=end_time-start_time print "Total run time:" print tottime with open('BTR200kcRRLCc.pkl', 'w') as f: pickle.dump(counts_RR,f) # + with open('BTR200kcRRLCc.pkl') as f: counts_RR = pickle.load(f) counts_RR # - RR=np.diff(counts_RR) RR plt.plot(bins[1:len(bins)],RR,'bo-') RR_zero = (RR == 0) RR[RR_zero] = 1 # + # %%time start_time=time.time() counts_DR=BTRLCc.two_point_correlation(dat,binsq) print counts_DR end_time=time.time() tottime=end_time-start_time print "Total run time:" print tottime with open('BTR200kcDRLCc.pkl', 'w') as f: pickle.dump(counts_DR,f) # + with open('BTR200kcDRLCc.pkl') as f: counts_DR = pickle.load(f) counts_DR # - DR=np.diff(counts_DR) DR corrells=(4.0 * DD - 4.0 * DR + RR) / RR corrells plt.plot(bins[1:len(bins)],corrells,'go-') plt.plot(bins[1:len(bins)],bins[1:len(bins)]*bins[1:len(bins)]*corrells*(c*1e-5)**2,'go-') plt.plot(bins[2:len(bins)],bins[2:len(bins)]*bins[2:len(bins)]*corrells[1:len(bins)]*(c*1e-5)**2,'go-') plt.plot(bins[2:len(bins)],corrells[1:len(bins)],'go-') plt.plot(bins[2:len(bins)],corrells[1:len(bins)],'go-') plt.savefig("correl2xlsLCc.pdf") plt.plot(bins[2:len(bins)]*c/1e5,corrells[1:len(bins)],'bo-') plt.savefig("correl2x1lsLCc.pdf") plt.yscale('log') plt.plot(bins[1:len(bins)]*c/1e5,corrells,'bo-') plt.savefig("correllsfiglogLCc.pdf") plt.yscale('log') plt.plot(bins[2:len(bins)]*c/1e5,corrells[1:len(bins)],'ro-') plt.savefig("correllslog2xLCc.pdf") plt.yscale('log') plt.xscale('log') plt.plot(bins[1:len(bins)]*c/1e5,corrells,'bo-') plt.savefig("correllsloglogLCc.pdf")
DR72_VAGC_correl_V06_LCc_w07.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np import matplotlib.pyplot as plt df1 = pd.read_csv("data_after_clustering.csv") # - df1.head() # 数据中的分类类型数量 df1["TRUE VALUE"].value_counts() # data1是去掉真实分类信息的数据集(含有聚类后的结果) data1 = df1.drop("TRUE VALUE", axis=1) data1.head() df2 = pd.read_excel("data.xlsx", engine="openpyxl") df2.head() # 只含特征值的完整数据集 data2 = df2.drop("TRUE VALUE", axis=1) # 只含真实分类信息的完整数据集 labels = df2["TRUE VALUE"] # 查看使用kmeans聚类后的分类标签值,两类 data1['km_clustering_label'].hist() # + from sklearn.model_selection import StratifiedShuffleSplit # 基于kmeans聚类结果的分层抽样 split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(data1, data1["km_clustering_label"]): strat_train_set = data1.loc[train_index] strat_test_set = data1.loc[test_index] # - def clustering_result_propotions(data): """ 分层抽样后,训练集或测试集里不同分类标签的数量比 :param data: 训练集或测试集,纯随机取样或分层取样 """ return data["km_clustering_label"].value_counts() / len(data) # 分层抽样获取的测试集中,不同分类标签的数量比 clustering_result_propotions(strat_test_set) # 分层抽样获取的训练集中,不同分类标签的数量比 clustering_result_propotions(strat_train_set) # 完整的数据集中,不同分类标签的数量比 clustering_result_propotions(data1) # + from sklearn.model_selection import train_test_split # 纯随机取样 random_train_set, random_test_set = train_test_split(data1, test_size=0.2, random_state=42) # 完整的数据集、分层抽样后的测试集、纯随机抽样后的测试集中,不同分类标签的数量比 compare_props = pd.DataFrame({ "Overall": clustering_result_propotions(data1), "Stratified": clustering_result_propotions(strat_test_set), "Random": clustering_result_propotions(random_test_set), }).sort_index() # 计算分层抽样和纯随机抽样后的测试集中不同分类标签的数量比,和完整的数据集中不同分类标签的数量比的误差 compare_props["Rand. %error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100 compare_props["Start. %error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100 compare_props # + from sklearn.metrics import f1_score def get_classification_marks(model, data, labels, train_index, test_index): """ 获取分类模型(二元或多元分类器)的评分:F1值 :param data: 只含有特征值的数据集 :param labels: 只含有标签值的数据集 :param train_index: 分层抽样获取的训练集中数据的索引 :param test_index: 分层抽样获取的测试集中数据的索引 :return: F1评分值 """ m = model(random_state=42) m.fit(data.loc[train_index], labels.loc[train_index]) test_labels_predict = m.predict(data.loc[test_index]) score = f1_score(labels.loc[test_index], test_labels_predict, average="weighted") return score # + from sklearn.linear_model import SGDClassifier # 用分层抽样后的训练集训练分类模型后的评分值 start_marks = get_classification_marks(SGDClassifier, data2, labels, strat_train_set.index, strat_test_set.index) start_marks # - # 用纯随机抽样后的训练集训练分类模型后的评分值 random_marks = get_classification_marks(SGDClassifier, data2, labels, random_train_set.index, random_test_set.index) random_marks # + import numpy as np from sklearn.metrics import f1_score, r2_score from sklearn.model_selection import StratifiedKFold from sklearn.base import clone, BaseEstimator, TransformerMixin class stratified_cross_val_score(BaseEstimator, TransformerMixin): """实现基于分层抽样的k折交叉验证""" def __init__(self, model, random_state=0, cv=5, pattern="classification"): """ :model: 训练的模型(回归或分类) :random_state: 模型的随机种子值 :cv: 交叉验证的次数 :pattern: classification和regression两种选择 """ self.model = model self.random_state = random_state self.cv = cv self.pattern = pattern self.scores_ = [] self.best_score_ = [] self.estimators_ = [] self.best_estimator_ = [] self.i = 0 def fit(self, X, y, layer_tag): """ :param X: 只含有特征值的完整数据集 :param y: 只含有标签值的完整数据集 :param tag: 只含有分层依据的完整数据集(此例是KMeans聚类结果) """ skfolds = StratifiedKFold(n_splits=self.cv, random_state=self.random_state, shuffle=True) for train_index, test_index in skfolds.split(X, layer_tag): # 复制要训练的模型(分类或回归) clone_model = clone(self.model) strat_X_train_folds, strat_X_test_fold = X.iloc[train_index], X.iloc[test_index] strat_y_train_folds, strat_y_test_fold = y.iloc[train_index], y.iloc[test_index] # 训练模型 clone_model.fit(strat_X_train_folds, strat_y_train_folds) # 保留模型 self.estimators_.append(clone_model) # 预测值(这里是分类模型的分类结果) test_labels_pred = clone_model.predict(strat_X_test_fold) if self.pattern == "classification": # 分类模型用F1值 score_fold = f1_score(y.iloc[test_index], test_labels_pred, average="weighted") elif self.pattern == "regression": # 回归模型使用r2值 score_fold = r2_score(y.iloc[test_index], test_labels_pred) # 避免重复向列表里重复添加值 if self.i < self.cv: self.scores_.append(score_fold) else: None self.i += 1 # 获取评分最高模型的索引 argmax = np.argmax(self.scores_) self.best_score_ = self.scores_[argmax] self.best_estimator_ = self.estimators_[argmax] def transform(self, X, y=None): return self def mean(self): """返回交叉验证评分的平均值""" return np.array(self.scores_).mean() def std(self): """返回交叉验证评分的标准差""" return np.array(self.scores_).std() # + from sklearn.linear_model import SGDClassifier, LinearRegression # 分类模型 #clf_model = SGDClassifier(max_iter=5, tol=-np.infty, random_state=42) # 回归模型 reg_model = LinearRegression() # 基于分层抽样的交叉验证,pattern默认值为classification,如若是回归模型,需设置pattern为regression reg_cross_val = stratified_cross_val_score(reg_model, cv=5, random_state=42, pattern="regression") # data2是只含有特征值的完整数据集, labels是只含分类标签值的完整数据集,data1是含有特征值和聚类结果的完整数据集 reg_cross_val.fit(data2, labels, data1["km_clustering_label"]) # - # 每折交叉验证的评分 reg_cross_val.scores_ # 五折交叉验证中最好的评分 reg_cross_val.best_score_ # 交叉验证评分的平均值 reg_cross_val.mean() # 交叉验证评分的标准差 reg_cross_val.std() # 五折交叉验证的所有模型 reg_cross_val.estimators_ # 五折交叉验证中的最优模型 best_model = reg_cross_val.best_estimator_
chinese/stratification/stratification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Este programa define las funciones likelihood, priors y posterior probability (quedan guardadas en Mis_funciones.py) # + def Modelo(Mags, Phi, Me, alpha): """ Modelo para ajustar Parameters ---------- Mags, ERR : list Magnitudes observadas Phi, Me, alpha : .float, .float, .float Parámetros del modelo Returns -------- F : list Valores de la función """ import numpy as np M = Mags # Definición para mejor vizualización F = [] # Contendrá valores de la función ij = 0 while ij<len(M): # Para que no sea tan larga la def. de "F": parto en factores a la función # F = f1*f2*f3 f1 = 0.4*np.log(10)*Phi f2 = 10**(-0.4*(M[ij]-Me)*(alpha+1)) f3 = np.exp( -10**(-0.4*(M[ij]-Me)) ) F.append( f1*f2*f3 ) ij = ij + 1 return F def Likelihood(Mags, Lum, ERR, Phi, Me, alpha): """ Función likelihood para el problema Parameters ---------- Mags : list Magnitudes observadas Lum, ERR : list, list Luminosidad y sus errores asociados Phi, Me, alpha : .float, .float, .float Parámetros del modelo Returns -------- LK : .float Valor del likelihood """ import numpy as np import scipy.stats as st Obs = np.array(Lum) Calc = np.array( Modelo(Mags=Mags, Phi=Phi, Me=Me, alpha=alpha) ) p = st.norm(loc=Calc, scale=ERR).pdf(Obs) LK = p.prod() return LK def PRIOR(Phi, Phimin, Phimax, Me, Memin, Memax, alpha, alphamin, alphamax): """Función prior, es un escalón en 3d Parameters ---------- Phi, Phimin, Phimax : .float, .float, .float Valor del parámetro Phi y sus limitens inferiores y superiores para el escalón Me, Memin, Memax : .float, .float, .float Valor del parámetro Me y sus limitens inferiores y superiores para el escalón alpha, alphamin, alphamax : .float, .float, .float Valor del parámetro alpha y sus limitens inferiores y superiores para el escalón Returns -------- Prob_norm: .float Mass probability function para el punto definido por Phi, Me y alpha """ norm = abs(Phimax - Phimin) * abs(Memax - Memin)* abs(alphamax - alphamin) rPhi = (Phi < Phimax) * (Phi > Phimin) # Rango para Phi rMe = (Me < Memax) * (Me > Memin) ralpha = (alpha < alphamax) * (alpha > alphamin) print(norm, rPhi, rMe, ralpha) Prob = 1. * rPhi * rMe * ralpha Prob_norm = Prob/norm # Normalizo return Prob_norm def POSTERIOR(Mags, Lum, ERR, Phi, Phimin, Phimax, Me, Memin, Memax, alpha, alphamin, alphamax): """ Devuelve el valor de la función posterior en función del likelihood y el prior Parameters ---------- Tiene los mismos parámetros que las funciones Likelihood() y PRIOR() Returns ------- post : .float Valor de la probalibidad posterior""" post = Likelihood(Mags, Lum, ERR, Phi, Me, alpha) * PRIOR(Phi, Phimin, Phimax, Me, Memin, Memax, alpha, alphamin, alphamax) return post
Guia_4/Problema_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _uuid="8cfe0690a55d0651ce9af5839d5bd492e271cca4" # <a href="https://www.bigdatauniversity.com"><img src = "https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png" width = 300, align = "center"></a> # # <h1 align=center><font size = 5>Sets and Dictionaries</font></h1> # + [markdown] _uuid="29cdfa8027ab423470c19e9ce745cd4921a1640a" # # ## Table of Contents # # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <li><a href="#ref1">Sets</a></li> # # <br> # <p></p> # Estimated Time Needed: <strong>20 min</strong> # </div> # # <hr> # + [markdown] _uuid="716c33706084a9867291e0b8a4fc9b39016f13f4" # <a id="ref1"></a> # <center><h2>Sets</h2></center> # # In this lab, we are going to take a look at sets in Python. A set is a unique collection of objects in Python. You can denote a set with a curly bracket **{}**. Python will remove duplicate items: # # + _uuid="d1ba0abc519296f83c167c0ff7642b5869a79702" set1={"pop", "rock", "soul", "hard rock", "rock", "R&B", "rock", "disco"} set1 # + [markdown] _uuid="b92fed46536a8b1aab7d426b99f7eb9b9f9a9c8a" # The process of mapping is illustrated in the figure: # # + [markdown] _uuid="af8c759c9e3888b79add6005af595153a4c682cb" # <a ><img src = https://ibm.box.com/shared/static/i0xb9qbetek7kbh17krx05i4lqmywahm.png width = 1100, align = "center"></a> # # + [markdown] _uuid="51731f02486fcaedea8030ecc76eb807b29ff895" # You can also create a set from a list as follows: # + _uuid="2ff4c43caf3800becb8bf025c14e66b0e90b7bab" album_list =[ "<NAME>", "Thriller", 1982, "00:42:19", \ "Pop, Rock, R&B", 46.0, 65, "30-Nov-82", None, 10.0] album_set = set(album_list) album_set # + [markdown] _uuid="f081f2382c478405e0faac8297daabe053b84d29" # Now let us create a set of genres: # + _uuid="b588a803e16306acd18e27fec84dc2a46df14292" music_genres = set(["pop", "pop", "rock", "folk rock", "hard rock", "soul", \ "progressive rock", "soft rock", "R&B", "disco"]) music_genres # + [markdown] _uuid="781d0a9834511b69984e6502af66e63c663d1106" # #### Convert the following list to a set ['rap','house','electronic music', 'rap']: # + _uuid="95c0deeb017bcb0bbb31427cecebaf26bdbf57cb" new_music_genre = set(['rap','house','electronic music', 'rap']) new_music_genre # + [markdown] _uuid="51e3f44ad71619e00ff02e37029e538e4825f9fb" # Notice that the duplicates are removed and the output is sorted. # + [markdown] _uuid="c7604636f8a6fa948ffdd1f7855b4e28d42f8ba9" # Let us get the sum of the claimed sales: # + [markdown] _uuid="88ea818e4adc2b998ec8428cbca7b2aada98fe70" # #### Consider the list A=[1,2,2,1] and set B=set([1,2,2,1]), does sum(A)=sum(B) # + _uuid="8de4f540726526cde355e156f9713152378572c6" # + [markdown] _uuid="d9f89cc8719a0fff4c550e845957557c5243ffcc" # <div align="right"> # <a href="#2" class="btn btn-default" data-toggle="collapse">Click here for the solution</a> # # </div> # <div id="2" class="collapse"> # ``` # No, when casting a list to a set, the new set has no repeat elements. Run the following code to verify: # A=[1,2,2,1] # B=set([1,2,2,1]) # print("the sum of A is:",sum(A)) # print("the sum of B is:",sum(B)) # ``` # </div> # # + [markdown] _uuid="06d46b95ea49a53f6425f59a72ae3194e3526569" # Now let's determine the average rating: # + [markdown] _uuid="9c1a67bfebc586e33bcdad8e05b91f7439fa4268" # ### Set Operations # + [markdown] _uuid="1aec12b4d0cffceadd5196e0f90876de8065b830" # Let us go over Set Operations, as these can be used to change the set. Consider the set **A**: # + _uuid="46a789972e2b52d8fad2d378028652e2a62cef75" A = set(["Thriller","Back in Black", "AC/DC"] ) A # + [markdown] _uuid="d40271ac19dc330b37bee9897550efc2d1887262" # We can add an element to a set using the **add()** method: # + _uuid="de7740ead5bc9ecb5b086149d569c0d95890ff95" A.add("NSYNC") A # + [markdown] _uuid="5a461af38aed61ed645bee35d5110ae1057c25d3" # If we add the same element twice, nothing will happen as there can be no duplicates in a set: # # + _uuid="46b010d3da8189d8fc242c61725f9ffad2ef0bd5" A.add("NSYNC") A # + [markdown] _uuid="9b3cb96c5fc03b679f974110e77238c23680b84c" # We can remove an item from a set using the remove method: # + _uuid="73c14d5a2db16955a12560b30a86db62ea2b0865" A.remove("NSYNC") A # + [markdown] _uuid="117e908263b7c520cfe93834c2ff61481755258f" # We can verify if an element is in the set using the **in** command : # + _uuid="5ab73b6c8711193b536c03dce9b8648b68b9bcaa" "AC/DC" in A # + [markdown] _uuid="345425fbea8aa47b7880a8f161a9740fcdfd29e8" # ### Working with sets # + [markdown] _uuid="59af0f72a64441ab317bbd3ff9c0e874dc330c74" # Remember that with sets you can check the difference between sets, as well as the symmetric difference, intersection, and union: # + [markdown] _uuid="bfd05e7cc5778703900a6fe0adb833774944dadc" # Consider the following two sets: # + _uuid="3d0b4a98e3dfb93c76c237784824552f79f04c23" album_set1 = set(["Thriller",'AC/DC', 'Back in Black'] ) album_set2 = set([ "AC/DC","Back in Black", "The Dark Side of the Moon"] ) # + [markdown] _uuid="10f9beb9b4371ef20334b13be3ee7e163d64e685" # <a ><img src = "https://ibm.box.com/shared/static/bl6ijga6g8r7bdfkl17qw7zh62czte47.png" width = 850, align = "center"></a> # <h4 align=center> Visualizing the sets as two circles # # </h4> # + _uuid="b2b62446b9c2ee20a358702f5616cb5782a8379c" album_set1, album_set2 # + [markdown] _uuid="d0a737915438c3739c4fa65f97e3a4a51f56630a" # As both sets contain 'AC/DC' and 'Back in Black' we represent these common elements with the intersection of two circles. # # + [markdown] _uuid="3620dcb546bd6bc7d119f46458d3a053f6e60119" # <a ><img src = "https://ibm.box.com/shared/static/7ttuf8otui4s6axm23csmb4s3pxz16y2.png" width = 650, align = "center"></a> # <h4 align=center> Visualizing common elements with the intersection of two circles. # # </h4> # + [markdown] _uuid="a6e08482bb671c6befceb3d5f5eb277295b92702" # We can find the common elements of the sets as follows: # + _uuid="ed8cb57a544b19253e54a26b9c4fb87e20fd3747" album_set_3=album_set1 & album_set2 album_set_3 # + [markdown] _uuid="47ea98836dbad3b3f9bc61e84aeff90732a2ea13" # We can find all the elements that are only contained in **album_set1** using the **difference** method: # + _uuid="f8acc99c76e701887541f4a1c1f941cf06b0700f" album_set1.difference(album_set2) # + [markdown] _uuid="04e7a05479846e0515bcf0151ab0ea483a2ac80f" # We only consider elements in **album_set1**; all the elements in **album_set2**, including the intersection, are not included. # # + [markdown] _uuid="983fcf9275f8e76881812577a0127064a4b40c22" # <a ><img src = "https://ibm.box.com/shared/static/osmxw1qnb5t9odon2cx94wxhfzlkn1n8.png" width = 650, align = "center"></a> # <h4 align=center> The difference of “album_set1” and “album_set2 # # </h4> # + [markdown] _uuid="6842b1b79dd7191705c9bd210579cc8162b87f65" # The difference between **album_set2** and **album_set1** is given by: # + _uuid="97faaffae33f00206492f4d365bf9e4b3c7816e9" album_set2.difference(album_set1) # + [markdown] _uuid="409c162975ccf6fe329a7cc7f0da74d8391e0e3c" # <a ><img src = "https://ibm.box.com/shared/static/klgc09bgpsjudr9v3wtl8yk9s2lya3hl.png" width = 650, align = "center"></a> # <h4 align=center> The difference of **album_set2** and **album_set1** # # </h4> # + [markdown] _uuid="8f878ea867fcad980925cca23049c8cfbc531d49" # We can also find the intersection, i.e in both **album_list2** and **album_list1**, using the intersection command : # + _uuid="dd52d352094c77678e9d484f7bf7a64a71706d18" album_set1.intersection(album_set2) # + [markdown] _uuid="eb5537416db151def1f28a11eb8a4a0e7af532f6" # This corresponds to the intersection of the two circles: # + [markdown] _uuid="bb8e1c044f699e75dbb6f35f4b3f989e9b868fcb" # <a ><img src = "https://ibm.box.com/shared/static/s2xfytq43twp6jsvbvr4o2fir7wdablo.png" width = 650, align = "center"></a> # <h4 align=center> Intersection of set # # </h4> # + [markdown] _uuid="b8e01d2ed0bea1e0ba109cfbe91d3886c8715d40" # The union corresponds to all the elements in both sets, which is represented by colouring both circles: # # + [markdown] _uuid="0aac3f665eeebc6030a527e2c2f9d6fa119e31ac" # <a ><img src = "https://ibm.box.com/shared/static/vkczce5jh50g0oh53xn0ilgriflcrog0.png" width = 650, align = "center"></a> # <h4 align=center> Figure 7: Union of set # # </h4> # + [markdown] _uuid="3124f9e6d316c45e016038a2529e81930fc3197a" # The union is given by: # + _uuid="ba09f9fc5978f7e82632ac73c3159ca66b4078e0" album_set1.union(album_set2) # + [markdown] _uuid="7684b9063a0a9b9da99622c6ee5a73de874e6e40" # And you can check if a set is a superset or subset of another set, respectively, like this: # + _uuid="6468067cd7446e5fc386be0ec0971bee9ac6d5cc" set(album_set1).issuperset(album_set2) # + _uuid="948c2b113459ccc82a7e90ce2d4bd15e98c8247e" set(album_set2).issubset(album_set1) # + [markdown] _uuid="663750013454a520eac0371b0732aa78ee6c504e" # Here is an example where **issubset()** is **issuperset()** is true: # + _uuid="b2a850d80a8f3d5584da884c116960b6ce0d412d" set({"Back in Black", "AC/DC"}).issubset(album_set1) # + _uuid="ef916c994ebacad0132f51d22a3e896c83ef9aab" album_set1.issuperset({"Back in Black", "AC/DC"}) # + [markdown] _uuid="8e8a5d2c68e4f658b9b00c5bd84c9617bc840d09" # #### Create a new set “album_set3” that is the union of “album_set1” and “album_set2”: # + _uuid="173ea271d4fc9d8ee25e2122f46117426fe2700c" # + [markdown] _uuid="fd51a463af30fab573a4c92e131d6eaef373c3b7" # <div align="right"> # <a href="#4" class="btn btn-default" data-toggle="collapse">Click here for the solution</a> # # </div> # <div id="4" class="collapse"> # ``` # album_set3=album_set1.union(album_set2) # album_set3 # ``` # </div> # + [markdown] _uuid="13dff94ee3724243ae7087142ac5898146df9d95" # #### Find out if "album_set1" is a subset of "album_set3": # + _uuid="34f6a1f8907eed28b8923f0fb027cd813c9065b4" # + [markdown] _uuid="27e2738d6d051fe41e0374e549db5c2a4caa9fa4" # <div align="right"> # <a href="#5" class="btn btn-default" data-toggle="collapse">Click here for the solution</a> # # </div> # <div id="5" class="collapse"> # ``` # album_set1.issubset(album_set3) # # ``` # </div> # + [markdown] _uuid="c312225f8be1e1270a197a7d4d0ef46bebab9a84" # <a href="http://cocl.us/NotebooksPython101bottom"><img src = "https://ibm.box.com/shared/static/irypdxea2q4th88zu1o1tsd06dya10go.png" width = 750, align = "center"></a> # # # + [markdown] _uuid="fc35a3009a7d0e3ddf5fe2cc6af23cb349baf159" # # # # About the Authors: # # [<NAME>]( https://www.linkedin.com/in/joseph-s-50398b136/) has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD. # # # + [markdown] _uuid="be270d77addca4340d65d128af2e47fb3a82ed8f" # <hr> # Copyright &copy; 2017 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
Jupyter notebook/IBM/python for data science/sets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="3rZ3ZWSkhxyS" # # DSE Course 1, Lab 1: Practice with Flow Control, Solved # # **Instructor**: <NAME> # # **Contact**: <EMAIL> # # <br> # # --- # # <br> # # In this lab we will continue to practice flow control. # # But before we begin, let's talk about data # # <br> # # --- # # # # + [markdown] id="_h5dcn5x0irv" # Breakout: # # * introduce yourself, data you use # * describe data goals # * most interested in learning about # * greatest challenges faced when dealing with data # # + [markdown] id="e9MwkxIGlc0k" # # L1 Q1: Practice with For Loops # # Create a Pyramid Using for loops # # # + [markdown] id="Qw7dGDLynBpZ" # ## Part A # # Example output: # # ``` # * # * * # * * * # * * * * # * * * * * # ``` # # # + colab={"base_uri": "https://localhost:8080/"} id="Vb1NyIqZmqYg" executionInfo={"status": "ok", "timestamp": 1618927639640, "user_tz": 300, "elapsed": 180, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gis2tewog0nYcz7REtNxkAs58_fKdVn5wvb3mXkPQ=s64", "userId": "17051665784581118920"}} outputId="eaa64f8d-45ae-47de-b434-d16bed1d678f" # Code Cell for L1 Q1 A for i in range(10): print("* "*i) # + [markdown] id="1WZkXH9Om75g" # ## Part B # # Example output: # # ``` # * # * * # * * * # * * * * # * * * * * # ``` # + colab={"base_uri": "https://localhost:8080/"} id="DNr1iPrJnGuf" executionInfo={"status": "ok", "timestamp": 1618927791068, "user_tz": 300, "elapsed": 215, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gis2tewog0nYcz7REtNxkAs58_fKdVn5wvb3mXkPQ=s64", "userId": "17051665784581118920"}} outputId="8da73d8a-97ba-4d93-f207-ad09ef8e29e0" # Code Cell for L1 Q1 B n = 10 for i in range(n): j = n - i print(" "*j+"* "*i) # + [markdown] id="MdYwm8EyncmA" # ## Part C # # Example output: # # ``` # * # * * # * * * # * * * * # * * * * * # ``` # + colab={"base_uri": "https://localhost:8080/"} id="4Dz-euqNnfWA" executionInfo={"status": "ok", "timestamp": 1618927815635, "user_tz": 300, "elapsed": 225, "user": {"displayName": "<NAME>", "photoUrl": "https://<KEY>", "userId": "17051665784581118920"}} outputId="f30350aa-0321-42e3-fdc7-82c170217508" # Code Cell for L1 Q1 C n = 10 for i in range(n): j = n - i print(" "*j+"* "*i) # + [markdown] id="Sp832EkKi67C" # # L1 Q2: Practice with For Loops and Logic # # Complete the Fibonacci sequence up to the 10th place using any or all of the following: # * `for` # * `if` `elif` `else` # * `append` # * `range` # # The Fibonacci sequence with indices: # # 1. 0 # 2. 1 # 3. 1 # 4. 2 # 5. 3 # 6. 5 # 7. 8 # 8. etc.. # # Example input: # # `index = 10` # # Example output: # ``` # Index: 10 # Value: 34 # ``` # + colab={"base_uri": "https://localhost:8080/"} id="w-mykt4KhnA2" executionInfo={"status": "ok", "timestamp": 1618927485263, "user_tz": 300, "elapsed": 169, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gis2tewog0nYcz7REtNxkAs58_fKdVn5wvb3mXkPQ=s64", "userId": "17051665784581118920"}} outputId="381ae8cf-055e-41eb-e627-40edb9798439" index = 10 for i in range(index): if i == 0: fib = [0] elif i == 1 or i == 2: fib.append(1) else: fib.append(fib[-1] + fib[-2]) print("Index: {}\nValue: {}".format(index,fib[-1])) # + [markdown] id="GA0l_pwYn9g1" # # L1 Q3: Practice with Collections # # concatenate the following dictionaries to create a new one, if you need to refer to documentation for python dictionaries! # # Input: # ``` # dict1={1:10, 2:20} # dict2={3:30, 4:40} # dict3={5:50, 6:60} # ``` # # Output: # ``` # new_dect={1:10, 2:20, 3:30, 4:40, 5:50, 6:60} # ``` # + id="I-ZC9I0FoREe" dict1={1:10, 2:20} dict2={3:30, 4:40} dict3={5:50, 6:60} # + id="Qe6kDiQGrQjA" dict1.update(dict2) dict1.update(dict3) # + colab={"base_uri": "https://localhost:8080/"} id="JPvywwCYrS9E" executionInfo={"status": "ok", "timestamp": 1619465681025, "user_tz": 300, "elapsed": 155, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gis2tewog0nYcz7REtNxkAs58_fKdVn5wvb3mXkPQ=s64", "userId": "17051665784581118920"}} outputId="4cad7e98-bbb9-459e-ca15-108cb9af71da" dict1 # + colab={"base_uri": "https://localhost:8080/"} id="Tck-nlc-sid9" executionInfo={"status": "ok", "timestamp": 1619466155226, "user_tz": 300, "elapsed": 158, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gis2tewog0nYcz7REtNxkAs58_fKdVn5wvb3mXkPQ=s64", "userId": "17051665784581118920"}} outputId="94820253-25bb-4d63-842e-12f95054e5f8" {**dict1, **dict2, **dict3} # + id="5C0Ob76ItHtf"
courses/C1_ Build Your Base/SOLUTIONS/DSE C1 L1_ Practice with Flow Control - Solved.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Customizing visual appearance # # # HoloViews elements like the `Scatter` points illustrated in the [Introduction](1-Introduction.ipynb) contain two types of information: # # - **Your data**, in as close to its original form as possible, so that it can be analyzed and accessed as you see fit. # - **Metadata specifying what your data *is***, which allows HoloViews to construct an appropriate visual representation for it. # # What elements do *not* contain is: # # - The endless details that one might want to tweak about the visual representation, such as line widths, colors, fonts, and spacing. # # HoloViews is designed to let you work naturally with the meaningful features of your data, while making it simple to adjust the display details separately using the Options system. Among many other benefits, this [separation of *content* from *presentation*](https://en.wikipedia.org/wiki/Separation_of_content_and_presentation) simplifies your data analysis workflow, and makes it independent of any particular plotting backend. # ## Visualizing neural spike trains # # To illustrate how the options system works, we will use a dataset containing ["spike"](https://en.wikipedia.org/wiki/Action_potential) (neural firing) events extracted from the recorded electrical activity of a [neuron](https://en.wikipedia.org/wiki/Neuron). We will be visualizing the first trial of this [publicly accessible neural recording](http://www.neuralsignal.org/data/04/nsa2004.4/433l019). First, we import pandas and holoviews and load our data: # + import pandas as pd import holoviews as hv from holoviews import opts spike_train = pd.read_csv('../assets/spike_train.csv.gz') spike_train.head(n=3) # - # This dataset contains the spike times (in milliseconds) for each detected spike event in this five-second recording, along with a spiking frequency in Hertz (spikes per second), averaged over a rolling 200 millisecond window. We will now declare ``Curve`` and ``Spike`` elements using this data and combine them into a ``Layout``: # + curve = hv.Curve( spike_train, 'milliseconds', 'Hertz', label='Firing Rate') spikes = hv.Spikes(spike_train, 'milliseconds', [], label='Spike Train') layout = curve + spikes layout # - # Notice that the representation for this object is purely textual; so far we have not yet loaded any plotting system for HoloViews, and so all you can see is a description of the data stored in the elements. # # To be able to see a visual representation and adjust its appearance, we'll need to load a plotting system, and here let's load two so they can be compared: hv.extension('bokeh', 'matplotlib') # Even though we can happily create, analyze, and manipulate HoloViews objects without using any plotting backend, this line is normally executed just after importing HoloViews so that objects can have a rich graphical representation rather than the very-limited textual representation shown above. Putting 'bokeh' first in this list makes visualizations default to using [Bokeh](http://bokeh.pydata.org), but including [matplotlib](http://matplotlib.org) as well means that backend can be selected for any particular plot as shown below. # # Default appearance # # With the extension loaded, let's look at the default appearance as rendered with Bokeh: layout # As you can see, we can immediately appreciate more about this dataset than we could from the textual representation. The curve plot, in particular, conveys clearly that the firing rate varies quite a bit over this 5-second interval. However, the spikes plot is much more difficult to interpret, because the plot is nearly solid black. # # One thing we can do is click on one of the Bokeh plot's zoom tools to enable it, then zoom in until individual spikes are clearly visible. Even then, though, it's difficult to relate the spiking and firing-rate representations to each other. Maybe we can do better by adjusting the display options away from their default settings? # ## Customization # # Let's see what we can achieve when we do decide to customize the appearance: layout.opts( opts.Curve( height=200, width=900, xaxis=None, line_width=1.50, color='red', tools=['hover']), opts.Spikes(height=150, width=900, yaxis=None, line_width=0.25, color='grey')).cols(1) # Much better! It's the same underlying data, but now we can clearly see both the individual spike events and how they affect the moving average. You can also see how the moving average trails the actual spiking, due to how the window function was defined. # # A detailed breakdown of this exact customization is given in the [User Guide](../user_guide/03-Applying_Customization.ipynb), but we can use this example to understand a number of important concepts: # # * The options system is based around keyword settings supplied to the `.opts()` method. # * Collections of keyword options can be built for a given element type using an "options builder" object, such as `opts.Curve` and `opts.Spikes` here, so that we can set options separately for each component of a composite object (as for height here) # * Options builders also provide early *validation* of keywords (allowing errors to be detected even before the options are applied to an element) as well as *tab-completion* in IPython (try adding a comma to the `opts.Curve` or `opts.Spikes` keyword list to see what's available!). # * The layout container has a ``cols`` method to specify the number of columns in the layout. # # The corresponding [User Guide](../user_guide/03-Applying_Customization.ipynb) entry explains the keywords used in detail, but a quick summary is that when you tab-complete using the `opts.*` builders, you are completing across two fundamental types of options: ***plot options*** (processed by HoloViews) and ***style options*** (processed by the underlying backend, either Bokeh or Matplotlib here). If you only use a single backend, you don't need to worry much about this distinction because HoloViews will ensure that the option setting is given to the appropriate backend when needed. Here, for instance, the `color` and `line_width` keywords are not used by HoloViews; they will just be passed on to the corresponding [Bokeh glyphs](http://bokeh.pydata.org/en/latest/docs/user_guide/plotting.html). In this way you can control both HoloViews and the current backend, to customize almost any aspect of your plot. # # Discovering options # # In the above cell, the result of calling `opts.Curve()` is passed into the `.opts` method returning an `Options` object. `opts.Curve()` and the other option builders aren't always needed, but the are very helpful for validating options and offer tab completion to help you discover possible values: dotted_options = opts.Curve(color='purple', width=600, height=250, line_dash='dotted') dotted_options # Try tab-completing the options for `Curve` above or specifying an invalid keyword. Now the `dotted_options` object can be passed to the `.opts` method call to customize a `Curve`: dotted = hv.Curve(spike_train, 'milliseconds', 'Hertz') dotted.opts(dotted_options) # When working directly with a single element, you can omit the options builder entirely because it's clear what type the options apply to: dashed = hv.Curve( spike_train, 'milliseconds', 'Hertz') dashed.opts(color='orange', width=600, height=250, line_dash='dashed') # The code is then a bit shorter and more readable with the same result, but it no longer tab completes, and so omitting the builder is probably only useful for a final, published set of code, not during exploration. When using the `.opts` method on compositions of elements (i.e., layouts or overlays) you still need to use the options builders to indicate which type of object the options should be applied to. # # If you want to find out which options have been changed on a given object, you can use `.opts.info()`: dashed.opts.info() # For more information on how to work with options, see the the [User Guide](../user_guide/03-Applying_Customization.ipynb). # # Switching to matplotlib # # Now let's customize our `layout` with options appropriate for the [Matplotlib](http://matplotlib.org) renderer, by supplying the `backend='matplotlib'` argument to the `.opts` method: layout = layout.opts( opts.Curve( aspect=6, xaxis=None, color='blue', linewidth=2, show_grid=False, linestyle='dashed'), opts.Spikes(aspect=6, yaxis='bare', color='red', linewidth=0.25), opts.Layout(sublabel_format='', vspace=0.1, fig_size=200), backend='matplotlib') layout # The plot is still rendered with bokeh as we haven't switched to the matplotlib backend just yet (although matplotlib support was was loaded by `hv.extension` at the start of this notebook). The above code sets the options appropriate to matplotlib without immediately making use of them and naturally, a few changes needed to be made: # # * Some of the options are different because of differences in how the plotting backends work. For instance, matplotlib uses ``aspect`` instead of setting ``width`` and ``height``. In some cases, but not all, HoloViews can smooth over such differences in the *plotting* options to make it simpler to switch backends. # * The Bokeh hover tool is not supported by the matplotlib backend, as you might expect, nor are there any other interactive controls, because the Matplotlib backend generates static PNG or SVG images. # * Some options have different names; for instance, the Bokeh ``line_width`` option is called ``linewidth`` in matplotlib. These "style" options are directly inherited from the API of the plotting library backend, not defined by HoloViews. # * Containers like `Layout`s also have some options to control the arrangement of its components. Here we adjust the gap betwen the plots using ``vspace``. # # Now we can use the `hv.output` utility to to show the same elements in `layout` as rendered with these different customizations, in a different output format (SVG), with a completely different plotting library: hv.output(layout, backend='matplotlib', fig='svg') # This approach allows you to associate options for multiple different backends with the same object. See the [User Guide](../user_guide/03-Applying_Customization.ipynb) for more details, including information of how to use `hv.output` to affect global output settings. # ## Persistent styles # # Let's switch back to the default (Bokeh) plotting extension for this notebook and apply the ``.select`` operation illustrated in the Introduction, to the ``spikes`` object we made earlier: hv.output(backend='bokeh') spikes.select(milliseconds=(2000,4000)) # Note how HoloViews remembered the Bokeh-specific styles we previously applied to the `spikes` object! This feature allows us to style objects once and then keep that styling as we work, without having to repeat the styles every time we work with that object. Note that even though this styling is associated with the element, it is not actually stored on it, which is mostly an implementation detail but does define a strict separation between what HoloViews considers parts of your data (the Element) and what is part of the "look" or the "view" of that data (the options associated with the object, but stored separately). # # If we want to reset back to the original styling, we can call `.opts.clear()`: spikes.select(milliseconds=(2000,4000)).opts.clear() # You can learn more about the output utility and how the options system handles persistent options in the [User Guide](../user_guide/03-Applying_Customization.ipynb). # ## Setting axis labels # # If you look closely, the example above might worry you. First we defined our ``Spikes`` element with ``kdims=['milliseconds']``, which we then used as a keyword argument in ``select`` above. This is also the string used as the axis label. Does this mean we are limited to Python identifiers for axis labels, if we want to use the corresponding dimension with ``select``? # # Luckily, there is no limitation involved. Dimensions specified as strings are often convenient, but behind the scenes, HoloViews always uses a much richer ``Dimensions`` object that you can pass to the ``kdims`` and ``vdims`` explicitly (see the [User Guide](../user_guide/01-Annotating_Data.ipynb) for more information). One of the things each ``Dimension`` object supports is a long, descriptive ``label``, which complements the short programmer-friendly name. # # We can set the dimension labels on our existing ``spikes`` object as follows: spikes = spikes.redim.label(milliseconds='Time in milliseconds (10⁻³ seconds)') curve = curve.redim.label(Hertz='Frequency (Hz)') (curve + spikes).select(milliseconds=(2000,4000)).cols(1) # As you can see, we can set long descriptive labels on our dimensions (including unicode) while still making use of the short dimension name in methods like ``select``. # # Now that you know how to set up and customize basic visualizations, the next [Getting-Started sections](./3-Tabular_Datasets.ipynb) show how to work with various common types of data in HoloViews.
examples/getting_started/2-Customization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # reload packages # %load_ext autoreload # %autoreload 2 # ### Choose GPU (this may not be needed on your computer) # %env CUDA_DEVICE_ORDER=PCI_BUS_ID # %env CUDA_VISIBLE_DEVICES='' # ### load packages from tfumap.umap import tfUMAP import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from tqdm.autonotebook import tqdm import umap import pandas as pd # ### Load dataset dataset = 'bison' import requests import json url = "https://raw.githubusercontent.com/duhaime/umap-zoo/03819ed0954b524919671a72f61a56032099ba11/data/json/bison.json" animal = np.array(json.loads(requests.get(url).text)['3d']) np.shape(animal) fig, ax = plt.subplots() ax.scatter(animal[:,2], animal[:,1], s = 1, c = animal[:,0], alpha = 0.1) ax.axis('equal') X_train = animal Y_train = animal[:, 2] X_train_flat = X_train # ### Create model and train from sklearn.decomposition import PCA pca = PCA(n_components=2) z = pca.fit_transform(X_train_flat) # ### plot output fig, ax = plt.subplots( figsize=(8, 8)) sc = ax.scatter( z[:, 0], z[:, 1], c=Y_train, #cmap="tab10", s=0.1, alpha=0.5, rasterized=True, ) ax.axis('equal') ax.set_title("PCA embedding", fontsize=20) plt.colorbar(sc, ax=ax); # ### Save model import os import pickle from tfumap.paths import ensure_dir, MODEL_DIR output_dir = MODEL_DIR/'projections'/ dataset / 'PCA' ensure_dir(output_dir) with open(os.path.join(output_dir, "model.pkl"), "wb") as output: pickle.dump(pca, output, pickle.HIGHEST_PROTOCOL) np.save(output_dir / 'z.npy', z) # ## tsne from openTSNE import TSNE tsne = TSNE( n_components = 2, perplexity=500, ) embedding_train = tsne.fit(X_train_flat) z = np.array(embedding_train) fig, ax = plt.subplots( figsize=(8, 8)) sc = ax.scatter( z[:, 0], z[:, 1], c=Y_train, #cmap="tab10", s=0.1, alpha=0.5, rasterized=True, ) ax.axis('equal') ax.set_title("PCA embedding", fontsize=20) plt.colorbar(sc, ax=ax); # #### save model # + import os import pickle from tfumap.paths import ensure_dir, MODEL_DIR output_dir = MODEL_DIR/'projections'/ dataset / 'TSNE' ensure_dir(output_dir) with open(os.path.join(output_dir, "model.pkl"), "wb") as output: pickle.dump(pca, output, pickle.HIGHEST_PROTOCOL) np.save(output_dir / 'z.npy', z) # -
notebooks/dataset-projections/bison/bison-pca-tsne.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Fitting the Barnes Gyro relation using a latentfit import numpy as np import matplotlib.pyplot as plt import pystan import warnings warnings.filterwarnings('ignore') # We'll add the uncertainty with a latent implementation. # # $P = \tau^a \times b(B - V - c)^c$ # # where $P$ is rotation in days # $\tau$ is age (in Myr), # B-V are colour in magnitude, # the rest are free parameters # # + angus = [0.55, 0.40, 0.45, 0.31] def gyro(tau, bv, a, b, c, d): return (tau*1000)**a * b*(bv - c)**d # - # Instead of treating uncertainty in Period as emergent from uncertainty in colour and age, we are going to treat these as if each was obtained in an independent observation, therefore having uncorrelated levels on uncertainty. # + npts = 55 bprp_true = np.abs(np.sort(np.random.normal(1., 0.2, npts))) age_true = np.abs(np.random.normal(5., 3, npts)) period_true = gyro(age_true, bprp_true, *angus) sigma_p = 6.*np.ones(npts) period_obs = np.abs(period_true + np.random.randn(npts) * sigma_p) sigma_bprp = .01 * np.ones(npts) bprp_obs = bprp_true + np.random.randn(npts) * sigma_bprp sigma_age = np.random.uniform(.05, .30) * age_true age_obs = age_true + np.random.randn(npts) * sigma_age # + import matplotlib.gridspec as gridspec import mystyle as ms with plt.style.context(ms.ms): fig = plt.figure(figsize=(14,6)) gs = gridspec.GridSpec(2, 2) ax0 = plt.subplot(gs[0,:1]) ax0.scatter(bprp_obs, period_obs,zorder=1, s=20, label='observed') ax0.errorbar(bprp_obs, period_obs, yerr=sigma_p, xerr=sigma_bprp, fmt='|',zorder=0) ax0.set_xlabel('B-V (mag)') ax0.set_ylabel('period (day)') ax0.legend() ax1 = plt.subplot(gs[1,1]) ax1.scatter(age_obs, period_obs,zorder=1, s=20, label='observed') ax1.errorbar(age_obs, period_obs, yerr=sigma_p, xerr=sigma_age, fmt='|',zorder=0) ax1.set_xlabel('Age (Gyr)') ax1.set_ylabel('period (day)') ax1.legend() ax2 = plt.subplot(gs[1,0]) ax2.scatter(age_obs, bprp_obs,zorder=1, s=20, label='observed') ax2.errorbar(age_obs, bprp_obs, yerr=sigma_bprp, xerr=sigma_age, fmt='|',zorder=0) ax2.set_xlabel('Age (Gyr)') ax2.set_ylabel('B-V (mag)') ax2.legend() agex = np.linspace(age_true.min(), age_true.max(), 100) ax1.plot(agex, gyro(agex, bprp_true.min(), *angus), label='BPRP {:.2f}'.format(bprp_true.min()), lw=3, alpha=.5) ax1.plot(agex, gyro(agex, np.mean(bprp_true), *angus), label='BPRP {:.2f}'.format(np.median(bprp_true)), lw=3, alpha=.5) ax1.plot(agex, gyro(agex, bprp_true.max(), *angus), label='BPRP {:.2f}'.format(bprp_true.max()), lw=3, alpha=.5) ax1.legend() bprpx = np.linspace(bprp_true.min(), bprp_true.max(), 100) ax0.plot(bprpx, gyro(age_true.min(), bprpx, *angus), label='Age {:.2f}'.format(age_true.min()), lw=3, alpha=.5) ax0.plot(bprpx, gyro(np.mean(age_true), bprpx, *angus), label='Age {:.2f}'.format(np.median(age_true)), lw=3, alpha=.5) ax0.plot(bprpx, gyro(age_true.max(), bprpx, *angus), label='Age {:.2f}'.format(age_true.max()), lw=3, alpha=.5) ax0.legend() fig.tight_layout() plt.show() # - # Now lets fit for Period using a latent parameter model latentfit = ''' data { int<lower=1> N; real<lower=0> P[N]; real<lower=0> sigma_p[N]; real bprp[N]; real<lower=0> sigma_bprp[N]; real<lower=0> age[N]; real<lower=0> sigma_age[N]; } parameters { real<lower=0> a; real<lower=0> b; real<lower=0> c; real<lower=0> d; real raw_bprp[N]; real raw_age[N]; } transformed parameters { real<lower=0> true_age[N]; real true_bprp[N]; for (n in 1:N){ true_age[n] = sigma_age[n] * raw_age[n] + age[n]; true_bprp[n] = sigma_bprp[n] * raw_bprp[n] + bprp[n]; } } model { real mod[N]; for (n in 1:N){ mod[n] = (true_age[n]*1000)^a * b * (true_bprp[n] - c)^d; } a ~ normal(0., 1.); b ~ normal(0., 1.); c ~ normal(0., 1.); d ~ normal(0., 1.); raw_bprp ~ std_normal(); // implies true_bprp ~ normal(bprp, sigma_bprp) raw_age ~ std_normal(); // implies true_age ~ normal(age, sigma_age) mod ~ normal(P, sigma_p); } ''' sm = pystan.StanModel(model_code = latentfit, model_name='latentfit') # + data = {'N' : len(period_obs), 'P' : period_obs, 'sigma_p' : sigma_p, 'bprp' : bprp_obs, 'sigma_bprp' : sigma_bprp, 'age' : age_obs, 'sigma_age' : sigma_age} init = {'a' : angus[0], 'b' : angus[1], 'c' : angus[2], 'd' : angus[3], 'true_age' : age_true, 'true_bprp' : bprp_true} # - fit = sm.sampling(data=data, iter=20000, chains=4, seed=11, init = [init for n in range(4)]) print(fit) fit.plot() plt.show() import corner labels=['a','b','c','d'] chain = np.array([fit[label] for label in labels]) corner.corner(chain.T, labels=labels, truths=angus, quantiles=[0.16, 0.5, 0.84],show_titles=True) plt.show() # + import matplotlib.gridspec as gridspec import mystyle as ms gyrores = [np.median(fit[label]) for label in ['a','b','c','d']] with plt.style.context(ms.ms): fig = plt.figure(figsize=(14,6)) gs = gridspec.GridSpec(2, 2) ax0 = plt.subplot(gs[0,:1]) ax0.scatter(bprp_obs, period_obs,zorder=1, s=20, label='observed') ax0.errorbar(bprp_obs, period_obs, yerr=sigma_p, xerr=sigma_bprp, fmt='|',zorder=0) ax0.set_xlabel('B-V (mag)') ax0.set_ylabel('period (day)') ax0.legend() ax1 = plt.subplot(gs[1,1]) ax1.scatter(age_obs, period_obs,zorder=1, s=20, label='observed') ax1.errorbar(age_obs, period_obs, yerr=sigma_p, xerr=sigma_age, fmt='|',zorder=0) ax1.set_xlabel('Age (Gyr)') ax1.set_ylabel('period (day)') ax1.legend() ax2 = plt.subplot(gs[1,0]) ax2.scatter(age_obs, bprp_obs,zorder=1, s=20, label='observed') ax2.errorbar(age_obs, bprp_obs, yerr=sigma_bprp, xerr=sigma_age, fmt='|',zorder=0) ax2.set_xlabel('Age (Gyr)') ax2.set_ylabel('B-V (mag)') ax2.legend() agex = np.linspace(age_true.min(), age_true.max(), 100) ax1.plot(agex, gyro(agex, bprp_true.min(), *gyrores), label='BPRP {:.2f}'.format(bprp_true.min()), lw=3, alpha=.5) ax1.plot(agex, gyro(agex, np.mean(bprp_true), *gyrores), label='BPRP {:.2f}'.format(np.median(bprp_true)), lw=3, alpha=.5) ax1.plot(agex, gyro(agex, bprp_true.max(), *gyrores), label='BPRP {:.2f}'.format(bprp_true.max()), lw=3, alpha=.5) ax1.legend() bprpx = np.linspace(bprp_true.min(), bprp_true.max(), 100) ax0.plot(bprpx, gyro(age_true.min(), bprpx, *gyrores), label='Age {:.2f}'.format(age_true.min()), lw=3, alpha=.5) ax0.plot(bprpx, gyro(np.mean(age_true), bprpx, *gyrores), label='Age {:.2f}'.format(np.median(age_true)), lw=3, alpha=.5) ax0.plot(bprpx, gyro(age_true.max(), bprpx, *gyrores), label='Age {:.2f}'.format(age_true.max()), lw=3, alpha=.5) ax0.legend() fig.tight_layout() plt.show() # - gyropred = gyro(age_obs, bprp_obs, *gyrores) with plt.style.context(ms.ms): fig = plt.figure() ax= fig.gca() ax.scatter(age_obs, gyropred - period_true) ax.axhline(0., lw=3, c='r', alpha=.5) ax.set_ylabel('P Predicted / P True') ax.set_xlabel('Age') # ### Notes # We don't recover the true values for $a, b, c, d$. Is this because the uncertainties are unrelated? Even a latent parameter model can't go beyond its uncertainties too far. Maybe they can't be reconciled. # # It is worth noting that if I run with very small uncertainties and truth values we recover the correct $a, b, c, d$. So our model works as intended, at least!
code/tests/gyro_tests/gyro_latent_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Now You Code 2: Shopping List # # Write a program to input a list of grocery items for your shopping list and then writes them to a file a line at a time. The program should keep asking you to enter grocery items until you type `'done'`. # # After you complete the list, the program should then load the file and read the list back to you by printing each item out. # # Sample Run: # # ``` # Let's make a shopping list. Type 'done' when you're finished: # Enter Item: milk # Enter Item: cheese # Enter Item: eggs # Enter Item: beer # Enter Item: apples # Enter Item: done # Your shopping list: # milk # cheese # eggs # beer # apples # ``` # # ## Step 1: Problem Analysis # # Inputs: # # Outputs: # # Algorithm (Steps in Program): # # filename = "NYC2-shopping-list.txt" ## Step 2: write code here # ## Step 3: Refactoring # # Refactor the part of your program which reads the shopping list from the file into a separate user-defined function called `readShoppingList` # re-write your program to use this function. # # ## ReadShoppingList function # # Inputs: # # Outputs: # # Algorithm (Steps in Program): # # # # # + ## Step 4: Write program again with refactored code. def readShoppingList(): shoppingList = [] # todo read shopping list here return shoppingList # TODO Main code here # - # ## Step 5: Questions # # 1. Is the refactored code in step 4 easier to read? Why or why not? # 2. Explain how this program could be refactored further (there's one thing that's obvious). # 3. Describe how this program can be modified to support multiple shopping lists? # # ## Reminder of Evaluation Criteria # # 1. Was the problem attempted (analysis, code, and answered questions) ? # 2. Was the problem analysis thought out? (does the program match the plan?) # 3. Does the code execute without syntax error? # 4. Does the code solve the intended problem? # 5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors) #
content/lessons/08/Now-You-Code/NYC2-Shopping-List.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from birdcall.data import * from birdcall.metrics import * import pandas as pd # - classes = pd.read_pickle('data/classes.pkl') train_ds = MelspecPoolDataset(pd.read_pickle('data/train_set.pkl'), classes, len_mult=30) valid_ds = MelspecPoolDataset(pd.read_pickle('data/val_set.pkl'), classes, len_mult=5) len(train_ds), len(valid_ds) import torch import torchvision from torch import nn train_dl = torch.utils.data.DataLoader(train_ds, batch_size=16, shuffle=True, num_workers=NUM_WORKERS) valid_dl = torch.utils.data.DataLoader(valid_ds, batch_size=2*16, shuffle=False, num_workers=NUM_WORKERS) for b in train_dl: break b[0].shape, b[1] b[0].mean(), b[0].std() class Model(nn.Module): def __init__(self): super().__init__() self.cnn = nn.Sequential(*list(torchvision.models.resnet34(True).children())[:-2]) self.classifier = nn.Sequential(*[ nn.Linear(512, 512), nn.ReLU(), nn.Dropout(p=0.5), nn.BatchNorm1d(512), nn.Linear(512, 512), nn.ReLU(), nn.Dropout(p=0.5), nn.BatchNorm1d(512), nn.Linear(512, len(classes)) ]) def forward(self, x): bs, im_num, ch, y_dim, x_dim = x.shape x = self.cnn(x.view(-1, ch, y_dim, x_dim)) x = x.mean((2,3)) x = self.classifier(x) x = x.view(bs, im_num, -1) x = x.mean(-2) return x model = Model().cuda() # + import torch.optim as optim criterion = nn.BCEWithLogitsLoss() optimizer = optim.Adam(model.parameters(), 1e-3) scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, 10) # - from sklearn.metrics import accuracy_score, f1_score import time for epoch in range(330): t0 = time.time() running_loss = 0.0 for i, data in enumerate(train_dl, 0): model.train() inputs, labels = data[0].cuda(), data[1].cuda() optimizer.zero_grad() outputs = model(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() scheduler.step() running_loss += loss.item() if i % len(train_dl) == len(train_dl)-1: model.eval(); preds = [] targs = [] with torch.no_grad(): for data in valid_dl: inputs, labels = data[0].cuda(), data[1].cuda() outputs = model(inputs) preds.append(outputs.cpu().detach()) targs.append(labels.cpu().detach()) preds = torch.cat(preds) targs = torch.cat(targs) accuracy = accuracy_score(preds.sigmoid() > 0.5, targs) f1 = f1_score(preds.sigmoid() > 0.5, targs, average='micro') print(f'[{epoch + 1}, {time.time() - t0:.1f}] loss: {running_loss / (len(train_dl)-1):.3f}, acc: {accuracy:.3f}, f1: {f1:.3f}') running_loss = 0.0 if (epoch % 20 == 0) and (epoch != 0): torch.save(model.state_dict(), f'models/{epoch}_{round(f1, 2)}.pth') torch.save(model.state_dict(), f'models/{epoch}_{round(f1, 2)}.pth') valid_ds = MelspecPoolDataset(pd.read_pickle('data/val_set.pkl'), classes, len_mult=50) valid_dl = torch.utils.data.DataLoader(valid_ds, batch_size=2*16, shuffle=False, num_workers=NUM_WORKERS) model = Model().cuda() model.load_state_dict(torch.load('models/329_0.62.pth')) # + # %%time model.eval(); preds = [] targs = [] with torch.no_grad(): for data in valid_dl: inputs, labels = data[0].cuda(), data[1].cuda() outputs = model(inputs) preds.append(outputs.cpu().detach()) targs.append(labels.cpu().detach()) preds = torch.cat(preds) targs = torch.cat(targs) # + import numpy as np f1s = [] ts = [] for t in np.linspace(0.4, 1, 61): f1s.append(f1_score(preds.sigmoid() > t, targs, average='micro')) ts.append(t) # - max(f1s) accuracy_score(preds.sigmoid() > ts[np.argmax(f1s)], targs) ts[np.argmax(f1s)]
02c_train_on_melspectrograms_pytorch_avg_pool.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline pd.options.display.max_columns=999 plt.style.use('fivethirtyeight') # # Body Image and Academic Performance of College Students # A student survey was conducted at a major university. Data were collected from a random sample of 239 undergraduate students, and the information that was collected included physical characteristics (such as height, handedness, etc.), study habits, academic performance and attitudes, and social behaviors. In this exercise, we will focus on exploring relationships between some of those variables. Note that the symbol * in the worksheet means that this observation is not available (this is known as a 'missing value'). # # **Q1.** Is there a relationship between students' college GPAs and their high school GPAs? # # **Q2.** Are there differences between males and females with respect to body image? # # **Q3.** Is students' academic performance in college related to their typical seating location in class? # # * **Gender:** Male or Female # * **Height:** Self-reported height (in inches) # * **GPA:** Student's cumulative college GPA # * **HS_GPA:** Student's high school GPA (senior year) # * **Seat:** Typical classroom seat location (F = Front, M = Middle, B = Back) # * **WtFeel:** Does the student feel that he/she is: Underweight, About Right, Overweight # * **Cheat:** Would the tell the instructor if he/she saw somebody cheating on exam? (No or Yes) # ### Understand the Problem # # Before we begin looking at specific questions, we examine some general features of the problem, such as the data structure, how the data were collected, and the overall study design. students = pd.read_excel('../Data/body_image.xls') students.head(10) # **Question-** Out of the first ten students in the datafile, how many typically sit in the back of the classroom? students_first10 = students.head(10) students_first10['Seat'].value_counts() # **Answer-** 2 # **Question-** Out of the first ten students in the datafile, how many students have a college GPA which is higher than their high school GPA? # Select the data(first 10) where college GPA is higher than high school GPA students_first10[students_first10['GPA'] > students_first10['HS GPA']] # **Answer-** 3 # **Question-** What is the typical seat of the student with the highest GPA among the first ten students in the datafile? # **Note-** The Following code will not work. Look below the later cell to know why? # First find the index number where the value of GPA is highest students_first10.idxmax(axis=1) # axis=1 for searchsing column wise students_first10.dtypes # We can see that the data type of the GPA column is object(string) which is why pandas is throwing an error. # As it is mentioned in the Problem description that there are missing values denoted by `*`. First we have to # replace these values with np.nan which pandas understand that it is a missing value. # replace the missing values for the whole dataframe students.replace('*',np.nan , inplace=True) # inplace=True means we want the operation to be permanent # now check the dtypes students.dtypes # We can see that GPA and other numerical columns are converted to float type from object(string). # Now reselect the first 10 data students_first10 = students.head(10) # First find the index number where the value of GPA is highest students_first10['GPA'].idxmax(axis=1) # axis=1 for searchsing column wise # show the data for this index students_first10.iloc[8, :] # **Answer-** M # ## Question One # Now that we have completed the first part, where we examined some general features of the problem, it is time to look at the questions that we are trying to answer using the data. # # **Question- Is there a relationship between students' college GPAs and their high school GPAs?** # #### Reflect on Question (Question One) # In this first step, we think about the question and use our intuition and/or experience to try and predict what the results will show. Later, we will compare what we initially thought to what we actually find when we analyze the data. # # Note that we will repeat the middle three steps in the work plan (Reflect on Question, Analyze Data, and Draw Conclusions) for each of the questions in the analysis. # **Question-** Before analyzing the data and discovering the relationship between students' high school GPAs and their college GPAs, try to predict what the data will show (use your own experience and intuition). # **Your Answer-** # ### Analyze Data (Question One) # In this step, we choose and conduct the analyses that are needed in order to address the current question. # #### Plan Analyses (Question One) # # Before choosing the appropriate analyses, it is helpful to: # # **Identify the relevant variables:** # # **Question-** Which variable(s) among those listed below is/are particularly relevant to the current question? # # * Gender # * Height # * GPA # * HS_GPA # * Seat # * WtFeel # * Cheat # # Select all that apply. # **Your Answer-** # **Classify the relevant variables:** # * The variable GPA is ____________ variable and is __________(Quantitative/categorical). # * The variable HS_GPA is__________ variable and is ____________. # **Your Answer-** # ### Exploratory Analysis (Question One) # Now that we have identified and classified the relevant variable(s), we use exploratory data analysis methods to help us make important features of the data visible. # #### Determine Displays and Measures (Question One) # **Question-** A meaningful display is: # # * Side-by-side boxplots # * Scatterplot # * Two-way Table # * Piechart # * Histogram # **Your Answer-** # **Question-** A meaningful numerical summary to supplement the above display is? # * Descriptive statistics(five point summary) # * Correlation r # * Conditional Percentages # **Your Answer-** # ### Conduct Analysis (Question One) # + import seaborn as sns # create a scatter plot sns.lmplot(x='HS GPA', y='GPA',height=6, data=students); # - # Correlation students['HS GPA'].corr(students['GPA']) # ### Results (Question One) # Remember, using the display and numerical summary, you need to describe the features of a single quantitative distribution. # # Do that by describing the key features of the display and by supporting your description with numerical measures. # # Keep in mind that the appropriate numerical measures for the current situation (i.e., measures of center and spread) will depend on the shape of the distribution you find. # **Your Answer-** # ### Draw Conclusions (Question One) # # In this step, Draw Conclusions, we interpret the results we got from out analyses in the context of the current question. # # Consider what results mean (Question One) # # **What do the results you got indicate about the relationship between students' high school GPAs and their college GPAs?** # **Your Answer-** # #### Reflect on Conclusions (Question One) # Relate the comments you made before analyzing the data by commenting on both of the following: # # * how your expectations differ (or do not differ) from the actual results # * if it is relevant or meaningful in context, think of a way that these results could be used in practice # **Your Answer-** # # Question Two # # Now that we are done with Question One, we are going to repeat the same three steps (Reflect on Question, Analyze Data, and Draw Conclusions) for Question Two. # # **Q2. Are there differences between males and females with respect to body image?** # #### Reflect on Question (Question Two) # Before analyzing the data and discovering whether there is a gender effect on body image, try to predict what the data will show (use your own experience and intuition). # **Your Answer-** # ### Analyze Data (Question Two) # #### Plan Analyses (Question Two) # # Before choosing the appropriate analyses, it is helpful to: # # **Identify the relevant variables:** # # **Question-** Which variable(s) among those listed below is/are particularly relevant to the current question? # # * Gender # * Height # * GPA # * HS_GPA # * Seat # * WtFeel # * Cheat # **Your Answer-** # **Classify the relevant variables:** # # * The variable Gender __________ is variable and is ____________. # * The variable WtFeel is ___________ variable and is _____________. # **Your Answer-** # ### Exploratory Analysis (Question Two) # #### Determine Displays and Measures (Question Two) # # **A meaningful display is:** # * Side-by-side boxplots # * Scatterplot # * Two-way Table # * Piechart # * Histogram # # **Your Answer-** # **A meaningful numerical summary to supplement the above display is-** # # * Descriptive statistics(five point summary) # * Correlation r # * Conditional Percentages # **Your Answer-** # ### Conduct Analysis (Question Two) # create a two-way table gender_wtfeel = students.pivot_table(index='Gender', columns='WtFeel', aggfunc='size') gender_wtfeel # create a two-way conditional percentage table gender_wtfeel_percent = gender_wtfeel.apply(lambda x: round(x / gender_wtfeel.sum(axis=1)*100, 2)) gender_wtfeel_percent gender_wtfeel_percent.plot.bar(); # ### Results (Question Two) # Remember, using the display and numerical summary, you need to describe the features of a single quantitative distribution. # # Do that by describing the key features of the display and by supporting your description with numerical measures. # # Keep in mind that the appropriate numerical measures for the current situation (i.e., measures of center and spread) will depend on the shape of the distribution you find. # **Your Answer-** # ### Draw Conclusions (Question Two) # **Question-** What do the results you got indicate about how males and females differ with respect to their body image? # **Your Answer-** # ### Reflect on Conclusions (Question Two) # Relate the comments you made before analyzing the data (these appear in the textbox above) by commenting on both of the following: # # * how your expectations differ (or do not differ) from the actual results # * if it is relevant or meaningful in context, think of a way that these results could be used in practice # **your Answer-** # ## Question Three # **Is students' academic performance in college related to their typical seating location in class?** # #### Reflect on Question (Question Three) # Before analyzing the data and discovering the relationship between student's academic performance and their typical seat in class, try to predict what the data will show (use your own experience and intuition). # **Your Answer-** # ### Analyze Data (Question Three) # #### Plan Analyses (Question Three) # Before choosing the appropriate analyses, it is helpful to: # # **Identify the relevant variables:** # # Which variable(s) among those listed below is/are particularly relevant to the current question? # # * Gender # * Height # * GPA # * HS_GPA # * Seat # * WtFeel # * Cheat # # **Your Answer-** # **Classify the relevant variables:** # * The variable **GPA** is __________ variable and is __________. # * The variable **Seat** is __________ variable and is __________. # **Your Answer-** # ### Exploratory Analysis (Question Three) # #### Determine Displays and Measures (Question Three) # A meaningful display is: # # * Side-by-side boxplots # * Scatterplot # * Two-way Table # * Piechart # * Histogram # # **Your Answer-** # **A meaningful numerical summary to supplement the above display is** # * Descriptive statistics(five point summary) # * Correlation r # * Conditional Percentages # **Your Answer-** # ### Conduct Analysis (Question Three) # create side by side box plot students.boxplot(by='Seat',column='GPA', grid=False, figsize=(7,6)); # summary statistics for GPA grouped by Seat students.groupby('Seat')['GPA'].describe() # ### Results (Question Three) # Remember, using the display and numerical summary, you need to describe the features of a single quantitative distribution. # # Do that by describing the key features of the display and by supporting your description with numerical measures. # # Keep in mind that the appropriate numerical measures for the current situation (i.e., measures of center and spread) will depend on the shape of the distribution you find. # **Your Answer-** # ### Draw Conclusions (Question Three) # **What do the results you got indicate about the relationship between typical seating location in class and academic performance in college?** # **Your Answer-** # #### Reflect on Conclusions (Question Three) # Relate the comments you made before analyzing the data (these appear in the textbox above) by commenting on both of the following: # # how your expectations differ (or do not differ) from the actual results # if it is relevant or meaningful in context, think of a way that these results could be used in practice # # **Your Answer-** # ### Summarize # Now that you have addressed all the questions individually, write a short summary report of the main findings that you discovered using the data. For your convenience, your individual conclusions are provided below. You may edit them to create your summary. # # After you write your summary, you are done # **Your Answer-** # # Summary (EDA) # This summary provides a quick recap of the material you've learned in the Exploratory Data Analysis section. Please note that this summary **does not provide complete coverage** of the material, but just lists the main points. We therefore recommend that you use this summary only as a checklist or a review before going on to the next section, or before an exam. # # * The purpose of exploratory data analysis (EDA) is to convert the available **data** from their raw form to an informative one, in which the main features of the data are illuminated. # # # * When performing EDA, we should always: # * use **visual displays** (graphs or tables) plus numerical summaries. # * describe the **overall pattern** and mention any **striking deviations** from that pattern. # * **interpret** the results we got in **context**. # # # * When examining the **distribution** of a single variable, we distinguish between a **categorical** variable and a **quantitative** variable. # # # * The distribution of a **categorical** variable is summarized using: # # * Display: pie-chart or bar-chart (variation: pictogram → can be misleading—beware!) # * Numerical summaries: category (group) percentages. # # # * The distribution of a **quantitative** variable is summarized using: # # # * Display: histogram (or stemplot, mainly for small data sets). When describing the distribution as displayed by the histogram, we should describe the: # * Overall pattern → shape, center, spread. # * Deviations from the pattern → outliers. # * Numerical summaries: descriptive statistics (measure of center plus measure of spread): # * If distribution is symmetric with no outliers, use mean and standard deviation. # * Otherwise, use the five-number summary, in particular, median and IQR (inter-quartile range). # # # * The five-number summary and the 1.5(IQR) Criterion for detecting outliers are the ingredients we need to build the boxplot. Boxplots are most effective when used side-by-side for comparing distributions (see also case C→Q in examining relationships). # # * In the special case of a distribution having the normal shape, the Standard Deviation Rule applies. This rule tells us approximately what percent of the observations fall within 1,2, or 3 standard deviations away from the mean. In particular, when a distribution is approximately normal, almost all the observations (99.7%) fall within 3 standard deviations of the mean. # # * When examining the relationship between two variables, the first step is to classify the two relevant variables according to their role and type: # # ![image](../img/role_type.png) # # and only then to determine the appropriate tools for summarizing the data. (We don't deal with case Q→C in this course). # # * Case C→Q: # # Exploring the relationship amounts to comparing the distributions of the quantitative response variable for each category of the explanatory variable. To do this, we use: # # * Display: side-by-side boxplots. # * Numerical summaries: descriptive statistics of the response variable, for each value (category) of the explanatory variable separately. # # Case C→C: # Exploring the relationship amounts to comparing the distributions of the categorical response variable, for each category of the explanatory variable. To do this, we use: # # * Display: two-way table. # * Numerical summaries: conditional percentages (of the response variable for each value (category) of the explanatory variable separately). # # Case Q→Q: # We examine the relationship using: # # * Display: scatterplot. When describing the relationship as displayed by the scatterplot, be sure to consider: # * Overall pattern → direction, form, strength. # * Deviations from the pattern → outliers. # * Labeling the scatterplot (including a relevant third categorical variable in our analysis), might add some insight into the nature of the relationship. # # In the special case that the scatterplot displays a linear relationship (and only then), we supplement the scatterplot with: # # * Numerical summaries: the correlation coefficient (r) measures the direction and, more importantly, the strength of the linear relationship. The closer r is to 1 (or -1), the stronger the positive (or negative) linear relationship. r is unitless, influenced by outliers, and should be used only as a supplement to the scatterplot. # # * When the relationship is linear (as displayed by the scatterplot, and supported by the correlation r), we can summarize the linear pattern using the least squares regression line. Remember that: # # * The slope of the regression line tells us the average change in the response variable that results from a 1-unit increase in the explanatory variable. # * When using the regression line for predictions, you should beware of extrapolation. # # * When examining the relationship between two variables (regardless of the case), any observed relationship (association) does not imply causation, due to the possible presence of lurking variables. # # * When we include a lurking variable in our analysis, we might need to rethink the direction of the relationship → Simpson's paradox. # #
06_EDA_Examining Relationships_Exercises_part6.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Advanced usage # This notebook shows some more advanced features of `skorch`. More examples will be added with time. # # <table align="left"><td> # <a target="_blank" href="https://colab.research.google.com/github/skorch-dev/skorch/blob/master/notebooks/Advanced_Usage.ipynb"> # <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td><td> # <a target="_blank" href="https://github.com/skorch-dev/skorch/blob/master/notebooks/Advanced_Usage.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table> # ### Table of contents # * [Setup](#Setup) # * [Callbacks](#Callbacks) # * [Writing your own callback](#Writing-a-custom-callback) # * [Accessing callback parameters](#Accessing-callback-parameters) # * [Working with different data types](#Working-with-different-data-types) # * [Working with datasets](#Working-with-Datasets) # * [Working with dicts](#Working-with-dicts) # * [Multiple return values](#Multiple-return-values-from-forward) # * [Implementing a simple autoencoder](#Implementing-a-simple-autoencoder) # * [Training the autoencoder](#Training-the-autoencoder) # * [Extracting the decoder and the encoder output](#Extracting-the-decoder-and-the-encoder-output) ! [ ! -z "$COLAB_GPU" ] && pip install torch skorch import torch from torch import nn import torch.nn.functional as F torch.manual_seed(0) torch.cuda.manual_seed(0) # ## Setup # ### A toy binary classification task # We load a toy classification task from `sklearn`. import numpy as np from sklearn.datasets import make_classification np.random.seed(0) X, y = make_classification(1000, 20, n_informative=10, random_state=0) X, y = X.astype(np.float32), y.astype(np.int64) X.shape, y.shape, y.mean() # ### Definition of the `pytorch` classification `module` # We define a vanilla neural network with two hidden layers. The output layer should have 2 output units since there are two classes. In addition, it should have a softmax nonlinearity, because later, when calling `predict_proba`, the output from the `forward` call will be used. from skorch import NeuralNetClassifier class ClassifierModule(nn.Module): def __init__( self, num_units=10, nonlin=F.relu, dropout=0.5, ): super(ClassifierModule, self).__init__() self.num_units = num_units self.nonlin = nonlin self.dropout = dropout self.dense0 = nn.Linear(20, num_units) self.nonlin = nonlin self.dropout = nn.Dropout(dropout) self.dense1 = nn.Linear(num_units, 10) self.output = nn.Linear(10, 2) def forward(self, X, **kwargs): X = self.nonlin(self.dense0(X)) X = self.dropout(X) X = F.relu(self.dense1(X)) X = F.softmax(self.output(X), dim=-1) return X # ## Callbacks # Callbacks are a powerful and flexible way to customize the behavior of your neural network. They are all called at specific points during the model training, e.g. when training starts, or after each batch. Have a look at the `skorch.callbacks` module to see the callbacks that are already implemented. # ### Writing a custom callback # Although `skorch` comes with a handful of useful callbacks, you may find that you would like to write your own callbacks. Doing so is straightforward, just remember these rules: # * They should inherit from `skorch.callbacks.Callback`. # * They should implement at least one of the `on_`-methods provided by the parent class (e.g. `on_batch_begin` or `on_epoch_end`). # * As argument, the `on_`-methods first get the `NeuralNet` instance, and, where appropriate, the local data (e.g. the data from the current batch). The method should also have `**kwargs` in the signature for potentially unused arguments. # * *Optional*: If you have attributes that should be reset when the model is re-initialized, those attributes should be set in the `initialize` method. # Here is an example of a callback that remembers at which epoch the validation accuracy reached a certain value. Then, when training is finished, it calls a mock Twitter API and tweets that epoch. We proceed as follows: # * We set the desired minimum accuracy during `__init__`. # * We set the critical epoch during `initialize`. # * After each epoch, if the critical accuracy has not yet been reached, we check if it was reached. # * When training finishes, we send a tweet informing us whether our training was successful or not. # + from skorch.callbacks import Callback def tweet(msg): print("~" * 60) print("*tweet*", msg, "#skorch #pytorch") print("~" * 60) class AccuracyTweet(Callback): def __init__(self, min_accuracy): self.min_accuracy = min_accuracy def initialize(self): self.critical_epoch_ = -1 def on_epoch_end(self, net, **kwargs): if self.critical_epoch_ > -1: return # look at the validation accuracy of the last epoch if net.history[-1, 'valid_acc'] >= self.min_accuracy: self.critical_epoch_ = len(net.history) def on_train_end(self, net, **kwargs): if self.critical_epoch_ < 0: msg = "Accuracy never reached {} :(".format(self.min_accuracy) else: msg = "Accuracy reached {} at epoch {}!!!".format( self.min_accuracy, self.critical_epoch_) tweet(msg) # - # Now we initialize a `NeuralNetClassifier` and pass your new callback in a list to the `callbacks` argument. After that, we train the model and see what happens. net = NeuralNetClassifier( ClassifierModule, max_epochs=15, lr=0.02, warm_start=True, callbacks=[AccuracyTweet(min_accuracy=0.7)], ) net.fit(X, y) # Oh no, our model never reached a validation accuracy of 0.7. Let's train some more (this is possible because we set `warm_start=True`): net.fit(X, y) assert net.history[-1, 'valid_acc'] >= 0.7 # Finally, the validation score exceeded 0.7. Hooray! # ### Accessing callback parameters # Say you would like to use a learning rate schedule with your neural net, but you don't know what parameters are best for that schedule. Wouldn't it be nice if you could find those parameters with a grid search? With `skorch`, this is possible. Below, we show how to access the parameters of your callbacks. # To simplify the access to your callback parameters, it is best if you give your callback a name. This is achieved by passing the `callbacks` parameter a list of *name*, *callback* tuples, such as: # # callbacks=[ # ('scheduler', LearningRateScheduler)), # ... # ], # # This way, you can access your callbacks using the double underscore semantics (as, for instance, in an `sklearn` `Pipeline`): # # callbacks__scheduler__epoch=50, # # So if you would like to perform a grid search on, say, the number of units in the hidden layer and the learning rate schedule, it could look something like this: # # param_grid = { # 'module__num_units': [50, 100, 150], # 'callbacks__scheduler__epoch': [10, 50, 100], # } # # *Note*: If you would like to refresh your knowledge on grid search, look [here](http://scikit-learn.org/stable/modules/grid_search.html#grid-search), [here](http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_text_feature_extraction.html), or in the *Basic_Usage* notebok. # Below, we show how accessing the callback parameters works our `AccuracyTweet` callback: net = NeuralNetClassifier( ClassifierModule, max_epochs=10, lr=0.1, warm_start=True, callbacks=[ ('tweet', AccuracyTweet(min_accuracy=0.7)), ], callbacks__tweet__min_accuracy=0.6, ) net.fit(X, y) # As you can see, by passing `callbacks__tweet__min_accuracy=0.6`, we changed that parameter. The same can be achieved by calling the `set_params` method with the corresponding arguments: net.set_params(callbacks__tweet__min_accuracy=0.75) net.fit(X, y) # ## Working with different data types # ### Working with `Dataset`s # We encourage you to not pass `Dataset`s to `net.fit` but to let skorch handle `Dataset`s internally. Nonetheless, there are situations where passing `Dataset`s to `net.fit` is hard to avoid (e.g. if you want to load the data lazily during the training). This is supported by skorch but may have some unwanted side-effects relating to sklearn. For instance, `Dataset`s cannot split into train and validation in a stratified fashion without explicit knowledge of the classification targets. # Below we show what happens when you try to fit with `Dataset` and the stratified split fails: class MyDataset(torch.utils.data.Dataset): def __init__(self, X, y): self.X = X self.y = y assert len(X) == len(y) def __len__(self): return len(self.X) def __getitem__(self, i): return self.X[i], self.y[i] X, y = make_classification(1000, 20, n_informative=10, random_state=0) X, y = X.astype(np.float32), y.astype(np.int64) dataset = MyDataset(X, y) net = NeuralNetClassifier(ClassifierModule) try: net.fit(dataset, y=None) except ValueError as e: print("Error:", e) net.train_split.stratified # As you can see, the stratified split fails since `y` is not known. There are two solutions to this: # # * turn off stratified splitting ( `net.train_split.stratified=False`) # * pass `y` explicitly (if possible), even if it is implicitely contained in the `Dataset` # # The second solution is shown below: net.fit(dataset, y=y) # ### Working with dicts # #### The standard case # skorch has built-in support for dictionaries as data containers. Here we show a somewhat contrived example of how to use dicts, but it should get the point across. First we create data and put it into a dictionary `X_dict` with two keys `X0` and `X1`: X, y = make_classification(1000, 20, n_informative=10, random_state=0) X, y = X.astype(np.float32), y.astype(np.int64) X0, X1 = X[:, :10], X[:, 10:] X_dict = {'X0': X0, 'X1': X1} # When skorch passes the dict to the pytorch module, it will pass the data as keyword arguments to the forward call. That means that we should accept the two keys `XO` and `X1` in the forward method, as shown below: class ClassifierWithDict(nn.Module): def __init__( self, num_units0=50, num_units1=50, nonlin=F.relu, dropout=0.5, ): super(ClassifierWithDict, self).__init__() self.num_units0 = num_units0 self.num_units1 = num_units1 self.nonlin = nonlin self.dropout = dropout self.dense0 = nn.Linear(10, num_units0) self.dense1 = nn.Linear(10, num_units1) self.nonlin = nonlin self.dropout = nn.Dropout(dropout) self.output = nn.Linear(num_units0 + num_units1, 2) # NOTE: We accept X0 and X1, the keys from the dict, as arguments def forward(self, X0, X1, **kwargs): X0 = self.nonlin(self.dense0(X0)) X0 = self.dropout(X0) X1 = self.nonlin(self.dense1(X1)) X1 = self.dropout(X1) X = torch.cat((X0, X1), dim=1) X = F.relu(X) X = F.softmax(self.output(X), dim=-1) return X # As long as we keep this in mind, we are good to go. net = NeuralNetClassifier(ClassifierWithDict, verbose=0) net.fit(X_dict, y) # #### Working with sklearn `Pipeline` and `GridSearchCV` from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer from sklearn.model_selection import GridSearchCV # sklearn makes the assumption that incoming data should be numpy/sparse arrays or something similar. This clashes with the use of dictionaries. Unfortunately, it is sometimes impossible to work around that for now (for instance using skorch with `BaggingClassifier`). Other times, there are possibilities. # # When we have a preprocessing pipeline that involves `FunctionTransformer`, we have to pass the parameter `validate=False` (which is the default value now) so that sklearn allows the dictionary to pass through. Everything else works: pipe = Pipeline([ ('do-nothing', FunctionTransformer(validate=False)), ('net', net), ]) pipe.fit(X_dict, y) # When trying a grid or randomized search, it is not that easy to pass a dict. If we try, we will get an error: param_grid = { 'net__module__num_units0': [10, 25, 50], 'net__module__num_units1': [10, 25, 50], 'net__lr': [0.01, 0.1], } grid_search = GridSearchCV(pipe, param_grid, scoring='accuracy', verbose=1, cv=3) try: grid_search.fit(X_dict, y) except Exception as e: print(e) # The error above occurs because sklearn gets the length of the input data, which is 2 for the dict, and believes that is inconsistent with the length of the target (1000). # # To get around that, skorch provides a helper class called `SliceDict`. It allows us to wrap our dictionaries so that they also behave like a numpy array: from skorch.helper import SliceDict X_slice_dict = SliceDict(X0=X0, X1=X1) # X_slice_dict = SliceDict(**X_dict) would also work # The SliceDict shows the correct length, shape, and is sliceable across values: print("Length of dict: {}, length of SliceDict: {}".format(len(X_dict), len(X_slice_dict))) print("Shape of SliceDict: {}".format(X_slice_dict.shape)) print("Slicing the SliceDict slices across values: {}".format(X_slice_dict[:2])) # With this, we can call `GridSearchCV` just as expected: grid_search.fit(X_slice_dict, y) grid_search.best_score_, grid_search.best_params_ # ## Multiple return values from `forward` # Often, we want our `Module.forward` method to return more than just one value. There can be several reasons for this. Maybe, the criterion requires not one but several outputs. Or perhaps we want to inspect intermediate values to learn more about our model (say inspecting attention in a sequence-to-sequence model). Fortunately, `skorch` makes it easy to achieve this. In the following, we demonstrate how to handle multiple outputs from the `Module`. # To demonstrate this, we implement a very simple autoencoder. It consists of an encoder that reduces our input of 20 units to 5 units using two linear layers, and a decoder that tries to reconstruct the original input, again using two linear layers. # ### Implementing a simple autoencoder from skorch import NeuralNetRegressor class Encoder(nn.Module): def __init__(self, num_units=5): super().__init__() self.num_units = num_units self.encode = nn.Sequential( nn.Linear(20, 10), nn.ReLU(), nn.Linear(10, self.num_units), nn.ReLU(), ) def forward(self, X): encoded = self.encode(X) return encoded class Decoder(nn.Module): def __init__(self, num_units): super().__init__() self.num_units = num_units self.decode = nn.Sequential( nn.Linear(self.num_units, 10), nn.ReLU(), nn.Linear(10, 20), ) def forward(self, X): decoded = self.decode(X) return decoded # The autoencoder module below actually returns a tuple of two values, the decoded input and the encoded input. This way, we cannot only use the decoded input to calculate the normal loss but also have access to the encoded state. class AutoEncoder(nn.Module): def __init__(self, num_units): super().__init__() self.num_units = num_units self.encoder = Encoder(num_units=self.num_units) self.decoder = Decoder(num_units=self.num_units) def forward(self, X): encoded = self.encoder(X) decoded = self.decoder(encoded) return decoded, encoded # <- return a tuple of two values # Since the module's `forward` method returns two values, we have to adjust our objective to do the right thing with those values. If we don't do this, the criterion wouldn't know what to do with the two values and would raise an error. # # One strategy would be to only use the decoded state for the loss and discard the encoded state. For this demonstration, we have a different plan: We would like the encoded state to be sparse. Therefore, we add an L1 loss of the encoded state to the reconstruction loss. This way, the net will try to reconstruct the input as accurately as possible while keeping the encoded state as sparse as possible. # # To implement this, the right method to override is called `get_loss`, which is where `skorch` computes and returns the loss. It gets the prediction (our tuple) and the target as input, as well as other arguments and keywords that we pass through. We create a subclass of `NeuralNetRegressor` that overrides said method and implements our idea for the loss. class AutoEncoderNet(NeuralNetRegressor): def get_loss(self, y_pred, y_true, *args, **kwargs): decoded, encoded = y_pred # <- unpack the tuple that was returned by `forward` loss_reconstruction = super().get_loss(decoded, y_true, *args, **kwargs) loss_l1 = 1e-3 * torch.abs(encoded).sum() return loss_reconstruction + loss_l1 # *Note*: Alternatively, we could have used an unaltered `NeuralNetRegressor` but implement a custom criterion that is responsible for unpacking the tuple and computing the loss. # ### Training the autoencoder # Now that everything is ready, we train the model as usual. We initialize our net subclass with the `AutoEncoder` module and call the `fit` method with `X` both as input and as target (since we want to reconstruct the original data): net = AutoEncoderNet( AutoEncoder, module__num_units=5, lr=0.3, ) net.fit(X, X) # Voilà, the model was trained using our custom loss function that makes use of both predicted values. # ### Extracting the decoder and the encoder output # Sometimes, we may wish to inspect all the values returned by the `foward` method of the module. There are several ways to achieve this. In theory, we can always access the module directly by using the `net.module_` attribute. However, this is unwieldy, since this completely shortcuts the prediction loop, which takes care of important steps like casting `numpy` arrays to `pytorch` tensors and batching. # # Also, we cannot use the `predict` method on the net. This method will only return the first output from the forward method, in this case the decoded state. The reason for this is that `predict` is part of the `sklearn` API, which requires there to be only one output. This is shown below: y_pred = net.predict(X) y_pred.shape # only the decoded state is returned # However, the net itself provides two methods to retrieve all outputs. The first one is the `net.forward` method, which retrieves *all* the predicted batches from the `Module.forward` and concatenates them. Use this to retrieve the complete decoded and encoded state: decoded_pred, encoded_pred = net.forward(X) decoded_pred.shape, encoded_pred.shape # The other method is called `net.forward_iter`. It is similar to `net.forward` but instead of collecting all the batches, this method is lazy and only yields one batch at a time. This can be especially useful if the output doesn't fit into memory: for decoded_pred, encoded_pred in net.forward_iter(X): # do something with each batch break decoded_pred.shape, encoded_pred.shape # Finally, let's make sure that our initial goal of having a sparse encoded state was met. We check how many activities are close to zero: torch.isclose(encoded_pred, torch.zeros_like(encoded_pred)).float().mean() # As we had hoped, the encoded state is quite sparse, with the majority of outpus being 0.
notebooks/Advanced_Usage.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [![image](https://colab.research.google.com/assets/colab-badge.svg)](https://gishub.org/ym-colab) # [![image](https://mybinder.org/badge_logo.svg)](https://gishub.org/ym-binder) # [![image](https://mybinder.org/badge_logo.svg)](https://gishub.org/ym-binder-nb) # ![](https://i.imgur.com/5FhzCc0.png) # **Interactive Mapping and Geospatial Analysis with Leafmap and Jupyter** # # # This notebook was developed for the 90-min [leafmap workshop](https://www.eventbrite.com/e/interactive-mapping-and-geospatial-analysis-tickets-188600217327?keep_tld=1) taking place on November 9, 2021. The workshop is hosted by [YouthMappers](https://www.youthmappers.org). # # - Author: [<NAME>](https://github.com/giswqs) # - Slides: https://gishub.org/ym # - Streamlit web app: https://streamlit.gishub.org # # Launch this notebook to execute code interactively using: # - Google Colab: https://gishub.org/ym-colab # - Pangeo Binder JupyterLab: https://gishub.org/ym-binder # - Pangeo Binder Jupyter Notebook: https://gishub.org/ym-binder-nb # # ## Introduction # # ### Workshop description # # [Leafmap](https://leafmap.org) is a Python package for interactive mapping and geospatial analysis with minimal coding in a Jupyter environment. It is built upon a number of open-source packages, such as [folium](https://github.com/python-visualization/folium) and [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) (for creating interactive maps), [WhiteboxTools](https://github.com/jblindsay/whitebox-tools) and [whiteboxgui](https://github.com/giswqs/whiteboxgui) (for analyzing geospatial data), and [ipywidgets](https://github.com/jupyter-widgets/ipywidgets) (for designing interactive graphical user interface). The WhiteboxTools library currently contains 480+ tools for advanced geospatial analysis. Leafmap provides many convenient functions for loading and visualizing geospatial data with only one line of code. Users can also use the interactive user interface to load geospatial data without coding. Anyone with a web browser and Internet connection can use leafmap to perform geospatial analysis and data visualization in the cloud with minimal coding. The topics that will be covered in this workshop include: # # - A brief introduction to Jupyter and Colab # - A brief introduction to leafmap and relevant web resources # - Creating interactive maps using multiple plotting backends # - Searching and loading basemaps # - Loading and visualizing vector/raster data # - Using Cloud Optimized GeoTIFF (COG) and SpatialTemporal Asset Catalog (STAC) # - Downloading OpenStreetMap data # - Loading data from a PostGIS database # - Creating custom legends and colorbars # - Creating split-panel maps and linked maps # - Visualizing Planet global monthly/quarterly mosaic # - Designing and publishing interactive web apps # - Performing geospatial analysis (e.g., hydrological analysis) using whiteboxgui # # This workshop is intended for scientific programmers, data scientists, geospatial analysts, and concerned citizens of Earth. The attendees are expected to have a basic understanding of Python and the Jupyter ecosystem. Familiarity with Earth science and geospatial datasets is useful but not required. More information about leafmap can be found at https://leafmap.org. # # # ### Jupyter keyboard shortcuts # # - Shift+Enter: run cell, select below # - Ctrl+Enter: : run selected cells # - Alt+Enter: run cell and insert below # - Tab: code completion or indent # - Shift+Tab: tooltip # - Ctrl+/: comment out code # ## Set up environment # # ### Required Python packages: # * [leafmap](https://github.com/giswqs/leafmap) - A Python package for interactive mapping and geospatial analysis with minimal coding in a Jupyter environment. # * [keplergl](https://docs.kepler.gl/docs/keplergl-jupyter) - A high-performance web-based application for visual exploration of large-scale geolocation data sets. # * [pydeck](https://deckgl.readthedocs.io/en/latest) - High-scale spatial rendering in Python, powered by deck.gl. # * [geopandas](https://geopandas.org) - An open source project to make working with geospatial data in python easier. # * [xarray-leaflet](https://github.com/davidbrochart/xarray_leaflet) - An xarray extension for tiled map plotting. # # ### Use Google Colab # # Click the button below to open this notebook in Google Colab and execute code interactively. # # [![image](https://colab.research.google.com/assets/colab-badge.svg)](https://githubtocolab.com/giswqs/leafmap/blob/master/examples/workshops/YouthMappers_2021.ipynb) import os import subprocess import sys # + import warnings warnings.filterwarnings("ignore") # - # A function for installing Python packages. def install(package): subprocess.check_call([sys.executable, "-m", "pip", "install", package]) # Install required Python packages in Google Colab. pkgs = [ 'leafmap', 'geopandas', 'keplergl', 'pydeck', 'xarray_leaflet', 'osmnx', 'pygeos', 'imageio', 'tifffile', ] if "google.colab" in sys.modules: for pkg in pkgs: install(pkg) # ### Use Pangeo Binder # # Click the buttons below to open this notebook in JupyterLab (first button) or Jupyter Notebook (second button) and execute code interactively. # # [![image](https://mybinder.org/badge_logo.svg)](https://gishub.org/ym-binder) # [![image](https://mybinder.org/badge_logo.svg)](https://gishub.org/ym-binder-nb) # # - JupyterLab: https://gishub.org/ym-binder # - Jupyter Notebook: https://gishub.org/ym-binder-nb # ### Use Miniconda/Anaconda # # If you have # [Anaconda](https://www.anaconda.com/distribution/#download-section) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html) installed on your computer, you can install leafmap using the following commands. Leafmap has an optional dependency - [geopandas](https://geopandas.org), which can be challenging to install on some computers, especially Windows. It is highly recommended that you create a fresh conda environment to install geopandas and leafmap. Follow the commands below to set up a conda env and install geopandas, leafmap, pydeck, keplergl, and xarray_leaflet. # # ``` # conda create -n geo python=3.8 # conda activate geo # conda install geopandas # conda install mamba -c conda-forge # mamba install leafmap keplergl pydeck xarray_leaflet -c conda-forge # mamba install osmnx pygeos imageio tifffile -c conda-forge # jupyter lab # ``` try: import leafmap except ImportError: install('leafmap') # ## Create an interactive map # # `Leafmap` has five plotting backends: [folium](https://github.com/python-visualization/folium), [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet), [here-map](https://github.com/heremaps/here-map-widget-for-jupyter), [kepler.gl](https://docs.kepler.gl/docs/keplergl-jupyter), and [pydeck](https://deckgl.readthedocs.io). Note that the backends do not offer equal functionality. Some interactive functionality in `ipyleaflet` might not be available in other plotting backends. To use a specific plotting backend, use one of the following: # # - `import leafmap.leafmap as leafmap` # - `import leafmap.foliumap as leafmap` # - `import leafmap.heremap as leafmap` # - `import leafmap.kepler as leafmap` # - `import leafmap.deck as leafmap` # # ### Use ipyleaflet # + import leafmap m = leafmap.Map() m # - # ### Use folium # + import leafmap.foliumap as leafmap m = leafmap.Map() m # - # ### Use kepler.gl # + import leafmap.kepler as leafmap m = leafmap.Map() m # - # If you encounter an error saying `Error displaying widget: model not found` when trying to display the map, you can use `m.static_map()` as a workaround until this [kepler.gl bug](https://github.com/keplergl/kepler.gl/issues/1165) has been resolved. # # + # m.static_map(width=1280, height=600) # - # ### Use pydeck import leafmap.deck as leafmap m = leafmap.Map() m # ## Customize the default map # # ### Specify map center and zoom level import leafmap m = leafmap.Map(center=(40, -100), zoom=4) # center=[lat, lon] m m = leafmap.Map(center=(51.5, -0.15), zoom=17) m # ### Change map size m = leafmap.Map(height="400px", width="800px") m # ### Set control visibility # # When creating a map, set the following controls to either `True` or `False` as appropriate. # # * attribution_control # * draw_control # * fullscreen_control # * layers_control # * measure_control # * scale_control # * toolbar_control m = leafmap.Map( draw_control=False, measure_control=False, fullscreen_control=False, attribution_control=False, ) m # Remove all controls from the map. m = leafmap.Map() m.clear_controls() m # ## Change basemaps # # Specify a Google basemap to use, can be one of ["ROADMAP", "TERRAIN", "SATELLITE", "HYBRID"]. import leafmap m = leafmap.Map(google_map="TERRAIN") # HYBIRD, ROADMAP, SATELLITE, TERRAIN m # Add a basemap using the `add_basemap()` function. m = leafmap.Map() m.add_basemap("Esri.NatGeoWorldMap") m # Print out the list of available basemaps. for basemap in leafmap.basemaps: print(basemap) # ![](https://i.imgur.com/T1oBWSz.png) m = leafmap.Map() m.add_tile_layer( url="https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}", name="Google Satellite", attribution="Google", ) m # ## Add tile layers # # ### Add XYZ tile layer import leafmap m = leafmap.Map() m.add_tile_layer( url="https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}", name="Google Satellite", attribution="Google", ) m # ### Add WMS tile layer # # More WMS basemaps can be found at the following websites: # # - USGS National Map: https://viewer.nationalmap.gov/services # - MRLC NLCD Land Cover data: https://www.mrlc.gov/data-services-page # - FWS NWI Wetlands data: https://www.fws.gov/wetlands/Data/Web-Map-Services.html m = leafmap.Map() naip_url = 'https://services.nationalmap.gov/arcgis/services/USGSNAIPImagery/ImageServer/WMSServer?' m.add_wms_layer( url=naip_url, layers='0', name='NAIP Imagery', format='image/png', shown=True ) m # ### Add xyzservices provider # # Add a layer from [xyzservices](https://github.com/geopandas/xyzservices) provider object. import os import xyzservices.providers as xyz basemap = xyz.OpenTopoMap basemap m = leafmap.Map() m.add_basemap(basemap) m # ## Add COG/STAC layers # # A Cloud Optimized GeoTIFF (COG) is a regular GeoTIFF file, aimed at being hosted on a HTTP file server, with an internal organization that enables more efficient workflows on the cloud. It does this by leveraging the ability of clients issuing HTTP GET range requests to ask for just the parts of a file they need. # # More information about COG can be found at <https://www.cogeo.org/in-depth.html> # # Some publicly available Cloud Optimized GeoTIFFs: # # * https://stacindex.org/ # * https://cloud.google.com/storage/docs/public-datasets/landsat # * https://www.digitalglobe.com/ecosystem/open-data # * https://earthexplorer.usgs.gov/ # # For this demo, we will use data from https://www.maxar.com/open-data/california-colorado-fires for mapping California and Colorado fires. A list of COGs can be found [here](https://github.com/giswqs/leafmap/blob/master/examples/data/cog_files.txt). # # ### Add COG layer import leafmap # + m = leafmap.Map() url = 'https://opendata.digitalglobe.com/events/california-fire-2020/pre-event/2018-02-16/pine-gulch-fire20/1030010076004E00.tif' url2 = 'https://opendata.digitalglobe.com/events/california-fire-2020/post-event/2020-08-14/pine-gulch-fire20/10300100AAC8DD00.tif' m.add_cog_layer(url, name="Fire (pre-event)") m.add_cog_layer(url2, name="Fire (post-event)") m # - # ### Add STAC layer # # The SpatioTemporal Asset Catalog (STAC) specification provides a common language to describe a range of geospatial information, so it can more easily be indexed and discovered. A 'spatiotemporal asset' is any file that represents information about the earth captured in a certain space and time. The initial focus is primarily remotely-sensed imagery (from satellites, but also planes, drones, balloons, etc), but the core is designed to be extensible to SAR, full motion video, point clouds, hyperspectral, LiDAR and derived data like NDVI, Digital Elevation Models, mosaics, etc. More information about STAC can be found at https://stacspec.org/ # # Some publicly available SpatioTemporal Asset Catalog (STAC): # # * https://stacindex.org # # For this demo, we will use STAC assets from https://stacindex.org/catalogs/spot-orthoimages-canada-2005#/?t=catalogs m = leafmap.Map() url = 'https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json' m.add_stac_layer(url, bands=['B3', 'B2', 'B1'], name='False color') m # ## Add local raster datasets # # The `add_raster` function relies on the `xarray_leaflet` package and is only available for the ipyleaflet plotting backend. Therefore, Google Colab is not supported. Note that `xarray_leaflet` does not work properly on Windows ([source](https://github.com/davidbrochart/xarray_leaflet/issues/30)). import os import leafmap # Download samples raster datasets. More datasets can be downloaded from https://viewer.nationalmap.gov/basic/ # + out_dir = os.getcwd() landsat = os.path.join(out_dir, 'landsat.tif') dem = os.path.join(out_dir, 'dem.tif') # - # Download a small Landsat imagery. if not os.path.exists(landsat): landsat_url = 'https://drive.google.com/file/d/1EV38RjNxdwEozjc9m0FcO3LFgAoAX1Uw/view?usp=sharing' leafmap.download_from_gdrive(landsat_url, 'landsat.tif', out_dir, unzip=False) # Download a small DEM dataset. if not os.path.exists(dem): dem_url = 'https://drive.google.com/file/d/1vRkAWQYsLWCi6vcTMk8vLxoXMFbdMFn8/view?usp=sharing' leafmap.download_from_gdrive(dem_url, 'dem.tif', out_dir, unzip=False) m = leafmap.Map() # Add local raster datasets to the map # # More colormap can be found at https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html m.add_raster(dem, colormap='terrain', layer_name='DEM') m.add_raster(landsat, bands=[5, 4, 3], layer_name='Landsat') m # ## Add legend # # ### Add built-in legend import leafmap # List all available built-in legends. legends = leafmap.builtin_legends for legend in legends: print(legend) # Add a WMS layer and built-in legend to the map. m = leafmap.Map() url = "https://www.mrlc.gov/geoserver/mrlc_display/NLCD_2019_Land_Cover_L48/wms?" m.add_wms_layer( url, layers="NLCD_2019_Land_Cover_L48", name="NLCD 2019 CONUS Land Cover", format="image/png", transparent=True, ) m.add_legend(builtin_legend='NLCD') m # Add U.S. National Wetlands Inventory (NWI). More info at https://www.fws.gov/wetlands. # + m = leafmap.Map(google_map="HYBRID") url1 = "https://www.fws.gov/wetlands/arcgis/services/Wetlands/MapServer/WMSServer?" m.add_wms_layer( url1, layers="1", format='image/png', transparent=True, name="NWI Wetlands Vector" ) url2 = "https://www.fws.gov/wetlands/arcgis/services/Wetlands_Raster/ImageServer/WMSServer?" m.add_wms_layer( url2, layers="0", format='image/png', transparent=True, name="NWI Wetlands Raster" ) m.add_legend(builtin_legend="NWI") m # - # ### Add custom legend # # There are two ways you can add custom legends: # # 1. Define legend labels and colors # 2. Define legend dictionary # # Define legend keys and colors. # + m = leafmap.Map() labels = ['One', 'Two', 'Three', 'Four', 'ect'] # color can be defined using either hex code or RGB (0-255, 0-255, 0-255) colors = ['#8DD3C7', '#FFFFB3', '#BEBADA', '#FB8072', '#80B1D3'] # colors = [(255, 0, 0), (127, 255, 0), (127, 18, 25), (36, 70, 180), (96, 68, 123)] m.add_legend(title='Legend', labels=labels, colors=colors) m # - # Define a legend dictionary. # + m = leafmap.Map() url = "https://www.mrlc.gov/geoserver/mrlc_display/NLCD_2019_Land_Cover_L48/wms?" m.add_wms_layer( url, layers="NLCD_2019_Land_Cover_L48", name="NLCD 2019 CONUS Land Cover", format="image/png", transparent=True, ) legend_dict = { '11 Open Water': '466b9f', '12 Perennial Ice/Snow': 'd1def8', '21 Developed, Open Space': 'dec5c5', '22 Developed, Low Intensity': 'd99282', '23 Developed, Medium Intensity': 'eb0000', '24 Developed High Intensity': 'ab0000', '31 Barren Land (Rock/Sand/Clay)': 'b3ac9f', '41 Deciduous Forest': '68ab5f', '42 Evergreen Forest': '1c5f2c', '43 Mixed Forest': 'b5c58f', '51 Dwarf Scrub': 'af963c', '52 Shrub/Scrub': 'ccb879', '71 Grassland/Herbaceous': 'dfdfc2', '72 Sedge/Herbaceous': 'd1d182', '73 Lichens': 'a3cc51', '74 Moss': '82ba9e', '81 Pasture/Hay': 'dcd939', '82 Cultivated Crops': 'ab6c28', '90 Woody Wetlands': 'b8d9eb', '95 Emergent Herbaceous Wetlands': '6c9fb8', } m.add_legend(title="NLCD Land Cover Classification", legend_dict=legend_dict) m # - # ## Add colormap # # The colormap functionality requires the ipyleaflet plotting backend. Folium is not supported. import leafmap import leafmap.colormaps as cm # ### Common colormaps # # Color palette for DEM data. cm.palettes.dem # Show the DEM palette. cm.plot_colormap(colors=cm.palettes.dem, axis_off=True) # Color palette for NDVI data. cm.palettes.ndvi # Show the NDVI palette. cm.plot_colormap(colors=cm.palettes.ndvi) # ### Custom colormaps # # Specify the number of classes for a palette. cm.get_palette('terrain', n_class=8) # Show the terrain palette with 8 classes. cm.plot_colormap(colors=cm.get_palette('terrain', n_class=8)) # Create a palette with custom colors, label, and font size. cm.plot_colormap(colors=["red", "green", "blue"], label="Temperature", font_size=12) # Create a discrete color palette. cm.plot_colormap( colors=["red", "green", "blue"], discrete=True, label="Temperature", font_size=12 ) # Specify the width and height for the palette. cm.plot_colormap( 'terrain', label="Elevation", width=8.0, height=0.4, orientation='horizontal', vmin=0, vmax=1000, ) # Change the orentation of the colormap to be vertical. cm.plot_colormap( 'terrain', label="Elevation", width=0.4, height=4, orientation='vertical', vmin=0, vmax=1000, ) # ### Horizontal colormap # # Add a horizontal colorbar to an interactive map. m = leafmap.Map() m.add_basemap("OpenTopoMap") m.add_colormap( 'terrain', label="Elevation", width=8.0, height=0.4, orientation='horizontal', vmin=0, vmax=4000, ) m # ### Vertical colormap # # Add a vertical colorbar to an interactive map. m = leafmap.Map() m.add_basemap("OpenTopoMap") m.add_colormap( 'terrain', label="Elevation", width=0.4, height=4, orientation='vertical', vmin=0, vmax=4000, ) m # ### List of available colormaps cm.plot_colormaps(width=12, height=0.4) # ## Add vector datasets # # ### Add CSV # # Read a CSV as a Pandas DataFrame. import os import leafmap in_csv = 'https://raw.githubusercontent.com/giswqs/data/main/world/world_cities.csv' df = leafmap.csv_to_pandas(in_csv) df # Create a point layer from a CSV file containing lat/long information. m = leafmap.Map() m.add_xy_data(in_csv, x="longitude", y="latitude", layer_name="World Cities") m # Set the output directory. out_dir = os.getcwd() out_shp = os.path.join(out_dir, 'world_cities.shp') # Convert a CSV file containing lat/long information to a shapefile. leafmap.csv_to_shp(in_csv, out_shp) # Convert a CSV file containing lat/long information to a GeoJSON. out_geojson = os.path.join(out_dir, 'world_cities.geojson') leafmap.csv_to_geojson(in_csv, out_geojson) # Convert a CSV file containing lat/long information to a GeoPandas GeoDataFrame. gdf = leafmap.csv_to_gdf(in_csv) gdf # ### Add GeoJSON # # Add a GeoJSON to the map. m = leafmap.Map(center=[0, 0], zoom=2) in_geojson = 'https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/cable_geo.geojson' m.add_geojson(in_geojson, layer_name="Cable lines", info_mode='on_hover') m # Add a GeoJSON with random filled color to the map. m = leafmap.Map(center=[0, 0], zoom=2) url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/countries.geojson" m.add_geojson( url, layer_name="Countries", fill_colors=['red', 'yellow', 'green', 'orange'] ) m # Use the `style_callback` function for assigning a random color to each polygon. # + import random m = leafmap.Map(center=[0, 0], zoom=2) url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/countries.geojson" def random_color(feature): return { 'color': 'black', 'fillColor': random.choice(['red', 'yellow', 'green', 'orange']), } m.add_geojson(url, layer_name="Countries", style_callback=random_color) m # - # Use custom `style` and `hover_style` functions. m = leafmap.Map(center=[0, 0], zoom=2) url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/countries.geojson" style = { "stroke": True, "color": "#0000ff", "weight": 2, "opacity": 1, "fill": True, "fillColor": "#0000ff", "fillOpacity": 0.1, } hover_style = {"fillOpacity": 0.7} m.add_geojson(url, layer_name="Countries", style=style, hover_style=hover_style) m # ### Add shapefile m = leafmap.Map(center=[0, 0], zoom=2) in_shp = 'https://github.com/giswqs/leafmap/raw/master/examples/data/countries.zip' m.add_shp(in_shp, layer_name="Countries") m # ### Add KML import leafmap m = leafmap.Map() in_kml = 'https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/us_states.kml' m.add_kml(in_kml, layer_name="US States KML") m # ### Add GeoDataFrame import geopandas as gpd m = leafmap.Map() gdf = gpd.read_file( "https://github.com/giswqs/leafmap/raw/master/examples/data/cable_geo.geojson" ) m.add_gdf(gdf, layer_name="Cable lines") m # Read the GeoPandas sample dataset as a GeoDataFrame. path_to_data = gpd.datasets.get_path("nybb") gdf = gpd.read_file(path_to_data) gdf m = leafmap.Map() m.add_gdf(gdf, layer_name="New York boroughs", fill_colors=["red", "green", "blue"]) m # + [markdown] tags=[] # ### Add point layer # # Add a point layer using the interactive GUI. # # ![](https://i.imgur.com/1QVEtlN.gif) # - m = leafmap.Map() m # Add a point layer programmatically. m = leafmap.Map() url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/us_cities.geojson" m.add_point_layer(url, popup=["name", "pop_max"], layer_name="US Cities") m # ### Add vector # # The `add_vector` function supports any vector data format supported by GeoPandas. m = leafmap.Map(center=[0, 0], zoom=2) url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/countries.geojson" m.add_vector( url, layer_name="Countries", fill_colors=['red', 'yellow', 'green', 'orange'] ) m # ## Download OSM data # # ### OSM from geocode # # Add OSM data of place(s) by name or ID to the map. Note that the leafmap custom layer control does not support GeoJSON, we need to use the ipyleaflet built-in layer control. import leafmap m = leafmap.Map(toolbar_control=False, layers_control=True) m.add_osm_from_geocode("New York City", layer_name='NYC') m m = leafmap.Map(toolbar_control=False, layers_control=True) m.add_osm_from_geocode("Chicago, Illinois", layer_name='Chicago, IL') m # ### OSM from place # # Add OSM entities within boundaries of geocodable place(s) to the map. m = leafmap.Map(toolbar_control=False, layers_control=True) place = "Bunker Hill, Los Angeles, California" tags = {"building": True} m.add_osm_from_place(place, tags, layer_name="Los Angeles, CA") m # Show OSM feature tags. # https://wiki.openstreetmap.org/wiki/Map_features # + # leafmap.osm_tags_list() # - # ### OSM from address m = leafmap.Map(toolbar_control=False, layers_control=True) m.add_osm_from_address( address="New York City", tags={"amenity": "bar"}, dist=1500, layer_name="NYC bars" ) m m = leafmap.Map(toolbar_control=False, layers_control=True) m.add_osm_from_address( address="New York City", tags={"landuse": ["retail", "commercial"], "building": True}, dist=1000, layer_name="NYC buildings", ) m # ### OSM from bbox m = leafmap.Map(toolbar_control=False, layers_control=True) north, south, east, west = 40.7551, 40.7454, -73.9738, -73.9965 m.add_osm_from_bbox( north, south, east, west, tags={"amenity": "bar"}, layer_name="NYC bars" ) m # ### OSM from point # # Add OSM entities within some distance N, S, E, W of a point to the map. m = leafmap.Map( center=[46.7808, -96.0156], zoom=12, toolbar_control=False, layers_control=True ) m.add_osm_from_point( center_point=(46.7808, -96.0156), tags={"natural": "water"}, dist=10000, layer_name="Lakes", ) m m = leafmap.Map( center=[39.9170, 116.3908], zoom=15, toolbar_control=False, layers_control=True ) m.add_osm_from_point( center_point=(39.9170, 116.3908), tags={"building": True, "natural": "water"}, dist=1000, layer_name="Beijing", ) m # ### OSM from view # # Add OSM entities within the current map view to the map. m = leafmap.Map(toolbar_control=False, layers_control=True) m.set_center(-73.9854, 40.7500, 16) m m.add_osm_from_view(tags={"amenity": "bar", "building": True}, layer_name="New York") # Create a GeoPandas GeoDataFrame from place. gdf = leafmap.osm_gdf_from_place("New York City", tags={"amenity": "bar"}) gdf # ## Use WhiteboxTools # # Use the built-in toolbox to perform geospatial analysis. For example, you can perform depression filling using the sample DEM dataset downloaded in the above step. # # ![](https://i.imgur.com/KGHly63.png) import os import leafmap import urllib.request # Download a sample DEM dataset. url = 'https://github.com/giswqs/whitebox-python/raw/master/whitebox/testdata/DEM.tif' urllib.request.urlretrieve(url, "dem.tif") m = leafmap.Map() m # Display the toolbox using the default mode. leafmap.whiteboxgui() # Display the toolbox using the collapsible tree mode. Note that the tree mode does not support Google Colab. leafmap.whiteboxgui(tree=True) # Perform geospatial analysis using the [whitebox](https://github.com/giswqs/whitebox-python) package. import os import whitebox wbt = whitebox.WhiteboxTools() wbt.verbose = False data_dir = os.getcwd() wbt.set_working_dir(data_dir) wbt.feature_preserving_smoothing("dem.tif", "smoothed.tif", filter=9) wbt.breach_depressions("smoothed.tif", "breached.tif") wbt.d_inf_flow_accumulation("breached.tif", "flow_accum.tif") # + import matplotlib.pyplot as plt import imageio # %matplotlib inline # - original = imageio.imread(os.path.join(data_dir, 'dem.tif')) smoothed = imageio.imread(os.path.join(data_dir, 'smoothed.tif')) breached = imageio.imread(os.path.join(data_dir, 'breached.tif')) flow_accum = imageio.imread(os.path.join(data_dir, 'flow_accum.tif')) # + fig = plt.figure(figsize=(16, 11)) ax1 = fig.add_subplot(2, 2, 1) ax1.set_title('Original DEM') plt.imshow(original) ax2 = fig.add_subplot(2, 2, 2) ax2.set_title('Smoothed DEM') plt.imshow(smoothed) ax3 = fig.add_subplot(2, 2, 3) ax3.set_title('Breached DEM') plt.imshow(breached) ax4 = fig.add_subplot(2, 2, 4) ax4.set_title('Flow Accumulation') plt.imshow(flow_accum) plt.show() # - # ## Create basemap gallery import leafmap for basemap in leafmap.basemaps: print(basemap) layers = list(leafmap.basemaps.keys())[17:117] leafmap.linked_maps(rows=20, cols=5, height="200px", layers=layers, labels=layers) # ## Create linked map import leafmap leafmap.basemaps.keys() layers = ['ROADMAP', 'HYBRID'] leafmap.linked_maps(rows=1, cols=2, height='400px', layers=layers) layers = ['Stamen.Terrain', 'OpenTopoMap'] leafmap.linked_maps(rows=1, cols=2, height='400px', layers=layers) # Create a 2 * 2 linked map to visualize land cover change. Specify the `center` and `zoom` parameters to change the default map center and zoom level. layers = [str(f"NLCD {year} CONUS Land Cover") for year in [2001, 2006, 2011, 2016]] labels = [str(f"NLCD {year}") for year in [2001, 2006, 2011, 2016]] leafmap.linked_maps( rows=2, cols=2, height='300px', layers=layers, labels=labels, center=[36.1, -115.2], zoom=9, ) # ## Create split-panel map # # Create a split-panel map by specifying the `left_layer` and `right_layer`, which can be chosen from the basemap names, or any custom XYZ tile layer. import leafmap leafmap.split_map(left_layer="ROADMAP", right_layer="HYBRID") # Hide the zoom control from the map. leafmap.split_map( left_layer="Esri.WorldTopoMap", right_layer="OpenTopoMap", zoom_control=False ) # Add labels to the map and change the default map center and zoom level. leafmap.split_map( left_layer="NLCD 2001 CONUS Land Cover", right_layer="NLCD 2019 CONUS Land Cover", left_label="2001", right_label="2019", label_position="bottom", center=[36.1, -114.9], zoom=10, ) # ## Create heat map # # Specify the file path to the CSV. It can either be a file locally or on the Internet. import leafmap m = leafmap.Map(layers_control=True) in_csv = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/world_cities.csv" m.add_heatmap( in_csv, latitude="latitude", longitude='longitude', value="pop_max", name="Heat map", radius=20, ) m # Use the folium plotting backend. from leafmap import foliumap m = foliumap.Map() in_csv = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/world_cities.csv" m.add_heatmap( in_csv, latitude="latitude", longitude='longitude', value="pop_max", name="Heat map", radius=20, ) colors = ['blue', 'lime', 'red'] m.add_colorbar(colors=colors, vmin=0, vmax=10000) m.add_title("World Population Heat Map", font_size="20px", align="center") m # ## Save map to HTML import leafmap m = leafmap.Map() m.add_basemap("Esri.NatGeoWorldMap") m # Specify the output HTML file name to save the map as a web page. m.to_html("mymap.html") # If the output HTML file name is not provided, the function will return a string containing contain the source code of the HTML file. html = m.to_html() # + # print(html) # - # ## Use kepler plotting backend import leafmap.kepler as leafmap # ### Create an interactive map # # Create an interactive map. You can specify various parameters to initialize the map, such as `center`, `zoom`, `height`, and `widescreen`. m = leafmap.Map(center=[40, -100], zoom=2, height=600, widescreen=False) m # If you encounter an error saying `Error displaying widget: model not found` when trying to display the map, you can use `m.static_map()` as a workaround until this [kepler.gl bug](https://github.com/keplergl/kepler.gl/issues/1165) has been resolved. # + # m.static_map(width=1280, height=600) # - # ### Add CSV # # Add a CSV to the map. If you have a map config file, you can directly apply config to the map. m = leafmap.Map(center=[37.7621, -122.4143], zoom=12) in_csv = ( 'https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/hex_data.csv' ) config = 'https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/hex_config.json' m.add_csv(in_csv, layer_name="hex_data", config=config) m m.static_map(width=1280, height=600) # ### Save map config # # Save the map configuration as a JSON file. m.save_config("cache/config.json") # ### Save map as html # # Save the map to an interactive html. m.to_html(outfile="cache/kepler_hex.html") # ### Add GeoJONS # # Add a GeoJSON with US state boundaries to the map. m = leafmap.Map(center=[50, -110], zoom=2) polygons = 'https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/us_states.json' m.add_geojson(polygons, layer_name="Countries") m m.static_map(width=1280, height=600) # ### Add shapefile # # Add a shapefile to the map. m = leafmap.Map(center=[20, 0], zoom=1) in_shp = "https://github.com/giswqs/leafmap/raw/master/examples/data/countries.zip" m.add_shp(in_shp, "Countries") m m.static_map(width=1280, height=600) # ### Add GeoDataFrame # # Add a GeoPandas GeoDataFrame to the map. import geopandas as gpd gdf = gpd.read_file( "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/world_cities.geojson" ) gdf m = leafmap.Map(center=[20, 0], zoom=1) m.add_gdf(gdf, "World cities") m # + # m.static_map(width=1280, height=600) # - # ## Use planet imagery # # First, you need to [sign up](https://www.planet.com/login/?mode=signup) a Planet account and get an API key. See https://developers.planet.com/quickstart/apis. # Uncomment the following line to pass in your API key. import os import leafmap if os.environ.get("PLANET_API_KEY") is None: os.environ["PLANET_API_KEY"] = 'your-api-key' quarterly_tiles = leafmap.planet_quarterly_tiles() for tile in quarterly_tiles: print(tile) monthly_tiles = leafmap.planet_monthly_tiles() for tile in monthly_tiles: print(tile) # Add a Planet monthly mosaic by specifying year and month. m = leafmap.Map() m.add_planet_by_month(year=2020, month=8) m # Add a Planet quarterly mosaic by specifying year and quarter. m = leafmap.Map() m.add_planet_by_quarter(year=2019, quarter=2) m # ## Use timeseries inspector import os import leafmap if os.environ.get("PLANET_API_KEY") is None: os.environ["PLANET_API_KEY"] = 'your-api-key' tiles = leafmap.planet_tiles() leafmap.ts_inspector(tiles, center=[40, -100], zoom=4) # ## Use time slider # Use the time slider to visualize Planet quarterly mosaic. # # ![](https://i.imgur.com/ipVJ4cb.gif) import os import leafmap if os.environ.get("PLANET_API_KEY") is None: os.environ["PLANET_API_KEY"] = 'your-api-key' # Specify the map center and zoom level. m = leafmap.Map(center=[38.2659, -103.2447], zoom=13) m # Use the time slider to visualize Planet quarterly mosaic. m = leafmap.Map() layers_dict = leafmap.planet_quarterly_tiles() m.add_time_slider(layers_dict, time_interval=1) m # Use the time slider to visualize basemaps. m = leafmap.Map() m.clear_layers() layers_dict = leafmap.basemap_xyz_tiles() m.add_time_slider(layers_dict, time_interval=1) m
examples/workshops/YouthMappers_2021.ipynb