code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Sample a free-text continuation of a GIS-related prompt from GPT-Neo 1.3B.
from transformers import GPTNeoForCausalLM, GPT2Tokenizer

# Load the pretrained checkpoint and its matching tokenizer.
model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")

input_text = "A geographic information system (GIS) is a system that creates, manages, analyzes, and maps all types of data. GIS connects data to a map, integrating location data"

# Encode the prompt, sample up to 100 tokens total (prompt included),
# then decode the single generated sequence back to text.
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
gen_tokens = model.generate(
    input_ids,
    do_sample=True,   # stochastic sampling rather than greedy decoding
    temperature=0.9,
    max_length=100,
)
gen_text = tokenizer.batch_decode(gen_tokens)[0]
gen_text
notebooks/GPT-Samples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from bs4 import BeautifulSoup
import requests
import pandas as pd
import xml.etree.ElementTree as ET  # NOTE(review): unused below; kept in case other cells rely on it

# Given the web page https://www.sicemdawgs.com/uga-baseball-roster/
#
# - Extract in a series the names and positions of UGA Baseball Coaches
# - Extract in a data frame the game schedule table

r = requests.get('https://www.sicemdawgs.com/uga-baseball-roster/')
r.status_code

# FIX: name the parser explicitly. A bare BeautifulSoup(markup) emits a
# GuessedAtParserWarning and can pick different parsers (and thus parse
# differently) on different machines.
soup = BeautifulSoup(r.text, 'html.parser')

# FIX: the `text=` keyword of find() is deprecated in bs4 >= 4.4;
# `string=` is the supported spelling with identical matching behavior.
marker = soup.find('h2', string="UGA Baseball Coaches")
bullet_list = marker.find_next('ul')
bullets = bullet_list.find_all('li')


def extract_bullet(bullet):
    """Split one coach <li> element into a (name, position) pair.

    NOTE(review): the separator below looks like a mojibake'd en dash ('–').
    It is kept byte-for-byte because it must match the scraped page text
    exactly — confirm against the live page before "fixing" it.
    """
    comps = bullet.text.split('โ€“')
    name = comps[0].strip()
    position = comps[1].strip()
    return name, position


data = [extract_bullet(bullet) for bullet in bullets]
data

pd.Series(dict(data))

# The schedule table: pandas can parse HTML tables directly.
tables = pd.read_html('https://www.sicemdawgs.com/uga-baseball-roster/', header=0)
table = tables[0]
table.head()
table.dtypes
table.shape
28-problem-solution_html_scraping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Objectives" data-toc-modified-id="Objectives-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Objectives</a></span></li><li><span><a href="#When-a-Good-Model-Goes-Bad" data-toc-modified-id="When-a-Good-Model-Goes-Bad-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>When a Good Model Goes Bad</a></span><ul class="toc-item"><li><span><a href="#Bias-Variance-Tradeoff" data-toc-modified-id="Bias-Variance-Tradeoff-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Bias-Variance Tradeoff</a></span><ul class="toc-item"><li><span><a href="#Underfitting" data-toc-modified-id="Underfitting-2.1.1"><span class="toc-item-num">2.1.1&nbsp;&nbsp;</span>Underfitting</a></span></li><li><span><a href="#Overfitting" data-toc-modified-id="Overfitting-2.1.2"><span class="toc-item-num">2.1.2&nbsp;&nbsp;</span>Overfitting</a></span></li></ul></li><li><span><a href="#How-Do-We-Identify-a-Bad-Model?-๐Ÿ•ต๏ธ" data-toc-modified-id="How-Do-We-Identify-a-Bad-Model?-๐Ÿ•ต๏ธ-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>How Do We Identify a Bad Model? 
๐Ÿ•ต๏ธ</a></span><ul class="toc-item"><li><span><a href="#Solution---Model-Validation" data-toc-modified-id="Solution---Model-Validation-2.2.1"><span class="toc-item-num">2.2.1&nbsp;&nbsp;</span>Solution - Model Validation</a></span></li><li><span><a href="#Steps:" data-toc-modified-id="Steps:-2.2.2"><span class="toc-item-num">2.2.2&nbsp;&nbsp;</span>Steps:</a></span></li><li><span><a href="#The-Power-of-the-Validation-Set" data-toc-modified-id="The-Power-of-the-Validation-Set-2.2.3"><span class="toc-item-num">2.2.3&nbsp;&nbsp;</span>The Power of the Validation Set</a></span><ul class="toc-item"><li><span><a href="#From-Validation-to-Cross-Validation" data-toc-modified-id="From-Validation-to-Cross-Validation-2.2.3.1"><span class="toc-item-num">2.2.3.1&nbsp;&nbsp;</span>From Validation to Cross-Validation</a></span></li></ul></li></ul></li></ul></li><li><span><a href="#Preventing-Overfitting---Regularization" data-toc-modified-id="Preventing-Overfitting---Regularization-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Preventing Overfitting - Regularization</a></span><ul class="toc-item"><li><span><a href="#The-Strategy-Behind-Ridge-/-Lasso-/-Elastic-Net" data-toc-modified-id="The-Strategy-Behind-Ridge-/-Lasso-/-Elastic-Net-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>The Strategy Behind Ridge / Lasso / Elastic Net</a></span></li><li><span><a href="#Ridge-and-Lasso-Regression" data-toc-modified-id="Ridge-and-Lasso-Regression-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Ridge and Lasso Regression</a></span><ul class="toc-item"><li><span><a href="#Lasso:-L1-Regularization---Absolute-Value" data-toc-modified-id="Lasso:-L1-Regularization---Absolute-Value-3.2.1"><span class="toc-item-num">3.2.1&nbsp;&nbsp;</span>Lasso: L1 Regularization - Absolute Value</a></span></li><li><span><a href="#Ridge:-L2-Regularization---Squared-Value" data-toc-modified-id="Ridge:-L2-Regularization---Squared-Value-3.2.2"><span 
class="toc-item-num">3.2.2&nbsp;&nbsp;</span>Ridge: L2 Regularization - Squared Value</a></span></li><li><span><a href="#๐Ÿค”-Which-Do-I-Use?" data-toc-modified-id="๐Ÿค”-Which-Do-I-Use?-3.2.3"><span class="toc-item-num">3.2.3&nbsp;&nbsp;</span>๐Ÿค” Which Do I Use?</a></span></li><li><span><a href="#The-Best-of-Both-Worlds:-Elastic-Net" data-toc-modified-id="The-Best-of-Both-Worlds:-Elastic-Net-3.2.4"><span class="toc-item-num">3.2.4&nbsp;&nbsp;</span>The Best of Both Worlds: Elastic Net</a></span></li></ul></li><li><span><a href="#Code-it-Out!" data-toc-modified-id="Code-it-Out!-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Code it Out!</a></span><ul class="toc-item"><li><span><a href="#Producing-an-Overfit-Model" data-toc-modified-id="Producing-an-Overfit-Model-3.3.1"><span class="toc-item-num">3.3.1&nbsp;&nbsp;</span>Producing an Overfit Model</a></span><ul class="toc-item"><li><span><a href="#Train-Test-Split" data-toc-modified-id="Train-Test-Split-3.3.1.1"><span class="toc-item-num">3.3.1.1&nbsp;&nbsp;</span>Train-Test Split</a></span></li><li><span><a href="#First-simple-model" data-toc-modified-id="First-simple-model-3.3.1.2"><span class="toc-item-num">3.3.1.2&nbsp;&nbsp;</span>First simple model</a></span></li><li><span><a href="#Add-Polynomial-Features" data-toc-modified-id="Add-Polynomial-Features-3.3.1.3"><span class="toc-item-num">3.3.1.3&nbsp;&nbsp;</span>Add Polynomial Features</a></span></li></ul></li><li><span><a href="#Ridge-(L2)-Regression" data-toc-modified-id="Ridge-(L2)-Regression-3.3.2"><span class="toc-item-num">3.3.2&nbsp;&nbsp;</span>Ridge (L2) Regression</a></span></li><li><span><a href="#Cross-Validation-to-Optimize-the-Regularization-Hyperparameter" data-toc-modified-id="Cross-Validation-to-Optimize-the-Regularization-Hyperparameter-3.3.3"><span class="toc-item-num">3.3.3&nbsp;&nbsp;</span>Cross Validation to Optimize the Regularization Hyperparameter</a></span><ul class="toc-item"><li><span><a href="#Observation" 
data-toc-modified-id="Observation-3.3.3.1"><span class="toc-item-num">3.3.3.1&nbsp;&nbsp;</span>Observation</a></span></li></ul></li><li><span><a href="#LEVEL-UP---Elastic-Net!" data-toc-modified-id="LEVEL-UP---Elastic-Net!-3.3.4"><span class="toc-item-num">3.3.4&nbsp;&nbsp;</span>LEVEL UP - Elastic Net!</a></span><ul class="toc-item"><li><span><a href="#Note-on-ElasticNet()" data-toc-modified-id="Note-on-ElasticNet()-3.3.4.1"><span class="toc-item-num">3.3.4.1&nbsp;&nbsp;</span>Note on <code>ElasticNet()</code></a></span></li><li><span><a href="#Fitting-Regularized-Models-with-Cross-Validation" data-toc-modified-id="Fitting-Regularized-Models-with-Cross-Validation-3.3.4.2"><span class="toc-item-num">3.3.4.2&nbsp;&nbsp;</span>Fitting Regularized Models with Cross-Validation</a></span></li></ul></li></ul></li></ul></li></ul></div> # + from sklearn.preprocessing import StandardScaler, PolynomialFeatures, OneHotEncoder from sklearn.linear_model import Ridge, Lasso, ElasticNet, LinearRegression,\ LassoCV, RidgeCV, ElasticNetCV from sklearn.model_selection import train_test_split, KFold,\ cross_val_score, cross_validate, ShuffleSplit from sklearn.metrics import mean_squared_error import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # + [markdown] heading_collapsed=true # # Objectives # + [markdown] hidden=true # - Explain the notion of "validation data" # - Use the algorithm of cross-validation (with `sklearn`) # - Explain the concept of regularization # - Use Lasso and Ridge regularization in model design # + [markdown] hidden=true # One of the goals of a machine learning project is to make models which are highly predictive. # If the model fails to generalize to unseen data then the model is bad. 
# + [markdown] heading_collapsed=true # # When a Good Model Goes Bad # + [markdown] hidden=true # > One of the goals of a machine learning project is to make models which are highly predictive # + [markdown] hidden=true # Adding complexity to a model can find patterns to help make better predictions! # # But too much complexity can lead to the model finding patterns in the noise... # + [markdown] hidden=true # ![Overfitting Model](images/overfitting_model_meme.jpeg) # + [markdown] hidden=true # >So how do we know when our model is ~~a conspiracy theorist~~ overfitting? # + [markdown] heading_collapsed=true hidden=true # ## Bias-Variance Tradeoff # + [markdown] hidden=true # 1. High bias # 1. Systematic error in predictions # 2. Bias is about the strength of assumptions the model makes # 3. Underfit models tend to have high bias # 2. High variance # 1. The model is highly sensitive to changes in the data # 2. Overfit models tend to have low bias # + [markdown] hidden=true # ![](images/bias_vs_variance.png) # + [markdown] heading_collapsed=true hidden=true # ##### Aside: Example of high bias and variance # + [markdown] hidden=true # High bias is easy to wrap one's mind around: Imagine pulling three red balls from an urn that has hundreds of balls of all colors in a uniform distribution. Then my sample is a terrible representative of the whole population. If I were to build a model by extrapolating from my sample, that model would predict that _every_ ball produced would be red! That is, this model would be incredibly biased. # + [markdown] hidden=true # High variance is a little bit harder to visualize, but it's basically the "opposite" of this. Imagine that the population of balls in the urn is mostly red, but also that there are a few balls of other colors floating around. Now imagine that our sample comprises a few balls, none of which is red. In this case, we've essentially picked up on the "noise", rather than the "signal". 
If I were to build a model by extrapolating from my sample, that model would be needlessly complex. It might predict that balls drawn before noon will be orange and that balls drawn after 8pm will be green, when the reality is that a simple model that predicted 'red' for all balls would be a superior model! # + [markdown] hidden=true # The important idea here is that there is a *trade-off*: If we have too few data in our sample (training set), or too few predictors, we run the risk of high *bias*, i.e. an underfit model. On the other hand, if we have too many predictors (especially ones that are collinear), we run the risk of high *variance*, i.e. an overfit model. # + [markdown] hidden=true # [Here](https://en.wikipedia.org/wiki/Overfitting#/media/File:Overfitting.svg) is a nice illustration of the difficulty. # + [markdown] heading_collapsed=true hidden=true # ### Underfitting # + [markdown] hidden=true # > Underfit models fail to capture all of the information in the data # + [markdown] hidden=true # * low complexity --> high bias, low variance # * training error: large # * testing error: large # + [markdown] heading_collapsed=true hidden=true # ### Overfitting # + [markdown] hidden=true # > Overfit models fit to the noise in the data and fail to generalize # + [markdown] hidden=true # * high complexity --> low bias, high variance # * training error: low # * testing error: large # + [markdown] heading_collapsed=true hidden=true # ## How Do We Identify a Bad Model? ๐Ÿ•ต๏ธ # + [markdown] heading_collapsed=true hidden=true # ### Solution - Model Validation # + [markdown] hidden=true # Generally speaking we want to take more precautions than using just a test and train split. After all, we're still imagining building just one model on the training set and then crossing our fingers for its performance on the test set. 
# # Data scientists often distinguish *three* subsets of data: **training, validation (dev), and testing** # + [markdown] hidden=true # Roughly: # - Training data is for building the model; # - Validation data is for *tweaking* the model; # - Testing data is for evaluating the model on unseen data. # + [markdown] hidden=true # - Think of **training** data as what you study for a test # - Think of **validation** data is using a practice test (note sometimes called **dev**) # - Think of **testing** data as what you use to judge the model # - A **holdout** set is when your test dataset is never used for training (unlike in cross-validation) # + [markdown] hidden=true # ![](https://scikit-learn.org/stable/_images/grid_search_workflow.png) # > Image from Scikit-Learn https://scikit-learn.org/stable/modules/cross_validation.html # + [markdown] heading_collapsed=true hidden=true # ### Steps: # + [markdown] hidden=true # 1. Split data into training data and a holdout test # 2. Design a model # 3. Evaluate how well it generalizes with **cross-validation** (only training data) # 4. Determine if we should adjust model, use cross-validation to evaluate, and repeat # 5. After iteratively adjusting your model, do a _final_ evaluation with the holdout test set # 6. DON'T TOUCH THE MODEL!!! # + [markdown] heading_collapsed=true hidden=true # ### The Power of the Validation Set # + [markdown] hidden=true # This "tweaking" includes most of all the fine-tuning of model parameters (see below). Think of what this three-way distinction allows us to do: # # I can build a model on some data. Then, **before** I introduce the model to the testing data, I can introduce it to a different batch of data (the validation set). With respect to the validation data I can do things like measure error and tweak model parameters to minimize that error. Of course, I also don't want to lose sight of the error on the training data. 
If the model error has been minimized on the training error, then of course any changes I make to the model parameters will take me away from that minimum. But still the new information I've gained by looking at the model's performance on the validation data is valuable. I might for example go with a kind of compromising model whose parameters produce an error that's not too big on the training data and not too big on the validation data. # + [markdown] hidden=true # **Question**: What's different about this procedure from what we've described before? Aren't I just calling the test data "validation data" now? Is there any substantive difference? # + [markdown] heading_collapsed=true hidden=true # #### From Validation to Cross-Validation # + [markdown] hidden=true # Since my model will "see" the validation data in any case, I might as well use *all* of my training data to validate my model! How do I do this? # # Cross-validation works like this: First I'll partition my training data into $k$-many *folds*. Then I'll train a model on $k-1$ of those folds and "test" it on the remaining fold. I'll do this for all possible divisions of my $k$ folds into $k-1$ training folds and a single "testing" fold. Since there are $k\choose 1$$=k$-many ways of doing this, I'll be building $k$-many models! # + [markdown] hidden=true # ![](https://scikit-learn.org/stable/_images/grid_search_cross_validation.png) # + [markdown] heading_collapsed=true hidden=true # ##### Python Example # + hidden=true birds = sns.load_dataset('penguins') birds.sample(5) # + hidden=true birds.info() # + hidden=true # For simplicity's sake we'll limit our analysis to the numeric columns. numeric = birds[['bill_length_mm', 'bill_depth_mm', 'flipper_length_mm', 'body_mass_g']] # + hidden=true # We'll drop the rows with null values numeric = numeric.dropna().reset_index() # + [markdown] hidden=true # Suppose I want to model `body_mass_g` as a function of the other attributes. 
# + hidden=true X = numeric.drop('body_mass_g', axis=1) y = numeric['body_mass_g'] # + [markdown] hidden=true # We'll make ten models and record our evaluations of them. # + hidden=true lr2 = LinearRegression() # + hidden=true cv_results = cross_validate( X=X, y=y, estimator=lr2, cv=10, scoring=('r2', 'neg_mean_squared_error'), return_train_score=True ) # + hidden=true cv_results.keys() # + hidden=true cv_results.get('test_r2') # + hidden=true # See how we did compare the results? -1*cv_results.get('train_neg_mean_squared_error').mean(), (-1*cv_results.get('train_neg_mean_squared_error')).std() -1*cv_results.get('test_neg_mean_squared_error').mean(), (-1*cv_results.get('test_neg_mean_squared_error')).std() # + hidden=true -1*cv_results.get('test_neg_mean_squared_error').mean(), (-1*cv_results.get('test_neg_mean_squared_error')).std() # + [markdown] heading_collapsed=true # # Preventing Overfitting - Regularization # + [markdown] hidden=true # Again, complex models are very flexible in the patterns that they can model but this also means that they can easily find patterns that are simply statistical flukes of one particular dataset rather than patterns reflective of the underlying data-generating process. # + [markdown] hidden=true # When a model has large weights, the model is "too confident". This translates to a model with high variance which puts it in danger of overfitting! # + [markdown] hidden=true # ![](images/punishing_model_metaphor.jpeg) # + [markdown] hidden=true # We need to punish large (confident) weights by contributing them to the error function # + [markdown] hidden=true # **Some Types of Regularization:** # # 1. Reducing the number of features # 2. Increasing the amount of data # 3. Popular techniques: Ridge, Lasso, Elastic Net # # + [markdown] heading_collapsed=true hidden=true # ## The Strategy Behind Ridge / Lasso / Elastic Net # + [markdown] hidden=true # Overfit models overestimate the relevance that predictors have for a target. 
Thus overfit models tend to have **overly large coefficients**. # # Generally, overfitting models come from a result of high model variance. High model variance can be caused by: # # - having irrelevant or too many predictors # - multicollinearity # - large coefficients # + [markdown] hidden=true # The evaluation of many models, linear regression included, proceeds by measuring its **error**, some quantifiable expression of the discrepancy between its predictions and the ground truth. The best-fit line of LR, for example, minimizes the sum of squared residuals. # # Our new idea, then, will be ***to add a term representing the size of our coefficients to our loss function***. This will be our **cost function** $J$. # # The goal will still be to minimize this new function, but we can make progress toward this minimum *either* by reducing the size of our residuals *or* by reducing the size of our coefficients. # # Since coefficients can be either negative or positive, we have the familiar difficulty that we can't simply add them up to get a sense of how large they are in general. Once again there are two natural choices: We could focus either on the squares or the absolute values of the coefficients. The former strategy is the basis for **Ridge** (also called Tikhonov) regularization; the latter strategy results in **Lasso** (Least Absolute Shrinkage and Selection Operator) regularization. # # These tools, as we shall see, are easily implemented with `sklearn`. # + [markdown] hidden=true # -------- # + [markdown] hidden=true # Regularization is about introducing a factor into our model designed to enforce the stricture that the coefficients stay small, by _penalizing_ the ones that get too large. # # That is, we'll alter our loss function so that the goal now is not merely to minimize the difference between actual values and our model's predicted values. Rather, we'll add in a term to our loss function that represents the sizes of the coefficients. 
# + [markdown] heading_collapsed=true hidden=true # ## Ridge and Lasso Regression # + [markdown] hidden=true # The first problem is about picking up on noise rather than signal. # The second problem is about having a least-squares estimate that is highly sensitive to random error. # The third is about having highly sensitive predictors. # # Regularization is about introducing a factor into our model designed to enforce the stricture that the coefficients stay small, by penalizing the ones that get too large. # # That is, we'll alter our loss function so that the goal now is not merely to minimize the difference between actual values and our model's predicted values. Rather, we'll add in a term to our loss function that represents the sizes of the coefficients. # + [markdown] heading_collapsed=true hidden=true # ### Lasso: L1 Regularization - Absolute Value # + [markdown] hidden=true # - Tend to get sparse vectors (small weights go to 0) # - Reduce number of weights # - Good feature selection to pick out importance # # $$ J(W,b) = -\dfrac{1}{m} \sum^m_{i=1}\big[\mathcal{L}(\hat y_i, y_i)+ \dfrac{\lambda}{m}|w_i| \big]$$ # + [markdown] heading_collapsed=true hidden=true # ### Ridge: L2 Regularization - Squared Value # + [markdown] hidden=true # - Not sparse vectors (weights homogeneous & small) # - Tends to give better results for training # # # $$ J(W,b) = -\dfrac{1}{m} \sum^m_{i=1}\big[\mathcal{L}(\hat y_i, y_i)+ \dfrac{\lambda}{m}w_i^2 \big]$$ # + [markdown] heading_collapsed=true hidden=true # ### ๐Ÿค” Which Do I Use? # + [markdown] hidden=true # > Typically you'll want to use L2 regularization # + [markdown] hidden=true # - For a given value of $\lambda$, the ridge makes for a gentler reining in of runaway coefficients. When in doubt, try ridge first. # - The lasso will more quickly reduce the contribution of individual predictors down to insignificance. 
It is therefore most useful for trimming through the fat of datasets with many predictors or if a model with very few predictors is especially desirable. # + [markdown] heading_collapsed=true hidden=true # ##### Aside: Comparing L1 & L2 Regularization # + [markdown] hidden=true # This is a bit subtle: # - Consider vectors: [1,0] & [0.5, 0.5] # - Recall we want smallest value for our value # - L2 prefers [0.5,0.5] over [1,0] # + [markdown] hidden=true # For a nice discussion of these methods in Python, see [this post](https://towardsdatascience.com/ridge-and-lasso-regression-a-complete-guide-with-python-scikit-learn-e20e34bcbf0b). # + [markdown] heading_collapsed=true hidden=true # ### The Best of Both Worlds: Elastic Net # + [markdown] hidden=true # There is a combination of L1 and L2 regularization called the Elastic Net that can also be used. The idea is to use a scaled linear combination of the lasso and the ridge, where the weights add up to 100%. We might want 50% of each, but we also might want, say, 10% Lasso and 90% Ridge. # # The loss function for an Elastic Net Regression looks like this: # # Elastic Net: # # $\rho\Sigma^{n_{obs.}}_{i=1}[(y_i - \Sigma^{n_{feat.}}_{j=0}\beta_j\times x_{ij})^2 + \lambda\Sigma^{n_{feat.}}_{j=0}|\beta_j|] + (1 - \rho)\Sigma^{n_{obs.}}_{i=1}[(y_i - \Sigma^{n_{feat.}}_{j=0}\beta_j\times x_{ij})^2 + \lambda\Sigma^{n_{feat.}}_{j=0}\beta^2_j]$ # # Sometimes you will see this loss function represented with different scaling terms, but the basic idea is to have a combination of L1 and L2 regularization terms. # + [markdown] heading_collapsed=true hidden=true # ## Code it Out! # + [markdown] heading_collapsed=true hidden=true # ### Producing an Overfit Model # + [markdown] hidden=true # We can often produce an overfit model by including **interaction terms**. We'll start over with the penguins dataset. This time we'll include the categorical features. 
# + [markdown] heading_collapsed=true hidden=true
# #### Train-Test Split

# + hidden=true
birds = sns.load_dataset('penguins')
birds = birds.dropna()

# + hidden=true
birds.head()

# + hidden=true
X_train, X_test, y_train, y_test = train_test_split(
    birds.drop('body_mass_g', axis=1),
    birds['body_mass_g'],
    random_state=42
)

# + hidden=true
# Taking in other features (category)
ohe = OneHotEncoder(drop='first')
dummies = ohe.fit_transform(X_train[['species', 'island', 'sex']])

# Getting a DF
# FIX: use .toarray() rather than .todense() (todense returns the deprecated
# np.matrix type) and get_feature_names_out() rather than get_feature_names(),
# which was removed in scikit-learn 1.2 (get_feature_names_out exists since
# 1.0 — confirm the pinned sklearn version is >= 1.0).
dummies_df = pd.DataFrame(dummies.toarray(),
                          columns=ohe.get_feature_names_out(),
                          index=X_train.index)

# What we'll feed into our model
X_train_df = pd.concat([X_train[['bill_length_mm', 'bill_depth_mm',
                                 'flipper_length_mm']], dummies_df], axis=1)
X_train_df.head()

# + [markdown] hidden=true
# Our Test Data:

# + hidden=true
# Note the same transformation (not FIT) to match structure
test_dummies = ohe.transform(X_test[['species', 'island', 'sex']])
test_df = pd.DataFrame(test_dummies.toarray(),
                       columns=ohe.get_feature_names_out(),
                       index=X_test.index)
X_test_df = pd.concat([X_test[['bill_length_mm', 'bill_depth_mm',
                               'flipper_length_mm']], test_df], axis=1)

# + [markdown] heading_collapsed=true hidden=true
# #### First simple model

# + hidden=true
lr1 = LinearRegression()
lr1.fit(X_train_df, y_train)

# + hidden=true
lr1.score(X_train_df, y_train)

# + [markdown] hidden=true
# Let's do some cross-validation!
# + hidden=true
# Cross-validate the simple linear model: 10 folds, scored on R^2 and
# (negated) MSE, keeping training-fold scores so we can compare.
cv_results = cross_validate(
    X=X_train_df,
    y=y_train,
    estimator=lr1,
    cv=10,
    scoring=('r2', 'neg_mean_squared_error'),
    return_train_score=True
)

# + hidden=true
cv_results.keys()

# + hidden=true
train_res = cv_results['train_r2']
train_res

# + hidden=true
valid_res = cv_results['test_r2']
valid_res

# + [markdown] heading_collapsed=true hidden=true
# ##### Peeking at the end (test data) 👀

# + hidden=true
pens_preds = lr1.predict(X_test_df)

# + hidden=true
lr1.score(X_test_df, y_test)

# + hidden=true
np.sqrt(mean_squared_error(pens_preds, y_test))

# + [markdown] heading_collapsed=true hidden=true
# #### Add Polynomial Features

# + hidden=true
pf = PolynomialFeatures(degree=3)
X_poly_train = pf.fit_transform(X_train_df)

# + hidden=true
X_poly_test = pf.transform(X_test_df)

# + [markdown] hidden=true
# Train the model and evaluate (with cross-validation)

# + hidden=true
poly_lr = LinearRegression()
poly_lr.fit(X_poly_train, y_train)

# + hidden=true
poly_lr.score(X_poly_train, y_train)

# + hidden=true
# Same 10-fold cross-validation, now on the degree-3 feature expansion.
cv_results = cross_validate(
    X=X_poly_train,
    y=y_train,
    estimator=poly_lr,
    cv=10,
    scoring=('r2', 'neg_mean_squared_error'),
    return_train_score=True
)

# + hidden=true
train_res = cv_results['train_r2']
train_res

# + hidden=true
valid_res = cv_results['test_r2']
valid_res

# + [markdown] heading_collapsed=true hidden=true
# ##### Peeking at the end (test data) 👀

# + hidden=true
poly_lr.score(X_poly_test, y_test)

# + hidden=true
poly_preds = poly_lr.predict(X_poly_test)

# + hidden=true
np.sqrt(mean_squared_error(poly_preds, y_test))

# + [markdown] heading_collapsed=true hidden=true
# ### Ridge (L2) Regression

# + hidden=true
ss = StandardScaler()
pf = PolynomialFeatures(degree=3)

# You should always be sure to _standardize_ your data before
# applying regularization!
X_train_processed = pf.fit_transform(ss.fit_transform(X_train_df))
X_test_processed = pf.transform(ss.transform(X_test_df))

# + hidden=true
# 'Lambda' is the standard variable for the strength of the
# regularization (as in the above formulas), but since lambda
# is a key word in Python, these sklearn regularization tools
# use 'alpha' instead.
rr = Ridge(alpha=100, random_state=42)
rr.fit(X_train_processed, y_train)

# + hidden=true
rr.score(X_train_processed, y_train)

# + hidden=true
cv_results = cross_validate(
    X=X_train_processed,
    y=y_train,
    estimator=rr,
    cv=10,
    scoring=('r2', 'neg_mean_squared_error'),
    return_train_score=True
)

# + hidden=true
cv_results['train_r2']

# + hidden=true
cv_results['test_r2'].std()

# + [markdown] heading_collapsed=true hidden=true
# ##### Peeking at the end (test data) 👀

# + hidden=true
ridge_preds = rr.predict(X_test_processed)

# + hidden=true
rr.score(X_test_processed, y_test)

# + hidden=true
np.sqrt(mean_squared_error(ridge_preds, y_test))

# + [markdown] hidden=true
# Much better! But how do we know which value of `alpha` to pick?

# + [markdown] heading_collapsed=true hidden=true
# ### Cross Validation to Optimize the Regularization Hyperparameter

# + [markdown] hidden=true
# The regularization strength could sensibly be any nonnegative number, so
# there's no way to check "all possible" values. It's often useful to try
# several values that are different orders of magnitude.
# + hidden=true
alphas = [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000, 10_000]

train_scores = []
test_scores = []

# Refit the ridge at each regularization strength and record train/test R^2.
for alpha in alphas:
    rr = Ridge(alpha=alpha, random_state=42)
    rr.fit(X_train_processed, y_train)
    train_score = rr.score(X_train_processed, y_train)
    test_score = rr.score(X_test_processed, y_test)
    train_scores.append(train_score)
    test_scores.append(test_score)

# + hidden=true
plt.style.use('fivethirtyeight')

fig, ax = plt.subplots()
plt.xscale('log')
plt.title('Ridge $R^2$ as a function of regularization strength')
# FIX: raw strings for labels containing LaTeX. '\l' inside a plain string is
# an invalid escape sequence (DeprecationWarning, SyntaxWarning on Python
# 3.12+); the raw-string form renders the identical label text.
ax.set_xlabel(r'Regularization strength $\lambda$')
ax.set_ylabel(r'$R^2$')
ax.plot(alphas, train_scores, label='train')
ax.plot(alphas, test_scores, label='test')
plt.legend();

# + [markdown] hidden=true
# It looks like the best value is somewhere around 100. If we wanted more
# precision, we could repeat the same sort of exercise with a set of alphas
# nearer to 100.

# + [markdown] heading_collapsed=true hidden=true
# #### Observation

# + [markdown] hidden=true
# Notice how the values increase but then decrease? Regularization helps with
# overfitting, but if the strength of the regularization becomes too great,
# then large coefficients will be punished more than they really should. What
# happens then is that the original error between truth and model predictions
# becomes neglected as a quantity to be minimized, and the bias of the model
# begins to outweigh its variance.

# + [markdown] heading_collapsed=true hidden=true
# ### LEVEL UP - Elastic Net!

# + [markdown] hidden=true
# Naturally, the Elastic Net has the same interface through sklearn as the
# other regularization tools! The only difference is that we now have to
# specify how much of each regularization term we want. The name of the
# parameter for this (represented by $\rho$ above) in sklearn is `l1_ratio`.
# + hidden=true
# An Elastic Net that is 10% lasso (L1) and 90% ridge (L2).
enet = ElasticNet(alpha=10, l1_ratio=0.1, random_state=42)
enet.fit(X_train_processed, y_train)

# + hidden=true
enet.score(X_train_processed, y_train)

# + hidden=true
enet.score(X_test_processed, y_test)

# + [markdown] hidden=true
# Setting the `l1_ratio` to 1 is equivalent to the lasso:

# + hidden=true
ratios = np.linspace(0.01, 1, 100)

# + hidden=true
# Sweep the L1/L2 mix and track the prediction for the first test point.
preds = []
for l1r in ratios:
    enet = ElasticNet(alpha=100, l1_ratio=l1r, random_state=42)
    enet.fit(X_train_processed, y_train)
    preds.append(enet.predict(X_test_processed[0].reshape(1, -1)))

# + hidden=true
# Compare the sweep against a plain Lasso at the same alpha: at l1_ratio=1
# the elastic-net curve should meet the lasso's prediction.
fig, ax = plt.subplots()

lasso = Lasso(alpha=100, random_state=42)
lasso.fit(X_train_processed, y_train)
lasso_pred = lasso.predict(X_test_processed[0].reshape(1, -1))

ax.plot(ratios, preds, label='elastic net')
ax.scatter(1, lasso_pred, c='k', s=70, label='lasso')
plt.legend();

# + [markdown] heading_collapsed=true hidden=true
# #### Note on `ElasticNet()`

# + [markdown] hidden=true
# Is an Elastic Net with `l1_ratio` set to 0 equivalent to the ridge? In
# theory yes. But in practice no. It looks like the `ElasticNet()` predictions
# on the first test data point as `l1_ratio` shrinks are tending toward some
# value around 3400. Let's check to see what prediction `Ridge()` gives us:

# + hidden=true
ridge = Ridge(alpha=10, random_state=42)
ridge.fit(X_train_processed, y_train)
ridge.predict(X_test_processed[0].reshape(1, -1))[0]

# + [markdown] hidden=true
# If you check the docstring for the `ElasticNet()` class you will see:
# - that the function being minimized is slightly different from what we saw above; and
# - that the results are unreliable when `l1_ratio` $\leq 0.01$.

# + [markdown] hidden=true
# **Exercise**: Visualize the difference in this case between
# `ElasticNet(l1_ratio=0.01)` and `Ridge()` by making a scatterplot of each
# model's predicted values for the first ten points in `X_test_processed`.
# Use `alpha=10` for each model.
#
# Level Up: Make a second scatterplot that compares the predictions on the same data
# points between ElasticNet(l1_ratio=1) and Lasso().

# + [markdown] hidden=true
# <details>
#     <summary> Answer
#     </summary>
#     <code>fig, ax = plt.subplots()
# enet_r = ElasticNet(alpha=10, l1_ratio=0.01, random_state=42)
# enet_r.fit(X_train_processed, y_train)
# preds_enr = enet_r.predict(X_test_processed[:10])
# preds_ridge = ridge.predict(X_test_processed[:10])
# ax.scatter(np.arange(10), preds_enr)
# ax.scatter(np.arange(10), preds_ridge);</code>
# </details>

# + [markdown] hidden=true
# <details>
#     <summary>
#     Level Up
#     </summary>
#     <code>fig, ax = plt.subplots()
# enet_l = ElasticNet(alpha=10, l1_ratio=1, random_state=42)
# enet_l.fit(X_train_processed, y_train)
# preds_enl = enet_l.predict(X_test_processed[:10])
# preds_lasso = lasso.predict(X_test_processed[:10])
# ax.scatter(np.arange(10), preds_enl)
# ax.scatter(np.arange(10), preds_lasso);</code>
# </details>

# + [markdown] heading_collapsed=true hidden=true
# #### Fitting Regularized Models with Cross-Validation

# + [markdown] hidden=true
# Our friend `sklearn` also includes tools that fit regularized regressions *with cross-validation*: `LassoCV`, `RidgeCV`, and `ElasticNetCV`.

# + [markdown] hidden=true
# **Exercise**: Use `RidgeCV` to fit a seven-fold cross-validated ridge regression model to our `X_train_processed` data and then calculate $R^2$ and the RMSE (root-mean-squared error) on our test set.

# + [markdown] hidden=true
# <details>
#     <summary>
#     Answer
#     </summary>
#     <code>rcv = RidgeCV(cv=7)
# rcv.fit(X_train_processed, y_train)
# rcv.score(X_test_processed, y_test)
# np.sqrt(mean_squared_error(y_test, rcv.predict(X_test_processed)))</code>
#     </details>
Phase_3/ds-regularization-main/regularization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # hc # # Components for training models with high-content data (most notably HD-(f)MRI and HD cortical sensing). # ## Design # # Documentation on the design of thes components. # + # TODO(hekate1) # - # ## Setup # # Installation steps for running the contents of this notebook. # TODO(cwbeitel) deps = "https://github.com/projectclarify/clarify/tree/master/tools/get-clarify" # !wget {deps} | python - # This is the right setting for colab and what we'll leave as the # default but feel free to modify to match your system. WORKSPACE_ROOT="/content/clarify" # ## General usage # # Documentation on the primary means of using these components. # ##### Building # TODO(cwbeitel), provisionally: # !cd {WORKSPACE_ROOT} && bazel build //hc/... # ##### Testing # Subproject testing # TODO(cwbeitel), provisionally: # !cd {WORKSPACE_ROOT} && bazel test //hc/... # Integration testing # TODO(cwbeitel), provisionally: # !cd {WORKSPACE_ROOT} && bazel test //clarify/... # ##### Usage in context # + # TODO(cwbeitel), illustrate a tensor2tensor or tf.Datasets problem # that calls a compiled hc component. # - # ## Demos # # Demonstration of usage that illustrate component capabilities and value. # ##### Run the sampler with mock local "CBT" # + # TODO(hekate1), provisionally something like # !cd {WORKSPACE_ROOT} && \ # bazel run --define=mock_cbt=true //hc/sampler -- demo.cfg # resulting in a minor amount of logs illustrating what's happening. # - # ##### Run the sampler with actual CBT # + # TODO(hekate1), provisionally also something like # !cd {WORKSPACE_ROOT} && \ # bazel run //hc/sampler -- demo.cfg # that is the same as the above but with a remote CBT instance.
hc/docs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''venv'': venv)' # name: pythonjvsc74a57bd00361bfb4874960d2e0338b51473ec1ce7b59300af49c9ce626256c87107b9863 # --- # # Add Data Layers to Interactive Maps Using `folium` and [Tomorrow.io](https://app.tomorrow.io/home) # ## 1. Import Python modules from datetime import datetime from folium import Map, LayerControl from folium.raster_layers import TileLayer from folium.plugins import FloatImage # ## 2.Initialize map and API variables # + # Starting center of map center = (47.000, -119.000) # somewhere in eastern Washington # Tomorrow.io API Key apikey = 'yourapikey' # Current time time = datetime.now().isoformat(timespec='milliseconds') + "Z" # '2021-06-02T10:02:06.828Z' # - # ## 3. Create base map # + pnw = Map( location=center, min_zoom=1, max_zoom=12, zoom_start=6, tiles="Stamen Terrain", height=500, width=1000, control_scale=True, # dragging=False, # zoom_control=False ) # Show map pnw # - # ## 4. Add temperature layer from Tomorrow.io to base map # `folium` uses z, x, and y under the hood, so this is not a template string (string literal). Therefore, we are concatentating `time` and `API_KEY` instead of formatting them into the URL string with curly brackets. # temperature = TileLayer( name='Temperature', tiles='https://api.tomorrow.io/v4/map/tile/{z}/{x}/{y}/temperature/{time}.png?apikey={apikey}', min_zoom=1, max_zoom=12, max_native_zoom=12, overlay=True, attr='Temperature layer from <a target="_blank" href="https://www.tomorrow.io/weather-api/">Tomorrow.io Weather API</a>', time=time, apikey=apikey ).add_to(pnw) # ## 5. Add Layer Control # + LayerControl(position='topright').add_to(pnw) # Show map pnw # -
tomorrow-io-map-tiles.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # # Arduino LCD Example using AdaFruit 1.8" LCD Shield # # This notebook shows a demo on Adafruit 1.8" LCD shield. # + deletable=true editable=true from pynq.overlays.base import BaseOverlay base = BaseOverlay("base.bit") # + [markdown] deletable=true editable=true # ## 1. Instantiate AdaFruit LCD controller # In this example, make sure that 1.8" LCD shield from Adafruit is placed on the Arduino interface. # # After instantiation, users should expect to see a PYNQ logo with pink background shown on the screen. # + deletable=true editable=true from pynq.lib.arduino import Arduino_LCD18 lcd = Arduino_LCD18(base.ARDUINO) # + [markdown] deletable=true editable=true # ## 2. Clear the LCD screen # Clear the LCD screen so users can display other pictures. # + deletable=true editable=true lcd.clear() # + [markdown] deletable=true editable=true # ## 3. Display a picture # # The screen is 160 pixels by 128 pixels. So the largest picture that can fit in the screen is 160 by 128. To resize a picture to a desired size, users can do: # ```python # from PIL import Image # img = Image.open('data/large.jpg') # w_new = 160 # h_new = 128 # new_img = img.resize((w_new,h_new),Image.ANTIALIAS) # new_img.save('data/small.jpg','JPEG') # img.close() # ``` # The format of the picture can be PNG, JPEG, BMP, or any other format that can be opened using the `Image` library. In the API, the picture will be compressed into a binary format having (per pixel) 5 bits for blue, 6 bits for green, and 5 bits for red. All the pixels (of 16 bits each) will be stored in DDR memory and then transferred to the IO processor for display. # # The orientation of the picture is as shown below, while currently, only orientation 1 and 3 are supported. 
Orientation 3 will display picture normally, while orientation 1 will display picture upside-down. # <img src="data/adafruit_lcd18.jpg" width="400px"/> # # To display the picture at the desired location, the position has to be calculated. For example, to display in the center a 76-by-25 picture with orientation 3, `x_pos` has to be (160-76/2)=42, and `y_pos` has to be (128/2)+(25/2)=76. # # The parameter `background` is a list of 3 components: [R,G,B], where each component consists of 8 bits. If it is not defined, it will be defaulted to [0,0,0] (black). # + deletable=true editable=true lcd.display('data/board_small.jpg',x_pos=0,y_pos=127, orientation=3,background=[255,255,255]) # + [markdown] deletable=true editable=true # ## 4. Animate the picture # # We can provide the number of frames to the method `display()`; this will move the picture around with a desired background color. # + deletable=true editable=true lcd.display('data/logo_small.png',x_pos=0,y_pos=127, orientation=1,background=[255,255,255],frames=100) # + [markdown] deletable=true editable=true # ## 5. Draw a line # # Draw a white line from upper left corner towards lower right corner. # # The parameter `background` is a list of 3 components: [R,G,B], where each component consists of 8 bits. If it is not defined, it will be defaulted to [0,0,0] (black). # # Similarly, the parameter `color` defines the color of the line, with a default value of [255,255,255] (white). # # All the 3 `draw_line()` use the default orientation 3. # # Note that if the background is changed, the screen will also be cleared. Otherwise the old lines will still stay on the screen. # + deletable=true editable=true lcd.clear() lcd.draw_line(x_start_pos=151,y_start_pos=98,x_end_pos=19,y_end_pos=13) # + [markdown] deletable=true editable=true # Draw a 100-pixel wide red horizontal line, on a yellow background. Since the background is changed, the screen will be cleared automatically. 
# + deletable=true editable=true lcd.draw_line(50,50,150,50,color=[255,0,0],background=[255,255,0]) # + [markdown] deletable=true editable=true # Draw a 80-pixel tall blue vertical line, on the same yellow background. # + deletable=true editable=true lcd.draw_line(50,20,50,120,[0,0,255],[255,255,0]) # + [markdown] deletable=true editable=true # ## 6. Print a scaled character # # Users can print a scaled string at a desired position with a desired text color and background color. # # The first `print_string()` prints "Hello, PYNQ!" at 1st row, 1st column, with white text color and blue background. # # The second `print_string()` prints today's date at 5th row, 10th column, with yellow text color and blue background. # # Note that if the background is changed, the screen will also be cleared. Otherwise the old strings will still stay on the screen. # + deletable=true editable=true text = 'Hello, PYNQ!' lcd.print_string(1,1,text,[255,255,255],[0,0,255]) # + deletable=true editable=true import time text = time.strftime("%d/%m/%Y") lcd.print_string(5,10,text,[255,255,0],[0,0,255]) # + [markdown] deletable=true editable=true # ## 7. Draw a filled rectangle # # The next 3 cells will draw 3 rectangles of different colors, respectively. All of them use the default black background and orientation 3. # + deletable=true editable=true lcd.draw_filled_rectangle(x_start_pos=10,y_start_pos=10, width=60,height=80,color=[64,255,0]) # + deletable=true editable=true lcd.draw_filled_rectangle(x_start_pos=20,y_start_pos=30, width=80,height=30,color=[255,128,0]) # + deletable=true editable=true lcd.draw_filled_rectangle(x_start_pos=90,y_start_pos=40, width=70,height=120,color=[64,0,255]) # + [markdown] deletable=true editable=true # ## 8. 
Read joystick button # + deletable=true editable=true button=lcd.read_joystick() if button == 1: print('Left') elif button == 2: print('Down') elif button==3: print('Center') elif button==4: print('Right') elif button==5: print('Up') else: print('Not pressed')
boards/Pynq-Z2/base/notebooks/arduino/arduino_lcd18.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %pylab inline numpy.random.seed(0) import seaborn; seaborn.set_style('whitegrid') from apricot import FeatureBasedSelection from apricot import FacilityLocationSelection # - # ### Sparse Inputs # # Sometimes your data has many zeroes in it. Sparse matrices, implemented through `scipy.sparse`, are a way of storing only those values that are non-zero. This can be an extremely efficient way to represent massive data sets that mostly have zero values, such as sentences that are featurized using the presence of n-grams. Simple modifications can be made to many algorithms to operate on the sparse representations of these data sets, enabling compute to be efficiently performed on data whose dense representation may not even fit in memory. The submodular optimization algorithms implemented in apricot are some such algorithms. # # Let's start off with loading three data sets in scikit-learn that have many zeros in them, and show the density, which is the percentage of non-zero elements in them. # + from sklearn.datasets import load_digits from sklearn.datasets import fetch_covtype from sklearn.datasets import fetch_rcv1 X_digits = load_digits().data.astype('float64') X_covtype = numpy.abs(fetch_covtype().data).astype('float64') X_rcv1 = fetch_rcv1().data[:5000].toarray() print("digits density: ", (X_digits != 0).mean()) print("covtype density: ", (X_covtype != 0).mean()) print("rcv1 density: ", (X_rcv1 != 0).mean()) # - # It looks like these three data sets have very different levels of sparsity. The digits data set is approximately half non-zeros, the covertypes data set is approximately one-fifth non-zeroes, and the rcv1 subset we're using is less than 0.2% non-zeroes. 
# # Let's see how long it takes to rank the digits data set using only naive greedy selection. # %timeit FeatureBasedSelection(X_digits.shape[0], 'sqrt').fit(X_digits) # We can turn our dense numpy array into a sparse array using `scipy.sparse.csr_matrix`. Currently, apricot only accepts `csr` formatted sparse matrices. This creates a matrix where each row is stored contiguously, rather than each column being stored contiguously. This is helpful for us because each row corresponds to an example in our data set. No other changes are needed other than passing in a `csr_matrix` rather than a numpy array. # + from scipy.sparse import csr_matrix X_digits_sparse = csr_matrix(X_digits) # %timeit FeatureBasedSelection(X_digits.shape[0], 'sqrt', X_digits.shape[0]).fit(X_digits_sparse) # - # Looks like things may have been slowed down a bit, likely due to a comination of the data set being small and not particularly sparse. # # Let's look at the covertypes data set, which is both much larger and much sparser. FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X_covtype).ranking[:10] # + X_covtype_sparse = csr_matrix(X_covtype) FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X_covtype_sparse).ranking[:10] # - # Seems like it might only be a little bit beneficial in terms of speed, here. # # Let's take a look at our last data set, the subset from rcv1, which is extremely sparse. FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X_rcv1).ranking[:10] # + X_rcv1_sparse = csr_matrix(X_rcv1) FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X_rcv1_sparse).ranking[:10] # - # It looks like there is a massive speed improvement here. It looks like the sparseness of a data set may contribute to the speed improvements one would get when using a sparse array versus a dense array. # # As a side note, only a small subset of the rcv1 data set is used here because, while the sparse array does fit in memory, the dense array does not. 
This illustrates that, even when there isn't a significant speed advantage, support for sparse arrays in general can be necessary for massive data problems. For example, here's an example of apricot easily finding the least redundant subset of size 10 from the entire 804,414 example x 47,236 feature rcv1 data set, which would require >250 GB to store at 64-bit floats. # + X_rcv1_sparse = fetch_rcv1().data FeatureBasedSelection(10000, 'sqrt', 100, verbose=True).fit(X_rcv1_sparse) # - # Clearly there seems to be a speed benefit as data sets become larger. But can we quantify it further? Let's look at a large, randomly generated sparse data set. numpy.random.seed(0) X = numpy.random.choice(2, size=(8000, 4000), p=[0.99, 0.01]).astype('float64') FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X).ranking[:10] # + X_sparse = csr_matrix(X) FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X_sparse).ranking[:10] # - # It looks much faster to use a sparse matrix for this data set. But, is it faster to use a sparse matrix because the data set is larger, or because we're leveraging the format of a sparse matrix? 
# + import time sizes = 500, 750, 1000, 1500, 2000, 3000, 5000, 7500, 10000, 15000, 20000, 30000, 50000 times, sparse_times = [], [] for n in sizes: X = numpy.random.choice(2, size=(n, 4000), p=[0.99, 0.01]).astype('float64') tic = time.time() FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X) times.append(time.time() - tic) X = csr_matrix(X) tic = time.time() FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X) sparse_times.append(time.time() - tic) # + ratio = numpy.array(times) / numpy.array(sparse_times) plt.figure(figsize=(12, 4)) plt.subplot(121) plt.title("Sparse and Dense Timings", fontsize=14) plt.plot(times, label="Dense Time") plt.plot(sparse_times, label="Sparse Time") plt.legend(fontsize=12) plt.xticks(range(len(sizes)), sizes, rotation=45) plt.xlabel("Number of Examples", fontsize=12) plt.ylabel("Time (s)", fontsize=12) plt.subplot(122) plt.title("Speed Improvement of Sparse Array", fontsize=14) plt.plot(ratio) plt.xticks(range(len(sizes)), sizes, rotation=45) plt.xlabel("Number of Examples", fontsize=12) plt.ylabel("Dense Time / Sparse Time", fontsize=12) plt.tight_layout() plt.show() # - # It looks like, at a fixed sparsity, the larger the data set is, the larger the speed up is. # # What happens if we vary the number of features in a data set with a fixed number of examples and sparsity? 
sizes = 5, 10, 25, 50, 100, 150, 200, 250, 500, 1000, 2000, 5000, 10000, 15000, 20000, 25000 times, sparse_times = [], [] for d in sizes: X = numpy.random.choice(2, size=(10000, d), p=[0.99, 0.01]).astype('float64') tic = time.time() FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X) times.append(time.time() - tic) X = csr_matrix(X) tic = time.time() FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X) sparse_times.append(time.time() - tic) # + ratio = numpy.array(times) / numpy.array(sparse_times) plt.figure(figsize=(12, 4)) plt.subplot(121) plt.title("Sparse and Dense Timings", fontsize=14) plt.plot(times, label="Dense Time") plt.plot(sparse_times, label="Sparse Time") plt.legend(fontsize=12) plt.xticks(range(len(sizes)), sizes, rotation=45) plt.xlabel("Number of Examples", fontsize=12) plt.ylabel("Time (s)", fontsize=12) plt.subplot(122) plt.title("Speed Improvement of Sparse Array", fontsize=14) plt.plot(ratio, label="Dense Time") plt.legend(fontsize=12) plt.xticks(range(len(sizes)), sizes, rotation=45) plt.xlabel("Number of Examples", fontsize=12) plt.ylabel("Dense Time / Sparse Time", fontsize=12) plt.tight_layout() plt.show() # - # Looks like we're getting a similar speed improvement as we increase the number of features. # # Lastly, what happens when we change the sparsity? 
ps = 0.001, 0.005, 0.01, 0.02, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.98, 0.99, 0.995, 0.999 times, sparse_times = [], [] for p in ps: X = numpy.random.choice(2, size=(10000, 500), p=[p, 1-p]).astype('float64') tic = time.time() FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X) times.append(time.time() - tic) X = csr_matrix(X) tic = time.time() FeatureBasedSelection(500, 'sqrt', 500, verbose=True).fit(X) sparse_times.append(time.time() - tic) # + ratio = numpy.array(times) / numpy.array(sparse_times) plt.figure(figsize=(12, 4)) plt.subplot(121) plt.title("Sparse and Dense Timings", fontsize=14) plt.plot(times, label="Dense Time") plt.plot(sparse_times, label="Sparse Time") plt.legend(fontsize=12) plt.xticks(range(len(ps)), ps, rotation=45) plt.xlabel("% Sparsity", fontsize=12) plt.ylabel("Time (s)", fontsize=12) plt.subplot(122) plt.title("Speed Improvement of Sparse Array", fontsize=14) plt.plot(ratio, label="Dense Time") plt.legend(fontsize=12) plt.xticks(range(len(ps)), ps, rotation=45) plt.xlabel("% Sparsity", fontsize=12) plt.ylabel("Dense Time / Sparse Time", fontsize=12) plt.tight_layout() plt.show() # - # This looks like it may be the most informative plot. This says that, given data sets of the same size, operating on a sparse array will be significantly slower than a dense array until the data set gets to a certain sparsity level. For this data set it was approximately 75% zeros, but for other data sets it may differ. # These examples have so far focused on the time it takes to select using feature based functions. However, facility location functions can take sparse input, as long as it is the pre-computed similarity matrix that is sparse, not the feature matrix. X = numpy.random.uniform(0, 1, size=(6000, 6000)) X = (X + X.T) / 2. 
X[X < 0.9] = 0.0 X_sparse = csr_matrix(X) FacilityLocationSelection(500, 'precomputed', 500, verbose=True).fit(X) FacilityLocationSelection(500, 'precomputed', 500, verbose=True).fit(X_sparse) # It looks selection works significantly faster on a sparse array than on a dense one. We can do a similar type of analysis as before to analyze the components. sizes = range(500, 8001, 500) times, sparse_times = [], [] for d in sizes: X = numpy.random.uniform(0, 1, size=(d, d)).astype('float64') X = (X + X.T) / 2 X[X <= 0.9] = 0 tic = time.time() FacilityLocationSelection(500, 'precomputed', 500, verbose=True).fit(X) times.append(time.time() - tic) X = csr_matrix(X) tic = time.time() FacilityLocationSelection(500, 'precomputed', 500, verbose=True).fit(X) sparse_times.append(time.time() - tic) # + ratio = numpy.array(times) / numpy.array(sparse_times) plt.figure(figsize=(12, 4)) plt.subplot(121) plt.title("Sparse and Dense Timings", fontsize=14) plt.plot(times, label="Dense Time") plt.plot(sparse_times, label="Sparse Time") plt.legend(fontsize=12) plt.xticks(range(len(sizes)), sizes, rotation=45) plt.xlabel("Number of Examples", fontsize=12) plt.ylabel("Time (s)", fontsize=12) plt.subplot(122) plt.title("Speed Improvement of Sparse Array", fontsize=14) plt.plot(ratio, label="Dense Time") plt.legend(fontsize=12) plt.xticks(range(len(sizes)), sizes, rotation=45) plt.xlabel("Number of Examples", fontsize=12) plt.ylabel("Dense Time / Sparse Time", fontsize=12) plt.tight_layout() plt.show() # - ps = 0.001, 0.005, 0.01, 0.02, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.98, 0.99, 0.995, 0.999 times, sparse_times = [], [] for p in ps: X = numpy.random.uniform(0, 1, size=(2000, 2000)).astype('float64') X = (X + X.T) / 2 X[X <= p] = 0 tic = time.time() FacilityLocationSelection(500, 'precomputed', 500, verbose=True).fit(X) times.append(time.time() - tic) X = csr_matrix(X) tic = time.time() FacilityLocationSelection(500, 'precomputed', 500, verbose=True).fit(X) 
sparse_times.append(time.time() - tic) # + ratio = numpy.array(times) / numpy.array(sparse_times) plt.figure(figsize=(12, 4)) plt.subplot(121) plt.title("Sparse and Dense Timings", fontsize=14) plt.plot(times, label="Dense Time") plt.plot(sparse_times, label="Sparse Time") plt.legend(fontsize=12) plt.xticks(range(len(ps)), ps, rotation=45) plt.xlabel("% Sparsity", fontsize=12) plt.ylabel("Time (s)", fontsize=12) plt.subplot(122) plt.title("Speed Improvement of Sparse Array", fontsize=14) plt.plot(ratio, label="Dense Time") plt.legend(fontsize=12) plt.xticks(range(len(ps)), ps, rotation=45) plt.xlabel("% Sparsity", fontsize=12) plt.ylabel("Dense Time / Sparse Time", fontsize=12) plt.tight_layout() plt.show() # - # Similarly to feature based selection, using a sparse array is only faster than a dense array when the array gets to a certain level of sparsity, but can then be significantly faster.
tutorials/3. Using Sparse Inputs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Flavors of Gradient Descent

# Gradient descent (GD) is a tremendously popular optimization algorithm in the machine learning world. While it's been popular for a long time, there have been a number of variations on plain-vanilla SGD that have improved training speed and stability. The four we'll cover in this post are:
# * Basic gradient descent
# * Momentum
# * RMSProp
# * Adam
#
# First, a brief review. GD's purpose is simple: given a function, change the parameters of that function to find its minimum. One of the major "families" of (supervised) machine learning models is the set of *parametric* models, or those which work by tuning a set of _parameters_ (hence the name!) to optimize some objective criterion. In each case, we have a function that takes a series of samples, and for each, tries to predict a target; that is:
#
# $$
# \operatorname*{argmin}_fL(y,f(x))
# $$

# Where $f$ is a parametric supervised learning model, $y$ is a vector of ground-truth values, $x$ is a feature matrix $\in R^{m*n}$ and $f$ is a function $R^{m*n} \Rightarrow R^{1*n}$; for simplicity, let's assume it's a basic linear regression, such as $f(x) = \beta_{0} + \beta_{1}x$. In this case, we want to find the parameters $\beta_0$ and $\beta_1$ that minimize the average _loss_ (function $L$ above) given a set of examples $y$ and a set of inputs $x$. For example's sake, let's assume that loss to be _Mean Squared Error (MSE)_: $\frac{1}{n}\sum^n_1(y_i - f(x_i))^2$. Gradient descent helps us find the parameters $\beta_0$ and $\beta_1$ that make the MSE as small as possible.

# How is this done? The broad idea is straightforward: figure out how the loss moves in relation to each parameter, and use that information to determine whether to increase or decrease the parameter.
Repeat the process many times, until the loss won't go any lower. Each "flavor" of GD outlined above does this in some form, but the nuances of the approach differ. # Let's start by generating a bit of data on which we can run our GS algorithm to examine training performance. # %matplotlib inline import numpy as np from numpy.random import normal from matplotlib import pyplot as plt import seaborn as sns sns.set() import torch # # Generate Some Data # First, let's pick some constants to be our "true" $\beta_0$ and $\beta_1$. Remember: our goal is to take some data generated by a linear model with these parameters, and use it to back into what the parameters were algorithmically. We'll set $\beta_1 = 30$ and $\beta_0 = 15$. b1, b0 = 30, 15 # Next, we want to generated some input and output data. This will be our synthetic training data, which we'll use to back into the parameters we set above. Our data will be centered around the line $y = 30x + 15$, with a small amount of normally distributed noise added, with a mean of 0 and standard deviation of 1. x_train = np.linspace(0, 100, 100) y_train = b1 * x_train + b0 + normal(size=len(x_train)) # Here, we've used scipy's `normal` function to generate random error terms with mean 0 and standard deviation 1. fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(x_train, y_train) # The data clings pretty tightly to our ground-truth function, which is fine for now -- real-world data is almost never so clean, but it'll work for the purposes of this post. # # Basic SGD # The first variant we're going to work through is basic SGD. The idea in basic SGD is simple: we find the gradient of the error with respect to the inputs, and then we subtract that gradient (multipled by a small number) to improve the error a bit. More formally: # # $$ a_{t+1} = a_t - \alpha\frac{\delta{L}}{\delta{a}}$$ # where $\alpha$ is the learning rate, and $\frac{\delta{L}}{\delta{a}}$ is the gradient of the loss with respect to the parameter $a$. 
Basic SGD does exactly this, more-or-less, with no modifications. One thing worth noting is that the process changes based on _how many_ examples you use to determine the gradient. Broadly, there are three ways to do this: # * Batch gradient descent, which computes the gradient over every example in the training set, and then makes one parameter update per epoch (i.e. for each complete pass through the data) # * Mini-batch gradient descent, which breaks the training set into chunks and computes the gradient (and performs parameter updates) for each chunk # * Online gradient descent, which computes the gradient and updates the parameters for each individual training example # # There are advantages and disadvantages to each of these, primarily around training speed and stability. To scope this post down to a reasonable level, I'll save that discussion for a different post. For the purposes of this post, we're going to be using batch gradient descent for each variant of SGD. # + # set the epoch count to 50000 epochs = 50000 # initialize our parameters b0_hat, b1_hat = 0, 0 # set the learning rate lr = 1e-4 # we'll use this list to collect our losses for reporting basic_losses = [] # This is our main optimization loop. For each epoch, we're going to # compute the average gradient over the entire dataset, then use that # average gradient to update the parameters. 
for i in range(epochs): # vectorized prediction for the basic linear model predictions = np.dot(b1_hat, x_train) + b0_hat # compute the loss for each example loss = (predictions - y_train)**2 # log the MSE for the epoch basic_losses.append(loss.mean()) # compute the gradients for each parameter grad_b0 = (2 * (predictions - y_train)).mean() grad_b1 = (2 * (predictions - y_train) * x_train).mean() # update the parameters b0_hat = b0_hat - grad_b0 * lr b1_hat = b1_hat - grad_b1 * lr # every 10k epochs, print out the loss if (i % 10000 == 0): print("Epoch %s MSE: %s" % (i, basic_losses[-1])) print("b0_hat is %s" % b0_hat) print("b1_hat is %s" % b1_hat) print("Best loss: ", min(basic_losses)) # - # By the end of our training loop, you can see that the learned parameters, $\hat{\beta}_{0} = 13.12$ and $\hat{\beta}_{1} = 30.02$ are pretty close to our true parameters $\beta_{0} = 15$ and $\beta_{1} = 30$, so our algorithm is working as we expect. # ## SGD Basic - PyTorch Version # For each variant, I also wanted to take the time to demonstrate what it would look like in PyTorch, one of the more popular deep learning platforms. We start by creating a PyTorch tensor. `torch.tensor` is a constructor for `torch.Tensor` objects, which work a lot like numpy tensors with extra functionality that allow them to be stored on a GPU. We also add a feature of ones to the dataset to account for $\beta_{0}$, which can then be optimized with the same gradient calculation as $\beta_1$ in our training loop. 
# Build the torch design matrix: the raw feature plus a column of ones so the
# intercept b0 can be learned with the same gradient update as b1.
_x_train_torch = np.concatenate((x_train[:,np.newaxis], np.ones((len(x_train)))[:,np.newaxis]), axis=1)
x_train_torch = torch.tensor(_x_train_torch).double()

# BUG FIX: the original cell evaluated `x_train_torch.shape, w.shape`, but `w`
# is only created a few cells below, so a fresh top-to-bottom run raised a
# NameError here. Inspect only the design matrix at this point.
x_train_torch.shape

x_train_torch[:5]


def mse(y_true, y_pred):
    """Mean squared error between predictions and targets."""
    return ((y_true - y_pred)**2).mean()


y_train_torch = torch.tensor(y_train)
w = torch.nn.Parameter(torch.tensor([0.,0.]).double())  # [slope, intercept]
lr = 1e-4
basic_losses_torch = []
for i in range(50000):
    pred = x_train_torch@w
    loss = mse(y_train_torch, pred)
    # .item() detaches the scalar from the autograd graph before logging
    basic_losses_torch.append(loss.item())
    if i % 10000 == 0:
        print(np.sqrt(loss.detach().numpy()))
    loss.backward()
    # the manual parameter update must not itself be tracked by autograd
    with torch.no_grad():
        w.sub_(lr * w.grad)
        w.grad.zero_()

print(np.sqrt(min(basic_losses_torch)))

# # Momentum

epochs = 50000
a_hat, b_hat = 0., 0.
lr = 1e-3  # momentum tolerates a 10x larger step than plain SGD here
mom = 0.9
mom_losses = []
grad_a = 0.0
grad_b = 0.0
for i in range(epochs):
    prediction = np.dot(a_hat, x_train) + b_hat
    loss = np.mean((prediction - y_train)**2)
    mom_losses.append(loss)
    # exponential moving average of the gradient for each parameter
    grad_a = np.mean(2 * (prediction - y_train) * x_train) * (1 - mom) + grad_a * mom
    grad_b = np.mean(2 * (prediction - y_train)) * (1 - mom) + grad_b * mom
    a_hat = a_hat - grad_a * lr
    b_hat = b_hat - grad_b * lr
    if (i % 10000 == 0):
        print("The most recent loss after epoch %s is %s" % (i, np.sqrt(loss)))
print("a_hat is %s" % a_hat)
print("b_hat is %s" % b_hat)
print("Best loss: ", min(mom_losses))


def loss_list(losses):
    """Coerce a list of per-epoch losses to a list of scalar means for plotting."""
    return list(map(lambda x: np.mean(x), losses))


fig = plt.figure()
ax = fig.add_subplot(111)
# momentum_losses = losses
# ax.set_xlim(0,3000)
ax.set_ylim(0,100)
plt.plot(loss_list(mom_losses), color="blue")
plt.plot(loss_list(basic_losses), color="red")
# plot_losses(losses[1:], 1000)

# We see that the model can handle a learning rate 10x larger, allowing us to learn much more quickly. The SGD-based version could not handle the larger learning rate, and immediately started to diverge.
# ## Momentum - PyTorch Version

w = torch.nn.Parameter(torch.tensor([0.,0.]).double())
lr = 1e-3
mom = 0.9
# Running exponential moving average of the gradient.
grad = torch.tensor([0.,0.]).double()
mom_torch_losses = []
for i in range(50000):
    pred = x_train_torch@w
    loss = mse(y_train_torch, pred).double()
    # Fix: store a plain float. The original appended the loss *tensor*,
    # which keeps every iteration's autograd graph alive in the list and
    # steadily grows memory over 50k iterations.
    mom_torch_losses.append(loss.item())
    if i % 10000 == 0:
        print(loss)
    loss.backward()
    with torch.no_grad():
        # Momentum: blend the new gradient into the running average, then
        # step along the smoothed direction.
        grad_mom = mom * grad + (1 - mom) * w.grad
        w.sub_(lr * grad_mom)
        grad = grad_mom
        w.grad.zero_()

# # RMSProp

epochs = 50000
a_hat, b_hat = 0, 0
lr = 1e-2
beta = 0.9     # decay rate of the squared-gradient moving average
rms_losses = []
r_a = 0        # moving averages of the squared gradients
r_b = 0
grad_a = 0
grad_b = 0
eta = 1e-10    # small epsilon to avoid division by zero
for i in range(1, epochs + 1):
    predictions = a_hat * x_train + b_hat
    loss = ((predictions - y_train)**2).mean()
    rms_losses.append(loss)
    grad_a = (2 * (predictions - y_train) * x_train).mean()
    grad_b = (2 * (predictions - y_train)).mean()
    # Bias-corrected moving averages of the squared gradients (i is 1-based).
    r_a = (beta*r_a + (1 - beta)*grad_a**2) / (1 - beta**i)
    r_b = (beta*r_b + (1 - beta)*grad_b**2) / (1 - beta**i)
    # Per-parameter adaptive step: learning rate scaled by 1/sqrt(r).
    v_a = grad_a*np.divide(lr, np.sqrt(r_a) + eta)
    v_b = grad_b*np.divide(lr, np.sqrt(r_b) + eta)
    a_hat = a_hat - v_a
    b_hat = b_hat - v_b
    if (i % 5000 == 0):
        print("The most recent after epoch %s is %s" % (i, rms_losses[-1]))
print("a_hat is %s" % a_hat)
print("b_hat is %s" % b_hat)
print("Best loss: ", min(rms_losses))

# Compare the loss curves of the three variants so far.
fig = plt.figure()
ax = fig.add_subplot(111)
# momentum_losses = losses
ax.set_xlim(0,50000)
ax.set_ylim(0,10)
plt.plot(loss_list(mom_losses), color="blue")
plt.plot(loss_list(basic_losses), color="red")
plt.plot(loss_list(rms_losses), color="green")

# # RMSProp - PyTorch Version

w = torch.nn.Parameter(torch.tensor([0.,0.]).double())
epochs = 50000
lr = 1e-2
beta = 0.9
exp_moving_avg = torch.tensor([0.,0.]).double()
eta = 1e-10
for i in range(1, epochs + 1):
    pred = x_train_torch@w
    loss = mse(y_train_torch, pred).double()
    if i % 5000 == 0:
        print(loss)
    loss.backward()
    with torch.no_grad():
        # Bias-corrected moving average of the squared gradients.
        exp_moving_avg = (beta * exp_moving_avg + (1 - beta)*w.grad**2) / (1 - beta**i)
        w.sub_(w.grad * lr / (np.sqrt(exp_moving_avg) + eta))
        w.grad.zero_()

# # Adam

# +
epochs = 50000
a_hat, b_hat = 0, 0
lr = 1e-2
beta1 = 0.9    # decay for the gradient (momentum) average
beta2 = 0.9    # decay for the squared-gradient average (Adam's paper uses 0.999)
adam_losses = []
r_a = 0
r_b = 0
grad_a = 0
grad_b = 0
mom_a = 0
mom_b = 0
# NOTE(review): eta is 10e-1 == 1.0 here, while both RMSProp cells above use
# 1e-10. This looks like a typo for 1e-10; it is flagged rather than changed
# because it alters the effective step size and hence the printed results.
eta = 10e-1
for i in range(1, epochs + 1):
    predictions = a_hat * x_train + b_hat
    loss = ((predictions - y_train)**2).mean()
    adam_losses.append(loss)
    grad_b = (2 * (predictions - y_train)).mean()
    grad_a = (2 * (predictions - y_train) * x_train).mean()
    # Bias-corrected second-moment (squared-gradient) averages...
    r_a = (beta2*r_a + (1 - beta2)*grad_a**2) / (1 - beta2**i)
    r_b = (beta2*r_b + (1 - beta2)*grad_b**2) / (1 - beta2**i)
    # ...and first-moment (momentum) averages.
    mom_a = (grad_a * (1 - beta1) + mom_a * beta1) / (1 - beta1**i)
    mom_b = (grad_b * (1 - beta1) + mom_b * beta1) / (1 - beta1**i)
    # Adam update: momentum direction with an RMSProp-style adaptive scale.
    v_a = mom_a * lr / (np.sqrt(r_a) + eta)
    v_b = mom_b * lr / (np.sqrt(r_b) + eta)
    a_hat = a_hat - v_a
    b_hat = b_hat - v_b
    if (i % 10000 == 0):
        print("The most recent after epoch %s is %s" % (i, adam_losses[-1]))
print("a_hat is %s" % a_hat)
print("b_hat is %s" % b_hat)
print("Best loss: ", min(adam_losses))
# -

# Loss curves for all four variants.
fig = plt.figure()
ax = fig.add_subplot(111)
# momentum_losses = losses
ax.set_xlim(0,50000)
ax.set_ylim(0,10)
plt.plot(loss_list(mom_losses), color="blue")
plt.plot(loss_list(basic_losses), color="red")
plt.plot(loss_list(rms_losses), color="green")
plt.plot(loss_list(adam_losses), color="orange")

# # Adam - PyTorch Version

w = torch.nn.Parameter(torch.tensor([0.,0.]).double())
lr = 1e-2
beta1 = 0.9
beta2 = 0.9
# Fix: use a dedicated list; the original re-bound `mom_torch_losses` here,
# clobbering the momentum section's history. It also appended graph-bearing
# loss tensors (see the momentum fix above) and carried over an unused
# `grad` accumulator, which has been dropped.
adam_torch_losses = []
# NOTE(review): same suspicious epsilon as the numpy Adam cell: 10e-1 == 1.0.
eta = 10e-1
moving_avg = 0
exp_moving_avg = 0
for i in range(50000):
    pred = x_train_torch@w
    loss = mse(y_train_torch, pred).double()
    adam_torch_losses.append(loss.item())
    if i % 10000 == 0:
        print(loss)
    loss.backward()
    with torch.no_grad():
        # Bias-corrected first and second moment estimates (i is 0-based
        # here, hence the i+1 in the correction terms).
        moving_avg = (beta1 * moving_avg + (1 - beta1) * w.grad) / (1 - beta1**(i+1))
        exp_moving_avg = (beta2 * exp_moving_avg + (1 - beta2) * w.grad**2) / (1 - beta2**(i+1))
        w.sub_(moving_avg * lr / (np.sqrt(exp_moving_avg) + eta))
        w.grad.zero_()
nbs/dl1/FlavorsOfGD.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Used while the notebook was being developed, to reload the external
# modules automatically.
# %load_ext autoreload
# %autoreload 2
# -

# # Importing modules and the data file

import input_data as inpdt  # helper functions written to clean/process the data
import plots  # helper functions written to plot the data
import matplotlib.pyplot as plt
import pandas as pd

# Raw CSV with the cleaned 2019 level-1 grades.
file_path = "dados_brutos/notas_2019_nivel1_limpo.csv"

df = inpdt.input_data(file_path)
df.head()

# # Cleaning the input data

# ## Removing absent students

df_presentes = inpdt.no_absents(df)
df_presentes.head()

df_presentes.describe()

# ## Computing the grades, sorting, and describing

df_pontos = inpdt.grades(df_presentes)
df_pontos.head()

# # **Applying the tie-breaking criterion**
#

# presumably 35 is the number of students to award — TODO confirm against inpdt.awards
df_prem_nivel1 = inpdt.awards(df_pontos,35)
df_prem_nivel1.head()

# **Exporting the level-1 data for later comparison with the other levels**

df_prem_nivel1.to_csv('nivel1python.csv')

# # **Checking the cut-off score for the honorable mentions**
#

df_prem_nivel1['Pontuação final'].head(10).min()

# # **Checking how many students did not get zero on the essay exam**
#

# +
# Students who did not get zero on the essay ("discursiva") exam.
# Compare with last year.
df_prem_nivel1['ALUNO'][df_prem_nivel1['Pontos - Discursiva'] != 0].count()
# -

# **Describe**

df_prem_nivel1.describe()

# # Plots

# ## Histogram and boxplot

bins_nivel1 = inpdt.bins(df_prem_nivel1)
bins_nivel1

inpdt.latex(bins_nivel1)

# +
# Boxplot on top (15% of the height), histogram below, sharing the x axis.
fig1, (ax2, ax1) = plt.subplots(figsize=(12, 8), nrows=2, sharex=True,
                                gridspec_kw={
                                    "height_ratios": (.15, .85),
                                    'hspace': 0.02
                                })
fig1.subplots_adjust(top=0.90)
fig1.suptitle('Análise das notas - Nível 1', fontsize=20)
plots.boxplot(df_prem_nivel1, 'Pontuação final', ax=ax2)
plots.histogram(df_prem_nivel1, 'Pontuação final', ax=ax1)
figname = 'images/hist_nivel1'
fig1.savefig(figname, bbox_inches='tight', dpi=300)
# -

# ## Students per class

turmas_nivel1 = inpdt.pivot_tables(df_prem_nivel1, 'ALUNO', 'TURMA', None)
turmas_nivel1

turmas_nivel1.T

inpdt.latex(turmas_nivel1.T)

# +
fig2, axarr = plt.subplots(nrows=1, ncols=1, figsize=(12, 6),
                           facecolor=(1.0, 1.0, 1.0))
values = inpdt.pivot_data(turmas_nivel1, 0)
labels = inpdt.pivot_index(turmas_nivel1)
plots.plot_pizza(values, labels, 'Alunos participantes por turma - Nível 1', ax=axarr)
figname = 'images/turmas_nivel1'
fig2.savefig(figname, bbox_inches='tight', dpi=300)
# -

# ## Building the summary-statistics table

df_prem_nivel1.describe()

stats_nivel1 = inpdt.stats_table(df_prem_nivel1.describe())
stats_nivel1

inpdt.latex(stats_nivel1)

# ## Semester and shift analysis of the award winners

# Top 10 students only.
df_10_nivel1 = df_prem_nivel1.head(10)
df_10_nivel1

df_10_nivel1 = inpdt.semester_shift(df_10_nivel1)
df_10_nivel1

per_10_nivel1 = inpdt.pivot_tables(df_10_nivel1, 'ALUNO', 'Período', None)
per_10_nivel1

inpdt.latex(per_10_nivel1)

# +
fig3, axarr = plt.subplots(nrows=1, ncols=1, figsize=(12, 6),
                           facecolor=(1.0, 1.0, 1.0))
values = inpdt.pivot_data(per_10_nivel1, 0)
labels = inpdt.pivot_index(per_10_nivel1)
plots.plot_pizza(values, labels, 'Período dos 10 primeiros colocados - Nível 1', ax=axarr)
figname = 'images/per_10_nivel1'
fig3.savefig(figname, bbox_inches='tight', dpi=300)
# -

curso_10_nivel1 = inpdt.pivot_tables(df_10_nivel1, 'ALUNO', 'cod', None)
curso_10_nivel1

inpdt.latex(curso_10_nivel1)

# +
fig4, axarr = plt.subplots(nrows=1, ncols=1, figsize=(12,6),
                           facecolor=(1.0, 1.0, 1.0))
values = inpdt.pivot_data(curso_10_nivel1, 0)
labels = inpdt.pivot_index(curso_10_nivel1)
plots.plot_pizza(values, labels, 'Curso dos 10 primeiros colocados - Nível 1', ax=axarr)
figname = 'images/curso_10_nivel1'
fig4.savefig(figname, bbox_inches='tight', dpi=300)
# -
resultados/nivel01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # *Python Machine Learning 2nd Edition* by [<NAME>](https://sebastianraschka.com), Packt Publishing Ltd. 2017 # # Code Repository: https://github.com/rasbt/python-machine-learning-book-2nd-edition # # Code License: [MIT License](https://github.com/rasbt/python-machine-learning-book-2nd-edition/blob/master/LICENSE.txt) # # Python Machine Learning - Code Examples # # Chapter 5 - Compressing Data via Dimensionality Reduction # Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s). # %load_ext watermark # %watermark -a "<NAME>" -u -d -p numpy,scipy,matplotlib,sklearn # *The use of `watermark` is optional. You can install this IPython extension via "`pip install watermark`". 
For more information, please see: https://github.com/rasbt/watermark.* # <br> # <br> # ### Overview # - [Unsupervised dimensionality reduction via principal component analysis 128](#Unsupervised-dimensionality-reduction-via-principal-component-analysis-128) # - [The main steps behind principal component analysis](#The-main-steps-behind-principal-component-analysis) # - [Extracting the principal components step-by-step](#Extracting-the-principal-components-step-by-step) # - [Total and explained variance](#Total-and-explained-variance) # - [Feature transformation](#Feature-transformation) # - [Principal component analysis in scikit-learn](#Principal-component-analysis-in-scikit-learn) # - [Supervised data compression via linear discriminant analysis](#Supervised-data-compression-via-linear-discriminant-analysis) # - [Principal component analysis versus linear discriminant analysis](#Principal-component-analysis-versus-linear-discriminant-analysis) # - [The inner workings of linear discriminant analysis](#The-inner-workings-of-linear-discriminant-analysis) # - [Computing the scatter matrices](#Computing-the-scatter-matrices) # - [Selecting linear discriminants for the new feature subspace](#Selecting-linear-discriminants-for-the-new-feature-subspace) # - [Projecting samples onto the new feature space](#Projecting-samples-onto-the-new-feature-space) # - [LDA via scikit-learn](#LDA-via-scikit-learn) # - [Using kernel principal component analysis for nonlinear mappings](#Using-kernel-principal-component-analysis-for-nonlinear-mappings) # - [Kernel functions and the kernel trick](#Kernel-functions-and-the-kernel-trick) # - [Implementing a kernel principal component analysis in Python](#Implementing-a-kernel-principal-component-analysis-in-Python) # - [Example 1 โ€“ separating half-moon shapes](#Example-1:-Separating-half-moon-shapes) # - [Example 2 โ€“ separating concentric circles](#Example-2:-Separating-concentric-circles) # - [Projecting new data 
points](#Projecting-new-data-points) # - [Kernel principal component analysis in scikit-learn](#Kernel-principal-component-analysis-in-scikit-learn) # - [Summary](#Summary) # <br> # <br> from IPython.display import Image # %matplotlib inline # # Unsupervised dimensionality reduction via principal component analysis # ## The main steps behind principal component analysis Image(filename='images/05_01.png', width=400) # ## Extracting the principal components step-by-step # + import pandas as pd df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/' 'machine-learning-databases/wine/wine.data', header=None) # if the Wine dataset is temporarily unavailable from the # UCI machine learning repository, un-comment the following line # of code to load the dataset from a local path: # df_wine = pd.read_csv('wine.data', header=None) df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline'] df_wine.head() # - # <hr> # Splitting the data into 70% training and 30% test subsets. # + from sklearn.model_selection import train_test_split X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.3, stratify=y, random_state=0) # - # Standardizing the data. # + from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train_std = sc.fit_transform(X_train) X_test_std = sc.transform(X_test) # - # --- # # **Note** # # Accidentally, I wrote `X_test_std = sc.fit_transform(X_test)` instead of `X_test_std = sc.transform(X_test)`. In this case, it wouldn't make a big difference since the mean and standard deviation of the test set should be (quite) similar to the training set. 
However, as remember from Chapter 3, the correct way is to re-use parameters from the training set if we are doing any kind of transformation -- the test set should basically stand for "new, unseen" data. # # My initial typo reflects a common mistake is that some people are *not* re-using these parameters from the model training/building and standardize the new data "from scratch." Here's simple example to explain why this is a problem. # # Let's assume we have a simple training set consisting of 3 samples with 1 feature (let's call this feature "length"): # # - train_1: 10 cm -> class_2 # - train_2: 20 cm -> class_2 # - train_3: 30 cm -> class_1 # # mean: 20, std.: 8.2 # # After standardization, the transformed feature values are # # - train_std_1: -1.21 -> class_2 # - train_std_2: 0 -> class_2 # - train_std_3: 1.21 -> class_1 # # Next, let's assume our model has learned to classify samples with a standardized length value < 0.6 as class_2 (class_1 otherwise). So far so good. Now, let's say we have 3 unlabeled data points that we want to classify: # # - new_4: 5 cm -> class ? # - new_5: 6 cm -> class ? # - new_6: 7 cm -> class ? # # If we look at the "unstandardized "length" values in our training datast, it is intuitive to say that all of these samples are likely belonging to class_2. However, if we standardize these by re-computing standard deviation and and mean you would get similar values as before in the training set and your classifier would (probably incorrectly) classify samples 4 and 5 as class 2. # # - new_std_4: -1.21 -> class 2 # - new_std_5: 0 -> class 2 # - new_std_6: 1.21 -> class 1 # # However, if we use the parameters from your "training set standardization," we'd get the values: # # - sample5: -18.37 -> class 2 # - sample6: -17.15 -> class 2 # - sample7: -15.92 -> class 2 # # The values 5 cm, 6 cm, and 7 cm are much lower than anything we have seen in the training set previously. 
Thus, it only makes sense that the standardized features of the "new samples" are much lower than every standardized feature in the training set. # # --- # Eigendecomposition of the covariance matrix. # + import numpy as np cov_mat = np.cov(X_train_std.T) eigen_vals, eigen_vecs = np.linalg.eig(cov_mat) print('\nEigenvalues \n%s' % eigen_vals) # - # **Note**: # # Above, I used the [`numpy.linalg.eig`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.html) function to decompose the symmetric covariance matrix into its eigenvalues and eigenvectors. # <pre>>>> eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)</pre> # This is not really a "mistake," but probably suboptimal. It would be better to use [`numpy.linalg.eigh`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigh.html) in such cases, which has been designed for [Hermetian matrices](https://en.wikipedia.org/wiki/Hermitian_matrix). The latter always returns real eigenvalues; whereas the numerically less stable `np.linalg.eig` can decompose nonsymmetric square matrices, you may find that it returns complex eigenvalues in certain cases. (S.R.) 
# # <br> # <br> # ## Total and explained variance tot = sum(eigen_vals) var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)] cum_var_exp = np.cumsum(var_exp) # + import matplotlib.pyplot as plt plt.bar(range(1, 14), var_exp, alpha=0.5, align='center', label='individual explained variance') plt.step(range(1, 14), cum_var_exp, where='mid', label='cumulative explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Principal component index') plt.legend(loc='best') plt.tight_layout() # plt.savefig('images/05_02.png', dpi=300) plt.show() # - # <br> # <br> # ## Feature transformation # + # Make a list of (eigenvalue, eigenvector) tuples eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i]) for i in range(len(eigen_vals))] # Sort the (eigenvalue, eigenvector) tuples from high to low eigen_pairs.sort(key=lambda k: k[0], reverse=True) # - w = np.hstack((eigen_pairs[0][1][:, np.newaxis], eigen_pairs[1][1][:, np.newaxis])) print('Matrix W:\n', w) # **Note** # Depending on which version of NumPy and LAPACK you are using, you may obtain the Matrix W with its signs flipped. 
Please note that this is not an issue: If $v$ is an eigenvector of a matrix $\Sigma$, we have # # $$\Sigma v = \lambda v,$$ # # where $\lambda$ is our eigenvalue, # # # then $-v$ is also an eigenvector that has the same eigenvalue, since # $$\Sigma \cdot (-v) = -\Sigma v = -\lambda v = \lambda \cdot (-v).$$ X_train_std[0].dot(w) # + X_train_pca = X_train_std.dot(w) colors = ['r', 'b', 'g'] markers = ['s', 'x', 'o'] for l, c, m in zip(np.unique(y_train), colors, markers): plt.scatter(X_train_pca[y_train == l, 0], X_train_pca[y_train == l, 1], c=c, label=l, marker=m) plt.xlabel('PC 1') plt.ylabel('PC 2') plt.legend(loc='lower left') plt.tight_layout() # plt.savefig('images/05_03.png', dpi=300) plt.show() # - # <br> # <br> # ## Principal component analysis in scikit-learn # **NOTE** # # The following four code cells has been added in addition to the content to the book, to illustrate how to replicate the results from our own PCA implementation in scikit-learn: # + from sklearn.decomposition import PCA pca = PCA() X_train_pca = pca.fit_transform(X_train_std) pca.explained_variance_ratio_ # + plt.bar(range(1, 14), pca.explained_variance_ratio_, alpha=0.5, align='center') plt.step(range(1, 14), np.cumsum(pca.explained_variance_ratio_), where='mid') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.show() # - pca = PCA(n_components=2) X_train_pca = pca.fit_transform(X_train_std) X_test_pca = pca.transform(X_test_std) plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1]) plt.xlabel('PC 1') plt.ylabel('PC 2') plt.show() # + from matplotlib.colors import ListedColormap def plot_decision_regions(X, y, classifier, resolution=0.02): # setup marker generator and color map markers = ('s', 'x', 'o', '^', 'v') colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan') cmap = ListedColormap(colors[:len(np.unique(y))]) # plot the decision surface x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx1, xx2 = 
np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution)) Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T) Z = Z.reshape(xx1.shape) plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) # plot class samples for idx, cl in enumerate(np.unique(y)): plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.6, c=cmap(idx), edgecolor='black', marker=markers[idx], label=cl) # - # Training logistic regression classifier using the first 2 principal components. # + from sklearn.linear_model import LogisticRegression pca = PCA(n_components=2) X_train_pca = pca.fit_transform(X_train_std) X_test_pca = pca.transform(X_test_std) lr = LogisticRegression() lr = lr.fit(X_train_pca, y_train) # - plot_decision_regions(X_train_pca, y_train, classifier=lr) plt.xlabel('PC 1') plt.ylabel('PC 2') plt.legend(loc='lower left') plt.tight_layout() # plt.savefig('images/05_04.png', dpi=300) plt.show() plot_decision_regions(X_test_pca, y_test, classifier=lr) plt.xlabel('PC 1') plt.ylabel('PC 2') plt.legend(loc='lower left') plt.tight_layout() # plt.savefig('images/05_05.png', dpi=300) plt.show() pca = PCA(n_components=None) X_train_pca = pca.fit_transform(X_train_std) pca.explained_variance_ratio_ # <br> # <br> # # Supervised data compression via linear discriminant analysis # ## Principal component analysis versus linear discriminant analysis Image(filename='images/05_06.png', width=400) # ## The inner workings of linear discriminant analysis # <br> # <br> # ## Computing the scatter matrices # Calculate the mean vectors for each class: # + np.set_printoptions(precision=4) mean_vecs = [] for label in range(1, 4): mean_vecs.append(np.mean(X_train_std[y_train == label], axis=0)) print('MV %s: %s\n' % (label, mean_vecs[label - 1])) # - # Compute the within-class scatter matrix: # + d = 13 # number of features S_W = np.zeros((d, d)) for label, mv in zip(range(1, 4), mean_vecs): class_scatter = 
np.zeros((d, d)) # scatter matrix for each class for row in X_train_std[y_train == label]: row, mv = row.reshape(d, 1), mv.reshape(d, 1) # make column vectors class_scatter += (row - mv).dot((row - mv).T) S_W += class_scatter # sum class scatter matrices print('Within-class scatter matrix: %sx%s' % (S_W.shape[0], S_W.shape[1])) # - # Better: covariance matrix since classes are not equally distributed: print('Class label distribution: %s' % np.bincount(y_train)[1:]) d = 13 # number of features S_W = np.zeros((d, d)) for label, mv in zip(range(1, 4), mean_vecs): class_scatter = np.cov(X_train_std[y_train == label].T) S_W += class_scatter print('Scaled within-class scatter matrix: %sx%s' % (S_W.shape[0], S_W.shape[1])) # Compute the between-class scatter matrix: # + mean_overall = np.mean(X_train_std, axis=0) d = 13 # number of features S_B = np.zeros((d, d)) for i, mean_vec in enumerate(mean_vecs): n = X_train[y_train == i + 1, :].shape[0] mean_vec = mean_vec.reshape(d, 1) # make column vector mean_overall = mean_overall.reshape(d, 1) # make column vector S_B += n * (mean_vec - mean_overall).dot((mean_vec - mean_overall).T) print('Between-class scatter matrix: %sx%s' % (S_B.shape[0], S_B.shape[1])) # - # <br> # <br> # ## Selecting linear discriminants for the new feature subspace # Solve the generalized eigenvalue problem for the matrix $S_W^{-1}S_B$: eigen_vals, eigen_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B)) # **Note**: # # Above, I used the [`numpy.linalg.eig`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.html) function to decompose the symmetric covariance matrix into its eigenvalues and eigenvectors. # <pre>>>> eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)</pre> # This is not really a "mistake," but probably suboptimal. 
It would be better to use [`numpy.linalg.eigh`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigh.html) in such cases, which has been designed for [Hermetian matrices](https://en.wikipedia.org/wiki/Hermitian_matrix). The latter always returns real eigenvalues; whereas the numerically less stable `np.linalg.eig` can decompose nonsymmetric square matrices, you may find that it returns complex eigenvalues in certain cases. (S.R.) # # Sort eigenvectors in descending order of the eigenvalues: # + # Make a list of (eigenvalue, eigenvector) tuples eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i]) for i in range(len(eigen_vals))] # Sort the (eigenvalue, eigenvector) tuples from high to low eigen_pairs = sorted(eigen_pairs, key=lambda k: k[0], reverse=True) # Visually confirm that the list is correctly sorted by decreasing eigenvalues print('Eigenvalues in descending order:\n') for eigen_val in eigen_pairs: print(eigen_val[0]) # + tot = sum(eigen_vals.real) discr = [(i / tot) for i in sorted(eigen_vals.real, reverse=True)] cum_discr = np.cumsum(discr) plt.bar(range(1, 14), discr, alpha=0.5, align='center', label='individual "discriminability"') plt.step(range(1, 14), cum_discr, where='mid', label='cumulative "discriminability"') plt.ylabel('"discriminability" ratio') plt.xlabel('Linear Discriminants') plt.ylim([-0.1, 1.1]) plt.legend(loc='best') plt.tight_layout() # plt.savefig('images/05_07.png', dpi=300) plt.show() # - w = np.hstack((eigen_pairs[0][1][:, np.newaxis].real, eigen_pairs[1][1][:, np.newaxis].real)) print('Matrix W:\n', w) # <br> # <br> # ## Projecting samples onto the new feature space # + X_train_lda = X_train_std.dot(w) colors = ['r', 'b', 'g'] markers = ['s', 'x', 'o'] for l, c, m in zip(np.unique(y_train), colors, markers): plt.scatter(X_train_lda[y_train == l, 0], X_train_lda[y_train == l, 1] * (-1), c=c, label=l, marker=m) plt.xlabel('LD 1') plt.ylabel('LD 2') plt.legend(loc='lower right') plt.tight_layout() # 
plt.savefig('images/05_08.png', dpi=300) plt.show() # - # <br> # <br> # ## LDA via scikit-learn # + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA lda = LDA(n_components=2) X_train_lda = lda.fit_transform(X_train_std, y_train) # + from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr = lr.fit(X_train_lda, y_train) plot_decision_regions(X_train_lda, y_train, classifier=lr) plt.xlabel('LD 1') plt.ylabel('LD 2') plt.legend(loc='lower left') plt.tight_layout() # plt.savefig('images/05_09.png', dpi=300) plt.show() # + X_test_lda = lda.transform(X_test_std) plot_decision_regions(X_test_lda, y_test, classifier=lr) plt.xlabel('LD 1') plt.ylabel('LD 2') plt.legend(loc='lower left') plt.tight_layout() # plt.savefig('images/05_10.png', dpi=300) plt.show() # - # <br> # <br> # # Using kernel principal component analysis for nonlinear mappings Image(filename='images/05_11.png', width=500) # <br> # <br> # ## Implementing a kernel principal component analysis in Python # + from scipy.spatial.distance import pdist, squareform from scipy import exp from scipy.linalg import eigh import numpy as np def rbf_kernel_pca(X, gamma, n_components): """ RBF kernel PCA implementation. Parameters ------------ X: {NumPy ndarray}, shape = [n_samples, n_features] gamma: float Tuning parameter of the RBF kernel n_components: int Number of principal components to return Returns ------------ X_pc: {NumPy ndarray}, shape = [n_samples, k_features] Projected dataset """ # Calculate pairwise squared Euclidean distances # in the MxN dimensional dataset. sq_dists = pdist(X, 'sqeuclidean') # Convert pairwise distances into a square matrix. mat_sq_dists = squareform(sq_dists) # Compute the symmetric kernel matrix. K = exp(-gamma * mat_sq_dists) # Center the kernel matrix. 
N = K.shape[0] one_n = np.ones((N, N)) / N K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n) # Obtaining eigenpairs from the centered kernel matrix # scipy.linalg.eigh returns them in ascending order eigvals, eigvecs = eigh(K) eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1] # Collect the top k eigenvectors (projected samples) X_pc = np.column_stack((eigvecs[:, i] for i in range(n_components))) return X_pc # - # <br> # ### Example 1: Separating half-moon shapes # + import matplotlib.pyplot as plt from sklearn.datasets import make_moons X, y = make_moons(n_samples=100, random_state=123) plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5) plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5) plt.tight_layout() # plt.savefig('images/05_12.png', dpi=300) plt.show() # + from sklearn.decomposition import PCA scikit_pca = PCA(n_components=2) X_spca = scikit_pca.fit_transform(X) fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3)) ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1], color='red', marker='^', alpha=0.5) ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1], color='blue', marker='o', alpha=0.5) ax[1].scatter(X_spca[y == 0, 0], np.zeros((50, 1)) + 0.02, color='red', marker='^', alpha=0.5) ax[1].scatter(X_spca[y == 1, 0], np.zeros((50, 1)) - 0.02, color='blue', marker='o', alpha=0.5) ax[0].set_xlabel('PC1') ax[0].set_ylabel('PC2') ax[1].set_ylim([-1, 1]) ax[1].set_yticks([]) ax[1].set_xlabel('PC1') plt.tight_layout() # plt.savefig('images/05_13.png', dpi=300) plt.show() # + X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2) fig, ax = plt.subplots(nrows=1,ncols=2, figsize=(7,3)) ax[0].scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='^', alpha=0.5) ax[0].scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='o', alpha=0.5) ax[1].scatter(X_kpca[y==0, 0], np.zeros((50,1))+0.02, color='red', marker='^', alpha=0.5) ax[1].scatter(X_kpca[y==1, 0], np.zeros((50,1))-0.02, color='blue', 
marker='o', alpha=0.5) ax[0].set_xlabel('PC1') ax[0].set_ylabel('PC2') ax[1].set_ylim([-1, 1]) ax[1].set_yticks([]) ax[1].set_xlabel('PC1') plt.tight_layout() # plt.savefig('images/05_14.png', dpi=300) plt.show() # - # <br> # ### Example 2: Separating concentric circles # + from sklearn.datasets import make_circles X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2) plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5) plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5) plt.tight_layout() # plt.savefig('images/05_15.png', dpi=300) plt.show() # + scikit_pca = PCA(n_components=2) X_spca = scikit_pca.fit_transform(X) fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3)) ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1], color='red', marker='^', alpha=0.5) ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1], color='blue', marker='o', alpha=0.5) ax[1].scatter(X_spca[y == 0, 0], np.zeros((500, 1)) + 0.02, color='red', marker='^', alpha=0.5) ax[1].scatter(X_spca[y == 1, 0], np.zeros((500, 1)) - 0.02, color='blue', marker='o', alpha=0.5) ax[0].set_xlabel('PC1') ax[0].set_ylabel('PC2') ax[1].set_ylim([-1, 1]) ax[1].set_yticks([]) ax[1].set_xlabel('PC1') plt.tight_layout() # plt.savefig('images/05_16.png', dpi=300) plt.show() # + X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2) fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3)) ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1], color='red', marker='^', alpha=0.5) ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1], color='blue', marker='o', alpha=0.5) ax[1].scatter(X_kpca[y == 0, 0], np.zeros((500, 1)) + 0.02, color='red', marker='^', alpha=0.5) ax[1].scatter(X_kpca[y == 1, 0], np.zeros((500, 1)) - 0.02, color='blue', marker='o', alpha=0.5) ax[0].set_xlabel('PC1') ax[0].set_ylabel('PC2') ax[1].set_ylim([-1, 1]) ax[1].set_yticks([]) ax[1].set_xlabel('PC1') plt.tight_layout() # plt.savefig('images/05_17.png', dpi=300) plt.show() # - # <br> # 
<br> # ## Projecting new data points # + from scipy.spatial.distance import pdist, squareform from scipy import exp from scipy.linalg import eigh import numpy as np def rbf_kernel_pca(X, gamma, n_components): """ RBF kernel PCA implementation. Parameters ------------ X: {NumPy ndarray}, shape = [n_samples, n_features] gamma: float Tuning parameter of the RBF kernel n_components: int Number of principal components to return Returns ------------ X_pc: {NumPy ndarray}, shape = [n_samples, k_features] Projected dataset lambdas: list Eigenvalues """ # Calculate pairwise squared Euclidean distances # in the MxN dimensional dataset. sq_dists = pdist(X, 'sqeuclidean') # Convert pairwise distances into a square matrix. mat_sq_dists = squareform(sq_dists) # Compute the symmetric kernel matrix. K = exp(-gamma * mat_sq_dists) # Center the kernel matrix. N = K.shape[0] one_n = np.ones((N, N)) / N K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n) # Obtaining eigenpairs from the centered kernel matrix # scipy.linalg.eigh returns them in ascending order eigvals, eigvecs = eigh(K) eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1] # Collect the top k eigenvectors (projected samples) alphas = np.column_stack((eigvecs[:, i] for i in range(n_components))) # Collect the corresponding eigenvalues lambdas = [eigvals[i] for i in range(n_components)] return alphas, lambdas # - X, y = make_moons(n_samples=100, random_state=123) alphas, lambdas = rbf_kernel_pca(X, gamma=15, n_components=1) x_new = X[25] x_new x_proj = alphas[25] # original projection x_proj # + def project_x(x_new, X, gamma, alphas, lambdas): pair_dist = np.array([np.sum((x_new - row)**2) for row in X]) k = np.exp(-gamma * pair_dist) return k.dot(alphas / lambdas) # projection of the "new" datapoint x_reproj = project_x(x_new, X, gamma=15, alphas=alphas, lambdas=lambdas) x_reproj # + plt.scatter(alphas[y == 0, 0], np.zeros((50)), color='red', marker='^', alpha=0.5) plt.scatter(alphas[y == 1, 0], np.zeros((50)), 
color='blue', marker='o', alpha=0.5) plt.scatter(x_proj, 0, color='black', label='original projection of point X[25]', marker='^', s=100) plt.scatter(x_reproj, 0, color='green', label='remapped point X[25]', marker='x', s=500) plt.legend(scatterpoints=1) plt.tight_layout() # plt.savefig('images/05_18.png', dpi=300) plt.show() # - # <br> # <br> # ## Kernel principal component analysis in scikit-learn # + from sklearn.decomposition import KernelPCA X, y = make_moons(n_samples=100, random_state=123) scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15) X_skernpca = scikit_kpca.fit_transform(X) plt.scatter(X_skernpca[y == 0, 0], X_skernpca[y == 0, 1], color='red', marker='^', alpha=0.5) plt.scatter(X_skernpca[y == 1, 0], X_skernpca[y == 1, 1], color='blue', marker='o', alpha=0.5) plt.xlabel('PC1') plt.ylabel('PC2') plt.tight_layout() # plt.savefig('images/05_19.png', dpi=300) plt.show() # - # <br> # <br> # # Summary # ... # --- # # Readers may ignore the next cell. # ! python ../.convert_notebook_to_script.py --input ch05.ipynb --output ch05.py
Python Machine Learning__Raschka, S.-Packt Publishing (2015).pdf/Chapter05/.ipynb_checkpoints/ch05-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import spark
# %reload_ext spark

# +
# %%ignite
# Change the background color if the red square collides with the blue square

base_back_color = "rgb(255, 255, 200)"
back_color = base_back_color


def setup():
    size(500, 500)


def draw():
    global base_back_color, back_color
    background(back_color)
    fill_style("red")
    square(mouse_x, mouse_y, 100)
    b1 = bounding_box(mouse_x, mouse_y, 100, 100)
    fill_style("blue")
    square(200, 200, 100)
    b2 = bounding_box(200, 200, 100, 100)
    if collided(b1, b2):
        back_color = "green"
    else:
        back_color = base_back_color


# +
# %%ignite
# Collide and stick - Red and blue rects should collide in middle and stop moving

x1 = 0
y1 = 200
w1 = 50
h1 = 100
speed1 = 1.5

x2 = 475
y2 = 225
w2 = 25
h2 = 50
speed2 = -1


def setup():
    size(500, 500)


def draw():
    global x1, y1, w1, h1, speed1
    global x2, y2, w2, h2, speed2
    background("pink")
    # Test the *next* positions so the rects stop just before overlapping
    if collided(bounding_box(x1 + speed1, y1, w1, h1),
                bounding_box(x2 + speed2, y2, w2, h2)):
        speed1 = 0
        speed2 = 0
    x1 += speed1
    x2 += speed2
    fill_style("red")
    rect(x1, y1, w1, h1)
    fill_style("blue")
    rect(x2, y2, w2, h2)


# +
# %%ignite
# Collide and bounce - Red and blue rects should collide in middle and bounce in opposire directions

x1 = 0
y1 = 200
w1 = 50
h1 = 100
speed1 = 3

x2 = 475
y2 = 225
w2 = 25
h2 = 50
speed2 = -4


def setup():
    size(500, 500)


def draw():
    global x1, y1, w1, h1, speed1
    global x2, y2, w2, h2, speed2
    background("purple")
    if collided(bounding_box(x1, y1, w1, h1), bounding_box(x2, y2, w2, h2)):
        speed1 *= -1
        speed2 *= -1
    x1 += speed1
    x2 += speed2
    fill_style("red")
    rect(x1, y1, w1, h1)
    fill_style("blue")
    rect(x2, y2, w2, h2)


# +
# %%ignite
# Check that bounding_box always is aligned from the top-left corner with positive width/height

base_back_color = "rgb(255, 255, 200)"
back_color = base_back_color


def setup():
    global b2
    size(500, 500)
    # Negative width/height must be normalized to a top-left anchored box
    b2 = bounding_box(300, 400, -100, -50)
    if b2[0] != 200:
        raise Exception("Expected b2 left edge to be 200, got " + str(b2[0]))
    if b2[1] != 350:
        raise Exception("Expected b2 top edge to be 350, got " + str(b2[1]))
    if b2[2] != 100:
        raise Exception("Expected b2 absolute width to be 100, got " + str(b2[2]))
    if b2[3] != 50:
        # FIX: b2[3] is the height component; the message previously said "width"
        raise Exception("Expected b2 absolute height to be 50, got " + str(b2[3]))


def draw():
    global base_back_color, back_color, b2
    background(back_color)
    fill_style("red")
    square(mouse_x, mouse_y, 100)
    b1 = bounding_box(mouse_x, mouse_y, 100, 100)
    fill_style("blue")
    rect(*b2)
    if collided(b1, b2):
        back_color = "green"
    else:
        back_color = base_back_color


# +
# %%ignite
# Unhappy path
# Negative inputs are given directly to collided()

def setup():
    print("collided throws expected exceptions")
    size(100, 100)
    expect_arg_error(with_negative_width1,
                     "collided expected bounding_box1 width to be greater or equal to 0, got -100")
    expect_arg_error(with_negative_height2,
                     "collided expected bounding_box2 height to be greater or equal to 0, got -50")


def expect_arg_error(func, expected_error):
    # Call func() and report FAIL unless it raises exactly expected_error.
    # Silence (no output) means the expectation was met.
    try:
        func()
    except Exception as e:
        if str(e) != expected_error:
            print("FAIL:\n\tExpected " + str(func.__name__) + " to raise error:\n\t\t" + expected_error)
            print("\tbut received:\n\t\t" + str(e).replace("\n", "\n\t\t"))
    else:
        print("FAIL:\n\tExpected " + str(func.__name__) + " to raise error:\n\t\t" + expected_error)
        print("\tbut it didn't raise any errors")


def with_negative_width1():
    print("with_negative_width1")
    collided([0, 0, -100, 0], [0, 0, 0, 50])


def with_negative_height2():
    print("with_negative_height2")
    collided([0, 0, 100, 0], [0, 0, 0, -50])
test/CollidedTest.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Basic BCI Demo
#
# Categorization of cognitive states in real time is important for effective BCI. Here we
# demonstrate a basic machine learning categorization technique to distinguish between two
# different thinking tasks. We will show the machine two different brain activities and train
# a machine learning model. We can then show the machine new data, in real time, and the model
# will predict which thinking task it is. The support vector machine (ML) is only looking to
# distinguish between the states, so we don't get any information about what these states are.
# Rather, the computer should tell us which state the subject is in, so long as the brain
# processes associated with each task are significant.

# +
import numpy as np  # Module that simplifies computations on matrices
import matplotlib.pyplot as plt  # Module used for plotting
from pylsl import StreamInlet, resolve_byprop  # Module to receive EEG data
import bci_workshop_tools as BCIw  # Our own functions for the workshop
import ipython_bell
import time

print("Setup complete.")
# -

# %matplotlib
# %bell -n notify


def _beep(frequency=600, duration_ms=500):
    """Audible cue marking the start/end of an experiment phase.

    FIX: the original code called an undefined ``b.beep(...)`` and the
    Windows-only ``winsound`` module without importing it, so every cue
    raised ``NameError``. Fall back to the terminal bell off Windows.
    """
    try:
        import winsound  # Windows-only standard-library module
        winsound.Beep(frequency, duration_ms)
    except ImportError:
        print('\a', end='', flush=True)


if __name__ == "__main__":

    """ 1. CONNECT TO EEG STREAM """

    # Search for active LSL stream
    print('Looking for an EEG stream...')
    streams = resolve_byprop('type', 'EEG', timeout=2)
    if len(streams) == 0:
        raise RuntimeError('Can\'t find EEG stream.')

    # Set active EEG stream to inlet and apply time correction
    print("Start acquiring data")
    inlet = StreamInlet(streams[0], max_chunklen=12)
    eeg_time_correction = inlet.time_correction()

    # Get the stream info, description, sampling frequency, number of channels
    info = inlet.info()
    description = info.desc()
    fs = int(info.nominal_srate())
    n_channels = info.channel_count()

    # Get names of all channels
    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, n_channels):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    """ 2. SET EXPERIMENTAL PARAMETERS """

    # Length of the EEG data buffer (in seconds)
    # This buffer will hold last n seconds of data and be used for calculations
    buffer_length = 15

    # Length of the epochs used to compute the FFT (in seconds)
    epoch_length = 1

    # Amount of overlap between two consecutive epochs (in seconds)
    overlap_length = 0.8

    # Amount to 'shift' the start of each next consecutive epoch
    shift_length = epoch_length - overlap_length

    # Index of the channel (electrode) to be used
    # 0 = left ear, 1 = left forehead, 2 = right forehead, 3 = right ear
    index_channel = [0, 1, 2, 3]
    # Name of our channel for plotting purposes
    ch_names = [ch_names[i] for i in index_channel]
    n_channels = len(index_channel)

    # Get names of features
    # ex. ['delta - CH1', 'pwr-theta - CH1', 'pwr-alpha - CH1',...]
    feature_names = BCIw.get_feature_names(ch_names)

    # Number of seconds to collect training data for (one class)
    training_length = 30

    """ 3. RECORD TRAINING DATA """

    # Record data for mental activity 0
    _beep(530, 700)
    eeg_data0, timestamps0 = inlet.pull_chunk(
        timeout=training_length + 1, max_samples=fs * training_length)
    eeg_data0 = np.array(eeg_data0)[:, index_channel]

    print('\n Stimuli \n')

    # Record data for mental activity 1
    _beep(530, 700)
    eeg_data1, timestamps1 = inlet.pull_chunk(
        timeout=training_length + 1, max_samples=fs * training_length)
    eeg_data1 = np.array(eeg_data1)[:, index_channel]

    # Divide data into epochs
    eeg_epochs0 = BCIw.epoch(eeg_data0, epoch_length * fs, overlap_length * fs)
    eeg_epochs1 = BCIw.epoch(eeg_data1, epoch_length * fs, overlap_length * fs)

    """ 4. COMPUTE FEATURES AND TRAIN CLASSIFIER """

    feat_matrix0 = BCIw.compute_feature_matrix(eeg_epochs0, fs)
    feat_matrix1 = BCIw.compute_feature_matrix(eeg_epochs1, fs)

    [classifier, mu_ft, std_ft] = BCIw.train_classifier(
        feat_matrix0, feat_matrix1, 'SVM')
    _beep(600, 500)

    """ 5. USE THE CLASSIFIER IN REAL-TIME"""

    # Initialize the buffers for storing raw EEG and decisions
    eeg_buffer = np.zeros((int(fs * buffer_length), n_channels))
    filter_state = None  # for use with the notch filter
    decision_buffer = np.zeros((30, 1))

    plotter_decision = BCIw.DataPlotter(30, ['Decision'])

    # The try/except structure allows to quit the while loop by aborting the
    # script with <Ctrl-C>
    print('Press Ctrl-C in the console to break the while loop.')
    try:
        while True:
            """ 3.1 ACQUIRE DATA """
            # Obtain EEG data from the LSL stream
            eeg_data, timestamp = inlet.pull_chunk(
                timeout=1, max_samples=int(shift_length * fs))

            # Only keep the channel we're interested in
            ch_data = np.array(eeg_data)[:, index_channel]

            # Update EEG buffer
            eeg_buffer, filter_state = BCIw.update_buffer(
                eeg_buffer, ch_data, notch=True, filter_state=filter_state)

            """ 3.2 COMPUTE FEATURES AND CLASSIFY """
            # Get newest samples from the buffer
            data_epoch = BCIw.get_last_data(eeg_buffer, epoch_length * fs)

            # Compute features
            feat_vector = BCIw.compute_feature_vector(data_epoch, fs)
            y_hat = BCIw.test_classifier(classifier, feat_vector.reshape(1, -1), mu_ft, std_ft)
            print(y_hat)

            decision_buffer, _ = BCIw.update_buffer(decision_buffer, np.reshape(y_hat, (-1, 1)))

            """ 3.3 VISUALIZE THE DECISIONS """
            plotter_decision.update_plot(decision_buffer)
            plt.pause(0.00001)

    except KeyboardInterrupt:
        print('Closed!')
        _beep(600, 500)
src/data/2019-07-29_SVMdemo_02.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Basic Britishpoliticalspeech.org Scraper (TXT)
#
# This python based scraper will scrape British political speeches from political leaders in the
# UK from [Britishpoliticalspeech.org](http://britishpoliticalspeech.org/). When fully run the
# scraper will output a directory with txt files of all the individual speeches held. These could
# be used for specific textual analyses.

# + id="5292d9d0-5c94-412c-b0ce-aaffaf0b1c92"
import sys
import requests
import re
import os
from bs4 import BeautifulSoup


# + id="86543cd3-8e52-468b-972b-2f0feb173afc"
def load_page(url):
    """Fetch *url* over HTTP and return the response body as text."""
    with requests.get(url) as response:
        return response.text


# + [markdown] id="9e91d3ca-5820-4553-b49e-cb3a548b20ee"
# ## Locate the speeches
# Here we define two functions to first extract the hyperlinks from the speeches from the main
# content table of the archive using `get_speech_data()`, and secondly to download the speech
# texts on the specific speech pages linked in the content table using the `get_speech()` function.
# + id="117749c4-9370-417d-8c4b-f48d105f784d"
def get_speech_data(url):
    """Scrape the archive content table at *url*.

    Returns a list of dicts, one per speech, each with the speech title
    ('name speech') and its absolute hyperlink ('link').
    """
    speech_page = BeautifulSoup(load_page(url), 'lxml')  # Open the webpage
    if not speech_page:
        print('Something went wrong!', file=sys.stderr)
        sys.exit()
    data = []
    for row in speech_page.find_all('tr')[2:]:  # skip the two header rows
        speech = row.find_all('td')[3]  # Find the name of every speech
        link = row.find('a').get('href')  # Find the hyperlink for every speech
        data.append({
            # FIX: the title cell was looked up but never stored, although the
            # (commented-out) debug code downstream reads row['name speech'].
            'name speech': speech.text.strip(),
            'link': 'http://britishpoliticalspeech.org/' + link,
        })  # Store the title and hyperlink in 'data'
    return data


# + id="9f936da2"
def get_speech(url):
    """Download one speech page and return {'speech': full_text}.

    Returns an empty dict when the page has no usable text (e.g. the speech
    was removed for copyright reasons), so callers can skip it.
    """
    speech_page = BeautifulSoup(load_page(url), 'lxml')  # Open the speech webpage
    # Full text of the speech, with stray whitespace/control characters removed
    interesting_html = (speech_page.find(class_='speech-content').text.strip()
                        .replace('\xa0\n', '').replace('\n', '').replace('\x85', '').replace('\u2011', ''))
    # Check if this text is in the speech, otherwise this page can be skipped
    skip_check = 'Owing to a copyright issue this speech has been removed.'
    if not interesting_html or skip_check in interesting_html:
        # or not speaker_html or not location_html don't really care about not finding these
        # print('Skipped - No information available for {}'.format(url), file=sys.stderr)
        return {}
    return {'speech': interesting_html}  # returns the full text of the speech
# -

# ## Scraping the Data
#
# The following code will proceed to apply the previously made functions for scraping the desired
# data and writes the output in txt files in a newly created directory called "speeches". This
# directory will be created wherever you stored this notebook.
# + id="cfb4d759" outputId="95a5dc13-ffc0-473a-f5d7-c37d2adfe619"
index_url = 'http://britishpoliticalspeech.org/speech-archive.htm'  # Contains a list of speeches
list_speech_data = get_speech_data(index_url)  # Get speeches with metadata
list_rows_to_remove = []
# print(" - - - - - " + str(len(list_speech_data)))
for count, row in enumerate(list_speech_data):
    # print('Scraping info on {}.'.format(row['name speech']))  # Might be useful for debugging
    url = row['link']
    speech_info = get_speech(url)  # Get the speech, if available
    if speech_info == {}:
        # Remember unavailable speeches; deleting mid-iteration would skip rows
        list_rows_to_remove.append(count)
    else:
        for key, value in speech_info.items():
            row[key] = value  # Add the new data to our dictionary
        # print('Scraped info on {}.'.format(row['name speech']))

for d_elem in reversed(list_rows_to_remove):  # Delete list rows in reverse to avoid errors
    # print("Speech missing - Deleted: " + str(d_elem))
    del list_speech_data[d_elem]
# print(" - - - - - " + str(len(list_speech_data)))

print('Done scraping!')

# + id="8c73e775" outputId="1fb24862-e638-4375-dd90-e19e498a55dd"
path = "speeches/"
# Check whether the specified path exists or not
if not os.path.exists(path):
    # Create a new directory because it does not exist
    os.makedirs(path)
    print("The new directory is created!")

# Write the speeches in txt files with a sequential id as file name
number = 1
for row in list_speech_data:
    filename = f'political_speech_{number}.txt'
    number += 1
    # FIX: open the file with a context manager (guaranteed close even on
    # error) and an explicit UTF-8 encoding — the platform default encoding
    # can raise UnicodeEncodeError on non-ASCII characters in the speeches.
    with open(path + filename, "w", encoding="utf-8") as outfile:
        outfile.writelines(row['speech'])
# -
Scrapers/Britishpoliticalspeech_org_Scraper(TXT).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="xWIFGXHIS5W0" # # Notebook article: *Strong field double ionization : The phase space perspective* # # --- # # > Authors: [<NAME>](https://orcid.org/0000-0003-3667-259X) and [<NAME>](https://orcid.org/0000-0002-6976-5488) # # based on [[<NAME>, <NAME>, <NAME>, *Strong field double ionization: The phase space perspective*, Physical Review Letters 102, 173002 (2009)](https://dx.doi.org/10.1103/PhysRevLett.102.173002)] # # > *Abstract*: We identify the phase-space structures that regulate atomic double ionization in strong ultrashort laser pulses. The emerging dynamical picture complements the recollision scenario by clarifying the distinct roles played by the recolliding and core electrons, and leads to verifiable predictions on the characteristic features of the "knee", a hallmark of the nonsequential process. # # *Last modified:* 2020-10-28 15:01:00 # + [markdown] id="LHRGS7oyT2yy" # ## Introduction # One of the most striking surprises of recent years in intense laser-matter interactions has come from multiple ionization by intense short laser pulses: Correlated (nonsequential) double ionization rates were found to be # several orders of magnitude higher than the uncorrelated sequential mechanism allows. This discrepancy has made the characteristic "knee" shape in the # double ionization yield versus intensity plot into one of the most dramatic manifestations of electron-electron correlation in nature. The precise mechanism that makes correlation so effective is far from settled. 
# Different scenarios have been proposed to explain the mechanism behind ionization [1-18] and
# have been confronted with experiments [19,20]; the recollision scenario [2,3], in which the
# ionized electron is hurled back at the ion core by the laser, is in best accord with
# experiments. In Fig. 1, a typical double ionization probability as a function of the intensity
# of the laser field is plotted. Similar knees have been observed in experimental data [1,4,20-26]
# and successfully reproduced by quantal computations on atoms and molecules [5,27-29]. In a
# recent series of articles [8,9,12,13,29-31] characteristic features of double ionization were
# reproduced using classical trajectories and this success was ascribed to the paramount role of
# correlation [12]. Indeed, entirely classical interactions turn out to be adequate to generate
# the strong two-electron correlation needed for double ionization.
#
# In this Letter, we complement the well-known recollision scenario by identifying the organizing
# principles which explain the statistical properties of the classical trajectories such as
# ionization probabilities. In addition to the dynamical picture of the ionized electron provided
# by the recollision scenario, we connect the dynamics of the core electron and the energy flow
# leading to double ionization to relevant phase space structures (periodic orbits or invariant
# tori). The resulting picture leads to two verifiable predictions for key points which make up
# the knee in Fig. 1: namely the laser intensity where nonsequential double ionization is maximal
# and the intensity where the double ionization is complete. Of course, the saturation intensity
# emerges naturally in quantum mechanical calculations (e.g., Refs. [5,28]) provided they cover a
# wide enough intensity range.

# + [markdown] id="j0_CCzPWUyaQ"
# ## Hamiltonian model
# We work with the classical Hamiltonian model of the helium atom with soft Coulomb potentials [32,33].
The Hamiltonian is given by [10]: # $$ # {\mathcal H}(x,y,p_{x}, p_{y},t) = \frac{p_{x}^{2}}{2} + \frac{ # p_{y}^{2}}{2}+(x+y)E(t) + \frac{1}{\sqrt{(x-y)^{2}+b^2}} - \frac{2}{\sqrt{x^{2}+a^2}} - \frac{2}{\sqrt{y^{2}+a^2}}, \tag{1}$$ # # where $x$, $y$ and $p_{x}$, $p_{y}$ are the positions and # (canonically conjugate) momenta of each electron respectively. The energy is initially fixed at the ground state ${\cal E}_g$ [34]. # The laser field is modeled by a sinusoidal pulse with an envelope, i.e. # $E(t)= E_{0} \ f(t) \ \sin \omega t$ where $E_{0}$ is the maximum amplitude and $\omega=2\pi/\lambda$ the laser frequency. The # pulse envelope $f(t)$ is chosen as a trapezoidal function with two laser cycle ramp-up and two laser cycle ramp-down [12,13,29,30]. # Typical ionizing trajectories of Hamiltonian (1) show two # qualitatively different routes to double ionization: # nonsequential double ionization (NSDI), where the two electrons leave # the core (inner) region at about the same time, and sequential double # ionization (SDI), where one electron leaves the inner region long time # after the other one has ionized. # # --- # > The following cell defines the widgets for the parameters of the atom (energy of the ground state $\mathcal{E}_g$ and softening parameters $a$, $b$, where $4 < 2 | \mathcal{E}_g | a < 3\sqrt{3}$) and the parameters of the laser field (wavelength $\lambda$, intensity $I$ and pulse duration in laser cycles $t_d$) used throughout this notebook. 
# + cellView="form" hide_input=true id="h7HjIrBNKv2m" jupyter={"source_hidden": true} tags=["hide-input"]
#@title Parameters of the notebook
from IPython.display import display, clear_output, Math, Latex
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import matplotlib.pyplot as plt
import numpy as np
import math
import time
from scipy.optimize import fsolve, fmin
import warnings
warnings.filterwarnings('ignore', 'The iteration is not making good progress')

## Parameters
a = 1.0 #@param {type:"number"}
b = 1.0 #@param {type:"number"}
E_g = -2.24 #@param {type:"number"}
lambda_nm = 780 #@param {type:"number"}
tau_lc = 8 #@param {type:"number"}
h_step = 0.05
#@markdown ---


## Classes
class Atom:
    """Soft-Coulomb two-electron atom entering Hamiltonian (1)."""

    def __init__(self, a, b, ground_state_energy, size=5.0, thresh=100):
        # a, b: softening parameters (electron-nucleus / electron-electron)
        self.a = a
        self.b = b
        self.a2 = a ** 2
        self.b2 = b ** 2
        self.E = ground_state_energy
        # size: half-width of the sampling box for initial conditions (a.u.)
        self.size = size
        # thresh: |position| beyond which an electron counts as ionized
        self.thresh = thresh


class Field:
    """Laser pulse: sinusoidal carrier with amplitude E0 and an envelope f(t)."""

    def __init__(self, wavelength, intensity=[], envelope=[], params_envelop=[]):
        self.wavelength = wavelength
        self.frequency = 45.597 / wavelength  # nm -> a.u. conversion
        self.period = 2.0 * np.pi / self.frequency
        self.intensity = intensity
        self.amplitude = 5.338e-9 * np.sqrt(self.intensity)  # W/cm^2 -> a.u.
        self.envelope = envelope
        # params: envelope segment durations in laser cycles; times: cumulative edges
        self.params = params_envelop
        self.times = np.cumsum(self.params)


class Integration:
    """Settings of the (symplectic, fixed-step) integrator."""

    def __init__(self, step, t_f, N=1, precision=np.float32):
        self.t_f = t_f          # final integration time (a.u.)
        self.N = N              # number of trajectories integrated in parallel
        self.h = step           # time step (a.u.)
        self.precision = precision


## Functions
def trapezoidal(t, field):
    """Trapezoidal pulse envelope: linear ramp-up, plateau, linear ramp-down."""
    tau = t / field.period
    if tau < field.times[0]:
        return tau / field.times[0]
    elif field.times[0] <= tau <= field.times[1]:
        return 1.0
    elif field.times[1] < tau < field.times[2]:
        return (field.times[2] - tau) / field.params[2]
    else:
        return 0.0


def potential_2e(x, atom, is_deriv=False):
    """Two-electron soft-Coulomb potential of Eq. (1), or its gradient if is_deriv."""
    r12 = x[0, ] - x[1, ]
    if is_deriv:
        dv_dt = 2.0 * x / np.power(x ** 2 + atom.a2, 1.5)
        rho12 = r12 / np.power(r12 ** 2 + atom.b2, 1.5)
        dv_dt[0, ] -= rho12
        dv_dt[1, ] += rho12
        return dv_dt
    else:
        return - 2.0 / np.sqrt(x[0, ] ** 2 + atom.a2) - 2.0 / np.sqrt(x[1, ] ** 2 + atom.a2) \
               + 1.0 / np.sqrt(r12 ** 2 + atom.b2)


def potential_1e(x, atom, is_deriv=False):
    """Single-electron soft-Coulomb potential, or its derivative if is_deriv."""
    if is_deriv:
        return 2.0 * x / np.power(x ** 2 + atom.a2, 1.5)
    else:
        return - 2.0 / np.sqrt(x ** 2 + atom.a2)


def generate_initial_conditions(atom, integration):
    """Draw integration.N microcanonical initial conditions on the shell E = atom.E.

    Positions are sampled uniformly in a box, rejected where the potential
    exceeds the target energy, and momenta scaled to match the energy shell.
    """
    yf = np.empty((4, 0))
    nt = 0
    while nt <= integration.N - 1:  # keep sampling until enough valid points
        x_rand = 2.0 * atom.size * np.random.random((2, integration.N)).astype(integration.precision) \
                 - atom.size
        values_potential = potential_2e(x_rand, atom)
        index = (values_potential >= atom.E)  # energetically forbidden points
        x_rand = x_rand[:, np.logical_not(index)]
        values_potential = values_potential[np.logical_not(index)]
        length_valid = len(values_potential)
        p_rand = 2.0 * np.random.random((2, length_valid)).astype(integration.precision) - 1.0
        p_rand *= np.sqrt(2.0 * (atom.E - values_potential))
        nt += length_valid
        y = np.concatenate([x_rand, p_rand], axis=0)
        yf = np.concatenate([yf, y], axis=1)
    return integration.precision(yf[:4, :integration.N])


def eqn_motion_2e(x, atom, field, integration, traj=False, output_times=[], Jacobian=False, matrix_J=np.array([])):
    """Integrate the two-electron equations of motion with a leapfrog scheme.

    x holds N trajectories as rows (x, y, p_x, p_y). Returns the final state,
    the sampled trajectory (traj=True), or (state, Jacobian) when Jacobian=True.
    matrix_J is only read, never mutated in place.
    """
    t = 0.0
    xf = x.copy()
    if Jacobian == False:
        xt = np.empty((4, 0))
        if traj:
            n_output = np.rint(output_times / integration.h)
            if 0 in n_output:
                xt = x.copy().reshape((4, -1))
        n_index = 1
        while t <= integration.t_f:
            e0_eff = field.amplitude * field.envelope(t, field) * np.sin(field.frequency * t)
            # Position half-step, momentum full-step, position half-step (leapfrog)
            xf[:2] += integration.h * 0.5 * xf[2:]
            xf[2:] -= integration.h * (potential_2e(xf[:2], atom, is_deriv=True) + e0_eff)
            xf[:2] += integration.h * 0.5 * xf[2:]
            if traj and (n_index in n_output):
                xt = np.hstack((xt, xf.reshape(4, -1)))
            t += integration.h
            n_index += 1
        if traj:
            return xt
        else:
            return xf
    if Jacobian == True:
        if matrix_J.size == 0:
            J = np.identity(4).reshape(-1)
            # FIX: was `parameters.N` (module-level global); the function must
            # use the Integration object it was given.
            J = np.repeat(J[:, np.newaxis], integration.N, axis=1)
        else:
            J = matrix_J
        Jf = J.copy()
        while t <= integration.t_f:
            e0_eff = field.amplitude * field.envelope(t, field) * np.sin(field.frequency * t)
            xf[:2] += integration.h * 0.5 * xf[2:]
            xf[2:] -= integration.h * (potential_2e(xf[:2], atom, is_deriv=True) + e0_eff)
            # Second derivatives of the potential for the tangent flow
            Vxx = 2.0 * (atom.a2 - 2.0 * xf[0] ** 2) / np.power(xf[0] ** 2 + atom.a2, 2.5) - (atom.b2 - 2.0 * (xf[0]-xf[1]) ** 2) / np.power((xf[0]-xf[1]) ** 2 + atom.b2, 2.5)
            Vyy = 2.0 * (atom.a2 - 2.0 * xf[1] ** 2) / np.power(xf[1] ** 2 + atom.a2, 2.5) - (atom.b2 - 2.0 * (xf[0]-xf[1]) ** 2) / np.power((xf[0]-xf[1]) ** 2 + atom.b2, 2.5)
            Vxy = (atom.b2 - 2.0 * (xf[0]-xf[1]) ** 2) / np.power((xf[0]-xf[1]) ** 2 + atom.b2, 2.5)
            xf[:2] += integration.h * 0.5 * xf[2:]
            # One leapfrog step of the 4x4 tangent matrix, stored flattened per trajectory
            Jf[0:13:4] = (1.0 - integration.h ** 2 * 0.5 * Vxx) * J[0:13:4] - integration.h ** 2 * 0.5 * Vxy * J[1:14:4] + integration.h * ((1.0 - 0.25 * integration.h ** 2 * Vxx) * J[2:15:4] - 0.25 * integration.h ** 2 * Vxy * J[3:16:4])
            Jf[1:14:4] = (1.0 - integration.h ** 2 * 0.5 * Vyy) * J[1:14:4] - integration.h ** 2 * 0.5 * Vxy * J[0:13:4] + integration.h * ((1.0 - 0.25 * integration.h ** 2 * Vyy) * J[3:16:4] - 0.25 * integration.h ** 2 * Vxy * J[2:15:4])
            Jf[2:15:4] = - integration.h * Vxx * J[0:13:4] - integration.h * Vxy * J[1:14:4] + (1.0 - integration.h ** 2 * 0.5 * Vxx) * J[2:15:4] - integration.h ** 2 * 0.5 * Vxy * J[3:16:4]
            Jf[3:16:4] = - integration.h * Vyy * J[1:14:4] - integration.h * Vxy * J[0:13:4] + (1.0 - integration.h ** 2 * 0.5 * Vyy) * J[3:16:4] - integration.h ** 2 * 0.5 * Vxy * J[2:15:4]
            J = Jf.copy()
            t += integration.h
        return xf, Jf


def eqn_motion_2e_section(x, atom, integration, output_times=[]):
    """Integrate one field-free trajectory until it crosses the Poincare section.

    The step size is shrunk (and its sign flipped) near a crossing so the
    returned time/state lie on the section to within eps.
    """
    t = 0.0
    eps = 1.0e-14
    xf = x.copy()
    gc, sc = Poincare_section(xf)
    h = integration.h
    while t <= integration.t_f:
        xf[:2] += h * 0.5 * xf[2:]
        xf[2:] -= h * potential_2e(xf[:2], atom, is_deriv=True)
        xf[:2] += h * 0.5 * xf[2:]
        t += h
        gf, sc = Poincare_section(xf)
        if np.abs(gf) < eps:
            return t, xf
        if (gc * gf < 0) & (gc * h * sc > 0):
            h *= - 0.1  # crossed the section: backtrack with a finer step
        gc = gf
    return t, xf


def Poincare_section(y):
    """Section function g(z)=p_x and its crossing direction."""
    return y[2], 1.0


def eqn_motion_1e(y, atom, field, integration, traj=False, output_times=[]):
    """Single-electron counterpart of eqn_motion_2e (constant-amplitude field)."""
    t = 0.0
    yf = y.copy()
    yt = np.empty((2, 0))
    if traj:
        n_output = np.rint(output_times / integration.h)
        if 0 in n_output:
            yt = y.copy().reshape((2, -1))
    # FIX: started at 0, which re-recorded sample index 0 one step late;
    # eqn_motion_2e consistently starts this counter at 1.
    n_index = 1
    while t <= integration.t_f:
        e0_eff = field.amplitude * np.sin(field.frequency * t)
        yf[0] += integration.h * 0.5 * yf[1]
        yf[1] -= integration.h * (potential_1e(yf[0], atom, is_deriv=True) + e0_eff)
        yf[0] += integration.h * 0.5 * yf[1]
        if traj and (n_index in n_output):
            yt = np.hstack((yt, yf.reshape(2, -1)))
        t += integration.h
        n_index += 1
    if traj:
        return yt
    else:
        return yf


envelope_name = trapezoidal

# + [markdown] id="N-KhVyKVXpY0"
# ### Figure 1: Double ionization (DI) probability for Hamiltonian (1) as a function of the intensity of the field $I$.
# The double ionization probability curve for Hamiltonian (1) is computed by counting, for each
# intensity, the percentage of trajectories which undergo a double ionization. The set of
# trajectories we use is initiated using a microcanonical distribution.
#
# > *Default parameters: $a=1$, $b=1$, ${\cal E}_g=-2.24$, $\lambda=780\; \mathrm{nm}$, pulse duration $t_d = 8 \; \mathrm{l.c.}$, intensity range: $I_{\min} = 3\times 10^{13} \; \mathrm{W}/\mathrm{cm}^2$ and $I_{\max} = 5\times 10^{16} \; \mathrm{W}/\mathrm{cm}^2$, number of intensities $N_I = 20$ and $5\times 10^4$ trajectories per intensity.*
#
# > *Typical execution time: 9 minutes.*

# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 869} id="dEQd3avTZoL6" outputId="6de6b35a-c898-4472-f13a-564aece99b70" tags=["hide-input"]
#@title Execute Figure 1
I_min = 3e13 #@param {type:"number"}
I_max = 5e16 #@param {type:"number"}
Number_of_Intensities = 20 #@param {type:"integer"}
N = 50000
#@markdown ---
my_atom = Atom(a=a, b=b, ground_state_energy=E_g)
params_envelop = (2, tau_lc-4, 2, 2)
my_field = Field(wavelength=lambda_nm, intensity=[], envelope=envelope_name, params_envelop=params_envelop)
parameters = Integration(N=N, step=h_step, t_f=np.sum(my_field.params) * my_field.period)
print(" Intensity (W/cm2) DI Probability CPU time (seconds) ")
print("____________________________________________________________________________")
Intensities = 10 ** np.linspace(np.log10(I_min), np.log10(I_max), Number_of_Intensities)
proba_di = np.zeros(Number_of_Intensities)
for it in range(Number_of_Intensities):
    X0 = generate_initial_conditions(my_atom, parameters)
    my_field = Field(wavelength=lambda_nm, intensity=Intensities[it], envelope=envelope_name, params_envelop=params_envelop)
    start_di = time.time()
    Xf = eqn_motion_2e(X0, my_atom, my_field, parameters)
    # Double ionization: both electrons beyond the ionization threshold
    cond_di = (np.abs(Xf[0]) >= my_atom.thresh) & (np.abs(Xf[1]) >= my_atom.thresh)
    proba_di[it] = float(np.count_nonzero(cond_di) / parameters.N)
    end_di = time.time()
    print(" {:.2e} {:.4f} {} ".format(Intensities[it], proba_di[it], int(np.rint(end_di-start_di))))
    # Adapt the sample size: fewer trajectories where the probability is large
    if proba_di[it] == 0:
        parameters.N = N
    else:
        parameters.N = min(N, int(np.floor(N/100/proba_di[it])))
print("____________________________________________________________________________")
plt.figure(figsize=(9.5,7.5))
plt.xlabel('$I$ (W/cm$^2$)')
plt.ylabel('Probability')
plt.loglog(Intensities, proba_di, marker='o', color='r', linestyle='None', label='Double ionization')
plt.show()

# + [markdown] id="TMqkUQmSkmoN"
# ## Dynamics without the field
# We first analyze the dynamics of Hamiltonian (1) without the field ($E_0=0$) using linear
# stability properties such as obtained by the finite-time Lyapunov (FTL) exponents [35,36].
# With each initial condition on a plane [e.g., $(x,p_x)$ with $y=0$, and $p_y$ determined by
# ${\mathcal H}={\mathcal E}_g$ on Fig. 2] for Hamiltonian (1), we associate a coefficient which
# quantifies the degree of chaos experienced by the trajectory up to a given time.
# A typical FTL map is depicted in Fig.
# 2 for Hamiltonian (1) without the field. It clearly displays strong and global chaos by
# showing fine details of the stretching and folding of trajectories [35]. In particular, there
# are no regular elliptic islands of stability contrary to what is common with Hamiltonian
# systems on a bounded energy manifold. By examining typical trajectories, we notice that the
# motion of the two electrons tracks, at different times, one of four hyperbolic periodic orbits.
# Their period is 29 a.u., i.e., much shorter than the duration of the laser pulse (of order
# 800 a.u.). The projections of two of them, $O_1$ and $O_2$, on the $(x,p_x)$ plane, are
# displayed in Fig. 2. The two other ones are obtained from $O_1$ and $O_2$ using the symmetries
# of Hamiltonian (1). In particular, if one electron is on the inner curve in $(x,p_x)$, the
# second electron is on the outer curve in $(y,p_y)$. Consequently, a typical two-electron
# trajectory is composed of one electron close to the nucleus (the "inner" electron, in blue)
# and another further away (the "outer" electron, in red), with quick exchanges of the roles of
# each electron. This distinction is crucial when the laser field is turned on: Since the
# contribution of the field-electron interaction to Hamiltonian (1) is proportional to the
# position, the action of the field is larger for the outer electron, while the inner electron
# is mainly driven by the interaction with the nucleus.

# + [markdown] id="hwRzF7uhlgXZ"
# ### Figure 2: FTL map of Hamiltonian (1) without the field
# The Finite-Time Lyapunov (FTL) exponent is given by $\mu = \log (\max |\lambda|)/t_f$, where
# $\lambda$ are the eigenvalues of the tangent flow $\mathbb{J}$ of Hamiltonian (1) and $t_f$ is
# the integration time. The equations of motion for the tangent flow are given by
# $\dot{\mathbb{J}} = \mathbb{A} \mathbb{J}$, with $A_{ij} = \partial \dot{z_i} / \partial z_j$,
# where $\mathbf{z} = (x,y,p_x,p_y)$.
# The values of the FTL exponents are represented for initial conditions in the plane
# $(x,p_{x})$ with $y=0$.
# The periodic orbit without the field is determined using a Poincaré section $p_x=0$. The
# projection $(x,p_x)$ of the periodic orbit is in red and the projection $(y,p_y)$ is in blue.
# There is the possibility to visualize the values of the FTL map in real time.
#
# > *Default parameters: $a=1$, $b=1$, ${\cal E}_g=-2.24$, animation display: false, $t_f=43$ a.u., resolution $256$.*
#
# > *Typical execution time: 30 seconds.*

# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 491} id="X63mvvYQGREv" jupyter={"source_hidden": true} outputId="958c4b12-b163-4614-83f3-5a28cca64892" tags=["hide-input"]
#@title Execute Figure 2
tf = 43.0 #@param {type:"number"}
resolution = 256 #@param ["128", "256", "512", "1024"] {type:"raw", allow-input: true}
# NOTE(review): the markdown above says "animation display: false" but the
# default here is True — confirm which is intended.
animation = True #@param ["True", "False"] {type:"raw"}
x_min = -4.5
x_max = 4.5
p_min = -1.5
p_max = 1.5
#@markdown ---
my_atom = Atom(a=a, b=b, ground_state_energy=E_g)
params_envelop = (2, tau_lc-4, 2, 2)
my_field = Field(wavelength=lambda_nm, intensity=0.0, envelope=envelope_name, params_envelop=params_envelop)
# Grid of initial conditions in the (x, p_x) plane with y = 0
x0 = np.linspace(x_min, x_max, resolution)
px0 = np.linspace(p_min, p_max, resolution)
X0 = np.tile(x0, resolution)
Px0 = np.repeat(px0, resolution)
# Kinetic energy left for p_y on the shell H = E_g; negative values are forbidden
Q = my_atom.E - 0.5 * Px0 ** 2 - potential_2e(np.array([X0, np.zeros(X0.size)]), my_atom)
indices = np.where(Q >= 0)
Py0 = np.sqrt(2.0 * Q[indices])
x = np.array([X0[indices], np.zeros(Py0.size), Px0[indices], Py0])
fig1 = plt.figure(figsize=(9.5,7.5))
if not animation:
    # One pass: integrate the tangent flow to t_f and plot the final FTL map
    parameters = Integration(N=Py0.size, t_f=tf, step=h_step)
    x, J = eqn_motion_2e(x, my_atom, my_field, parameters, Jacobian=True)
    Lambda = np.empty((resolution ** 2))
    Lambda[:] = np.nan  # FIX: np.NaN was removed in NumPy 2.0; np.nan works everywhere
    for i in np.arange(parameters.N):
        w, v = np.linalg.eig(J[:,i].reshape(4,4))
        Lambda[indices[0][i]] = np.log(np.amax(np.abs(w))) / parameters.t_f
    #pcol = plt.pcolor(x0, px0, Lambda.reshape((resolution, resolution)), cmap='magma', vmin=0, vmax=np.nanmax(Lambda), shading='auto')
    pcol = plt.pcolor(x0, px0, Lambda.reshape((resolution, resolution)), cmap='magma', vmin=0, vmax=np.nanmax(Lambda))
    fig1.colorbar(pcol)
    plt.xlabel('$x$')
    plt.ylabel('$p_x$')
    plt.xlim(x_min, x_max)
    plt.ylim(p_min, p_max)
else:
    # Animated pass: advance the tangent flow in chunks of Ni steps and redraw
    Ni = math.floor(1.0 / h_step) + 1.0
    parameters = Integration(N=Py0.size, t_f=Ni*h_step, step=h_step)
    J = np.identity(4).reshape(-1)
    J = np.repeat(J[:,np.newaxis], parameters.N, axis=1)
    tf_ind = 0.0
    while tf_ind < tf:
        x, J = eqn_motion_2e(x, my_atom, my_field, parameters, Jacobian=True, matrix_J=J)
        Lambda = np.empty((resolution ** 2))
        Lambda[:] = np.nan  # FIX: np.NaN was removed in NumPy 2.0
        for i in np.arange(parameters.N):
            w, v = np.linalg.eig(J[:,i].reshape(4,4))
            Lambda[indices[0][i]] = np.log(np.amax(np.abs(w))) / (tf_ind+Ni*h_step)
        #pcol = plt.pcolor(x0, px0, Lambda.reshape((resolution, resolution)), cmap='magma', vmin=0, vmax=np.nanmax(Lambda), shading='auto')
        pcol = plt.pcolor(x0, px0, Lambda.reshape((resolution, resolution)), cmap='magma', vmin=0, vmax=np.nanmax(Lambda))
        if tf_ind > 0.0:
            cb.remove()  # drop the previous frame's colorbar before adding a new one
        cb = fig1.colorbar(pcol)
        tf_ind += Ni * h_step
        plt.title('time: %i' %tf_ind)
        plt.xlabel('$x$')
        plt.ylabel('$p_x$')
        plt.xlim(x_min, x_max)
        plt.ylim(p_min, p_max)
        clear_output(wait=True)
        display(plt.gcf())


def diff_motion_2e(y, atom, integration):
    """Objective for the periodic-orbit search: squared distance between a
    point on the section and its Poincare return; zero on a periodic orbit."""
    Y0 = np.array([y[0], y[1], 0.0, np.sqrt(2.0 * (atom.E - potential_2e(y, atom)))])
    tf, Yf = eqn_motion_2e_section(Y0, atom, integration)
    return sum((Yf-Y0) ** 2)


# Initial guess for the periodic orbit (shadows the grid's scalar meaning of x0/y0;
# the linspace x0 above is no longer needed at this point)
x0 = 2.5
y0 = 0.1
if my_atom.E - potential_2e(np.array([x0,y0]), my_atom) < 0:
    print('The initial guess does not belong to the initial energy level.')
else:
    init_guess = np.array([x0,y0])
    parameters = Integration(N=1, t_f=100.0, step=0.05, precision=np.float64)
    # Minimize the return-map mismatch to locate the periodic orbit
    Y0 = fmin(diff_motion_2e, init_guess, args=(my_atom, parameters), xtol=1.0e-8, maxiter=500, disp=False)
    if diff_motion_2e(Y0, my_atom, parameters) > 1.0e-10:
        print('The periodic orbit has not been found')
        plt.show()
    else:
        Y0 = np.array([Y0[0],Y0[1],0.0,np.sqrt(2.0 * (my_atom.E - potential_2e(Y0, my_atom)))])
        tf, yf = eqn_motion_2e_section(Y0, my_atom, parameters)
        Tt = np.linspace(0.0, tf, 1000)
        parameters.t_f = tf
        Yt = eqn_motion_2e(Y0[:4], my_atom, my_field, parameters, traj=True, output_times=Tt)
        # Overlay both projections of the orbit: (x, p_x) in red, (y, p_y) in blue
        plt.plot(Yt[0], Yt[2], color='r', linewidth=2)
        plt.plot(Yt[1], Yt[3], color='b', linewidth=2)
        clear_output(wait=True)
        #display(plt.gcf())
        plt.show()
        print('The period of the periodic orbit is {:.4f} a.u.'.format(tf))

# + [markdown] id="Q-wLCkoYmRZs"
# ### Figure 3: Typical trajectories of Hamiltonian (1)
# The trajectories of Hamiltonian (1) are integrated for random initial conditions in the ground
# state. The positions of the two electrons are displayed as a function of time in red and in
# blue. The type of outcome ('No ionization', 'Single ionization' or 'Double ionization') is
# indicated in each panel.
# # > *Default parameters: $a=1$, $b=1$, ${\cal E}_g=-2.24$, $\lambda=780 \; \mathrm{nm}$, pulse duration $t_d = 8 \; \mathrm{l.c.}$, $I=2\times 10^{14} \; \mathrm{W}/\mathrm{cm}^2$, number of trajectories $N=2$, plot range in $x$: $[-10,20]$.* # # > *Typical execution time: 3 seconds.* # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 501} executionInfo={"elapsed": 2623, "status": "ok", "timestamp": 1603961658733, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiDuaMJBJyPVQYlNzXYWVH66FhPBipdSStoVltpeU0=s64", "userId": "10196882246576949447"}, "user_tz": -60} id="EWM_onLKGrk-" jupyter={"source_hidden": true} outputId="4a985f15-79ca-4e94-cc52-65c216cbbcba" tags=["hide-input"] #@title Execute Figure 3 Intensity_Wcm2 = 2e14 #@param {type:"number"} Number_of_Trajectories = 2 #@param {type:"integer"} x_min = -10 #@param {type:"number"} x_max = 20 #@param {type:"number"} my_atom = Atom(a=a, b=b, ground_state_energy=E_g) params_envelop = (2, tau_lc-4, 2, 2) my_field = Field(wavelength=lambda_nm, intensity=Intensity_Wcm2, envelope=envelope_name, params_envelop=params_envelop) parameters = Integration(N=Number_of_Trajectories, t_f=np.sum(my_field.params) * my_field.period, step=h_step, precision=np.float64) X0 = generate_initial_conditions(my_atom, parameters) Tt = np.linspace(0.0, parameters.t_f, 1000) for it in range(Number_of_Trajectories): X0_traj = X0[:, it] Xt = eqn_motion_2e(X0_traj, my_atom, my_field, parameters, traj=True, output_times=Tt) R1 = np.abs(Xt[0, -1]) R2 = np.abs(Xt[1, -1]) if (R1 <= my_atom.thresh) and (R2 <= my_atom.thresh): label_ionization = "No ionization" elif (R1 >= my_atom.thresh) and (R2 >= my_atom.thresh): label_ionization = "Double ionization" else: label_ionization = "Single ionization" plt.figure(figsize=(10.5,3.5)) plt.plot(Tt/my_field.period, Xt[0], color='b', linewidth=2) plt.plot(Tt/my_field.period, Xt[1], color='r', linewidth=2) plt.annotate(label_ionization, (8, 10)) 
plt.xlabel('$t/T$') plt.ylabel('Position (a.u.)') plt.xlim([0, parameters.t_f/my_field.period]) plt.ylim(x_min, x_max) plt.show() # + [markdown] id="qLFeamV9mtjZ" # ## Single ionization # By switching on the field, the outer electron is picked up and swept away from the nucleus. Consequently, its effective Hamiltonian is: # $$ # {\mathcal H}_1=\frac{p_x^2}{2} + E_0 x f(t) \sin\omega t. \tag{2} # $$ # We notice that Hamiltonian ${\mathcal H}_1$ is integrable. Its solutions are approximately composed of linear escape from the nucleus (at time $t_0$) modulated by the action of the field [2,37,38] (see the red trajectory in Fig. 3). # # For the inner electron, the effective Hamiltonian contains the interaction with the nucleus and with the laser field: # $$ # {\mathcal H}_2=\frac{p_y^2}{2}-\frac{2}{\sqrt{y^2+a^2}}+yE_0\sin\omega t. \tag{3} # $$ # # In the absence of the field ($E_0=0$), ${\mathcal H}_2$ is also integrable and the inner electron is confined on a periodic orbit. Since it stays close to the nucleus, its approximate period is $2\pi a^{3/2}/\sqrt{2}$ obtained from the harmonic approximation, as observed in Fig. 3. # + [markdown] id="JKW7Uco5nJ5b" # ## Sequential double ionization (SDI) # Once an electron has been ionized (usually during the ramp-up of the field), the other electron is left with the nucleus and the field. Its effective Hamiltonian is ${\mathcal H}_2$. A contour plot of the electron excursions after two laser cycles and a Poincar\'e section of ${\mathcal H}_2$ are depicted in Fig. 4. They clearly show two distinct regions: # The first one is the core region which is composed of a collection of invariant tori which are slight deformations of the ones obtained in the integrable case ${\mathcal H}_2$ without the field. This elliptic region is organized around a main elliptic periodic orbit which has the same period as the field $2\pi/\omega$. In this region, the electrons are fairly insensitive to the field, and do not ionize. 
The second region is the one outside the core, where trajectories ionize quickly. It corresponds to sequential double ionization. In between these two regions, any weak interaction (with the outer electron, for instance) may move the inner electron confined on the outermost regular tori (but still inside the brown elliptical region) to the outer region, where it ionizes quickly.
# 
# If the laser intensity $I$ is too small, then the phase space is filled with invariant tori and no sequential double ionization can occur because the motion is regular. The sequential double ionization probability then depends on the size of the regular region around the elliptic periodic orbit, and hence on $I$.
# We have numerically computed the location and the stability of this periodic orbit [35]. When it exists, this periodic orbit stays elliptic in the whole range of intensities we have considered. On the stroboscopic plot (with frequency $\omega$), the periodic orbit is located at $y=0$. In Fig. 4, the momentum $p_y$ of the periodic orbit on the stroboscopic plot is represented as a function of $I$. We notice that for a large set of intensities in the range $[10^{14}, 10^{16}]~ \mbox{W} / \mbox{cm}^{2}$, this periodic orbit is located close to $p_y=0$. For intensities larger than a critical intensity $I_c$, the periodic orbit does not exist, and no major islands of regularity remain. Therefore, it is expected that the sequential double ionization probability is equal to one in this range of intensities, as observed on the probability curve in Fig. 1. The location of the local maximum of the potential of Hamiltonian (3) predicts that the intensity $I_c$ is approximately independent of $\omega$, and is given by $I_c\approx 16/(27a^4)$. 
# + [markdown] id="ItL_PfjkoFF1" # ### Figure 4: Phase-space picture # **Upper panel:** Contour plot of the electron location $y(t)$ of Hamiltonian (3), and Poincarรฉ sections (stroboscopic plot) of selected trajectories in the elliptic central region. The Poincarรฉ sections are computed by displaying $(y,p_y)$ at each period of the laser field. The inset shows the central periodic orbit. It is computed by determining the fixed point under the Poincarรฉ section. # # **Lower panel:** Momentum of the central periodic orbit (on the Poincarรฉ section) of Hamiltonian (3) as a function of the laser intensity. # # > *Default parameters: $a=1$, $b=1$, ${\cal E}_g=-2.24$, $\lambda = 780 \; \mathrm{nm}$, pulse duration $t_d = 8 \; \mathrm{l.c.}$, $I = 5\times10^{15} \; \mathrm{W}/\mathrm{cm}^2$, resolution: $256$, integration time $t_f = 2 \; \mathrm{l.c.}$.* # # > *Typical execution time: 70 seconds.* # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 666} executionInfo={"elapsed": 69100, "status": "ok", "timestamp": 1603893628185, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiDuaMJBJyPVQYlNzXYWVH66FhPBipdSStoVltpeU0=s64", "userId": "10196882246576949447"}, "user_tz": -60} id="3LKFMJAiG7Mk" jupyter={"source_hidden": true} outputId="5864f5e3-8c7f-4ba8-99e1-44967923068d" tags=["hide-input"] #@title Execute Figure 4 Intensity_Wcm2 = 5e15 #@param {type:"number"} resolution = 256 #@param ["128", "256", "512", "1024"] {type:"raw", allow-input: true} tf_lc = 2 #@param {type:"number"} y_min = -4.5 y_max = 4.5 py_min = -1.5 py_max = 1.5 PStraj = 7 Points_per_PStraj = 300 Imin = 1.70e16 Imax = 1.86e16 Number_of_intensities = 100 init_guess = [0.0, -0.02] params_envelop = (2, tau_lc-4, 2, 2) my_field = Field(wavelength=lambda_nm, intensity=Intensity_Wcm2, params_envelop=params_envelop) parameters = Integration(step=my_field.period/np.floor(my_field.period/h_step),t_f=tf_lc * my_field.period) y = np.linspace(y_min, 
y_max, resolution) py = np.linspace(py_min, py_max, resolution) Yf = eqn_motion_1e(np.meshgrid(y, py), my_atom, my_field, parameters) fig1 = plt.figure(figsize=(9.5,7.5)) pcol = plt.pcolor(y, py, np.log10(np.abs(Yf[0])), cmap='pink', vmin=-2, vmax=3) #pcol = plt.pcolor(y, py, np.log10(np.abs(Yf[0])), cmap='pink', vmin=-2, vmax=3, shading='auto') fig1.colorbar(pcol) plt.xlabel('$y$') plt.ylabel('$p_y$') plt.xlim(y_min, y_max) plt.ylim(py_min, py_max) parameters = Integration(step=my_field.period/np.floor(my_field.period/h_step), t_f=Points_per_PStraj * my_field.period, precision=np.float64) t_out = my_field.period * np.arange(3, Points_per_PStraj +1) y = np.linspace(0.0, my_atom.a, PStraj) Yt = eqn_motion_1e(np.vstack((y, np.zeros(PStraj))), my_atom, my_field, parameters, traj=True, output_times=t_out) plt.plot(Yt[0], Yt[1], marker='.', color='k', linestyle='None') def diff_motion_1e(y, atom, field, integration): yf = eqn_motion_1e(y, atom, field, integration) return yf-y parameters = Integration(step=my_field.period/np.floor(my_field.period/(h_step/10)), t_f=my_field.period, precision=np.float64) Y0 = fsolve(diff_motion_1e, init_guess, args=(my_atom, my_field, parameters), xtol=1e-08, maxfev=30) parameters.t_f = 1.001 * my_field.period Yt = eqn_motion_1e(Y0, my_atom, my_field, parameters, traj=True, output_times=np.linspace(0.0, parameters.t_f, 500)) plt.arrow(1.1, 0.7, -1.088, -0.654, width = 0.01, head_width=0.08) ax_inset = fig1.add_axes([0.53, 0.7, 0.2, 0.15]) ax_inset.plot(Yt[0], Yt[1]) ax_inset.set_xlabel('$y$') ax_inset.set_ylabel('$p_y$') plt.show() parameters = Integration(step=my_field.period/np.floor(my_field.period/h_step), t_f=my_field.period, precision=np.float64) Intensity_range = np.linspace(Imin, Imax, Number_of_intensities) pdata = [] for intensity_element in Intensity_range: my_field = Field(wavelength=lambda_nm, intensity=intensity_element) Yf, info, ier, msg = fsolve(diff_motion_1e, init_guess, args=(my_atom, my_field, parameters), xtol=1e-06, 
maxfev=30, full_output=True) if ier == 1: pval = Yf[1] init_guess = Yf else: pval = np.nan pdata = np.append(pdata, pval) plt.figure(figsize=(7.5,2.5)) plt.plot(Intensity_range, pdata, linewidth=2, color='b') plt.xlabel('$I$ (W/cm$^2$)') plt.ylabel('$p_y$') plt.xlim(Imin, Imax) plt.ylim(-0.025, 0.02) plt.show() # + [markdown] id="SImPJbmBpfTq" # ## Nonsequential double ionization (NSDI) # As noted before, when the field is turned on, its action is concentrated on only one electron, the outer one, as a first step. The field drives the outer electron away from the nucleus, leaving the inner electron nearly unaffected by the field because its position remains small. # From the recollision process [2,3], the outer electron might come back close to the nucleus during the pulse plateau, if the field amplitude is not too large. In this case, it transfers a part of its energy to the inner electron through the electron-electron interaction term. # From then on, two outcomes are possible: If the energy brought in by the outer electron is sufficient for the other electron to escape from the regular region (as in Fig. 3, upper panel), then it might ionize together with the outer electron. The maximum energy ${\mathcal E}_x$ of the outer electron when it returns to the inner region (after having left the inner region with a small momentum $p_0$ close to zero) is obtained from Hamiltonian (2) and is # ${\mathcal E}_x= \kappa U_p$, where $U_p=E_0^2/(4\omega^2)$ is the ponderomotive energy and $\kappa\approx 3.17$ is the maximum recollision kinetic energy in units of $U_p$ [2,37,38]. We complement the recollision scenario (which focuses on the outer electron) by providing the phase space picture of the inner electron: In order to ionize the core electron, the energy brought back by the outer electron has to be of order of the energy difference between the core ($y=0$) and the boundary of the stable region ($y=y_m$) of ${\mathcal H}_2$ (see Fig. 
4) which is equal to # $$ # \Delta {\mathcal E}_y=2-\frac{2}{\sqrt{y_m^2+a^2}}. \tag{4} # $$ # A good approximation to $y_m=y_m(E_0)$ is given by the value where the potential is locally maximum, i.e. $E_0= 2y_m/(y_m^2+a^2)^{3/2}$. # The equal-sharing relation which links the classical picture of the outer electron $x$ with the one of the inner electron $y$, # $$ # \Delta {\mathcal E}_y=\frac{{\mathcal E}_x}{2}= \frac{\kappa}{2\omega^2}\frac{y_m^2}{(y_m^2+a^2)^3}, \tag{5} # $$ # defines (through an implicit equation) the expected value of the field $E_0^{(c)}$ for maximal NSDI, because it describes the case when each outer electron brings back enough energy to ionize the inner electron, while remaining ionized itself. However, fulfilling this energy requirement does not guarantee NSDI: The outcome depends on the number and efficiency of recollisions. The predicted value of the amplitude $E_0^{(c)}$ as given by Eq. (5) corresponds to an intensity $I^{(c)}$ given below which agrees very well with the simulations shown in Fig. 1. # In a wide range of frequencies, an accurate expansion of $E_0^{(c)}$ is obtained from Eqs. (4)-(5) and given by # $$ # E_0^{(c)}= \frac{4\omega}{\sqrt{\kappa}}-\left(\frac{2\omega}{\sqrt{\kappa}} \right)^{3/2}+O\left(\frac{4\omega^2}{\kappa}\right), \tag{6} # $$ # for sufficiently small $\omega$. To leading order the corresponding intensity varies as $\omega^2$. The approximate intensity given by Eq. (6) is in excellent agreement with Fig. 1. # When the field $E_0$ is too small, then the outer electron cannot gain enough energy to ionize the inner electron. When the field $E_0$ is too large, then the outer electron does not recollide since it leaves the interaction region nearly linearly. These two limits explain the bell shape of the resulting nonsequential double ionization probability, which, when put together with the monotonic rise of the SDI probability at higher intensities, adds up to the knee in question. 
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 63} executionInfo={"elapsed": 954, "status": "ok", "timestamp": 1603891694948, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiDuaMJBJyPVQYlNzXYWVH66FhPBipdSStoVltpeU0=s64", "userId": "10196882246576949447"}, "user_tz": -60} id="Vrpwvue2ldeb" jupyter={"source_hidden": true} outputId="508ef530-9f53-4f35-89fe-3a349236bb75" tags=["hide-input"] #@title def fexp(f): return int(np.floor(np.log10(abs(f)))) if f != 0 else 0 def fman(f): return f/10**fexp(f) my_atom = Atom(a=a, b=b, ground_state_energy=E_g) my_field = Field(wavelength=lambda_nm) kappa = 3.17 I0c = 3.51e16 * (4*my_field.frequency/np.sqrt(kappa)-(2*my_field.frequency/np.sqrt(kappa)) ** 1.5) **2 I0_c = 3.51e16 * 16/(27*my_atom.a **4) display(Math(r'I_0^{{(c)}} \approx {:.2} \times 10^{{{}}} ~{{\rm W / cm}}^2 \\ I_c \approx {:.2} \times 10^{{{}}} ~{{\rm W / cm}}^2'.format(fman(I0c),fexp(I0c),fman(I0_c),fexp(I0_c)))) # + [markdown] id="nmdn-GnL1LJD" # # # --- # # # <NAME>. acknowledges financial support from the PICS # program of the CNRS. This work is partially funded by # NSF. We thank <NAME>, <NAME>, and <NAME> # for useful discussions. # # --- # # # ## References # # [[1](http://link.aps.org/doi/10.1103/PhysRevLett.69.2642)] <NAME>, <NAME>, <NAME>, and <NAME>. # Kulander, Phys. Rev. Lett. 69, 2642 (1992). # # [[2](http://link.aps.org/doi/10.1103/PhysRevLett.71.1994)] <NAME>, Phys. Rev. Lett. 71, 1994 (1993). # # [[3](http://link.aps.org/doi/10.1103/PhysRevLett.70.1599)] <NAME>, <NAME>, <NAME>, and <NAME>, Phys. Rev. Lett. 70, 1599 (1993). # # [[4](http://link.aps.org/doi/10.1103/PhysRevLett.73.1227)] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. # Schafer, and <NAME>, Phys. Rev. Lett. 73, 1227 # (1994). # # [[5a](http://dx.doi.org/10.1088/0953-4075/29/6/005), [5b](http://dx.doi.org/10.1088/0953-4075/32/14/101)] <NAME> and <NAME>, J. Phys. B 29, L197 (1996); J. Phys. B 32, L335 ( 1999). 
# # [[6](http://link.aps.org/doi/10.1103/PhysRevLett.85.3781)] <NAME>, <NAME>, <NAME>, and <NAME>, Phys. # Rev. Lett. 85, 3781 (2000). # # [[7](http://link.aps.org/doi/10.1103/PhysRevLett.85.4707)] <NAME>, <NAME>, and <NAME>, Phys. Rev. Lett. 85, 4707 (2000). # # [[8](http://link.aps.org/doi/10.1103/PhysRevA.63.043414)] <NAME> and <NAME>, Phys. Rev. A 63, 043414 # (2001). # # [[9](http://link.aps.org/doi/10.1103/PhysRevA.63.043416)] <NAME>, <NAME>, <NAME>, and <NAME>, Phys. Rev. A 63, 043416 (2001). # # [[10](https://doi.org/10.1364/OE.8.000431)] <NAME>, <NAME>, and <NAME>, Opt. Express 8, 431 (2001). # # [[11](http://link.aps.org/doi/10.1103/PhysRevLett.93.053201)] <NAME>, <NAME>, and <NAME>, Phys. Rev. Lett. 93, 053201 (2004). # # [[12](http://link.aps.org/doi/10.1103/PhysRevLett.94.093002)] <NAME>, <NAME>, <NAME>, and <NAME>, Phys. Rev. Lett. 94, 093002 (2005). # # [[13](http://link.aps.org/doi/10.1103/PhysRevLett.95.193002)] <NAME> and <NAME>, Phys. Rev. Lett. 95, 193002 # (2005). # # [[14](http://link.aps.org/doi/10.1103/PhysRevLett.94.063002)] <NAME>, <NAME>, and <NAME>, Phys. Rev. Lett. 94, 063002 (2005). # # [[15](http://link.aps.org/doi/10.1103/PhysRevA.76.030701)] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, Phys. Rev. A 76, 030701(R) (2007). # # [[16](http://link.aps.org/doi/10.1103/PhysRevLett.98.203002)] <NAME>, <NAME>, <NAME>, and # <NAME>, Phys. Rev. Lett. 98, 203002 (2007). # # [[17](http://link.aps.org/doi/10.1103/PhysRevA.77.043420)] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. # Schneider, <NAME>, and <NAME>, Phys. Rev. A # 77, 043420 (2008). # # [[18](http://dx.doi.org/10.1140/epjd/e2003-00272-8)] <NAME> and <NAME>, Eur. Phys. J. D 27, 287 (2003). # # [[19](http://dx.doi.org/10.1038/nphys310)] <NAME>, <NAME>, <NAME>, <NAME>. # English, <NAME>, <NAME>, <NAME>, <NAME>. # Turcu, <NAME>, and <NAME> et al., Nature Phys. # 2, 379 (2006). # # [[20](http://dx.doi.org/10.1038/35015033)] <NAME>, <NAME>, <NAME>, <NAME>, A. # Staudte, <NAME>, <NAME>, <NAME>, M. 
# Vollmer, and <NAME>, Nature (London) 405, 658 (2000). # # [[21](http://link.aps.org/doi/10.1103/PhysRevA.48.R2531)] <NAME>, <NAME>, <NAME>, <NAME>, and S. # Watanabe, Phys. Rev. A 48, R2531 (1993). # # [[22](http://dx.doi.org/10.1088/0953-4075/31/6/008)] <NAME>, <NAME>, and <NAME>, J. Phys. B 31, 1201 (1998). # # [[23](http://link.aps.org/doi/10.1103/PhysRevA.62.023403)] <NAME> and <NAME>, Phys. Rev. A 62, 023403 # (2000). # # [[24](http://link.aps.org/doi/10.1103/PhysRevA.63.040701)] <NAME> and <NAME>, Phys. Rev. A 63, 040701 (2001). # # [[25](http://link.aps.org/doi/10.1103/PhysRevLett.87.153001)] <NAME>, <NAME>, and <NAME>, Phys. Rev. Lett. 87, 153001 (2001). # # [[26](http://link.aps.org/doi/10.1103/PhysRevLett.92.203001)] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, Phys. Rev. Lett. 92, 203001 (2004). # # [[27](http://link.aps.org/doi/10.1103/PhysRevLett.78.1884)] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, Phys. Rev. Lett. 78, 1884 (1997). # # [[28](http://dx.doi.org/10.1088/0953-4075/31/6/001)] <NAME> and <NAME>, J. Phys. B 31, L249 # (1998). # # [[29](http://link.aps.org/doi/10.1103/PhysRevA.67.043402)] <NAME> and <NAME>, Phys. Rev. A 67, 043402 (2003). # # [[30](http://link.aps.org/doi/10.1103/PhysRevLett.89.113001)] <NAME>, <NAME>, and <NAME>, Phys. Rev. Lett. 89, 113001 (2002). # # [[31](http://link.aps.org/doi/10.1103/PhysRevLett.99.013003)] <NAME>, <NAME>, <NAME>, and <NAME>, Phys. Rev. Lett. 99, 013003 (2007). # # [[32](http://link.aps.org/doi/10.1103/PhysRevA.44.5997)] <NAME> and <NAME>, Phys. Rev. A 44, 5997 (1991). # # [[33](http://link.aps.org/doi/10.1103/PhysRevA.38.3430)] <NAME>, <NAME>, and <NAME>, Phys. Rev. A 38, 3430 (1988). # # [[34](http://link.aps.org/doi/10.1103/PhysRevA.50.378)] <NAME>, <NAME>, and <NAME>, Phys. Rev. A 50, 378 (1994). # # [[35](http://chaosbook.org)] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, Chaos: Classical and Quantum (Niels Bohr # Institute, Copenhagen, 2008). 
# # [[36](http://link.aps.org/doi/10.1103/PhysRevA.74.043417)] <NAME>, <NAME>, and <NAME>, Phys. Rev. A 74, 043417 (2006). # # [[37](http://link.aps.org/doi/10.1103/PhysRevA.50.1540)] <NAME>, <NAME>, and <NAME>, Phys. Rev. A 50, # 1540 (1994). # # [[38](http://dx.doi.org/10.1080/09500340410001729582)] <NAME>, <NAME>, and <NAME>, <NAME>. # Opt. 52, 411 (2005). # # #
Notebook_Article.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Load the face-mask training images listed in train_labels.csv, resize each to a
# fixed square size, and pair it with a binary class label
# (with_mask = 1, without_mask = 0), ready for model training.

import tensorflow as tf
import matplotlib.pyplot as plt
import cv2
from PIL import Image
import numpy as np
import pandas as pd

# CSV with one row per training image: its 'filename' and its class 'label'.
labels = pd.read_csv("/Volumes/SEANDUAL/257-support/with-without-mask/archive/maskdata/maskdata/train_labels.csv")  # loading the labels

labels.head()  # will display the first five rows in labels dataframe

# Build [filename, absolute file path] pairs for every listed image.
file_paths = [[fname, '/Volumes/SEANDUAL/257-support/with-without-mask/archive/maskdata/maskdata/train/train/' + fname] for fname in labels['filename']]
# file_paths

images = pd.DataFrame(file_paths, columns=['filename', 'filepaths'])
images.head()

# Inner join keeps only images that have both a file path and a label.
train_data = pd.merge(images, labels, how='inner', on='filename')
train_data.head()

data = []  # plain Python list of [image-array, class] pairs; converted to a numpy array below
image_size = 100  # target edge length in pixels; one can take another size too

for i in range(len(train_data)):
    img_array = cv2.imread(train_data['filepaths'][i])  # OpenCV loads images as BGR, not RGB
    if img_array is None:
        # cv2.imread returns None (instead of raising) for a missing or unreadable
        # file; fail loudly with the offending path rather than crashing in cvtColor.
        raise FileNotFoundError("Could not read image: " + train_data['filepaths'][i])
    img_array = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)  # convert to the expected RGB encoding
    new_img_array = cv2.resize(img_array, (image_size, image_size))  # resizing the image array
    # encoding the labels: with_mask = 1 and without_mask = 0
    data.append([new_img_array, 1 if train_data['label'][i] == 'with_mask' else 0])

# data = [[img-array, img-class], ...] with img-class: with_mask = 1, without_mask = 0.
# Each element pairs a (image_size, image_size, 3) uint8 array with an int, so the
# outer sequence is ragged; dtype=object is required (NumPy >= 1.24 raises
# ValueError when np.array() is given ragged nested sequences without it).
data = np.array(data, dtype=object)
data[0][0].shape  # (image_size, image_size, 3): think of it as a 2d grid of rgb triples

plt.imshow(data[0][0])
Sean/Simple-Image-Extraction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Imports # + #Omid55 import requests import pandas as pd import pickle as pk import csv import time import numpy as np import random from scipy.stats import pearsonr as corr import seaborn as sns import matplotlib.pylab as plt % matplotlib inline # + URL = { 'BASE': 'https://{proxy}.api.pvp.net/api/lol/{region}/{rest}', 'STATIC_BASE': 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/{rest}', 'MATCHLIST_URL': 'v2.2/matchlist/by-summoner/{summonerId}?seasons=SEASON{season}', 'MATCH_URL': 'v2.2/match/{matchId}', 'CHAMPION_URL': 'champion/{id}?champData=all', 'SUMMONER_URL': 'v1.4/summoner/{summonerId}', #'SUMMONER_WINS_LOSSES_URL': 'v2.5/league/by-summoner/{summonerId}' # NOT TRUE (VERY LARGE NUMBERS) 'SUMMONER_WINS_LOSSES_PER_CHAMPION_URL': 'v1.3/stats/by-summoner/{summonerId}/ranked?season=SEASON{season}' # /api/lol/{region}/v1.3/stats/by-summoner/{summonerId}/ranked: this is for getting the experience # of player (summonerId) with different champions and also ALL EXPERIENCE one person has # /api/lol/{region}/v1.3/game/by-summoner/{summonerId}/recent: games that one summoner plays # with other people } REGIONS = { 'north america': 'na', 'europe west': 'euw' } # - class Match(object): def __init__(self): self.winners = [] self.losers = [] self.duration = -1 # + class RiotAPI(object): def __init__(self, api_key, region=REGIONS['north america']): self.api_key = api_key self.region = region self.champions = {} # self.champions_allinfo = {} # self.champions_allinfo_saved = False self.summoner_wins_losses = {} def _request(self, base, rest, params={}): args = {'api_key': self.api_key} args.update(params) response = requests.get( URL[base].format( rest=rest, proxy=self.region, region=self.region, ), params=args ) #print(response.request.url) 
time.sleep(1.2) return response.json() def _base_request(self, rest, params={}): return self._request('BASE', rest, params) def _static_request(self, rest, params={}): return self._request('STATIC_BASE', rest, params) # functions def get_summoner_level(self, sid): rest = URL['SUMMONER_URL'].format( summonerId=sid ) return self._base_request(rest) def _get_list_of_match_ids(self, sid, season): rest = URL['MATCHLIST_URL'].format( summonerId=sid, season=season ) result = self._base_request(rest) if 'matches' in result: for match in result['matches']: yield match['matchId'] def _get_match(self, mid): rest = URL['MATCH_URL'].format( matchId=mid ) result = self._base_request(rest) return result def get_champion_all_info(self, championId): rest = URL['CHAMPION_URL'].format( id=championId ) return self._static_request(rest) # def get_champion_index(self, championId): # if championId not in self.champions: # myid = len(self.champions) # self.champions_allinfo[myid] = self.get_champion_all_info(championId) # if not self.champions_allinfo_saved and len(self.champions_allinfo) == 132: # with open('DATAChampionsAllInfo.pkl', 'wb') as output: # pk.dump(self.champions_allinfo, output) # self.champions_allinfo_saved = True # self.champions[championId] = myid # return self.champions[championId] def get_matches_champions_and_summonerIds_before_game(self, season, just_Ids=True): #for sid in range(1,1000000): #for sid in range(1000000,5000000): with open('summonerId_list.pkl', 'rb') as f: all_summoners = pk.load(f) summoners = [ all_summoners[i] for i in sorted(random.sample(range(len(all_summoners)), 1000)) ] for sid in summoners: matchids = self._get_list_of_match_ids(sid, season) for matchid in matchids: match = self._get_match(matchid) if 'participants' in match: losers = [] winners = [] winners_sid = [] losers_sid = [] for member in match['participants']: suId = [pi['player']['summonerId'] for pi in match['participantIdentities'] if 
pi['participantId']==member['participantId']][0] if member['stats']['winner']: winners_sid.append(suId) if just_Ids: winners += [member['championId']] else: winners += (self.get_champion_capabilities(member['championId'])) else: losers_sid.append(suId) if just_Ids: losers += [member['championId']] else: losers += (self.get_champion_capabilities(member['championId'])) data = {'matchId': match['matchId'], 'duration': match['matchDuration'], 'champions': winners + losers, 'summoners': winners_sid + losers_sid} yield data # -------------------------------------------------------------------------------------- def get_summoner_wins_losses(self, sid, season): key = str(sid)+','+str(season) if key not in self.summoner_wins_losses: res = {} rest = URL['SUMMONER_WINS_LOSSES_PER_CHAMPION_URL'].format( summonerId=sid, season=season ) result = self._base_request(rest) if 'champions' in result: for ch in result['champions']: if ch['id']: res[ch['id']] = [ch['stats']['totalSessionsWon'], ch['stats']['totalSessionsLost']] self.summoner_wins_losses[key] = res return self.summoner_wins_losses[key] def get_win_stats(self, team_summoners, team_champs, season): # -------------------------------------------------------------------------------------- def get_matches_champions_and_summonerIds_before_game_for_those_summoners_have_similar_stats_2_seasons(self, season, just_Ids=True): with open('summonerId_list.pkl', 'rb') as f: all_summoners = pk.load(f) for sid in all_summoners: #for sid in range(1,1000000000): matchids = self._get_list_of_match_ids(sid, season) for matchid in matchids: nodata = 0 match = self._get_match(matchid) if 'participants' in match: losers = [] winners = [] winners_sid = [] losers_sid = [] for member in match['participants']: suId = [pi['player']['summonerId'] for pi in match['participantIdentities'] if pi['participantId']==member['participantId']][0] if not self.does_she_have_similar_history_in_two_seasons(suId, season-1, season): nodata += 1 if nodata >= 2: 
break if member['stats']['winner']: winners_sid.append(suId) if just_Ids: winners += [member['championId']] else: winners += (self.get_champion_capabilities(member['championId'])) else: losers_sid.append(suId) if just_Ids: losers += [member['championId']] else: losers += (self.get_champion_capabilities(member['championId'])) if nodata >= 2: continue data = {'matchId': match['matchId'], 'duration': match['matchDuration'], 'champions': winners + losers, 'summoners': winners_sid + losers_sid} yield data def does_she_have_similar_history_in_two_seasons(self, sid, season1, season2): h1 = self.get_summoner_wins_losses(sid, season1) h2 = self.get_summoner_wins_losses(sid, season2) c1 = len(set(list(h1.keys()) + list(h1.keys()))) return c1 !=0 and len(list(set(h1.keys()) & set(h2.keys()))) / float(c1) >= 0.8 # h1 = self.get_summoner_wins_losses(sid, season1) # h2 = self.get_summoner_wins_losses(sid, season2) # wr1 = [] # wr2 = [] # for k in list(set(h1.keys()) & set(h2.keys())): # wr1.append(h1[k][0]/float(h1[k][0]+h1[k][1])) # wr2.append(h2[k][0]/float(h2[k][0]+h2[k][1])) # if len(wr1)<3 or len(wr2)<3: # return False # c = corr(wr1, wr2) # return c[1]<1 and c[0]>0.3 with open('../MyKey1.key', 'r') as key_file: KEY = key_file.readline().strip() api = RiotAPI(KEY) # - # # Run the script # + LIMIT = 1400 dt = api.get_matches_champions_and_summonerIds_before_game_for_those_summoners_have_similar_stats_2_seasons(season=2016) data = [] for d in dt: data.append(d) if not len(data) % 10: print(len(data)) if len(data) > LIMIT: break # - print(len(data), 'samples are saving...') with open('ReallyGoodSummonersDataChampionProficiencyPlaynet.pkl', 'wb') as dfile: pk.dump(data, dfile) print('Done.') goods = [] for d in data: good = 0 for i in range(10): if len(d['champions'])==10 and api.does_she_have_similar_history_in_two_seasons(d['summoners'][i], 2015, 2016): good += 1 goods.append(good) plt.hist(goods) # + dt = [] #sampl_data = [data[i] for i in 
sorted(random.sample(range(len(data)), 1500))] for d in data: if len(d['champions'])==10: winner = api.get_win_stats(d['summoners'][:5], d['champions'][:5], 2015) if not winner: continue loser = api.get_win_stats(d['summoners'][5:], d['champions'][5:], 2015) if not loser: continue dt.append(winner + loser) if len(dt) % 10 == 0: print(len(dt)) dataset = pd.DataFrame(data=dt, columns=['winner_avg_game_count', 'winner_std_game_count', 'winner_avg_win_ratio', 'winner_std_win_ratio', 'winner_avg_same_champion_game_count', 'winner_std_same_champion_game_count', 'winner_avg_same_champion_win_ratio', 'winner_std_same_champion_win_ratio', 'loser_avg_game_count', 'loser_std_game_count', 'loser_avg_win_ratio', 'loser_std_win_ratio', 'loser_avg_same_champion_game_count', 'loser_std_same_champion_game_count', 'loser_avg_same_champion_win_ratio', 'loser_std_same_champion_win_ratio']) dataset.to_csv('ReallyGoodStat2015_for_Classification2016_Dataset.csv') # - # # Filtering summoners using history of games in 2015 and 2016 stats def plot_distribution_of_correlation(summoner_ids): r = [] p = [] for sid in summoner_ids: h1 = api.get_summoner_wins_losses(sid, 2015) h2 = api.get_summoner_wins_losses(sid, 2016) wr1 = [] wr2 = [] for k in list(set(h1.keys()) & set(h2.keys())): wr1.append(h1[k][0]/float(h1[k][0]+h1[k][1])) wr2.append(h2[k][0]/float(h2[k][0]+h2[k][1])) # wr1.append(h1[k][0]+h1[k][1]) # wr2.append(h2[k][0]+h2[k][1]) c = corr(wr1, wr2) r.append(c[0]) p.append(c[1]) plt.hist(np.array(r)[np.where(np.array(p)<0.05)]) # Selecting summoners based on 80% of conflicting champion choice with open('summonerId_list.pkl', 'rb') as dfile: summoners = pk.load(dfile) good_sids = [] for sid in summoners: h1 = api.get_summoner_wins_losses(sid, 2015) h2 = api.get_summoner_wins_losses(sid, 2016) c1 = len(set(list(h1.keys()) + list(h1.keys()))) if c1 !=0 and len(list(set(h1.keys()) & set(h2.keys()))) / c1 < 0.8: continue good_sids.append(sid) print(len(good_sids)) 
plot_distribution_of_correlation(good_sids) # Selecting summoners based on positive (larger than 0.3) of 2015 and 2016 win ratio with open('summonerId_list.pkl', 'rb') as dfile: summoners = pk.load(dfile) gggood_sids = [] for sid in summoners: h1 = api.get_summoner_wins_losses(sid, 2015) h2 = api.get_summoner_wins_losses(sid, 2016) wr1 = [] wr2 = [] for k in list(set(h1.keys()) & set(h2.keys())): wr1.append(h1[k][0]/float(h1[k][0]+h1[k][1])) wr2.append(h2[k][0]/float(h2[k][0]+h2[k][1])) if len(wr1)<3 or len(wr2)<3: continue c = corr(wr1, wr2) if c[1]<1 and c[0]>0.3: gggood_sids.append(sid) print(len(gggood_sids)) plot_distribution_of_correlation(gggood_sids)
Experiment2_prediction_before_game/download_championprof_playnetwork_good_based_on_2015_from 2016.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import cv2 import csv import math import h5py import time import pickle import random import argparse import numpy as np from datetime import datetime from skimage.transform import pyramid_gaussian, resize import torch from torch import optim from torch import nn from torch.nn import functional as F import multiprocessing as mp import threading from collections import deque, namedtuple # - # # Model Parameters # + parser = argparse.ArgumentParser(description='ACER') parser.add_argument('--seed', type=int, default=123, help='Random seed') parser.add_argument('--num-processes', type=int, default=3, metavar='N', help='Number of training async agents (does not include single validation agent)') parser.add_argument('--T-max', type=int, default=100000000, metavar='STEPS', help='Number of training steps') parser.add_argument('--t-max', type=int, default=100, metavar='STEPS', help='Max number of forward steps for A3C before update') parser.add_argument('--max-episode-length', type=int, default=15, metavar='LENGTH', help='Maximum episode length') parser.add_argument('--hidden-size', type=int, default=49, metavar='SIZE', help='Hidden size of LSTM cell') parser.add_argument('--n-dr-elements', type=int, default=49, metavar='SIZE', help='Number of objects in display') parser.add_argument('--present-action', type=int, default=49, metavar='SIZE', help='Present Action Value') parser.add_argument('--absent-action', type=int, default=50, metavar='SIZE', help='Absent Action Value') parser.add_argument('--memory-capacity', type=int, default=100000, metavar='CAPACITY', help='Experience replay memory capacity') parser.add_argument('--replay-ratio', type=int, default=4, metavar='r', help='Ratio of off-policy to on-policy updates') 
parser.add_argument('--replay-start', type=int, default=20000, metavar='EPISODES', help='Number of transitions to save before starting off-policy training') parser.add_argument('--discount', type=float, default=0.99, metavar='ฮณ', help='Discount factor') parser.add_argument('--trace-decay', type=float, default=1, metavar='ฮป', help='Eligibility trace decay factor') parser.add_argument('--trace-max', type=float, default=10, metavar='c', help='Importance weight truncation (max) value') parser.add_argument('--trust-region-decay', type=float, default=0.99, metavar='ฮฑ', help='Average model weight decay rate') parser.add_argument('--trust-region-threshold', type=float, default=1, metavar='ฮด', help='Trust region threshold value') parser.add_argument('--lr', type=float, default=0.0001, metavar='ฮท', help='Learning rate') parser.add_argument('--rmsprop-decay', type=float, default=0.99, metavar='ฮฑ', help='RMSprop decay factor') parser.add_argument('--batch-size', type=int, default=8, metavar='SIZE', help='Off-policy batch size') parser.add_argument('--entropy-weight', type=float, default=0.0001, metavar='ฮฒ', help='Entropy regularisation weight') parser.add_argument('--max-gradient-norm', type=float, default=40, metavar='VALUE', help='Gradient L2 normalisation') parser.add_argument('--evaluation-interval', type=int, default=25000, metavar='STEPS', help='Number of training steps between evaluations (roughly)') parser.add_argument('--evaluation-episodes', type=int, default=10, metavar='N', help='Number of evaluation episodes to average over') parser.add_argument('--name', type=str, default='results', help='Save folder') parser.add_argument('--on-policy', action='store_true', help='Use pure on-policy training (A3C)') parser.add_argument('--trust-region', action='store_true', help='Use trust region') parser.add_argument('--pretrain-model-available', action='store_true', help='Use pre trained model weights') N_DR_ELEMENTS = 49 N_ACTIONS = N_DR_ELEMENTS + 2 PRESENT = 
N_DR_ELEMENTS ABSENT = N_DR_ELEMENTS+1 MAX_STEPS = 15 N_ROWS = 7 PATH="./Lab_Model" IMG_HEIGHT = 64 IMG_WIDTH = 64 IMG_CHANNELs = 3 PYRAMID_LEVEL = 4 #excluding the original image as a level. CONVERSION_FACTOR = 6/15.5 #number of pixels per degree. Here, 15.5 is the display size in degree (size used in the experiment). FOVEA = 2 #in degrees FOVEA_RADIUS = int(np.round(FOVEA/CONVERSION_FACTOR)) #In pixels # - # # Utility Functions # + class Counter(): def __init__(self): self.val = mp.Value('i', 0) self.lock = mp.Lock() def increment(self): with self.lock: self.val.value += 1 def value(self): with self.lock: return self.val.value def state_to_tensor(state): return torch.from_numpy(state).float().unsqueeze(0) #setup mapping of fixated location to its corresponding x,y coordinate in the image. fixated_location = 0 FIXATION_DICT = {} hor = 5 ver = 5 for row in range(7): for col in range(7): FIXATION_DICT[str(fixated_location)] = (hor, ver) fixated_location += 1 hor = hor + 9 ver = ver + 9 hor = 5 # - # # Replay Memory Transition = namedtuple('Transition', ('state', 'action', 'reward', 'policy')) class EpisodicReplayMemory(): def __init__(self, capacity, max_episode_length): # Max number of transitions possible will be the memory capacity, could be much less self.num_episodes = capacity // max_episode_length self.memory = deque(maxlen=self.num_episodes) self.trajectory = [] def append(self, state, action, reward, policy): self.trajectory.append(Transition(state, action, reward, policy)) # Save s_i, a_i, r_i+1, ยต(ยท|s_i) # Terminal states are saved with actions as None, so switch to next episode if action is None: self.memory.append(self.trajectory) self.trajectory = [] # Samples random trajectory def sample(self, maxlen=0): mem = self.memory[random.randrange(len(self.memory))] T = len(mem) # Take a random subset of trajectory if maxlen specified, otherwise return full trajectory if maxlen > 0 and T > maxlen + 1: t = random.randrange(T - maxlen - 1) # Include next state 
after final "maxlen" state return mem[t:t + maxlen + 1] else: return mem # Samples batch of trajectories, truncating them to the same length def sample_batch(self, batch_size, maxlen=0): batch = [self.sample(maxlen=maxlen) for _ in range(batch_size)] minimum_size = min(len(trajectory) for trajectory in batch) batch = [trajectory[:minimum_size] for trajectory in batch] # Truncate trajectories return list(map(list, zip(*batch))) # Transpose so that timesteps are packed together def length(self): # Return number of epsiodes saved in memory return len(self.memory) def __len__(self): return sum(len(episode) for episode in self.memory) # # Optimiser class SharedRMSprop(optim.RMSprop): def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0): super(SharedRMSprop, self).__init__(params, lr=lr, alpha=alpha, eps=eps, weight_decay=weight_decay, momentum=0, centered=False) # State initialisation (must be done before step, else will not be shared between threads) for group in self.param_groups: for p in group['params']: state = self.state[p] state['step'] = p.data.new().resize_(1).zero_() state['square_avg'] = p.data.new().resize_as_(p.data).zero_() def share_memory(self): for group in self.param_groups: for p in group['params']: state = self.state[p] state['step'].share_memory_() state['square_avg'].share_memory_() def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data state = self.state[p] square_avg = state['square_avg'] alpha = group['alpha'] state['step'] += 1 if group['weight_decay'] != 0: grad = grad.add(group['weight_decay'], p.data) # g = ฮฑg + (1 - ฮฑ)ฮ”ฮธ^2 square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) # ฮธ โ† ฮธ - ฮทฮ”ฮธ/โˆš(g + ฮต) avg = square_avg.sqrt().add_(group['eps']) p.data.addcdiv_(-group['lr'], grad, avg) return loss # # Network # + class Unit(nn.Module): def __init__(self,in_channels,out_channels, 
padding): super(Unit,self).__init__() self.conv = nn.Conv2d(in_channels=in_channels,kernel_size=3,out_channels=out_channels,stride=1,padding=padding) self.bn = nn.BatchNorm2d(num_features=out_channels) self.relu = nn.ReLU() def forward(self,input): output = self.conv(input) output = self.bn(output) output = self.relu(output) return output class CNN_Module(nn.Module): def __init__(self): super(CNN_Module, self).__init__() self.unit1 = Unit(in_channels=3,out_channels=8, padding=0) self.unit2 = Unit(in_channels=8,out_channels=16, padding=0) self.unit3 = Unit(in_channels=16,out_channels=24, padding=0) self.unit4 = Unit(in_channels=24,out_channels=32, padding=0) self.pool1 = nn.MaxPool2d(kernel_size=2) self.unit5 = Unit(in_channels=32,out_channels=32, padding=1) self.unit6 = Unit(in_channels=32,out_channels=32, padding=1) self.unit7 = Unit(in_channels=32,out_channels=32, padding=1) self.pool2 = nn.MaxPool2d(kernel_size=2) self.unit8 = Unit(in_channels=32,out_channels=32, padding=1) self.unit9 = Unit(in_channels=32,out_channels=32, padding=1) self.pool3 = nn.MaxPool2d(kernel_size=2) self.unit10 = Unit(in_channels=32,out_channels=32, padding=1) self.net = nn.Sequential(self.unit1, self.unit2, self.unit3, self.unit4, self.pool1, self.unit5, self.unit6, \ self.unit7, self.pool2, self.unit8, self.unit9, self.pool3, self.unit10) self.fc1 = nn.Linear(in_features=7 * 7 * 32, out_features=392) self.activation1 = nn.ReLU() self.dropout = nn.Dropout(0.2) self.fc2 = nn.Linear(in_features=392, out_features=98) self.activation2 = nn.Sigmoid() def forward(self, input): input = input.view(-1, 3, 64, 64) output = self.net(input) output = output.view(-1, 7 * 7 * 32) output = self.fc1(output) output = self.activation1(output) output = self.fc2(output) output = self.activation2(output) return output class ActorCritic(nn.Module): def __init__(self, state_size, action_size, hidden_size): super(ActorCritic, self).__init__() #self.cnn = CNN_Module() self.fc1 = nn.Linear(state_size, 
hidden_size) self.lstm = nn.LSTMCell(hidden_size, hidden_size) self.fc_actor = nn.Linear(hidden_size, action_size) self.fc_critic = nn.Linear(hidden_size, action_size) def forward(self, x, h): x = self.fc1(x) h = self.lstm(x, h) # h is (hidden state, cell state) x = h[0] policy = F.softmax(self.fc_actor(x), dim=1).clamp(max=1 - 1e-20) # Prevent 1s and hence NaNs Q = self.fc_critic(x) V = (Q * policy).sum(1, keepdim=True) # V is expectation of Q under ฯ€ return policy, Q, V, h # - # # Environment class Env(): def __init__(self, args): self.num_feats = N_DR_ELEMENTS self.num_actions = N_ACTIONS self.steps = 0 self.total_time = 0.0 self.image = None self.state = None self.model = CNN_Module() self.model.load_state_dict(torch.load(PATH, map_location='cpu')) #self.model.to(torch.device('cpu')) self.correct = 0 self.target_present = False path = os.path.join('.','dr_data.h5') self.dr_data = h5py.File(path, 'r') def step(self, action): self.steps += 1 done = False reward = -0.1 info = '' if action < N_DR_ELEMENTS: fixation_loc = FIXATION_DICT[str(action)] fixate_x = int(action / N_ROWS) fixate_y = int(action % N_ROWS) input_image = self.sampling(self.image, fixation_loc[0], fixation_loc[1]) with torch.no_grad(): self.prob_out = self.model(torch.from_numpy(input_image).float().to(torch.device('cpu'))) self.state = self.prob_out[0].detach().numpy() elif (action == PRESENT and self.target_present) or (action == ABSENT and not self.target_present): reward = 2.0 done = True self.correct = 1 else: reward = -2.0 done = True self.correct = 0 if self.steps >= MAX_STEPS: done = True self.correct = 0 return self.state.flatten(), reward, done, info def reset(self): idx = np.random.randint(len(self.dr_data["Images"])) self.image = self.dr_data["Images"][idx] self.image = self.image.astype('uint8') self.steps = 0 self.total_time = 0.0 self.correct = 0 self.target_present = True if self.dr_data["target_status"][idx] == 1 else False self.state = np.zeros((1, N_DR_ELEMENTS+N_DR_ELEMENTS)) 
return self.state.flatten() def get_eccentricity(self, fixated_x, fixated_y): #Generate a mask with shape similar to the image. mask = 255*np.ones((IMG_HEIGHT, IMG_WIDTH), dtype='uint8') #Fovea is represented as a circle at fixated_x, fixated_y of radius FOVEA_RADIUS. cv2.circle(mask, (fixated_x, fixated_y), FOVEA_RADIUS, 0, -1) #Apply distance transform to mask. Open cv implementation of ecludian distance from fovea. eccentricity = cv2.distanceTransform(mask, cv2.DIST_L2, 3) eccentricity = eccentricity/CONVERSION_FACTOR eccentricity = (eccentricity / np.max(eccentricity)) * PYRAMID_LEVEL eccentricity = np.round(eccentricity) eccentricity = eccentricity.astype(np.int) return eccentricity def smooth_pyramid(self, image, layers=4): pyr_img = [] for (i, resized) in enumerate(pyramid_gaussian(image, max_layer=layers, downscale=1.7, multichannel=True)): pyr_img.append(resize(resized, (64,64), anti_aliasing=False, preserve_range=True, anti_aliasing_sigma=i**4, mode='constant')) return pyr_img def sampling(self, image, fixate_x, fixate_y): eccentricity = self.get_eccentricity(fixate_x, fixate_y) image = cv2.cvtColor(image, cv2.COLOR_RGB2Lab) pyramid = self.smooth_pyramid(image, PYRAMID_LEVEL) im_ = np.zeros(image.shape) for ecc in range(np.max(eccentricity)+1): i = np.argwhere(eccentricity == ecc) if len(i) > 0: im_[i[:,0], i[:,1]] = pyramid[ecc][i[:,0], i[:,1]] im_ = im_.reshape(-1,64,64,3) #Pytorch accepts images as [channel, width, height] im_ = np.swapaxes(im_, 3, 2) im_ = np.swapaxes(im_, 2, 1) return im_ # # Test def test(rank, args, T, shared_model): torch.manual_seed(args.seed + rank) env = env = Env(args) model = ActorCritic(N_DR_ELEMENTS+N_DR_ELEMENTS, N_ACTIONS, args.hidden_size) model.eval() save_dir = os.path.join('.', args.name) can_test = True # Test flag t_start = 1 # Test step counter to check against global counter rewards, accuracy, steps = [], [], [] # Rewards and steps for plotting l = str(len(str(args.T_max))) # Max num. 
of digits for logging steps done = True # Start new episode # stores step, reward, avg_steps and time results_dict = {'t': [], 'reward': [], 'accuracy': [], 'avg_steps': [], 'time': []} while T.value() <= args.T_max: if can_test: t_start = T.value() # Reset counter # Evaluate over several episodes and average results avg_rewards, avg_episode_lengths, avg_accuracy = [], [], [] for _ in range(args.evaluation_episodes): while True: # Reset or pass on hidden state if done: # Sync with shared model every episode model.load_state_dict(shared_model.state_dict()) hx = torch.zeros(1, args.hidden_size) cx = torch.zeros(1, args.hidden_size) # Reset environment and done flag state = state_to_tensor(env.reset()) done, episode_length = False, 0 reward_sum = 0 # Calculate policy with torch.no_grad(): policy, _, _, (hx, cx) = model(state, (hx, cx)) # Choose action greedily action = policy.max(1)[1][0] # Step state, reward, done, _ = env.step(action.item()) state = state_to_tensor(state) reward_sum += reward episode_length += 1 # Increase episode counter # Log and reset statistics at the end of every episode if done: avg_rewards.append(reward_sum) avg_episode_lengths.append(episode_length) avg_accuracy.append(env.correct) break print(('[{}] Step: {:<' + l + '} Avg. Reward: {:<8} Avg. Episode Length: {:<8} Avg. Accuracy: {:<8}').format( datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S,%f')[:-3],t_start, sum(avg_rewards) / args.evaluation_episodes,sum(avg_episode_lengths) / args.evaluation_episodes,sum(avg_accuracy) / args.evaluation_episodes)) fields = [t_start, sum(avg_rewards) / args.evaluation_episodes, sum(avg_episode_lengths) / args.evaluation_episodes, sum(accuracy) / args.evaluation_episodes, str(datetime.now())] # storing data in the dictionary. 
results_dict['t'].append(t_start) results_dict['reward'].append(sum(avg_rewards) / args.evaluation_episodes) results_dict['avg_steps'].append(sum(avg_episode_lengths) / args.evaluation_episodes) results_dict['time'].append(str(datetime.now())) results_dict['accuracy'].append(sum(avg_accuracy) / args.evaluation_episodes) # Dumping the results in pickle format with open(os.path.join(save_dir, 'results.pck'), 'wb') as f: pickle.dump(results_dict, f) # Saving the data in csv format with open(os.path.join(save_dir, 'test_results.csv'), 'a') as f: writer = csv.writer(f) writer.writerow(fields) torch.save(model.state_dict(), os.path.join(save_dir, 'model.pth')) # Save model params can_test = False # Finish testing else: if T.value() - t_start >= args.evaluation_interval: can_test = True time.sleep(0.001) # Check if available to test every millisecond # Dumping the results in pickle format with open(os.path.join(save_dir, 'results.pck'), 'wb') as f: pickle.dump(results_dict, f) # # Training # + # Knuth's algorithm for generating Poisson samples def _poisson(lmbd): L, k, p = math.exp(-lmbd), 0, 1 while p > L: k += 1 p *= random.uniform(0, 1) return max(k - 1, 0) # Transfers gradients from thread-specific model to shared model def _transfer_grads_to_shared_model(model, shared_model): for param, shared_param in zip(model.parameters(), shared_model.parameters()): if shared_param.grad is not None: return shared_param._grad = param.grad # Adjusts learning rate def _adjust_learning_rate(optimiser, lr): for param_group in optimiser.param_groups: param_group['lr'] = lr # Updates networks def _update_networks(args, T, model, shared_model, shared_average_model, loss, optimiser): # Zero shared and local grads optimiser.zero_grad() """ Calculate gradients for gradient descent on loss functions Note that math comments follow the paper, which is formulated for gradient ascent """ loss.backward() # Gradient L2 normalisation nn.utils.clip_grad_norm_(model.parameters(), 
args.max_gradient_norm) # Transfer gradients to shared model and update _transfer_grads_to_shared_model(model, shared_model) optimiser.step() # Update shared_average_model for shared_param, shared_average_param in zip(shared_model.parameters(), shared_average_model.parameters()): shared_average_param = args.trust_region_decay * shared_average_param + (1 - args.trust_region_decay) * shared_param # Computes an "efficient trust region" loss (policy head only) based on an existing loss and two distributions def _trust_region_loss(model, distribution, ref_distribution, loss, threshold, g, k): kl = - (ref_distribution * (distribution.log()-ref_distribution.log())).sum(1).mean(0) # Compute dot products of gradients k_dot_g = (k*g).sum(1).mean(0) k_dot_k = (k**2).sum(1).mean(0) # Compute trust region update if k_dot_k.item() > 0: trust_factor = ((k_dot_g - threshold) / k_dot_k).clamp(min=0).detach() else: trust_factor = torch.zeros(1) # z* = g - max(0, (k^Tโˆ™g - ฮด) / ||k||^2_2)โˆ™k trust_loss = loss + trust_factor*kl return trust_loss # - # Trains model def _train(args, T, model, shared_model, shared_average_model, optimiser, policies, Qs, Vs, actions, rewards, Qret, average_policies, old_policies=None): off_policy = old_policies is not None action_size = policies[0].size(1) policy_loss, value_loss = 0, 0 # Calculate n-step returns in forward view, stepping backwards from the last state t = len(rewards) for i in reversed(range(t)): # Importance sampling weights ฯ โ† ฯ€(โˆ™|s_i) / ยต(โˆ™|s_i); 1 for on-policy if off_policy: rho = policies[i].detach() / old_policies[i] else: rho = torch.ones(1, action_size) # Qret โ† r_i + ฮณQret Qret = rewards[i] + args.discount * Qret # Advantage A โ† Qret - V(s_i; ฮธ) A = Qret - Vs[i] # Log policy log(ฯ€(a_i|s_i; ฮธ)) log_prob = policies[i].gather(1, actions[i]).log() # g โ† min(c, ฯ_a_i)โˆ™โˆ‡ฮธโˆ™log(ฯ€(a_i|s_i; ฮธ))โˆ™A single_step_policy_loss = -(rho.gather(1, actions[i]).clamp(max=args.trace_max) * log_prob * 
A.detach()).mean(0) # Average over batch # Off-policy bias correction if off_policy: # g โ† g + ฮฃ_a [1 - c/ฯ_a]_+โˆ™ฯ€(a|s_i; ฮธ)โˆ™โˆ‡ฮธโˆ™log(ฯ€(a|s_i; ฮธ))โˆ™(Q(s_i, a; ฮธ) - V(s_i; ฮธ) bias_weight = (1 - args.trace_max / rho).clamp(min=0) * policies[i] single_step_policy_loss -= (bias_weight * policies[i].log() * (Qs[i].detach() - Vs[i].expand_as(Qs[i]).detach())).sum(1).mean(0) if args.trust_region: # KL divergence k โ† โˆ‡ฮธ0โˆ™DKL[ฯ€(โˆ™|s_i; ฮธ_a) || ฯ€(โˆ™|s_i; ฮธ)] k = -average_policies[i].gather(1, actions[i]) / (policies[i].gather(1, actions[i]) + 1e-10) if off_policy: g = (rho.gather(1, actions[i]).clamp(max=args.trace_max) * A / (policies[i] + 1e-10).gather(1, actions[i]) \ + (bias_weight * (Qs[i] - Vs[i].expand_as(Qs[i]))/(policies[i] + 1e-10)).sum(1)).detach() else: g = (rho.gather(1, actions[i]).clamp(max=args.trace_max) * A / (policies[i] + 1e-10).gather(1, actions[i])).detach() # Policy update dฮธ โ† dฮธ + โˆ‚ฮธ/โˆ‚ฮธโˆ™z* policy_loss += _trust_region_loss(model, policies[i].gather(1, actions[i]) + 1e-10, average_policies[i].gather(1, actions[i]) + 1e-10, single_step_policy_loss, args.trust_region_threshold, g, k) else: # Policy update dฮธ โ† dฮธ + โˆ‚ฮธ/โˆ‚ฮธโˆ™g policy_loss += single_step_policy_loss # Entropy regularisation dฮธ โ† dฮธ + ฮฒโˆ™โˆ‡ฮธH(ฯ€(s_i; ฮธ)) policy_loss -= args.entropy_weight * -(policies[i].log() * policies[i]).sum(1).mean(0) # Sum over probabilities, average over batch # Value update dฮธ โ† dฮธ - โˆ‡ฮธโˆ™1/2โˆ™(Qret - Q(s_i, a_i; ฮธ))^2 Q = Qs[i].gather(1, actions[i]) value_loss += ((Qret - Q) ** 2 / 2).mean(0) # Least squares loss # Truncated importance weight ฯยฏ_a_i = min(1, ฯ_a_i) truncated_rho = rho.gather(1, actions[i]).clamp(max=1) # Qret โ† ฯยฏ_a_iโˆ™(Qret - Q(s_i, a_i; ฮธ)) + V(s_i; ฮธ) Qret = truncated_rho * (Qret - Q.detach()) + Vs[i].detach() # Update networks _update_networks(args, T, model, shared_model, shared_average_model, policy_loss + value_loss, optimiser) # Acts and trains model def 
train(rank, args, T, shared_model, shared_average_model, optimiser): torch.manual_seed(args.seed + rank) env = Env(args) model = ActorCritic(N_DR_ELEMENTS+N_DR_ELEMENTS, N_ACTIONS, args.hidden_size) model.train() if not args.on_policy: # Normalise memory capacity by number of training processes memory = EpisodicReplayMemory(args.memory_capacity // args.num_processes, args.max_episode_length) t = 1 # Thread step counter done = True # Start new episode while T.value() <= args.T_max: # On-policy episode loop while True: # Sync with shared model at least every t_max steps model.load_state_dict(shared_model.state_dict()) # Get starting timestep t_start = t # Reset or pass on hidden state if done: hx, avg_hx = torch.zeros(1, args.hidden_size), torch.zeros(1, args.hidden_size) cx, avg_cx = torch.zeros(1, args.hidden_size), torch.zeros(1, args.hidden_size) # Reset environment and done flag state = state_to_tensor(env.reset()) done, episode_length, prev_action = False, 0, -1 else: # Perform truncated backpropagation-through-time (allows freeing buffers after backwards call) hx = hx.detach() cx = cx.detach() # Lists of outputs for training policies, Qs, Vs, actions, rewards, average_policies = [], [], [], [], [], [] while not done and t - t_start < args.t_max: # Calculate policy and values policy, Q, V, (hx, cx) = model(state, (hx, cx)) average_policy, _, _, (avg_hx, avg_cx) = shared_average_model(state, (avg_hx, avg_cx)) # Sample action action = torch.multinomial(policy, 1)[0, 0] # Step next_state, reward, done, _ = env.step(action.item()) next_state = state_to_tensor(next_state) episode_length += 1 # Increase episode counter if not args.on_policy: # Save (beginning part of) transition for offline training memory.append(state, action, reward, policy.detach()) # Save just tensors # Save outputs for online training [arr.append(el) for arr, el in zip((policies, Qs, Vs, actions, rewards, average_policies), (policy, Q, V, torch.LongTensor([[action]]), torch.Tensor([[reward]]), 
average_policy))] # Increment counters t += 1 T.increment() # Update state state = next_state prev_action = action # Break graph for last values calculated (used for targets, not directly as model outputs) if done: # Qret = 0 for terminal s if prev_action == PRESENT or prev_action == ABSENT: Qret = torch.zeros(1, 1) else: _, _, Qret, _ = model(state, (hx, cx)) Qret = Qret.detach() if not args.on_policy: # Save terminal state for offline training memory.append(state, None, None, None) else: # Qret = V(s_i; ฮธ) for non-terminal s _, _, Qret, _ = model(state, (hx, cx)) Qret = Qret.detach() # Train the network on-policy _train(args, T, model, shared_model, shared_average_model, optimiser, policies, Qs, Vs, actions, rewards, Qret, average_policies) # Finish on-policy episode if done: break # Train the network off-policy when enough experience has been collected if not args.on_policy and len(memory) >= args.replay_start: # Sample a number of off-policy episodes based on the replay ratio for _ in range(_poisson(args.replay_ratio)): # Act and train off-policy for a batch of (truncated) episode trajectories = memory.sample_batch(args.batch_size, maxlen=args.t_max) # Reset hidden state hx, avg_hx = torch.zeros(args.batch_size, args.hidden_size), torch.zeros(args.batch_size, args.hidden_size) cx, avg_cx = torch.zeros(args.batch_size, args.hidden_size), torch.zeros(args.batch_size, args.hidden_size) # Lists of outputs for training policies, Qs, Vs, actions, rewards, old_policies, average_policies = [], [], [], [], [], [], [] # Loop over trajectories (bar last timestep) for i in range(len(trajectories) - 1): # Unpack first half of transition state = torch.cat(tuple(trajectory.state for trajectory in trajectories[i]), 0) action = torch.LongTensor([trajectory.action for trajectory in trajectories[i]]).unsqueeze(1) reward = torch.Tensor([trajectory.reward for trajectory in trajectories[i]]).unsqueeze(1) old_policy = torch.cat(tuple(trajectory.policy for trajectory in 
trajectories[i]), 0) # Calculate policy and values policy, Q, V, (hx, cx) = model(state, (hx, cx)) average_policy, _, _, (avg_hx, avg_cx) = shared_average_model(state, (avg_hx, avg_cx)) # Save outputs for offline training [arr.append(el) for arr, el in zip((policies, Qs, Vs, actions, rewards, average_policies, old_policies), (policy, Q, V, action, reward, average_policy, old_policy))] # Unpack second half of transition next_state = torch.cat(tuple(trajectory.state for trajectory in trajectories[i + 1]), 0) done = torch.Tensor([trajectory.action is None for trajectory in trajectories[i + 1]]).unsqueeze(1) # Do forward pass for all transitions _, _, Qret, _ = model(next_state, (hx, cx)) # Qret = 0 for terminal s, V(s_i; ฮธ) otherwise Qret = ((1 - done) * Qret).detach() # Train the network off-policy _train(args, T, model, shared_model, shared_average_model, optimiser, policies, Qs, Vs, actions, rewards, Qret, average_policies, old_policies=old_policies) done = True if __name__ == '__main__': # BLAS setup os.environ['OMP_NUM_THREADS'] = '2' os.environ['MKL_NUM_THREADS'] = '2' # Setup args = parser.parse_args(args=[]) # Creating directories. 
save_dir = os.path.join('.', args.name) if not os.path.exists(save_dir): os.makedirs(save_dir) torch.manual_seed(args.seed) T = Counter() # Global shared counter shared_model = ActorCritic(N_DR_ELEMENTS+N_DR_ELEMENTS, N_ACTIONS, args.hidden_size) shared_model.share_memory() if args.pretrain_model_available: # Load pretrained weights shared_model.load_state_dict(torch.load('model.pth')) # Create average network shared_average_model = ActorCritic(N_DR_ELEMENTS+N_DR_ELEMENTS, N_ACTIONS, args.hidden_size) shared_average_model.load_state_dict(shared_model.state_dict()) shared_average_model.share_memory() for param in shared_average_model.parameters(): param.requires_grad = False # Create optimiser for shared network parameters with shared statistics optimiser = SharedRMSprop(shared_model.parameters(), lr=args.lr, alpha=args.rmsprop_decay) optimiser.share_memory() fields = ['t', 'rewards', 'avg_steps', 'accuracy', 'time'] with open(os.path.join(save_dir, 'test_results.csv'), 'w') as f: writer = csv.writer(f) writer.writerow(fields) processes = [] # Start validation agent p = threading.Thread(target=test, args=(0, args, T, shared_model)) p.start() processes.append(p) # Start training agents for rank in range(1, args.num_processes + 1): t = threading.Thread(target=train, args=(rank, args, T, shared_model, shared_average_model, optimiser)) t.start() #p = mp.Process(target=train, args=(rank, args, T, shared_model, shared_average_model, optimiser)) #p.start() print('Process ' + str(rank) + ' started') processes.append(t) for p in processes: print(p.is_alive()) # Clean up for p in processes: p.join() args = parser.parse_args(args=[]) args.pretrain_model_available
A3C_Pytorch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# This is the course outlined in: https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/
#
#
# Another nice course: https://machinelearningmastery.com/how-to-develop-machine-learning-models-for-multivariate-multi-step-air-pollution-time-series-forecasting/
#

# # 1. Data preparation

# +
import pandas as pd
from datetime import datetime

fname = './data/time_series_course_data/pollution.txt'


# load data: the raw file stores the timestamp as four separate columns
def parse(x):
    """Parse a 'year month day hour' string into a datetime."""
    return datetime.strptime(x, '%Y %m %d %H')


data = pd.read_csv(fname, parse_dates=[['year', 'month', 'day', 'hour']],
                   index_col=0, date_parser=parse)
data.drop('No', axis=1, inplace=True)
# manually specify column names
data.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
data.index.name = 'date'
# mark all NA values with 0 (assign back instead of chained inplace fillna,
# which relies on modifying a view and is deprecated pandas behaviour)
data['pollution'] = data['pollution'].fillna(0)
# drop the first 24 hours (they contain no pollution measurements)
data = data[24:]
# -

data.head()

# ### Make some plots

import matplotlib.pyplot as plt
# %matplotlib inline

values = data.values
# specify columns to plot (column 4, wind direction, is categorical and skipped)
groups = [0, 1, 2, 3, 5, 6, 7]
# plot each column in its own stacked subplot
plt.figure()
for i, group in enumerate(groups, start=1):
    plt.subplot(len(groups), 1, i)
    plt.plot(values[:, group])
    plt.title(data.columns[group], y=0.5, loc='right')
plt.show()

# # 2. Multivariate LSTM Forecast Model

# The first step is to prepare the pollution dataset for the LSTM.
#
# This involves framing the dataset as a supervised learning problem and normalizing the input variables.
#
# We will frame the supervised learning problem as predicting the pollution at the current hour (t) given the pollution measurement and weather conditions at the prior time step.
#
# This formulation is straightforward and just for this demonstration. Some alternate formulations you could explore include:
#
# - Predict the pollution for the next hour based on the weather conditions and pollution over the last 24 hours.
# - Predict the pollution for the next hour as above and given the “expected” weather conditions for the next hour.

# Some data preparation
#
#
# This data preparation is simple and there is more we could explore. Some ideas you could look at include:
#
# - One-hot encoding wind direction.
# - Making all series stationary with differencing and seasonal adjustment.
# - Providing more than 1 hour of input time steps.
# - This last point is perhaps the most important given the use of Backpropagation through time by LSTMs when learning sequence prediction problems.

# +
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder


# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised-learning table.

    Args:
        data: list or 2D array-like of observations (samples x features).
        n_in: number of lag observations (t-n_in, ..., t-1) used as input.
        n_out: number of lead observations (t, ..., t+n_out-1) to predict.
        dropnan: drop rows with NaNs introduced by the shifting.

    Returns:
        pandas.DataFrame with columns named like 'var1(t-1)', 'var2(t)',
        'var1(t+1)', ... — lagged inputs followed by forecast outputs.
    """
    n_vars = 1 if isinstance(data, list) else data.shape[1]
    df = pd.DataFrame(data)
    cols, names = [], []
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
    # put it all together
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values
    if dropnan:
        agg.dropna(inplace=True)
    return agg


values = data.values
# integer encode wind direction
encoder = LabelEncoder()
values[:, 4] = encoder.fit_transform(values[:, 4])
# ensure all data is float
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
print(scaled.shape)
# frame as supervised learning
window_size = 3
reframed = series_to_supervised(scaled, window_size, 1)
# drop columns we don't want to predict: keep only var1(t) (pollution) as target
reframed.drop(reframed.columns[-7:], axis=1, inplace=True)
print(reframed.head())
print(reframed.shape)
# -

# ## Define and Fit Model

# In this section, we will fit an LSTM on the multivariate input data.
#
# First, we must split the prepared dataset into train and test sets. To speed up the training of the model for this demonstration, we will only fit the model on the first year of data, then evaluate it on the remaining 4 years of data. If you have time, consider exploring the inverted version of this test harness.
#
# The example below splits the dataset into train and test sets, then splits the train and test sets into input and output variables. Finally, the inputs (X) are reshaped into the 3D format expected by LSTMs, namely [samples, timesteps, features].

# split into train and test sets: first year for training, rest for testing
values = reframed.values
n_train_hours = 365 * 24
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
# split into input and outputs (last column is the var1(t) target)
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
print(train_X.shape, len(train_X), train_y.shape)
# reshape input to be 3D [samples, timesteps, features]
n_features = scaled.shape[1]  # number of features
train_X = train_X.reshape((train_X.shape[0], window_size, n_features))
test_X = test_X.reshape((test_X.shape[0], window_size, n_features))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

# Now we can define and fit our LSTM model.
#
# We will define the LSTM with 50 neurons in the first hidden layer and 1 neuron in the output layer for predicting pollution. The input shape will be 1 time step with 8 features.
#
# We will use the Mean Absolute Error (MAE) loss function and the efficient Adam version of stochastic gradient descent.
#
# The model will be fit for 50 training epochs with a batch size of 72. Remember that the internal state of the LSTM in Keras is reset at the end of each batch, so an internal state that is a function of a number of days may be helpful (try testing this).
#
# Finally, we keep track of both the training and test loss during training by setting the validation_data argument in the fit() function. At the end of the run both the training and test loss are plotted.
(train_X.shape[1], train_X.shape[2]) # + from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers import Flatten # design network model = Sequential() # model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2]))) # model.add(Dense(1)) # model.compile(loss='mae', optimizer='adam') model.add(Dense(400, activation='relu', input_shape=(train_X.shape[1], train_X.shape[2]))) model.add(Flatten()) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # fit network history = model.fit(train_X, train_y, epochs=10, batch_size=72, validation_data=(test_X, test_y), verbose=2, shuffle=False) # plot history plt.plot(history.history['loss'], label='train') plt.plot(history.history['val_loss'], label='test') plt.legend() plt.show() # - # Running the example first creates a plot showing the train and test loss during training. # # Interestingly, we can see that test loss drops below training loss. The model may be overfitting the training data. Measuring and plotting RMSE during training may shed more light on this. # ## Evaluate Model # After the model is fit, we can forecast for the entire test dataset. # # We combine the forecast with the test dataset and invert the scaling. We also invert scaling on the test dataset with the expected pollution numbers. # # With forecasts and actual values in their original scale, we can then calculate an error score for the model. In this case, we calculate the Root Mean Squared Error (RMSE) that gives error in the same units as the variable itself. 
# make a prediction and report RMSE in the original pollution units.
# FIX: this cell previously used the undefined name `n_hours` (should be the
# `window_size` used to build the supervised frame) and relied on numpy /
# mean_squared_error that were only imported in a *later* cell; both imports
# are now local to this cell so it runs in file order.
import numpy as np
from sklearn.metrics import mean_squared_error

yhat = model.predict(test_X)
test_X_reshaped = test_X.reshape((test_X.shape[0], window_size*n_features))
# invert scaling for forecast: the scaler was fit on 8 columns, so the
# prediction is re-joined with the last 7 scaled feature columns before
# inverse_transform, then column 0 (pollution) is extracted.
inv_yhat = np.concatenate((yhat, test_X_reshaped[:, -7:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
# invert scaling for actual
test_y = test_y.reshape((len(test_y), 1))
inv_y = np.concatenate((test_y, test_X_reshaped[:, -7:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
# calculate RMSE
rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat))
print('Test RMSE: %.3f' % rmse)

# RMSE with LSTM was 27

# # Evaluate MLP model

# +
import pandas as pd
from datetime import datetime
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Flatten


def run_model(experiment_label, window_size=1):
    """Run one end-to-end pollution-forecasting experiment.

    Loads the pollution dataset, frames it as supervised learning with
    `window_size` lag steps, trains a small MLP for 50 epochs, plots the
    train/validation loss under `experiment_label`, and prints the test
    RMSE in the original pollution units.

    Returns (model, scaler, scaled, train_X, test_X, train_y, test_y,
    inv_y, inv_yhat) so the caller can inspect/plot the predictions.
    """
    fname = './data/time_series_course_data/pollution.txt'

    # load data
    def parse(x):
        # merge of the year/month/day/hour columns -> datetime
        return datetime.strptime(x, '%Y %m %d %H')

    data = pd.read_csv(fname, parse_dates = [['year', 'month', 'day', 'hour']],
                       index_col=0, date_parser=parse)
    data.drop('No', axis=1, inplace=True)
    # manually specify column names
    data.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
    data.index.name = 'date'
    # mark all NA values with 0
    data['pollution'].fillna(0, inplace=True)
    # drop the first 24 hours
    data = data[24:]

    # convert series to supervised learning
    def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
        # Build lag columns var*(t-n)..var*(t-1) plus forecast columns
        # var*(t)..var*(t+n) by shifting and concatenating the frame.
        n_vars = 1 if type(data) is list else data.shape[1]
        df = pd.DataFrame(data)
        cols, names = list(), list()
        # input sequence (t-n, ... t-1)
        for i in range(n_in, 0, -1):
            cols.append(df.shift(i))
            names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
        # forecast sequence (t, t+1, ... t+n)
        for i in range(0, n_out):
            cols.append(df.shift(-i))
            if i == 0:
                names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
            else:
                names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
        # put it all together
        agg = pd.concat(cols, axis=1)
        agg.columns = names
        # drop rows with NaN values
        if dropnan:
            agg.dropna(inplace=True)
        return agg

    values = data.values
    # integer encode wind direction
    encoder = LabelEncoder()
    values[:,4] = encoder.fit_transform(values[:,4])
    # ensure all data is float
    values = values.astype('float32')
    # normalize features
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled = scaler.fit_transform(values)
    # frame as supervised learning
    reframed = series_to_supervised(scaled, window_size, 1)
    # drop columns we don't want to predict (keep only pollution at time t)
    reframed.drop(reframed.columns[-7:], axis=1, inplace=True)
    print(reframed.head())

    # split into train and test sets (first year trains, rest tests)
    values = reframed.values
    n_train_hours = 365 * 24
    train = values[:n_train_hours, :]
    test = values[n_train_hours:, :]
    # split into input and outputs
    train_X, train_y = train[:, :-1], train[:, -1]
    test_X, test_y = test[:, :-1], test[:, -1]
    print(train_X.shape, len(train_X), train_y.shape)
    # reshape input to be 3D [samples, timesteps, features]
    n_features = scaled.shape[1] #number of features
    train_X = train_X.reshape((train_X.shape[0], window_size, n_features))
    test_X = test_X.reshape((test_X.shape[0], window_size, n_features))
    print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

    # design network (MLP baseline; the course's LSTM is kept for reference)
    model = Sequential()
    # model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
    # model.add(Dense(1))
    # model.compile(loss='mae', optimizer='adam')
    model.add(Dense(100, activation='relu', input_shape=(train_X.shape[1], train_X.shape[2])))
    model.add(Flatten())
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    # fit network
    history = model.fit(train_X, train_y, epochs=50, batch_size=48,
                        validation_data=(test_X, test_y), verbose=0, shuffle=False)
    # plot history
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='test')
    plt.title(f'Experiment {experiment_label}')
    plt.legend()
    plt.show()

    # make a prediction
    yhat = model.predict(test_X)
    n_features = scaled.shape[1]
    test_X_reshaped = test_X.reshape((test_X.shape[0], window_size*n_features))
    # invert scaling for forecast
    inv_yhat = np.concatenate((yhat, test_X_reshaped[:, -7:]), axis=1)
    inv_yhat = scaler.inverse_transform(inv_yhat)
    inv_yhat = inv_yhat[:,0]
    # invert scaling for actual
    test_y = test_y.reshape((len(test_y), 1))
    inv_y = np.concatenate((test_y, test_X_reshaped[:, -7:]), axis=1)
    inv_y = scaler.inverse_transform(inv_y)
    inv_y = inv_y[:,0]
    # calculate RMSE
    rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat))
    print('Test RMSE: %.3f' % rmse)
    return model, scaler, scaled, train_X, test_X, train_y, test_y, inv_y, inv_yhat
# -

window_size = 5
model, scaler, scaled, train_X, test_X, train_y, test_y, inv_y, inv_yhat = run_model('MLP_window_5', window_size)

data = pd.DataFrame({'y': inv_y, 'y_pred': inv_yhat})
print(data.head())

# plot a 100-hour slice of actual vs predicted pollution
ii = 200
jj = 300
plt.plot(data.index.values[ii:jj], data['y'].values[ii:jj], label='test')
plt.plot(data.index.values[ii:jj], data['y_pred'].values[ii:jj], label='prediction')
plt.legend()
plt.show()

# evaluate
# make a prediction
yhat = model.predict(test_X)
# FIX: flatten all window_size*n_features inputs (the original reshaped to
# test_X.shape[2] columns, which fails for window_size > 1) and re-join the
# last 7 feature columns (the original sliced [:, 1:], producing a width the
# 8-column scaler cannot inverse_transform).
n_features = scaled.shape[1]
test_X_reshaped = test_X.reshape((test_X.shape[0], window_size*n_features))
# invert scaling for forecast
inv_yhat = np.concatenate((yhat, test_X_reshaped[:, -7:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
forecasting/00.TimeSeriesForeCasting.Multivariate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from abc import ABC, abstractmethod # + class AnimalFactory(ABC): @abstractmethod def produce_animal(self): pass class CatFactory(AnimalFactory): @staticmethod def produce_animal(name): return Cat(name) class ElephantFactory(AnimalFactory): @staticmethod def produce_animal(name): return Elephant(name) class Animal(ABC): def __init__(self, name): self.name = name @abstractmethod def hello(): pass @abstractmethod def __repr__(self): pass class Cat(Animal): def __repr__(self): return f'\U0001F408 {self.name}' def hello(self): print('meow...') class Elephant(Animal): def __repr__(self): return f'\U0001F418 {self.name}' def hello(self): print('mooo...') # + cat = CatFactory.produce_animal('Trevor') elephant = ElephantFactory.produce_animal('Doris') menagerie = [cat, elephant] # - for animal in menagerie: print(animal) animal.hello() print()
abstract-classes-factory-method.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pickle import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) import t5 import pandas as pd import numpy as np import random import math from tqdm import tqdm tqdm.pandas() from sklearn.model_selection import train_test_split import torch from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler device = torch.device("cpu") torch.cuda.empty_cache() from transformers import T5Tokenizer, MT5ForConditionalGeneration, T5ForConditionalGeneration, Adafactor # - # ____ # # Load data # + # Data... datasets = ['long', 'long_fully_aligned', 'long_softly_aligned', 'short', 'short_fully_aligned', 'short_softly_aligned'] wa = datasets[0] # without_alignment ls = datasets[1] # long_strict lr = datasets[2] # long_relaxed ss = datasets[4] # short_strict sr = datasets[5] # short_relaxed # Model... models = ['t5-small', 't5-base', 't5-large', 'google/mt5-small', 'google/mt5-base'] model_version = models[0] # Encoding... 
spa_char_encode = True # + data_wa = pickle.load(open('../Datasets/wikimusica_'+wa+'.p', "rb")) input_test_wa = data_wa[1] output_test_wa = data_wa[3] data_ls = pickle.load(open('../Datasets/wikimusica_'+ls+'.p', "rb")) input_test_ls = data_ls[1] output_test_ls = data_ls[3] data_lr = pickle.load(open('../Datasets/wikimusica_'+lr+'.p', "rb")) input_test_lr = data_lr[1] output_test_lr = data_lr[3] data_ss = pickle.load(open('../Datasets/wikimusica_'+ss+'.p', "rb")) input_test_ss = data_ss[1] output_test_ss = data_ss[3] data_sr = pickle.load(open('../Datasets/wikimusica_'+sr+'.p', "rb")) input_test_sr = data_sr[1] output_test_sr = data_sr[3] # - # # Select samples for testing # + # Select samples after manual checking ss_ind = [0, 2, 3, 5, 7, 8, 10, 12, 14, 15, 16, 18, 21, 22, 23, 29, 30, 31, 32, 34,] ls_ind = [35, 36, 37, 38, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 57, 59, 60, 61, 62, 63, 66, 70, 72, 76, 77, 80,] sr_ind = [81, 82, 84, 85, 86, 89, 90, 95, 96, 97, 99, 100, 101, 102, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, 117, 118, 119, 120, 125] lr_ind = [127, 129, 130, 131, 133, 136, 137, 140, 142, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 158, 159, 160, 162, 153, 166, 167, 168, 170, 173] wa_ind = [175, 178, 179, 180, 182, 184, 185, 187, 188, 190, 191, 193, 196, 198, 200, 201, 202, 203, 205, 206, 207, 217, 224, 225, 228, 229, 234, 240, 241, 242, 259, 277, 289, 312, 314, 328, 341, 368, 374, 395,] # + input_test = list() output_ref_test = list() [input_test.append(input_test_ss[d]) for d in ss_ind] [input_test.append(input_test_ls[d]) for d in ls_ind] [input_test.append(input_test_sr[d]) for d in sr_ind] [input_test.append(input_test_lr[d]) for d in lr_ind] [input_test.append(input_test_wa[d]) for d in wa_ind] [output_ref_test.append(output_test_ss[d]) for d in ss_ind] [output_ref_test.append(output_test_ls[d]) for d in ls_ind] [output_ref_test.append(output_test_sr[d]) for d in sr_ind] 
[output_ref_test.append(output_test_lr[d]) for d in lr_ind]
[output_ref_test.append(output_test_wa[d]) for d in wa_ind]
# -

# **Drop some selected samples, (we want just 100)**

# +
# Finally, instead of 150 samples, we will test over 100
to_drop = [14, 21, 26, 34, 35, 42, 48, 49, 51, 54, 59, 65, 67, 68, 70, 80, 81, 84, 95, 96, 97, 102, 103, 23, 25, 29, 31, 38, 39, 53, 147, 145, 141, 140, 137, 135, 130, 129, 127, 125, 119, 115, 113, 111, 57, 66, 72, 100, 109, 134]

new_input_test = list()
new_output_ref_test = list()
for i in range(150):
    if i not in to_drop:
        new_input_test.append(input_test[i])
        new_output_ref_test.append(output_ref_test[i])

input_test = new_input_test
output_ref_test = new_output_ref_test
# -

# **Check attribute count for selected samples**

# +
# Parse each "wikimusic: attr | value • attr | value ..." input and count
# how often each attribute occurs, collecting the values per attribute.
input_attr_num = list()
input_attr = list()
# FIX: iterate over however many samples remain; the original hard-coded
# range(150), which raises IndexError after input_test was cut to 100 above.
for i in range(len(input_test)):
    print(i)
    attr = {}
    attr_num = {'stagename':0, 'birthname':0, 'birthplace':0, 'nation':0,
                'birthdate':0, 'deathplace':0, 'deathdate':0, 'occupation':0,
                'instrument':0, 'voice':0, 'genre':0, 'group':0,}
    for n in input_test[i].split('wikimusic: ')[1].split(' • '):
        a = n.split(' | ')[0]  # attribute name
        b = n.split(' | ')[1]  # attribute value
        attr_num[a] += 1
        # FIX: catch only the missing-key case instead of a bare except:,
        # which also swallowed unrelated errors.
        try:
            attr[a].append(b)
        except KeyError:
            attr[a] = [b]
        print(n)
    input_attr_num.append(attr_num)
    input_attr.append(attr)
    print('----')
    print('')
# -

df_attr_num = pd.DataFrame(input_attr_num)

df_attr_num.groupby('group').count()

input_test[0]

# ___

# **Prepare input data for human evaluation**

# +
# Group the attributes into three display sections: naming, biographical
# dates/places, and musical info.
name = ['stagename', 'birthname']
birth = ['birthplace', 'nation', 'birthdate', 'deathplace', 'deathdate']
info = ['occupation', 'instrument', 'voice', 'genre', 'group']

input_1_name = list()
input_1_birth = list()
input_1_info = list()
for i in input_attr:
    text_name = ''
    text_birth = ''
    text_info = ''
    for k in i.keys():
        for e in i[k]:
            if k in name:
                text_name += k + ': ' + e + '\n'
            elif k in birth:
                text_birth += k + ': ' + e + '\n'
            else:
                text_info += k + ': ' + e + '\n'
    input_1_name.append(text_name)
    input_1_birth.append(text_birth)
    input_1_info.append(text_info)

######
# Join the three sections per sample for the human evaluators.
input_final = list()
for i in range(100):
    input_final.append(input_1_name[i] + '-----\n' + input_1_birth[i] + '-----\n' + input_1_info[i])
# -

# **Create Dataframe for evaluation**

# +
test_records = list()
for i in range(100):
    record = {
        'reference': output_ref_test[i],
        'input_for_model': input_test[i],
        'attributes': input_final[i],
    }
    test_records.append(record)

df_test = pd.DataFrame(test_records)
# -

# **Add unique id-s for tracing each generated text**

# +
# Draw non-overlapping random ids (1..1100) per model column so evaluators
# cannot tell which system produced which text.
ids = list(range(1,1101))

df_test['text_model_v1'] = ''
df_test['text_model_v1_id'] = random.sample(ids, k=df_test.shape[0])
ids_to_remove = df_test['text_model_v1_id'].tolist()
_ = [ids.remove(i) for i in ids_to_remove]

df_test['text_model_v2'] = ''
df_test['text_model_v2_id'] = random.sample(ids, k=df_test.shape[0])
ids_to_remove = df_test['text_model_v2_id'].tolist()
_ = [ids.remove(i) for i in ids_to_remove]

df_test['text_model_v3'] = ''
df_test['text_model_v3_id'] = random.sample(ids, k=df_test.shape[0])
ids_to_remove = df_test['text_model_v3_id'].tolist()
_ = [ids.remove(i) for i in ids_to_remove]

df_test['text_model_v4'] = ''
df_test['text_model_v4_id'] = random.sample(ids, k=df_test.shape[0])
ids_to_remove = df_test['text_model_v4_id'].tolist()
_ = [ids.remove(i) for i in ids_to_remove]

df_test['text_model_v5'] = ''
df_test['text_model_v5_id'] = random.sample(ids, k=df_test.shape[0])
ids_to_remove = df_test['text_model_v5_id'].tolist()
_ = [ids.remove(i) for i in ids_to_remove]

df_test['text_model_v6'] = ''
df_test['text_model_v6_id'] = random.sample(ids, k=df_test.shape[0])
ids_to_remove = df_test['text_model_v6_id'].tolist()
_ = [ids.remove(i) for i in ids_to_remove]

df_test['text_model_v7'] = ''
df_test['text_model_v7_id'] = random.sample(ids, k=df_test.shape[0])
ids_to_remove = df_test['text_model_v7_id'].tolist()
_ = [ids.remove(i) for i in ids_to_remove]

df_test['text_model_v8'] = ''
df_test['text_model_v8_id'] = random.sample(ids, k=df_test.shape[0])
ids_to_remove = df_test['text_model_v8_id'].tolist()
_ = [ids.remove(i) for i in ids_to_remove]

df_test['text_model_v9'] = ''
df_test['text_model_v9_id'] = random.sample(ids, k=df_test.shape[0])
ids_to_remove = df_test['text_model_v9_id'].tolist()
_ = [ids.remove(i) for i in ids_to_remove]

df_test['text_model_v10'] = ''
df_test['text_model_v10_id'] = random.sample(ids, k=df_test.shape[0])
ids_to_remove = df_test['text_model_v10_id'].tolist()
_ = [ids.remove(i) for i in ids_to_remove]

# v11 is the human-written reference itself, mixed in blind among the
# model outputs as a control for the evaluators.
df_test['text_model_v11'] = df_test['reference']
df_test['text_model_v11_id'] = random.sample(ids, k=df_test.shape[0])
ids_to_remove = df_test['text_model_v11_id'].tolist()
_ = [ids.remove(i) for i in ids_to_remove]

# Shuffle data
df_test = df_test.sample(frac = 1)
# -

# _____
# # Generate texts for evaluation

# +
def encode_unseen_characters(text: str):
    # Replace accented Spanish characters that the T5 vocabulary does not
    # cover with %..% placeholder tokens before generation.
    text = (text.replace('í','%i%')
            .replace('Í','%I%')
            .replace('ú','%u%')
            .replace('Ú','%U%')
            .replace('Á','%A%')
            .replace('Ó','%O%')
            .replace('ñ','%n%')
            .replace('Ñ','%N%'))
    return text


def decode_unseen_characters(text: str):
    # Inverse of encode_unseen_characters: restore the accented characters
    # in the generated output.
    text = (text.replace('%i%','í')
            .replace('%I%','Í')
            .replace('%u%','ú')
            .replace('%U%','Ú')
            .replace('%A%','Á')
            .replace('%O%','Ó')
            .replace('%n%','ñ')
            .replace('%N%','Ñ'))
    return text

#### #### ####
# -

device = torch.device("cpu")

# **Load models**

# +
# NOTE(review): torch.load below deserializes full pickled model objects —
# only load checkpoints from a trusted source.
model_version = 't5-base'
t5_base_tokenizer = T5Tokenizer.from_pretrained(model_version)
####################
model_name = 'jumping-jazz-13-3.pt' # t-5 vanila (long)
v1_vanila = torch.load('../Models/'+model_name)
v1_vanila.to(device);
###
model_name = 'classic-puddle-20-3.pt'
v2_t5base_strict = torch.load('../Models/'+model_name)
v2_t5base_strict.to(device);
###
#model_name = 'trim-firebrand-17-3.pt'
model_name = 'vivid-darkness-18-3.pt'
v6_t5base_relaxed = torch.load('../Models/'+model_name)
v6_t5base_relaxed.to(device);

# +
model_version = 't5-small'
t5_small_tokenizer = T5Tokenizer.from_pretrained(model_version)
####################
model_name = 'kind-hill-15-2.pt'
v7_t5small_strict = torch.load('../Models/'+model_name)
v7_t5small_strict.to(device);
###
model_name = 'noble-oath-3-5.pt'
v8_t5small_relaxed = torch.load('../Models/'+model_name)
v8_t5small_relaxed.to(device);

# +
model_version = 'google/mt5-small'
mt5_small_tokenizer = T5Tokenizer.from_pretrained(model_version)
####################
model_name = 'peach-star-28-7.pt'
v9_mt5small_strict = torch.load('../Models/'+model_name)
v9_mt5small_strict.to(device);
###
model_name = 'zany-haze-27-4.pt'
v10_mt5small_relaxed = torch.load('../Models/'+model_name)
v10_mt5small_relaxed.to(device);
# -

# ___

def check_occupation(text:str):
    ''' Function for checking if any occupation is present in input data'''
    # Returns the text unchanged if it already mentions an occupation or an
    # instrument; otherwise appends a default occupation attribute
    # ("músico" in the %..% placeholder encoding).
    has_occ = text.find('occupation |')
    if has_occ!=-1:
        return text
    else:
        has_ins = text.find('instrument |')
        if has_ins!=-1:
            return text
        else:
            return text + ' • occupation | m%u%sico'


# **Generate eval text for each model**

def generate_text(input_text, model, tokenizer, num_beams=10, min_length=30,
                  num_return_sequences=1, length_penalty=1, no_repeat_ngram_size=0,
                  dynamic_min_length=False, test=False):
    """Generate one biography from a structured input with beam search.

    The input is placeholder-encoded before tokenization and the output
    decoded back. When dynamic_min_length is True, min_length is derived
    from the token count of the attribute values plus a margin of 10.
    When test is True all beam outputs are printed instead of returned;
    otherwise the first decoded sequence is returned.
    """
    input_text = encode_unseen_characters(input_text)
    if dynamic_min_length:
        attrs = input_text.split('•')
        input_split = [a.split('|')[1].strip() for a in attrs]
        input_split = ' '.join(input_split)
        min_length = len(tokenizer.tokenize(input_split)) + 10
    features = tokenizer([input_text], return_tensors='pt')
    outputs = model.generate(input_ids=features['input_ids'],
                             attention_mask=features['attention_mask'],
                             max_length=512,
                             min_length=min_length,
                             num_beams=num_beams,
                             num_return_sequences=num_return_sequences,
                             length_penalty=length_penalty,
                             no_repeat_ngram_size=no_repeat_ngram_size)
    for output in outputs:
        t = tokenizer.decode(output, skip_special_tokens=True)
        t = decode_unseen_characters(t)
        if test:
            print('\n-- ** -- ** --\n')
            print(t)
        else:
            # return only the first decoded beam
            return t


# Fill one column per (model, beam-width) configuration.
df_test['text_model_v1'] = df_test['input_for_model'].progress_apply(generate_text, model=v1_vanila, tokenizer=t5_base_tokenizer, num_beams=1)

df_test['text_model_v2'] = df_test['input_for_model'].progress_apply(generate_text, model=v2_t5base_strict, tokenizer=t5_base_tokenizer, num_beams=1)

df_test['text_model_v3'] = df_test['input_for_model'].progress_apply(generate_text, model=v2_t5base_strict, tokenizer=t5_base_tokenizer, num_beams=2)

df_test['text_model_v4'] = df_test['input_for_model'].progress_apply(generate_text, model=v2_t5base_strict, tokenizer=t5_base_tokenizer, num_beams=5)

df_test['text_model_v5'] = df_test['input_for_model'].progress_apply(generate_text, model=v2_t5base_strict, tokenizer=t5_base_tokenizer, num_beams=10)

df_test['text_model_v6'] = df_test['input_for_model'].progress_apply(generate_text, model=v6_t5base_relaxed, tokenizer=t5_base_tokenizer, num_beams=10)

df_test['text_model_v7'] = df_test['input_for_model'].progress_apply(generate_text, model=v7_t5small_strict, tokenizer=t5_small_tokenizer, num_beams=10)

df_test['text_model_v8'] = df_test['input_for_model'].progress_apply(generate_text, model=v8_t5small_relaxed, tokenizer=t5_small_tokenizer, num_beams=10)

df_test['text_model_v9'] = df_test['input_for_model'].progress_apply(generate_text, model=v9_mt5small_strict, tokenizer=mt5_small_tokenizer, num_beams=10)

df_test['text_model_v10'] = df_test['input_for_model'].progress_apply(generate_text, model=v10_mt5small_relaxed, tokenizer=mt5_small_tokenizer, num_beams=10)

# **Correct some reference texts for evaluation**

df_test.loc[2, 'text_model_v11'] = '<NAME> (Baichen, Jilin, 1971), es una de las cantantes más famosas de ópera pekinesa.'

df_test.loc[21, 'text_model_v11'] = '<NAME> conocido artísticamente como Basilio (Ciudad de Panamá, Panamá, 13 de octubre de 1947 - Miami, Estados Unidos, 11 de octubre de 2009) fue un cantante panameño.'
# Manually corrected reference texts (anonymized with <NAME> placeholders).
df_test.loc[26, 'text_model_v11'] = '<NAME> (Queens, 14 de noviembre de 1951) es un baterista estadounidense, reconocido por su trabajo con la banda de heavy metal multiplatino Quiet Riot, siendo el único miembro que queda de la formación clásica de la banda.'

df_test.loc[28, 'text_model_v11'] = '<NAME> (en azerí: Vaq<NAME>ə; 16 de marzo de 1940-16 de diciembre de 1979) fue un músico azerbaiyano de jazz, pianista y compositor.'

df_test.loc[43, 'text_model_v11'] = '<NAME> (Lebrija, 1863-Utrera, circa 1930) más conocido como <NAME>ini o El Pinini, fue un cantaor flamenco.'

df_test.loc[59, 'text_model_v11'] = '<NAME>, más conocido como <NAME> (Chiswick, Middlesex, Inglaterra, 30 de enero de 1951), es un baterista, cantante, compositor, productor y actor británico, y uno de los artistas de mayor éxito de la música pop y soft rock.'

df_test.loc[60, 'text_model_v11'] = '<NAME> (nacido como <NAME> el 13 de octubre de 1960 en Oswego, Nueva York) es un vocalista y baterista de heavy metal y thrash metal, más conocido por ser el cantante de la banda Anthrax.'

df_test.loc[61, 'text_model_v11'] = '<NAME> (n. 25 de octubre de 1943, Redhill, Surrey, Inglaterra) es un músico y compositor inglés, conocido por haber sido teclista de la banda de rock Status Quo'

df_test.loc[62, 'text_model_v11'] = '<NAME> (Chicago, Illinois, 28 de mayo de 1926-Las Vegas, Nevada, 27 de junio de 2002) fue un pianista y compositor estadounidense de jazz.'

df_test.loc[65, 'text_model_v11'] = '<NAME> (Tarxien, Malta; 15 de marzo de 1992) es una cantante maltesa.'

df_test.loc[69, 'text_model_v11'] = '8 de abril de 1956, Dolores, provincia de Buenos Aires), apodado el "Chacarero Cantor", es un popular cantante folclórico argentino.'

df_test.loc[72, 'text_model_v11'] = '<NAME>, más conocido como Maniac (Noruega, 4 de febrero de 1969), es el vocalista de la banda de black metal Skitliv. Maniac es conocido principalmente por haber sido el vocalista de la banda pionera del black metal noruego, Mayhem.'

df_test.loc[73, 'text_model_v11'] = '<NAME> (n. 11 de marzo de 1996 en Rhode Island, Estados Unidos), también conocida como <NAME> es una Cantante, compositora y Filántropa estadounidense'

df_test.loc[78, 'text_model_v11'] = 'Sal Valentino (Salvat<NAME>, 8 de septiembre de 1942) es un cantante, compositor y productor discográfico estadounidense, reconocido por haber sido el cantante de la agrupación The Beau Brummels.'

df_test.loc[80, 'text_model_v11'] = '<NAME> (Distrito de Sumbilca, Huaral, 28 de mayo de 1969-Lima, 28 de mayo de 2007), más conocida como <NAME>, fue una cantante folclórica de huayno y huaylasrh reconocida en el Perú.'

df_test.loc[81, 'text_model_v11'] = '<NAME>, es un músico venezolano, concertista de cuatro venezolano, guitarra, compositor, arreglista y docente.'

df_test.loc[84, 'text_model_v11'] = '<NAME> (nacida el 13 de julio de 1973), músico, es la vocalista y principal letrista de la banda chilena Crisálida, que toma elementos del rock, el Metal y la música progresiva.'

df_test.loc[85, 'text_model_v11'] = '<NAME> (Newcastle, 22 de abril de 1979) es un músico, vocalista, compositor, guitarrista y pianista australiano. Fue el líder de la banda de rock Silverchair.'

df_test.loc[86, 'text_model_v11'] = '<NAME>, CBE (Wallsend, Tyneside del Norte, Inglaterra, 2 de octubre de 1951), más conocido como Sting, es un músico británico que se desempeñó inicialmente como bajista, y más tarde como cantante y bajista del grupo musical The Police, formando luego su propia banda.'

df_test.loc[87, 'text_model_v11'] = '<NAME> (Seúl, 29 de noviembre de 1990), conocido como Minhyuk, es un cantante, rapero, actor y MC surcoreano. Es integrante de grupo masculino BtoB'

df_test.loc[91, 'text_model_v11'] = '<NAME> (n. 15 de junio de 1951) es un músico, cantante y compositor conocido principalmente por su trabajo como miembro de la banda estadounidense de rock progresivo Kansas.'

df_test.loc[96, 'text_model_v11'] = '<NAME> (Okemah, Oklahoma, 14 de julio de 1912-Nueva York, 3 de octubre de 1967), conocido como <NAME>, fue un músico y cantautor folk estadounidense.'

df_test.loc[99, 'text_model_v11'] = '<NAME> (Bilbao, 3 de agosto de 1964), conocido como simplemente <NAME> o con el apodo de Uoho, es un músico multiinstrumentista, compositor y productor de rock español. Fue guitarrista de la bandas de rock Extremoduro.'

# # Save data for evaluation

# **For human evaluation**

pickle.dump(df_test, open( "../Evaluation/eval_data.p", "wb"))


# **For automatic evaluation**

def write_file(file_name, text):
    """Write `text` to <Evaluation dir>/<file_name>.txt (overwriting).

    FIX: uses a `with` block so the handle is closed even if the write
    raises (the original left the file object open on error).
    """
    file_name = '/home/hlaboa-server/jupyter/TFM/nlg_wikimusica/Evaluation/'+file_name+'.txt'
    with open(file_name, 'w') as text_file:
        text_file.write(text)


# +
# FIX: this cell previously started from an undefined name `eval_data`;
# it now derives the evaluation frame from the df_test built above.
eval_data = df_test.copy()

# Remove <NAME>, it's duplicates
eval_data = eval_data.drop([70])

# Take just 80 rows for evaluation
eval_data = eval_data.iloc[:80]

# FIX: the original listed the non-existent column 'text_model_v12' for the
# t5-base relaxed system; the relaxed t5-base outputs live in 'text_model_v6'
# (matching the 't5-base_relaxed_b10' file name).
col_names = ['reference', 'text_model_v1', 'text_model_v2', 'text_model_v3',
             'text_model_v4', 'text_model_v5', 'text_model_v6', 'text_model_v7',
             'text_model_v8', 'text_model_v9', 'text_model_v10']
file_names = ['references', 't5-base_vanilla_b0', 't5-base_strict_b0',
              't5-base_strict_b2', 't5-base_strict_b5', 't5-base_strict_b10',
              't5-base_relaxed_b10', 't5-small_strict_b10',
              't5-small_relaxed_b10', 'mt5-small_strict_b10',
              'mt5-small_relaxed_b10']

# One plain-text file per system, one generated text per line.
for ind,col in enumerate(col_names):
    text = '\n'.join(eval_data[col].tolist())
    write_file(file_names[ind], text)
# -

# ____
Scripts/7_evaluation_I.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
import musicalrobot

# +
# sys.path.insert(0, '../musicalrobot/')
# -

from musicalrobot import irtemp
from musicalrobot import edge_detection as ed
from musicalrobot import pixel_analysis as pa
from musicalrobot import data_encoding as de

# %matplotlib inline

# Inputting the video as a stack of arrays
frames = ed.input_file('../musicalrobot/data/10_17_19_PPA_Shallow_plate.tiff')

len(frames)

plt.imshow(frames[515])

# Cropping all the frames in the video
# (rows 25:90, cols 50:120 keep just the sample plate region)
crop_frame = []
for frame in frames:
    crop_frame.append(frame[25:90,50:120])

plt.imshow(crop_frame[515])
plt.colorbar()

# Performing Image equalization to determine sample position
img_eq = pa.image_eq(crop_frame)

# Determining the sum of pixels in each column and row
column_sum, row_sum = pa.pixel_sum(img_eq)

# Determining the plate and sample locations
# (3x3 well grid; freeze_heat=False selects the heating analysis mode —
#  see musicalrobot.pixel_analysis for the exact semantics)
r_peaks, c_peaks = pa.peak_values(column_sum, row_sum, 3, 3, freeze_heat=False)

sample_location = pa.locations(r_peaks, c_peaks, img_eq)

r_peaks

c_peaks

sample_location

# Extracting temperature profiles at all the sample and plate locations
temp, plate_temp = pa.pixel_intensity(sample_location, crop_frame, 'Row', 'Column', 'plate_location')

# +
# Uncomment the following block of code to save the temperature profile of the samples as a pickle
# import pickle
# with open('ppa_temp.pkl','wb') as f:
#     pickle.dump(temp, f)

# Uncomment the following block of code to save the temperature profile data of the plate locations as a pickle
# import pickle
# with open('ppa_plate_temp.pkl','wb') as f:
#     pickle.dump(plate_temp, f)
# -

# Finding inflection temperature
s_peaks, s_infl = ed.peak_detection(temp, plate_temp, 'Sample')

np.asarray(s_infl)[:,0]

result_df = de.final_result(temp, plate_temp, path='../musicalrobot/data/')

result_df

# ### Using wrapping function

# pixel_temp bundles the whole pipeline above (crop -> equalize -> locate ->
# extract -> inflection) into a single call.
result_df1 = pa.pixel_temp(crop_frame,n_columns = 3, n_rows = 3, freeze_heat=False, path='../musicalrobot/data/')

result_df1

# Plotting temperature profiles (one figure per sample well)
for i in range(len(temp)):
    plt.plot(plate_temp[i], temp[i])
    plt.title('PPA')
    plt.xlabel('Plate temperature($^{\circ}$C)')
    plt.ylabel('Temperature($^{\circ}$C)')
    # plt.savefig('../temp_profiles/ppa_'+ str(i+1)+ '.png')
    plt.show()
examples/10_17_19_PPA_Shallow_plate-pa.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import nltk

nltk.download_shell()  # l to list then d to download then download stopwords package

import string

mess = 'Sample message! Notice: me you i am punctunation.'

# remove punctuation
nopunc = [c for c in mess if c not in string.punctuation]

type(nopunc)  # list

# convert list of characters to a string
nopunc = ''.join(nopunc)  # ['a', 'b', 'c'] = 'abc'

type(nopunc)  # str

nopunc.split()  # to split it again

# +
# remove stop words
from nltk.corpus import stopwords

stopwords.words('english')  # me, my, myself, we, you

# change it all to lower case and iterate to remove stop words
clean_mess = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]

clean_mess


# +
# put it into a function
def text_process(mess):
    """
    1. remove punctuation
    2. remove stop words
    3. return list of clean text words

    Usage: pdmessages['message'].apply(text_process)
    """
    nopunc = [c for c in mess if c not in string.punctuation]
    nopunc = ''.join(nopunc)  # ['a', 'b', 'c'] = 'abc'
    # Build the stopword set ONCE per call. The original looked up
    # stopwords.words('english') for every word, re-reading the corpus
    # file each time and doing a linear scan of a ~180-element list --
    # O(words * stopwords) and very slow when applied over a whole
    # pandas column. A set gives O(1) membership with identical results.
    stop_words = set(stopwords.words('english'))
    return [word for word in nopunc.split() if word.lower() not in stop_words]
# -
vm6_ML_NLP_basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 with Spark 2.1 # language: python # name: python2-spark21 # --- from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) import tensorflow as tf # %matplotlib inline import matplotlib.pyplot as plt batch_xs, batch_ys = mnist.train.next_batch(1) X = batch_xs X = X.reshape([28, 28]); plt.gray() plt.imshow(X) x = tf.placeholder(tf.float32, [None, 784]) W = tf.Variable(tf.zeros([784, 10])) b = tf.Variable(tf.zeros([10])) y = tf.nn.softmax(tf.matmul(x, W) + b) y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) sess = tf.InteractiveSession() tf.global_variables_initializer().run() for _ in range(1000): batch_xs, batch_ys = mnist.train.next_batch(100) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}) correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
coursera_ai/week2/tensorflow/tfintro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example ODE # In this notebook we provide a simple example of the DeepMoD algorithm by applying it on the Burgers' equation. # # We start by importing the required libraries and setting the plotting style: # + # General imports import numpy as np import torch import matplotlib.pylab as plt # DeepMoD stuff from deepymod_torch.DeepMod import DeepMod from deepymod_torch.training import train_deepmod, train_mse from deepymod_torch.library_functions import library_1D_in from scipy.integrate import odeint # Settings for reproducibility np.random.seed(40) torch.manual_seed(0) # %load_ext autoreload # %autoreload 2 # - # Next, we prepare the dataset. def dU_dt_sin(U, t): # Here U is a vector such that y=U[0] and z=U[1]. This function should return [y', z'] return [U[1], -1*U[1] - 5*np.sin(U[0])] U0 = [2.5, 0.4] ts = np.linspace(0, 8, 500) Y = odeint(dU_dt_sin, U0, ts) T = ts.reshape(-1,1) # Here we can potentially rescale the Y and T axis and we plot the results T_rs = T Y_rs = Y/np.max(np.abs(Y),axis=0) # Let's plot it to get an idea of the data: # + fig, ax = plt.subplots() ax.plot(T_rs, Y_rs[:,0]) ax.plot(T_rs, Y_rs[:,1]) ax.set_xlabel('t') plt.show() # + number_of_samples = 400 idx = np.random.permutation(Y.shape[0]) X_train = torch.tensor(T_rs[idx, :][:number_of_samples], dtype=torch.float32, requires_grad=True) y_train = torch.tensor(Y_rs[idx, :][:number_of_samples], dtype=torch.float32) # - print(X_train.shape, y_train.shape) # # Setup a custom library from torch.autograd import grad from itertools import combinations, product from functools import reduce # Here we show an example where we create a custom library. 
# $\theta$ in this case contains $[1, u, v, \cos(u), \sin(u)]$ to showcase that
# non-linear terms can easily be added to the library
# (continuation of the markdown cell above; note the actual column order
# built below is [1, u, v, cos(u), sin(u)])

def library_non_linear_ODE(input, poly_order, diff_order):
    """Custom DeepMoD library function for a 2-state non-linear ODE.

    Parameters
    ----------
    input : tuple (prediction, data)
        prediction: network output of shape (samples, 2), the states u and v.
        data: the network input (time), which must have requires_grad=True so
        time derivatives can be taken with autograd.
    poly_order, diff_order : int
        Unused in this custom library; kept so the signature matches the
        generic library interface that DeepMoD calls with
        config['library_args'].

    Returns
    -------
    time_deriv_list : list of (samples, 1) tensors, one per output state.
    theta : (samples, 5) candidate-term matrix [1, u, v, cos(u), sin(u)].
    """
    prediction, data = input
    samples = prediction.shape[0]
    # Construct the theta matrix: columns [1, u, v, cos(u), sin(u)]
    C = torch.ones_like(prediction[:,0]).view(samples, -1)
    u = prediction[:,0].view(samples, -1)
    v = prediction[:,1].view(samples, -1)
    theta = torch.cat((C, u, v, torch.cos(u), torch.sin(u)),dim=1)
    # Construct a list of time_derivatives, one entry per output state.
    time_deriv_list = []
    for output in torch.arange(prediction.shape[1]):
        # create_graph=True keeps the autograd graph so these derivatives
        # can themselves be differentiated during training.
        dy = grad(prediction[:,output], data, grad_outputs=torch.ones_like(prediction[:,output]), create_graph=True)[0]
        time_deriv = dy[:, 0:1]
        time_deriv_list.append(time_deriv)
    return time_deriv_list, theta

# ## Configuring DeepMoD

# We now setup the options for DeepMoD. The setup requires the dimensions of the neural network, a library function and some args for the library function:

## Running DeepMoD
config = {'n_in': 1, 'hidden_dims': [40, 40, 40, 40, 40, 40], 'n_out': 2, 'library_function': library_non_linear_ODE, 'library_args':{'poly_order': 1, 'diff_order': 0}}

# Now we instantiate the model. Note that the learning rate of the coefficient vector can typically be set up to an order of magnitude higher to speed up convergence without loss in accuracy

model = DeepMod(**config)
optimizer = torch.optim.Adam([{'params': model.network_parameters(), 'lr':0.001}, {'params': model.coeff_vector(), 'lr':0.005}])

# ## Run DeepMoD

# We can now run DeepMoD using all the options we have set and the training data.
We need to slightly preprocess the input data for the derivatives: train_deepmod(model, X_train, y_train, optimizer, 50000, {'l1': 1e-5}) # Now that DeepMoD has converged, it has found the following numbers: solution = model(X_train)[0].detach().numpy() np.max(np.abs(Y),axis=0) print(model.fit.coeff_vector[0],model.fit.coeff_vector[1]) plt.scatter(X_train.detach().numpy().squeeze(),solution[:,0]) plt.plot(T_rs,Y_rs[:,0]) plt.scatter(X_train.detach().numpy().squeeze(),solution[:,1]) plt.plot(T_rs,Y_rs[:,1])
examples/ODE_Example_coupled_nonlin.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="LxUsSi6g6AfW" colab_type="text" # # Introduction # # In this tutorial, we'll discuss one of the foundational algorithms of machine learning in this post: *Linear regression*. We'll create a model that predicts crop yields for apples and oranges (*target variables*) by looking at the average temperature, rainfall and humidity (*input variables or features*) in a region. # # # Problem Statement # Here's the training data: # # ![linear-regression-training-data](https://i.imgur.com/6Ujttb4.png) # # In a linear regression model, each target variable is estimated to be a weighted sum of the input variables, offset by some constant, known as a biasย : # # ``` # yield_apple = w11 * temp + w12 * rainfall + w13 * humidity + b1 # yield_orange = w21 * temp + w22 * rainfall + w23 * humidity + b2 # ``` # # Visually, it means that the yield of apples is a linear or planar function of temperature, rainfall and humidity: # # ![linear-regression-graph](https://i.imgur.com/4DJ9f8X.png) # # The *learning* part of linear regression is to figure out a set of weights `w11, w12,... w23, b1 & b2` by looking at the training data, to make accurate predictions for new data (i.e. to predict the yields for apples and oranges in a new region using the average temperature, rainfall and humidity). This is done by adjusting the weights slightly many times to make better predictions, using an optimization technique called *gradient descent*. # - # ! python -m pip -q install pip # ! python -m pip -q install jovian # ! 
python -m pip -q install numpy torch torchvision # + id="SenRKV1Zfj9l" colab_type="code" colab={} import torch import numpy as np # + [markdown] id="8uLlXXdd6Kks" colab_type="text" # # Training Data # + id="svqL-KEnf0R8" colab_type="code" colab={} X = np.array([ [73, 67, 43], [91, 88, 43], [87, 134, 43], [102, 43, 43], [69, 96, 43], ], dtype='float32') # Targets (apples, oranges) Y = np.array([[56, 70], [81, 101], [119, 133], [22, 37], [103, 119]], dtype='float32') # + id="xSoHJ5W-5o-D" colab_type="code" outputId="d9617d16-088d-46aa-9929-e621bd55f36b" colab={"base_uri": "https://localhost:8080/", "height": 50} # convert inputs and targets to tensors inputs = torch.from_numpy(X) targets = torch.from_numpy(Y) print(inputs.shape) print(targets.shape) # + [markdown] id="w8kuf5HM7eT3" colab_type="text" # ## Linear regression model fromย scratch # # The weights and biases (`w11, w12,... w23, b1 & b2`) can also be represented as matrices, initialized as random values. The first row of `w` and the first element of `b` are used to predict the first target variable i.e. yield of apples, and similarly the second for oranges. # + id="nY8JeRYH7P54" colab_type="code" outputId="1d05dfde-c7bb-47ee-8bf7-924906de4749" colab={"base_uri": "https://localhost:8080/", "height": 67} w = torch.randn(2, 3, requires_grad=True) b = torch.randn(1, 2, requires_grad=True) print(w) print(b) # + [markdown] id="r7g-Ts6p77Je" colab_type="text" # `torch.randn` creates a tensor with the given shape, with elements picked randomly from a [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) with mean 0 and standard deviation 1. # # Our *model* is simply a function that performs a matrix multiplication of the `inputs` and the weights `w` (transposed) and adds the bias `b` (replicated for each observation). 
# # ![matrix-mult](https://i.imgur.com/WGXLFvA.png) # # We can define the model as follows: # + id="9mY8vQ397xf8" colab_type="code" colab={} def LinearRegression(X): return X @ w.t() + b # + [markdown] id="URySj18r8SRy" colab_type="text" # `@` represents matrix multiplication in PyTorch, and theย `.t` method returns the transpose of a tensor. # # The matrix obtained by passing the input data into the model is a set of predictions for the target variables. # + id="6X_o8bv-8Eik" colab_type="code" outputId="5cb28e3b-db1e-41e4-b186-acd6465d61a4" colab={"base_uri": "https://localhost:8080/", "height": 100} preds = LinearRegression(inputs) print(preds) # + id="Z-6Uc8-C8qJS" colab_type="code" outputId="2dc8ca5a-ac49-42eb-83ed-5be602bc7135" colab={"base_uri": "https://localhost:8080/", "height": 100} print(targets) # + id="4sfrLq0R9S3Q" colab_type="code" outputId="6ef81d4b-d87f-4b51-e894-e83a761da4eb" colab={"base_uri": "https://localhost:8080/", "height": 33} diff = (preds - targets) # diff.numel() torch.sum(diff ** 2)/diff.numel() # + [markdown] id="3WUc_xZr9Ehm" colab_type="text" # ## Loss function # # Before we improve our model, we need a way to evaluate how well our model is performing. We can compare the model's predictions with the actual targets, using the following method: # # * Calculate the difference between the two matrices (`preds` and `targets`). # * Square all elements of the difference matrix to remove negative values. # * Calculate the average of the elements in the resulting matrix. # # The result is a single number, known as the **mean squared error** (MSE). # + id="glTDAlAf880M" colab_type="code" colab={} # MSE loss def MSE(t1, t2): diff = t1 - t2 return torch.sum(diff * diff) / diff.numel() # + [markdown] id="d3lXfecq92kN" colab_type="text" # Hereโ€™s how we can interpret the result: *On average, each element in the prediction differs from the actual target by about 178.3390 (square root of the loss 31804.8008)*. 
And thatโ€™s pretty bad, considering the numbers we are trying to predict are themselves in the range 50โ€“200. Also, the result is called the *loss*, because it indicates how bad the model is at predicting the target variables. Lower the loss, better the model. # + id="p9u2kav99z76" colab_type="code" outputId="442ca2c3-dd4c-468c-ee2c-70545f82f136" colab={"base_uri": "https://localhost:8080/", "height": 33} # compute loss loss = MSE(preds, targets) print(loss) # + [markdown] id="BV8VMU6j-mto" colab_type="text" # ## Compute gradients # # With PyTorch, we can automatically compute the gradient or derivative of the loss w.r.t. to the weights and biases, because they have `requires_grad` set to `True`. # + id="zFRGP39O9_hZ" colab_type="code" colab={} # gradient computation loss.backward() # + [markdown] id="W8J6IaSm-t1T" colab_type="text" # The gradients are stored in theย `.grad` property of the respective tensors. Note that the derivative of the loss w.r.t. the weights matrix is itself a matrix, with the same dimensions. # + id="hb06ZZv0-rXC" colab_type="code" outputId="6514676f-889d-4fbe-e86c-494eefca8fd5" colab={"base_uri": "https://localhost:8080/", "height": 0} print(w) print(w.grad) # + [markdown] id="vyIr_T0g_Fb5" colab_type="text" # The loss is a [quadratic function](https://en.wikipedia.org/wiki/Quadratic_function) of our weights and biases, and our objective is to find the set of weights where the loss is the lowest. If we plot a graph of the loss w.r.t any individual weight or bias element, it will look like the figure shown below. A key insight from calculus is that the gradient indicates the rate of change of the loss, or the [slope](https://en.wikipedia.org/wiki/Slope) of the loss function w.r.t. the weights and biases. # # If a gradient element is **positive**: # * **increasing** the element's value slightly will **increase** the loss. 
# * **decreasing** the element's value slightly will **decrease** the loss # # ![postive-gradient](https://i.imgur.com/hFYoVgU.png) # # If a gradient element is **negative**: # * **increasing** the element's value slightly will **decrease** the loss. # * **decreasing** the element's value slightly will **increase** the loss. # # ![negative=gradient](https://i.imgur.com/w3Wii7C.png) # # The increase or decrease in loss by changing a weight element is proportional to the value of the gradient of the loss w.r.t. that element. This forms the basis for the optimization algorithm that we'll use to improve our model. # + [markdown] id="cfRM8zqVBCsc" colab_type="text" # Before we proceed, we reset the gradients to zero by callingย `.zero_()` method. We need to do this, because PyTorch accumulates, gradients i.e. the next time we callย `.backward` on the loss, the new gradient values will get added to the existing gradient values, which may lead to unexpected results. # + id="4pMCvDBP-wsZ" colab_type="code" outputId="ee040cf5-3d14-425f-952c-155c0b6c12f9" colab={"base_uri": "https://localhost:8080/", "height": 0} w.grad.zero_() b.grad.zero_() print(w.grad) print(b.grad) # + id="Iw9zRfY_BH4S" colab_type="code" outputId="f8641b8c-e5e7-4989-fb58-6903f321887a" colab={"base_uri": "https://localhost:8080/", "height": 0} w, b # + [markdown] id="gWCbP4utBlTc" colab_type="text" # ## Adjust weights and biases using gradientย descent # # We'll reduce the loss and improve our model using the gradient descent optimization algorithm, which has the following steps: # # 1. Generate predictions # # 2. Calculate the loss # # 3. Compute gradients w.r.t the weights and biases # # 4. Adjust the weights by subtracting a small quantity proportional to the gradient # # 5. Reset the gradients to zero # # Let's implement the above step by step. 
# + id="T4yMqaWMBhuD" colab_type="code" outputId="9063c6a5-b7a8-48da-86be-81918ada2ce4" colab={"base_uri": "https://localhost:8080/", "height": 0} # generate predictions preds = LinearRegression(inputs) print(f"Predictions: {preds}") # calculate the loss loss = MSE(preds, targets) print(loss) # computer gradients loss.backward() print(w.grad) print(b.grad) # adjust weights & reset gradients with torch.no_grad(): w -= w.grad * 1e-5 b -= b.grad * 1e-5 w.grad.zero_() b.grad.zero_() # + [markdown] id="I97AR4qYDEdE" colab_type="text" # A few things to note above: # # * We use `torch.no_grad` to indicate to PyTorch that we shouldn't track, calculate or modify gradients while updating the weights and biases.ย  # # * We multiply the gradients with a really small number (`10^-5` in this case), to ensure that we don't modify the weights by a really large amount, since we only want to take a small step in the downhill direction of the gradient. This number is called the *learning rate* of the algorithm.ย  # # * After we have updated the weights, we reset the gradients back to zero, to avoid affecting any future computations. # + id="WUHOhkm3DE6f" colab_type="code" outputId="eaa1b907-8351-45a3-9dfb-f0edc7a7119d" colab={"base_uri": "https://localhost:8080/", "height": 0} # Let's take a look at the new weights and biases. print(w) print(b) # + [markdown] id="kJACzHwlDZpF" colab_type="text" # We have already achieved a significant reduction in the loss, simply by adjusting the weights and biases slightly using gradient descent. # + id="tQfqJFFtDUt_" colab_type="code" outputId="1c45e56e-1dff-48d3-d2f8-519b6e971961" colab={"base_uri": "https://localhost:8080/", "height": 0} # calculate loss preds = LinearRegression(inputs) loss = MSE(preds, targets) print(loss) # + [markdown] id="C_CVHlX6Endg" colab_type="text" # ## Train for multipleย epochs # # To reduce the loss further, we can repeat the process of adjusting the weights and biases using the gradients multiple times. 
Each iteration is called an epoch. Let's train the model for 100 epochs. # + [markdown] id="yi6hQ27YEjH4" colab_type="text" # We have already achieved a significant reduction in the loss, simply by adjusting the weights and biases slightly using gradient descent. # + id="5eyVPYRrEftJ" colab_type="code" colab={} for i in range(1000): preds = LinearRegression(inputs) loss = MSE(preds, targets) loss.backward() with torch.no_grad(): w -= w.grad * 1e-5 b -= b.grad * 1e-5 w.grad.zero_() b.grad.zero_() # + id="Yu-K9UBqE8cj" colab_type="code" outputId="c11df762-6404-4a7b-edec-be6e11f61d4d" colab={"base_uri": "https://localhost:8080/", "height": 33} # calculate loss preds = LinearRegression(inputs) loss = MSE(preds, targets) print(loss) # + id="6EazF9rZFCaI" colab_type="code" outputId="d166f081-2bde-4d16-d756-944a9c3dd848" colab={"base_uri": "https://localhost:8080/", "height": 100} preds # + id="PZwnPliNInTz" colab_type="code" outputId="88358140-5dde-4359-c004-553a90d37e44" colab={"base_uri": "https://localhost:8080/", "height": 100} targets # + [markdown] id="wRLNTijMJqHb" colab_type="text" # ## Linear regression using PyTorch built-ins # # The model and training process above were implemented using basic matrix operations. But since this such a common patternย , PyTorch has several built-in functions and classes to make it easy to create and train models. # # Let's begin by importing the `torch.nn` package from PyTorch, which contains utility classes for building neural networks. # + id="llQyUdRWI51x" colab_type="code" colab={} import torch.nn as nn # + [markdown] id="1MvT84kaJymi" colab_type="text" # As before, we represent the inputs and targets and matrices. 
# + id="KCfIBHmGJs4i" colab_type="code" colab={} # Input (temp, rainfall, humidity) X = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70]], dtype='float32') # Targets (apples, oranges) Y = np.array([[56, 70], [81, 101], [119, 133], [22, 37], [103, 119], [56, 70], [81, 101], [119, 133], [22, 37], [103, 119], [56, 70], [81, 101], [119, 133], [22, 37], [103, 119]], dtype='float32') inputs = torch.from_numpy(X) targets = torch.from_numpy(Y) # + id="ssBf0jMQJ2Il" colab_type="code" outputId="89a6a1d6-2f19-4a86-efab-2df0f2eb4a08" colab={"base_uri": "https://localhost:8080/", "height": 0} inputs, targets # + id="y4FZ2KZWOsU_" colab_type="code" outputId="ee047f13-c4a3-404b-eb21-d09b9b84b0d1" colab={"base_uri": "https://localhost:8080/", "height": 0} inputs.shape, targets.shape # + [markdown] id="rA86_c9vKM80" colab_type="text" # We are using 15 training examples this time, to illustrate how to work with large datasets in small batches. # + [markdown] id="61GeJn4yKRDN" colab_type="text" # ## Dataset and DataLoader # # We'll create a `TensorDataset`, which allows access to rows from `inputs` and `targets` as tuples, and provides standard APIs for working with many different types of datasets in PyTorch. # + id="37MnEP29J-wV" colab_type="code" colab={} from torch.utils.data import TensorDataset # + id="GzZdHYUFKnmf" colab_type="code" outputId="4eb583b3-de13-4c40-9daf-f6bb2a31dc10" colab={"base_uri": "https://localhost:8080/", "height": 0} train_ds = TensorDataset(inputs, targets) train_ds[:3] # + [markdown] id="PPOhFK8nO38Q" colab_type="text" # The `TensorDataset` allows us to access a small section of the training data using the array indexing notation (`[0:3]` in the above code). 
It returns a tuple (or pair), in which the first element contains the input variables for the selected rows, and the second contains the targets. # # We'll also create a `DataLoader`, which can split the data into batches of a predefined size while training. It also provides other utilities like shuffling and random sampling of the data. # + id="RcJrKfdAKxLA" colab_type="code" colab={} from torch.utils.data import DataLoader # + id="wgCHCJP6PQ67" colab_type="code" colab={} # Define data loader batch_size = 5 train_dl = DataLoader(train_ds, batch_size, shuffle=True) # + [markdown] id="lSeUS40RPcAq" colab_type="text" # The data loader is typically used in a `for-in` loop. Let's look at an example. # + id="58X-FJXJPU9n" colab_type="code" outputId="5f0b73f6-31a7-4cfd-e3de-83337dc9203c" colab={"base_uri": "https://localhost:8080/", "height": 0} for xb, yb in train_dl: print(xb) print(yb) break # + [markdown] id="Od-hzjFHQEpT" colab_type="text" # In each iteration, the data loader returns one batch of data, with the given batch size. If `shuffle` is set to `True`, it shuffles the training data before creating batches. Shuffling helps randomize the input to the optimization algorithm, which can lead to faster reduction in the loss. # + [markdown] id="o2N6F4GKQI7X" colab_type="text" # ## nn.Linear # # Instead of initializing the weights & biases manually, we can define the model using the `nn.Linear` class from PyTorch, which does it automatically. # + id="EsCq51r6P0dE" colab_type="code" outputId="cfefda32-fdda-4b58-e0cd-18c074e2eeb4" colab={"base_uri": "https://localhost:8080/", "height": 0} # define model model = nn.Linear(3, 2) print(model.weight) print(model.bias) # + [markdown] id="1a4yN5TCQX5m" colab_type="text" # PyTorch models also have a helpful `.parameters` method, which returns a list containing all the weights and bias matrices present in the model. For our linear regression model, we have one weight matrix and one bias matrix. 
# + id="etKzuOkGQTs_" colab_type="code" outputId="4def55c9-e3a7-4095-f53d-ba2027faeb24" colab={"base_uri": "https://localhost:8080/", "height": 0} list(model.parameters()) # + [markdown] id="Z9BonT4ZQjzh" colab_type="text" # We can use the model to generate predictions in the exact same way as before: # + id="vK-wHlBkQavE" colab_type="code" outputId="157535ce-2db4-4b5c-a37e-d9e0a2383c55" colab={"base_uri": "https://localhost:8080/", "height": 0} preds = model(inputs) print(preds) # + [markdown] id="OCfAtDIERnw3" colab_type="text" # ## Loss Function # # Instead of defining a loss function manually, we can use the built-in loss function `mse_loss`. # + id="HP6meuMzRd1m" colab_type="code" colab={} from torch.nn import functional as func # + id="q-8Bit05Rtt8" colab_type="code" colab={} loss_fn = func.mse_loss # + id="P-ezX1tBR9_4" colab_type="code" outputId="831d2608-c309-417d-b753-604b754c678a" colab={"base_uri": "https://localhost:8080/", "height": 0} loss = loss_fn(model(inputs), targets) print(loss) # + [markdown] id="hlVl5fA7SWzh" colab_type="text" # ## Optimizer # # Instead of manually manipulating the model's weights & biases using gradients, we can use the optimizer `optim.SGD`. SGD stands for `stochastic gradient descent`. It is called `stochastic` because samples are selected in batches (often with random shuffling) instead of as a single group. # + id="iVgjRupjSDT7" colab_type="code" colab={} from torch import optim opt = optim.SGD(model.parameters(), lr=1e-5) # + [markdown] id="4yWzYIUUSqEY" colab_type="text" # Note that `model.parameters()` is passed as an argument to `optim.SGD`, so that the optimizer knows which matrices should be modified during the update step. Also, we can specify a learning rate which controls the amount by which the parameters are modified. # + [markdown] id="OBfff-XISqIf" colab_type="text" # ## Train the model # # We are now ready to train the model. We'll follow the exact same process to implement gradient descent: # # 1. 
# Generate predictions  (tail of the numbered markdown list started above)
#
# 2. Calculate the loss
#
# 3. Compute gradients w.r.t the weights and biases
#
# 4. Adjust the weights by subtracting a small quantity proportional to the gradient
#
# 5. Reset the gradients to zero
#
# The only change is that we'll work batches of data, instead of processing the entire training data in every iteration. Let's define a utility function `fit` which trains the model for a given number of epochs.

# + [markdown] id="mzboOGbNds_R" colab_type="text"
# * The model's parameter gradients are calculated by `loss.backward()`, i.e. from `loss = loss_fn(model(..), y)`.
# * Because of this the model's parameter gradients are calculated and stored on the parameters themselves, i.e. in `model.weight.grad`; the optimizer uses these gradients to update the weights.
#
# [Stack overflow](https://stackoverflow.com/a/53975741/6805747)

# + id="RLQr7xJ5Sl4i" colab_type="code" colab={}
def fit(num_epochs, model, loss_function, optimizer, data_loader):
    """Train `model` in place for `num_epochs` epochs over `data_loader`.

    For every mini-batch: forward pass, compute the loss, backpropagate,
    take an optimizer step, then zero the gradients (PyTorch accumulates
    gradients across backward calls, so they must be reset each step).
    Prints the last mini-batch loss every 100 epochs; returns None.
    """
    for epoch in range(num_epochs):
        for xb, yb in data_loader:
            # generate predictions
            pred = model(xb)
            # calculate loss
            loss = loss_function(pred, yb)
            # compute gradients
            # print("before")
            # print(list(model.parameters()))
            # print(model.weight.grad)
            loss.backward()
            # update parameters using gradients
            optimizer.step()
            # print("after")
            # print(list(model.parameters()))
            # print(model.weight.grad)
            # reset gradients to zero
            optimizer.zero_grad()
            # break
        # break
        # Print the progress
        if (epoch+1) % 100 == 0:
            print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))


# + id="v6Si5g69aPTS" colab_type="code" outputId="1c70415c-a2aa-4900-c828-62ed728cb207" colab={"base_uri": "https://localhost:8080/", "height": 184}
fit(1000, model, loss_fn, opt, train_dl)

# + id="4F6jJ9r1adde" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 268} outputId="c0b98663-de41-43e1-e158-24f4a044da25"
preds = model(inputs)
preds
# -

targets

# +
import jovian

jovian.commit()
Week 1/02-Linear-Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Writing markdown documents using jupyter notebook # This guide explains how to use *jupyter notebook* to create markdown documents containing formulae and figures. First, change the type of the cell to *Markdown*: # ![](images/doc_howto/markdown_menu.png) # Then write what your text using the markdown syntax. Do not worry ff you are not familiar with the syntax. It takes only a few minutes to master it. Here is a [guide](https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Working%20With%20Markdown%20Cells.html) for the markdown syntax. The figure below shows an example paragraph: # ![](images/doc_howto/markdown_source.png) # When you "run" this cell, the markdown code is rendered into this: # ![](images/doc_howto/markdown_rendered.png) # We can also easily add images to the document using the markdown syntax: # ![](images/doc_howto/markdown_image_source.png) # which will be rendered into the following: # ![](images/doc_howto/markdown_image_rendered.png) # Once you are done preparing the document, you can simply upload it to *GitHub* and it will be rendered there properly. # ## How to push your notebook to the [repository](https://github.com/babaki3/covid-19-model) # # 1. Put your notebook in the `docs` directory # 2. If you have images, put all of them in a directory (say `images_bp`) and put that directory in `docs/images`). # # Make sure that your notebook renders correctly, that is, all images are referring to the correct locations. Then commit these changes and submit a [pull request](https://github.com/babaki3/covid-19-model/pulls).
docs/doc_howto.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# DAY4: baseline price models for the men's-shoes dataset.
# (Colab cell metadata stripped for readability.)

import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error, SCORERS
from sklearn.model_selection import cross_val_score

# List the scoring names accepted by cross_val_score.
sorted(SCORERS.keys())

# Notebook working-directory setup (Colab).
# cd 'drive/My Drive/Colab Notebooks/dw_matrix'
# %pwd

df = pd.read_csv('data/men_shoes.csv', low_memory=False)
df.shape

df.columns

# Baseline 1: predict the global mean price for every row.
mean_price = np.mean(df.prices_amountmin)
mean_price

[7] * 3  # quick reminder of how list repetition works

y_true = df.prices_amountmin
y_pred = [mean_price] * y_true.shape[0]

mean_absolute_error(y_true, y_pred)

# Price distribution on a log scale.
# FIX: the original called np.log1p(y_true + 1), which shifts by 1 twice —
# np.log1p(x) already computes log(1 + x).
np.log1p(y_true).hist(bins=100)

np.log(0)      # -inf (with a RuntimeWarning): why the +1 shift is needed
np.log(0 + 1)  # 0.0

# Baseline 2: the median is more robust to price outliers than the mean.
y_pred = [np.median(y_true)] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)

# Baseline 3: mean computed in log space (log is symmetric, so outliers
# weigh less), mapped back with expm1 (the inverse of log1p).
price_log_mean = np.expm1(np.mean(np.log1p(y_true)))
y_pred = [price_log_mean] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)

df.brand.value_counts()


def run_model(features):
    """Cross-validate a depth-5 decision tree on the given feature columns.

    Scoring is negative mean absolute error, so values closer to zero are
    better.  Returns (mean, std) of the CV scores.
    """
    X = df[features].values
    y = df.prices_amountmin.values
    model = DecisionTreeRegressor(max_depth=5)
    scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error')
    return np.mean(scores), np.std(scores)


# Encode categorical columns as integer ids and score them.
df['brand_cat'] = df.brand.factorize()[0]
run_model(['brand_cat'])

df.features.value_counts()

df["features_cat"] = df.features.factorize()[0]
run_model(['features_cat', 'brand_cat'])

# Notebook housekeeping: commit results to git (shell magics, typo fixed).
# %pwd
# %ls
# cd matrix_one/
# ls
# !git status
# !git add DAY3.ipynb DAY4.ipynb
# !git commit -m "update DAY3 and add new DAY4"
# !git config --global <EMAIL>
# !git config --global user.name "Rafal"
# !git push
matrix_one/DAY4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + from __future__ import division #from photutils import aperture_photometry #from photutils import RectangularAnnulus #import photutils import glob import re import os, sys from astropy.io.fits import getheader, getdata from astropy.wcs import WCS import astropy.units as u import numpy as np from scipy import interpolate import logging from time import time import matplotlib.pyplot as plt from pylab import * import matplotlib as mpl import matplotlib.ticker as mtick from scipy.special import gamma from astroquery.sdss import SDSS import aplpy from astropy import units as u, utils from astropy.coordinates import SkyCoord from astropy.io import fits from matplotlib import gridspec as gridspec, lines as mlines, pyplot as plt import numpy as np import pyvo as v # + def make_obj(flux, grat_wave, f_lam_index): ''' ''' #w = np.arange(3000)+3000. w = 5000 p_A = flux/(2.e-8/w)*(w/grat_wave)**f_lam_index return w, p_A def inst_throughput(wave, grat): ''' ''' eff_bl = np.asarray([0.1825,0.38,0.40,0.46,0.47,0.44]) eff_bm = np.asarray([0.1575, 0.33, 0.36, 0.42, 0.48, 0.45]) eff_bh1 = np.asarray([0., 0.0, 0.0, 0.0, 0.0, 0.]) eff_bh2 = np.asarray([0., 0.18, 0.3, 0.4, 0.28, 0.]) eff_bh3 = np.asarray([0., 0., 0., 0.2, 0.29, 0.31]) wave_0 = np.asarray([355.,380.,405.,450.,486.,530.])*10. wave_bl = np.asarray([355., 530.])*10. wave_bm = np.asarray([355., 530.])*10. wave_bh1 = np.asarray([350., 450.])*10. wave_bh2 = np.asarray([405., 486.])*10. wave_bh3 = np.asarray([405., 530.])*10. 
trans_atmtel = np.asarray([0.54, 0.55, 0.56, 0.56, 0.56, 0.55]) if grat=='BL': eff = eff_bl*trans_atmtel wave_range = wave_bl if grat=='BM': eff = eff_bm*trans_atmtel wave_range = wave_bm if grat=='BH1': eff = eff_bh1*trans_atmtel wave_range = wave_bh1 if grat=='BH2': eff = eff_bh2*trans_atmtel wave_range = wave_bh2 if grat=='BH3': eff = eff_bh3*trans_atmtel wave_range = wave_bh3 wave1 = np.ones(5000)*5000 interpfunc = interpolate.interp1d(wave_0, eff, fill_value="extrapolate") #this is the only way I've gotten this interpolation to work eff_int = interpfunc(wave1) idx = np.where((wave1 <= wave_range[0]) | (wave1 > wave_range[1])) eff_int[idx] = 0. return eff_int[0] def obj_cts(w, f0, grat, exposure_time): ''' ''' A_geo = np.pi/4.*(10.e2)**2 eff = inst_throughput(w, grat) cts = eff*A_geo*exposure_time*f0 return cts def sky(wave): ''' ''' with open('mk_sky.dat') as f: lines = (line for line in f if not line.startswith('#')) skydata = np.loadtxt(lines, skiprows=2) ws = skydata[:,0] fs = skydata[:,1] f_nu_data = getdata('lris_esi_skyspec_fnu_uJy.fits') f_nu_hdr = getheader('lris_esi_skyspec_fnu_uJy.fits') dw = f_nu_hdr["CDELT1"] w0 = f_nu_hdr["CRVAL1"] ns = len(fs) ws = np.arange(ns)*dw + w0 f_lam = f_nu_data[:len(ws)]*1e-29*3.*1e18/ws/ws interpfunc = interpolate.interp1d(ws,f_lam, fill_value="extrapolate") fs_int = interpfunc(wave) return fs_int def sky_mk(wave): ''' ''' with open('mk_sky.dat') as f: lines = (line for line in f if not line.startswith('#')) skydata = np.loadtxt(lines, skiprows=2) ws = skydata[:,0] fs = skydata[:,1] f_nu_data = getdata('lris_esi_skyspec_fnu_uJy.fits') f_nu_hdr = getheader('lris_esi_skyspec_fnu_uJy.fits') dw = f_nu_hdr["CDELT1"] w0 = f_nu_hdr["CRVAL1"] ns = len(fs) ws = np.arange(ns)*dw + w0 f_lam = f_nu_data[:len(ws)]*1e-29*3.*1e18/ws/ws p_lam = f_lam/(2.e-8/ws) interpfunc = interpolate.interp1d(ws,p_lam, fill_value="extrapolate") #using linear since argument not set in idl ps_int = interpfunc(wave) return ps_int def sky_cts(w, grat, 
exposure_time, airmass=1.2, area=1.0): ''' ''' A_geo = np.pi/4.*(10.e2)**2 eff = inst_throughput(w, grat) cts = eff*A_geo*exposure_time*sky_mk(w)*airmass*area return cts # - def ETC(slicer, grating, grat_wave, f_lam_index, seeing, exposure_time, ccd_bin, spatial_bin=[], spectral_bin=None, nas=True, sb=True, mag_AB=None, flux=None, Nframes=1, emline_width=None): """ Parameters ========== slicer: str L/M/S (Large, Medium or Small) grating: str BH1, BH2, BH3, BM, BL grating wavelength: float or int 3400. < ref_wave < 6000. f_lam_index: float source f_lam ~ lam^f_lam_index, default = 0 seeing: float arcsec exposure_time: float seconds for source image (total) for all frames ccd_bin: str '1x1','2x2'" spatial_bin: list [dx,dy] bin in arcsec x arcsec for binning extended emission flux. if sb=True then default is 1 x 1 arcsec^2' spectral_bin: float or int Ang to bin for S/N calculation, default=None nas: boolean nod and shuffle sb: boolean surface brightness m_AB in mag arcsec^2; flux = cgs arcsec^-2' mag_AB: float or int continuum AB magnitude at wavelength (ref_wave)' flux: float erg cm^-2 s^-1 Ang^1 (continuum source [total]); erg cm^-2 s^1 (point line source [total]) [emline = width in Ang] EXTENDED: erg cm^-2 s^-1 Ang^1 arcsec^-2 (continuum source [total]); erg cm^-2 s^1 arcsec^-2 (point line source [total]) [emline = width in Ang] Nframes: int number of frames (default is 1) emline_width: float flux is for an emission line, not continuum flux (only works for flux), and emission line width is emline_width Ang """ # logger = logging.getLogger(__name__) #logger.info('Running KECK/ETC') t0 = time() slicer_OPTIONS = ('L', 'M','S') grating_OPTIONS = ('BH1', 'BH2', 'BH3', 'BM', 'BL') if slicer not in slicer_OPTIONS: raise ValueError("slicer must be L, M, or S, wrongly entered {}".format(slicer)) #logger.info('Using SLICER=%s', slicer) if grating not in grating_OPTIONS: raise ValueError("grating must be L, M, or S, wrongly entered {}".format(grating)) #logger.info('Using 
GRATING=%s', grating) if grat_wave < 3400. or grat_wave > 6000: raise ValueError('wrong value for grating wavelength') #logger.info('Using reference wavelength=%.2f', grat_wave) if len(spatial_bin) != 2 and len(spatial_bin) !=0: raise ValueError('wrong spatial binning!!') #logger.info('Using spatial binning, spatial_bin=%s', str(spatial_bin[0])+'x'+str(spatial_bin[1])) bin_factor = 1. if ccd_bin == '2x2': bin_factor = 0.25 if ccd_bin == '2x2' and slicer == 'S': print('******** WARNING: DO NOT USE 2x2 BINNING WITH SMALL SLICER') read_noise = 2.7 # electrons Nf = Nframes chsz = 3 #what is this???? nas_overhead = 10. #seconds per half cycle seeing1 = seeing seeing2 = seeing pixels_per_arcsec = 1./0.147 if slicer == 'L': seeing2 = 1.38 snr_spatial_bin = seeing1*seeing2 pixels_spectral = 8 arcsec_per_slice = 1.35 if slicer == 'M': seeing2 = max(0.69,seeing) snr_spatial_bin = seeing1*seeing2 pixels_spectral = 4 arcsec_per_slice = 0.69 if slicer == 'S': seeing2 = seeing snr_spatial_bin = seeing1*seeing2 pixels_spectral = 2 arcsec_per_slice = 0.35 N_slices = seeing/arcsec_per_slice if len(spatial_bin) == 2: N_slices = spatial_bin[1]/arcsec_per_slice snr_spatial_bin = spatial_bin[0]*spatial_bin[1] pixels_spatial_bin = pixels_per_arcsec * N_slices #print("GRATING :"), grating if grating == 'BL': A_per_pixel = 0.625 if grating == 'BM': A_per_pixel = 0.28 if grating == 'BH2' or grating == 'BH3': A_per_pixel = 0.125 #print('A_per_pixel'), A_per_pixel #logger.info('f_lam ~ lam = %.2f',f_lam_index) #logger.info('SEEING: %.2f, %s', seeing, ' arcsec') #logger.info('Ang/pixel: %.2f', A_per_pixel) #logger.info('spectral pixels in 1 spectral resolution element: %.2f',pixels_spectral) A_per_spectral_bin = pixels_spectral*A_per_pixel #logger.info('Ang/resolution element: =%.2f',A_per_spectral_bin) if spectral_bin is not None: snr_spectral_bin = spectral_bin else: snr_spectral_bin = A_per_spectral_bin #logger.info('Ang/SNR bin: %.2f', snr_spectral_bin) pixels_per_snr_spec_bin = 
snr_spectral_bin/A_per_pixel #logger.info('Pixels/Spectral SNR bin: %.2f', pixels_per_snr_spec_bin) #logger.info('SNR Spatial Bin [arcsec^2]: %.2f', snr_spatial_bin) #logger.info('SNR Spatial Bin [pixels^2]: %.2f', pixels_spatial_bin) flux1 = 0 if flux is not None: flux1 = flux if flux is not None and emline_width is not None: flux1 = flux/emline_width if flux1 == 0 and emline_width is not None: raise ValueError('Dont use mag_AB for emission line') if mag_AB is not None: flux1 = (10**(-0.4*(mag_AB+48.6)))*(3.e18/grat_wave)/grat_wave w, p_A = make_obj(flux1,grat_wave, f_lam_index) if sb==False and mag_AB is not None: flux_input = ' mag_AB' #logger.info('OBJECT mag: %.2f, %s', mag_AB,flux_input) if sb==True and mag_AB is not None: flux_input = ' mag_AB / arcsec^2' #logger.info('OBJECT mag: %.2f, %s',mag_AB,flux_input) if flux is not None and sb==False and emline_width is None: flux_input = 'erg cm^-2 s^-1 Ang^-1' if flux is not None and sb==False and emline_width is not None: flux_input = 'erg cm^-2 s^-1 in '+ str(emline_width) +' Ang' if flux is not None and sb and emline_width is None: flux_input = 'erg cm^-2 s^-1 Ang^-1 arcsec^-2' if flux is not None and sb and emline_width is not None: flux_input = 'erg cm^-2 s^-1 arcsec^-2 in '+ str(emline_width) +' Ang' #if flux is not None: #logger.info('OBJECT Flux %.2f, %s',flux,flux_input) #if emline_width is not None: #logger.info('EMISSION LINE OBJECT --> flux is not per unit Ang') t_exp = exposure_time if nas==False: c_o = obj_cts(w,p_A,grating,t_exp)*snr_spatial_bin*snr_spectral_bin c_s = sky_cts(w,grating,exposure_time,airmass=1.2,area=1.0)*snr_spatial_bin*snr_spectral_bin c_r = Nf*read_noise**2*pixels_per_snr_spec_bin*pixels_spatial_bin*bin_factor snr = c_o/np.sqrt(c_s+c_o+c_r) if nas==True: n_cyc = np.floor((exposure_time-nas_overhead)/2./(nas+nas_overhead)+0.5) total_exposure = (2*n_cyc*(nas+nas_overhead))+nas_overhead logger.info('NAS: Rounding up to ',n_cyc, ' Cycles of NAS for total exposure of',total_exposure,' 
s') t_exp = n_cyc*nas c_o = obj_cts(w,p_A,grating,t_exp)*snr_spatial_bin*snr_spectral_bin c_s = sky_cts(w,grating,t_exp,airmass=1.2,area=1.0)*snr_spatial_bin*snr_spectral_bin c_r = 2.*Nf*read_noise**2*pixels_per_snr_spec_bin*pixels_spatial_bin*bin_factor snr = c_o/np.sqrt(2.*c_s+c_o+c_r) fig=figure(num=1, figsize=(12, 16), dpi=80, facecolor='w', edgecolor='k') subplots_adjust(hspace=0.001) ''' ax0 = fig.add_subplot(611) ax0.plot(w, snr, 'k-') ax0.minorticks_on() ax0.tick_params(axis='both',which='minor',direction='in', length=5,width=2) ax0.tick_params(axis='both',which='major',direction='in', length=8,width=2,labelsize=8) ylabel('SNR / %.1f'%snr_spectral_bin+r'$\rm \ \AA$', fontsize=12) ax1 = fig.add_subplot(612) ax1.plot(w,c_o, 'k--') ax1.minorticks_on() ax1.tick_params(axis='both',which='minor',direction='in',length=5,width=2) ax1.tick_params(axis='both',which='major',direction='in',length=8,width=2,labelsize=12) ylabel('Obj cts / %.1f'%snr_spectral_bin+r'$\rm \ \AA$', fontsize=12) ax2 = fig.add_subplot(613) ax2.plot(w,c_s, 'k--') ax2.minorticks_on() ax2.tick_params(axis='both',which='minor',direction='in', length=5,width=2) ax2.tick_params(axis='both',which='major',direction='in', length=8,width=2,labelsize=12) ylabel('Sky cts / %.1f'%snr_spectral_bin+r'$\rm \ \AA$', fontsize=12) ax3 = fig.add_subplot(614) ax3.plot(w,c_r*np.ones(len(w)), 'k--') ax3.minorticks_on() ax3.tick_params(axis='both',which='minor', direction='in', length=5,width=2) ax3.tick_params(axis='both',which='major', direction='in', length=8,width=2,labelsize=12) ylabel('Rd. Noise cts / %.1f'%snr_spectral_bin+r'$\rm \ \AA$', fontsize=12) ax4 = fig.add_subplot(615) yval = w[c_s > 0] num = c_o[c_s > 0] den = c_s[c_s > 0] ax4.plot(yval, num/den, 'k--') #some c_s are zeros ax4.minorticks_on() xlim(min(w), max(w)) #only show show valid data! 
ax4.tick_params(axis='both',which='minor', direction='in', length=5,width=2) ax4.tick_params(axis='both',which='major', direction='in', length=8,width=2,labelsize=12) ylabel('Obj/Sky cts /%.1f'%snr_spectral_bin+r'$\rm \ \AA$', fontsize=12) ax5 = fig.add_subplot(616) ax5.plot(w,p_A, 'k--') ax5.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e')) ax5.minorticks_on() ax5.tick_params(axis='both',which='minor',direction='in', length=5,width=2) ax5.tick_params(axis='both',which='major',direction='in', length=8,width=2,labelsize=12) ylabel('Flux ['r'$\rm ph\ cm^{-2}\ s^{-1}\ \AA^{-1}$]', fontsize=12) xlabel('Wavelength ['r'$\rm \AA$]', fontsize=12) show() fig.savefig('{}.pdf'.format('KCWI_ETC_calc'), format='pdf', transparent=True, bbox_inches='tight') ''' return(snr) logger.info('KCWI/ETC run successful!') logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s', stream=sys.stdout) logger = logging.getLogger(__name__) # + ''' @author: <NAME> 4.16.18 ''' def get_fits(RA, DEC, frame='icrs'): image = SDSS.get_images(coordinates=SkyCoord(RA*u.deg, DEC*u.deg, frame='icrs'), band='g') return image def get_image(RA, DEC, frame='icrs'): image = get_fits(RA, DEC, frame) plt.imshow(image[0][0].data, vmax=np.percentile(image[0][0].data, 99), cmap='gray') plt.show() def get_fits(RA, DEC, frame='icrs', band='g'): '''Call astroquery to retrieve SDSS fits image at specified coordinates and band.''' image = SDSS.get_images(coordinates=SkyCoord(RA*u.deg, DEC*u.deg, frame='icrs'), band=band) return image def rebin(data, factor=2): '''Rebin data''' if data.shape[0] % 2 != 0: data = data[:-1,:] if data.shape[0] % 2 != 0: data = data[:,:-1] shaper = (data.shape[0]//factor, data.shape[0]//(data.shape[0]//factor), data.shape[1]//factor, data.shape[1]//(data.shape[1]//factor)) data = data.reshape(shaper).mean(-1).mean(1) return data def plot_coords(RA, DEC, frame='icrs', band='g', vmaxpercent=99): '''Call astroquery to retrieve SDSS fits image at specified coordinates 
and band, and then automatically plot this image.''' image = get_fits(RA, DEC, frame=frame, band=band) plt.imshow(image[0][0].data, vmax=np.percentile(image[0][0].data, vmaxpercent), cmap='gray') plt.show() # + """ ##################################################################### Copyright (C) 2001-2017, <NAME> E-mail: <EMAIL> Updated versions of the software are available from my web page http://purl.org/cappellari/software If you have found this software useful for your research, we would appreciate an acknowledgment to use of `the Voronoi binning method by Cappellari & Copin (2003)'. This software is provided as is without any warranty whatsoever. Permission to use, for non-commercial purposes is granted. Permission to modify for personal or internal use is granted, provided this copyright and disclaimer are included unchanged at the beginning of the file. All other rights are reserved. ##################################################################### NAME: VORONOI_2D_BINNING AUTHOR: <NAME>, University of Oxford michele.cappellari_at_physics.ox.ac.uk PURPOSE: Perform adaptive spatial binning of Integral-Field Spectroscopic (IFS) data to reach a chosen constant signal-to-noise ratio per bin. This method is required for the proper analysis of IFS observations, but can also be used for standard photometric imagery or any other two-dimensional data. This program precisely implements the algorithm described in section 5.1 of the reference below. EXPLANATION: Further information on VORONOI_2D_BINNING algorithm can be found in Cappellari M., Copin Y., 2003, MNRAS, 342, 345 http://adsabs.harvard.edu/abs/2003MNRAS.342..345C CALLING SEQUENCE: binNum, xBin, yBin, xBar, yBar, sn, nPixels, scale = \ voronoi_2d_binning(x, y, signal, noise, targetSN, cvt=True, pixelsize=None, plot=True, quiet=True, sn_func=None, wvt=True) INPUTS: X: Vector containing the X coordinate of the pixels to bin. Arbitrary units can be used (e.g. arcsec or pixels). 
In what follows the term "pixel" refers to a given spatial element of the dataset (sometimes called "spaxel" in the IFS community): it can be an actual pixel of a CCD image, or a spectrum position along the slit of a long-slit spectrograph or in the field of view of an IFS (e.g. a lenslet or a fiber). It is assumed here that pixels are arranged in a regular grid, so that the pixel size is a well defined quantity. The pixel grid however can contain holes (some pixels can be excluded from the binning) and can have an irregular boundary. See the above reference for an example and details. Y: Vector (same size as X) containing the Y coordinate of the pixels to bin. SIGNAL: Vector (same size as X) containing the signal associated with each pixel, having coordinates (X,Y). If the `pixels' are actually the apertures of an integral-field spectrograph, then the signal can be defined as the average flux in the spectral range under study, for each aperture. If pixels are the actual pixels of the CCD in a galaxy image, the signal will be simply the counts in each pixel. NOISE: Vector (same size as X) containing the corresponding noise (1 sigma error) associated with each pixel. TARGETSN: The desired signal-to-noise ratio in the final 2D-binned data. E.g. a S/N~50 per pixel may be a reasonable value to extract stellar kinematics information from galaxy spectra. KEYWORDS: CVT: Set this keyword to skip the Centroidal Voronoi Tessellation (CVT) step (vii) of the algorithm in Section 5.1 of Cappellari & Copin (2003). This may be useful if the noise is strongly non Poissonian, the pixels are not optimally weighted, and the CVT step appears to introduces significant gradients in the S/N. A similar alternative consists of using the /WVT keyword below. PLOT: Set this keyword to produce a plot of the two-dimensional bins and of the corresponding S/N at the end of the computation. PIXSIZE: Optional pixel scale of the input data. 
This can be the size of a pixel of an image or the size of a spaxel or lenslet in an integral-field spectrograph. - The value is computed automatically by the program, but this can take a long times when (X, Y) have many elements. In those cases the PIXSIZE keyword should be given. SN_FUNC: Generic function to calculate the S/N of a bin with spaxels "index" with the form: "sn = func(index, signal, noise)". If this keyword is not set, or is set to None, the program uses the _sn_func(), included in the program file, but another function can be adopted if needed. See the documentation of _sn_func() for more details. QUIET: by default the program shows the progress while accreting pixels and then while iterating the CVT. Set this keyword to avoid printing progress results. WVT: When this keyword is set, the routine bin2d_cvt_equal_mass is modified as proposed by <NAME> (2006, MNRAS, 368, 497). In this case the final step of the algorithm, after the bin-accretion stage, is not a modified Centroidal Voronoi Tessellation, but it uses a Weighted Voronoi Tessellation. This may be useful if the noise is strongly non Poissonian, the pixels are not optimally weighted, and the CVT step appears to introduces significant gradients in the S/N. A similar alternative consists of using the /NO_CVT keyword above. If you use the /WVT keyword you should also include a reference to `the WVT modification proposed by <NAME> (2006).' OUTPUTS: BINNUMBER: Vector (same size as X) containing the bin number assigned to each input pixel. The index goes from zero to Nbins-1. IMPORTANT: THIS VECTOR ALONE IS ENOUGH TO MAKE *ANY* SUBSEQUENT COMPUTATION ON THE BINNED DATA. EVERYTHING ELSE IS OPTIONAL! XBIN: Vector (size Nbins) of the X coordinates of the bin generators. These generators uniquely define the Voronoi tessellation. Note: USAGE OF THIS VECTOR IS DEPRECATED AS IT CAN CAUSE CONFUSION YBIN: Vector (size Nbins) of Y coordinates of the bin generators. 
Note: USAGE OF THIS VECTOR IS DEPRECATED AS IT CAN CAUSE CONFUSION XBAR: Vector (size Nbins) of X coordinates of the bins luminosity weighted centroids. Useful for plotting interpolated data. YBAR: Vector (size Nbins) of Y coordinates of the bins luminosity weighted centroids. SN: Vector (size Nbins) with the final SN of each bin. NPIXELS: Vector (size Nbins) with the number of pixels of each bin. SCALE: Vector (size Nbins) with the scale length of the Weighted Voronoi Tessellation, when the /WVT keyword is set. In that case SCALE is *needed* together with the coordinates XBIN and YBIN of the generators, to compute the tessellation (but one can also simply use the BINNUMBER vector). PROCEDURES USED: The following procedures are contained in the main VORONOI_2D_BINNING program. _SN_FUNC -- Example routine to calculate the S/N of a bin. WEIGHTED_CENTROID -- computes weighted centroid of one bin BIN_ROUNDNESS -- equation (5) of Cappellari & Copin (2003) BIN_ACCRETION -- steps (i)-(v) in section 5.1 REASSIGN_BAD_BINS -- steps (vi)-(vii) in section 5.1 CVT_EQUAL_MASS -- the modified Lloyd algorithm in section 4.1 COMPUTE_USEFUL_BIN_QUANTITIES -- self explanatory DISPLAY_PIXELS -- plotting of colored pixels MODIFICATION HISTORY: V1.0.0: First implementation. <NAME>, Leiden, June 2001 V2.0.0: Major revisions. Stable version. MC, Leiden, 11 September 2001 V2.1.0: First released version. Written documentation. MC, Vicenza, 13 February 2003 V2.2.0: Added computation of useful bin quantities in output. Deleted some safety checks for zero size bins in CVT. Minor polishing of the code. MC, Leiden, 11 March 2003 V2.3.0: Unified the three tests to stop the accretion of one bin. This can improve some bins at the border. MC, Leiden, 9 April 2003 V2.3.1: Do *not* assume the first bin is made of one single pixel. Added computation of S/N scatter and plotting of 1-pixel bins. MC, Leiden, 13 April 2003 V2.4.0: Addedd basic error checking of input S/N. 
Reintroduced the treatment for zero-size bins in CVT, which was deleted in V2.2. Thanks to <NAME> and <NAME> for reporting problems. MC, Leiden, 10 December 2003. V2.4.1: Added /QUIET keyword and verbose output during the computation. After suggestion by <NAME>. MC, Leiden, 14 December 2003 V2.4.2: Use LONARR instead of INTARR to define the CLASS vector, to be able to deal with big images. Thanks to <NAME>. MC, Leiden, 4 August 2004 V2.4.3: Corrected bug introduced in version 2.3.1. It went undetected for a long time because it could only happen in special conditions. Now we recompute the index of the good bins after computing all centroids of the reassigned bins in reassign_bad_bins. Many thanks to <NAME> for her clear analysis of the problem and the solution. MC, Leiden, 29 November 2004 V2.4.4: Prevent division by zero for pixels with signal=0 and noise=sqrt(signal)=0, as can happen from X-ray data. MC, Leiden, 30 November 2004 V2.4.5: Added BIN2D prefix to internal routines to avoid possible naming conflicts. MC, Leiden, 3 December 2004 V2.4.6: Added /NO_CVT keyword to optionally skip the CVT step of the algorithm. MC, Leiden, 27 August 2005 V2.4.7: Verify that SIGNAL and NOISE are non negative vectors. MC, Leiden, 27 September 2005 V2.4.8: Use geometric centroid of a bin during the bin-accretion stage, to allow the routine to deal with negative signal (e.g. in background-subtracted X-ray images). Thanks to <NAME> for pointing out the usefulness of dealing with negative signal. MC, Leiden, 23 December 2005 V2.5.0: Added two new lines of code and the corresponding /WVT keyword to implement the nice modification to the algorithm proposed by Diehl & Statler (2006). MC, Leiden, 9 March 2006 V2.5.1: Updated documentation. MC, Oxford, 3 November 2006 V2.5.2: Print number of unbinned pixels. MC, Oxford, 28 March 2007 V2.5.3: Fixed program stop, introduced in V2.5.0, with /NO_CVT keyword. MC, Oxford, 3 December 2007 V2.5.4: Improved color shuffling for final plot. 
MC, Oxford, 30 November 2009 V2.5.5: Added PIXSIZE keyword. MC, Oxford, 28 April 2010 V2.5.6: Use IDL intrinsic function DISTANCE_MEASURE for automatic pixelSize, when PIXSIZE keyword is not given. MC, Oxford, 11 November 2011 V2.5.7: Included safety termination criterion of Lloyd algorithm to prevent loops using /WVT. MC, Oxford, 24 March 2012 V2.5.8: Update Voronoi tessellation at the exit of bin2d_cvt_equal_mass. This is only done when using /WVT, as DIFF may not be zero at the last iteration. MC, La Palma, 15 May 2012 V2.6.0: Included new SN_FUNCTION to illustrate the fact that the user can define his own function to estimate the S/N of a bin if needed. MC, London, 19 March 2014 V3.0.0: Translated from IDL into Python and tested against the original. MC, London, 19 March 2014 V3.0.1: Support both Python 2.7 and Python 3. MC, Oxford, 25 May 2014 V3.0.2: Avoid potential runtime warning while plotting. MC, Oxford, 2 October 2014 V3.0.3: Use for loop to calculate Voronoi tessellation of large arrays to reduce memory usage. Thanks to <NAME> (Potsdam) for reporting the problem and providing the solution. MC, Oxford, 31 March 2016 V3.0.4: Included keyword "sn_func" to pass a function which calculates the S/N of a bin, rather than editing _sn_func(). Included test to prevent the addition of a pixel from ever decreasing the S/N during the accretion stage. MC, Oxford, 12 April 2016 V3.0.5: Fixed deprecation warning in Numpy 1.11. MC, Oxford, 18 April 2016 V3.0.6: Use interpolation='nearest' to avoid crash on MacOS. Thanks to <NAME> (Portsmouth) for reporting the problem. Allow for zero noise. MC, Oxford, 14 June 2016 V3.0.7: Print execution time. MC, Oxford, 23 January 2017 V3.0.8: New voronoi_tessellation() function. MC, Oxford, 15 February 2017 V3.0.9: Do not iterate down to diff==0 in _cvt_equal_mass(). Request `pixelsize` when dataset is large. Thanks to <NAME> (Potsdam) for the feedback. Make `quiet` really quiet. 
Fixd some instances where sn_func() was not being used (only relevant when passing the `sn_func` keyword). MC, Oxford, 10 July 2017 V3.1.0: Use cKDTree for un-weighted Voronoi Tessellation. Removed loop over bins from Lloyd's algorithm with CVT. MC, Oxford, 17 July 2017 """ from __future__ import print_function from time import clock import numpy as np import matplotlib.pyplot as plt from scipy.spatial import distance, cKDTree from scipy import ndimage #---------------------------------------------------------------------------- def _sn_func(index, u): """ Default function to calculate the S/N of a bin with spaxels "index". The Voronoi binning algorithm does not require this function to have a specific form and this default one can be changed by the user if needed by passing a different function as ... = voronoi_2d_binning(..., sn_func=sn_func) The S/N returned by sn_func() does not need to be an analytic function of S and N. There is also no need for sn_func() to return the actual S/N. Instead sn_func() could return any quantity the user needs to equalize. For example sn_func() could be a procedure which uses ppxf to measure the velocity dispersion from the coadded spectrum of spaxels "index" and returns the relative error in the dispersion. Of course an analytic approximation of S/N, like the one below, speeds up the calculation. :param index: integer vector of length N containing the indices of the spaxels for which the combined S/N has to be returned. The indices refer to elements of the vectors signal and noise. :param signal: vector of length M>N with the signal of all spaxels. :param noise: vector of length M>N with the noise of all spaxels. :return: scalar S/N or another quantity that needs to be equalized. 
""" sn = ETC('S','BL', 5110., 0., 0.75, 3600., '1x1', spatial_bin=[0.4,0.4], spectral_bin=None, nas=False, sb=True, mag_AB=u, flux=None, Nframes=1, emline_width=None) # The following commented line illustrates, as an example, how one # would include the effect of spatial covariance using the empirical # Eq.(1) from http://adsabs.harvard.edu/abs/2015A%26A...576A.135G # Note however that the formula is not accurate for large bins. # # sn /= 1 + 1.07*np.log10(index.size) return sn #---------------------------------------------------------------------- def voronoi_tessellation(x, y, xnode, ynode, scale): """ Computes (Weighted) Voronoi Tessellation of the pixels grid """ if scale[0] == 1: # non-weighted VT tree = cKDTree(np.column_stack([xnode, ynode])) classe = tree.query(np.column_stack([x, y]))[1] else: if x.size < 1e4: classe = np.argmin(((x[:, None] - xnode)**2 + (y[:, None] - ynode)**2)/scale**2, axis=1) else: # use for loop to reduce memory usage classe = np.zeros(x.size, dtype=int) for j, (xj, yj) in enumerate(zip(x, y)): classe[j] = np.argmin(((xj - xnode)**2 + (yj - ynode)**2)/scale**2) return classe #---------------------------------------------------------------------- def _centroid(x, y, density): """ Computes weighted centroid of one bin. Equation (4) of Cappellari & Copin (2003) """ mass = np.sum(density) xBar = x.dot(density)/mass yBar = y.dot(density)/mass return xBar, yBar #---------------------------------------------------------------------- def _roundness(x, y, pixelSize): """ Implements equation (5) of Cappellari & Copin (2003) """ n = x.size equivalentRadius = np.sqrt(n/np.pi)*pixelSize xBar, yBar = np.mean(x), np.mean(y) # Geometric centroid here! maxDistance = np.sqrt(np.max((x - xBar)**2 + (y - yBar)**2)) roundness = maxDistance/equivalentRadius - 1. 
return roundness #---------------------------------------------------------------------- def _accretion(x, y, snr, u, targetSN, pixelsize, quiet, sn_func): """ Implements steps (i)-(v) in section 5.1 of Cappellari & Copin (2003) """ n = x.size classe = np.zeros(n, dtype=int) # will contain the bin number of each given pixel good = np.zeros(n, dtype=bool) # will contain 1 if the bin has been accepted as good # For each point, find the distance to all other points and select the minimum. # This is a robust but slow way of determining the pixel size of unbinned data. # if pixelsize is None: if x.size < 1e4: pixelsize = np.min(distance.pdist(np.column_stack([x, y]))) else: raise ValueError("Dataset is large: Provide `pixelsize`") currentBin = np.argmax(snr) # Start from the pixel with highest S/N SN = sn_func(currentBin, u) # Rough estimate of the expected final bins number. # This value is only used to give an idea of the expected # remaining computation time when binning very big dataset. 
# w = snr < targetSN maxnum = int(np.sum((SN)**2)/targetSN**2 + np.sum(~w)) # The first bin will be assigned CLASS = 1 # With N pixels there will be at most N bins # for ind in range(1, n+1): if not quiet: print(ind, ' / ', maxnum) classe[currentBin] = ind # Here currentBin is still made of one pixel xBar, yBar = x[currentBin], y[currentBin] # Centroid of one pixels while True: if np.all(classe): break # Stops if all pixels are binned # Find the unbinned pixel closest to the centroid of the current bin # unBinned = np.flatnonzero(classe == 0) k = np.argmin((x[unBinned] - xBar)**2 + (y[unBinned] - yBar)**2) # (1) Find the distance from the closest pixel to the current bin # minDist = np.min((x[currentBin] - x[unBinned[k]])**2 + (y[currentBin] - y[unBinned[k]])**2) # (2) Estimate the `roundness' of the POSSIBLE new bin # nextBin = np.append(currentBin, unBinned[k]) roundness = _roundness(x[nextBin], y[nextBin], pixelsize) # (3) Compute the S/N one would obtain by adding # the CANDIDATE pixel to the current bin # SNOld = SN SN = sn_func(nextBin, u) # Test whether (1) the CANDIDATE pixel is connected to the # current bin, (2) whether the POSSIBLE new bin is round enough # and (3) whether the resulting S/N would get closer to targetSN # if (np.sqrt(minDist) > 1.2*pixelsize or roundness > 0.3 or abs(SN - targetSN) > abs(SNOld - targetSN) or SNOld > SN): if SNOld > 0.8*targetSN: good[currentBin] = 1 break # If all the above 3 tests are negative then accept the CANDIDATE # pixel, add it to the current bin, and continue accreting pixels # classe[unBinned[k]] = ind currentBin = nextBin # Update the centroid of the current bin # xBar, yBar = np.mean(x[currentBin]), np.mean(y[currentBin]) # Get the centroid of all the binned pixels # binned = classe > 0 if np.all(binned): break # Stop if all pixels are binned xBar, yBar = np.mean(x[binned]), np.mean(y[binned]) # Find the closest unbinned pixel to the centroid of all # the binned pixels, and start a new bin from that pixel. 
# unBinned = np.flatnonzero(classe == 0) k = np.argmin((x[unBinned] - xBar)**2 + (y[unBinned] - yBar)**2) currentBin = unBinned[k] # The bin is initially made of one pixel SN = sn_func(currentBin, signal, noise) classe *= good # Set to zero all bins that did not reach the target S/N return classe, pixelsize #---------------------------------------------------------------------------- def _reassign_bad_bins(classe, x, y): """ Implements steps (vi)-(vii) in section 5.1 of Cappellari & Copin (2003) """ # Find the centroid of all successful bins. # CLASS = 0 are unbinned pixels which are excluded. # good = np.unique(classe[classe > 0]) xnode = ndimage.mean(x, labels=classe, index=good) ynode = ndimage.mean(y, labels=classe, index=good) # Reassign pixels of bins with S/N < targetSN # to the closest centroid of a good bin # bad = classe == 0 index = voronoi_tessellation(x[bad], y[bad], xnode, ynode, [1]) classe[bad] = good[index] # Recompute all centroids of the reassigned bins. # These will be used as starting points for the CVT. # good = np.unique(classe) xnode = ndimage.mean(x, labels=classe, index=good) ynode = ndimage.mean(y, labels=classe, index=good) return xnode, ynode #---------------------------------------------------------------------------- def _cvt_equal_mass(x, y, snr, u, xnode, ynode, pixelsize, quiet, sn_func, wvt): """ Implements the modified Lloyd algorithm in section 4.1 of Cappellari & Copin (2003). NB: When the keyword WVT is set this routine includes the modification proposed by Diehl & Statler (2006). """ dens2 = (snr)**4 # See beginning of section 4.1 of CC03 scale = np.ones_like(xnode) # Start with the same scale length for all bins for it in range(1, xnode.size): # Do at most xnode.size iterations xnode_old, ynode_old = xnode.copy(), ynode.copy() classe = voronoi_tessellation(x, y, xnode, ynode, scale) # Computes centroids of the bins, weighted by dens**2. # Exponent 2 on the density produces equal-mass Voronoi bins. 
# The geometric centroids are computed if WVT keyword is set. # good = np.unique(classe) SN = sn_func(nextBin, u) w = snr < targetSN maxnum = int(np.sum((SN)**2)/targetSN**2 + np.sum(~w)) if wvt: for k in good: index = np.flatnonzero(classe == k) # Find subscripts of pixels in bin k. xnode[k], ynode[k] = np.mean(x[index]), np.mean(y[index]) sn = sn_func(index, u, maxnum) scale[k] = np.sqrt(index.size/sn) # Eq. (4) of Diehl & Statler (2006) else: mass = ndimage.sum(dens2, labels=classe, index=good) xnode = ndimage.sum(x*dens2, labels=classe, index=good)/mass ynode = ndimage.sum(y*dens2, labels=classe, index=good)/mass diff2 = np.sum((xnode - xnode_old)**2 + (ynode - ynode_old)**2) diff = np.sqrt(diff2)/pixelsize if not quiet: print('Iter: %4i Diff: %.4g' % (it, diff)) if diff < 0.1: break # If coordinates have changed, re-compute (Weighted) Voronoi Tessellation of the pixels grid # if diff > 0: classe = voronoi_tessellation(x, y, xnode, ynode, scale) good = np.unique(classe) # Check for zero-size Voronoi bins # Only return the generators and scales of the nonzero Voronoi bins return xnode[good], ynode[good], scale[good], it #----------------------------------------------------------------------- def _compute_useful_bin_quantities(x, y, u, xnode, ynode, scale, sn_func): """ Recomputes (Weighted) Voronoi Tessellation of the pixels grid to make sure that the class number corresponds to the proper Voronoi generator. This is done to take into account possible zero-size Voronoi bins in output from the previous CVT (or WVT). """ # classe will contain the bin number of each given pixel classe = voronoi_tessellation(x, y, xnode, ynode, scale) # At the end of the computation evaluate the bin luminosity-weighted # centroids (xbar, ybar) and the corresponding final S/N of each bin. 
# xbar = np.empty_like(xnode) ybar = np.empty_like(xnode) sn = np.empty_like(xnode) area = np.empty_like(xnode) good = np.unique(classe) for k in good: index = np.flatnonzero(classe == k) # index of pixels in bin k. xbar[k], ybar[k] = _centroid(x[index], y[index], signal[index]) sn[k] = sn_func(index, u) area[k] = index.size return classe, xbar, ybar, sn, area #----------------------------------------------------------------------- def _display_pixels(x, y, counts, pixelsize): """ Display pixels at coordinates (x, y) coloured with "counts". This routine is fast but not fully general as it assumes the spaxels are on a regular grid. This needs not be the case for Voronoi binning. """ xmin, xmax = np.min(x), np.max(x) ymin, ymax = np.min(y), np.max(y) nx = int(round((xmax - xmin)/pixelsize) + 1) ny = int(round((ymax - ymin)/pixelsize) + 1) img = np.full((nx, ny), np.nan) # use nan for missing data j = np.round((x - xmin)/pixelsize).astype(int) k = np.round((y - ymin)/pixelsize).astype(int) img[j, k] = counts plt.imshow(np.rot90(img), interpolation='nearest', cmap='prism', extent=[xmin - pixelsize/2, xmax + pixelsize/2, ymin - pixelsize/2, ymax + pixelsize/2]) #---------------------------------------------------------------------- def voronoi_2d_binning(x, y, snr, u, targetSN, cvt=True, pixelsize=None, plot=True, quiet=True, sn_func=None, wvt=True): """ PURPOSE: Perform adaptive spatial binning of Integral-Field Spectroscopic (IFS) data to reach a chosen constant signal-to-noise ratio per bin. This method is required for the proper analysis of IFS observations, but can also be used for standard photometric imagery or any other two-dimensional data. This program precisely implements the algorithm described in section 5.1 of the reference below. 
EXPLANATION: Further information on VORONOI_2D_BINNING algorithm can be found in <NAME>., <NAME>., 2003, MNRAS, 342, 345 CALLING SEQUENCE: binNum, xBin, yBin, xBar, yBar, sn, nPixels, scale = \ voronoi_2d_binning(x, y, signal, noise, targetSN, cvt=True, pixelsize=None, plot=True, quiet=True, sn_func=None, wvt=True) """ # This is the main program that has to be called from external programs. # It simply calls in sequence the different steps of the algorithms # and optionally plots the results at the end of the calculation. assert x.size == y.size == snr.size == u.size, \ 'Input vectors (x, y, SNR, magnitude) must have the same size' assert np.all((noise > 0) & np.isfinite(noise)), \ 'NOISE must be positive and finite' if sn_func is None: sn_func = _sn_func # Perform basic tests to catch common input errors # if sn_func(noise > 0, u) < targetSN: raise ValueError("""Not enough S/N in the whole set of pixels. Many pixels may have noise but virtually no signal. They should not be included in the set to bin, or the pixels should be optimally weighted. 
See Cappellari & Copin (2003, Sec.2.1) and README file.""") if np.min(snr) > targetSN: raise ValueError('All pixels have enough S/N and binning is not needed') t = clock() if not quiet: print('Bin-accretion...') classe, pixelsize = _accretion( x, y, snr, u, targetSN, pixelsize, quiet, sn_func) if not quiet: print(np.max(classe), ' initial bins.') print('Reassign bad bins...') xnode, ynode = _reassign_bad_bins(classe, x, y) if not quiet: print(xnode.size, ' good bins.') if cvt: if not quiet: print('Modified Lloyd algorithm...') xnode, ynode, scale, it = _cvt_equal_mass( x, y, snr, u, xnode, ynode, pixelsize, quiet, sn_func, wvt) if not quiet: print(it - 1, ' iterations.') else: scale = np.ones_like(xnode) classe, xBar, yBar, sn, area = _compute_useful_bin_quantities( x, y, snr, u, xnode, ynode, scale, sn_func) w = area == 1 if not quiet: print('Unbinned pixels: ', np.sum(w), ' / ', x.size) print('Fractional S/N scatter (%):', np.std(sn[~w] - targetSN, ddof=1)/targetSN*100) print('Elapsed time: %.2f seconds' % (clock() - t)) if plot: plt.clf() plt.subplot(211) rnd = np.argsort(np.random.random(xnode.size)) # Randomize bin colors _display_pixels(x, y, rnd[classe], pixelsize) plt.plot(xnode, ynode, '+w', scalex=False, scaley=False) # do not rescale after imshow() plt.xlabel('R (arcsec)') plt.ylabel('R (arcsec)') plt.title('Map of Voronoi bins') plt.subplot(212) rad = np.sqrt(xBar**2 + yBar**2) # Use centroids, NOT generators plt.plot(rad[~w], sn[~w], 'or', label='Voronoi bins') plt.xlabel('R (arcsec)') plt.ylabel('Bin S/N') plt.axis([np.min(rad), np.max(rad), 0, np.max(sn)]) # x0, x1, y0, y1 if np.sum(w) > 0: plt.plot(rad[w], sn[w], 'xb', label='single spaxels') plt.axhline(targetSN) plt.legend() plt.pause(1) # allow plot to appear in certain cases return classe, xnode, ynode, xBar, yBar, sn, area, scale #---------------------------------------------------------------------------- # + # #!/usr/bin/env python from __future__ import print_function from os import path 
import numpy as np

# NOTE(review): this import shadows the inline voronoi_2d_binning defined
# above with the installed package's version — confirm which is intended.
from voronoi_2d_binning import voronoi_2d_binning


def voronoi_binning():
    """Run adaptive Voronoi binning on the KCWI pixel list.

    Reads per-pixel coordinates, S/N and AB magnitudes from
    'kcwi_vals.txt' (CSV columns: x, y, snr, u), bins to a target S/N of
    2.0 and writes the per-pixel bin assignments to 'kcwi_vis_output.txt'.
    """
    x, y, snr, u = np.genfromtxt('kcwi_vals.txt', delimiter=',', unpack=True)

    targetSN = 2.0

    binNum, xNode, yNode, xBar, yBar, sn, nPixels, scale = voronoi_2d_binning(
        x, y, snr, u, targetSN, plot=1, quiet=0)

    # FIX: np.savetxt requires a str format under Python 3; the original
    # bytes literal (fmt=b'...') raises ValueError in modern NumPy.
    np.savetxt('kcwi_vis_output.txt', np.column_stack([x, y, binNum, snr]),
               fmt='%10.6f,%10.6f,%8i,%8i')

voronoi_binning()

# +
x, y, binNum, snr = np.genfromtxt('kcwi_vis_output.txt', delimiter=',', unpack=True)
binNum = binNum.reshape((30, 25))
x = x.reshape((30, 25))
y = y.reshape((30, 25))
snr = snr.reshape((30, 25))

# FIX: the 'Vega20b' colormap alias was removed in matplotlib 2.2;
# 'tab20b' is the current name of the same colormap.
plt.imshow(snr, cmap='tab20b')
plt.colorbar()
plt.show()
# -

fits = get_fits(184.8461922, 5.79484358)

fits[0][0].header

get_image(184.8461922, 5.79484358)

brightness = fits[0][0].data
plt.imshow(brightness[655:685, 1540:1566], vmax=np.percentile(brightness, 99.5), cmap='jet')
plt.colorbar()
plt.show()

# VCC344: cut out the galaxy of interest (pixel values in nanomaggies).
sf_bright = brightness[655:685, 1540:1565]
# Nanomaggies -> AB magnitude, then to surface brightness for a
# 0.4"x0.4" spaxel (the 0.16 arcsec^2 area term).
m = 22.5 - 2.5 * np.log10(sf_bright)
u = m + 2.5 * np.log10(0.16)

# NOTE(review): builtin sum() over a 2-D array sums along axis 0, giving a
# per-column vector; if the total flux was intended this should be
# np.sum(sf_bright) — confirm.
a = sum(sf_bright)
a

# FIX: this section heading was a bare (non-comment) line, which is a
# syntax error when the notebook script is executed.
# RECTANGULAR ANNULI

# +
max_val = np.max(sf_bright)
center = np.where(sf_bright == max_val)
x = center[0][0]
y = center[1][0]

#data = np.arange(1,26).reshape(5,5)
data = sf_bright.copy()
target_row = x
target_col = y

# Collect the pixel values in square ("rectangular") annuli of increasing
# half-width `step` around the brightest pixel.
iteration = []
for step in range(max(x, y, data.shape[0]-2, data.shape[1]-2)):
    annulus = []
    for i in range(-step, step+1):
        if i == -step or i == step:
            # Top and bottom edges of the ring: take the full row span.
            for j in range(-step, step+1):
                # FIX: the column offset was tested as target_row+j here,
                # inconsistent with the write-back loop below which uses
                # target_col+j; j offsets columns, so target_col is correct.
                if target_row+i >= 0 and target_col+j >= 0:
                    try:
                        annulus.append(data[target_row+i][target_col+j])
                    except IndexError:
                        pass
        else:
            # Interior rows: only the two side columns of the ring.
            for j in (-step, step):
                if target_row+i >= 0 and target_col+j >= 0:
                    try:
                        annulus.append(data[target_row+i][target_col+j])
                    except IndexError:
                        pass
    iteration.append(annulus)

# Mean brightness of each annulus.
averages = []
for i in range(len(iteration)):
    averages.append(np.mean(iteration[i]))

# Write each annulus mean back into every pixel of that annulus.
for step in range(max(x, y, data.shape[0]-2, data.shape[1]-2)):
    for i in range(-step, step+1):
        if i == -step or i == step:
            for j in range(-step, step+1):
                if target_row+i >= 0 and target_col+j >= 0:
                    try:
                        data[target_row+i][target_col+j] = float(averages[step])
                    except IndexError:
                        pass
        else:
            for j in (-step, step):
                if target_row+i >= 0 and target_col+j >= 0:
                    try:
                        data[target_row+i][target_col+j] = float(averages[step])
                    except IndexError:
                        pass

m = 22.5 - 2.5 * np.log10(data)
u = m + 2.5 * np.log10(0.16)
# -

# FIX: bare heading commented out (syntax error otherwise).
# ELLIPTICAL ANNULI

def ellip_bin():
    """Bin the cut-out by flux level ("elliptical" annuli by isophote).

    Pixels are grouped into unit-wide flux bins [k, k+1), each group is
    replaced by its mean, the result is converted to surface-brightness
    magnitudes and pushed through the ETC to get an S/N map.

    NOTE(review): the original cell structure was ambiguous about how much
    of this code lives inside the function — confirm against the notebook.
    """
    plt.hist(sf_bright)
    plt.show()
    plt.xlabel('Flux')
    plt.ylabel('Number')

    elliptical = sf_bright.copy()
    bins = []
    # One unit-wide flux bin per level, from (9, 10) down to (0, 1); each
    # interval's mean lies inside the interval, so later (lower) masks
    # never re-capture already-averaged pixels.
    for k, lo in enumerate(range(9, -1, -1)):
        bins.append(np.mean(sf_bright[(sf_bright > lo) & (sf_bright < lo + 1)]))
        elliptical[(elliptical > lo) & (elliptical < lo + 1)] = bins[k]

    m = 22.5 - 2.5 * np.log10(elliptical)
    u = m + 2.5 * np.log10(0.16)

    SNR = np.zeros((len(u), len(u[0])))
    for i in range(len(u)):
        for j in range(len(u[0])):
            SNR[i][j] = ETC('S', 'BL', 5110., 0., 0.75, 3600., '1x1',
                            spatial_bin=[0.4, 0.4], spectral_bin=None,
                            nas=False, sb=True, mag_AB=u[i][j], flux=None,
                            Nframes=1, emline_width=None)

    plt.imshow(SNR, vmax=np.percentile(SNR, 99.5), cmap='jet')
    plt.colorbar()
    plt.show()

    m = 22.5 - 2.5 * np.log10(elliptical)
    u = m + 2.5 * np.log10(0.16)

# +
SNR = np.zeros((len(u), len(u[0])))
for i in range(len(u)):
    for j in range(len(u[0])):
        SNR[i][j] = ETC('S', 'BL', 5110., 0., 0.75, 3600., '1x1',
                        spatial_bin=[0.4, 0.4], spectral_bin=None, nas=False,
                        sb=True, mag_AB=u[i][j], flux=None, Nframes=1,
                        emline_width=None)
# -

plt.imshow(SNR, vmax=np.percentile(SNR, 99.5), cmap='jet')
plt.colorbar()
plt.show()

m = 22.5 - 2.5 * np.log10(a)
#f = 3075.14 * 10**(0.4 * 0.04)
#m = -(2.5 / np.log(10)) * ((np.arcsinh(f)/(2*(0.9e-10)) + np.log(0.9e-10)))
m

# +
SNR = np.zeros((len(u), len(u[0])))
for i in range(len(u)):
    for j in range(len(u[0])):
        SNR[i][j] = ETC('S', 'BL', 5110., 0., 0.75, 3600., '1x1',
                        spatial_bin=[0.4, 0.4], spectral_bin=None, nas=False,
                        sb=True, mag_AB=u[i][j], flux=None, Nframes=1,
                        emline_width=None)

# S/N ~ 20/Ang, binned over ~1 R_e aperture
#ETC('M','BM', 5110., 0., 0.75, 3600., '2x2', spatial_bin=[0.4,0.4], spectral_bin=None, nas=False, sb=True, mag_AB=25, flux=None, Nframes=1, emline_width=None)
# -

plt.imshow(SNR, vmax=np.percentile(SNR, 99.5), cmap='jet')
plt.colorbar()
plt.show()

# +
# Flatten the magnitude and S/N maps into per-pixel rows for the binning code.
x = []
y = []
mag = []
sn = []
for i in range(len(u)):
    for j in range(len(u[0])):
        x.append(i)
        y.append(j)
        mag.append(u[i][j])
        sn.append(SNR[i][j])

# FIX: str format (a bytes fmt raises ValueError under Python-3 NumPy).
np.savetxt('kcwi_vals.txt', np.column_stack([x, y, sn, mag]),
           fmt='%1.1f,%1.1f,%8.3f,%8.3f')


# +
def get_sfbright():
    '''
    Grabs surface brightness data from fits file and displays
    2D plot of data.
    '''
    fits = get_fits(184.8461922, 5.79484358)
    fits[0][0].header
    brightness = fits[0][0].data
    plt.imshow(brightness[655:685, 1540:1566], vmax=np.percentile(brightness, 99.5), cmap='binary')
    plt.show()
    return brightness


def calc_mag(sf_bright):
    '''
    Looks only at object of interest.
    Takes surface brightness data that is in units of nanomaggies
    and calculates magnitude
    '''
    sf_bright = sf_bright[655:685, 1540:1565]
    m = 22.5 - 2.5 * np.log10(sf_bright)
    u = m + 2.5 * np.log10(0.16)
    return u


def ETC_all(mag):
    '''
    Performs exposure time calculation on all pixels in frame.
    Displays SNR values.
    '''
    SNR = np.zeros((len(mag), len(mag[0])))
    # FIX: the loops iterated range(len(mag[0])) x range(len(mag[1])) —
    # both are column counts — so rows beyond the column count were never
    # filled (or raised IndexError). Iterate rows x columns to match the
    # (len(mag), len(mag[0])) shape of SNR.
    for i in range(len(mag)):
        for j in range(len(mag[0])):
            SNR[i][j] = ETC('M', 'BM', 5110., 0., 0.75, 3600., '2x2',
                            spatial_bin=[0.4, 0.4], spectral_bin=None,
                            nas=False, sb=True, mag_AB=mag[i][j], flux=None,
                            Nframes=1, emline_width=None)
    plt.imshow(SNR, vmax=np.percentile(SNR, 99.5), cmap='binary')
    plt.colorbar()
    plt.show()
# -
KCWI/ETC2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # <h1> 2c. Loading large datasets progressively with the tf.data.Dataset </h1> # # In this notebook, we continue reading the same small dataset, but refactor our ML pipeline in two small, but significant, ways: # # 1. Refactor the input to read data from disk progressively. # 2. Refactor the feature creation so that it is not one-to-one with inputs. # # The Pandas function in the previous notebook first read the whole data into memory -- on a large dataset, this won't be an option. # - # !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst # Ensure the right version of Tensorflow is installed. # !pip freeze | grep tensorflow==2.5 # + deletable=true editable=true from google.cloud import bigquery import tensorflow as tf import numpy as np import shutil print(tf.__version__) # + [markdown] deletable=true editable=true # <h2> 1. Refactor the input </h2> # # Read data created in Lab1a, but this time make it more general, so that we can later handle large datasets. We use the Dataset API for this. It ensures that, as data gets delivered to the model in mini-batches, it is loaded from disk only when needed. 
# + deletable=true editable=true CSV_COLUMNS = ['fare_amount', 'pickuplon','pickuplat','dropofflon','dropofflat','passengers', 'key'] DEFAULTS = [[0.0], [-74.0], [40.0], [-74.0], [40.7], [1.0], ['nokey']] # TODO: Create an appropriate input function read_dataset def read_dataset(filename, mode): #TODO Add CSV decoder function and dataset creation and methods return dataset def get_train_input_fn(): return read_dataset('./taxi-train.csv', mode = tf.estimator.ModeKeys.TRAIN) def get_valid_input_fn(): return read_dataset('./taxi-valid.csv', mode = tf.estimator.ModeKeys.EVAL) # + [markdown] deletable=true editable=true # <h2> 2. Refactor the way features are created. </h2> # # For now, pass these through (same as previous lab). However, refactoring this way will enable us to break the one-to-one relationship between inputs and features. # + deletable=true editable=true INPUT_COLUMNS = [ tf.feature_column.numeric_column('pickuplon'), tf.feature_column.numeric_column('pickuplat'), tf.feature_column.numeric_column('dropofflat'), tf.feature_column.numeric_column('dropofflon'), tf.feature_column.numeric_column('passengers'), ] def add_more_features(feats): # Nothing to add (yet!) return feats feature_cols = add_more_features(INPUT_COLUMNS) # + [markdown] deletable=true editable=true # <h2> Create and train the model </h2> # # Note that we train for num_steps * batch_size examples. # + deletable=true editable=true tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) OUTDIR = 'taxi_trained' shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time model = tf.compat.v1.estimator.LinearRegressor( feature_columns = feature_cols, model_dir = OUTDIR) model.train(input_fn = get_train_input_fn, steps = 200) # + [markdown] deletable=true editable=true # <h3> Evaluate model </h3> # # As before, evaluate on the validation data. We'll do the third refactoring (to move the evaluation into the training loop) in the next lab. 
# + deletable=true editable=true metrics = model.evaluate(input_fn = get_valid_input_fn, steps = None) print('RMSE on dataset = {}'.format(np.sqrt(metrics['average_loss']))) # + [markdown] deletable=true editable=true # ## Challenge Exercise # # Create a neural network that is capable of finding the volume of a cylinder given the radius of its base (r) and its height (h). Assume that the radius and height of the cylinder are both in the range 0.5 to 2.0. Unlike in the challenge exercise for b_estimator.ipynb, assume that your measurements of r, h and V are all rounded off to the nearest 0.1. Simulate the necessary training dataset. This time, you will need a lot more data to get a good predictor. # # Hint (highlight to see): # <p style='color:white'> # Create random values for r and h and compute V. Then, round off r, h and V (i.e., the volume is computed from the true value of r and h; it's only your measurement that is rounded off). Your dataset will consist of the round values of r, h and V. Do this for both the training and evaluation datasets. # </p> # # Now modify the "noise" so that instead of just rounding off the value, there is up to a 10% error (uniformly distributed) in the measurement followed by rounding off. # + [markdown] deletable=true editable=true # Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
courses/machine_learning/deepdive/03_tensorflow/labs/c_dataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ** ะšั€ะฐะตะฝ ัั€ะพะบ ะทะฐ ะฟั€ะตะดะฐะฒะฐะฝะต ะฝะฐ ะดะพะผะฐัˆะฝะพั‚ะพ: 15.01.2016** # ## ะ—ะฐะดะฐั‡ะฐ 1 - ะšะฒะฐะดั€ะฐั‚ะฝะพ ัƒั€ะฐะฒะฝะตะฝะธะต # # ะ’ัŠะฒ ั„ะฐะนะป, ะบะพะนั‚ะพ ัะต ะบะฐะทะฒะฐ `quadratic_equation.py`, ะฝะฐะฟะธัˆะตั‚ะต ะฟั€ะพะณั€ะฐะผะฐ, # ะบะพัั‚ะพ ะฟั€ะธ ะฒัŠะฒะตะถะดะฐะฝะต ะฝะฐ ะบะพะตั„ะธั†ะธะตะฝั‚ะธั‚ะต (a, b ะธ c) # ะฝะฐ ะบะฒะฐะดั€ะฐั‚ะฝะพ ัƒั€ะฐะฒะฝะตะฝะธะต: ax^2 + bx + c, ะธะทั‡ะธัะปัะฒะฐ ะธ ะธะทะฒะตะถะดะฐ ะฝะตะณะพะฒะธั‚ะต # ั€ะตะฐะปะฝะธ ะบะพั€ะตะฝะธ (ะฐะบะพ ะธะผะฐ ั‚ะฐะบะธะฒะฐ). # ะšะฒะฐะดั€ะฐั‚ะฝะธั‚ะต ัƒั€ะฐะฒะฝะตะฝะธั ะผะพะณะฐั‚ ะดะฐ ะธะผะฐั‚ 0, 1 ะธะปะธ 2 ั€ะตะฐะปะฝะธ ะบะพั€ะตะฝะฐ. # # * ะงะตั‚ะต ะพั‚ ะฟะพั‚ั€ะตะฑะธั‚ะตะป, ะฟัŠั€ะฒะพั‚ะพ ั‡ะธัะปะพ `a` # * ะงะตั‚ะต ะพั‚ ะฟะพั‚ั€ะตะฑะธั‚ะตะป, ะฒั‚ะพั€ะพั‚ะพ ั‡ะธัะปะพ `b` # * ะงะตั‚ะต ะพั‚ ะฟะพั‚ั€ะตะฑะธั‚ะตะป, ั‚ั€ะตั‚ะพั‚ะพ ั‡ะธัะปะพ `c` # # * ะ˜ะทะบะฐั€ะฒะฐ ะฝะฐ ะตะบั€ะฐะฝะฐ ะบะพั€ะตะฝะธั‚ะต ะฝะฐ ัƒั€ะฐะฒะฝะตะฝะธะตั‚ะพ `x1` ะธ `x2`, ะทะฐะบั€ัŠะณะปะตะฝะธ ะดะพ ะดะฒะฐ ะทะฝะฐะบะฐ ัะปะตะด ะดะตัะตั‚ะธั‡ะฝะฐั‚ะฐ ั‚ะพั‡ะบะฐ. # * ะะบะพ ะฝัะผะฐ ั€ะตะฐะปะฝะธ ะบะพั€ะตะฝะธ ะธะทะฒะตะถะด `NRK`. # # **ะŸั€ะธะผะตั€ะฝะพ ะธะทะฟะพะปะทะฒะฐะฝะต:** # # ``` # Enter a: 2 # Enter b: 3 # Enter b: 4 # NRK # ``` # # ``` # Enter a: 1 # Enter b: 3 # Enter b: -4 # x1 = -4 # x2 = 1 # ``` # # ``` # Enter a: 9 # Enter b: 12 # Enter b: 4 # x1 = 0.67 # x2 = 0.67 # ``` # ะ—ะฐ ะดะฐ ะธะทั‡ะธัะปะธั‚ะต ะบะฒะฐะดั€ะฐั‚ะตะฝ ะบะพั€ะตะฝ ะธะทะฟะพะปะทะฒะฐะนั‚ะต ั„ัƒะฝะบั†ะธัั‚ะฐ `sqrt`. # + from math import sqrt print(sqrt(4)) print(sqrt(6)) # - # ะ—ะฐ ะดะฐ ะทะฐะบั€ัŠะณะปะธั‚ะต ั€ะตะทัƒะปั‚ะฐั‚ะฐ ะดะพ ะดะฒะฐ ะทะฝะฐะบะฐ ัะปะตะด ะทะฐะฟะตั‚ะฐัั‚ะฐ ะธะทะฟะพะปะทะฒะฐะนั‚ะต ั„ัƒะฝะบั†ะธัั‚ะฐ `round`. round(2.4494, 2)
archive/2015/week6/homework.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Bayesian Multiple Regression Example # + # Author: jake-westfall # Created At: Aug 29, 2016 # Last Run: Mar 27, 2019 # - import bambi as bmb import pandas as pd import numpy as np import statsmodels.api as sm import pymc3 as pm import seaborn as sns # ## Load and examine Eugene-Springfield community sample data data = pd.read_csv('data/ESCS.csv') np.round(data.describe(), 2) # It's always a good idea to start off with some basic plotting. Here's what our outcome variable 'drugs' (some index of self-reported illegal drug use) looks like: data['drugs'].hist(); # The five predictor variables that we'll use are sum-scores measuring participants' standings on the Big Five personality dimensions. The dimensions are: # - O = Openness to experience # - C = Conscientiousness # - E = Extraversion # - A = Agreeableness # - N = Neuroticism # # Here's what our predictors look like: sns.pairplot(data[['o','c','e','a','n']]); # ## Specify model and examine priors # We're going to fit a pretty straightforward multiple regression model predicting drug use from all 5 personality dimension scores. It's simple to specify the model using a familiar formula interface. Here we tell bambi to run two parallel Markov Chain Monte Carlo (MCMC) chains, each one drawing 2000 samples from the joint posterior distribution of all the parameters. model = bmb.Model(data) fitted = model.fit('drugs ~ o + c + e + a + n', samples=1000, tune=1000, init=None) # Great! But this is a Bayesian model, right? What about the priors? # # If no priors are given explicitly by the user, then bambi chooses smart default priors for all parameters of the model based on the implied partial correlations between the outcome and the predictors. 
Here's what the default priors look like in this case -- the plots below show 1000 draws from each prior distribution: model.plot(); # Normal priors on the coefficients {x.name:x.prior.args for x in model.terms.values()} # Uniform prior on the residual standard deviation model.y.prior.args['sd'].args # Some more info about the default prior distributions can be found in [this technical paper](https://arxiv.org/abs/1702.01201). # Notice the small SDs of the slope priors. This is due to the relative scales of the outcome and the predictors: remember from the plots above that the outcome, `drugs`, ranges from 1 to about 4, while the predictors all range from about 20 to 180 or so. So a one-unit change in any of the predictors -- which is a trivial increase on the scale of the predictors -- is likely to lead to a very small absolute change in the outcome. Believe it or not, these priors are actually quite wide on the partial correlation scale! # ## Examine the model results # Let's start with a pretty picture of the parameter estimates! fitted.plot(); # The left panels show the marginal posterior distributions for all of the model's parameters, which summarize the most plausible values of the regression coefficients, given the data we have now observed. These posterior density plots show two overlaid distributions because we ran two MCMC chains. The panels on the right are "trace plots" showing the sampling paths of the two MCMC chains as they wander through the parameter space. # A much more succinct (non-graphical) summary of the parameter estimates can be found like so: fitted.summary() # When there are multiple MCMC chains, the default summary output includes some basic convergence diagnostic info (the effective MCMC sample sizes and the Gelman-Rubin "R-hat" statistics), although in this case it's pretty clear from the trace plots above that the chains have converged just fine. 
# ## Summarize effects on partial correlation scale # Let's grab the samples and put them in a format where we can easily work with them. We can do this really easily using the `to_df()` method of the fitted `MCMCResults` object. samples = fitted.to_df() samples.head() # It turns out that we can convert each regression coefficient into a partial correlation by multiplying it by a constant that depends on (1) the SD of the predictor, (2) the SD of the outcome, and (3) the degree of multicollinearity with the set of other predictors. Two of these statistics are actually already computed and stored in the fitted model object, in a dictionary called `dm_statistics` (for design matrix statistics), because they are used internally. The others we will compute manually. # + # the names of the predictors varnames = ['o', 'c', 'e', 'a', 'n'] # compute the needed statistics r2_x = model.dm_statistics['r2_x'] sd_x = model.dm_statistics['sd_x'] r2_y = pd.Series([sm.OLS(endog=data['drugs'], exog=sm.add_constant(data[[p for p in varnames if p != x]])).fit().rsquared for x in varnames], index=varnames) sd_y = data['drugs'].std() # compute the products to multiply each slope with to produce the partial correlations slope_constant = sd_x[varnames] * (1 - r2_x[varnames])**.5 \ / sd_y / (1 - r2_y)**.5 slope_constant # - # Now we just multiply each sampled regression coefficient by its corresponding `slope_constant` to transform it into a sampled partial correlation coefficient. pcorr_samples = samples[varnames] * slope_constant pcorr_samples.head() # And voilà! We now have a joint posterior distribution for the partial correlation coefficients. Let's plot the marginal posterior distributions: pcorr_samples.plot.kde(xlim=[-.5, .5]).axvline(x=0, color='k', linestyle='--') # The means of these distributions serve as good point estimates of the partial correlations: pcorr_samples.mean(axis=0).sort_values() # Naturally, these results are consistent with the OLS results. 
For example, we can see that these estimated partial correlations are roughly proportional to the t-statistics from the corresponding OLS regression: sm.OLS(endog=data['drugs'], exog=sm.add_constant( data[varnames])).fit().summary() # ## Relative importance: Which predictors have the strongest effects (defined in terms of partial $\eta^2$)? # The partial $\eta^2$ statistics for each predictor are just the squares of the partial correlation coefficients, so it's easy to get posteriors on that scale too: (pcorr_samples**2).plot.kde(xlim=[0, .2], ylim=[0, 80]) # With these posteriors we can ask: What is the probability that the partial $\eta^2$ for Openness (yellow) is greater than the partial $\eta^2$ for Conscientiousness (green)? (pcorr_samples['o']**2 > pcorr_samples['c']**2).mean() # For each predictor, what is the probability that it has the largest $\eta^2$? (pcorr_samples**2).idxmax(axis=1).value_counts() / len(pcorr_samples.index) # Agreeableness is clearly the strongest predictor of drug use among the Big Five personality traits, but it's still not a particularly strong predictor in an absolute sense. Walter Mischel famously claimed that it is rare to see correlations between personality measures and relevant behavioral outcomes exceed 0.3. In this case, the probability that the agreeableness partial correlation exceeds 0.3 is: (np.abs(pcorr_samples['a']) > .3).mean()
docs/notebooks/ESCS_multiple_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %pylab inline import numpy Hubble_h = 0.73 BoxSize = 62.5 directory = '../src/auxdata/trees/modified-SF-2/' #directory1 = '../src/auxdata/trees/modified-H2-CF/' filename = 'model' redshift = [0.000] #snapshot = list(range(63, -1, -1)) firstfile = 0 lastfile = 7 def galdtype(): # Define the data-type for the public version of SAGE Galdesc_full = [ ('SnapNum' , np.int32), ('Type' , np.int32), ('GalaxyIndex' , np.int64), ('CentralGalaxyIndex' , np.int64), ('SAGEHaloIndex' , np.int32), ('SAGETreeIndex' , np.int32), ('SimulationHaloIndex' , np.int64), ('mergeType' , np.int32), ('mergeIntoID' , np.int32), ('mergeIntoSnapNum' , np.int32), ('dT' , np.float32), ('Pos' , (np.float32, 3)), ('Vel' , (np.float32, 3)), ('Spin' , (np.float32, 3)), ('Len' , np.int32), ('Mvir' , np.float32), ('CentralMvir' , np.float32), ('Rvir' , np.float32), ('Vvir' , np.float32), ('Vmax' , np.float32), ('VelDisp' , np.float32), ('ColdGas' , np.float32), ('f_H2' , np.float32), ('f_HI' , np.float32), ('cf' , np.float32), ('Zp' , np.float32), ('StellarMass' , np.float32), ('BulgeMass' , np.float32), ('HotGas' , np.float32), ('EjectedMass' , np.float32), ('BlackHoleMass' , np.float32), ('IntraClusterStars' , np.float32), ('MetalsColdGas' , np.float32), ('MetalsStellarMass' , np.float32), ('MetalsBulgeMass' , np.float32), ('MetalsHotGas' , np.float32), ('MetalsEjectedMass' , np.float32), ('MetalsIntraClusterStars' , np.float32), ('SfrDisk' , np.float32), ('SfrBulge' , np.float32), ('SfrDiskZ' , np.float32), ('SfrBulgeZ' , np.float32), ('DiskRadius' , np.float32), ('Cooling' , np.float32), ('Heating' , np.float32), ('QuasarModeBHaccretionMass' , np.float32), ('TimeOfLastMajorMerger' , np.float32), ('TimeOfLastMinorMerger' , np.float32), ('OutflowRate' , np.float32), ('infallMvir' , 
np.float32), ('infallVvir' , np.float32), ('infallVmax' , np.float32) ] names = [Galdesc_full[i][0] for i in range(len(Galdesc_full))] formats = [Galdesc_full[i][1] for i in range(len(Galdesc_full))] Galdesc = np.dtype({'names':names, 'formats':formats}, align=True) return Galdesc def read_one_file(name): fin = open(name, 'rb') Ntrees = np.fromfile(fin,np.dtype(np.int32),1)[0] NtotGals = np.fromfile(fin,np.dtype(np.int32),1)[0] GalsPerTree = np.fromfile(fin, np.dtype((np.int32, Ntrees)),1)[0] G = np.fromfile(fin, Galdesc, NtotGals) G = G.view(recarray) return G # + Galdesc = galdtype() for i in range(len(redshift)): G_snap = [] G_snap1 = [] GalsTree = [] for j in range(firstfile, lastfile+1): name = (directory+filename+'_z'+f'{redshift[i]:.3f}'+'_'+f'{j}') #name1 = (directory1+filename+'_z'+f'{redshift[i]:.3f}'+'_'+f'{j}') #snapshot 0 = redshift 127, snapshot 63 = redshift 0 G = read_one_file(name) #G1 = read_one_file(name1) G_snap.extend(G) #G_snap1.extend(G1) G_snap = np.array(G_snap) #G_snap1 = np.array(G_snap1) G_snap = G_snap.view(recarray) #G_snap1 = G_snap1.view(recarray) # - len(G_snap) stellarmass = G_snap.StellarMass * 1.e10 / Hubble_h g = np.where(G_snap.ColdGas > 0.0)[0] #w = np.where(((G_snap.BulgeMass / G_snap.StellarMass) < 0.3) & (G_snap.ColdGas > 0.0))[0] gasmass = G_snap.ColdGas[g] * 1.e10 / Hubble_h H2mass = G_snap.f_H2[g] * G_snap.ColdGas[g] * 1.e10 / Hubble_h HImass = G_snap.f_HI[g] * G_snap.ColdGas[g] * 1.e10 / Hubble_h max(H2mass) ''' stellarmass1 = G_snap1.StellarMass * 1.e10 / Hubble_h #w = np.where(G_snap.ColdGas > 0.0)[0] w = np.where(((G_snap1.BulgeMass / G_snap1.StellarMass) < 0.3) & (G_snap1.ColdGas > 0.0))[0] gasmass1 = G_snap1.ColdGas[w] * 1.e10 / Hubble_h H2mass1 = G_snap1.H2Gas[w] * 1.e10 / Hubble_h HImass1 = G_snap1.HIGas[w] * 1.e10 / Hubble_h ''' from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib.pyplot as plt plt.rcParams['figure.dpi'] = 80 # + binwidth = 0.3 ax0 = plt.subplot2grid((1,1), (0,0)) divider = 
make_axes_locatable(ax0) #ax1 = divider.append_axes("bottom", size="50%", pad=0.3) # Baldry+ 2008 modified data used for the MCMC fitting Zwaan = np.array([[6.933, -0.333], [7.057, -0.490], [7.209, -0.698], [7.365, -0.667], [7.528, -0.823], [7.647, -0.958], [7.809, -0.917], [7.971, -0.948], [8.112, -0.927], [8.263, -0.917], [8.404, -1.062], [8.566, -1.177], [8.707, -1.177], [8.853, -1.312], [9.010, -1.344], [9.161, -1.448], [9.302, -1.604], [9.448, -1.792], [9.599, -2.021], [9.740, -2.406], [9.897, -2.615], [10.053, -3.031], [10.178, -3.677], [10.335, -4.448], [10.492, -5.083] ], dtype=np.float32) ObrRaw = np.array([ [7.300, -1.104], [7.576, -1.302], [7.847, -1.250], [8.133, -1.240], [8.409, -1.344], [8.691, -1.479], [8.956, -1.792], [9.231, -2.271], [9.507, -3.198], [9.788, -5.062 ] ], dtype=np.float32) ObrCold = np.array([ [8.009, -1.042], [8.215, -1.156], [8.409, -0.990], [8.604, -1.156], [8.799, -1.208], [9.020, -1.333], [9.194, -1.385], [9.404, -1.552], [9.599, -1.677], [9.788, -1.812], [9.999, -2.312], [10.172, -2.656], [10.362, -3.500], [10.551, -3.635], [10.740, -5.010] ], dtype=np.float32) ObrCold_xval = np.log10(10**(ObrCold[:, 0]) /Hubble_h/Hubble_h) ObrCold_yval = (10**(ObrCold[:, 1]) * Hubble_h*Hubble_h*Hubble_h) Zwaan_xval = np.log10(10**(Zwaan[:, 0]) /Hubble_h/Hubble_h) Zwaan_yval = (10**(Zwaan[:, 1]) * Hubble_h*Hubble_h*Hubble_h) ObrRaw_xval = np.log10(10**(ObrRaw[:, 0]) /Hubble_h/Hubble_h) ObrRaw_yval = (10**(ObrRaw[:, 1]) * Hubble_h*Hubble_h*Hubble_h) ax0.plot(ObrCold_xval, ObrCold_yval, color='black', lw = 7, alpha=0.25, label='Obr. \& Raw. 2009 (Cold Gas)') ax0.plot(Zwaan_xval, Zwaan_yval, color='blue', lw = 7, alpha=0.25, label='Zwaan et al. 2005 (HI)') ax0.plot(ObrRaw_xval, ObrRaw_yval, color='red', lw = 7, alpha=0.25, label='Obr. \& Raw. 
2009 (H2)') mi = np.floor(min(np.log10(gasmass))) - 2 ma = np.floor(max(np.log10(gasmass))) + 2 NB = int((ma - mi) / binwidth) (g_counts, g_binedges) = np.histogram(np.log10(gasmass), range=(mi, ma), bins=NB) (H2_counts, H2_binedges) = np.histogram(np.log10(H2mass), range=(mi, ma), bins=NB) (HI_counts, HI_binedges) = np.histogram(np.log10(HImass), range=(mi, ma), bins=NB) ''' (g1_counts, g1_binedges) = np.histogram(np.log10(gasmass1), range=(mi, ma), bins=NB) (H21_counts, H21_binedges) = np.histogram(np.log10(H2mass1), range=(mi, ma), bins=NB) (HI1_counts, HI1_binedges) = np.histogram(np.log10(HImass1), range=(mi, ma), bins=NB) ''' # Set the x-axis values to be the centre of the bins g_x = g_binedges[:-1] + 0.5 * binwidth H2_x = H2_binedges[:-1] + 0.5 * binwidth HI_x = HI_binedges[:-1] + 0.5 * binwidth ''' g1_x = g1_binedges[:-1] + 0.5 * binwidth H21_x = H21_binedges[:-1] + 0.5 * binwidth HI1_x = HI1_binedges[:-1] + 0.5 * binwidth ''' # Overplot the model histograms ''' ax0.plot(g1_x, g1_counts / (BoxSize/Hubble_h)**3 / binwidth, 'k', label='Cold Gas', lw=1.0) ax0.plot(HI1_x, HI1_counts / (BoxSize/Hubble_h)**3 / binwidth, 'b', label='HI', lw=1.0) ax0.plot(H21_x, H21_counts / (BoxSize/Hubble_h)**3 / binwidth, 'r', label='H2', lw=1.0) ''' ax0.plot(g_x, g_counts / (BoxSize/Hubble_h)**3 / binwidth, 'k--', label='Stars from H2', lw=1.0) ax0.plot(HI_x, HI_counts / (BoxSize/Hubble_h)**3 / binwidth, 'b--', lw=1.0) ax0.plot(H2_x, H2_counts / (BoxSize/Hubble_h)**3 / binwidth, 'r--', lw=1.0) #plt.plot(mass, label='computed') #plt.plot(Mass, label='intrinsic') ax0.set_yscale('log', nonposy='clip') #plt.axis([8.0, 12.5, 1.0e-6, 1.0e-1]) # Set the x-axis minor ticks ax0.xaxis.set_minor_locator(plt.MultipleLocator(0.1)) ax0.set_ylabel(r'$\phi\ (\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1}$)') # Set the y... 
ax0.set_xlabel(r'$\log_{10} M_{\mathrm{X}}\ (M_{\odot})$') ax0.set_xlim(8.0, 11.5) leg = ax0.legend(loc=0, numpoints=1, labelspacing=0.1) leg.draw_frame(False) # Don't want a box frame for t in leg.get_texts(): # Reduce the size of the text t.set_fontsize('medium') plt.tight_layout() #plt.savefig('GMF-flatcf.png') plt.show() # - w = np.where(HImass > gasmass)[0] print(w) # + binwidth = 0.3 ax0 = plt.subplot2grid((1,1), (0,0)) divider = make_axes_locatable(ax0) # Baldry+ 2008 modified data used for the MCMC fitting Baldry = np.array([ [7.05, 1.3531e-01, 6.0741e-02], [7.15, 1.3474e-01, 6.0109e-02], [7.25, 2.0971e-01, 7.7965e-02], [7.35, 1.7161e-01, 3.1841e-02], [7.45, 2.1648e-01, 5.7832e-02], [7.55, 2.1645e-01, 3.9988e-02], [7.65, 2.0837e-01, 4.8713e-02], [7.75, 2.0402e-01, 7.0061e-02], [7.85, 1.5536e-01, 3.9182e-02], [7.95, 1.5232e-01, 2.6824e-02], [8.05, 1.5067e-01, 4.8824e-02], [8.15, 1.3032e-01, 2.1892e-02], [8.25, 1.2545e-01, 3.5526e-02], [8.35, 9.8472e-02, 2.7181e-02], [8.45, 8.7194e-02, 2.8345e-02], [8.55, 7.0758e-02, 2.0808e-02], [8.65, 5.8190e-02, 1.3359e-02], [8.75, 5.6057e-02, 1.3512e-02], [8.85, 5.1380e-02, 1.2815e-02], [8.95, 4.4206e-02, 9.6866e-03], [9.05, 4.1149e-02, 1.0169e-02], [9.15, 3.4959e-02, 6.7898e-03], [9.25, 3.3111e-02, 8.3704e-03], [9.35, 3.0138e-02, 4.7741e-03], [9.45, 2.6692e-02, 5.5029e-03], [9.55, 2.4656e-02, 4.4359e-03], [9.65, 2.2885e-02, 3.7915e-03], [9.75, 2.1849e-02, 3.9812e-03], [9.85, 2.0383e-02, 3.2930e-03], [9.95, 1.9929e-02, 2.9370e-03], [10.05, 1.8865e-02, 2.4624e-03], [10.15, 1.8136e-02, 2.5208e-03], [10.25, 1.7657e-02, 2.4217e-03], [10.35, 1.6616e-02, 2.2784e-03], [10.45, 1.6114e-02, 2.1783e-03], [10.55, 1.4366e-02, 1.8819e-03], [10.65, 1.2588e-02, 1.8249e-03], [10.75, 1.1372e-02, 1.4436e-03], [10.85, 9.1213e-03, 1.5816e-03], [10.95, 6.1125e-03, 9.6735e-04], [11.05, 4.3923e-03, 9.6254e-04], [11.15, 2.5463e-03, 5.0038e-04], [11.25, 1.4298e-03, 4.2816e-04], [11.35, 6.4867e-04, 1.6439e-04], [11.45, 2.8294e-04, 9.9799e-05], 
[11.55, 1.0617e-04, 4.9085e-05], [11.65, 3.2702e-05, 2.4546e-05], [11.75, 1.2571e-05, 1.2571e-05], [11.85, 8.4589e-06, 8.4589e-06], [11.95, 7.4764e-06, 7.4764e-06], ], dtype=np.float32) Baldry_xval = np.log10(10 ** Baldry[:, 0] /Hubble_h/Hubble_h) #if(whichimf == 1): Baldry_xval = Baldry_xval - 0.26 # convert back to Chabrier IMF Baldry_yvalU = (Baldry[:, 1]+Baldry[:, 2]) * Hubble_h*Hubble_h*Hubble_h Baldry_yvalL = (Baldry[:, 1]-Baldry[:, 2]) * Hubble_h*Hubble_h*Hubble_h plt.fill_between(Baldry_xval, Baldry_yvalU, Baldry_yvalL, facecolor='purple', alpha=0.25, label='Baldry et al. 2008 (z=0.1)') # This next line is just to get the shaded region to appear correctly in the legend #plt.plot(xaxeshisto, counts / self.volume * Hubble_h*self.Hubble_h*self.Hubble_h / binwidth, label='Baldry et al. 2008', color='purple', alpha=0.3) mi = 8 ma = 12 NB = int((ma - mi) / binwidth) (s_counts, s_binedges) = np.histogram(np.log10(stellarmass), range=(mi, ma), bins=NB) s_x = s_binedges[:-1] + 0.5 * binwidth #(s1_counts, s1_binedges) = np.histogram(np.log10(stellarmass1), range=(mi, ma), bins=NB) #s1_x = s1_binedges[:-1] + 0.5 * binwidth ax0.plot(s_x, s_counts / (BoxSize/Hubble_h)**3 / binwidth, color='blue', label='Stellar Mass from H2', lw=1.0) #ax0.plot(s1_x, s1_counts / (BoxSize/Hubble_h)**3 / binwidth, color='k', label='Stellar Mass from Cold Gas', lw=1.0) ax0.set_yscale('log', nonposy='clip') ax0.xaxis.set_minor_locator(plt.MultipleLocator(0.1)) ax0.set_ylabel(r'$\phi\ (\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1}$)') # Set the y... 
ax0.set_xlabel(r'$\log_{10} M_{\mathrm{*}}\ (M_{\odot})$') ax0.set_xlim(8.0, 11.5) leg = ax0.legend(loc=0, numpoints=1, labelspacing=0.1) leg.draw_frame(False) # Don't want a box frame for t in leg.get_texts(): # Reduce the size of the text t.set_fontsize('medium') plt.tight_layout() #plt.savefig('SMF-flatcf.png') plt.show() # - w = np.where(stellarmass > 0)[0] len(w) wg = np.where((G_snap.f_H2 < 0.5) & (G_snap.f_H2 > 0.495))[0] G_snap.ColdGas[wg] plt.figure() # New figure ax = plt.subplot(111) # 1 plot on the figure w = np.where(G_snap.Zp > 0)[0] Z = (G_snap.Zp) cf = G_snap.cf plt.plot(Z, cf, '.') plt.xlim(-0.1, 3.0) #plt.xscale('log', nonposy='clip') plt.xlabel(r'Z(Z$\odot)$') plt.ylabel('Clumping Factor') #plt.savefig('flatcf.png') plt.show() # + plt.figure() # New figure ax = plt.subplot(111) # 1 plot on the figure w = np.where(G_snap.ColdGas > 0)[0] cg = np.log10(G_snap.ColdGas[w] * 1.e10 /Hubble_h) f_H2 = G_snap.f_H2[w] plt.plot(cg, f_H2, '.') #plt.xlim(0.0, 0.25e10) #plt.xlabel(r'$12\ +\ \log_{10}[\mathrm{O/H}]$') #plt.ylabel('Clumping Factor') plt.show() # - len(w)
analysis/.ipynb_checkpoints/H2_H1_Mass-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import array # %run chart_theme.py # ### Income Devoted to Housing (distribution) file = pd.read_csv('county_rent_mort_inc_units_5yr.csv') cols = ['Year', 'FIPS', 'County', 'Med_Mort', 'Med_Rent', 'Med_Mort_Inc', 'Med_Rent_Inc', 'Mort_Share_Inc', 'Rent_Share_Inc', 'Total_Owned', 'Total_Rented'] last = file[cols][file.Year == 2016] last['Rent_Share_Inc_Bin'] = last.Rent_Share_Inc.round().values last['Mort_Share_Inc_Bin'] = last.Mort_Share_Inc.round().values vals_dict = {} for val in last.Rent_Share_Inc_Bin.dropna().unique(): bin_fips = last.FIPS[last.Rent_Share_Inc_Bin == val].values vals_dict[val] = len(bin_fips) df = pd.DataFrame(vals_dict.items()) df.columns = ['Bin', 'Count'] # + chart = alt.Chart(df, title='Renter Income Devoted to Housing (2014-2018)' ).mark_bar(size=11, color='#cede54' ).encode(x=alt.X('Bin', title='Income Devoted to Housing', axis=alt.Axis(titleY=35, titleOpacity=.8, titleFontSize=18, labelOpacity=0, labelFontSize=16)), y=alt.Y('Count', title='In most counties, the median share of renter income devoted to housing was', axis=alt.Axis(values=list(range(100,300,100)), titleX=-30, titleY=-30, titleOpacity=.8, titleFontSize=18, labelOpacity=.8, labelPadding=30, labelFontSize=16), scale=alt.Scale(domain=[0,305]))) y_title2 = alt.Chart(pd.DataFrame({'x':[10.2], 'y':[363], 'text':['between 20% and 40%']})).mark_text(font='lato', fontSize=18, opacity=.8).encode(x='x', y='y', text='text') x_text_df = pd.DataFrame({'x':list(range(10,60,10)), 'y':[0]*5, 'text':[' 10%'] + [str(i) for i in range(20,60,10)]}) x_text = alt.Chart(x_text_df).mark_text(dx=0, dy=17, fontSize=16, font='lato', opacity=.8).encode(x=alt.X('x'), y='y', text='text') y_text_df = pd.DataFrame({'x':[10], 'y':[300], 'text':['300 
counties']}) y_text = alt.Chart(y_text_df).mark_text(dx=-44, dy=0, fontSize=16, font='lato', opacity=.8).encode(x=alt.X('x', scale=alt.Scale(domain=[10,50])), y='y', text='text') line = alt.Chart(pd.DataFrame({'x1':[10.2], 'x2':[55], 'y1':[300]})).mark_rule(opacity=.15).encode(x=alt.X('x1', scale=alt.Scale(domain=[10,50])), x2='x2',y='y1') rent_inc_dist = (chart + line + x_text + y_text + y_title2).configure_title(fontSize=24) # - rent_inc_dist rent_inc_dist.save('rent_inc_share_dist.svg') vals_dict = {} for val in last.Mort_Share_Inc_Bin.dropna().unique(): bin_fips = last.FIPS[last.Mort_Share_Inc_Bin == val].values vals_dict[val] = len(bin_fips) df = pd.DataFrame(vals_dict.items()) df.columns = ['Bin', 'Count'] # + chart = alt.Chart(df, title='Homeowner Income Devoted to Housing (2014-2018)' ).mark_bar(size=11, color='#97c2f2' ).encode(x=alt.X('Bin', title='Income Devoted to Housing', axis=alt.Axis(titleY=35, titleOpacity=.8, titleFontSize=18, labelOpacity=0, labelFontSize=16)), y=alt.Y('Count', title='In most counties, the median share of homeowner income devoted to housing was', axis=alt.Axis(values=list(range(100,500,100)), titleX=-30, titleY=-30, titleFontSize=18, titleOpacity=.8, labelOpacity=.8, labelPadding=30, labelFontSize=16), scale=alt.Scale(domain=[0,501]))) y_title2 = alt.Chart(pd.DataFrame({'x':[10.2], 'y':[570], 'text':['between 10% and 20%']})).mark_text(font='lato', fontSize=18, opacity=.8).encode(x='x', y='y', text='text') x_text_df = pd.DataFrame({'x':list(range(5,30,5)), 'y':[0]*5, 'text':[' 5%'] + [str(i) for i in range(10,30,5)]}) x_text = alt.Chart(x_text_df).mark_text(dx=0, dy=17, fontSize=16, font='lato', opacity=.8).encode(x=alt.X('x'), y='y', text='text') y_text_df = pd.DataFrame({'x':[10], 'y':[500], 'text':['500 counties']}) y_text = alt.Chart(y_text_df).mark_text(dx=-44, dy=0, fontSize=16, font='lato', opacity=.8).encode(x=alt.X('x', scale=alt.Scale(domain=[10,50])), y='y', text='text') line = alt.Chart(pd.DataFrame({'x1':[10.3], 
'x2':[55], 'y1':[500]})).mark_rule(opacity=.15).encode(x=alt.X('x1', scale=alt.Scale(domain=[10,50])), x2='x2',y='y1') mort_inc_dist = (line + chart + x_text + y_text + y_title2).configure_title(fontSize=24) # - mort_inc_dist mort_inc_dist.save('mort_inc_share_dist.svg') # --- # Digging deeper into income devoted to housing: rent_hi = last[last.Med_Rent > last.Med_Mort] mort_hi = last[last.Med_Mort > last.Med_Rent] len(rent_hi) len(mort_hi) # + jupyter={"outputs_hidden": true} rent_hi['Rent_Housing_Rent_Higher_Bin'] = rent_hi.Med_Rent.round().values rent_hi['Rent_Housing_Rent_Higher_Inc_Bin'] = rent_hi.Med_Rent_Inc.round().values rent_hi['Rent_Housing_Rent_Higher_Share_Inc_Bin'] = rent_hi.Rent_Share_Inc.round().values mort_hi['Rent_Housing_Mort_Higher_Bin'] = mort_hi.Med_Rent.round().values mort_hi['Rent_Housing_Mort_Higher_Inc_Bin'] = mort_hi.Med_Rent_Inc.round().values mort_hi['Rent_Housing_Mort_Higher_Share_Inc_Bin'] = mort_hi.Rent_Share_Inc.round().values rent_hi['Mort_Housing_Rent_Higher_Bin'] = rent_hi.Med_Mort.round().values rent_hi['Mort_Housing_Rent_Higher_Inc_Bin'] = rent_hi.Med_Mort_Inc.round().values rent_hi['Mort_Housing_Rent_Higher_Share_Inc_Bin'] = rent_hi.Mort_Share_Inc.round().values mort_hi['Mort_Housing_Mort_Higher_Bin'] = mort_hi.Med_Mort.round().values mort_hi['Mort_Housing_Mort_Higher_Inc_Bin'] = mort_hi.Med_Mort_Inc.round().values mort_hi['Mort_Housing_Mort_Higher_Share_Inc_Bin'] = mort_hi.Mort_Share_Inc.round().values rent_dict = {} dfs = [rent_hi, rent_hi, rent_hi, mort_hi, mort_hi, mort_hi]*2 divs = [40, 200, 1]*2 + [40, 200, 1]*2 counter = 0 for col in ['Rent_Housing_Rent_Higher_Bin', 'Rent_Housing_Rent_Higher_Inc_Bin', 'Rent_Housing_Rent_Higher_Share_Inc_Bin', 'Rent_Housing_Mort_Higher_Bin', 'Rent_Housing_Mort_Higher_Inc_Bin', 'Rent_Housing_Mort_Higher_Share_Inc_Bin', 'Mort_Housing_Rent_Higher_Bin', 'Mort_Housing_Rent_Higher_Inc_Bin', 'Mort_Housing_Rent_Higher_Share_Inc_Bin', 'Mort_Housing_Mort_Higher_Bin', 
'Mort_Housing_Mort_Higher_Inc_Bin', 'Mort_Housing_Mort_Higher_Share_Inc_Bin']: df = dfs[counter] df[col] = [int(round(i/divs[counter])*divs[counter]) for i in df[col].values] vals_dict = {} for val in df[col].dropna().unique(): bin_fips = df.FIPS[df[col] == val].values vals_dict[val] = len(bin_fips) dataframe = pd.DataFrame(vals_dict.items()) dataframe.columns = ['Bin', 'Count'] rent_dict[col] = dataframe counter += 1 # - # ### Income Devoted to Housing # #### (1) Renters: # %run chart_theme.py # + charts_dict = {} keys = ['Rent_Housing_Rent_Higher_Bin', 'Mort_Housing_Rent_Higher_Bin', 'Rent_Housing_Mort_Higher_Bin', 'Mort_Housing_Mort_Higher_Bin'] x_titles = ['Monthly Renter Housing Expense', 'Monthly Homeowner Housing Expense','',''] x_title_pads = [350,350,0,0] y_titles1 = ['Renting More','','Owning More',''] y_titles2 = ['Expensive','','Expensive',''] y_titles1_pad = [-270,200]*2 y_titles2_pad = [-337,200]*2 opacities = [.8,0]*2 y_paddings = [50,0]*2 colors = ['#cede54','#97c2f2']*2 y_texts = [-2,500]*2 line_x = [20,-30]*2 line_len = [92,110]*2 counter = 0 for key in keys: df = rent_dict[key] df = df[df.Bin < 2200] # Calculating Median Bin (to display as text on chart) bins = array.array('i') for bn in df.Bin: count = df.Count[df.Bin == bn].iloc[0] bins.extend(np.full(count, bn)) chart = alt.Chart(df).mark_bar(size=13, color=colors[counter] ).encode(x=alt.X('Bin', title=x_titles[counter], axis=alt.Axis(values=list(range(800,2400,400)), titleOpacity=.8, titleFontSize=40, titleX=x_title_pads[counter], titleY=-550, labelOpacity=.8, labelFontSize=28, format='a'), scale=alt.Scale(domain=[400,2000])), y=alt.Y('Count', axis=alt.Axis(values=list(range(80,240,80)), labelPadding=y_paddings[counter], labelFontSize=28, labelOpacity=opacities[counter]), scale=alt.Scale(domain=[0,300]))) text_df = pd.DataFrame({'x':[380,293,1300,2460], 'y':[0,240,240,270], 'text':['$400','240 counties','_'*line_len[counter],285]}) line = 
alt.Chart(text_df.query('x==1300')).mark_text(dx=line_x[counter], dy=-4, opacity=.09, fontSize=11.5, fontWeight='bold').encode(x=alt.X('x', scale=alt.Scale(domain=[300,1000])), y='y', text='text') x_text = alt.Chart(text_df.query('x==380')).mark_text(dx=0, dy=24, fontSize=28, font='lato', opacity=.8).encode(x='x', y='y', text='text') y_text = alt.Chart(text_df.query('x==293')).mark_text(dx=y_texts[counter], dy=0, fontSize=28, font='lato', opacity=opacities[counter]).encode(x='x', y='y', text='text') y_title1 = alt.Chart(pd.DataFrame({'x':[y_titles1_pad[counter]], 'y':[130], 'text':y_titles1[counter]})).mark_text(font='lato', fontSize=30, opacity=.8).encode(x='x', y='y', text='text') y_title2 = alt.Chart(pd.DataFrame({'x':[y_titles2_pad[counter]], 'y':[110], 'text':y_titles2[counter]})).mark_text(font='lato', fontSize=30, opacity=.8).encode(x='x', y='y', text='text') med_inc_text = alt.Chart(pd.DataFrame({'x':[1900], 'y':[30], 'text':['Median: $' + str(int(np.median(bins)))]})).mark_text(font='lato', fontSize=24, opacity=.8).encode(x='x', y='y', text='text') #chart_spacing = alt.Chart(text_df.query('x==2460')).mark_rule(opacity=0, size=3).encode(x='x', y='y', y2='text') combined = (line + chart + x_text + y_text + y_title1 + y_title2 + med_inc_text).properties(width=700, height=550) charts_dict[counter] = combined #combined.save(keys[counter] + str(counter) + '.png', scale_factor=6) counter += 1 # - renter_owner_hous_exp_charts1 = charts_dict[0] | charts_dict[1] renter_owner_hous_exp_charts2 = charts_dict[2] | charts_dict[3] renter_owner_hous_exp_charts1 renter_owner_hous_exp_charts2 renter_owner_hous_exp_charts1.save('renter_owner_hous_exp1.svg') renter_owner_hous_exp_charts2.save('renter_owner_hous_exp2.svg') # + #renter_owner_hous_exp_charts = (charts_dict[0] | charts_dict[3]) & (charts_dict[2] | charts_dict[1]) #renter_owner_hous_exp_charts # - # --- # #### (2) Homeowners charts_dict = {} keys = ['Rent_Housing_Rent_Higher_Share_Inc_Bin', 
'Mort_Housing_Rent_Higher_Share_Inc_Bin', 'Rent_Housing_Mort_Higher_Share_Inc_Bin', 'Mort_Housing_Mort_Higher_Share_Inc_Bin'] titles = ['Renter Income Devoted to Housing', 'Homeowner Income Devoted to Housing','',''] y_titles = ['Counties where renting is more expensive than owning']*2 + ['Counties where owning is more expensive than renting']*2 opacities = [.8,0]*2 colors = ['#cede54','#97c2f2']*2 vals = [list(range(10,60,10)), list(range(10,30,5))]*2 counter = 0 for key in keys: df = rent_dict[key] chart = alt.Chart(df, title=titles[counter] ).mark_bar(size=14, color=colors[counter] ).encode(x=alt.X('Bin', axis=alt.Axis(values=vals[counter], labelFontSize=32, labelOpacity=.7), scale=alt.Scale(domain=[10,50])), y=alt.Y('Count', title=y_titles[counter], axis=alt.Axis(values=list(range(100,300,100)), titleX=-60, titleY=0, titleOpacity=.8, titleFontSize=36, labelPadding=60, labelFontSize=32, labelOpacity=opacities[counter]), scale=alt.Scale(domain=[0,350]))) text_df = pd.DataFrame({'x':[34,6,7,60], 'y':[300,0,300,0], 'text':['_'*94, '5%', '300 counties',-50]}) line = alt.Chart(text_df.query('x==34')).mark_text(dx=-7, dy=-4, opacity=.09, fontSize=11.5, fontWeight='bold').encode(x=alt.X('x', scale=alt.Scale(domain=[10,50])), y='y', text='text') x_text = alt.Chart(text_df.query('x==6')).mark_text(dx=-5, dy=26.5, fontSize=32, font='lato', opacity=.7).encode(x='x', y='y', text='text') y_text = alt.Chart(text_df.query('x==7')).mark_text(dx=2, dy=0, fontSize=32, font='lato', opacity=opacities[counter]).encode(x='x', y='y', text='text') chart_spacing = alt.Chart(text_df.query('x==60')).mark_rule(opacity=0, size=3).encode(x='x', y='y', y2='text') combined = (line + chart + x_text + y_text + chart_spacing).properties(width=750, height=450) charts_dict[counter] = combined #combined.save(keys[counter] + str(counter) + '.png', scale_factor=6) counter += 1 # + jupyter={"outputs_hidden": true} inc_housing_charts = (charts_dict[0] | charts_dict[1]) & (charts_dict[2] | 
charts_dict[3]) inc_housing_charts # - inc_housing_charts.save('renter_homeowner_inc_dev_hous.svg') # %run chart_theme.py # + charts_dict = {} keys = ['Rent_Housing_Rent_Higher_Inc_Bin', 'Mort_Housing_Rent_Higher_Inc_Bin', 'Rent_Housing_Mort_Higher_Inc_Bin', 'Mort_Housing_Mort_Higher_Inc_Bin'] x_titles = ['Monthly Renter Incomes', 'Monthly Homeowner Incomes','',''] x_title_pads = [350,350,0,0] y_titles1 = ['Renting More','','Owning More',''] y_titles2 = ['Expensive','','Expensive',''] y_titles1_pad = [-2400,100]*2 y_titles2_pad = [-2780,100]*2 opacities = [.8,0]*2 x_paddings = [50,0]*2 colors = ['#cede54', '#97c2f2']*2 y_texts = [-2,500]*2 line_x = [0,-50]*2 line_len = [94,110]*2 counter = 0 for key in keys: df = rent_dict[key] df = df[df.Bin < 11000] # Excluding outliers to minimize plot width # Calculating Median Bin (to display as text on chart) bins = array.array('i') for bn in df.Bin: count = df.Count[df.Bin == bn].iloc[0] bins.extend(np.full(count, bn)) chart = alt.Chart(df).mark_bar(size=11.8, color=colors[counter] ).encode(x=alt.X('Bin', title=x_titles[counter], axis=alt.Axis(values=list(range(4000, 11000, 2000)), format='~s', titleOpacity=.8, titleFontSize=40, titleX=x_title_pads[counter], titleY=-580, labelFontSize=28, labelOpacity=.8), scale=alt.Scale(domain=[1000,10000])), y=alt.Y('Count', axis=alt.Axis(values=list(range(100,300,100)), labelPadding=x_paddings[counter], labelFontSize=28, labelOpacity=opacities[counter]), scale=alt.Scale(domain=[0,350]))) text_df = pd.DataFrame({'x':[6300,1950,500,13200], 'y':[300,0,300,0], 'text':['_'*line_len[counter], '$2k', '300 counties',-40]}) line = alt.Chart(text_df.query('x==6300')).mark_text(dx=line_x[counter], dy=-4, opacity=.09, fontSize=11.5, fontWeight='bold').encode(x=alt.X('x', scale=alt.Scale(domain=[1000,10000])), y='y', text='text') x_text = alt.Chart(text_df.query('x==1950')).mark_text(dx=-5, dy=24.5, fontSize=28, font='lato', opacity=.8).encode(x='x', y='y', text='text') y_text = 
alt.Chart(text_df.query('x==500')).mark_text(dx=y_texts[counter], dy=0, fontSize=28, font='lato', opacity=opacities[counter]).encode(x='x', y='y', text='text') y_title1 = alt.Chart(pd.DataFrame({'x':[y_titles1_pad[counter]], 'y':[170.5], 'text':y_titles1[counter]})).mark_text(font='lato', fontSize=30, opacity=.8).encode(x='x', y='y', text='text') y_title2 = alt.Chart(pd.DataFrame({'x':[y_titles2_pad[counter]], 'y':[145.5], 'text':y_titles2[counter]})).mark_text(font='lato', fontSize=30, opacity=.8).encode(x='x', y='y', text='text') med_inc_text = alt.Chart(pd.DataFrame({'x':[9600], 'y':[35], 'text':['Median: $' + str(int(np.median(bins)))]})).mark_text(font='lato', fontSize=24, opacity=.8).encode(x='x', y='y', text='text') #chart_spacing = alt.Chart(text_df.query('x==13200')).mark_rule(opacity=0, size=3).encode(x='x', y='y', y2='text') combined = (line + chart + x_text + y_text + y_title1 + y_title2 + med_inc_text).properties(width=700, height=550) charts_dict[counter] = combined #combined.save(keys[counter] + str(counter) + '.png', scale_factor=6) counter += 1 # - incomes_charts1 = charts_dict[0] | charts_dict[1] incomes_charts2 = charts_dict[2] | charts_dict[3] incomes_charts1 incomes_charts2 incomes_charts1.save('renter_owner_incomes1.svg') incomes_charts2.save('renter_owner_incomes2.svg') # ---
Notebooks/Housing_Dist_Charts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9 (tensorflow) # language: python # name: tensorflow # --- # + from math import sqrt def primecheck(num): #checks if number is prime if num <= 1: return False else: for i in range(2, int(sqrt(num))+1): if num%i == 0: return False return True # - def nprime(num): #finds numth prime number n = 7 check = 3 while check < num: if primecheck(n): check+=1 n+=2 print(f"The {check}th Prime number is {n-2}") nprime(10001)
10001st Prime (E7).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# # Classification of Organisms
# Using a Digital Dichotomous Key
#
# This Jupyter Notebook will allow you to search through different organisms based on their physical characteristics using a tool known as a Dichotomous Key. Start out by going to Kernel -> Restart & Run All to begin!

# Import modules that contain functions we need
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt

# +
# Our data is the dichotomous key table and is defined as the word 'key'.
# key is set equal to the .csv file that is read by pandas.
# The .csv file must be in the same directory as the program.
# If the data is being pulled locally use the code that is commented out below
#key = pd.read_csv("Classification of Organisms- Jupyter Data.csv")
#key2 = pd.read_csv("Classification of Organisms- Jupyter Data KEY 2.csv")
# Otherwise both tables are fetched from public gists (network access required).
key = pd.read_csv("https://gist.githubusercontent.com/GoodmanSciences/f4d51945a169ef3125234c57b878e058/raw/bebeaae8038f0b418ed37c2a98b82aa9d3cc38d1/Classification%2520of%2520Organisms-Jupyter%2520Data.csv")
key2 = pd.read_csv("https://gist.githubusercontent.com/GoodmanSciences/4060d993635e90cdcc46fe637c92ee37/raw/d9031747855b9762b239dea07a60254eaa6051f7/Classification%2520of%2520Organisms-%2520Jupyter%2520Data%2520KEY%25202.csv")

# This sets Organism as the index instead of numbers
#key = data.set_index("organism")
# -

# ## Pre-Questions

# ### A Dichotomous Key is....
# a tool that allows scientists to identify and classify organisms in the natural world. Based on their characteristics, scientists can narrow down species into groups such as trees, flowers, mammals, reptiles, rocks, and fish. A Dichotomous Key can help to understand how scientists have classified organisms using Binomial Nomenclature.
#
# [Dichotomous Key Video](https://www.youtube.com/watch?v=M51AKJqx-7s)

# Here is a helpful image of a sample Dichotomous Key!
from IPython.display import Image
from IPython.core.display import HTML
Image(url= 'http://biology-igcse.weebly.com/uploads/1/5/0/7/15070316/8196495_orig.gif')

# ## PART 1: Sorting Organisms by One Characteristic
#
# We will be looking at the characteristics of 75 unique organisms in our Dichotomous Key. The input below will provide us with some of the possible organisms you may discover.

# Animal options in Dichotomous Key
# Displays all row titles as an array
key.organism

# #### Importing the Organism Characteristics/Conditions

# Conditions/Questions for finding the correct animal
# Displays all column titles as an array
key.columns

# ## PART 2: Sorting Organisms by Many Characteristics
#
# These are the conditions or the characteristics in which certain answers are categorized for certain organisms. Each characteristic/condition has a yes/no except for the Kingdoms. Change the conditionals in the code below to change what organism(s) are displayed. For most, the only change needs to be the 'yes' or 'no'.
#
# **Capitalization matters so be careful. You also must put in only allowed answers in every condition or the code will break!**

# Single-characteristic filter: rows whose 'decomposer' column is 'yes'.
key[(key['decomposer'] == 'yes')]

# +
# This conditional allows us to query a column and if the data within that cell matches it will display the animal(s).
# If you are unsure of what to put try making that column a comment by adding # in front of it.
key[ #physical characteristics
    (key['fur'] == 'yes') & \
    (key['feathers'] == 'no') & \
    (key['poisonous'] == 'no') & \
    (key['scales'] == 'no') & \
    (key['multicellular'] == 'yes') & \
    (key['fins'] == 'no') & \
    (key['wings'] == 'no') & \
    (key['vertebrate'] == 'yes') & \
    #environmental characteristics
    (key['marine'] == 'no') & \
    (key['terrestrial'] == 'yes') & \
    #feeding characteristics
    #decomposers get their food by breaking down decaying organisms
    (key['decomposer'] == 'no') & \
    #carnivores get their food by eating animals
    (key['carnivore'] == 'no') & \
    #herbivores get their food by eating plants
    (key['herbivore'] == 'yes') & \
    #omnivores get their food by eating both plants and animals
    (key['omnivore'] == 'no') & \
    #photosynthesis is the process of making food using energy from sunlight
    (key['photosynthesis'] == 'no') & \
    #autotrophs are organisms that generate their own food inside themselves
    (key['autotroph'] == 'no') & \
    #possible kingdoms include: animalia, plantae, fungi
    (key['kingdom'] == 'animalia') & \
    #cell type
    (key['eukaryotic'] == 'yes') & \
    (key['prokaryotic'] == 'no')
]
# -

# ## Part 3: Scientific Classification of Organisms

# +
#sort your organisms by their taxonomical classification
# This conditional allows us to query a column and if the data within that cell matches,
# it will display the corresponding animal(s)
key2[(key2['kingdom'] == 'animalia')]
# -

# Done?? Insert an image for one of the organisms you found using the dichotomous key.
from IPython.display import Image
from IPython.core.display import HTML
Image(url= 'https://lms.mrc.ac.uk/wp-content/uploads/insert-pretty-picture-here1.jpg')
Classification of Organisms.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# LeNet-style CNN trained on grayscale 50x50 crops of the Folds dataset and
# evaluated with a confusion-matrix heatmap. Requires TensorFlow 1.x
# (tf.ConfigProto / tf.Session) plus Keras, OpenCV and scikit-learn, and the
# "Folds_Dataset_Final" directory on disk.

# +
import os

# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import cv2
import pandas as pd

# Sklearn
from sklearn.model_selection import train_test_split  # Helps with organizing data for training
from sklearn.metrics import confusion_matrix  # Helps present results as a confusion-matrix

print(tf.__version__)

# NOTE(review): VGG19 is imported (aliased as VGG16) but never used below;
# kept to preserve the module's import surface.
from keras.applications.vgg19 import VGG19 as VGG16
from keras.models import load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
# (A dead duplicate of the ConfigProto setup used to live here; the config is
# built and consumed in the next cell, so the unused copy was removed.)

# +
# Grow GPU memory on demand instead of grabbing it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
keras.backend.set_session(session)

imagepaths = []
categories_set = set()

# Go through all the files and subdirectories inside a folder and save path to images inside list
for root, dirs, files in os.walk("Folds_Dataset_Final", topdown=False):
    for name in files:
        path = os.path.join(root, name)
        if name.startswith('c'):  # skip files whose name starts with 'c'
            continue
        if path.endswith("PNG"):  # We want only the images
            imagepaths.append(path)
            categories_set.add(os.path.split(root)[1])  # parent folder name = class label

categories_list = list(sorted(categories_set))
categories = dict(zip(categories_list, range(len(categories_list))))  # label name -> integer id
print(categories)

# +
print(len(imagepaths))  # If > 0, then a PNG image was loaded

size = 50, 50  # target (width, height) passed to cv2.resize

X = []  # Image data
y = []  # Labels

# Loops through imagepaths to load images and labels into arrays
for path in imagepaths:
    img = cv2.imread(path)  # Reads image and returns np.array
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Converts into the correct colorspace (GRAY)
    img = cv2.resize(img, size)  # Reduce image size so training can be faster
    X.append(img)

    # Processing label in image path: the class is the image's parent directory name
    category = os.path.split(os.path.split(path)[0])[1]
    label = categories[category]
    y.append(label)

# Turn X and y into np.array to speed up train_test_split
X = np.array(X, dtype="uint8")
X = X.reshape(len(imagepaths), *size, 1)  # Needed to reshape so CNN knows it's different images
y = np.array(y)

print("Images loaded: ", len(X))
print("Labels loaded: ", len(y))

# +
print(y[0], imagepaths[0])  # Debugging

ts = 0.3  # Percentage of images that we want to use for testing. The rest is used for training.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=ts, random_state=42)

from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, GlobalAveragePooling2D, MaxPooling2D
from keras.models import Sequential, Model, load_model

img_height, img_width = size
num_classes = len(categories)

# If imagenet weights are being loaded,
# input must have a static square shape (one of (128, 128), (160, 160), (192, 192), or (224, 224))

# LeNet-style stack: four conv/pool blocks, then two dense layers with light dropout.
model = Sequential()
model.add(Conv2D(32, (5, 5), activation='relu', input_shape=(img_height, img_width, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(len(categories), activation='softmax'))

model.compile(optimizer='adam',  # Optimization routine, which tells the computer how to adjust the parameter values to minimize the loss function.
              loss='sparse_categorical_crossentropy',  # Loss function, which tells us how bad our predictions are.
              metrics=['accuracy'])  # List of metrics to be evaluated by the model during training and testing.

# earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')
mcp_save = ModelCheckpoint('.mdl_wts.hdf5', save_best_only=True, monitor='val_loss', mode='min')
# reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')

# NOTE(review): evaluation below uses the final-epoch weights; the best
# (lowest val_loss) weights only exist on disk in .mdl_wts.hdf5 — confirm intent.
model.fit(X_train, y_train, epochs=100, batch_size=64, verbose=2,
          validation_data=(X_test, y_test), callbacks=[mcp_save,])

test_loss, test_acc = model.evaluate(X_test, y_test)
print(f'Test accuracy: {test_acc*100:2.2f}%')

predictions = model.predict(X_test)  # Make predictions towards the test set

# +
y_pred = np.argmax(predictions, axis=1)  # Transform predictions into 1-D array with label number

df = pd.DataFrame(confusion_matrix(y_test, y_pred),
                  columns=[f"Predicted {x}" for x in categories_list],
                  index=[f"Actual {x}" for x in categories_list])

fig, axis = plt.subplots(figsize=(5, 4), dpi=100)
heatmap = axis.pcolor(df.values, cmap=plt.cm.RdYlGn)
plt.colorbar(heatmap)

# +
# Persist results: heatmap image, raw confusion matrix, and trained model.
fig.savefig('heatmap_lasic_lenet_dropout')
df.to_csv('lasic_results_lenet_dropout.csv')
model.save('Lasic_lenet_dropout.h5')
# -

session.close()
lasic_lenet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Step-by-step web-scraping tutorial: fetch pages with `requests`, parse them
# with BeautifulSoup, and finally export scraped product data to a CSV file.
# Every cell performs live network I/O against codedamn's demo pages.

# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
import requests

res = requests.get('https://codedamn.com')

print(res.text)
print(res.status_code)

# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
import requests

# Make a request to https://codedamn-classrooms.github.io/webscraper-python-codedamn-classroom-website/
# Store the result in 'res' variable
res = requests.get(
    'https://codedamn-classrooms.github.io/webscraper-python-codedamn-classroom-website/')
txt = res.text
status = res.status_code

print(txt, status)
# print the result

# +
from bs4 import BeautifulSoup

page = requests.get("https://codedamn.com")
soup = BeautifulSoup(page.content, 'html.parser')
title = soup.title.text # gets you the text of the <title>(...)</title>

# +
import requests
from bs4 import BeautifulSoup

# Make a request to https://codedamn-classrooms.github.io/webscraper-python-codedamn-classroom-website/
page = requests.get(
    "https://codedamn-classrooms.github.io/webscraper-python-codedamn-classroom-website/")
soup = BeautifulSoup(page.content, 'html.parser')

# Extract title of page
page_title = soup.title.text

# print the result
print(page_title)

# +
import requests
from bs4 import BeautifulSoup

# Make a request
page = requests.get(
    "https://codedamn.com")
soup = BeautifulSoup(page.content, 'html.parser')

# Extract title of page
page_title = soup.title.text

# Extract body of page
page_body = soup.body

# Extract head of page
page_head = soup.head

# print the result
print(page_body, page_head)

# +
import requests
from bs4 import BeautifulSoup

# Make a request
page = requests.get(
    "https://codedamn-classrooms.github.io/webscraper-python-codedamn-classroom-website/")
soup = BeautifulSoup(page.content, 'html.parser')

# Extract title of page (the whole <title> tag this time, not just its text)
page_title = soup.title

# Extract body of page
page_body = soup.body

# Extract head of page
page_head = soup.head

# print the result
print(page_title, page_head)

# +
import requests
from bs4 import BeautifulSoup

# Make a request
page = requests.get(
    "https://codedamn-classrooms.github.io/webscraper-python-codedamn-classroom-website/")
soup = BeautifulSoup(page.content, 'html.parser')

# Extract first <h1>(...)</h1> text
first_h1 = soup.select('h1')[0].text

# +
import requests
from bs4 import BeautifulSoup

# Make a request
page = requests.get(
    "https://codedamn-classrooms.github.io/webscraper-python-codedamn-classroom-website/")
soup = BeautifulSoup(page.content, 'html.parser')

# Create all_h1_tags as empty list
all_h1_tags = []

# Set all_h1_tags to all h1 tags of the soup
for element in soup.select('h1'):
    all_h1_tags.append(element.text)

# Create seventh_p_text and set it to 7th p element text of the page
seventh_p_text = soup.select('p')[6].text

print(all_h1_tags, seventh_p_text)
# -

# Example of the record shape built below; .strip() trims scraped whitespace.
info = {
    "title": 'Asus AsusPro Adv... '.strip(),
    "review": '2 reviews\n\n\n'.strip()
}

# +
import requests
from bs4 import BeautifulSoup

# Make a request
page = requests.get(
    "https://codedamn-classrooms.github.io/webscraper-python-codedamn-classroom-website/")
soup = BeautifulSoup(page.content, 'html.parser')

# Create top_items as empty list
top_items = []

# Extract and store in top_items according to instructions on the left
products = soup.select('div.thumbnail')
for elem in products:
    title = elem.select('h4 > a.title')[0].text
    review_label = elem.select('div.ratings')[0].text
    info = {
        "title": title.strip(),
        "review": review_label.strip()
    }
    top_items.append(info)

print(top_items)

# +
import requests
from bs4 import BeautifulSoup

# Make a request
page = requests.get(
    "https://codedamn-classrooms.github.io/webscraper-python-codedamn-classroom-website/")
soup = BeautifulSoup(page.content, 'html.parser')

# Create top_items as empty list
image_data = []

# Extract and store in top_items according to instructions on the left
images = soup.select('img')
for image in images:
    src = image.get('src')
    alt = image.get('alt')
    image_data.append({"src": src, "alt": alt})

print(image_data)
# -

# Template for the link records collected in the next cell.
info = {
    "href": "<link here>",
    "text": "<link text here>"
}

# +
import requests
from bs4 import BeautifulSoup

# Make a request
page = requests.get(
    "https://codedamn-classrooms.github.io/webscraper-python-codedamn-classroom-website/")
soup = BeautifulSoup(page.content, 'html.parser')

# Create top_items as empty list
all_links = []

# Extract and store in top_items according to instructions on the left
links = soup.select('a')
for ahref in links:
    text = ahref.text
    text = text.strip() if text is not None else ''

    href = ahref.get('href')
    href = href.strip() if href is not None else ''

    all_links.append({"href": href, "text": text})

print(all_links)

# +
import requests
from bs4 import BeautifulSoup
import csv

# Make a request
page = requests.get(
    "https://codedamn-classrooms.github.io/webscraper-python-codedamn-classroom-website/")
soup = BeautifulSoup(page.content, 'html.parser')

# Create top_items as empty list
all_products = []

# Extract and store in top_items according to instructions on the left
products = soup.select('div.thumbnail')
for product in products:
    name = product.select('h4 > a')[0].text.strip()
    description = product.select('p.description')[0].text.strip()
    price = product.select('h4.price')[0].text.strip()
    reviews = product.select('div.ratings')[0].text.strip()
    image = product.select('img')[0].get('src')

    all_products.append({
        "name": name,
        "description": description,
        "price": price,
        "reviews": reviews,
        "image": image
    })

# Write every scraped product row to products.csv with a header row.
keys = all_products[0].keys()
with open('products.csv', 'w', newline='') as output_file:
    dict_writer = csv.DictWriter(output_file, keys)
    dict_writer.writeheader()
    dict_writer.writerows(all_products)
web-scraping-python-tutorial-how-to-scrape-data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Python 2 analysis notebook: loads per-run indexed simulation logs and
# .reward summaries, then reproduces the paper's Table 3 statistics.

import time
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
# %pylab inline
import scipy
from scipy.stats import poisson
from hmmlearn.utils import normalize

# NOTE(review): 'sys' is used here but never imported in this file —
# presumably injected by the notebook environment; add 'import sys' to be safe.
sys.path.append('../')
from rl.mmpp import MMPP

# # Helper functions

# load indexed log
def load_dataframes(prefix, n_run, n=None):
    """Load the first n (default n_run) indexed CSV logs for `prefix`.

    Each log is re-indexed by its 'start_ts' timestamp and gains a
    'total_reward' column. Returns a list of DataFrames.
    """
    if n is None:
        n = n_run
    files = [prefix + "_{}.log".format(i) for i in range(n)]
    file_list = ['./log/index/'+ prefix +'_x{}/'.format(n_run) +'index_'+file+'.csv' for file in files]
    df_list = [None]*n
    for i in range(n):
        t = time.time()
        df = pd.read_csv(file_list[i], delimiter=';', index_col=0)
        df.loc[:, 'start_ts'] = df['start_ts'].apply(lambda x: pd.to_datetime(x))
        df.set_index('start_ts', inplace=True)
        # NOTE(review): '+' here, while the per-figure cells below compute
        # tr_reward - op_cost — confirm the sign convention of 'op_cost'.
        df['total_reward'] = df['tr_reward'] + df['op_cost']
        df_list[i] = df
        print df.shape,
        print files[i],
        print "{:.2f} sec".format(time.time()-t)
    return df_list

# get per-step reward from loaded DataFrame
def get_step_reward(file_prefix, num_total, num_load):
    """Average per-step total_reward over a fixed 5-day window, per run.

    Runs with a row count other than 302400 (incomplete sims) are dropped.
    """
    df_list = load_dataframes(file_prefix, num_total, num_load)
    df_list = filter(lambda x: x.shape[0]==302400, df_list)
    start = pd.to_datetime("2014-10-16 9:30:00")
    end = pd.to_datetime("2014-10-21 9:30:00")
    delta = pd.Timedelta('2 seconds')  # one simulation time step
    step_reward = np.zeros(len(df_list))
    for i, df in enumerate(df_list):
        df = df.loc[start:end]
        print (i, df.shape[0])
        step = (df.index-df.index[0])/delta+1
        ts = df['total_reward'].cumsum()/step
        step_reward[i] = ts.iloc[-1]
    return step_reward

# Get baseline per-step_reward from loaded DataFrame
def get_baseline(file_prefix, num_total, num_load):
    """Baseline per-step reward: mean generated requests minus a fixed cost of 5."""
    df_list = load_dataframes(file_prefix, num_total, num_load)
    df_list = filter(lambda x: x.shape[0]==302400, df_list)
    start = pd.to_datetime("2014-10-16 9:30:00")
    end = pd.to_datetime("2014-10-21 9:30:00")
    delta = pd.Timedelta('2 seconds')
    baselines = np.zeros(len(df_list))
    for i, df in enumerate(df_list):
        df = df.loc[start:end]
        print (i, df.shape[0])
        step = (df.index-df.index[0])/delta+1
        ts = df['req_generated'].cumsum()/step
        baselines[i] = ts.iloc[-1] - 5.0
    return baselines

# Load per-step rewards from .reward file and calculate statistics
def get_stats(f, n, baseline=0.0):
    """Parse a .reward file (one bracketed, comma-separated line), subtract
    `baseline`, print mean/std/percentiles, and return them all."""
    with open('./log/index/{f}_x{n}/{f}.reward'.format(f=f, n=n), 'r') as f_reward:
        line = f_reward.readlines()[0]
    # line[1:-2] strips the leading '[' and the trailing ']\n'.
    step_reward = np.array(map(float, line[1:-2].split(',')))
    step_reward -= baseline
    print "mean {:.3f}, std {:.3f},".format(step_reward.mean(), step_reward.std()),
    print "10% {:.3f}, 50% {:.3f}, 90% {:.3f},".format(*np.percentile(step_reward, [10, 50, 90])),
    print "{} sims".format(len(step_reward))
    return step_reward, step_reward.mean(), step_reward.std(), np.percentile(step_reward, 10), np.percentile(step_reward, 50), np.percentile(step_reward, 90)

# ---

# # Table 3

# Precomputed baselines, one per site (dh3, dsy, mechcenter, dormW, mediaB, newgym).
baseline_dh3 = -3.9641868942
baseline_dsy = 2.49294833512
baseline_mechcenter = -4.28022864092
baseline_dormW = -4.71388095425
baseline_mediaB = -2.91390934919
baseline_newgym = -4.4175304744

##################################################################
#
#   Uncomment below to calculate baseline from indexed log files
##################################################################
# baselines = get_baseline("msg_QNN_Jan31_1154", 10, 3); baseline_dh3 = np.mean(baselines)
# baselines = get_baseline("msg_QNN_Feb1_1740", 14, 3); baseline_dsy = np.mean(baselines)
# baselines = get_baseline("msg_QNN_Feb2_0930", 14, 3); baseline_mechcenter = np.mean(baselines)
# baselines = get_baseline("msg_QNN_Feb2_0944", 14, 3); baseline_dormW = np.mean(baselines)
# baselines = get_baseline("msg_QNN_Feb2_0953", 14, 3); baseline_mediaB = np.mean(baselines)
# baselines = get_baseline("msg_QNN_Feb2_1004", 14, 3); baseline_newgym = np.mean(baselines)

# +
# One section per site: print the baseline, then stats for each agent variant.
print " "*25+"dh3"
print "{:20s}".format('Baseline'),
print " {:.2f}".format(baseline_dh3)
exps = [
    ('QL-d', "msg_DynaQtable_130_Feb12_2217", 14),
    ('DQN-m', "msg_DynaQNN_130_Feb15_2000", 14),
    ('DQN-d', "msg_DynaQNN_130_Feb12_2215", 14),
    ('DQN', "msg_QNN_Jan31_1154", 10),
]
step_rewards, params, means, stds, p10s, p50s, p90s = [], [], [], [], [], [], []
for p, f, n in exps:
    print "{:20s}".format(p),
    step_reward, mean, std, p10, p50, p90 = get_stats(f, n, baseline_dh3)
    step_rewards.append(step_reward.tolist()), params.append(p), means.append(mean); stds.append(std); p10s.append(p10); p50s.append(p50); p90s.append(p90)

print " "*25+"dsy"
print "{:20s}".format('Baseline'),
print " {:.2f}".format(baseline_dsy)
exps = [
    ('QL-d', "msg_DynaQtable_Feb12_2232", 14),
    ('DQN-m', "msg_DynaQNN_Feb15_2050", 14),
    ('DQN-d', "msg_DynaQNN_Feb12_2226", 14),
    ('DQN', "msg_QNN_Feb1_1740", 14),
]
step_rewards, params, means, stds, p10s, p50s, p90s = [], [], [], [], [], [], []
for p, f, n in exps:
    print "{:20s}".format(p),
    step_reward, mean, std, p10, p50, p90 = get_stats(f, n, baseline_dsy)
    step_rewards.append(step_reward.tolist()), params.append(p), means.append(mean); stds.append(std); p10s.append(p10); p50s.append(p50); p90s.append(p90)

print " "*25+"dmW"
print "{:20s}".format('Baseline'),
print " {:.2f}".format(baseline_dormW)
exps = [
    ('QL-d', "msg_DynaQtable_Feb7_1052", 14),
    ('DQN-m', "msg_DynaQNN_130_Feb10_2316", 14),
    ('DQN-d', "msg_DynaQNN_Feb5_1007", 14),
    ('DQN', "msg_QNN_Feb2_0944", 14),
]
step_rewards, params, means, stds, p10s, p50s, p90s = [], [], [], [], [], [], []
for p, f, n in exps:
    print "{:20s}".format(p),
    step_reward, mean, std, p10, p50, p90 = get_stats(f, n, baseline_dormW)
    step_rewards.append(step_reward.tolist()), params.append(p), means.append(mean); stds.append(std); p10s.append(p10); p50s.append(p50); p90s.append(p90)

print " "*25+"mhC"
print "{:20s}".format('Baseline'),
print " {:.2f}".format(baseline_mechcenter)
exps = [
    ('QL-d', "msg_DynaQtable_130_Feb14_0027", 14),
    ('DQN-m', "msg_DynaQNN_130_Feb15_2001", 14),
    ('DQN-d', "msg_DynaQNN_130_Feb14_0026", 14),
    ('DQN', "msg_QNN_Feb2_0930", 14),
]
step_rewards, params, means, stds, p10s, p50s, p90s = [], [], [], [], [], [], []
for p, f, n in exps:
    print "{:20s}".format(p),
    step_reward, mean, std, p10, p50, p90 = get_stats(f, n, baseline_mechcenter)
    step_rewards.append(step_reward.tolist()), params.append(p), means.append(mean); stds.append(std); p10s.append(p10); p50s.append(p50); p90s.append(p90)

print " "*25+"mdB"
print "{:20s}".format('Baseline'),
print " {:.2f}".format(baseline_mediaB)
exps = [
    ('QL-d', "msg_DynaQtable_Feb13_2359", 14),
    ('DQN-m', "msg_DynaQNN_Feb15_2051", 14),
    ('DQN-d', "msg_DynaQNN_Feb13_2358", 14),
    ('DQN', "msg_QNN_Feb2_0953", 14),
]
step_rewards, params, means, stds, p10s, p50s, p90s = [], [], [], [], [], [], []
for p, f, n in exps:
    print "{:20s}".format(p),
    step_reward, mean, std, p10, p50, p90 = get_stats(f, n, baseline_mediaB)
    step_rewards.append(step_reward.tolist()), params.append(p), means.append(mean); stds.append(std); p10s.append(p10); p50s.append(p50); p90s.append(p90)

print " "*25+"gym"
print "{:20s}".format('Baseline'),
print " {:.2f}".format(baseline_newgym)
exps = [
    ('QL-d', "msg_DynaQtable_130_Feb14_0029", 14),
    ('DQN-m', "msg_DynaQNN_130_Feb15_2002", 14),
    ('DQN-d', "msg_DynaQNN_130_Feb14_0028", 14),
    ('DQN', "msg_QNN_Feb2_1004", 14),
]
step_rewards, params, means, stds, p10s, p50s, p90s = [], [], [], [], [], [], []
for p, f, n in exps:
    print "{:20s}".format(p),
    step_reward, mean, std, p10, p50, p90 = get_stats(f, n, baseline_newgym)
    step_rewards.append(step_reward.tolist()), params.append(p), means.append(mean); stds.append(std); p10s.append(p10); p50s.append(p50); p90s.append(p90)
# -

# # Figure 2

# +
# Load one indexed run; 'delta' (the 2-second step) is reused by later cells.
f = './log/index_message_2016-6-8_XXX.log.csv'
df = pd.read_csv(f, delimiter=';', index_col=0)
df.loc[:, 'start_ts'] = df['start_ts'].apply(lambda x: pd.to_datetime(x))
df.set_index('start_ts', inplace=True)
df['total_reward'] = df['tr_reward'] - df['op_cost']
delta = pd.Timedelta('2 seconds')
# -
# Figure 2: one day of traffic, action probabilities, and Q values (3 stacked panels).
plt.figure(1)
ax = plt.subplot(311)
start = pd.to_datetime("2014-09-25 9:20:00"); end = pd.to_datetime("2014-09-26 9:20:00")
ts = df[['req_generated']][start:end].resample('1Min')
ts = ts.sample(frac=0.5)  # thin the points for readability
ts.columns = pd.Index(['# requests/time step'])
ts.plot(legend=True, ax=ax,
        style=['k-'],
        fontsize=11
)
ax.get_xaxis().set_visible(False)
plt.legend(fontsize=10,
           bbox_to_anchor=(0.5,1),
)
ax.yaxis.set_label_text('# requests')
ax.yaxis.label.set_fontsize(12)
# ------------------------------
ax = plt.subplot(312)
ts = df[['batch_dist_wake', 'batch_dist_sleep']][start:end].resample('1Min')
ts = ts.sample(frac=0.5)
ts.columns = pd.Index(['P(WAKE)', 'P(SLEEP)'])
ts.plot(legend=True, ax=ax,
        style=['r-','b-+'],
        fontsize=12
)
ax.yaxis.set_label_text('Probability')
ax.yaxis.label.set_fontsize(12)
plt.legend(fontsize=8)
ax.get_xaxis().set_visible(False)
plt.legend(bbox_to_anchor=(0.55,0.9), fontsize=10)
# ------------------------------
ax = plt.subplot(313)
ts = df[['q_wake', 'q_sleep']][start:end].resample('1Min')
ts.columns = pd.Index(['Q(WAKE)', 'Q(SLEEP)'])
ts.index.name = 'Time'
ts = ts.sample(frac=0.5)
ts.plot(legend=True, ax=ax,
        style=['r-','b-+'],
        fontsize=12
)
ax.xaxis.label.set_fontsize(15)
ax.yaxis.set_label_text('Scaled\nQ values')
ax.yaxis.label.set_fontsize(12)
plt.legend(fontsize=8,
           bbox_to_anchor=(0.4,1),)
# ------------------------------

# # Figure 3

# Three runs differing only in reward scaling R (1000 / 20 / 1).
f1 = './log/index_message_2016-6-11_1230_FR1000_G5.log.csv'
df1 = pd.read_csv(f1, delimiter=';', index_col=0)
df1.loc[:, 'start_ts'] = df1['start_ts'].apply(lambda x: pd.to_datetime(x))
df1.set_index('start_ts', inplace=True)
df1['total_reward'] = df1['tr_reward'] - df1['op_cost']

f2 = './log/index_message_2016-6-11_1230_FR20_G5.log.csv'
df2 = pd.read_csv(f2, delimiter=';', index_col=0)
df2.loc[:, 'start_ts'] = df2['start_ts'].apply(lambda x: pd.to_datetime(x))
df2.set_index('start_ts', inplace=True)
df2['total_reward'] = df2['tr_reward'] - df2['op_cost']

f3 = './log/index_message_2016-6-11_1230_FR1_G5.log.csv'
df3 = pd.read_csv(f3, delimiter=';', index_col=0)
df3.loc[:, 'start_ts'] = df3['start_ts'].apply(lambda x: pd.to_datetime(x))
df3.set_index('start_ts', inplace=True)
df3['total_reward'] = df3['tr_reward'] - df3['op_cost']

plt.figure(1)
# -------------
ax = plt.subplot(311)
start = pd.to_datetime("2014-09-25 9:20:00")
end = pd.to_datetime("2014-09-26 9:20:00")
ts1 = df1[['q_wake', 'q_sleep']][start:end].resample('1Min')
ts1.columns = pd.Index(['Q(WAKE)', 'Q(SLEEP)'])
ts1.plot(
    legend=True, ax=ax,
    style=['r-','b--'],
    fontsize=12, title='R=1000',
    ylim=(-1.1,1.1)
)
ax.get_xaxis().set_visible(False)
plt.legend(bbox_to_anchor=(0.4,1), fontsize=12)
ax.title.set_position((0.5, 0.7))
# -------------
ax = plt.subplot(312)
start = pd.to_datetime("2014-09-25 9:20:00")
end = pd.to_datetime("2014-09-26 9:20:00")
ts2 = df2[['q_wake', 'q_sleep']][start:end].resample('1Min')
ts2.columns = pd.Index(['Q(WAKE)', 'Q(SLEEP)'])
ts2.plot(
    # figsize=(15, 4),
    legend=False, ax=ax,
    style=['r-','b--'],
    fontsize=12, title='R=20',
    ylim=(-1.1,1.1)
)
ax.get_xaxis().set_visible(False)
ax.title.set_position((0.5, 0.7))
ax.yaxis.set_label_text('Scaled Q values')
ax.yaxis.label.set_fontsize(15)
# -------------
ax = plt.subplot(313)
start = pd.to_datetime("2014-09-25 9:20:00")
end = pd.to_datetime("2014-09-26 9:20:00")
ts3 = df3[['q_wake', 'q_sleep']][start:end].resample('1Min')
ts3.columns = pd.Index(['Q(WAKE)', 'Q(SLEEP)'])
ts3.index.name = 'Time'
ts3.plot(
    legend=False, ax=ax,
    style=['r-','b--'],
    fontsize=12,
    ylim=(-1.1,1.1)
)
ax.xaxis.label.set_fontsize(15);ax.xaxis.label.set_position((0.5, 0.2));
ax.xaxis.set_label_coords(0.5, -0.4)
ax.annotate('R=1', size=13, xy=(0.51,0.35), xycoords='figure fraction')

# # Figure 4

f = './log/index_message_2016-6-12_G5_BUF2_AR1_b5.log.csv'
df = pd.read_csv(f, delimiter=';', index_col=0)
df.loc[:, 'start_ts'] = df['start_ts'].apply(lambda x: pd.to_datetime(x))
df.set_index('start_ts', inplace=True)
df['total_reward'] = 0.5*df['tr_reward'] - 0.5*df['op_cost']
# Boolean action indicators decoded from the stringified action tuples.
df['a_wake'] = (df.agent_action=='(False, \'serve_all\')')
df['a_sleep'] = (df.agent_action=='(True, None)')
# start = pd.to_datetime("2014-10-16 9:30:00")
end = pd.to_datetime("2014-11-07 0:00:00")
df = df.loc[:end]

# Four stacked panels: traffic, waking probability, Q values, cumulative sleeping gain.
# NOTE(review): 'delta' here comes from the Figure 2 cell above — run that cell first.
plt.figure(1)
#------------------
ax = plt.subplot(411)
ts = df[['req_generated']].resample('1Min')
ts.columns = pd.Index(['# requests'])
ts.plot(
    figsize=(10, 8),
    ax=ax, legend=False,
    style=['k-'],
    fontsize=12, title='(a)',
)
ax.title.set_position((0.5, -0.22))
ax.get_xaxis().set_visible(False)
ax.yaxis.set_label_text('# requests')
ax.yaxis.label.set_fontsize(15)
ax.title.set_fontsize(15)
ax.grid()
#------------------
ax = plt.subplot(412)
ts = df[['a_wake']].resample('1Min')*100
ts.columns = pd.Index(['waking percentage'])
ts.plot(
    figsize=(10, 8),
    ax=ax, legend=False,
    style=['r-'],
    fontsize=12, title='(b)',
)
ax.title.set_position((0.5, -0.22))
ax.get_xaxis().set_visible(False)
ax.yaxis.set_label_text('Prob. waking')
ax.yaxis.label.set_fontsize(15)
ax.title.set_fontsize(15)
ax.grid()
#------------------
ax = plt.subplot(413)
ts = df[['q_wake', 'q_sleep']].resample('1Min')
ts.q_wake *= df.reward_scaling.resample('1Min'); ts.q_sleep*=df.reward_scaling.resample('1Min')
ts.columns =pd.Index(['Q(WAKE)', 'Q(SLEEP)'])
ts.plot(
    figsize=(10, 8),
    legend=True, ax=ax,
    style=['r-','b-'],
    fontsize=12, title='(c)',
)
ax.title.set_position((0.5, -0.22))
ax.get_xaxis().set_visible(False)
ax.yaxis.set_label_text('Q values')
ax.yaxis.label.set_fontsize(15)
ax.title.set_fontsize(15)
ax.grid()
#------------------
ax = plt.subplot(414)
step = (df.index-df.index[0])/delta+1
ts = df['total_reward'].cumsum()/step
ts_on = (0.5*df['req_generated']-0.5*5).cumsum()/step  # always-on reference
(ts-ts_on).plot(
    figsize=(10, 8),
    legend=False, ax=ax,
    style=['k-'],
    # ylim=(0, 4),
    fontsize=12, title='(d)'
)
ax.title.set_position((0.5, -0.6))
ax.yaxis.set_label_text('Sleeping gain')
ax.yaxis.label.set_fontsize(15)
ax.xaxis.label.set_fontsize(15); ax.xaxis.label.set_text('Time');
ax.xaxis.set_label_coords(0.5, -0.2)
ax.title.set_fontsize(15)
ax.grid()

# # Figure 5

f = './log/index_message_2016-6-8_2130_AR1.log.csv'
df = pd.read_csv(f, delimiter=';', index_col=0)
df.loc[:, 'start_ts'] = df['start_ts'].apply(lambda x: pd.to_datetime(x))
df.set_index('start_ts', inplace=True)
df['total_reward'] = df['tr_reward'] - df['op_cost']
df['a_wake'] = (df.agent_action=='(False, \'serve_all\')')
df['a_sleep'] = (df.agent_action=='(True, None)')

# +
# Two short time windows side by side: observations (top) and Q values + actions (bottom).
plt.figure(1)
#-------------------
start = pd.to_datetime("2014-11-05 00:00:00")
end = pd.to_datetime("2014-11-05 00:02:06")
ax = plt.subplot(221)
ts = df.ob_last_t[start:end]
ts.name = 'Last arrival'
ts.plot(legend=True, ax=ax,
        # figsize=(15, 8),
        style=['g--+'])
ax.get_xaxis().set_visible(False)
ax.yaxis.set_label_text('# requests')
ax.yaxis.label.set_fontsize(20)
#-------------------
ax = plt.subplot(221)
ts = df.ob_new_q[start:end]
ts.name = 'Last queue'
ts.plot(legend=True, ax=ax,
        # figsize=(15, 8),
        style=['k-d'],
        ylim=(0,6))
ax.get_xaxis().set_visible(False)
ax.yaxis.set_label_text('# requests')
ax.yaxis.label.set_fontsize(20)
plt.legend(bbox_to_anchor=(0.3,1), fontsize=12)
plt.legend(fontsize=17)
#-------------------
ax = plt.subplot(223)
ts = df.loc[start:end][['q_wake', 'q_sleep']]
ts.columns = pd.Index(['Q(WAKE)','Q(SLEEP)'])
ts.plot(legend=True, ax=ax, figsize=(10, 8),style=['r-','b--'])
ax.yaxis.set_label_text('Scaled Q values')
ax.yaxis.label.set_fontsize(20)
#-------------------
ax = plt.subplot(223)
# *3-3.05 shifts the 0/1 action flags below the Q curves for display.
ts = df[['a_wake','a_sleep']].loc[start:end]*3-3.05
ts.columns = pd.Index(['wake', 'sleep'])
ts.plot(legend=True, ax=ax,
        #figsize=(15, 8),
        style=['ro','b+'],
        ylim=(-1, 0))
ax.xaxis.label.set_text('Time');ax.xaxis.label.set_fontsize(20);
ax.xaxis.set_label_coords(0.5, -0.1)
ax.title.set_text('(a)'); ax.title.set_fontsize(20); ax.title.set_position((0.5, -0.3))
plt.legend(fontsize=17, bbox_to_anchor=(0.6, 0.6))
#-------------------
start = pd.to_datetime("2014-11-07 00:02:00")
end = pd.to_datetime("2014-11-07 00:04:00")
ax = plt.subplot(222)
ts = df.ob_last_t[start:end]
ts.columns = pd.Index(['# request in last step'])
ts.plot(legend=False, ax=ax,
        #figsize=(15, 8),
        style=['g--+'])
ax.get_xaxis().set_visible(False)
ax.yaxis.set_ticks_position('right')
#-------------------
ax = plt.subplot(222)
ts = df.ob_new_q[start:end]
ts.columns = pd.Index(['# request in last step'])
ts.plot(legend=False, ax=ax,
        # figsize=(15, 8),
        style=['k-d'])
ax.get_xaxis().set_visible(False)
ax.yaxis.set_ticks_position('right')
#-------------------
ax = plt.subplot(224)
ts = df.loc[start:end][['q_wake', 'q_sleep']]
ts.columns = pd.Index(['Q(WAKE)','Q(SLEEP)'])
ts.plot(legend=False, ax=ax,
        # figsize=(15, 8),
        style=['r-','b--'])
ax.yaxis.set_ticks_position('right')
#-------------------
ax = plt.subplot(224)
ts = df[['a_wake','a_sleep']].loc[start:end]*3-3.05
ts.columns = pd.Index(['wake', 'sleep'])
ts.plot(legend=False, ax=ax,
        # figsize=(15, 8),
        style=['ro','b+'],
        ylim=(-0.6, 0))
ax.xaxis.label.set_text('Time');ax.xaxis.label.set_fontsize(20);
ax.title.set_text('(b)'); ax.title.set_fontsize(20); ax.title.set_position((0.5, -0.3))

plt.savefig("policy.png", bbox_inches='tight', dpi=300)
# -

# # Figure 6

# Runs sweeping the number of planning simulations (n_sim) on the dormW site.
exps = [
    (0, "msg_DynaQNN_130_Feb10_2316", 14),  # dormW, msg_DynaQNN_130_Feb10_2316_x14 (14), n_sim=0, 220min
    (2, "msg_DynaQNN_130_Feb10_2317", 14),  # dormW, msg_DynaQNN_130_Feb10_2317_x14 (14), n_sim=2, 415min
    (5, "msg_DynaQNN_Feb5_1007", 14),  # dormW, msg_DynaQNN_Feb5_1007_x14 (14), num_sim=5, 129min x2
    (10, "msg_DynaQNN_Feb10_2300", 14),  # dormW, msg_DynaQNN_Feb10_2300_x14 (14), n_sim=10, 212min x 2
    (12, "msg_DynaQNN_Feb10_2305", 14),  # dormW, msg_DynaQNN_Feb10_2305_x14 (14), n_sim=12, 255min x 2
    (16, "msg_DynaQNN_Feb10_2302", 14),  # dormW, msg_DynaQNN_Feb10_2302_x14 (14), n_sim=16, 320min x 2
    (20, "msg_DynaQNN_Feb10_2303", 14),  # dormW, msg_DynaQNN_Feb10_2303_x14 (14), n_sim=20, 395min x 2
]
step_rewards, params, means, stds, p10s, p50s, p90s = [], [], [], [], [], [], []
for p, f, n in exps: print "{:2d}".format(p), step_reward, mean, std, p10, p50, p90 = get_stats(f, n, baseline_dormW) step_rewards.append(step_reward), params.append(p), means.append(mean); stds.append(std); p10s.append(p10); p50s.append(p50); p90s.append(p90) plt.figure(); ax = plt.subplot(111) pd.DataFrame(np.array(step_rewards).transpose(), columns=params).plot(kind='box', ax=ax) ax.xaxis.set_label_text('Number of simulated experience') ax.xaxis.label.set_fontsize(15) ax.yaxis.label.set_fontsize(15) ax.grid() # # Figure 7 exps = [ (2, "msg_DynaQtable_Feb7_1324", 14), # dormW, msg_DynaQtable_Feb7_1324_x14 (14), num_sim=5, n_bins=2, 50min (5, "msg_DynaQtable_Feb7_1052", 14), # dormW, msg_DynaQtable_Feb7_1052_x14 (14), num_sim=5, n_bins=5, 50min (7, "msg_DynaQtable_Feb7_1609", 14), # dormW, msg_DynaQtable_Feb7_1609_x14 (14), num_sim=5, n_bins=7, 50min (10, "msg_DynaQtable_Feb6_2008", 14), # dormW, msg_DynaQtable_Feb6_2008_x14 (14), num_sim=5, n_bins=10, 50min (15, "msg_DynaQtable_Feb7_1053", 14), # dormW, msg_DynaQtable_Feb7_1053_x14 (14), num_sim=5, n_bins=15, 50min (25, "msg_DynaQtable_Feb6_2010", 14), # dormW, msg_DynaQtable_Feb6_2010_x14 (14), num_sim=5, n_bins=25, 50min (50, "msg_DynaQtable_Feb6_1543", 14), # dormW, msg_DynaQtable_Feb6_1543_x14 (14), num_sim=5, n_bins=50, 50min (100, "msg_DynaQtable_Feb2_0946", 14), # dormW, msg_DynaQtable_Feb2_0946_x14 (14), num_sim=5, n_bins=100, 50min (250, "msg_DynaQtable_Feb6_1544", 14), # dormW, msg_DynaQtable_Feb6_1544_x14 (14), num_sim=5, n_bins=250, 50min ] step_rewards, params, means, stds, p10s, p50s, p90s = [], [], [], [], [], [], [] for p, f, n in exps: print "{:5d}".format(int(p)), step_reward, mean, std, p10, p50, p90 = get_stats(f, n, baseline_dormW) step_rewards.append(step_reward); params.append(p); means.append(mean); stds.append(std); p10s.append(p10); p50s.append(p50); p90s.append(p90) plt.figure(); ax = plt.subplot(111) pd.DataFrame(np.array(step_rewards).transpose(), columns=params).plot(kind='box', 
ax=ax) ax.xaxis.set_label_text('Number of quantized belief bins') ax.xaxis.label.set_fontsize(15) ax.yaxis.set_label_text('Sleeping gain') ax.yaxis.label.set_fontsize(15) ax.grid() # # Figure 8 # model fitting overflow control def adjust(model, epsilon): model.startprob_ += epsilon model.transmat_ += epsilon model.emissionrates_[0] += epsilon # model.emissionrates_[1] += epsilon # when the model is general MMPP model.emissionrates_[1] = 0.0 # when the model is IPP normalize(model.startprob_) normalize(model.transmat_, axis=1) f = './log/index/figure8.csv' df = pd.read_csv(f, delimiter=';') df.loc[:, 'start_ts'] = df['start_ts'].apply(lambda x: pd.to_datetime(x)) df.set_index('start_ts', inplace=True) x = df['req_generated'] len(x) model = MMPP(n_components=2, n_iter=1, init_params='', verbose=False) model.startprob_ = np.array([.5, .5]) model.transmat_ = np.array([[0.5, 0.5], [0.5, 0.5]]) model.emissionrates_ = np.array([1.0, 0.0]) print 'Start: ', print model.startprob_ print 'Transitions: ' print model.transmat_ print 'Emission: ', print model.emissionrates_ # + stride = 2 total_steps = 7000 window_size = 50 epsilon = 1e-22 offset = 0 n_iter = 3 prob0 = np.zeros(total_steps) trans00 = np.zeros(total_steps) trans11 = np.zeros(total_steps) rate0 = np.zeros(total_steps) rate1 = np.zeros(total_steps) score = np.zeros(total_steps) score_exp = np.zeros(total_steps) for i in range(total_steps): x_window = x.iloc[(offset+i*stride):(offset+i*stride+window_size)].as_matrix()[:, None] for _ in range(n_iter): model.fit(x_window) adjust(model, epsilon) prob0[i] = model.startprob_[0] rate0[i] = model.emissionrates_[0] rate1[i] = model.emissionrates_[1] trans00[i] = model.transmat_[0, 0] trans11[i] = model.transmat_[1, 1] # adjust(model, epsilon) score[i] = model.score(x_window)/window_size score_exp[i] = model.score(model.sample(100)[0])/100 if i%100 == 0: print i, print model.startprob_, print model.transmat_, print model.emissionrates_, print score[i], print score_exp[i] 
print ticks = range((offset+window_size), (offset+(total_steps-1)*stride+window_size+1), stride) time_stamps = x.index[ticks] rate0 = pd.Series(rate0, index=time_stamps); rate1 = pd.Series(rate1, index=time_stamps) trans00 = pd.Series(trans00, index=time_stamps); trans11 = pd.Series(trans11, index=time_stamps) score = pd.Series(score, index=time_stamps); score_exp = pd.Series(score_exp, index=time_stamps) # - plt.figure() ax = plt.subplot(311) df = pd.DataFrame(); df['Real trace'] = x[time_stamps]; df['Fitted rate'] = rate0 df.plot(ax=ax, style=['k-', 'c-']) ax.get_xaxis().set_visible(False) ax.yaxis.set_label_text('# requests'); ax.yaxis.label.set_fontsize(15) ax.grid() # -------------------- ax = plt.subplot(312) df = pd.DataFrame(columns=['P11', 'P00']); df.P11 = trans00; df.P00 = trans11; df.plot(ax=ax, ylim=[-0.1, 1.1], style=['r', 'b']) ax.get_xaxis().set_visible(False) ax.yaxis.set_label_text('Prob.'); ax.yaxis.label.set_fontsize(15) plt.legend(bbox_to_anchor=(1.0,0.9)) ax.grid() # -------------------- ax = plt.subplot(313) df = pd.DataFrame(); df['Observed'] = score; df['Expected'] = score_exp; ts = score-score_exp; ts.index.name = 'Time'; ts.name = 'Per-step likelihood diff.'; df.plot(ax=ax, style=['r-','b--'],) ax.xaxis.label.set_fontsize(15) ax.yaxis.set_label_text('Likelihood'); ax.yaxis.label.set_fontsize(15) ax.grid() plt.legend(bbox_to_anchor=(1.02,0.65)) # # Figure 9 def extract_df(f): df = pd.read_csv(f, delimiter=';', index_col=0) df.loc[:, 'start_ts'] = df['start_ts'].apply(lambda x: pd.to_datetime(x)) df.set_index('start_ts', inplace=True) df['total_reward'] = 0.7*df['tr_reward'] - 0.3*df['op_cost'] df['a_wake'] = (df.agent_action=='(False, \'serve_all\')') df['a_sleep'] = (df.agent_action=='(True, None)') print f return df def extract_baseline_ts(df, name): step = (df.index-df.index[0])/delta+1 ts = df['total_reward'].cumsum()/step ts_on = (0.7*df['req_generated']-0.3*5).cumsum()/step ts = (ts-ts_on) ts.name = name return ts delta = 
pd.to_timedelta('2 Seconds') plt.figure(1) #-------------------- ax = plt.subplot(121) f = './log/index_message_2016-6-13_G5_BUF1_FR20_1_2.log.csv' df = extract_df(f) ts = extract_baseline_ts(df, 'Uniform') ts.plot(figsize=(9, 6), legend=True, ax=ax, style=['k--'], ) f = './log/index_message_2016-6-13_G5_BUF2_FR20_1.log.csv' df = extract_df(f) ts = extract_baseline_ts(df, 'Action-wise') ts.plot(figsize=(9, 6), legend=True, ax=ax, style=['r-'], ylim=(0.8,1)) ax.title.set_text('(a) Sleeping gain'); ax.title.set_fontsize(14); ax.title.set_position((0.5, -0.3)) ax.xaxis.label.set_fontsize(14); ax.xaxis.label.set_text('Time') ax.yaxis.label.set_text('Sleeping gain'); ax.yaxis.label.set_fontsize(14) ax.grid() #--------------------- start = pd.to_datetime("2014-9-27 09:20:00") end = pd.to_datetime("2014-9-27 16:00:00") ax = plt.subplot(322) f = './log/index_message_2016-6-13_G5_BUF1_FR20_1_1.log.csv' df = extract_df(f) ts = df.req_generated[start:end].resample('1Min') ts.plot(figsize=(9, 6), legend=False, ax=ax, style=['k--'] ) ax.get_xaxis().set_visible(False) ax.yaxis.label.set_text('# requests'); ax.yaxis.label.set_fontsize(14); ax.yaxis.set_label_coords(1.17, 0.5) ax.yaxis.set_label_position('right'); ax.yaxis.set_ticks_position('right') ax.grid() # #------------------------------ ax = plt.subplot(324) ts = df.batch_dist_wake[start:end].resample('1Min') ts.plot(figsize=(9, 6), legend=False, ax=ax, style=['k-'], ylim=(0,1) ) ax.get_xaxis().set_visible(False) ax.yaxis.label.set_text('P(Wake)'); ax.yaxis.label.set_fontsize(14); ax.yaxis.set_label_coords(1.17, 0.5) ax.yaxis.set_label_position('right');ax.yaxis.set_ticks_position('right') ax.grid() #------------------------------ ax = plt.subplot(326) ts = df[['q_wake', 'q_sleep']][start:end].resample('1Min') ts.plot(figsize=(8, 6), legend=True, ax=ax, style=['r-', 'b--'], ylim=(-1,1), rot=30 ) plt.legend(bbox_to_anchor=(1,1.2), fontsize=13) ax.xaxis.label.set_fontsize(14); ax.xaxis.label.set_text('Time'); 
ax.xaxis.set_label_coords(0.5, -0.5) ax.yaxis.label.set_text('Q values'); ax.yaxis.label.set_fontsize(14);ax.yaxis.set_label_position('right'); ax.yaxis.set_ticks_position('right') ax.title.set_text('(b) Example period'); ax.title.set_fontsize(14); ax.title.set_position((0.5, -1)) ax.grid() # # Figure 10 # + def extract_df(f): df = pd.read_csv(f, delimiter=';', index_col=0) df.loc[:, 'start_ts'] = df['start_ts'].apply(lambda x: pd.to_datetime(x)) df.set_index('start_ts', inplace=True) df['total_reward'] = 0.5*df['tr_reward'] - 0.5*df['op_cost'] print f return df def extract_baseline_ts(df, name): step = (df.index-df.index[0])/delta+1 ts = df['total_reward'].cumsum()/step ts_on = (0.5*df['req_generated']-0.5*5).cumsum()/step ts = (ts-ts_on) ts.name = name return ts delta = pd.to_timedelta('2 Seconds') # + plt.figure(1) #---------- #-------------------- start = pd.to_datetime("2014-11-05 09:20:36") end = pd.to_datetime("2014-11-05 15:00:00") ax = plt.subplot(322) df = extract_df('./log/index_message_2016-6-12_G5_BUF2_AR1.log.csv') df[start:end].reward_scaling.resample('1Min').plot(ax=ax) ax.get_xaxis().set_visible(False) ax.grid() ax.yaxis.label.set_text('Reward\nscaling');ax.yaxis.label.set_fontsize(12); ax.yaxis.set_label_position('right');ax.yaxis.set_ticks_position('right') ax = plt.subplot(324) df[start:end].loss.resample('1Min').plot(ax=ax, logy=True) ax.get_xaxis().set_visible(False) ax.grid() ax.yaxis.label.set_text('Loss');ax.yaxis.label.set_fontsize(12);ax.yaxis.set_label_position('right');ax.yaxis.set_ticks_position('right') ax = plt.subplot(326) df[start:end][['q_wake','q_sleep']].resample('1Min').plot(ax=ax, # ylim=(-1.1, 1.1), style=['r-', 'b--'], rot=30) ax.xaxis.label.set_fontsize(12);ax.xaxis.label.set_text('Time') ax.grid() ax.yaxis.label.set_text('Q values');ax.yaxis.label.set_fontsize(12); ax.yaxis.set_label_position('right');ax.yaxis.set_ticks_position('right') plt.legend(bbox_to_anchor=(1,1.3), fontsize=12) ax.title.set_text('(c) Example 
period'); ax.title.set_fontsize(12); ax.title.set_position((0.5, -1)) #-------------------- ax = plt.subplot(221) df = extract_df('./log/index_message_2016-6-12_G5_BUF2_AR1.log.csv') ts = extract_baseline_ts(df, 'Adaptive, 1') ts.iloc[1:-1:10000].plot(legend=True, ax=ax, style=['r-o']) df = extract_df('./log/index_message_2016-6-11_BUF2_G5_FR1.log.csv') ts = extract_baseline_ts(df, 'Fixed, 1') ts.iloc[1:-1:10000].plot(legend=True, ax=ax,style=['r--']) df = extract_df('./log/index_message_2016-6-11_BUF2_G5.log.csv') ts = extract_baseline_ts(df, 'Fixed, 20') ts.iloc[1:-1:10000].plot(legend=True, ax=ax,style=['r-.']) df = extract_df('./log/index_message_2016-6-11_BUF2_G5_FR100.log.csv') ts = extract_baseline_ts(df, 'Fixed, 100') ts.iloc[1:-1:10000].plot(legend=True, ax=ax, style=['r-'], ylim=(-0.5, 2) ) ax.grid() ax.get_xaxis().set_visible(False) ax.title.set_text('(a) Gamma=0.5'); ax.title.set_fontsize(12); ax.title.set_position((0.5,-0.2)) plt.legend(bbox_to_anchor=(1,0.9), fontsize=8) ax.yaxis.label.set_text('Sleeping gain'); ax.yaxis.label.set_fontsize(12); ax.yaxis.set_label_coords(-0.15,0.5) #-------------------- ax = plt.subplot(223) df = extract_df('./log/index_message_2016-6-12_G9_BUF2_AR1.log.csv') ts = extract_baseline_ts(df, 'Adaptive, 1') ts.iloc[1:-1:10000].plot(legend=True, ax=ax, style=['b-o']) df = extract_df('./log/index_message_2016-6-12_G9_BUF2_FR1.log.csv') ts = extract_baseline_ts(df, 'Fixed, 1') ts.iloc[1:-1:10000].plot(legend=True, ax=ax,style=['b--']) df = extract_df('./log/index_message_2016-6-12_G9_BUF2_FR20.log.csv') ts = extract_baseline_ts(df, 'Fixed, 20') ts.iloc[1:-1:10000].plot(legend=True, ax=ax,style=['b-.']) df = extract_df('./log/index_message_2016-6-12_G9_BUF2_FR100.log.csv') ts = extract_baseline_ts(df, 'Fixed, 100') ts.iloc[1:-1:10000].plot(legend=True, ax=ax, style=['b-'], ylim=(1.5, 2)) ax.grid() ax.xaxis.label.set_text('Time'); ax.xaxis.label.set_fontsize(12); ax.xaxis.set_label_coords(0.5, -0.2) ax.title.set_text('(b) 
Gamma=0.9'); ax.title.set_fontsize(12); ax.title.set_position((0.5,-0.65)) plt.legend(bbox_to_anchor=(1.35,1.23), fontsize=8) ax.yaxis.label.set_text('Sleeping gain'); ax.yaxis.label.set_fontsize(12) #---------------------- plt.savefig("adaptive.png", bbox_inches='tight', dpi=300) # - # # Figure 11 f_list = [ "message_2016-6-12_G5_BUF2_AR1_b1.log", "message_2016-6-12_G5_BUF2_AR1_b15.log", "message_2016-6-12_G5_BUF2_AR1_b2.log", "message_2016-6-12_G5_BUF2_AR1_b25.log", "message_2016-6-12_G5_BUF2_AR1_b3.log", "message_2016-6-12_G5_BUF2_AR1_b35.log", "message_2016-6-12_G5_BUF2_AR1_b4.log", "message_2016-6-12_G5_BUF2_AR1_b5.log", "message_2016-6-12_G5_BUF2_AR1_b55.log", "message_2016-6-12_G5_BUF2_AR1_b6.log", "message_2016-6-12_G5_BUF2_AR1_b65.log", "message_2016-6-12_G5_BUF2_AR1_b7.log", "message_2016-6-12_G5_BUF2_AR1_b8.log", ] def get_reward(df): tr_wait = -1.0*sum(df.tr_reward_wait)/sum(df.req_generated) op_cost = mean(df.op_cost) return tr_wait, op_cost def get_df(f): f = './log/index_'+f+'.csv' df = pd.read_csv(f, delimiter=';', index_col=0) df.loc[:, 'start_ts'] = df['start_ts'].apply(lambda x: pd.to_datetime(x)) df.set_index('start_ts', inplace=True) return df tups = [None]*len(f_list) for i, f in enumerate(f_list): print f tups[i] = get_reward(get_df(f)) plt.figure(1) #-------------------- ax = plt.subplot(111) beta = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5, 0.55, 0.6, 0.65, 0.7, 0.8,] y=[tup[0] for i, tup in enumerate(tups)] x=[tup[1] for i, tup in enumerate(tups)] plt.plot(x[:],y[:], 'b-*') for i in range(len(x)/2+1): ax.annotate(str(beta[i]), xy=(x[i]+0.02, y[i]+0.1)) for i in range(len(x)/2+1, len(x)): ax.annotate(str(beta[i]), xy=(x[i]+0, y[i]+0.1)) ax.annotate('weight values', xy=(1, 1), xytext=(1.2,1.2), size=13, arrowprops=dict(arrowstyle="->")) ax.yaxis.label.set_fontsize(13) ax.yaxis.label.set_text('Delay (wait epochs/request)') ax.xaxis.label.set_fontsize(13) ax.xaxis.label.set_text('Energy consumption (op_cost/time step)') ax.grid()
experiments/paper_figure_tables.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # ๆ•ฐๅญฆๅŸบ็ก€ # # ๆœฌ่Š‚ๆ€ป็ป“ไบ†ๆœฌไนฆไธญๆถ‰ๅŠ็š„ๆœ‰ๅ…ณ็บฟๆ€งไปฃๆ•ฐใ€ๅพฎๅˆ†ๅ’Œๆฆ‚็އ็š„ๅŸบ็ก€็Ÿฅ่ฏ†ใ€‚ไธบ้ฟๅ…่ต˜่ฟฐๆœฌไนฆๆœชๆถ‰ๅŠ็š„ๆ•ฐๅญฆ่ƒŒๆ™ฏ็Ÿฅ่ฏ†๏ผŒๆœฌ่Š‚ไธญ็š„ๅฐ‘ๆ•ฐๅฎšไน‰็จๆœ‰็ฎ€ๅŒ–ใ€‚ # # # ## ็บฟๆ€งไปฃๆ•ฐ # # ไธ‹้ขๅˆ†ๅˆซๆฆ‚ๆ‹ฌไบ†ๅ‘้‡ใ€็Ÿฉ้˜ตใ€่ฟ็ฎ—ใ€่Œƒๆ•ฐใ€็‰นๅพๅ‘้‡ๅ’Œ็‰นๅพๅ€ผ็š„ๆฆ‚ๅฟตใ€‚ # # ### ๅ‘้‡ # # ๆœฌไนฆไธญ็š„ๅ‘้‡ๆŒ‡็š„ๆ˜ฏๅˆ—ๅ‘้‡ใ€‚ไธ€ไธช$n$็ปดๅ‘้‡$\boldsymbol{x}$็š„่กจ่พพๅผๅฏๅ†™ๆˆ # # $$ # \boldsymbol{x} = # \begin{bmatrix} # x_{1} \\ # x_{2} \\ # \vdots \\ # x_{n} # \end{bmatrix}, # $$ # # ๅ…ถไธญ$x_1, \ldots, x_n$ๆ˜ฏๅ‘้‡็š„ๅ…ƒ็ด ใ€‚ๆˆ‘ไปฌๅฐ†ๅ„ๅ…ƒ็ด ๅ‡ไธบๅฎžๆ•ฐ็š„$n$็ปดๅ‘้‡$\boldsymbol{x}$่ฎฐไฝœ$\boldsymbol{x} \in \mathbb{R}^{n}$ๆˆ–$\boldsymbol{x} \in \mathbb{R}^{n \times 1}$ใ€‚ # # # ### ็Ÿฉ้˜ต # # ไธ€ไธช$m$่กŒ$n$ๅˆ—็Ÿฉ้˜ต็š„่กจ่พพๅผๅฏๅ†™ๆˆ # # $$ # \boldsymbol{X} = # \begin{bmatrix} # x_{11} & x_{12} & \dots & x_{1n} \\ # x_{21} & x_{22} & \dots & x_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # x_{m1} & x_{m2} & \dots & x_{mn} # \end{bmatrix}, # $$ # # ๅ…ถไธญ$x_{ij}$ๆ˜ฏ็Ÿฉ้˜ต$\boldsymbol{X}$ไธญ็ฌฌ$i$่กŒ็ฌฌ$j$ๅˆ—็š„ๅ…ƒ็ด ๏ผˆ$1 \leq i \leq m, 1 \leq j \leq n$๏ผ‰ใ€‚ๆˆ‘ไปฌๅฐ†ๅ„ๅ…ƒ็ด ๅ‡ไธบๅฎžๆ•ฐ็š„$m$่กŒ$n$ๅˆ—็Ÿฉ้˜ต$\boldsymbol{X}$่ฎฐไฝœ$\boldsymbol{X} \in \mathbb{R}^{m \times n}$ใ€‚ไธ้šพๅ‘็Žฐ๏ผŒๅ‘้‡ๆ˜ฏ็‰นๆฎŠ็š„็Ÿฉ้˜ตใ€‚ # # # ### ่ฟ็ฎ— # # ่ฎพ$n$็ปดๅ‘้‡$\boldsymbol{a}$ไธญ็š„ๅ…ƒ็ด ไธบ$a_1, \ldots, a_n$๏ผŒ$n$็ปดๅ‘้‡$\boldsymbol{b}$ไธญ็š„ๅ…ƒ็ด ไธบ$b_1, \ldots, b_n$ใ€‚ๅ‘้‡$\boldsymbol{a}$ไธŽ$\boldsymbol{b}$็š„็‚นไน˜๏ผˆๅ†…็งฏ๏ผ‰ๆ˜ฏไธ€ไธชๆ ‡้‡๏ผš # # $$\boldsymbol{a} \cdot \boldsymbol{b} = a_1 b_1 + \ldots + a_n b_n.$$ # # # ่ฎพไธคไธช$m$่กŒ$n$ๅˆ—็Ÿฉ้˜ต # # $$ # \boldsymbol{A} = # \begin{bmatrix} # a_{11} & a_{12} & \dots & a_{1n} \\ # a_{21} & a_{22} & \dots & a_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{m1} & a_{m2} & \dots & a_{mn} # 
\end{bmatrix},\quad # \boldsymbol{B} = # \begin{bmatrix} # b_{11} & b_{12} & \dots & b_{1n} \\ # b_{21} & b_{22} & \dots & b_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # b_{m1} & b_{m2} & \dots & b_{mn} # \end{bmatrix}. # $$ # # ็Ÿฉ้˜ต$\boldsymbol{A}$็š„่ฝฌ็ฝฎๆ˜ฏไธ€ไธช$n$่กŒ$m$ๅˆ—็Ÿฉ้˜ต๏ผŒๅฎƒ็š„ๆฏไธ€่กŒๅ…ถๅฎžๆ˜ฏๅŽŸ็Ÿฉ้˜ต็š„ๆฏไธ€ๅˆ—๏ผš # $$ # \boldsymbol{A}^\top = # \begin{bmatrix} # a_{11} & a_{21} & \dots & a_{m1} \\ # a_{12} & a_{22} & \dots & a_{m2} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{1n} & a_{2n} & \dots & a_{mn} # \end{bmatrix}. # $$ # # # ไธคไธช็›ธๅŒๅฝข็Šถ็š„็Ÿฉ้˜ต็š„ๅŠ ๆณ•ๆ˜ฏๅฐ†ไธคไธช็Ÿฉ้˜ตๆŒ‰ๅ…ƒ็ด ๅšๅŠ ๆณ•๏ผš # # $$ # \boldsymbol{A} + \boldsymbol{B} = # \begin{bmatrix} # a_{11} + b_{11} & a_{12} + b_{12} & \dots & a_{1n} + b_{1n} \\ # a_{21} + b_{21} & a_{22} + b_{22} & \dots & a_{2n} + b_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{m1} + b_{m1} & a_{m2} + b_{m2} & \dots & a_{mn} + b_{mn} # \end{bmatrix}. # $$ # # ๆˆ‘ไปฌไฝฟ็”จ็ฌฆๅท$\odot$่กจ็คบไธคไธช็Ÿฉ้˜ตๆŒ‰ๅ…ƒ็ด ไน˜ๆณ•็š„่ฟ็ฎ—๏ผŒๅณ้˜ฟ่พพ็Ž›๏ผˆHadamard๏ผ‰็งฏ๏ผš # # $$ # \boldsymbol{A} \odot \boldsymbol{B} = # \begin{bmatrix} # a_{11} b_{11} & a_{12} b_{12} & \dots & a_{1n} b_{1n} \\ # a_{21} b_{21} & a_{22} b_{22} & \dots & a_{2n} b_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{m1} b_{m1} & a_{m2} b_{m2} & \dots & a_{mn} b_{mn} # \end{bmatrix}. # $$ # # ๅฎšไน‰ไธ€ไธชๆ ‡้‡$k$ใ€‚ๆ ‡้‡ไธŽ็Ÿฉ้˜ต็š„ไน˜ๆณ•ไนŸๆ˜ฏๆŒ‰ๅ…ƒ็ด ๅšไน˜ๆณ•็š„่ฟ็ฎ—๏ผš # # # $$ # k\boldsymbol{A} = # \begin{bmatrix} # ka_{11} & ka_{12} & \dots & ka_{1n} \\ # ka_{21} & ka_{22} & \dots & ka_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # ka_{m1} & ka_{m2} & \dots & ka_{mn} # \end{bmatrix}. 
# $$ # # ๅ…ถไป–่ฏธๅฆ‚ๆ ‡้‡ไธŽ็Ÿฉ้˜ตๆŒ‰ๅ…ƒ็ด ็›ธๅŠ ใ€็›ธ้™ค็ญ‰่ฟ็ฎ—ไธŽไธŠๅผไธญ็š„็›ธไน˜่ฟ็ฎ—็ฑปไผผใ€‚็Ÿฉ้˜ตๆŒ‰ๅ…ƒ็ด ๅผ€ๆ นๅทใ€ๅ–ๅฏนๆ•ฐ็ญ‰่ฟ็ฎ—ไนŸๅฐฑๆ˜ฏๅฏน็Ÿฉ้˜ตๆฏไธชๅ…ƒ็ด ๅผ€ๆ นๅทใ€ๅ–ๅฏนๆ•ฐ็ญ‰๏ผŒๅนถๅพ—ๅˆฐๅ’ŒๅŽŸ็Ÿฉ้˜ตๅฝข็Šถ็›ธๅŒ็š„็Ÿฉ้˜ตใ€‚ # # ็Ÿฉ้˜ตไน˜ๆณ•ๅ’ŒๆŒ‰ๅ…ƒ็ด ็š„ไน˜ๆณ•ไธๅŒใ€‚่ฎพ$\boldsymbol{A}$ไธบ$m$่กŒ$p$ๅˆ—็š„็Ÿฉ้˜ต๏ผŒ$\boldsymbol{B}$ไธบ$p$่กŒ$n$ๅˆ—็š„็Ÿฉ้˜ตใ€‚ไธคไธช็Ÿฉ้˜ต็›ธไน˜็š„็ป“ๆžœ # # $$ # \boldsymbol{A} \boldsymbol{B} = # \begin{bmatrix} # a_{11} & a_{12} & \dots & a_{1p} \\ # a_{21} & a_{22} & \dots & a_{2p} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{i1} & a_{i2} & \dots & a_{ip} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{m1} & a_{m2} & \dots & a_{mp} # \end{bmatrix} # \begin{bmatrix} # b_{11} & b_{12} & \dots & b_{1j} & \dots & b_{1n} \\ # b_{21} & b_{22} & \dots & b_{2j} & \dots & b_{2n} \\ # \vdots & \vdots & \ddots & \vdots & \ddots & \vdots \\ # b_{p1} & b_{p2} & \dots & b_{pj} & \dots & b_{pn} # \end{bmatrix} # $$ # # ๆ˜ฏไธ€ไธช$m$่กŒ$n$ๅˆ—็š„็Ÿฉ้˜ต๏ผŒๅ…ถไธญ็ฌฌ$i$่กŒ็ฌฌ$j$ๅˆ—๏ผˆ$1 \leq i \leq m, 1 \leq j \leq n$๏ผ‰็š„ๅ…ƒ็ด ไธบ # # $$a_{i1}b_{1j} + a_{i2}b_{2j} + \ldots + a_{ip}b_{pj} = \sum_{k=1}^p a_{ik}b_{kj}. 
$$ # # # ### ่Œƒๆ•ฐ # # ่ฎพ$n$็ปดๅ‘้‡$\boldsymbol{x}$ไธญ็š„ๅ…ƒ็ด ไธบ$x_1, \ldots, x_n$ใ€‚ๅ‘้‡$\boldsymbol{x}$็š„$L_p$่Œƒๆ•ฐไธบ # # $$\|\boldsymbol{x}\|_p = \left(\sum_{i=1}^n \left|x_i \right|^p \right)^{1/p}.$$ # # ไพ‹ๅฆ‚๏ผŒ$\boldsymbol{x}$็š„$L_1$่Œƒๆ•ฐๆ˜ฏ่ฏฅๅ‘้‡ๅ…ƒ็ด ็ปๅฏนๅ€ผไน‹ๅ’Œ๏ผš # # $$\|\boldsymbol{x}\|_1 = \sum_{i=1}^n \left|x_i \right|.$$ # # ่€Œ$\boldsymbol{x}$็š„$L_2$่Œƒๆ•ฐๆ˜ฏ่ฏฅๅ‘้‡ๅ…ƒ็ด ๅนณๆ–นๅ’Œ็š„ๅนณๆ–นๆ น๏ผš # # $$\|\boldsymbol{x}\|_2 = \sqrt{\sum_{i=1}^n x_i^2}.$$ # # ๆˆ‘ไปฌ้€šๅธธ็”จ$\|\boldsymbol{x}\|$ๆŒ‡ไปฃ$\|\boldsymbol{x}\|_2$ใ€‚ # # ่ฎพ$\boldsymbol{X}$ๆ˜ฏไธ€ไธช$m$่กŒ$n$ๅˆ—็Ÿฉ้˜ตใ€‚็Ÿฉ้˜ต$\boldsymbol{X}$็š„Frobenius่Œƒๆ•ฐไธบ่ฏฅ็Ÿฉ้˜ตๅ…ƒ็ด ๅนณๆ–นๅ’Œ็š„ๅนณๆ–นๆ น๏ผš # # $$\|\boldsymbol{X}\|_F = \sqrt{\sum_{i=1}^m \sum_{j=1}^n x_{ij}^2},$$ # # ๅ…ถไธญ$x_{ij}$ไธบ็Ÿฉ้˜ต$\boldsymbol{X}$ๅœจ็ฌฌ$i$่กŒ็ฌฌ$j$ๅˆ—็š„ๅ…ƒ็ด ใ€‚ # # # ### ็‰นๅพๅ‘้‡ๅ’Œ็‰นๅพๅ€ผ # # # ๅฏนไบŽไธ€ไธช$n$่กŒ$n$ๅˆ—็š„็Ÿฉ้˜ต$\boldsymbol{A}$๏ผŒๅ‡่ฎพๆœ‰ๆ ‡้‡$\lambda$ๅ’Œ้ž้›ถ็š„$n$็ปดๅ‘้‡$\boldsymbol{v}$ไฝฟ # # $$\boldsymbol{A} \boldsymbol{v} = \lambda \boldsymbol{v},$$ # # ้‚ฃไนˆ$\boldsymbol{v}$ๆ˜ฏ็Ÿฉ้˜ต$\boldsymbol{A}$็š„ไธ€ไธช็‰นๅพๅ‘้‡๏ผŒๆ ‡้‡$\lambda$ๆ˜ฏ$\boldsymbol{v}$ๅฏนๅบ”็š„็‰นๅพๅ€ผใ€‚ # # # # ## ๅพฎๅˆ† # # ๆˆ‘ไปฌๅœจ่ฟ™้‡Œ็ฎ€่ฆไป‹็ปๅพฎๅˆ†็š„ไธ€ไบ›ๅŸบๆœฌๆฆ‚ๅฟตๅ’Œๆผ”็ฎ—ใ€‚ # # # ### ๅฏผๆ•ฐๅ’Œๅพฎๅˆ† # # ๅ‡่ฎพๅ‡ฝๆ•ฐ$f: \mathbb{R} \rightarrow \mathbb{R}$็š„่พ“ๅ…ฅๅ’Œ่พ“ๅ‡บ้ƒฝๆ˜ฏๆ ‡้‡ใ€‚ๅ‡ฝๆ•ฐ$f$็š„ๅฏผๆ•ฐ # # $$f'(x) = \lim_{h \rightarrow 0} \frac{f(x+h) - f(x)}{h},$$ # # ไธ”ๅ‡ๅฎš่ฏฅๆž้™ๅญ˜ๅœจใ€‚็ป™ๅฎš$y = f(x)$๏ผŒๅ…ถไธญ$x$ๅ’Œ$y$ๅˆ†ๅˆซๆ˜ฏๅ‡ฝๆ•ฐ$f$็š„่‡ชๅ˜้‡ๅ’Œๅ› ๅ˜้‡ใ€‚ไปฅไธ‹ๆœ‰ๅ…ณๅฏผๆ•ฐๅ’Œๅพฎๅˆ†็š„่กจ่พพๅผ็ญ‰ไปท๏ผš # # $$f'(x) = y' = \frac{\text{d}y}{\text{d}x} = \frac{\text{d}f}{\text{d}x} = \frac{\text{d}}{\text{d}x} f(x) = \text{D}f(x) = \text{D}_x f(x),$$ # # ๅ…ถไธญ็ฌฆๅท$\text{D}$ๅ’Œ$\text{d}/\text{d}x$ไนŸๅซๅพฎๅˆ†่ฟ็ฎ—็ฌฆใ€‚ๅธธ่ง็š„ๅพฎๅˆ†ๆผ”็ฎ—ๆœ‰$\text{D}C = 0$๏ผˆ$C$ไธบๅธธๆ•ฐ๏ผ‰ใ€$\text{D}x^n = nx^{n-1}$๏ผˆ$n$ไธบๅธธๆ•ฐ๏ผ‰ใ€$\text{D}e^x = 
e^x$ใ€$\text{D}\ln(x) = 1/x$็ญ‰ใ€‚ # # ๅฆ‚ๆžœๅ‡ฝๆ•ฐ$f$ๅ’Œ$g$้ƒฝๅฏๅฏผ๏ผŒ่ฎพ$C$ไธบๅธธๆ•ฐ๏ผŒ้‚ฃไนˆ # # $$ # \begin{aligned} # \frac{\text{d}}{\text{d}x} [Cf(x)] &= C \frac{\text{d}}{\text{d}x} f(x),\\ # \frac{\text{d}}{\text{d}x} [f(x) + g(x)] &= \frac{\text{d}}{\text{d}x} f(x) + \frac{\text{d}}{\text{d}x} g(x),\\ # \frac{\text{d}}{\text{d}x} [f(x)g(x)] &= f(x) \frac{\text{d}}{\text{d}x} [g(x)] + g(x) \frac{\text{d}}{\text{d}x} [f(x)],\\ # \frac{\text{d}}{\text{d}x} \left[\frac{f(x)}{g(x)}\right] &= \frac{g(x) \frac{\text{d}}{\text{d}x} [f(x)] - f(x) \frac{\text{d}}{\text{d}x} [g(x)]}{[g(x)]^2}. # \end{aligned} # $$ # # # ๅฆ‚ๆžœ$y=f(u)$ๅ’Œ$u=g(x)$้ƒฝๆ˜ฏๅฏๅฏผๅ‡ฝๆ•ฐ๏ผŒไพๆฎ้“พๅผๆณ•ๅˆ™๏ผŒ # # $$\frac{\text{d}y}{\text{d}x} = \frac{\text{d}y}{\text{d}u} \frac{\text{d}u}{\text{d}x}.$$ # # # ### ๆณฐๅ‹’ๅฑ•ๅผ€ # # ๅ‡ฝๆ•ฐ$f$็š„ๆณฐๅ‹’ๅฑ•ๅผ€ๅผๆ˜ฏ # # $$f(x) = \sum_{n=0}^\infty \frac{f^{(n)}(a)}{n!} (x-a)^n,$$ # # ๅ…ถไธญ$f^{(n)}$ไธบๅ‡ฝๆ•ฐ$f$็š„$n$้˜ถๅฏผๆ•ฐ๏ผˆๆฑ‚$n$ๆฌกๅฏผๆ•ฐ๏ผ‰๏ผŒ$n!$ไธบ$n$็š„้˜ถไน˜ใ€‚ๅ‡่ฎพ$\epsilon$ๆ˜ฏไธ€ไธช่ถณๅคŸๅฐ็š„ๆ•ฐ๏ผŒๅฆ‚ๆžœๅฐ†ไธŠๅผไธญ$x$ๅ’Œ$a$ๅˆ†ๅˆซๆ›ฟๆขๆˆ$x+\epsilon$ๅ’Œ$x$๏ผŒๅฏไปฅๅพ—ๅˆฐ # # $$f(x + \epsilon) \approx f(x) + f'(x) \epsilon + \mathcal{O}(\epsilon^2).$$ # # ็”ฑไบŽ$\epsilon$่ถณๅคŸๅฐ๏ผŒไธŠๅผไนŸๅฏไปฅ็ฎ€ๅŒ–ๆˆ # # $$f(x + \epsilon) \approx f(x) + f'(x) \epsilon.$$ # # # # ### ๅๅฏผๆ•ฐ # # ่ฎพ$u$ไธบไธ€ไธชๆœ‰$n$ไธช่‡ชๅ˜้‡็š„ๅ‡ฝๆ•ฐ๏ผŒ$u = f(x_1, x_2, \ldots, x_n)$๏ผŒๅฎƒๆœ‰ๅ…ณ็ฌฌ$i$ไธชๅ˜้‡$x_i$็š„ๅๅฏผๆ•ฐไธบ # # $$ \frac{\partial u}{\partial x_i} = \lim_{h \rightarrow 0} \frac{f(x_1, \ldots, x_{i-1}, x_i+h, x_{i+1}, \ldots, x_n) - f(x_1, \ldots, x_i, \ldots, x_n)}{h}.$$ # # # ไปฅไธ‹ๆœ‰ๅ…ณๅๅฏผๆ•ฐ็š„่กจ่พพๅผ็ญ‰ไปท๏ผš # # $$\frac{\partial u}{\partial x_i} = \frac{\partial f}{\partial x_i} = f_{x_i} = f_i = \text{D}_i f = \text{D}_{x_i} f.$$ # # ไธบไบ†่ฎก็ฎ—$\partial u/\partial x_i$๏ผŒๅช้œ€ๅฐ†$x_1, \ldots, x_{i-1}, x_{i+1}, \ldots, x_n$่ง†ไธบๅธธๆ•ฐๅนถๆฑ‚$u$ๆœ‰ๅ…ณ$x_i$็š„ๅฏผๆ•ฐใ€‚ # # # # ### ๆขฏๅบฆ # # # ๅ‡่ฎพๅ‡ฝๆ•ฐ$f: 
\mathbb{R}^n \rightarrow \mathbb{R}$็š„่พ“ๅ…ฅๆ˜ฏไธ€ไธช$n$็ปดๅ‘้‡$\boldsymbol{x} = [x_1, x_2, \ldots, x_n]^\top$๏ผŒ่พ“ๅ‡บๆ˜ฏๆ ‡้‡ใ€‚ๅ‡ฝๆ•ฐ$f(\boldsymbol{x})$ๆœ‰ๅ…ณ$\boldsymbol{x}$็š„ๆขฏๅบฆๆ˜ฏไธ€ไธช็”ฑ$n$ไธชๅๅฏผๆ•ฐ็ป„ๆˆ็š„ๅ‘้‡๏ผš # # $$\nabla_{\boldsymbol{x}} f(\boldsymbol{x}) = \bigg[\frac{\partial f(\boldsymbol{x})}{\partial x_1}, \frac{\partial f(\boldsymbol{x})}{\partial x_2}, \ldots, \frac{\partial f(\boldsymbol{x})}{\partial x_n}\bigg]^\top.$$ # # # ไธบ่กจ็คบ็ฎ€ๆด๏ผŒๆˆ‘ไปฌๆœ‰ๆ—ถ็”จ$\nabla f(\boldsymbol{x})$ไปฃๆ›ฟ$\nabla_{\boldsymbol{x}} f(\boldsymbol{x})$ใ€‚ # # ๅ‡่ฎพ$\boldsymbol{x}$ๆ˜ฏไธ€ไธชๅ‘้‡๏ผŒๅธธ่ง็š„ๆขฏๅบฆๆผ”็ฎ—ๅŒ…ๆ‹ฌ # # $$ # \begin{aligned} # \nabla_{\boldsymbol{x}} \boldsymbol{A}^\top \boldsymbol{x} &= \boldsymbol{A}, \\ # \nabla_{\boldsymbol{x}} \boldsymbol{x}^\top \boldsymbol{A} &= \boldsymbol{A}, \\ # \nabla_{\boldsymbol{x}} \boldsymbol{x}^\top \boldsymbol{A} \boldsymbol{x} &= (\boldsymbol{A} + \boldsymbol{A}^\top)\boldsymbol{x},\\ # \nabla_{\boldsymbol{x}} \|\boldsymbol{x} \|^2 &= \nabla_{\boldsymbol{x}} \boldsymbol{x}^\top \boldsymbol{x} = 2\boldsymbol{x}. 
# \end{aligned} # $$ # # ็ฑปไผผๅœฐ๏ผŒๅ‡่ฎพ$\boldsymbol{X}$ๆ˜ฏไธ€ไธช็Ÿฉ้˜ต๏ผŒ้‚ฃไนˆ # $$\nabla_{\boldsymbol{X}} \|\boldsymbol{X} \|_F^2 = 2\boldsymbol{X}.$$ # # # # # ### ๆตทๆฃฎ็Ÿฉ้˜ต # # ๅ‡่ฎพๅ‡ฝๆ•ฐ$f: \mathbb{R}^n \rightarrow \mathbb{R}$็š„่พ“ๅ…ฅๆ˜ฏไธ€ไธช$n$็ปดๅ‘้‡$\boldsymbol{x} = [x_1, x_2, \ldots, x_n]^\top$๏ผŒ่พ“ๅ‡บๆ˜ฏๆ ‡้‡ใ€‚ๅ‡ๅฎšๅ‡ฝๆ•ฐ$f$ๆ‰€ๆœ‰็š„ไบŒ้˜ถๅๅฏผๆ•ฐ้ƒฝๅญ˜ๅœจ๏ผŒ$f$็š„ๆตทๆฃฎ็Ÿฉ้˜ต$\boldsymbol{H}$ๆ˜ฏไธ€ไธช$n$่กŒ$n$ๅˆ—็š„็Ÿฉ้˜ต๏ผš # # $$ # \boldsymbol{H} = # \begin{bmatrix} # \frac{\partial^2 f}{\partial x_1^2} & \frac{\partial^2 f}{\partial x_1 \partial x_2} & \dots & \frac{\partial^2 f}{\partial x_1 \partial x_n} \\ # \frac{\partial^2 f}{\partial x_2 \partial x_1} & \frac{\partial^2 f}{\partial x_2^2} & \dots & \frac{\partial^2 f}{\partial x_2 \partial x_n} \\ # \vdots & \vdots & \ddots & \vdots \\ # \frac{\partial^2 f}{\partial x_n \partial x_1} & \frac{\partial^2 f}{\partial x_n \partial x_2} & \dots & \frac{\partial^2 f}{\partial x_n^2} # \end{bmatrix}, # $$ # # ๅ…ถไธญไบŒ้˜ถๅๅฏผๆ•ฐไธบ # # $$\frac{\partial^2 f}{\partial x_i \partial x_j} = \frac{\partial }{\partial x_j} \left(\frac{\partial f}{ \partial x_i}\right).$$ # # # # ## ๆฆ‚็އ # # ๆœ€ๅŽ๏ผŒๆˆ‘ไปฌ็ฎ€่ฆไป‹็ปๆกไปถๆฆ‚็އใ€ๆœŸๆœ›ๅ’Œๅ‡ๅŒ€ๅˆ†ๅธƒใ€‚ # # ### ๆกไปถๆฆ‚็އ # # ๅ‡่ฎพไบ‹ไปถ$A$ๅ’Œไบ‹ไปถ$B$็š„ๆฆ‚็އๅˆ†ๅˆซไธบ$P(A)$ๅ’Œ$P(B)$๏ผŒไธคไธชไบ‹ไปถๅŒๆ—ถๅ‘็”Ÿ็š„ๆฆ‚็އ่ฎฐไฝœ$P(A \cap B)$ๆˆ–$P(A, B)$ใ€‚็ป™ๅฎšไบ‹ไปถ$B$๏ผŒไบ‹ไปถ$A$็š„ๆกไปถๆฆ‚็އ # # $$P(A \mid B) = \frac{P(A \cap B)}{P(B)}.$$ # # ไนŸๅฐฑๆ˜ฏ่ฏด๏ผŒ # # $$P(A \cap B) = P(B) P(A \mid B) = P(A) P(B \mid A).$$ # # ๅฝ“ๆปก่ถณ # # $$P(A \cap B) = P(A) P(B)$$ # # ๆ—ถ๏ผŒไบ‹ไปถ$A$ๅ’Œไบ‹ไปถ$B$็›ธไบ’็‹ฌ็ซ‹ใ€‚ # # # ### ๆœŸๆœ› # # ็ฆปๆ•ฃ็š„้šๆœบๅ˜้‡$X$็š„ๆœŸๆœ›๏ผˆๆˆ–ๅนณๅ‡ๅ€ผ๏ผ‰ไธบ # # $$E(X) = \sum_{x} x P(X = x).$$ # # # # ### ๅ‡ๅŒ€ๅˆ†ๅธƒ # # ๅ‡่ฎพ้šๆœบๅ˜้‡$X$ๆœไปŽ$[a, b]$ไธŠ็š„ๅ‡ๅŒ€ๅˆ†ๅธƒ๏ผŒๅณ$X \sim U(a, b)$ใ€‚้šๆœบๅ˜้‡$X$ๅ–$a$ๅ’Œ$b$ไน‹้—ดไปปๆ„ไธ€ไธชๆ•ฐ็š„ๆฆ‚็އ็›ธ็ญ‰ใ€‚ # # # # # ## ๅฐ็ป“ # # * 
ๆœฌ้™„ๅฝ•ๆ€ป็ป“ไบ†ๆœฌไนฆไธญๆถ‰ๅŠ็š„ๆœ‰ๅ…ณ็บฟๆ€งไปฃๆ•ฐใ€ๅพฎๅˆ†ๅ’Œๆฆ‚็އ็š„ๅŸบ็ก€็Ÿฅ่ฏ†ใ€‚ # # # ## ็ปƒไน  # # * ๆฑ‚ๅ‡ฝๆ•ฐ$f(\boldsymbol{x}) = 3x_1^2 + 5e^{x_2}$็š„ๆขฏๅบฆใ€‚ # # # # # ## ๆ‰ซ็ ็›ด่พพ[่ฎจ่ฎบๅŒบ](https://discuss.gluon.ai/t/topic/6966) # # ![](../img/qr_math.svg)
ๆทฑๅบฆๅญฆไน /d2l-zh-1.1/chapter_appendix/math.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Exercises Electric Machinery Fundamentals
# ## Chapter 1
# ## Problem 1-7

# + slideshow={"slide_type": "skip"}
# %pylab notebook
# %precision 4
from scipy import constants as c  # provides the vacuum permeability c.mu_0
# -

# ### Description
# A two-legged core is shown in Figure P1-4 below:
# <img src="figs/FigC_P1-4.jpg" width="70%">
# The winding on the left leg of the core ($N_1$) has 600 turns,
# and the winding on the right ($N_2$) has 200 turns. The coils are wound in the directions shown in the figure.
#
# * If the dimensions are as shown, then what flux would be produced by currents $i_1 = 0.5\,A$ and $i_2 = 1.0\,A$?
#
# Assume $\mu_r = 1200$ and constant.

# Given data from the problem statement:
N1 = 600   # turns on the left-leg winding
N2 = 200   # turns on the right-leg winding
i1 = 0.5  # A
i2 = 1.0  # A
mu_r = 1200                # relative permeability of the core (assumed constant)
mu = mu_r * c.mu_0         # absolute permeability of the core [H/m]

# ### SOLUTION
# Both coils are wound such that their magnetomotive forces add, so the
# total magnetomotive force acting on the core is
# $$\mathcal{F}_\text{TOT} = N_1 i_1 + N_2 I_2$$

F_tot = N1 * i1 + N2 * i2  # total mmf [At]
print('F_tot = {:.1f} At'.format(F_tot))

# The total reluctance of the (single) magnetic path is $\mathcal{R}_\text{TOT} = \frac{l}{\mu_0 \mu_r A}$:

# Mean path length: four sides of the core loop.
# NOTE(review): the 0.075/0.5/0.15 dimensions are read off Figure P1-4 — confirm against the figure.
l = 4 * (0.075 + 0.5 + 0.075)  # [m] core length on all 4 sides.
A = 0.15**2  # [mยฒ]
R_tot = l / (mu * A)  # [At/Wb]
print('R_tot = {:.1f} kAt/Wb'.format(R_tot/1000))

# Finally, the flux in the core follows from $\phi = \frac{\mathcal{F}_\text{TOT}}{\mathcal{R}_\text{TOT}}$:

phi = F_tot / R_tot  # [Wb]
print('phi = {:.3f} mWb'.format(phi*1000))
Chapman/Ch1-Problem_1-07.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ihasdapie/Colab-Notebooks/blob/master/DataProcessingForSignatureVerification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="B4F34cvz9h7b" colab_type="code" cellView="form" colab={} #@title imports from __future__ import division import keras from keras.preprocessing.image import img_to_array, load_img import os from matplotlib import pyplot as plt import sklearn.metrics import numpy as np import cv2 from keras import Sequential from keras.layers import Dense, Dropout, Input, Lambda, Flatten, Convolution2D, MaxPooling2D, ZeroPadding2D from keras import backend from keras.layers.advanced_activations import LeakyReLU from keras.layers import BatchNormalization from keras.activations import sigmoid #import cupy as cp from keras.regularizers import l2 # from keras.engine.topology import Layer from keras.layers import Layer #or do i have to use the keras.engine.topolgy? will see. 
from keras.utils import to_categorical, plot_model
from keras.models import Model
from keras.optimizers import Adam
import time
from keras.utils import Sequence
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# plt.xkcd()

# Let TensorFlow grow GPU memory on demand instead of reserving it all.
# NOTE(review): this is the TF1.x session API; under TF2 the equivalent is
# tf.config.experimental.set_memory_growth -- confirm the pinned TF version.
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)

# + id="bA0m_ROm-CZy" colab_type="code" colab={}
# Target image size (height, width) and the single-channel input shape.
h, w = 500, 300
shape = (h, w, 1)
# Source and destination folders on Google Drive.
refdir = "/content/drive/My Drive/Brians_Signature_Verification_Project/Dataset/Genuine"
fakedir = "/content/drive/My Drive/Brians_Signature_Verification_Project/Dataset/Forgeries"
targetrefdir = "/content/drive/My Drive/Brians_Signature_Verification_Project/Dataset/ProcessedGenuine"
targetfakedir = "/content/drive/My Drive/Brians_Signature_Verification_Project/Dataset/ProcessedForgeries"
zipdir = "/content/drive/My Drive/Brians_Signature_Verification_Project/Dataset/ZipDataSet"

# + id="xWGqW0iY9ks8" colab_type="code" cellView="both" colab={}
#@title save_img_to_dir(img_array,dir,label)
def save_img_to_dir(img_array, directory, label):
    """Write every image in ``img_array`` to ``directory`` as ``<label><i>.png``.

    ``img_array`` is expected to have shape (n, h, w, 1); only the single
    channel of each image is written out.
    """
    for i, img in enumerate(img_array):
        cv2.imwrite(directory + "/" + label + str(i) + ".png", img[:, :, 0])

# + id="jGD4QtoR9pak" colab_type="code" cellView="both" colab={}
#@title load_data(dir, shape, label):
def load_data(dir, shape, label="nolabel"):
    """Load every image in ``dir`` as uint8 grayscale resized to ``shape``.

    Returns the stacked ndarray of shape (n,) + shape. If ``label`` is given,
    additionally returns a parallel list repeating that label per image.
    (The ``dir`` parameter shadows the builtin of the same name; kept for
    backward compatibility with existing callers.)
    """
    img_array = [cv2.convertScaleAbs(img_to_array(load_img(dir + "/" + x, color_mode="grayscale", target_size=(shape[0:2]))))
                 for x in os.listdir(dir)]
    img_array = [img.reshape(shape) for img in img_array]
    img_array = np.asarray(img_array)
    if label == "nolabel":
        return img_array
    label_array = [label] * len(img_array)  # one label per image
    return img_array, label_array

# + id="UOM84zvo9qJj" colab_type="code" cellView="both" colab={}
#@title make_training_set(real,fake)
def make_training_set(real_dir, fake_dir, shape, size):
    """Build ``size`` random signature pairs with genuine/forged labels.

    Each pair is either (genuine, genuine) labelled 1 (a true match) or
    (genuine, forged) labelled 0 (no match), chosen with equal probability.
    Pixel values are scaled to [0, 1].

    Returns:
        X_train: float array of shape (size, 2, h, w, 1) -- the image pairs.
        Y_train: float array of shape (size, 1), 1 = matching pair.
    """
    X_real = load_data(real_dir, shape) / 255
    X_fake = load_data(fake_dir, shape) / 255
    X_train = np.zeros(shape=(size, 2, shape[0], shape[1], 1))
    Y_train = np.zeros(shape=(size, 1))
    # seed[i] == 0 -> (real, real) pair; seed[i] == 1 -> (real, fake) pair.
    seed = np.random.randint(0, 2, size=size)
    for i in range(seed.shape[0]):
        img_1 = X_real[np.random.randint(0, X_real.shape[0])]
        if seed[i] == 0:
            img_2 = X_real[np.random.randint(0, X_real.shape[0])]
            Y_train[i] = 1
        else:
            img_2 = X_fake[np.random.randint(0, X_fake.shape[0])]
            Y_train[i] = 0
        X_train[i, 0, :, :, 0] = img_1.reshape(shape[0], shape[1])
        X_train[i, 1, :, :, 0] = img_2.reshape(shape[0], shape[1])
    # Y_train = to_categorical(Y_train)  # not needed: labels are already 0/1
    return X_train, Y_train

# + id="CfKwmxK6-69a" colab_type="code" colab={}
#@title show_img
def show_img(img, title=""):
    """Display ``img`` in grayscale with an optional title."""
    plt.imshow(img, cmap='gray')
    plt.title(title)

# + id="lDePjUpP9U4T" colab_type="code" cellView="form" colab={}
#@title pre_process_data(dir, h, w):
def pre_process_data(dir, h, w):
    """Load, grayscale and annotate every image in ``dir``.

    Each image is resized to (h, w), converted to uint8, then annotated in
    place with a crosshair through its intensity-weighted centroid and a
    bounding box around its outermost contour.

    Returns:
        (annotated_images, labels): labels are the file names without
        their extension.
    """
    # Load images in grayscale, resize, turn into arrays.
    grayscale_img_array = [(img_to_array(load_img(dir + "/" + x, color_mode="grayscale", target_size=(h, w))))
                           for x in os.listdir(dir)]
    grayscale_img_array = [(cv2.convertScaleAbs(img)) for img in grayscale_img_array]  # convert to uint8 format
    # Thresholded images are used to find the weighted centroids of each image.
    # Threshold is set higher than 127 because the scanned signatures are faint.
    thresholded_img_array = [cv2.threshold(img, 170, 255, cv2.THRESH_BINARY_INV) for img in grayscale_img_array]
    thresholded_img_array = [thresh[1] for thresh in thresholded_img_array]  # threshold returns (ret, thresh); keep thresh only
    # Image moments give the "center of mass", used as the centroid.
    img_moments = [cv2.moments(thresh) for thresh in thresholded_img_array]
    center_x_pos = [int(m["m10"] / m["m00"]) for m in img_moments]
    center_y_pos = [int(m["m01"] / m["m00"]) for m in img_moments]
    # Draw a crosshair through the centroid of each grayscale image.
    for i in range(len(grayscale_img_array)):
        grayscale_img_array[i][:, center_x_pos[i]] = 0
        grayscale_img_array[i][center_y_pos[i], :] = 0
    # Draw the bounding box of the outermost contour on each image.
    img_contour_array = [cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                         for thresh in thresholded_img_array]
    for i in range(len(grayscale_img_array)):
        # Fixed: distinct names so the h/w parameters (and loop index) are
        # not clobbered by the bounding-rect unpacking.
        bx, by, bw, bh = cv2.boundingRect(img_contour_array[i][0])
        grayscale_img_array[i] = cv2.rectangle(grayscale_img_array[i], (bx, by), (bx + bw, by + bh), (0, 255, 0), 1)
    # Labels are the file names without their extension.
    labels = [x.split(".")[0] for x in os.listdir(dir)]
    return grayscale_img_array, labels

# + id="bjIVkjfj-gUD" colab_type="code" colab={}
# Cannot train at high enough resolution to justify the preprocessing above,
# so simply load the raw images in grayscale.
ref = load_data(refdir, shape)
fake = load_data(fakedir, shape)

# + id="xQgPWQM9BSHz" colab_type="code" outputId="b9685f33-1b31-461c-dfbc-63f1de47885b" colab={"base_uri": "https://localhost:8080/", "height": 33}
ref[:,:,:,0].shape

# + id="LsNKbOGj_UC6" colab_type="code" colab={}
# Save the loaded arrays back to Drive.
save_img_to_dir(fake, targetfakedir, "Fake_")
save_img_to_dir(ref, targetrefdir, "Real_")

# + id="9SZCkhH8EWVL" colab_type="code" outputId="b8e51faa-58cc-4e10-fb70-c7ccac43059e" colab={"base_uri": "https://localhost:8080/", "height": 147}
# Access .zip files.
# Fixed: a bare `pip install gdown` statement is a syntax error in the script
# form of this notebook -- run it as a shell command instead, matching the
# `# !gdown` cell below.
# !pip install gdown

# + id="X74ENKjXEZ6a" colab_type="code" outputId="4a86e646-6e57-4a6b-9234-3c6335887549" colab={"base_uri": "https://localhost:8080/", "height": 82}
# !gdown --id 1v5eGVp6aaSIsozIgmB1oOQjQU-l0AH90 --output /content/data.zip

# + id="CkZ7qivbEzBq" colab_type="code" colab={}
import zipfile
# Context manager guarantees the archive handle is closed even on error.
with zipfile.ZipFile('/content/data.zip', 'r') as z:
    z.extractall('/content/dataset')
DataProcessingForSignatureVerification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys sys.path.append('../..') import pyotc # + from scipy import pi from pyotc.utilities import logspace_points_per_decade from pyotc.psd import lorentzian_psd, hydro_psd, low_pass_filter from pyotc.physics import MATERIALS f = logspace_points_per_decade(10, 1e6) T = (273.15 + 29) R = 0.5e-6 rho = MATERIALS['ps'] l = 4000e-6 kappa = 0.004 # N/m f_c_0 = kappa / (2 * pi * pyotc.drag(R, T)).real D_0 = pyotc.k_B * T / pyotc.drag(R, T).real lor = lorentzian_psd(f, D_0, f_c_0) / lorentzian_psd(0.0, D_0, f_c_0) hyd = hydro_psd(f, D_0, f_c_0, height=l, radius=R, temp=T, rho=rho) / hydro_psd(0.0, D_0, f_c_0, height=l, radius=R, temp=T) alp = 0.1 f3dB = 8000 lor_lp = lor * low_pass_filter(f, alpha=alp, f3dB=f3dB) hyd_lp = hyd * low_pass_filter(f, alpha=alp, f3dB=f3dB) # + import matplotlib.pyplot as plt from pyotc import add_plot_to_figure from pyotc.plotting import col_dict plt.close('all') ax = add_plot_to_figure(None, f, lor, label='Lorentzian', fmt='-', color=col_dict[0]) fig = ax.figure add_plot_to_figure(fig, f, hyd, label='hydro', fmt='-', color=col_dict[3]) add_plot_to_figure(fig, f, lor_lp, label='Lorentzian + LP', fmt='--', color=col_dict[0]) add_plot_to_figure(fig, f, hyd_lp, label='hydro + LP', fmt='--', color=col_dict[3], logplot=True, showLegend=True, legend_kwargs={'loc':3}, xlabel='Frequency (Hz)', ylabel='P(f)/P(0)') ax.set_ylim([1e-5, 10]); s = ('Titania particle' ' \n' 'R = 1 ยตm \n' 'T = 29.0 degC \n' 'h = 40 ยตm \n\n' '$\kappa$ = {0:1.1f} pN/nm \n\n'.format(kappa*1e3) + 'QPD \n' r'$\alpha$ = 0.1' '\n' r'$\mathsf{f_{3dB}}$' ' = 8 kHz') fig.text(0.142, 0.35, s, fontsize=14, bbox=dict(facecolor='none', edgecolor='black')) fig # + #ptf = './' #fig.savefig(ptf, dpi=100, format='png') # -
examples/notebooks/plot_psd_theory.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <p align="center">
#   <img src="https://github.com/GeostatsGuy/GeostatsPy/blob/master/TCG_color_logo.png?raw=true" width="220" height="240" />
#
# </p>
#
# ## GeostatsPy: Cell-based Declustering with Basic Univariate Statistics and Distribution Representativity for Subsurface Data Analytics in Python
#
# #### <NAME>, Associate Professor, University of Texas at Austin
#
# ##### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) | [GeostatsPy](https://github.com/GeostatsGuy/GeostatsPy)

# ### Basic Univariate Summary Statistics and Data Distribution Representativity Plotting in Python with GeostatsPy
#
# Here's a simple workflow with some basic univariate statistics and distribution representativity. This should help you get started with data declustering to address spatial sampling bias.
#
# #### Geostatistical Sampling Representativity
#
# In general, we should assume that all spatial data that we work with is biased.
#
# ##### Source of Spatial Sampling Bias
#
# Data is collected to answer questions:
# * how far does the contaminant plume extend? – sample peripheries
# * where is the fault? – drill based on seismic interpretation
# * what is the highest mineral grade? – sample the best part
# * how far does the reservoir extend? – offset drilling
#
# and to maximize NPV directly:
# * maximize production rates
#
# **Random Sampling**: when every item in the population has an equal chance of being chosen. Selection of every item is independent of every other selection.
# Is random sampling sufficient for subsurface? Is it available?
# * it is not usually available, and would not be economic
# * data is collected to answer questions
#     * how large is the reservoir, what is the thickest part of the reservoir
# * and wells are located to maximize future production
#     * dual purpose appraisal and injection / production wells!
#
# **Regular Sampling**: when samples are taken at regular intervals (equally spaced).
# * less reliable than random sampling.
# * Warning: may resonate with some unsuspected environmental variable.
#
# What do we have?
# * we usually have biased, opportunity sampling
# * we must account for bias (debiasing will be discussed later)
#
# So if we were designing sampling for representativity of the sample set and resulting sample statistics, by theory we have 2 options, random sampling and regular sampling.
#
# * What would happen if you proposed random sampling in the Gulf of Mexico at $150M per well?
#
# We should not change current sampling methods as they result in best economics; instead, we should address the sampling bias in the data.
#
# Never use raw spatial data without assessing and correcting for sampling bias.
#
# ##### Mitigating Sampling Bias
#
# In this demonstration we will take a biased spatial sample data set and apply declustering using **GeostatsPy** functionality.
#
# #### Objective
#
# In the PGE 383: Stochastic Subsurface Modeling class I want to provide hands-on experience with building subsurface modeling workflows. Python provides an excellent vehicle to accomplish this. I have coded a package called GeostatsPy with GSLIB: Geostatistical Library (Deutsch and Journel, 1998) functionality that provides basic building blocks for building subsurface modeling workflows.
# # The objective is to remove the hurdles of subsurface modeling workflow construction by providing building blocks and sufficient examples. This is not a coding class per se, but we need the ability to 'script' workflows working with numerical methods. # # #### Getting Started # # Here's the steps to get setup in Python with the GeostatsPy package: # # 1. Install Anaconda 3 on your machine (https://www.anaconda.com/download/). # 2. From Anaconda Navigator (within Anaconda3 group), go to the environment tab, click on base (root) green arrow and open a terminal. # 3. In the terminal type: pip install geostatspy. # 4. Open Jupyter and in the top block get started by copy and pasting the code block below from this Jupyter Notebook to start using the geostatspy functionality. # # You will need to copy the data file to your working directory. They are available here: # # * Tabular data - sample_data_biased.csv at https://git.io/fh0CW # # There are exampled below with these functions. You can go here to see a list of the available functions, https://git.io/fh4eX, other example workflows and source code. import geostatspy.GSLIB as GSLIB # GSLIB utilies, visualization and wrapper import geostatspy.geostats as geostats # GSLIB methods convert to Python # We will also need some standard packages. These should have been installed with Anaconda 3. import numpy as np # ndarrys for gridded data import pandas as pd # DataFrames for tabular data import os # set working directory, run executables import matplotlib.pyplot as plt # for plotting from scipy import stats # summary statistics # #### Set the working directory # # I always like to do this so I don't lose files and to simplify subsequent read and writes (avoid including the full address each time). os.chdir("c:/PGE383") # set the working directory # #### Loading Tabular Data # # Here's the command to load our comma delimited data file in to a Pandas' DataFrame object. 
df = pd.read_csv('sample_data_biased.csv') # load our data table (wrong name!) # No error now! It worked, we loaded our file into our DataFrame called 'df'. But how do you really know that it worked? Visualizing the DataFrame would be useful and we already leard about these methods in this demo (https://git.io/fNgRW). # # We can preview the DataFrame by printing a slice or by utilizing the 'head' DataFrame member function (with a nice and clean format, see below). With the slice we could look at any subset of the data table and with the head command, add parameter 'n=13' to see the first 13 rows of the dataset. #print(df.iloc[0:5,:]) # display first 4 samples in the table as a preview df.head(n=13) # we could also use this command for a table preview # #### Summary Statistics for Tabular Data # # The table includes X and Y coordinates (meters), Facies 1 and 2 (1 is sandstone and 0 interbedded sand and mudstone), Porosity (fraction), and permeability as Perm (mDarcy). # # There are a lot of efficient methods to calculate summary statistics from tabular data in DataFrames. The describe command provides count, mean, minimum, maximum, and quartiles all in a nice data table. We use transpose just to flip the table so that features are on the rows and the statistics are on the columns. df.describe() # #### Specify the Area of Interest # # It is natural to set the x and y coordinate and feature ranges manually. e.g. do you want your color bar to go from 0.05887 to 0.24230 exactly? Also, let's pick a color map for display. I heard that plasma is known to be friendly to the color blind as the color and intensity vary together (hope I got that right, it was an interesting Twitter conversation started by <NAME> from Agile if I recall correctly). We will assume a study area of 0 to 1,000m in x and y and omit any data outside this area. 
xmin = 0.0; xmax = 1000.0     # range of x values [m]
ymin = 0.0; ymax = 1000.0     # range of y values [m]
pormin = 0.05; pormax = 0.25; # range of porosity values [fraction]
cmap = plt.cm.inferno         # color map

# #### Visualizing Tabular Data with Location Maps
#
# Let's try out locmap. This is a reimplementation of GSLIB's locmap program that uses matplotlib. I hope you find it simpler than matplotlib, if you want to get more advanced and build custom plots look at the source. If you improve it, send me the new code. Any help is appreciated. To see the parameters, just type the command name:

GSLIB.locmap  # display the function object (signature/docstring)

# Now we can populate the plotting parameters and visualize the porosity data.

GSLIB.locmap(df,'X','Y','Porosity',xmin,xmax,ymin,ymax,pormin,pormax,'Well Data - Porosity','X(m)','Y(m)','Porosity (fraction)',cmap,'locmap_Porosity')

# Look carefully, and you'll notice the spatial samples are more dense in the high porosity regions and lower in the low porosity regions. There is preferential sampling. We cannot use the naive statistics to represent this region. We have to correct for the clustering of the samples in the high porosity regions.
#
# Let's try cell declustering. We can interpret that we will want to minimize the declustering mean and that a cell size of between 100 - 200m is likely a good cell size, this is 'an ocular' estimate of the largest average spacing in the sparsely sampled regions.
#
# Let's check out the declus program reimplemented from GSLIB.

geostats.declus  # display the function object (signature/docstring)

# We can now populate the parameters. The parameters are:
#
# * **df** - DataFrame with the spatial dataset
# * **xcol** - column with the x coordinate
# * **ycol** - column with the y coordinate
# * **vcol** - column with the feature value
# * **iminmax** - if 1 use the cell size that minimizes the declustered mean, if 0 the cell size that maximizes the declustered mean
# * **noff** - number of cell mesh offsets to average the declustered weights over
# * **ncell** - number of cell sizes to consider (between the **cmin** and **cmax**)
# * **cmin** - minimum cell size
# * **cmax** - maximum cell size
#
# We will run a very wide range of cell sizes, from 10m to 2,000m ('cmin' and 'cmax') and take the cell size that minimizes the declustered mean ('iminmax' = 1 minimize, and = 0 maximize). Multiple offsets (number of these is 'noff') uses multiple grid origins and averages the results to remove sensitivity to grid position. The ncell is the number of cell sizes.
# NOTE(review): the text above says 10m, but the call below passes cmin=1 -- confirm which minimum cell size is intended.
#
# The output from this program is:
#
# * **wts** - an array with the weights for each data (they sum to the number of data, 1 indicates nominal weight)
# * **cell_sizes** - an array with the considered cell sizes
# * **dmeans** - an array with the declustered mean for each of the **cell_sizes**
#
# The **wts** are the declustering weights for the selected (minimizing or maximizing cell size) and the **cell_sizes** and **dmeans** are plotted to build the diagnostic declustered mean vs. cell size plot (see below).

wts, cell_sizes, dmeans = geostats.declus(df,'X','Y','Porosity',iminmax = 1, noff= 10, ncell=100,cmin=1,cmax=2000)
df['Wts'] = wts            # add weights to the sample data DataFrame
df.head()                  # preview to check the sample data DataFrame

dmeans  # declustered mean for each candidate cell size

# Let's look at the location map of the weights.

GSLIB.locmap(df,'X','Y','Wts',xmin,xmax,ymin,ymax,0.5,2.5,'Well Data Weights','X(m)','Y(m)','Weights',cmap,'locmap_Weights')

# Does it look correct? See the weight varies with local sampling density?
# Now let's add the distribution of the weights and the naive and declustered porosity distributions. You should see the histogram bars adjusted by the weights. Also note the change in the mean due to the weights. There is a significant change.

# +
# 2x2 panel: weights map, weights histogram, naive vs. declustered porosity.
plt.subplot(221)
GSLIB.locmap_st(df,'X','Y','Wts',xmin,xmax,ymin,ymax,0.0,2.0,'Declustering Weights','X (m)','Y (m)','Weights',cmap)

plt.subplot(222)
GSLIB.hist_st(df['Wts'],0.5,2.5,log=False,cumul=False,bins=20,weights=None,xlabel="Weights",title="Declustering Weights")
plt.ylim(0.0,60)

plt.subplot(223)
GSLIB.hist_st(df['Porosity'],0.05,0.25,log=False,cumul=False,bins=20,weights=None,xlabel="Porosity",title="Naive Porosity")
plt.ylim(0.0,60)

plt.subplot(224)
GSLIB.hist_st(df['Porosity'],0.05,0.25,log=False,cumul=False,bins=20,weights=df['Wts'],xlabel="Porosity",title="Declustered Porosity")
plt.ylim(0.0,60)

por_mean = np.average(df['Porosity'].values)                            # naive (equal-weight) mean
por_dmean = np.average(df['Porosity'].values,weights=df['Wts'].values)  # declustered (weighted) mean
print('Porosity naive mean is ' + str(round(por_mean,3))+'.')
print('Porosity declustered mean is ' + str(round(por_dmean,3))+'.')
cor = (por_mean-por_dmean)/por_mean  # relative correction applied by declustering
print('Correction of ' + str(round(cor,4)) +'.')

# Fixed typo in the printed message ("declsutering" -> "declustering").
print('\nSummary statistics of the declustering weights:')
print(stats.describe(wts))

plt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=2.5, wspace=0.2, hspace=0.2)
plt.show()
# -

# Now let's look at the plot of the declustered porosity mean vs. the declustering cell size over the 100 runs. At very small and very large cell size the declustered mean is the naive mean.

plt.subplot(111)
plt.scatter(cell_sizes,dmeans, s=30, alpha = 0.2, edgecolors = "black", facecolors = 'red')
plt.xlabel('Cell Size (m)')
plt.ylabel('Declustered Porosity Mean (fraction)')
plt.title('Declustered Porosity Mean vs. Cell Size')
plt.plot([0,2000],[por_mean,por_mean],color = 'black')              # naive mean reference line
plt.plot([200,200],[0.10,0.16],color = 'black',linestyle='dashed')  # minimizing cell size
plt.text(300., 0.136, r'Naive Porosity Mean')
plt.text(500., 0.118, r'Declustered Porosity Mean')
plt.text(230., 0.154, r'Minimizing')
plt.text(230., 0.150, r'Cell Size')
plt.ylim(0.10,0.16)
plt.xlim(0,2000)
plt.subplots_adjust(left=0.0, bottom=0.0, right=1.2, top=1.2, wspace=0.2, hspace=0.2)
plt.show()

# The cell size that minimizes the declustered mean is about 200m (estimated from the figure). This makes sense given our previous observation of the data spacing.

# #### Comments
#
# This was a basic demonstration of declustering to correct for sampling bias. Much more could be done, I have other demonstrations on the basics of working with DataFrames, ndarrays and many other workflows available at https://github.com/GeostatsGuy/PythonNumericalDemos and https://github.com/GeostatsGuy/GeostatsPy.
#
# I hope this was helpful,
#
# *Michael*
#
# <NAME>, Ph.D., P.Eng. Associate Professor The Hildebrand Department of Petroleum and Geosystems Engineering, Bureau of Economic Geology, The Jackson School of Geosciences, The University of Texas at Austin
#
# #### More Resources Available at: [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1)
#
Workflows/GeostatsPy_declustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Boundary conditions tutorial # This tutorial aims to demonstrate how users can implement various boundary conditions in Devito, building on concepts introduced in previous tutorials. Over the course of this notebook we will go over the implementation of both free surface boundary conditions and perfectly-matched layers (PMLs) in the context of the first-order acoustic wave equation. This tutorial is based on a simplified version of the method outlined in Liu and Tao's 1997 paper (https://doi.org/10.1121/1.419657). # # We will set up our domain with PMLs along the left, right, and bottom edges, and free surface boundaries at the top as shown below. # # <img src="figures/boundary_conditions.png" style="width: 220px;"/> # # Note that whilst in practice we would want the PML tapers to overlap in the corners, this requires additional subdomains. As such, they are omitted for simplicity. # # As always, we will begin by specifying some parameters for our `Grid`: # + # %matplotlib inline shape = (101, 101) extent = (2000., 2000.) nbpml = 10 # Number of PMLs on each side # - # We will need to use subdomains to accomodate the modified equations in the PML regions. As `Grid` objects cannot have subdomains added retroactively, we must define our subdomains beforehand. 
# +
from devito import SubDomain


class MainDomain(SubDomain):  # Main section with no damping
    """Interior region: excludes the PML columns and the bottom PML rows."""
    name = 'main'

    def __init__(self, PMLS):
        super().__init__()
        self.PMLS = PMLS  # PML thickness in grid points

    def define(self, dimensions):
        x, y = dimensions
        return {x: ('middle', self.PMLS, self.PMLS),
                y: ('middle', 0, self.PMLS)}


class Left(SubDomain):  # Left PML region
    """Leftmost PMLS columns, full height."""
    name = 'left'

    def __init__(self, PMLS):
        super().__init__()
        self.PMLS = PMLS

    def define(self, dimensions):
        x, y = dimensions
        return {x: ('left', self.PMLS),
                y: y}


class Right(SubDomain):  # Right PML region
    """Rightmost PMLS columns, full height."""
    name = 'right'

    def __init__(self, PMLS):
        super().__init__()
        self.PMLS = PMLS

    def define(self, dimensions):
        x, y = dimensions
        return {x: ('right', self.PMLS),
                y: y}


class Base(SubDomain):  # Base PML region
    """Bottom PMLS rows, excluding the left/right PML columns."""
    name = 'base'

    def __init__(self, PMLS):
        super().__init__()
        self.PMLS = PMLS

    def define(self, dimensions):
        x, y = dimensions
        return {x: ('middle', self.PMLS, self.PMLS),
                y: ('right', self.PMLS)}


main_domain = MainDomain(nbpml)
left = Left(nbpml)
right = Right(nbpml)
base = Base(nbpml)
# -

# And create the grid, attaching our subdomains:

# +
from devito import Grid
grid = Grid(shape=shape, extent=extent,
            subdomains=(main_domain, left, right, base))
x, y = grid.dimensions
# -

# We can then begin to specify our problem starting with some parameters.

density = 1. # 1000kg/m^3
velocity = 4. # km/s
gamma = 0.0002 # Absorption coefficient

# We also need a `TimeFunction` object for each of our wavefields. As particle velocity is a vector, we will choose a `VectorTimeFunction` object to encapsulate it.

# +
from devito import TimeFunction, VectorTimeFunction, NODE
# Pressure is node-staggered; the velocity vector components are staggered
# by default (see the note on `staggered` below).
p = TimeFunction(name='p', grid=grid, time_order=1,
                 space_order=6, staggered=NODE)
v = VectorTimeFunction(name='v', grid=grid, time_order=1, space_order=6)
# -

# A `VectorTimeFunction` is near identical in function to a standard `TimeFunction`, albeit with a field for each grid dimension. The fields associated with each component can be accessed as follows:

print(v[0].data) # Print the data attribute associated with the x component of v

# You may have also noticed the keyword `staggered` in the arguments when we created these functions. As one might expect, these are used for specifying where derivatives should be evaluated relative to the grid, as required for implementing formulations such as the first-order acoustic wave equation or P-SV elastic. Passing a function `staggered=NODE` specifies that its derivatives should be evaluated at the node. One can also pass `staggered=x` or `staggered=y` to stagger the grid by half a spacing in those respective directions. Additionally, a tuple of dimensions can be passed to stagger in multiple directions (e.g. `staggered=(x, y)`). `VectorTimeFunction` objects have their associated grids staggered by default.
#
# We will also need to define a field for integrating pressure over time:

p_i = TimeFunction(name='p_i', grid=grid, time_order=1, space_order=1, staggered=NODE)

# Next we prepare the source term:

# +
import numpy as np
from examples.seismic import TimeAxis, RickerSource

t0 = 0.   # Simulation starts at t=0
tn = 400. # Simulation length in ms
dt = 1e2*(1. / np.sqrt(2.)) / 60. # Time step

time_range = TimeAxis(start=t0, stop=tn, step=dt)

f0 = 0.02 # Ricker peak frequency (presumably kHz, consistent with ms time units -- confirm)
src = RickerSource(name='src', grid=grid, f0=f0, npoint=1, time_range=time_range)

# Position source centrally in all dimensions
src.coordinates.data[0, :] = 1000.

# + tags=["nbval-skip"]
src.show()
# -

# For our PMLs, we will need some damping parameter. In this case, we will use a quadratic taper over the absorbing regions on the left and right sides of the domain.
# Damping parameterisation: quadratic tapers over the PML regions
# (0.1 = 1/nbpml, so the factor runs from 1 at the outer boundary
# towards 0 at the inner PML edge).
d_l = (1-0.1*x)**2 # Left side
d_r = (1-0.1*(grid.shape[0]-1-x))**2 # Right side
d_b = (1-0.1*(grid.shape[1]-1-y))**2 # Base edge

# Now for our main domain equations:

# +
from devito import Eq, grad, div

# Undamped first-order acoustic update in the interior.
eq_v = Eq(v.forward, v - dt*grad(p)/density, subdomain=grid.subdomains['main'])
eq_p = Eq(p.forward, p - dt*velocity**2*density*div(v.forward), subdomain=grid.subdomains['main'])
# -

# We will also need to set up `p_i` to calculate the integral of `p` over time for our PMLs:

# Trapezoidal-style accumulation of the pressure integral.
eq_p_i = Eq(p_i.forward, p_i + dt*(p.forward+p)/2)

# And add the equations for our damped region:

# +
# Left side
eq_v_damp_left = Eq(v.forward, (1-d_l)*v - dt*grad(p)/density,
                    subdomain=grid.subdomains['left'])
eq_p_damp_left = Eq(p.forward,
                    (1-gamma*velocity**2*dt-d_l*dt)*p - d_l*gamma*velocity**2*p_i - dt*velocity**2*density*div(v.forward),
                    subdomain=grid.subdomains['left'])

# Right side
eq_v_damp_right = Eq(v.forward, (1-d_r)*v - dt*grad(p)/density,
                     subdomain=grid.subdomains['right'])
eq_p_damp_right = Eq(p.forward,
                     (1-gamma*velocity**2*dt-d_r*dt)*p - d_r*gamma*velocity**2*p_i - dt*velocity**2*density*div(v.forward),
                     subdomain=grid.subdomains['right'])

# Base edge
eq_v_damp_base = Eq(v.forward, (1-d_b)*v - dt*grad(p)/density,
                    subdomain=grid.subdomains['base'])
eq_p_damp_base = Eq(p.forward,
                    (1-gamma*velocity**2*dt-d_b*dt)*p - d_b*gamma*velocity**2*p_i - dt*velocity**2*density*div(v.forward),
                    subdomain=grid.subdomains['base'])
# -

# Add our free surface boundary conditions:

# +
def freesurface_top(p_func, v_func):
    """Free-surface conditions for the top edge of the domain.

    Returns a list of equations that pin pressure to zero on the surface
    row and mirror the vertical velocity component antisymmetrically above
    it (so the staggered derivative sees an odd extension).
    """
    time = p_func.grid.stepping_dim
    pos = int(max(p_func.space_order, v_func.space_order)/2)
    # Fixed: use the function arguments rather than the module-level p and v,
    # so the helper works for whichever fields are passed in.
    bc_p = [Eq(p_func[time+1, x, pos], 0.)]
    bc_v = [Eq(v_func[1][time+1, x, i], -v_func[1][time+1, x, 2*pos-i])
            for i in range(pos)]
    return bc_p + bc_v

bc = freesurface_top(p, v)
# -

# And our source terms:

src_term = src.inject(field=p.forward, expr=src)

# Construct our operator and run:

# + tags=["nbval-ignore-output"]
from devito import Operator
# All v updates must precede all p updates (see the note below).
op = Operator([eq_v, eq_v_damp_left, eq_v_damp_base, eq_v_damp_right,
               eq_p, eq_p_damp_left, eq_p_damp_base, eq_p_damp_right,
               eq_p_i]
              + src_term + bc)
op(time=time_range.num-1)
# -

# It is important to remember that the ordering of equations when an `Operator` is created dictates the order of loops within the generated c code. As such, the `v` equations all need to be placed before the `p` ones otherwise the operator will attempt to use the updated `v` fields before they have been updated.
#
# Now let's plot the wavefield.

# + tags=["nbval-skip"]
import matplotlib.pyplot as plt

p.data[:, :, :3] = 0 # Mute out mirrored wavefield above free surface

scale = np.max(p.data[1])  # normalise the colour range to the current peak
# fig = plt.figure()
plt.imshow(p.data[1].T/scale, origin="upper", vmin=-1, vmax=1,
           extent=[0, grid.extent[0], grid.extent[1], 0], cmap='seismic')
plt.colorbar()
plt.xlabel("x (m)")
plt.ylabel("y (m)")
plt.show()
# -

# As we can see, the wave is effectively damped at the edge of the domain by the 10 layers of PMLs, with diminished reflections back into the domain.
examples/userapi/04_boundary_conditions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from pycaret.datasets import get_data data = get_data('jewellery') from pycaret.clustering import * # ## Initialize setup s = setup(data, normalize=True, session_id=786) # ## Create Model kmeans = create_model(model='kmeans') kmodes = create_model(model='kmodes') skmeans = create_model(model='skmeans') hclust = create_model(model='hclust') birch = create_model(model='birch') dbscan = create_model(model='dbscan') # ## Assign Model assign_model(kmeans) assign_model(kmodes) assign_model(skmeans) assign_model(hclust) assign_model(birch) assign_model(dbscan) # ## Plot Model plot_model(kmeans) plot_model(kmeans, plot = 'tsne') plot_model(kmeans, plot = 'elbow') plot_model(kmeans, plot = 'silhouette') plot_model(kmeans, plot = 'distance') plot_model(kmeans, plot = 'distribution') plot_model(kmeans, plot = 'distribution', feature='Age') # ## Tune Model (Classifier) data = get_data('juice') s2 = setup(data, normalize=True, session_id = 786) # %%time tuned_classifier = tune_model(model='kmodes', supervised_target = 'Purchase', estimator = 'xgboost') # ## Tune Model (Regression) data = get_data('boston') s3 = setup(data, normalize=True) # %%time tuned_regressor = tune_model(model='skmeans', supervised_target = 'medv', estimator = 'lightgbm')
Test Cases/PyCaret Clustering - Test Case.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np import statsmodels.api as sm from statsmodels.formula.api import ols from scipy import stats # read the csv path. you might need to change this path pathName = r"C:\MyFiles\Dropbox\Teaching\Urban_Data_Analsysis\Spring 2021\Data\NYC_Census_Tracts.csv" # read the csv file as a dataframe df = pd.read_csv(pathName) # get a list of columns df.columns # get a sample of rows df.sample(3) df.info() # get the unique values of a categorical variable df['HeatVulIndex'].unique() # + # calculate a new column using other columns df['pop_density_2000'] = df['Population_2000']/df['AreaAcre'] df['pop_density_0610'] = df['Population_610']/df['AreaAcre'] df['pop_density_1216'] = df['Population_1216']/df['AreaAcre'] df['Pct_NH_White_2000'] = df['NH_White_2000'] * 100 / df['Population_2000'] df['Pct_NH_White_0610'] = df['NH_White_610'] * 100 / df['Population_610'] df['Pct_NH_White_1216'] = df['NH_White_1216'] * 100 / df['Population_1216'] df['Housing_Density_2000'] = df['HousingUnits_2000'] / df['AreaAcre'] df['Housing_Density_0610'] = df['HousingUnits_610'] / df['AreaAcre'] df['Housing_Density_1216'] = df['HousingUnits_1216'] / df['AreaAcre'] df['pct_black2000']=df['NH_Black_2000']/df['Population_2000'] df['pct_black0610']=df['NH_Black_610']/df['Population_610'] df['pct_black1216']=df['NH_Black_1216']/df['Population_1216'] df['rentBurden_2000'] = (df['MedianRent_2000']*12)/df['MedHHIncome_2000'] df['rentBurden_0610'] = (df['MedianRent_610']*12)/df['MedHHIncome_610'] # df['rentBurden_1216'] = (df['MedianRent_1216']*12)/df['MedHHIncome_1216'] # this code does not work because there are some income values as 0 and any number / 0 is inf. 
# we can use this code instead # 'col_to_write' can be a new column or an existing one that you want to rewrite # Condition limits the rows that need to be written. for example: df['a_col'] > number # value can be a number or a fromula that uses the columns of the df # syntax: df.loc[(condition),'col_to_write'] = value df.loc[(df['MedHHIncome_1216']>0),'rentBurden_1216'] = (df['MedianRent_1216']*12)/df['MedHHIncome_1216'] # - # another example for running a formula on a subset df.loc[(df['Population_1216']>0),'pct_black_1216'] = (df['NH_Black_1216']*100)/df['Population_1216'] # + # df.drop('rentBurden_1216',axis=1,inplace=True) # - df['rentBurden_2000'].std() df['rentBurden_2000'].quantile(0.75) df['rentBurden_2000'].quantile(0.25) df.sample(5) sns.histplot(data=df,x='Housing_Density_2000',bins=50,color='Yellow') sns.histplot(data=df,x='Housing_Density_0610',bins=50,color='Blue',alpha=0.3) sns.histplot(data=df,x='Housing_Density_1216',bins=50,color='Red',alpha=0.3) sns.histplot(data=df,x='rentBurden_1216',bins=70,color='Red') sns.histplot(data=df,x='rentBurden_0610',bins=70,color='Green',alpha= 0.4) # simple scatter plot sns.scatterplot(data=df, x ='rentBurden_1216',y ='pct_black_1216',alpha=0.2, hue='Borough_Name') sns.displot(df, x ='rentBurden_1216',y ='pct_black_1216',hue='Borough_Name') sns.jointplot(data=df, x ='rentBurden_1216',y ='pct_black_1216',alpha=0.3,hue = 'Borough_Name') sns.boxplot(x="HeatVulIndex", y="rentBurden_1216", data=df , palette="Set1") newDf=df[df['HeatVulIndex']>=0] sns.boxplot(x="Borough_Name", y="rentBurden_1216", data=newDf , palette="Set1",hue='HeatVulIndex') sns.regplot(x="rentBurden_1216", y="pct_black_1216", data=df,scatter_kws={'alpha':0.1}); sns.lmplot(x="rentBurden_1216", y="pct_black_1216", hue = 'Borough_Name', data=df,scatter_kws={'alpha':0.1}); # + jupyter={"outputs_hidden": true} # stats.pearsonr? 
# -

# ## Correlation

# Pearson correlation straight on the raw columns (NaN/inf will propagate).
stats.pearsonr(df['pct_black2000'],df['rentBurden_2000'])

# +
# get a subset of columns
# .copy() makes an independent frame so the in-place dropna below does not
# trigger pandas' SettingWithCopy warning against the parent df
dfRun = df[['pct_black2000','rentBurden_2000']].copy()

# replace inf and -inf with nan
# BUG FIX: DataFrame.replace is NOT in-place by default -- the original call
# discarded its result, so +/-inf values survived and were not removed by
# dropna, corrupting the correlation. Assign the result back.
dfRun = dfRun.replace([np.inf, -np.inf], np.nan)

# drop nan values
dfRun.dropna(inplace=True)

# stats.pearsonr(x, y)
stats.pearsonr(dfRun['pct_black2000'],dfRun['rentBurden_2000'])
# the first output is the correlation value and the second outcome is the p-value
# -

# ### Get a subset of a df
# Condition limits the rows that need to be written. for example: df['a_col'] > number
# syntax: df.loc[(condition)]
dfSub = df.loc[df['Borough_Name']=='Bronx']
Cheatsheet_for_pandas.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Colab notebook: a tiny Keras multi-layer perceptron trained on a
# 2-input truth table, with experiment notes on layer width vs. loss.

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Ethan-Jeong/test_deeplearning/blob/master/Multi_perceptron.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="HbBI8VpYxSeA"
import tensorflow as tf

# + colab={"base_uri": "https://localhost:8080/"} id="b33GNwfHxqOV" outputId="1cf5e39d-8532-47d4-d6bb-09b2346a46b1"
# NOTE(review): these labels are logical OR ([0,1,1,1]), not XOR, despite the
# "solve XOR model" comment below -- confirm which gate was intended.
x_data = [[0,0], [1,0], [0,1], [1,1]]
y_data = [[0], [1], [1], [1]]
type(x_data), type(y_data),

# + colab={"base_uri": "https://localhost:8080/"} id="TXcemb6Qx0Kx" outputId="da61a6b5-d65a-4743-e16c-1f9357da3394"
import numpy as np
x_train = np.array(x_data)
y_train = np.array(y_data)
x_train.shape, y_train.shape

# + [markdown] id="cuK1i7Hd0tTg"
# ## create model

# + colab={"base_uri": "https://localhost:8080/"} id="KStfxBPXyIiH" outputId="4f1e0ccf-5235-44dd-d189-39d55ff6205b"
model = tf.keras.models.Sequential()  # solve XOR model
model.add(tf.keras.Input(shape=(2,)))  # input layer
model.add(tf.keras.layers.Dense(2, activation='sigmoid'))  # hidden layer
# model.add(tf.keras.layers.Dense(128, activation='sigmoid'))  # hidden layer
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))  # output layer
model.compile(optimizer='sgd', loss='mse', metrics=['acc'])

# + id="_mvqQo6h8vJe"
# tf.keras.utils.plot_model(model, show_shapes=True)

# + id="84pH1aqT8Uv9" colab={"base_uri": "https://localhost:8080/"} outputId="6db5de9e-e9b7-405b-f457-3613f3a1b8ed"
model.fit(x_train, y_train, epochs=100)

# + id="sMa5mMAG9bKj"
# model.predict([[0,1]]) # dense : 2, epochs : 100, loss : 0.20 --> 0.53

# + id="xqacjq75_7I8"
# model.predict([[0,1]]) # dense : 128, epochs : 100, loss: 0.2101 --> 0.72344226

# + colab={"base_uri": "https://localhost:8080/"} id="175qxRycA9rA" outputId="b3c4138b-f199-45aa-b1b6-85f562fa6b9d"
model.predict([[0,1]]) # dense : 128*2, loss: 0.1864 --> 0.73873436

# + colab={"base_uri": "https://localhost:8080/"} id="mkPbwGKZBE4_" outputId="4128715a-1715-4514-d64e-43c4b1190ed1"
model.evaluate(x_train, y_train)

# + id="gnb5n5w_CWuL" colab={"base_uri": "https://localhost:8080/"} outputId="21658e44-20b7-4af0-8b75-49b81fd0f21d"
# inspect the learned weight tensors
model.weights

# + colab={"base_uri": "https://localhost:8080/"} id="L-_MUDwD1usv" outputId="b89f4ba3-1659-450e-e17a-b31ae52b7feb"
model.summary()

# + id="J8NA-UuA117F"
Multi_perceptron.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ENGR 1330 Exam 1 Sec D01/D04 Spring 2021 # --- # Instructions: # - Work directly in this notebook, upon completion render the notebook as a PDF file. # - Upload your solution PDF **AND** the .csv and .ipynb (3 files) to the Blackboard repository for grading! # - Multiple attempts are allowed in the 24-hour time window # - Internet resources are allowed, remember to cite what you copy from elsewhere. # - Consulting with each other is not permitted. # - Sharing work with each other is not permitted. # - Working together is not permitted. # # --- # # ## Full name # ## R#: # ## HEX: # ## ENGR 1330 Exam 1 Sec 003/004 # ## Date: # <hr> # ## Question 1 (1 pts): # Run the cell below, and leave the results in your notebook. # If you get an ERROR message, leave it and continue. #### RUN! this Cell #### import sys # ! hostname # ! whoami print(sys.executable) # OK if generates an exception message on Windows machines # tested ok MacOS, arm linux, x86-64 linux, Windows 10VM, Windows Server # --- # ## Question 2 (5 pts): # # The script below is intended to print out all the leap years from 1952 to 1996 (inclusive). # However, the programmer keeps getting an error and has become frustrated with it! Please have a look at the code, the error(s), and the output and fix it so that it does what the user wants. Then, explain what was wrong with the script and how you have managed to fix it. # + jupyter={"outputs_hidden": false} for years in range(1952,1996,4.0): print(years) # - # ### Deliverables # - Error-free script # - Correct Output (years 1952,1956 .... 
, 1996) # - Explain the repairs (either direct comment in the code or narrative in markdown cell) # --- # # ## Question 3 (10 pts): # # The script below is intended to compute the cosine of values from 0 to 10 (inclusive), store them in a separate list called ylist, and ultimately print out a table. However, the programmer keeps getting an error and has become frustrated with it! Please have a look at the code, the error(s), and the output and fix it so that it does what the user wants. Then, explain what was wrong with the script and how you have managed to fix it. # + jupyter={"outputs_hidden": false} xlist = () ylist = () print(" Cosines ") print(" x ","|"," cos(x) ") print("--------|--------") for x in range(0,10): xlist.append(x*1.0) ylist.append(math.cos(x*1.0)) print("%.3f" % xlist[x], " |", " %.2f " % ylist[x]) # - # ### Deliverables # - Error-free script # - Correct Output (a table of x and cos(x)) for x values of (0.000,1.000,...,10.000) # - Explain the repairs (either direct comment in the code or narrative in markdown cell) # **************************************************************************************************************** # # Question 4 (10 pts): # # Create a function, named "myplotfunc" that takes the following positional arguments as inputs: # # - A list of numeric for x # - A list of numeric for y # - A string for the plot title # - A string for the x-axis label # - A string for the y-axis label <br> # # and produces (plots) a curve with "red" color, and a '\*' shaped marker, and the 'dashdot' linestyle. # # Use your "myplotfunc" function to create a plot of: # # $$ y = -5x^5 +4sin(x)^4 -3x^3 +2cos(x)^2 -x $$ # # for x raging from -10 to 10 (inclusive) in steps of 0.5. 
# + jupyter={"outputs_hidden": false} # build your script here # - # ### Deliverables # - Error-free script # - Correct Output (a plot of x and y) for x values of (-10.0,-9.5,...,10.0) # - Correct color for curve # - Correct line style # - Correct marker type # - Axes labels meaningful # - Plot title meaningful # --- # # ## Question 5 (20 pts): # Follow the steps below: # 1. STEP 0: install necessary libraries (numpy and pandas) # 2. STEP 1: There are 8 digits in your R#. Define a 2x4 array with these 8 digits, name it "Rarray", and print it # step 0 and step 1 go here! # 3. STEP 2: Find the maximum value of the "Rarray" and its position # step 2 goes here! # 4. STEP 3: Sort the "Rarray" along the rows, store it in a new array named "Rarraysort", and print the new array out # step 3 goes here! # 5. STEP 4: Define and print a 4x4 array that has the "Rarray" as its two first rows, and "Rarraysort" as its next rows. Name this new array "DoubleRarray" # + # step 4 goes here! # - # 6. STEP 5: Slice and print a 4x3 array from the "DoubleRarray" that contains the last three columns of it. Name this new array "SliceRarray". # step 5 goes here! # 7. STEP 6: Define the "SliceRarray" as a panda dataframe: # - name it "Rdataframe", # - name the rows as "Row A","Row B","Row C", and "Row D" # - name the columns as "Column 1", "Column 2", and "Column 3" # + # step 6 goes here! # - # 8. STEP 7: Print the first few rows of the "Rdataframe". # + # step 7 goes here! # - # 9. STEP 8: Create a new dataframe object ("R2dataframe") by adding a column to the "Rdataframe", name it "Column X" and fill it with "None" values. Then, use the appropriate descriptor function and print the data model (data column count, names, data types) of the "R2dataframe" # # + # step 8 goes here! # - # 10. STEP 9: Replace the **'None'** in the "R2dataframe" with 0. Then, print the summary statistics of each numeric column in the data frame. # # + # step 9 goes here! # - # 11. 
STEP 10: Define a function based on the equation below: # # $$ y = x^2 - 5x +7 $$ # # apply the function to the entire "R2dataframe", store the results in a new dataframe ("R3dataframe"), and print the results and the summary statistics again. # + # step 10 goes here! # - # 12. STEP 11: Print the number of occurrences of each unique value in "Column 3" # # + # step 11 goes here! # - # 13. STEP 12: Sort the data frame with respect to "Column 1" with a descending order and print it # # # + # step 12 goes here! # - # 14. STEP 13: Write the final format of the "R3dataframe" on a CSV file, named "Rfile.csv" # # + # step 13 goes here! # - # 15. STEP14: Read the "Rfile.csv" and print its content.<br> # ** __Make sure to attach the "Rfile.csv" file to your midterm exam submission.__ # + # step 14 goes here! # - # --- # ## Question 6 (10 pts): # # Create a class to compute the average grade (out of 10) of the students based on their grades in Quiz1, Quiz2, the Mid-term, Quiz3, and the Final exam.__ # # | Student Name | Quiz 1 | Quiz 2 | Mid-term | Quiz 3 | Final Exam | # | ------------- | -----------| -----------| -------------| -----------| -------------| # | Harry | 8 | 9 | 8 | 10 | 9 | # | Ron | 7 | 8 | 8 | 7 | 9 | # | Hermione | 10 | 10 | 9 | 10 | 10 | # | Draco | 8 | 7 | 9 | 8 | 9 | # | Luna | 9 | 8 | 7 | 6 | 5 | # # 1. Use docstrings to describe the purpose of the class. # 2. Create an object for each student and display the output as shown below. # # "Student Name": **Average Grade** # # 3. Create and print out a dictionary with the student names as keys and their average grades as data. 
# # + # script goes here # - # ### Deliverables # - Error-free script # - Print each student and average grade # - Print entire dictionary of students and averages # # --- # ## Question 7 (10 pts): # Fun with functions: # # When it is 8:00 in Lubbock, # - It is 9:00 in New York # - It is 14:00 in London # - It is 15:00 in Cairo # - It is 16:00 in Istanbul # - It is 19:00 in Hyderabad # - It is 22:00 in Tokyo <br> # # Write a function that reports the time in New York, London, Cairo, Istanbul, Hyderabad, and Tokyo based on the time in Lubbock. Use a 24-hour time format. Include error trapping that:<br> # # 1. Issues a message like "Please Enter A Number from 00 to 23" if the first input is numeric but outside the range of [0,23].<br> # 2. Takes any numeric input for "Lubbock time" selection , and forces it into an integer.<br> # 3. Issues an appropriate message if the user's selection is non-numeric.<br> # # Test your function for these times: # - 8:00 # - 15:00 # - 0:00 # + # script goes here # - # ### Deliverables # - Error-free script # - Runs that demonstrate: # - Initial prompt for input # - Detect non-numeric entry issues message # - Detect numeric entry, if acceptable perform time conversions # - Runs (3) for the requested times # # --- # ### Question 8 (20 pts): # # Write a pseudo-code or an algorithm for the problem below. Add comments and clearly explain what you want to happen in each step and why. 
**Describe the algorithm, DO NOT write a python script for this problem -- a flowchart, pseudo-code, or detailed algorithm is sufficient**
#
# *The "Treasure Hunt Problem" is from HackerRank.com, available at https://www.hackerrank.com/contests/startatastartup/challenges/treasure-hunt*
#
# #### Treasure Hunt Problem
#        +-------------------------+
#        ¦ 34 ¦ 21 ¦ 32 ¦ 41 ¦ 25 ¦
#        +----+----+----+----+-----¦
#        ¦ 14 ¦ 42 ¦ 43 ¦ 14 ¦ 31 ¦
#        +----+----+----+----+-----¦
#        ¦ 54 ¦ 45 ¦ 52 ¦ 42 ¦ 23 ¦
#        +----+----+----+----+-----¦
#        ¦ 33 ¦ 15 ¦ 51 ¦ 31 ¦ 35 ¦
#        +----+----+----+----+-----¦
#        ¦ 21 ¦ 52 ¦ 33 ¦ 13 ¦ 23 ¦
#        +-------------------------+
#
# Do you like treasure hunts? In this problem you are to write a program to explore the above array for a treasure. The values in the array are clues. Each cell contains an integer between 11 and 55; for each value the ten's digit represents the row number and the unit's digit represents the column number of the cell containing the next clue. Starting in the upper left corner (at 1,1), use the clues to guide your search of the array. (The first three clues are 11, 34, 42). The treasure is a cell whose value is the same as its coordinates. Your program must first read in the treasure map data into a 5 by 5 array. Your program should output the cells it visits during its search, and a message indicating where you found the treasure.

# +
# change this cell to markdown; put your algorithm here
# insert figures in markdown as
# ![](figure_file_name.png)
# -

# ---
#
# ## Bonus Question for extra credit!
#
# Create a VOLUME Function to compute the volume of Cylinders, Spheres, Cones, and Rectangular Boxes. This function should:
# - First, ask the user about __the shape of the object__ of interest using something like:<br>
#     - *"Please choose the shape of the object. 
Enter 1 for "Cylinder", 2 for "Sphere", 3 for "Cone", or 4 for "Rectangular Box""*<br> # - Second, based on user's choice in the previous step, __ask for the right inputs__. # - Third, print out an statement with __the input values and the calculated volumes__. # # #### Include error trapping that: # # 1. Issues a message that *"The object should be either a Cylinder, a Sphere, a Cone, or a Rectangular Box. Please Enter A Number from 1,2,3, and 4!"* if the first input is non-numeric. # 2. Takes any numeric input for the initial selection , and force it into an integer. # 4. Issues an appropriate message if the user's selection is numeric but outside the range of [1,4] # 3. Takes any numeric input for the shape characteristics , and force it into a float. # 4. Issues an appropriate message if the object characteristics are as non-numerics. # # #### Test the script for: # 1. Sphere, r=10 # 2. r=10 , Sphere # 3. Rectangular Box, w=5, h=10, l=0.5 # # # - <font color=orange>__Volume of a Cylinder = ฯ€rยฒh__</font> # - <font color=orange>__Volume of a Sphere = 4(ฯ€r3)/3__</font> # - <font color=orange>__Volume of a Cone = (ฯ€rยฒh)/3__</font> # - <font color=orange>__Volume of a Rectangular Box = whl__</font> # + # script goes here # - # ### Deliverables # - Error-free script # - Runs that demonstrate: # - Initial prompt for input # - Message for incorrect inputs # - Secondary prompt for input based on shape # - Message for incorrect inputs # - Compute and report values # - Runs (3) for the requested shapes # # ---
5-ExamProblems/Exam1/Exam1/spring2021/.ipynb_checkpoints/Exam1-Spring2021-Deploy-Copy1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from itertools import combinations import numpy as np import networkx as nx import matplotlib.pyplot as plt # %matplotlib inline # - # ## algorithm def k_cliques(graph): # 2-cliques cliques = [{i, j} for i, j in graph.edges() if i != j] k = 2 while cliques: # result yield k, cliques # merge k-cliques into (k+1)-cliques cliques_1 = set() for u, v in combinations(cliques, 2): w = u ^ v if len(w) == 2 and graph.has_edge(*w): cliques_1.add(tuple(u | w)) # remove duplicates cliques = list(map(set, cliques_1)) k += 1 def print_cliques(graph): for k, cliques in k_cliques(graph): print('%d-cliques: #%d, %s ...' % (k, len(cliques), cliques[:3])) # ## graph #1 nodes = 6 graph = nx.Graph() graph.add_nodes_from(range(nodes)) graph.add_edges_from(combinations(range(nodes), 2)) plt.figure(figsize=(8, 8)) nx.draw_networkx(graph) print_cliques(graph) # ## graph #2 nodes, edges = 10, 50 graph = nx.Graph() graph.add_nodes_from(range(nodes)) graph.add_edges_from(np.random.randint(0, nodes, (edges, 2))) plt.figure(figsize=(12, 12)) nx.draw_networkx(graph) print_cliques(graph) # ## graph #3 nodes, edges = 100, 1000 graph = nx.Graph() graph.add_nodes_from(range(nodes)) graph.add_edges_from(np.random.randint(0, nodes, (edges, 2))) print_cliques(graph)
100days/day 64 - k-clique.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # These exercises follow on from the [array indexing page](../03/array_indexing). # # Run this cell to start: import numpy as np # So far we have seen arrays that contain numbers. Here is an # array that contains strings: some_words = np.array(['to', 'be', 'or', 'not', 'to', 'be']) some_words # ### Indexing with integers # # Use indexing with integers to display the first word in # `some_words`. # + # Your code here # - # Use indexing with integers to display the word "not". # + # Your code here # - # Here is a Boolean array that has `True` where the word in the matching position of `some_words` is "be": bees = some_words == 'be' bees # Use Boolean indexing to show an array with the two instances of "be" from `some_words`. # # + # Your code here # - # Use Boolean indexing to show an array with the two instances of "to" from `some_words`. # + # Your code here # - # Use Boolean indexing to show an array with the single instance # of "not" from `some_words`. # + # Your code here
notebooks/exercises/array_indexing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Question 1

# ### 1.1
# False
#
# MV optimization does not optimize based on the Sharpe ratio. Rather, it minimizes the variance for a given target return.
#
# ### 1.2
# False
#
# Leveraged ETFs can usually match daily returns well, but over longer timeframes leveraged ETFs usually fail to exactly replicate the underlying's returns times the leverage.
#
# ### 1.3
# We should include an intercept. If we don't include an intercept, the betas will try to chase the trend (mean). Since the mean is poorly estimated, the betas will be wonky. When we include an intercept, we allow the betas to only try to capture the variation.
#
# ### 1.4
# HDG tracks HFRI decently in sample. However, since it is based on lagging factors, it doesn't do as well out of sample.
#
# ### 1.5
# It could be that the 6 Merrill Lynch factors are not good at explaining the particular returns of a single hedge fund. We can look at the R^2 to see how well our factors explain the returns.
#
# Additionally, the Merrill Lynch factors were used to track the returns of thousands of hedge funds. Since we know hedge fund returns have high kurtosis, there are lots of outliers. It is possible that the factors don't explain the returns of one particular hedge fund well. 
# ## Question 2

# +
import numpy as np
import pandas as pd
import os

#importing data
# Excess returns: subtract the 3-month T-bill column from every series,
# then drop the T-bill column itself.
data = pd.read_excel('proshares_analysis_data.xlsx', sheet_name='merrill_factors').set_index('date')
data_rf = data.subtract(data["USGG3M Index"], axis=0)
data_rf = data_rf.drop(columns=["USGG3M Index"])
data_rf.head(5)

# +
#define tangent function
def tangent_portfolio(df, annualizing_factor, diagonalize):
    """Tangency-portfolio weights from a frame of (excess) returns.

    Returns (weights, inverse covariance, covariance used, annualized mean).
    If diagonalize is True the covariance is replaced by its diagonal,
    which regularizes the inversion.
    """
    #define sigma matrix
    df_sigma = df.cov()
    #define n to be used for the ones vector
    n = df_sigma.shape[0]
    #inverse sigma
    #if diagonalizing is required, set diagonalize to True
    df_sigma_adj = df_sigma.copy()
    if diagonalize:
        df_sigma_adj.loc[:,:] = np.diag(np.diag(df_sigma_adj))
    df_sigma_inv = np.linalg.inv(df_sigma_adj)
    #define mu
    df_mu = (df*annualizing_factor).mean()
    #calculate tangent portfolio, @ is used for matrix multiplication
    # weights are Sigma^-1 mu, normalized so they sum to one
    omega_tan = df_sigma_inv @ df_mu / (np.ones(n) @ df_sigma_inv @ df_mu)
    #map omega_tan back to pandas
    df_omega_tan = pd.Series(omega_tan, index=df_mu.index)
    #return omega_tan, sigma_inv, mu
    return df_omega_tan, df_sigma_inv, df_sigma_adj, df_mu

# +
# function to calculate target MV portfolio
def target_mv_portfolio(df, target_return, annualizing_factor, diagonalize):
    """Scale the tangency portfolio to hit target_return (annualized).

    Returns (delta, weights, covariance used, annualized mean), where delta
    is the fraction allocated to the risky tangency portfolio (1 - delta
    sits in the risk-free asset).
    """
    #calculate omega, sigma^-1, sigma, and mu from the tangent_portfolio function
    omega_tan, sigma_inv, Sigma , mu_tilde = tangent_portfolio(df, annualizing_factor, diagonalize=diagonalize)
    #inverse sigma
    #if diagonalizing is required, set diagonalize to True
    Sigma_adj = Sigma.copy()
    if diagonalize:
        Sigma_adj.loc[:,:] = np.diag(np.diag(Sigma_adj))
    # NOTE(review): Sigma_inv below is computed but never used -- delta uses
    # sigma_inv returned by tangent_portfolio. Dead-code candidate.
    Sigma_inv = np.linalg.inv(Sigma_adj)
    #define n to be used for the ones vector
    n = len(omega_tan)
    delta = (np.ones(n) @ sigma_inv @ mu_tilde)/(mu_tilde.transpose() @ sigma_inv @ mu_tilde) * target_return
    #final weights are allocated weights to risky asset * weights in the risky assets
    omega_star = delta * omega_tan
    return delta, omega_star, Sigma_adj, mu_tilde

# +
# function to evaluate performance measure of a portfolio
def performance_measure(omega, mu_tilde, Sigma, annualizing_factor):
    """Annualized mean, volatility, and Sharpe ratio of weights omega."""
    #mean of omega_p is omega_p' * mean excess return annualized
    omega_p_mu = mu_tilde @ omega
    #vol of omega_p is omega_p' * sigma * omega_p
    omega_p_sigma = np.sqrt(omega.transpose() @ Sigma @ omega) * np.sqrt(annualizing_factor)
    #sharpe ratio of omega_tan
    sharpe_p = omega_p_mu / omega_p_sigma
    # returns portfolio mean, volatility and sharpe ratio
    #return omega_p_mu, omega_p_sigma, sharpe_p
    return pd.DataFrame(data = [omega_p_mu, omega_p_sigma, sharpe_p],
                        index = ['Mean', 'Volatility', 'Sharpe'],
                        columns = ['Portfolio Stats'])

# +
#2.1
#annualized using 12 here
omega_tan, sigma_inv, sigma, mu_tilde = tangent_portfolio(data_rf, 12, False)
#this is the tangent portfolio weights
omega_tan

# +
#2.2
# target 2% monthly = 24% annualized excess return
delta, omega_star, sigma, mu_tilde = target_mv_portfolio(data_rf, 0.02*12, 12, False)
print(delta) # this is how much is allocated to the portfolio, 1-delta is allocated to risk free
print(omega_star) # this is the final weights of the portfolio

# +
#2.3
performance_measure(omega_star, mu_tilde, sigma, 12)

# +
#2.4
#using only data through 2018 to get omega, run performance on 2019-2021
# NOTE(review): .loc['2018':, :] selects 2018 ONWARD, but the comment says
# "through 2018" (which would be .loc[:'2018']); also this slices `data`
# (raw returns incl. the T-bill column) rather than `data_rf` -- confirm.
data_rf_2018 = data.loc['2018':, :]
data_rf_19_21 = data.loc['2019':'2021', :]
#getting omega star from data up to 18
_, omega_star_2018, _, _ = target_mv_portfolio(data_rf_2018, 0.02*12, 12, False)
#need to get mu and siga from 19-21 data
_, _, sigma_19_21, mu_tilde_19_21 = tangent_portfolio(data_rf_19_21, 12, False)
performance_measure(omega_star_2018, mu_tilde_19_21, sigma_19_21, 12)
# -

# #### 2.5
# I think the out of sample fragility problem would be better if we are optimizing on commodities because commodities are less correlated to each other than equities. Less correlation allows the inversion of the sigma to be better. 
# ## Question 3 # + # regression function without int import warnings import statsmodels.api as sm def regression_no_int(df_y, df_x): with warnings.catch_warnings(): warnings.simplefilter("ignore") y = df_y #X = sm.add_constant(df_x) X = df_x #use if no intercept results = sm.OLS(y,X, missing='drop').fit() #alpha = results.params['const'] #comment out if no intercept #beta = results.params.drop(index='const') beta = results.params #use if no intercept r_squared = results.rsquared residuals = results.resid res_volatility = residuals.std() summary = results.summary() return _, beta, r_squared, res_volatility, summary # - #regress EEM on SPY, do not include intercept _, hedge_beta, _, _, _ = regression_no_int(data_rf['EEM US Equity'],data_rf['SPY US Equity']) # + # 3.1 print(hedge_beta) #since beta on the regression is 0.92566, The optimal hedge ration is 0.92566. #So for every dollar invested in EEM, you would invest -0.92556 in SPY # + # 3.2 #calculate the returns of the hedged position data_EEM_hedge = data_rf[['EEM US Equity']].subtract(data["SPY US Equity"]*0.92566, axis=0) data_EEM_hedge.head() # + #define a function to calculate sumarry stats def summary_stats(data, annualizing_factor): mu = data.mean() * annualizing_factor # sigma should be annualized by multiplying sqrt from the monthly std -> sigma = data.std()*np.sqrt(12) # which is equivalent to doing this: sigma = data.std() * np.sqrt(annualizing_factor) sharpe = mu/sigma table = pd.DataFrame({'Mean':mu, 'Volatility':sigma, 'Sharpe':sharpe}).sort_values(by='Sharpe') return round(table, 4) # - summary_stats(data_EEM_hedge, 12) #the mean, vol, sharpe of the hedged position is shown below # 3.3 # # mean of the non-hedged EEM is shown below # the means are clearly different between the hedged and non-hedged positions. # this is because after the hedge, we are just left with alpha and epsilon so we should expect the means to be different. 
summary_stats(data_rf[['EEM US Equity']], 12) # 3.4 # # If we also included IWM as a regressor, the regression might be difficult to use because IWM and SPY are very correlated. Having the additional factor does not add much to attribution if the factors are very correlated. # # ## Question 4 # + # 4.1 import scipy #take the logs of both returns data_spy_log = np.log(data[['SPY US Equity']]+1) data_efa_log = np.log(data[['EFA US Equity']]+1) #find the log mean returns spy_log_mu = data_spy_log.mean()*12 efa_log_mu = data_efa_log.mean()*12 #find the sigma spy_log_vol = data_spy_log.std() * np.sqrt(12) efa_log_vol = data_efa_log.std() * np.sqrt(12) #for some reason I can't use the variable names to calculate the z score, so I hard coded the equation #x =-np.sqrt(10) * (spy_log_mu - efa_log_mu)/spy_log_vol is the equation i used x = np.sqrt(10) * (0.137664 - 0.056607)/0.135106 val = scipy.stats.norm.cdf(x) print('The probability spy outperforms efa over the next 10 years is ' + str(val*100) + '%') # + # 4.2 # rolling volatility of EFA m = 60 sigma_roll = data['EFA US Equity'].shift(1).dropna().rolling(m).apply(lambda x: ((x**2).sum()/m)**(0.5), raw=False).dropna() #sigma_roll is the rolling volatility of EFA sigma_roll.tail(10) # + print(sigma_roll.tail(1)) #the Sept 2021 estimate of vol is 0.041899 #z score for 1st quantile in a normal distribution is approximately -2.33 #assuming the question is asking for a VaR return var = -2.33*0.041899 print('Sept 2021 estimate of 1-mon, 1% VaR return is '+str(var))
solutions/mid1/submissions/lijin_167588_6241746_FINM 36700 Midterm 1_Jin Li.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Convert a VoTT annotation export into the ROI layout JSON consumed by the
# OMR (optical mark recognition) exam-sheet pipeline.

# +
import json

# Global running counters so every ROI / cell across the whole layout gets a
# unique, monotonically increasing id.
roiIndex = 1
cellIndex = 1


def get_annotation(filename):
    """Load a VoTT export file and return its list of tagged regions."""
    with open(filename) as f:
        data = json.load(f)
    return data['regions']


def get_rois(regions, tagGroup, formatNameLookup):
    """Build ROI entries for every region whose first tag starts with *tagGroup*.

    ROLLNUMBERID* tags are digit fields (NUMERIC_CLASSIFICATION); everything
    else is an OMR bubble (CELL_OMR).  VoTT's top/left/width/height bounding
    boxes are converted to absolute top/left/bottom/right rectangles.
    """
    global roiIndex
    rois = []
    index = 0
    for region in regions:
        tag = region['tags'][0]
        if not tag.startswith(tagGroup):
            continue
        if tag.startswith("ROLLNUMBERID"):
            extractionValue = "NUMERIC_CLASSIFICATION"
        else:
            extractionValue = "CELL_OMR"
        box = region['boundingBox']
        rois.append({
            # Fall back to the raw tag when no canonical name is registered.
            "annotationTags": formatNameLookup.get(tag, tag),
            "extractionMethod": extractionValue,
            "roiId": str(roiIndex),
            "index": index,
            "rect": {
                "top": int(box['top']),
                "left": int(box['left']),
                "bottom": int(box['top']) + int(box['height']),
                "right": int(box['left']) + int(box['width']),
            },
        })
        index += 1
        roiIndex += 1
    return rois


def get_cells(regions, tagGroups, formatLookup, formatNameLookup):
    """Return one cell entry (with its ROIs) per tag group, in render order."""
    global cellIndex
    cells_data = []
    for renderIndex, tagGroup in enumerate(tagGroups, start=1):
        tag = str(tagGroup)
        cells_data.append({
            "cellId": str(cellIndex),
            "rois": get_rois(regions, tagGroup, formatNameLookup),
            "render": {"index": renderIndex},
            "format": {
                # Fall back to the raw tag when no localisation is registered.
                "name": formatNameLookup.get(tag, tag),
                "value": formatLookup.get(tag, tag),
            },
            "validate": {"regExp": ""},
        })
        cellIndex += 1
    return cells_data


def get_layout(cells):
    """Wrap the cell list in the top-level layout envelope."""
    return {
        "layout": {
            "version": "1.0",
            "name": "HINDI4S20Q Exam Sheet Form",
            "cells": cells,
        }
    }


def pp_json(json_thing, sort=True, indents=4):
    """Pretty-print a JSON string or an already-parsed object."""
    if isinstance(json_thing, str):
        json_thing = json.loads(json_thing)
    print(json.dumps(json_thing, sort_keys=sort, indent=indents))
    return None
# -

# +
# Question tags use English number words; build every lookup table
# programmatically instead of maintaining ~160 hand-written literal entries.
# This also fixes inconsistencies in the original literals (stray trailing
# spaces in several display values and 'ROLLNUMBERID4' missing its digit).
_QUESTION_WORDS = ["ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN",
                   "EIGHT", "NINE", "TEN", "ELEVEN", "TWL", "THRTN", "FORTN",
                   "FIFT", "SXTN", "SVNTN", "EGTN", "NINT", "TWENTY"]

tagGroups = []         # cell order on the sheet: id field, then its 20 questions
formatLookup = {}      # tag -> Hindi display value, e.g. 'छात्र आईडी1 Q3'
formatNameLookup = {}  # tag -> canonical name, e.g. 'ROLLNUMBER1_Q3'

for student in range(1, 5):
    tagGroups.append(f"ROLLNUMBERID{student}")
    formatLookup[f"ROLLNUMBERID{student}"] = f"छात्र आईडी{student}"
    for qnum, word in enumerate(_QUESTION_WORDS, start=1):
        tag = f"ROLLNUMBER{student}_Q{word}"
        tagGroups.append(tag)
        formatLookup[tag] = f"छात्र आईडी{student} Q{qnum}"
        formatNameLookup[tag] = f"ROLLNUMBER{student}_Q{qnum}"
# -

# Running the conversion end-to-end needs the VoTT export file, so guard it to
# keep the module importable without side effects.
if __name__ == "__main__":
    regions = get_annotation("HINDI4S20Q_vottraw.json")
    cells = get_cells(regions, tagGroups, formatLookup, formatNameLookup)
    pp_json(get_layout(cells), False)
specs/v1.5/jupyter-notebook/transform_HINDI4S20Q_vott_to_roi.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Text Analysis
# Example notebook

# All the imports
from pprint import pformat, pprint
import requests
import json

import nltk  # FIX: `nltk` itself is used throughout (download, corpus, tokenize) but was never imported
from bs4 import BeautifulSoup
from nltk.sentiment import SentimentIntensityAnalyzer
from nltk.stem import PorterStemmer
import numpy

# +
# Use api to get news articles about a specific ticker
url = "https://apidojo-yahoo-finance-v1.p.rapidapi.com/stock/get-news"
querystring = {"category": "IBM", "region": "US"}
headers = {
    'x-rapidapi-key': "",  # supply your RapidAPI key here
    'x-rapidapi-host': "apidojo-yahoo-finance-v1.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers, params=querystring)
# -

# Check if response is successful then get text
# Otherwise, we would wait try the call again
if response.status_code == 200:
    raw_response = response.text

# Print raw response
print(pformat(raw_response)[:2500])

# Convert api response to json and extract text content for all articles
# FIX: `response_dict` was used below but never assigned, which raised a
# NameError; parse it from the raw response text here.
response_dict = json.loads(raw_response)

raw_text_list = []
for r in response_dict['items']['result']:
    if r['content']:
        raw_text_list.append(r['content'])
print(len(raw_text_list))

# get text titles for all articles
raw_title_list = []
for r in response_dict['items']['result']:
    if r['content']:
        raw_title_list.append(r['title'])
print(len(raw_title_list))

# Grab a short one to use as an example
raw_text = min(raw_text_list, key=len)

# I like number 6 though...
raw_text = raw_text_list[5]

# Remove html tags
soup = BeautifulSoup(raw_text)
clean_text = soup.get_text()
print(clean_text)

# ---
# ### What do we need?
# - stopwords: A list of really common words, like articles, pronouns, prepositions, and conjunctions
# - punkt: A data model created by <NAME> that NLTK uses to split full texts into word lists

# Download the appropriate dictionaries and corpora
nltk.download()

# Let's look at some stop words
stopwords = nltk.corpus.stopwords.words("english")
stopwords[:25]

# Tokenize text by sentence and word
sentences = nltk.sent_tokenize(clean_text)
tokens = nltk.word_tokenize(clean_text)

# How many words do we have?
print("sentences", len(sentences))
print("words", len(tokens))

# Remove stop words
word_list = [t for t in tokens if t not in stopwords]
print("non-stop words", len(word_list))

# Remove stop words and lower before check, see the difference?
word_list = [t for t in tokens if t.lower() not in stopwords]
print("lower non-stop words", len(word_list))

# What did we remove?
stop_words_removed = [t for t in tokens if t.lower() in stopwords]
pprint(set(stop_words_removed))

# Identify parts of speech; print first 50
parts_of_speech = nltk.pos_tag(word_list)
parts_of_speech[:50]

# Let's look at a frequency distribution of words
fd = nltk.FreqDist(word_list)
fd.most_common(20)

# Only get alphanumeric words
word_list = [word for word in word_list if word.isalnum()]

# Show word frequency
fd = nltk.FreqDist(word_list)
fd.tabulate(12)

# Lowered can be different frequency
lower_fd = nltk.FreqDist([w.lower() for w in word_list])
lower_fd.tabulate(12)

# Stemming
stemmer = PorterStemmer()  # reuse one stemmer instead of building one per word
stemmed_word_list = [stemmer.stem(w) for w in word_list]
for original, stemmed in zip(word_list[:30], stemmed_word_list[:30]):
    print(original, " => ", stemmed)

# Lowered can be different frequency
stemmed_lower_fd = nltk.FreqDist([w.lower() for w in stemmed_word_list])
stemmed_lower_fd.tabulate(12)

# ## What can we use word frequency for?
# Get a big list of words, like a million
# !wget http://norvig.com/big.txt

# +
# norvig.com/spell-correct.html
import re
from collections import Counter


def words(text):
    """Lower-case *text* and return every alphanumeric word in it."""
    return re.findall(r'\w+', text.lower())


# Word frequencies for the reference corpus.
# FIX: use a context manager so the file handle is closed deterministically
# instead of leaking the handle from a bare open().
with open('big.txt') as _corpus:
    WORDS = Counter(words(_corpus.read()))


def P(word, N=sum(WORDS.values())):
    "Probability of `word`."
    return WORDS[word] / N


def correction(word):
    "Most probable spelling correction for word."
    return max(candidates(word), key=P)


def candidates(word):
    "Generate possible spelling corrections for word."
    return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])


def known(words):
    "The subset of `words` that appear in the dictionary of WORDS."
    return set(w for w in words if w in WORDS)


def edits1(word):
    "All edits that are one edit away from `word`."
    letters = 'abcdefghijklmnopqrstuvwxyz'
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts = [L + c + R for L, R in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)


def edits2(word):
    "All edits that are two edits away from `word`."
    return (e2 for e1 in edits1(word) for e2 in edits1(e1))
# -

# Spell check all words
for w in word_list:
    if correction(w) != w:
        print(w, correction(w))

# Not quite what we had in mind, would be better to use a dictionary with
# more domain/industry specific terms

# NLTK built in sentiment analysis
sia = SentimentIntensityAnalyzer()
print("clean", sia.polarity_scores(clean_text))
print("raw", sia.polarity_scores(raw_text))

# Sentence sentiment
sentence_sentiment = []
for s in sentences:
    score = sia.polarity_scores(s)['compound']
    sentence_sentiment.append(score)
print("mean", numpy.mean(sentence_sentiment))
print("max", numpy.max(sentence_sentiment))
print("min", numpy.min(sentence_sentiment))

# TF-IDF; from sklearn example
from sklearn.feature_extraction.text import TfidfVectorizer

corpus = [
    'This is the first document.',
    'This document is the second document.',
    'And this is the third one.',
    'Is this the first document?',
]
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(corpus)
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2;
# switch to get_feature_names_out() when upgrading — confirm installed version.
feature_names = vectorizer.get_feature_names()
print(feature_names)
print(vectors.shape)

# Lets look at the vectors
import pandas as pd  # FIX: pandas was used here but never imported anywhere in this notebook

dense = vectors.todense()
denselist = dense.tolist()
df = pd.DataFrame(denselist, columns=feature_names)
df.head()

# Lets vectorize a new doc or string to query on
search_str = "find me the second doc"
response = vectorizer.transform([search_str])
print(response)

# Now that we have them vectorized, lets compare how close they are using cosine similarity
from sklearn.metrics.pairwise import cosine_similarity

cosine_similarity(response, dense)

# What does the tf-idf vector look like for our documents?
# Probably should clean up our input...
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(raw_text_list)
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2;
# switch to get_feature_names_out() when upgrading — confirm installed version.
feature_names = vectorizer.get_feature_names()
print(feature_names[:50])
print(vectors.shape)

# Now lets vectorize a query term
search_str = "dividends"
response = vectorizer.transform([search_str])
print(response)

# What documents are about dividends?
from sklearn.metrics.pairwise import cosine_similarity
cosine_similarity(response, vectors)

# ## Word2vec
# From gensim tutorial
# https://radimrehurek.com/gensim/models/word2vec.html

# Import Googles pre-trained w2c model
import gensim.downloader as api
wv = api.load('word2vec-google-news-300')

# What does the vector for keyword king look like?
vec_king = wv['king']
print(vec_king[:20])

# Can see similarities between keywords?
pairs = [
    ('car', 'minivan'),   # a minivan is a kind of car
    ('car', 'bicycle'),   # still a wheeled vehicle
    ('car', 'airplane'),  # ok, no wheels, but still a vehicle
    ('car', 'cereal'),    # ... and so on
    ('car', 'communism'),
]
for w1, w2 in pairs:
    print('%r\t%r\t%.2f' % (w1, w2, wv.similarity(w1, w2)))

# What keywords are most similar and limit to top 5
print(wv.most_similar(positive=['car', 'minivan'], topn=5))

# Build an array of array of tokens after removing html
sentences2 = [nltk.word_tokenize(BeautifulSoup(doc).get_text())
              for doc in raw_text_list]

# Display top 50 tokens of the first document
sentences2[0][:50]

# +
# Lets build a word2vec model
import gensim.models

model = gensim.models.Word2Vec(sentences=sentences2)
# -

# What does IBM look like as a vector from our model? Only first 20
# FIX: the next two cells queried `wv` (the pre-trained Google vectors)
# instead of the model trained just above; use `model.wv` as the markdown intends.
model.wv['IBM'][:20]

# What terms are most similar to IBM?
print(model.wv.most_similar(positive=['IBM', 'ibm'], topn=5))
# Looks good that International Business Machines matches!

# Now lets create some ngrams
# https://www.kaggle.com/rtatman/tutorial-getting-n-grams
import collections

bigrams = nltk.ngrams([a for s in sentences2 for a in s], 2)
bigram_fq = collections.Counter(bigrams)
bigram_fq.most_common(20)

# How about tri-grams?
trigrams = nltk.ngrams([a for s in sentences2 for a in s], 3)
trigram_fq = collections.Counter(trigrams)
trigram_fq.most_common(20)

# +
# Lets extract some topics using LDA via gensim
# https://towardsdatascience.com/the-complete-guide-for-topics-extraction-in-python-a6aaa6cedbbc
from gensim import corpora, models

dictionary_LDA = corpora.Dictionary(sentences2)
dictionary_LDA.filter_extremes(no_below=3)
corpus = [dictionary_LDA.doc2bow(list_of_tokens) for list_of_tokens in sentences2]

num_topics = 20
# FIX: the `%time` magic line was commented out by the notebook conversion,
# so `lda_model` was never defined and every later cell failed with a
# NameError.  (Prefix with %time in Jupyter to see the timing again.)
lda_model = models.LdaModel(corpus, num_topics=num_topics,
                            id2word=dictionary_LDA,
                            passes=4, alpha=[0.01] * num_topics,
                            eta=[0.01] * len(dictionary_LDA.keys()))
# -

# Lets look at the top 10 words for a topic group
for i, topic in lda_model.show_topics(formatted=True, num_topics=num_topics,
                                      num_words=10):
    print(str(i) + ": " + topic)
    print()

# Install LDA visualization package for ease of use
# !python -m pip install pyLDAvis

# Lets see what the topics look like
# %matplotlib inline
import pyLDAvis
import pyLDAvis.gensim_models

vis = pyLDAvis.gensim_models.prepare(topic_model=lda_model, corpus=corpus,
                                     dictionary=dictionary_LDA)
pyLDAvis.enable_notebook()
pyLDAvis.display(vis)

# +
# Now lets vectorize the raw text again from the corpus to check out kmeans clustering
# https://towardsdatascience.com/clustering-documents-with-python-97314ad6a78d
from sklearn.feature_extraction.text import TfidfVectorizer

for i, r in enumerate(raw_text_list):
    raw_text_list[i] = BeautifulSoup(r).get_text()

# FIX: stop_words={'english'} was a *custom* one-word stop list (the word
# "english"); the string 'english' selects the built-in English stop list.
vectorizer = TfidfVectorizer(stop_words='english')
X = vectorizer.fit_transform(raw_text_list)
print(X[0])
# -

# +
# Use Kmeans to fit clusters from 2 to 10 and plot it to see where our elbow
# is, to use as a good cluster size
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

Sum_of_squared_distances = []
K = range(2, 10)
for k in K:
    km = KMeans(n_clusters=k, max_iter=200, n_init=10)
    km = km.fit(X)
    Sum_of_squared_distances.append(km.inertia_)

plt.plot(K, Sum_of_squared_distances, 'bx-')
plt.xlabel('k')
plt.ylabel('Sum_of_squared_distances')
plt.title('Elbow Method For Optimal k')
plt.show()
# -

# Elbow at 6 clusters...
# Lets see how the documents got grouped together
true_k = 6
model = KMeans(n_clusters=true_k, init='k-means++', max_iter=200, n_init=10)
model.fit(X)
labels = model.labels_

import pandas as pd  # FIX: pandas was used here but never imported anywhere in this notebook

a_cl = pd.DataFrame(list(zip(raw_title_list, labels)), columns=['title', 'cluster'])
print(a_cl.sort_values(by=['cluster']))

# Install wordcloud package to see wordclouds easily for each cluster
# !python -m pip install wordcloud

# +
# For each cluster lets check out some wordclouds
from wordcloud import WordCloud

result = {'cluster': labels, 'wiki': raw_text_list}
result = pd.DataFrame(result)
for k in range(0, true_k):
    s = result[result.cluster == k]
    text = s['wiki'].str.cat(sep=' ')
    text = text.lower()
    text = ' '.join([word for word in text.split()])
    wordcloud = WordCloud(max_font_size=50, max_words=100,
                          background_color="white").generate(text)
    print('Cluster: {}'.format(k))
    print('Titles')
    # FIX: the original referenced an undefined `wiki_cl`; the per-cluster
    # titles live in the `a_cl` frame built above.
    titles = a_cl[a_cl.cluster == k]['title']
    print(titles.to_string(index=False))
    plt.figure()
    plt.imshow(wordcloud, interpolation="bilinear")
    plt.axis("off")
    plt.show()
# -
code/text_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Capture an image from the webcam, OCR its text in a user-chosen language,
# translate the text to another language, and read the result aloud.

# +
# Tesseract OCR language codes, keyed by the language name typed by the user.
# FIX: replaces a ~29-branch if/elif chain with a dict lookup.
LANG_TESSERACT = {
    'hindi': 'hin', 'bengali': 'ben', 'english': 'eng', 'spanish': 'spa',
    'chinese': 'chi_sim', 'russian': 'rus', 'japanese': 'jpn',
    'italian': 'ita', 'korean': 'kor', 'kannada': 'kan', 'german': 'deu',
    'african': 'afr', 'arabic': 'ara', 'bulgarian': 'bul', 'dutch': 'nld',
    'french': 'fra', 'indonesia': 'ind', 'urdu': 'urd', 'turkish': 'tur',
    'tamil': 'tam', 'swedish': 'swe', 'romanian': 'ron', 'serbian': 'srp',
    'portuguese': 'por', 'persian': 'fas', 'latin': 'lat', 'irish': 'gle',
    'latvian': 'lav', 'polish': 'pol',
}

# Google Translate / gTTS language codes, keyed the same way.
# NOTE(review): the original output chain had no 'irish' entry even though OCR
# supports it; kept faithful to the original behavior.
LANG_TRANSLATE = {
    'hindi': 'hi', 'bengali': 'bn', 'english': 'en', 'spanish': 'es',
    'chinese': 'zh-CN', 'russian': 'ru', 'japanese': 'ja', 'italian': 'it',
    'korean': 'ko', 'kannada': 'kn', 'german': 'de', 'african': 'af',
    'arabic': 'ar', 'bulgarian': 'bg', 'dutch': 'nl', 'french': 'fr',
    'indonesia': 'id', 'urdu': 'ur', 'turkish': 'tr', 'tamil': 'ta',
    'swedish': 'sv', 'romanian': 'ro', 'serbian': 'sr', 'portuguese': 'pt',
    'persian': 'fa', 'latin': 'la', 'latvian': 'lv', 'polish': 'pl',
}


def capture_image():
    """Show a webcam preview; Space saves a frame, Escape quits.

    Returns the filename of the last saved frame, or None if no frame was
    captured (FIX: the original crashed on an undefined `img` in that case).
    """
    import cv2

    cam = cv2.VideoCapture(0)
    cv2.namedWindow("test")
    img = None
    img_counter = 0
    while True:
        ret, frame = cam.read()
        # FIX: check ret before imshow — the original displayed the frame
        # first and would pass None to imshow on a failed read.
        if not ret:
            break
        cv2.imshow("test", frame)
        k = cv2.waitKey(1)
        if k % 256 == 27:  # ESC
            print("Escape hit, closing...")
            break
        elif k % 256 == 32:  # SPACE
            img = "opencv_frame_{}.png".format(img_counter)
            cv2.imwrite(img, frame)
            print("{} written!".format(img))
            img_counter += 1
    cam.release()
    cv2.destroyAllWindows()
    return img


def ocr_image(img):
    """Ask for the source language and OCR *img* with tesseract."""
    from PIL import Image
    import pytesseract

    im = Image.open(img)
    lang = input("In which language your text is: ")
    short = LANG_TESSERACT.get(lang)
    if short is None:
        # FIX: the original printed a warning and then crashed on an
        # undefined `short`; exit with a clear message instead.
        raise SystemExit("You have entered wrong choice.")
    return pytesseract.image_to_string(im, lang=short)


def translate_and_speak(text):
    """Ask for a target language, translate *text*, and read it aloud."""
    from textblob import TextBlob
    from gtts import gTTS
    from playsound import playsound

    blob = TextBlob(text)
    target = input("In which language you want the output: ")
    code = LANG_TRANSLATE.get(target)
    if code is None:
        # FIX: the original set this message but then crashed on an undefined
        # language code; speak the message in English instead.
        out = "You have entered wrong choice."
        code = 'en'
    else:
        out = blob.translate(to=code)
    speech = gTTS(str(out), code)
    speech.save("hello.mp3")
    playsound("hello.mp3")


if __name__ == "__main__":
    img = capture_image()
    if img is None:
        raise SystemExit("No image captured.")
    translate_and_speak(ocr_image(img))
# -
HackSRM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Conditional expressions inside list comprehensions — worked examples.

# Label each of 0..7 by parity.
["Even" if i % 2 == 0 else "Odd" for i in range(8)]

# Keep numbers not divisible by 3; negate the even ones.
[val if val % 2 else -val for val in range(20) if val % 3]

# Same parity labelling over a longer range.
["Even" if i % 2 == 0 else "Odd" for i in range(10)]

# Odd numbers unchanged, even numbers scaled by 100.
[x if x % 2 else x * 100 for x in range(1, 10)]

# Two chained filters: multiples of both 2 and 5.
[y for y in range(100) if y % 2 == 0 if y % 5 == 0]

# Add 1 to values >= 45, otherwise add 5.
a = [22, 13, 45, 50, 98, 69, 43, 44, 1]
[x + 1 if x >= 45 else x + 5 for x in a]

# Same transformation; the parentheses make the conditional explicit.
v = [22, 13, 45, 50, 98, 69, 43, 44, 1]
[(x + 1 if x >= 45 else x + 5) for x in v]

# Conditional expression again: odd stays, even * 100.
[x if x % 2 else x * 100 for x in range(1, 10)]

# Indexing a two-element list with a boolean — same result, obscure style.
[[x * 100, x][x % 2 != 0] for x in range(1, 11)]

# The readable equivalent of the trick above.
[x if x % 2 != 0 else x * 100 for x in range(1, 10)]

# Truthiness as the condition: zeros become 2.
[a if a else 2 for a in [0, 1, 0, 3]]

["Even" if i % 2 == 0 else "Odd" for i in range(10)]

# Character-wise mapping over a string.
[("A" if b == "e" else "c") for b in "comprehension"]

# A nested (and here degenerate) conditional expression.
["A" if b == "e" else "d" if True else "x" for b in "comprehension"]

# Conditional expression plus a filter clause.
["A" if b == "e" else "c" for b in "comprehension" if "comprehension".index(b) % 2]

# Nested loops: each i repeated three times.
[i for i in range(3) for _ in range(3)]

# Multiple (even redundant) filter clauses are allowed.
[i for i in range(3) if i for _ in range(3) if _ if True if True]

# A conditional expression used as the filter itself.
[i for i in range(3) if (True if i else False)]

# A comprehension nested in the output expression.
[[i for j in range(i)] for i in range(3)]

# Set comprehension: unique characters.
{i for i in "set comprehension"}

# Dict comprehension from key/value pairs.
{k: v for k, v in [("key", "value"), ("dict", "comprehension")]}

# Values > 30 kept, smaller ones squared.
list1 = [10, 20, 30, 40, 50]
newlist2 = [x if x > 30 else x ** 2 for x in list1]
print(newlist2, type(newlist2))

# The >= 45 examples once more.
a = [22, 13, 45, 50, 98, 69, 43, 44, 1]
[x + 1 if x >= 45 else x + 5 for x in a]

v = [22, 13, 45, 50, 98, 69, 43, 44, 1]
[(x + 1 if x >= 45 else x + 5) for x in v]

# Filtering by type vs. replacing by type.
X = [1.5, 2.3, 4.4, 5.4, 'n', 1.5, 5.1, 'a']
X_non_str = [el for el in X if not isinstance(el, str)]
X_str_changed = ['b' if isinstance(el, str) else el for el in X]
print(X_non_str)
print(X_str_changed)

# Square the evens, cube the odds.
[i ** 2 if i % 2 == 0 else i ** 3 for i in [1, 2, 3, 4, 5]]

print([10 * i if i % 2 == 0 else 100 * i for i in range(6)])

# Classify by whether the value has a fractional part.
['float' if x % 1 else 'int' for x in range(10)]
['float' if (x + x % 2 / 10) % 1 else 'int' for x in range(10)]

# Nested comprehension: multiplication tables for 4, 5 and 6.
[[x * y for y in range(1, 11)] for x in range(4, 7)]

# Chained conditional expressions read left to right.
nums = [1, 3, 5, 7, 9, 11, 13, 15]
info = ["by 5" if num % 5 == 0
        else "by 3" if num % 3 == 0
        else "one" if num == 1
        else "other"
        for num in nums]
info
Data-Science-HYD-2k19/Topic-Wise/DATA-TYPES/List_Comprehension.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Generate passwords
#
# We use SHA1, SHA256, MD5 and bcrypt algorithms to hash generated passwords.

# +
import random as rand
import string

rand.seed()
# -

# +
def read_from_file(name, size=None):
    """Read a whitespace-separated word list, optionally truncated to *size*."""
    with open(name) as handle:
        entries = handle.read().split()
    return entries[:size] if size else entries


TOP_100 = read_from_file('top-100-passwords.txt', 100)
TOP_1M = read_from_file('top-1M-passwords.txt', 10**6)
ENGLISH_COMMON_WORDS = read_from_file('english-common-words.txt')


def generate_random():
    """A 5-10 character password drawn from letters, digits, '!' and '?'."""
    length = rand.randint(5, 10)
    alphabet = string.ascii_letters + string.digits + '!?'
    return ''.join(rand.choices(alphabet, k=length))


def generate_from(pass_list):
    """Pick one password from an existing list."""
    return rand.choice(pass_list)


def generate_random_readable():
    """A human-style password: common words plus optional digits / leet swaps."""
    # Decide the transformations up front, each with probability 1/2.
    prepend_numbers = rand.random() > 0.5
    append_numbers = rand.random() > 0.5
    replace_symbols = rand.random() > 0.5

    chosen = rand.sample(ENGLISH_COMMON_WORDS, rand.randint(2, 4))

    chars = []
    for word in chosen:
        # Stop adding words once the password is already long enough.
        if len(chars) > 5:
            break
        chars.extend(word)

    if prepend_numbers:
        chars = rand.choices(string.digits, k=rand.randint(3, 5)) + chars
    if append_numbers:
        chars.extend(rand.choices(string.digits, k=rand.randint(2, 4)))

    # Classic leet-speak substitutions; each character may swap to one of its
    # look-alikes or stay as-is.
    leet = {
        's': '$S5', 'i': 'l1!', 'a': '@A', 't': '7T', 'e': '3E',
        'g': '9G6', 'o': 'O0', 'b': '8B',
    }
    if replace_symbols:
        chars = [rand.choice(leet.get(ch, '') + ch) for ch in chars]
    return ''.join(chars)


def generate_password():
    """Draw a password: 5% top-100, 80% top-1M, 5% random, 10% readable."""
    TOP_100_PERCENTAGE = 5
    TOP_1M_PERCENTAGE = 80
    RANDOM_PERCENTAGE = 5
    RANDOM_READABLE_PERCENTAGE = 10

    roll = rand.randint(1, 100)
    if roll <= TOP_100_PERCENTAGE:
        return generate_from(TOP_100)
    roll -= TOP_100_PERCENTAGE
    if roll <= TOP_1M_PERCENTAGE:
        return generate_from(TOP_1M)
    roll -= TOP_1M_PERCENTAGE
    if roll <= RANDOM_PERCENTAGE:
        return generate_random()
    roll -= RANDOM_PERCENTAGE
    assert roll <= RANDOM_READABLE_PERCENTAGE
    return generate_random_readable()
# -

from Crypto.Protocol.KDF import bcrypt
from Crypto.Hash import MD5, SHA1, SHA256
from Crypto.Random import get_random_bytes


# +
def generate_hash(algo, password):
    """Hex digest of *password* under hash module *algo* (MD5/SHA1/SHA256)."""
    return algo.new(password.encode()).hexdigest()


def generate_bcrypt_hash(password, cost):
    """Return (bcrypt hash, hex salt) for *password* at the given cost factor."""
    salt = get_random_bytes(16)
    return bcrypt(password.encode(), cost, salt).decode(), salt.hex()


def generate_hashes():
    """Write 500k hashed passwords per algorithm to per-algorithm CSV files."""
    NUM_PASSWORDS = 500_000
    for algo in MD5, SHA1, SHA256:
        name = algo.__name__.split('.')[-1]
        with open(f'{name}.csv', 'w') as out:
            for _ in range(NUM_PASSWORDS):
                print(generate_hash(algo, generate_password()), file=out)
    BCRYPT_COST = 4
    with open(f'bcrypt-{BCRYPT_COST}.csv', 'w') as out:
        for _ in range(NUM_PASSWORDS):
            print(*generate_bcrypt_hash(generate_password(), BCRYPT_COST),
                  sep=',', file=out)
# -

generate_hashes()

# ## Find hash preimages
.ipynb_checkpoints/lab_4-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import nltk from nltk.book import * import numpy as np import matplotlib.pyplot as plt from nltk.corpus import gutenberg emma = gutenberg.words('austen-emma.txt') for fileid in gutenberg.fileids(): chars = len(gutenberg.raw(fileid)) words = len(gutenberg.words(fileid)) sents = len(gutenberg.sents(fileid)) vocab = len(set(w.lower() for w in gutenberg.words(fileid))) print(round(chars/words), round(words/sents), round(words/vocab), fileid) macbeth_sentences = gutenberg.sents('shakespeare-macbeth.txt') macbeth_sentences from nltk.corpus import webtext for fileid in webtext.fileids(): print(fileid, webtext.raw(fileid)[:65], '...') from nltk.corpus import reuters #reuters.categories() from nltk.corpus import inaugural #inaugural.fileids() #[fileid[:4] for fileid in inaugural.fileids()] cfd = nltk.ConditionalFreqDist( (target, fileid[:4]) for fileid in inaugural.fileids() for w in inaugural.words(fileid) for target in ['america', 'citizen', 'country', 'president'] if w.lower().startswith(target) ) plt.figure(figsize=[15,5]) cfd.plot() from nltk.corpus import udhr languages = ['Chickasaw', 'English', 'German_Deutsch','Greenlandic_Inuktikut', 'Hungarian_Magyar', 'Ibibio_Efik'] cfd = nltk.ConditionalFreqDist( (lang, len(word)) for lang in languages for word in udhr.words(lang + '-Latin1')) cfd.plot(cumulative=True) raw = gutenberg.raw("burgess-busterbrown.txt") raw[1:20] words = gutenberg.words("burgess-busterbrown.txt") words[1:20] entries = nltk.corpus.cmudict.entries() len(entries) for entry in entries[42371:42379]: print(entry) for word, pron in entries: if len(pron) == 3: ph1, ph2, ph3 = pron if ph1 == 'P' and ph3 == 'T': print(word, ph2, end=' ') entries = nltk.corpus.names.words() startletters = list(map(lambda n: n[0], entries)) 
plt.hist(startletters)
plt.show()

# +
# Conditional frequency distribution of name first letters by corpus file.
# NOTE(review): the original cell was a syntax error
# (`for target[0] in if w.lower().startswith(target)`); rewritten to match
# the working cell below, which is what the analysis intends.
names = nltk.corpus.names
cfd = nltk.ConditionalFreqDist(
    (fileid, name[0])
    for fileid in names.fileids()
    for name in names.words(fileid)
)
plt.figure(figsize=[15, 5])
cfd.plot()
# -

# First letter of each name, conditioned on the file it came from
# (male.txt vs female.txt).
names = nltk.corpus.names
cfd = nltk.ConditionalFreqDist(
    (fileid, name[0])
    for fileid in names.fileids()
    for name in names.words(fileid)
)
cfd.plot()
week6/chapter2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # HPDM097: Foundations of combinatorial optimisation for routing and scheduling problems in health # # Many healthcare systems manage assets or workforce that they need to deploy geographically. One example, is a community nursing team. These are teams of highly skilled nurses that must visit patients in their own home. Another example, is patient transport services where a fleet of non-emergency ambulances pick up patients from their own home and transport them to outpatient appointments in a clinical setting. These problems are highly complex. For example, in the community nursing example, patients will have a variety of conditions, treatments may be time dependent (for example, insulin injections), nurses will have mixed skills and staffing will vary over time. # # --- # # # The Travelling Nurse Problem # # For simplicity you will first consider a single asset that has to visit patients in their own home and ignore the complex constraints described above. We will frame this problem as the famous **Travelling Salesperson (or Nurse!) Problem (TSP).** # **By the end of this section you will have learnt how to:** # # * represent a routing and scheduling problem in a form suitable for solution by an optimisation algorithm # * solve small instances of the Travelling Salesman Problem (TSP) using a brute force approach # * solve and obtain good solutions to larger TSP problem by applying hill climbing algorithms in combination with stochastic algorithms # * understand and apply a more intelligent hill climbing approach called Iterated Local Search # # > Please use the conda environment `hds_logistics` when running this workbook. You will also need to run this workbook in the same directory as `metapy`. 
This is a small python package that contains the code to solve the TSP. # # Imports import numpy as np import matplotlib.pyplot as plt import time # # `metapy` package imports # + import metapy.tsp.tsp_io as io import metapy.tsp.euclidean as e from metapy.tsp.init_solutions import TSPPopulationGenerator from metapy.tsp.objective import SimpleTSPObjective, OptimisedSimpleTSPObjective from metapy.tsp.bruteforce import BruteForceSolver, RandomSearch from metapy.local_search.ils import (IteratedLocalSearch, HigherQualityHomeBase, RandomHomeBase, EpsilonGreedyHomeBase, AnnealingEpsilonGreedyHomeBase, TempFastCoolingSchedule, DoubleBridgePertubation, TabuDoubleBridgeTweak) from metapy.local_search.hill_climbing import (HillClimber, TweakTwoOpt, SimpleTweak, HillClimberRandomRestarts) from metapy.evolutionary.evolutionary import (EvolutionaryAlgorithm, MuLambdaEvolutionStrategy, MuPlusLambdaEvolutionStrategy, GeneticAlgorithmStrategy, ElitistGeneticAlgorithmStrategy, TwoOptMutator, TwoCityMutator, TruncationSelector, TournamentSelector, PartiallyMappedCrossover) # - # # Load the data. # # In this notebook, you will work with the famous **st70** problem from [TSPLib](http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsplib.html). # # > You will move onto a real health service dataset in **part 2** where you will work with the a more complex variant of this problem for routing and scheduling with multiple health service assets. # # The data is located in `data/st70.tsp`. The data format from TSPLib contains both metadata and 2D coordinates of 'cities'. The files therefore need some minor preprocessing before they are usable. # # > For efficiency you will work mainly with `numpy`. It of course possible to use `pandas` for this type of problem, but you will pay a heavy price in terms of execution time! 
# +
# load file
file_path = "data/st70.tsp"

# number of rows in the file that are meta data
md_rows = 6

# read the coordinates
cities = io.read_coordinates(file_path, md_rows)

# read the meta data
meta = io.read_meta_data(file_path, md_rows)

# should be a numpy.ndarray
print(type(cities))

# should be 70 cities
print(cities.shape)

# print first 2 coordinate pairs
print(cities[:2])

print("st70 meta data")
print(meta)
# -

# The meta data confirms that the problem is Euclidean 2D. This means that we
# need to calculate the euclidean distance between points.

# example of calculating a single euclidean distance
e.euclidean_distance(cities[0], cities[1])

# +
from decimal import Decimal, ROUND_HALF_UP


def gen_matrix(cities, as_integer=False):
    """
    Create a matrix of pairwise euclidean distances between cities.

    Parameters:
    ----------
    cities: numpy.ndarray
        city coordinate pairs, one row per city

    as_integer: bool, optional (default=False)
        If True, round each distance to the nearest int
        (halves round away from zero). Behaviour:
        1.5 -> 2
        1.2 -> 1
        1.8 -> 2

    Returns:
    -------
    np.ndarray
        size x size matrix of city to city costs
    """
    size = len(cities)
    matrix = np.zeros(shape=(size, size))
    for row, city1 in enumerate(cities):
        for col, city2 in enumerate(cities):
            distance = e.euclidean_distance(city1, city2)
            if as_integer:
                # Decimal + ROUND_HALF_UP gives 1.5 -> 2; int() alone would
                # truncate and builtin round() uses banker's rounding.
                distance = int(Decimal(distance).quantize(0, ROUND_HALF_UP))
            matrix[row, col] = distance
    return matrix


# +
# generate matrix
matrix = gen_matrix(cities, as_integer=True)

file_out = 'data/st70_matrix.csv'

# output city matrix - to validate and use for manual calcs etc.
np.savetxt(file_out, matrix, delimiter=",")
# -

matrix.shape

# # Representation

# While you develop your code it is recommended that you work with a small
# tour. This means that you can find the optimal solution by enumerating all
# solutions and check that your algorithm is working.
#
# Representation is straightforward in TSP. It is recommended that you create
# a `np.ndarray` as a vector of city indexes. For example in a TSP problem
# with 8 cities.
# create ordered list of cities to visit
tour = np.arange(8)
tour

# > Remember that the TSP is a loop. You need to remember this when
# > calculating the tour length

# # Calculating the length of a tour
#
# To calculate the length of a tour you can use either `SimpleTSPObjective`
# or `OptimisedSimpleTSPObjective`. For larger problems (e.g. a 70 city
# problem) you should find that `OptimisedSimpleTSPObjective` offers an
# efficiency boost (it runs quicker). But for smaller problems the overhead
# to set up the optimised approach means that `SimpleTSPObjective` is more
# efficient!
#
# The code below illustrates how to create each type of objective and how to
# use them to cost a tour. If you are interested try changing the tour size
# (up to a max of 70) and executing the code. It will report an average
# runtime.
#
# ```python
# #create a tour with 8 cities.
# tour = np.arange(8)
# ```

# create a tour
rng = np.random.default_rng(seed=42)
tour = np.arange(8)
rng.shuffle(tour)
tour

# create an instance of an objective object and cost a tour.
objective = SimpleTSPObjective(matrix)
objective.evaluate(tour)

objective.evaluate(tour)

# create an instance of an optimised objective function
objective2 = OptimisedSimpleTSPObjective(matrix)
objective2.evaluate(tour)

# The following code runs the `evaluate` method multiple times and reports
# average execution speed. This will vary by the system you are using and by
# the size of the problem instance.

# %timeit objective.evaluate(tour)

# %timeit objective2.evaluate(tour)

# # Visualising a tour
#
# A simple way to visualise a tour is to use matplotlib. The function
# `plot_tour` below has been provided to help you visualise a single tour.
#
# Run the code below. It should be easy to see that this isn't a very
# sensible tour if your objective is to simply minimise travel distance!

def plot_tour(tour, cities, figsize=(6, 4)):
    '''
    Plots a tour. Each city visited is labelled in order.
    A red dashed line shows the final leg looping back to the start.

    Params:
    ------
    tour: np.ndarray
        ordered vector representing tour e.g. [1, 4, 2, 3]

    cities: np.ndarray
        matrix representing city coordinates

    figsize: tuple, optional (default = (6, 4))
        tuple of ints for figure size

    Returns
    -------
    tuple of matplotlib figure, and axis
    '''
    tour_length = len(tour)
    fig, ax = plt.subplots(1, 1, figsize=figsize)

    # plot the cities actually contained in the tour.
    # (generalised from cities[:tour_length], which was only correct when
    # the tour is a permutation of the first len(tour) city indexes.)
    visited = cities[tour]
    ax.plot(visited[:, 0], visited[:, 1], 'bo')

    # plot lines between consecutive cities
    for j in range(len(tour) - 1):
        city_1 = tour[j]
        city_2 = tour[j + 1]

        # lookup coordinates
        coords_1 = cities[city_1]
        coords_2 = cities[city_2]
        coords = np.vstack([coords_1, coords_2])

        # plot lines
        ax.plot(coords[:, 0], coords[:, 1], 'g-')

        # show order in tour
        ax.text(coords_1[0] + 0.8, coords_1[1] + 0.8, str(j))

    # add in loop back to the first city, colour coded in red...
    city_1 = tour[-1]
    city_2 = tour[0]
    coords_1 = cities[city_1]
    coords_2 = cities[city_2]
    coords = np.vstack([coords_1, coords_2])
    ax.text(coords_1[0] + 0.8, coords_1[1] + 0.8, str(tour_length - 1))
    ax.plot(coords[:, 0], coords[:, 1], 'r--')

    return fig, ax


# +
# example visualising a tour
rng = np.random.default_rng(seed=42)
tour = np.arange(8)
rng.shuffle(tour)

# plot the tour
fig, ax = plot_tour(tour, cities)
# -

# # Enumerating all solutions
#
# You can enumerate all solutions of a **small** TSP using the
# `metapy.tsp.bruteforce.BruteForceSolver` class. The code below creates a
# `solver`, passes in an initial solution (a tour) and an `objective` and
# then runs the solver.
#
# The function `print_output` has been provided so that you can quickly
# output the results of the solver.
#
def print_output(solver):
    '''
    Utility function for printing formatted output of a solver

    Params:
    -------
    solver: object
        Solver class that has .best_solutions (list) and .best_cost
        attributes
    '''
    print("\nbest solutions:\t{0}".format(len(solver.best_solutions)))
    print("best cost:\t{0}".format(solver.best_cost))
    print("best solutions:")
    # plain loop; the original used a list comprehension purely for its
    # side effects, which builds and discards a list of Nones.
    for s in solver.best_solutions:
        print(s)


# +
# create a tour - there is NO need to randomise for bruteforce
tour = np.arange(8)

# create the objective
objective = SimpleTSPObjective(matrix)

# create the brute force solver
solver = BruteForceSolver(tour, objective)

# run the solver (should be quick below tour of length 10)
print("Enumerating all solutions...")
solver.solve()

print("\n** BRUTEFORCE OUTPUT ***")

# this should find two optimal solutions! (the reverse of each other)
print_output(solver)
# -

# now visualise the result of solution 1
fig, ax = plot_tour(solver.best_solutions[0], cities)

# now visualise the results of solution 2 (the reverse of 1)
fig, ax = plot_tour(solver.best_solutions[1], cities)

# # Exercise 1: Solving a 9 city TSP
#
# **Task**:
# * Use a brute force approach to solve a 9 city TSP from the st70 dataset
# * Plot the results

# +
# your code here...

# +
# example solution

# create a tour - there is NO need to randomise for bruteforce
tour = np.arange(9)

# create the objective
objective = SimpleTSPObjective(matrix)

# create the brute force solver
solver = BruteForceSolver(tour, objective)

# run the solver (should be quick below tour of length 10)
print("Enumerating all solutions...")
solver.solve()

print("\n** BRUTEFORCE OUTPUT ***")

# this should find two optimal solutions! (the reverse of each other)
print_output(solver)
# -

for i in range(4):
    plot_tour(solver.best_solutions[i], cities)

# # A basic optimisation method: random search
#
# Instead of a brute force enumeration we could have solved the small TSP
# problem using a **global optimisation algorithm**.
These algorithms do not get stuck in 'local optima' and will find the optimum solution **if run for long enough**. That is a big **IF**! # # The simplest method is **random search**. This makes *b* shuffles of the tour where *b* is an fixed iteration budget or the number of iterations that can be complete in a specified time limit. # # > Random search is straightforward to implement yourself. It is a loop with a if statement checking for new best solutions. # # You can also use `metapy.trp.bruteforce.RandomSearch` to conduct a random search on the TSP. # # ```python # #note max_iter is an optional parameter with default value of 1000 # solver = RandomSearch(tour, objective, max_iter=1000) # ``` # # # Exercise 2: Setting a benchmark with random search # # **Task**: # * Apply random search to the 9 city problem in the st70 dataset. Use a max_iter budget of 1000. # * Compare the result to the optimal solution obtained in exercise 1. # * Set a benchmark for solving the 70 city problem - apply random search to the full 70 city problem # # **Hints:** # * When using random search with the 9 city problem you may want to complete multiple runs to get a feel for its performance. # # + # your code here ... # + #example solution 9 city #create a tour - there is NO need to randomise for bruteforce tour = np.arange(9) #create the objective objective = SimpleTSPObjective(matrix) #create the random search solver solver = RandomSearch(tour, objective, max_iter=5000, maximisation=False) #run the solver (should be quick below tour of length 10) print("Running Random Search...") solver.solve() print("\n** RANDOM SEARCH OUTPUT ***") #this should find two optimal solutions! 
(the reverse of each other) print_output(solver) plot_tour(solver.best_solutions[0], cities) # + #example solution 70 city #create a tour - there is NO need to randomise for bruteforce tour = np.arange(70) #create the objective objective = SimpleTSPObjective(matrix) #create the random search solver solver = RandomSearch(tour, objective, max_iter=10000, maximisation=False) #run the solver (should be quick below tour of length 10) print("Running Random Search on 70 cities...") solver.solve() print("\n** RANDOM SEARCH OUTPUT ***") #show result print_output(solver) fig, ax = plot_tour(solver.best_solutions[0], cities, figsize=(12,9)) # - # # Using a hill-climbing approach # # When working in logistics it is likely that you will need to employ some form of simple hill-climbing algorithm. These are very simple algorithms that iteratively test neighbouring solutions to see if they find any improvement. This **local search** approach is often very successful at finding reasonably good solutions to a routing and scheduling problem. You will see that you can easily out perform random search. However, hill climbers do suffer from getting stuck in a **local optimum** and you can often do better by employing a more sophisticated algorithm. # # **However,** you might be surprised at how useful hill-climbers turn out to be when used in combination with other approaches. Here you will first experiment with a simple first improvement hill climber and then use it to **clean up** the solution produced by a evolutionary strategy and **combine** the framework into random search followed by hill climbing. One of the key benefits of hill climbers is that they are relatively fast (because they are simple). You can even set a time limit to get some of the benefit of local search without greatly extending the execution time of your algorithm. # # > Although this the approach is called Hill-Climbing in the TSP you are **descending** a hill to the find the shortest route. 
The algorithm is the same, but you are maximising -1*objective (or alternatively $\dfrac{1}{objective}$). # # # Exercise 3: Simple versus 2-Opt tweaks # # Hill-Climbing works by iteratively **tweaking** a solution to search for better neighbouring solutions. `metapy` provides two relatively straightforward tweak operators. `SimpleTweak` swaps the position of two cities at a time while `TweakTwoOpt` reverses a section of the route between two cities. Generally speaking `TweakTwoOpt` will produce better solutions, but it is worth considering a `SimpleTweak` approach when **cleaning up** the output of another algorithm. You could also try both! # # You create the tweak operators as follows: # # ```python # operator1 = SimpleTweak() # operator2 = TweakTwoOpt() # ``` # Each tweak operator provides a `tweak(tour, index1, index2)` method. **Note that the change to tour happens in place** # # ```python # tour = np.arange(10) # tweaker = SimpleTweak() # #swap cities at index 1 and index 2. # tweaker.tweak(tour, 1, 2) # ``` # # **Task**: # * Create a numpy vector representing a tour of 10 cities # * Perform a simple tweak of cities in elements 5 and 9 # * Perform a 2-opt tweak between cities 1 and 4 # * Print out the updated tour. # # + #your code here... # - tour = np.arange(10) tweaker1 = SimpleTweak() tweaker1.tweak(tour, 5, 9) tweaker2 = TweakTwoOpt() tweaker2.tweak(tour, 1, 4) tour # # Exercise 4: Hill-Climbing # # You have been provided with a simple hill climber class in `metapy`. The code below demonstrates how to create a hill-climbing object and run the algorithm. # # **Task:** # * Read the code below and check your understand it. # * Run the code below and check if the hill climber is better or worse than random search. # * Modify the code below so that you pass a random initial solution to the hill climber. # # **Hints**: # * a random initial solution is just a **shuffled** numpy array. 
# # + #Basic First Improvement Hill Climber #create a tour (full 70 cities) tour = np.arange(70) ########################################### # MODIFY CODE HERE TO SHUFFLE tour # ########################################### #create TSP objective objective = SimpleTSPObjective(matrix) #create Hill climbing algorithm solver = HillClimber(objective=objective, init_solution=tour, tweaker=TweakTwoOpt(), maximisation=False) #run the local search solver.solve() #output results print("\n** Hill Climber First Improvement OUTPUT ***") print("best cost:\t{0}".format(solver.best_cost)) print("best solutions:") print(solver.best_solutions) fig, ax = plot_tour(solver.best_solutions[0], cities, figsize=(12,9)) # + # Example solution. #Basic First Improvement Hill Climber #create a tour (full 70 cities) tour = np.arange(70) rng = np.random.default_rng(42) #random init solution (operation in place) rng.shuffle(tour) #create TSP objective objective = SimpleTSPObjective(matrix) #create Hill climbing algorithm solver = HillClimber(objective=objective, init_solution=tour, tweaker=TweakTwoOpt(), maximisation=False) #run the local search solver.solve() #output results print("\n** Hill Climber First Improvement OUTPUT ***") print("best cost:\t{0}".format(solver.best_cost)) print("best solutions:") print(solver.best_solutions) fig, ax = plot_tour(solver.best_solutions[0], cities, figsize=(12,9)) # - # # Exercise 5: Using an evolutionary algorithm followed by hill climbing # # You will now experiment with using a hill climber to **clean up** the solution provided by a $(\mu, \lambda)$ evolutionary strategy. It is often useful to make a few small computationally cheap tweaks to the solution provided by a more complex algorithm to gain additional performance. # # The code below has been set up for you to run an evolutionary strategy against the st70 problem. # # **Task:** # * Read and run the code. Does the EA beat the basic hill climber and random search? 
You may want to try this a few times or tune parameters. # * The final line of code assigns the EAs solution to `interim_solution`. Create a `HillClimber` and pass in `interim_solution` as its initial solution. # * Try the `SimpleTweak()` operator. # * Output the hill climbers results and plot the route. # # **Hints**: # * The EA will take a few seconds to run. If you use new Jupyter cells for your hill climbing you can run each algorithm separately. # * Remember the EA is stochastic. Feel free to run it a few times to see how hill climbing can help. It may not help every time. # + # %%time #Evolutionary Algorithm - (mu, lambda) strategy for TSP mu = 10 _lambda = 200 #full tour tour = np.arange(70) ########################################################### # Create objective # if you are finding EA a bit slow try OptimisedSimpleTSPObjective # its experimental so be warned! objective = SimpleTSPObjective(matrix) #objective = OptimisedSimpleTSPObjective(matrix) ########################################################### #create initial TSP population init = TSPPopulationGenerator(tour) #(Mu, Lambda) strategy using 2-Opt mutation strategy = MuLambdaEvolutionStrategy(mu, _lambda, TwoOptMutator()) #EA solver = EvolutionaryAlgorithm(init, objective,_lambda, strategy, maximisation=False, generations=1000) #run the EA print("\nRunning (mu, lambda) evolutionary alg...") solver.solve() #output EA results print("\n** (mu, LAMBDA) OUTPUT ***") print("best cost:\t{0}".format(solver.best_fitness)) print("best solutions:") print(solver.best_solution) fig, ax = plot_tour(solver.best_solution, cities, figsize=(12,9)) interim_solution = solver.best_solution ################################################# # # Modification here: pass interim_solution to a hill climber # or use a new Jupyter cell. 
# ################################################# # + #Example solution #Evolutionary Algorithm - (mu, lambda) strategy for TSP mu = 10 _lambda = 200 #full tour tour = np.arange(70) #create objective objective = SimpleTSPObjective(matrix) objective = OptimisedSimpleTSPObjective(matrix) #create initial TSP population init = TSPPopulationGenerator(tour) #(Mu, Lambda) strategy using 2-Opt mutation strategy = MuLambdaEvolutionStrategy(mu, _lambda, TwoOptMutator()) #EA solver = EvolutionaryAlgorithm(init, objective,_lambda, strategy, maximisation=False, generations=1000) #run the EA print("\nRunning (mu, lambda) evolutionary alg...") solver.solve() #output EA results print("\n** (mu, LAMBDA) OUTPUT ***") print("best cost:\t{0}".format(solver.best_fitness)) print("best solutions:") print(solver.best_solution) fig, ax = plot_tour(solver.best_solution, cities, figsize=(12,9)) interim_solution = solver.best_solution #now pass the solution to a simple hill climber with SimpleTweak localsearch = HillClimber(objective, interim_solution, SimpleTweak(), maximisation=False) print("\nRunning local search...") localsearch.solve() print("\n** Hill Climber First Improvement OUTPUT ***") print("best cost:\t{0}".format(localsearch.best_cost)) print("best solutions:") print(localsearch.best_solutions[0]) fig, ax = plot_tour(localsearch.best_solutions[0], cities, figsize=(12,9)) # - # # Exercise 6: Hill Climbing with Random Restarts # # Hill-Climbing algorithms may provide a different local optima dependent on the initial solution it is provided. One option is therefore to combine Random Search and Hill Climbing into a general (and still rather dumb) algorithm called Hill-Climbing with Random Restarts. Effectively it runs Hill-Climbing multiple times with a new starting point each time. The algorithm picks the best solution either as it executes or after it has completed. 
# # **Task:** # * The code below allows you to run a `HillClimber` multiple times each time with a random initial solution. # * Execute the code - how does it compare with the other procedures tested? # * Options: # * Try `SimpleTweak()` instead of `TweakTwoOpt()` # * Try a different `random_seed` or drop it? # * Try a higher `max_iter` (remember this increased runtime!) # # + # Hill Climbing with random restarts tour = np.arange(70) objective = SimpleTSPObjective(matrix) # basic first improvement hill climber localsearch = HillClimber(objective, tour, TweakTwoOpt(), maximisation=False) #random restarts (multiple runs with random init solution) solver = HillClimberRandomRestarts(objective, localsearch, tour, maxiter=20, random_seed=101) print("\nRunning Hill-Climbing with Random Restarts...") solver.solve() print("\n** Hill Climbing with Random Restarts OUTPUT ***") print("best cost:\t{0}".format(solver.best_cost)) print("best solutions:") print(solver.best_solutions[0]) fig, ax = plot_tour(solver.best_solutions[0], cities, figsize=(12,9)) # - # # Exercise 7: Iterated Local Search # # A more sophisticated version of Hill-Climbing with random restarts is **Iterated Local Search** or **ILS** for short. # # Instead of randomly restarting ILS defaults to a **homebase**. A large tweak operation (called a perturbation!) is applied to the homebase and this is then used an the initial solution for Hill-Climbing. There are a few more parameters/operators to tune with ILS and most important are deciding when to change homebase and what perturbation operator to use. ILS can get quite creative in practice, but the pseudo code below gives the general appearance of the algorithm. 
# # # ``` # function iterated_local_search(init_solution) # best = copy(init_solution) # home = copy(init_solution) # candidate = copy(init_solution) # history = [home] # # while time_remains # # candidate = local_search(candidate) # # if quality(candidate) > quality(best) # best = copy(candidate) # end # # home = update_home(home, candidate) # # candidate, history = perturb(home, history) # # end # # return best # end # # ``` # # In the algorithm `history` is not always used. If implemented, it essentially gives the algorithm a memory (or tabu list). It contains a list of previous initial solutions used in hill climbing and prevents the algorithm from repeating itself. It usual to have a fixed sized memory (another hyper-parameter to tune!) # # The function `update_home` returns a homebase for perturbing. Three simple implementations are a **random walk** and **greedy** and **epsilon greedy**. A random walk uses the last local optima returned from hill climbing. Greedy only accepts a new home base if the new local optima is better than the current homebase. Finally, epsilon greedy takes a random walk epsilon of the time and acts greedy 1 - epsilon of the time. A neat variation on epsilon greedy is to initially allow a lot of exploration and gradually decrease epsilon. However, you will need more iterations (and longer execution time!) to get this to work in practice (but it may return better solutions to large problems). # # The function `perturb` is essentially a tweak operator and hence tends to be problem specific. In routing and scheduling problems (and particularly the TSP) a good operator is called the Double Bridge Tweak. This breaks the tour into four parts, reverses and recombines. You could combine with a tabu list if felt it was necessary. # # **Task:** # * The code below illustrates how to use an implementation of ILS provided in `metapy` # * Your task to to experiment with ILS and compare how it performs to basic hill climbing with random restarts. 
# * There are several options you can use to experiment. Uncomment the lines of code to explore the different approaches. # * updating the home base # * perturbing the home base # * the number of iterations of ILS # # + #Iterated Local Search Template #multiple runs of Hill Climbing with intelligent initial conditions #random intial solution of size 70 tour = np.arange(70) np.random.shuffle(tour) ################################################################## #objective function objective = SimpleTSPObjective(matrix) objective = OptimisedSimpleTSPObjective(matrix) ################################################################### #create the general hill climber with two opt swaps localsearch = HillClimber(objective, tour, TweakTwoOpt(), maximisation=False) #################################################################### #OPTIONS FOR UPDATING HOMEBASE #UNCOMMENT THE OPTION YOU WOULD LIKE TO USE. homebase_accept = EpsilonGreedyHomeBase(epsilon=0.3) #epsilon greedy #homebase_accept = HigherQualityHomeBase() #greedy method #homebase_accept = RandomHomeBase() # random walk #homebase_accept = AnnealingEpsilonGreedyHomeBase(maxiter_per_temp=20, # verbose=True) #################################################################### #################################################################### #OPTIONS FOR PERTURBING HOMEBASE (medium to large tweak to homebase) #UNCOMMENT THE OPTION YOU WOULD LIKE TO USE. perturb = DoubleBridgePertubation() #no memory #perturb = TabuDoubleBridgeTweak(tabu_size=10, init_solution=tour) #with tabu ###################################################################### #create the ILS solver #set verbose=False to suppress output of each iteration. solver = IteratedLocalSearch(localsearch, accept=homebase_accept, perturb=perturb, verbose=True) ###################################################################### # NO. ITERATIONS OF ILS. # This is a good parameter to experiment with. Try more than 30. 
n = 30 ###################################################################### print(f"\nRunning {n} iterations...") solver.run(n) print("\n** ILS RESULTS ***") print("best cost:\t{0}".format(solver.best_cost)) print("best solution:") print(solver.best_solutions[0]) fig, ax = plot_tour(solver.best_solutions[0], cities, figsize=(12,9)) # - # # Optional Exercise 8: Good solutions # # **Task:** # * The tours below represent 'good', but not optimal solutions to the st70 problem. # * Can you improve on them? Either by using them as initial solutions in a hill-climbing / iterated local search algorithm or by tuning an evolutionary strategy? # * If you beat them then do tell! # # **Hints** # * You can see the cost of each tour by calling `objective.evaluate()` # + # cost = 688 objective = SimpleTSPObjective(matrix) tour1 = np.array([45, 26, 67, 43, 29, 19, 13, 27, 48, 54, 25, 7, 2, 31, 41, 17, 3, 1, 6, 18, 23, 14, 56, 62, 65, 21, 22, 37, 58, 34, 68, 30, 69, 12, 28, 35, 0, 15, 46, 36, 57, 49, 50, 64, 63, 10, 55, 66, 47, 53, 61, 33, 20, 11, 32, 59, 51, 9, 4, 52, 5, 40, 42, 16, 8, 39, 60, 38, 44, 24]) objective.evaluate(tour1) fig, ax = plot_tour(tour1, cities, figsize=(12,9)) # + #cost = 683 tour2 = np.array([68, 30, 34, 69, 12, 28, 35, 0, 22, 15, 46, 36, 57, 49, 52, 4, 9, 51, 59, 50, 64, 63, 10, 55, 66, 47, 53, 61, 32, 11, 33, 20, 16, 42, 8, 39, 60, 38, 44, 24, 45, 26, 67, 43, 29, 19, 13, 27, 48, 54, 25, 7, 2, 31, 41, 40, 5, 17, 3, 1, 6, 18, 23, 14, 56, 62, 65, 21, 37, 58]) objective.evaluate(tour2) fig, ax = plot_tour(tour2, cities, figsize=(12,9)) # + #cost = 686 tour3 = np.array([65, 56, 14, 23, 18, 6, 1, 3, 17, 41, 31, 2, 7, 25, 54, 48, 27, 13, 19, 29, 43, 67, 26, 45, 24, 44, 38, 60, 39, 8, 16, 42, 40, 5, 52, 4, 9, 51, 59, 11, 33, 20, 32, 61, 53, 66, 47, 10, 63, 64, 55, 50, 49, 57, 36, 46, 15, 0, 35, 22, 37, 12, 28, 69, 30, 68, 34, 21, 58, 62]) objective.evaluate(tour3) fig, ax = plot_tour(tour3, cities, figsize=(12,6)) # -
optimisation/03_routing_and_scheduling_part1_SOLUTIONS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #default_exp labeller.core # - #export from fastai2.basics import * from lazylabel.basics import * from functools import wraps # # Labeller # > `Labeller` wraps `subscribe` and saves the returned value of wrapped functions in a attribute called `labels` in the original object. #export class UniqueList(L): def append(self, o): if o not in self.items: super().append(o) #export class Labeller: def __init__(self): self.subs = {} def __call__(self, tfm, *pre): def _inner(f): return self.register_func(tfm, f, *pre) return _inner def register_func(self, tfm, f, *pre): sub = tfm.listen(*pre)(self._add_label(f)) self.subs[f.__name__] = sub return sub def register_funcs(self, tfm, fs, *pre): for f in L(fs): self.register_func(tfm, f, *pre) def _add_label(self, f): @wraps(f) def _inner(x, *args, **kwargs): x = add_attr(x, 'labels', []) x.labels.append(f(x, *args, **kwargs)) return x return _inner # Tests labeller with arbitrary transforms ABSTAIN,CAT1,CAT2,CAT3 = 'abstain','neg1','neg2','intdiv1' vocab = [ABSTAIN,CAT1,CAT2] @Transform def neg(x:Tensor): return -x class IntDiv(Transform): def encodes(self, x:int): return x//2 labeller = Labeller() int_div = IntDiv() def labeller_cat1(x): return CAT1 def labeller_cat2(x): return CAT2 labeller.register_funcs(neg, [labeller_cat1, labeller_cat2]) @labeller(int_div) def labeller_cat3(x): return CAT3 pipe = Pipeline(neg) test_eq(pipe(tensor(2)).labels, [CAT1, CAT2]) test_fail(lambda: pipe(2).labels, "'int' object has no attribute 'labels'") # Labelling functions should only be applied based on type dispatch of the transforms. 
pipe = Pipeline([neg, int_div]) test_eq(pipe(2).labels, [CAT3]) # Get execution order #export @patch def listen_lfs_order(self:Labeller): self._lfs_order = L() for sub in self.subs.values(): sub.listen_one(self._lfs_order.append) #export @patch_property def lfs_order(self:Labeller): for sub in self.subs.values(): sub.listen_one(None) return self._lfs_order labeller.listen_lfs_order() pipe(tensor(2)) test_eq(labeller.lfs_order, ['labeller_cat1','labeller_cat2']) labeller.listen_lfs_order() pipe(4) test_eq(labeller.lfs_order, ['labeller_cat3']) # Remove a subscription #export @patch def remove(self:Labeller, name): sub = self.subs.pop(name) sub.cancel() labeller.remove('labeller_cat1') test_eq(pipe(tensor(2)).labels, ['neg2']) # Reset subscriptions #export @patch def reset(self:Labeller): for sub in self.subs.values(): sub.cancel() self.subs.clear() labeller.reset() test_fail(lambda: pipe(2).labels, "'int' object has no attribute 'labels'") # # Find # Find samples with specific labels #export @patch def _find(self:Labeller, dl, lfs_idxs, lbl_idxs, reduction=operator.and_): matches,total = [],0 old_shuffle, dl.shuffle = dl.shuffle, False for b in dl: xb,yb = split_batch(dl, b) masks = [xb[:,i]==x for i,x in zip(lfs_idxs,lbl_idxs)] mask = reduce(reduction, masks) idxs = np.array(mask2idxs(mask)) matches.extend(idxs+total) total += find_bs(xb) dl.shuffle = old_shuffle return matches #export @patch def find(self:Labeller, dl, vocab, lfs, lbls, reduction=operator.and_): vocab = CategoryMap(vocab) lfs_idxs = [self.lfs_order.index(lf) for lf in lfs] lbl_idxs = [vocab.o2i[lbl] for lbl in lbls] return self._find(dl, lfs_idxs, lbl_idxs, reduction) x = tensor([[0, 0], [0, 1], [0, 2], [1, 0], [0, 2], [1, 0], [1, 2], [0, 0]]) dset = Datasets(x) dls = DataLoaders.from_dsets(dset, bs=2, drop_last=False, num_workers=0) labeller = Labeller() labeller._lfs_order = [0,1] vocab = [0,1,2] idxs = labeller.find(dls.train, vocab=vocab, lfs=[0,1], lbls=[1,2]) test_eq(idxs, [6]) # ## Tasks 
labels helper

# Extract the `labels` from a `TfmdLists`.

#export
def tasks_labels(labeller, tls, vocab, lazy=False):
    # Re-subscribe so the labelling-function execution order is recorded.
    labeller.listen_lfs_order()
    tasks = tls._new(tls.items)
    # Extend the existing transform pipeline: pull the `labels` attribute
    # written by the labeller, then map it through the vocab.
    tasks.tfms = Pipeline([*tls.tfms.fs, AttrGetter('labels'), MultiCategorize(vocab)])
    # Unless lazy, materialise all transformed items up front.
    if not lazy: tasks.cache()
    return tasks

# ## Export -

from nbdev.export import notebook2script
notebook2script()
nbs/02_labeller.core.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="u2UXutvEvpUj" # # Question Answering with BERT and HuggingFace # # You've seen how to use BERT, and other transformer models for a wide range of natural language tasks, including machine translation, summarization, and question answering. Transformers have become the standard model for NLP, similar to convolutional models in computer vision. And all started with Attention! # # In practice, you'll rarely train a transformer model from scratch. Transformers tend to be very large, so they take time, money, and lots of data to train fully. Instead, you'll want to start with a pre-trained model and fine-tune it with your dataset if you need to. # # [Hugging Face](https://huggingface.co/) (๐Ÿค—) is the best resource for pre-trained transformers. Their open-source libraries simplify downloading and using transformer models like BERT, T5, and GPT-2. And the best part, you can use them alongside either TensorFlow, PyTorch and Flax. # # In this notebook, you'll use ๐Ÿค— transformers to download and use the DistilBERT model for question answering. # # First, let's install some packages that we will use during the lab. # + id="7rW5HyNyv3YC" colab={"base_uri": "https://localhost:8080/"} outputId="7d2f7279-16ce-4883-e397-39e215a83124" # !pip install transformers # + [markdown] id="tm675LmQvpUm" # ## Pipelines # # Before fine-tuning a model, you will look to the pipelines from Hugging Face # to use pre-trained transformer models for specific tasks. The `transformers` library provides pipelines for popular tasks like sentiment analysis, summarization, and text generation. A pipeline consists of a tokenizer, a model, and the model configuration. All these are packaged together into an easy-to-use object. Hugging Face makes life easier. 
# # Pipelines are intended to be used without fine-tuning and will often be immediately helpful in your projects. For example, `transformers` provides a pipeline for [question answering](https://huggingface.co/transformers/main_classes/pipelines.html#the-pipeline-abstraction) that you can directly use to answer your questions if you give some context. Let's see how to do just that. # # You will import `pipeline` from `transformers` for creating pipelines. # + id="uNJGGbRWvpUm" from transformers import pipeline # + [markdown] id="_CeFTIr7P3QR" # Now, you will create the pipeline for question-answering, which uses the [DistilBert](https://hf.co/distilbert-base-cased-distilled-squad) model for extractive question answering (i.e., answering questions with the exact wording provided in the context). # + id="nKy4AAhLvpUo" colab={"base_uri": "https://localhost:8080/", "height": 177, "referenced_widgets": ["6cae0d5f62fc4ecea1cb3bd59c793ceb", "8a7a409baede4691b89c4858c4c755cc", "290b5037e6404582ad1111f03206c1fa", "7753bab7e77b4eb28d42c7b8bffdba7d", "0f0fea07da934e4087d8677064780a59", "3a7cc3c34f534947afcd178788244474", "91bf5092f6244f0ca23d8fc3cf3067ef", "633ee9ea0f544042a5ab92c63a041561", "b3b035a458a44e39a90640fab7e81531", "<KEY>", "adf9ff1a6839485d8b5f07123a410948", "<KEY>", "<KEY>", "<KEY>", "65c739f3deb14814815c1343eafbef2d", "<KEY>", "cf023fd20ce54deab2c4532c87713417", "<KEY>", "<KEY>", "0e2299d4ac1e4cbfa9d591969ad1664e", "759ed84d2d854dd1b161dead93f695fb", "<KEY>", "<KEY>", "81ddd660161a45fb87d60be7a0028f0e", "66212a1ed9044ef1ba9769a6e9d89d0a", "<KEY>", "7419ff4d35184f8a9950ad116805a634", "<KEY>", "e2104a5d7f1044ca8a5809c655205875", "0cf84fc6ead94adfbbaea02f8721e9d5", "<KEY>", "<KEY>", "bdc8a6517fd24adbaff4e0a00d840b4a", "f6d7d1a4553c432a9f5bd5f9a64edb8e", "d2a1e1a175e04305a86c28ec50beac84", "33f416e38dec49e8ba998a782dd886e0", "0339d81f590345b089a54be62f983ab6", "<KEY>", "<KEY>", "<KEY>", "5f52936e2d3e42c4b2a2fca5a8e5ec03", "ef319a5fa8f34a3cac73a9bdec6522f0", 
"a8a9c3e9e51c4911afb2e76baa3bef49", "e705c7ca66d140b1a7efccaf48fee55e", "ff9a16c83e19414d92a1a500487a0911", "<KEY>", "3bd115e2a00d4190b99334ec27fa1f08", "8693179a55e141ab8b1c5ad30f87f30d", "<KEY>", "<KEY>", "31e4ff1f21124ce991cfec63bbaad23f", "<KEY>", "<KEY>", "<KEY>", "<KEY>"]} outputId="c1d69058-268b-49b3-8782-72b013437025" # The task "question-answering" will return a QuestionAnsweringPipeline object question_answerer = pipeline(task="question-answering", model="distilbert-base-cased-distilled-squad") # + [markdown] id="4ltQLVWgvpUo" # After running the last cell, you have a pipeline for performing question answering given a context string. The pipeline `question_answerer` you just created needs you to pass the question and context as strings. It returns an answer to the question from the context you provided. For example, here are the first few paragraphs from the [Wikipedia entry for tea](https://en.wikipedia.org/wiki/Tea) that you will use as the context. # # # # + id="D_-MzZNJvpUp" context = """ Tea is an aromatic beverage prepared by pouring hot or boiling water over cured or fresh leaves of Camellia sinensis, an evergreen shrub native to China and East Asia. After water, it is the most widely consumed drink in the world. There are many different types of tea; some, like Chinese greens and Darjeeling, have a cooling, slightly bitter, and astringent flavour, while others have vastly different profiles that include sweet, nutty, floral, or grassy notes. Tea has a stimulating effect in humans primarily due to its caffeine content. The tea plant originated in the region encompassing today's Southwest China, Tibet, north Myanmar and Northeast India, where it was used as a medicinal drink by various ethnic groups. An early credible record of tea drinking dates to the 3rd century AD, in a medical text written by Hua Tuo. It was popularised as a recreational drink during the Chinese Tang dynasty, and tea drinking spread to other East Asian countries. 
Portuguese priests and merchants introduced it to Europe during the 16th century. During the 17th century, drinking tea became fashionable among the English, who started to plant tea on a large scale in India. The term herbal tea refers to drinks not made from Camellia sinensis: infusions of fruit, leaves, or other plant parts, such as steeps of rosehip, chamomile, or rooibos. These may be called tisanes or herbal infusions to prevent confusion with 'tea' made from the tea plant. """ # + [markdown] id="HyR3o2mrvpUq" # Now, you can ask your model anything related to that passage. For instance, "Where is tea native to?". # + id="eiRohAWWvpUq" colab={"base_uri": "https://localhost:8080/"} outputId="6b7cc785-94f5-4fe3-f3e7-8a291dabe56b" result = question_answerer(question="Why do people drink tea?", context=context) print(result['answer']) # + [markdown] id="cRXzFlZ5vpUr" # You can also pass multiple questions to your pipeline within a list so that you can ask: # # * "Where is tea native to?" # * "When was tea discovered?" # * "What is the species name for tea?" # # at the same time, and your `question-answerer` will return all the answers. # + id="IMLyXeMZvpUr" colab={"base_uri": "https://localhost:8080/"} outputId="2d2ad019-02bd-41be-d1f6-6d83888c5cb0" questions = ["Where is tea native to?", "When was tea discovered?", "What is the species name for tea?"] results = question_answerer(question=questions, context=context) for q, r in zip(questions, results): print(q, "\n>> " + r['answer']) # + [markdown] id="XXf18tVu8p70" # Although the models used in the Hugging Face pipelines generally give outstanding results, sometimes you will have particular examples where they don't perform so well. Let's use the following example with a context string about the Golden Age of Comic Books: # + id="0v9C0TAqwinw" context = """ The Golden Age of Comic Books describes an era of American comic books from the late 1930s to circa 1950. 
During this time, modern comic books were first published and rapidly increased in popularity. The superhero archetype was created and many well-known characters were introduced, including Superman, Batman, Captain Marvel (later known as SHAZAM!), Captain America, and Wonder Woman. Between 1939 and 1941 Detective Comics and its sister company, All-American Publications, introduced popular superheroes such as Batman and Robin, Wonder Woman, the Flash, Green Lantern, Doctor Fate, the Atom, Hawkman, Green Arrow and Aquaman.[7] Timely Comics, the 1940s predecessor of Marvel Comics, had million-selling titles featuring the Human Torch, the Sub-Mariner, and Captain America.[8] As comic books grew in popularity, publishers began launching titles that expanded into a variety of genres. Dell Comics' non-superhero characters (particularly the licensed Walt Disney animated-character comics) outsold the superhero comics of the day.[12] The publisher featured licensed movie and literary characters such as Mickey Mouse, <NAME>, <NAME> and Tarzan.[13] It was during this era that noted Donald Duck writer-artist <NAME> rose to prominence.[14] Additionally, MLJ's introduction of Archie Andrews in Pep Comics #22 (December 1941) gave rise to teen humor comics,[15] with the Archie Andrews character remaining in print well into the 21st century.[16] At the same time in Canada, American comic books were prohibited importation under the War Exchange Conservation Act[17] which restricted the importation of non-essential goods. As a result, a domestic publishing industry flourished during the duration of the war which were collectively informally called the Canadian Whites. 
The educational comic book Dagwood Splits the Atom used characters from the comic strip Blondie.[18] According to historian <NAME>, appealing comic-book characters helped ease young readers' fear of nuclear war and neutralize anxiety about the questions posed by atomic power.[19] It was during this period that long-running humor comics debuted, including EC's Mad and Carl Barks' Uncle Scrooge in Dell's Four Color Comics (both in 1952).[20][21] """ # + [markdown] id="fYbERLKQbhyH" # Let's ask the following question: "What popular superheroes were introduced between 1939 and 1941?" The answer is in the fourth paragraph of the context string. # + id="SEmAbSSGbg0J" colab={"base_uri": "https://localhost:8080/"} outputId="47950c1a-28fe-4540-8598-dce8524b70ff" question = "What popular superheroes were introduced between 1939 and 1941?" result = question_answerer(question=question, context=context) print(result['answer']) # + [markdown] id="LGx_BHkN-ejY" # Here, the answer should be: # "Batman and Robin, Wonder Woman, the Flash, # Green Lantern, Doctor Fate, the Atom, Hawkman, Green Arrow, and Aquaman", instead, the pipeline returned a different answer. You can even try different question wordings: # # * "What superheroes were introduced between 1939 and 1941?" # * "What comic book characters were created between 1939 and 1941?" # * "What well-known characters were created between 1939 and 1941?" # * "What well-known superheroes were introduced between 1939 and 1941 by Detective Comics?" # # and you will only get incorrect answers. 
# + id="f91kLn9VcRzK" colab={"base_uri": "https://localhost:8080/"} outputId="c53c0620-4a9e-417e-934a-b487e0482d7a" questions = ["What popular superheroes were introduced between 1939 and 1941?", "What superheroes were introduced between 1939 and 1941 by Detective Comics and its sister company?", "What comic book characters were created between 1939 and 1941?", "What well-known characters were created between 1939 and 1941?", "What well-known superheroes were introduced between 1939 and 1941 by Detective Comics?"] results = question_answerer(question=questions, context=context) for q, r in zip(questions, results): print(q, "\n>> " + r['answer']) # + [markdown] id="QCkLhf27cEsH" # It seems like this model is a **huge fan** of <NAME>. It even considers him a superhero! # # The example that fooled your `question_answerer` belongs to the [TyDi QA dataset](https://ai.google.com/research/tydiqa), a dataset from Google for question/answering in diverse languages. To achieve better results when you know that the pipeline isn't working as it should, you need to consider fine-tuning your model. # # In the next ungraded lab, you will get the chance to fine-tune the DistilBert model using the TyDi QA dataset. # # # + id="YEwLHvNQ_Zky"
Week-3/Ungraded-Assignments/C4_W3_1_Question_Answering_with_BERT_and_HuggingFace_Pytorch_tydiqa.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Comparing the metrics of different Scikit-Learn models # # One of the most important things when comparing different models is to make sure they're compared on the same data splits. # # For example, let's say you have `model_1` and `model_2` which each differ slightly. # # If you want to compare and evaulate their results, `model_1` and `model_2` should both be trained on the same data (e.g. `X_train` and `y_train`) and their predictions should each be made on the same data, for example: # * `model_1.fit(X_train, y_train)` -> `model_1.predict(X_test)` -> `model_1_preds` # * `model_2.fit(X_train, y_train)` -> `model_2.predict(X_test)` -> `model_2_preds` # # Note the differences here being the two models and the 2 different sets of predictions which can be compared against each other. # # This short notebook compares 3 different models on a small dataset. # 1. A baseline `RandomForestClassifier` (all default parameters) # 2. A `RandomForestClassifier` tuned with `RandomizedSearchCV` (and `refit=True`) # 3. A `RandomForestClassifier` tuned with `GridSearchCV` (and `refit=True`) # # The most important part is they all use the same data splits created using `train_test_split()` and `np.random.seed(42)`. 
import pandas as pd import numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score from sklearn.model_selection import train_test_split # ## Import and split data # # + heart_disease = pd.read_csv("https://raw.githubusercontent.com/mrdbourke/zero-to-mastery-ml/master/data/heart-disease.csv") # Split into X & y X = heart_disease.drop("target", axis =1) y = heart_disease["target"] # Split into train & test np.random.seed(42) # seed for reproducibility X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) # - # ## Make evaluation function # # Our evaluation function will use all of the major classification metric functions from Scikit-Learn. def evaluate_preds(y_true, y_preds): """ Performs evaluation comparison on y_true labels vs. y_pred labels on a classification. """ accuracy = accuracy_score(y_true, y_preds) precision = precision_score(y_true, y_preds) recall = recall_score(y_true, y_preds) f1 = f1_score(y_true, y_preds) metric_dict = {"accuracy": round(accuracy, 2), "precision": round(precision, 2), "recall": round(recall, 2), "f1": round(f1, 2)} print(f"Acc: {accuracy * 100:.2f}%") print(f"Precision: {precision:.2f}") print(f"Recall: {recall:.2f}") print(f"F1 score: {f1:.2f}") return metric_dict # ## Baseline model # Create model with default hyperparameters. See [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) documentation for more. # + np.random.seed(42) # Make & fit baseline model clf = RandomForestClassifier() clf.fit(X_train, y_train) # Make baseline predictions y_preds = clf.predict(X_test) # Evaluate the classifier on validation set baseline_metrics = evaluate_preds(y_test, y_preds) # - # ## RandomizedSearchCV # Find hyperparameters with [RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html). 
# # **Note:** Although best parameters are found on different splits of `X_train` and `y_train`, because `refit=True`, once the best parameters are found, they are refit to the entire set of `X_train` and `y_train`. See the [RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html) and [cross-validation documentation](https://scikit-learn.org/stable/modules/cross_validation.html) for more. # + from sklearn.model_selection import RandomizedSearchCV # Setup the parameters grid grid = {"n_estimators": [10, 100, 200, 500, 1000, 1200], "max_depth": [None, 5, 10, 20, 30], "max_features": ["auto", "sqrt"], "min_samples_split": [2, 4, 6], "min_samples_leaf": [1, 2, 4]} # Instantiate RandomForestClassifier clf = RandomForestClassifier(n_jobs=1) # Setup RandomizedSearchCV rs_clf = RandomizedSearchCV(estimator=clf, param_distributions=grid, n_iter=10, # number of models to try cv=5, verbose=2, random_state=42, # set random_state to 42 for reproducibility refit=True) # set refit=True (default) to refit the best model on the full dataset # Fit the RandomizedSearchCV version of clf rs_clf.fit(X_train, y_train) # 'rs' is short for RandomizedSearch # - # Check best parameters of RandomizedSearchCV rs_clf.best_params_ # + # Evaluate RandomizedSearch model rs_y_preds = rs_clf.predict(X_test) # Evaluate the classifier on validation set rs_metrics = evaluate_preds(y_test, rs_y_preds) # - # ## GridSearchCV # Find best hyperparameters using [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). # # **Note:** Although best parameters are found on different splits of `X_train` and `y_train`, because `refit=True`, once the best parameters are found, they are refit to the entire set of `X_train` and `y_train`. 
See the [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) and [cross-validation documentation](https://scikit-learn.org/stable/modules/cross_validation.html) for more. # + from sklearn.model_selection import GridSearchCV # Setup grid-2 (refined version of grid) grid_2 = {'n_estimators': [100, 200, 500], 'max_depth': [None], 'max_features': ['auto', 'sqrt'], 'min_samples_split': [6], 'min_samples_leaf': [1, 2]} # Instantiate RandomForestClassifier clf = RandomForestClassifier(n_jobs=1) # Setup GridSearchCV gs_clf = GridSearchCV(estimator=clf, param_grid=grid_2, cv=5, verbose=2, refit=True) # set refit=True (default) to refit the best model on the full dataset # Fit the GridSearchCV version of clf gs_clf.fit(X_train, y_train) # 'gs' is short for GridSearch # - # Find best parameters of GridSearchCV gs_clf.best_params_ # + # Evaluate GridSearchCV model gs_y_preds = gs_clf.predict(X_test) # Evaluate the classifier on validation set gs_metrics = evaluate_preds(y_test, gs_y_preds) # - # ## Compare metrics # Compare all of the found metrics between the models. # + compare_metrics = pd.DataFrame({"baseline": baseline_metrics, "random search": rs_metrics, "grid search": gs_metrics}) compare_metrics.plot.bar(figsize=(10, 8));
zero-to-mastery-ml-master/section-2-appendix-video-code/scikit-learn-metric-comparison.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np df = pd.read_csv('phone_data.csv') df.head df.shape print(df['item']) print(df.item.head()) df[['item','duration','month']] df.iloc[:8,:4] df.loc[8:25] # #BOOLEAN INDEXING # and & # # or | # # Equal to == # # Not equal to != # # Not in ~ # # Equals: == # # Not equals: != # # Greater than, less than: > or < # # Greater than or equal to >= # # Less than or equal to <= condition = (df.item == 'call') condition condition = (df.item == 'call') df[condition] condition = (df.item == 'call') & (df.network == 'Vodafone') df[condition] # NOW IF YOU WANT TO SEE THE ITEMS WHICH ARE NOT INCLUDED (EXCLUDED ONES) condition = (df.item == 'call') df[~condition] condition = (df.item == 'call') | (df.network == 'Vodafone') df[condition] # NOW IF YOU WANT TO ADD A COLUMN WHICH WILL BE A MULTIPLE OF A PARTICULAR COLUMN df['new_column'] = df['duration']*3 df df['new_column'] = df['network']*3 df df df.drop(['new_column'], axis = 1, inplace=True) df # + # lambda function - Row wise operation def new_duration(network, duration): if network=='world': new_duration = duration * 2 else: new_duration = duration * 4 return new_duration df['new_duration'] = df.apply(lambda x: new_duration(x['network'], x['duration']), axis=1) # - df df.sort_values('duration') #ascending df.sort_values('duration', ascending = False ) #descending df.sort_values(['duration', 'network'], ascending=[False, True]) #ascending # #WORKING WITH DATES # # Directive # # # %a Weekday as localeโ€™s abbreviated name. Sun, Mon, โ€ฆ, Sat (en_US) So, Mo, โ€ฆ, Sa (de_DE) # # # %A Weekday as localeโ€™s full name. Sunday, Monday, โ€ฆ, Saturday (en_US) Sonntag, Montag, โ€ฆ, Samstag (de_DE) # # # %w Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. 
0, 1, 2, 3, 4, 5, 6 # # # %d Day of the month as a zero-padded decimal number. 01, 02, โ€ฆ, 31 pd.to_datetime(df['date']) >= '2015-01-01' sum(pd.to_datetime(df['date']) >= '2015-01-01') from datetime import datetime from datetime import date df['new_date']=[datetime.strptime(x,'%d/%m/%y %H:%M') for x in df['date']] type(df['new_date'][0]) type(df['date'][0]) print(date.today()) print(datetime.now()) df[df.month=='2014-11']['duration'].sum() df.groupby('month')['duration'].sum() df.groupby('month')['date'].count() df[df['item']=='call'].groupby('network')['duration'].sum() df.groupby(['network','item'])['duration'].sum() df.groupby(['network','item'])['duration'].sum().reset_index() # + import turtle turtle.color("red","yellow") turtle.begin_fill() turtle.speed(10) for i in range(50): turtle.forward(300) turtle.left(170) turtle.end_fill() turtle.done() # + from turtle import * from random import randint speed(50) bgcolor('black') x = 1 while x < 500: r = randint(0,255) g = randint(0,255) b = randint(0,255) colormode(255) pencolor(r,g,b) fd(50 + x) rt(90.911) x = x+1 exitonclick() # - import pandas as pd sal = pd.read_csv('Salaries.csv') sal sal.head(1) sal.tail() sal.info() #for information sal.describe() sal[sal['OtherPay']==134426.14][['JobTitle','EmployeeName']]
Untitled1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/IanReyes2/OOP-58003/blob/main/LAB_ACT_3_OOP_58003.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="sw_Foe-1ra8r"
# Create a class

# + id="QGa8xTHjreRZ"
class Car:
    pass


# + colab={"base_uri": "https://localhost:8080/"} id="3VY_IFbfu0T0" outputId="bd7f47c5-0913-4b0f-ac44-c62c479d18fb"
class Car:
    """A simple car described by a brand name and a mileage figure."""

    def __init__(self, brand, mileage):
        self.brand = brand
        self.mileage = mileage

    def description(self):
        # Return the attributes as a (brand, mileage) tuple.
        return self.brand, self.mileage

    def display(self):
        # str() is required: mileage is numeric, and concatenating a
        # float directly onto a str raises TypeError.  (The original also
        # printed the bound method `self.description` by mistake.)
        print("The brand " + self.brand + " has a mileage of " + str(self.mileage))


car1 = Car("Honda City", 24.1)
print(car1.description())

# + [markdown] id="kP1JQrixyODh"
# Modify an Object Property

# + colab={"base_uri": "https://localhost:8080/"} id="Z8xaJBn5yRH4" outputId="635e30ab-3f58-4fb1-f980-40aa46ea8de4"
car1.brand="Toyota"
print(car1.brand)

# + colab={"base_uri": "https://localhost:8080/"} id="__sIAIliyZrY" outputId="a1f8da14-8721-4cd8-88f5-4020a80b677e"
print(car1.description())

# + [markdown] id="3YiqP4Wryn-C"
# Delete an Object property

# + colab={"base_uri": "https://localhost:8080/", "height": 183} id="8KjdfFFGyq94" outputId="66f67fa9-f908-4717-f415-093e7271b0b8"
# NOTE: after `del`, the next line raises AttributeError on purpose --
# this cell demonstrates what happens when an attribute is deleted.
del car1.mileage
print(car1.mileage)

# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="eiQRqAg1y7rS" outputId="40d2d7cc-89a1-4014-a75b-f3095ff8162a"
print(car1.description())

# + colab={"base_uri": "https://localhost:8080/"} id="Ym3O2gydzBHb" outputId="073e9ca9-a47d-420c-eaa4-f8bff31c5d44"
#print(car1.mileage)
print(car1.brand)

# + [markdown] id="WdRMPdwWzPyx"
# Appliction 1 - Write a Python program that computes for the area and the perimeter 
of a rectangle. Name Rectangle as class name, and length and width as attribute names. # + colab={"base_uri": "https://localhost:8080/"} id="kvwqjn0vzhvR" outputId="286f210e-b83e-46d5-b4f5-ba90fbc17dc5" class Rectangle: def __init__(self,length,width): self.length = length self.width = width def Area(self): return self.length*self.width def Perimeter(self): return 2*(self.length+self.width) def display(self): print("The Area of the Rectangle is",self.Area()) print("The perimeter of the rectangle is",self.Perimeter()) rectangle = Rectangle(7,4.5) print(rectangle.display()) # + [markdown] id="jn4T4xm702GT" # Application 2 - Write a Python program that displays your fullname (Surname, First Name), and name your class as OOP_58003 # + colab={"base_uri": "https://localhost:8080/"} id="9lqNvn7W1IvA" outputId="9b6ac72f-a450-4456-f3d3-5d907e36fb06" class OOP_58003: def __init__(self,Surname,FirstName): self.Surname = Surname self.FirstName = FirstName def description(self): return self.Surname, self.FirstName def display(self): print(self.Surname + "," + self.FirstName) name1 = OOP_58003("Reyes" , "<NAME>.") print(name1.display())
LAB_ACT_3_OOP_58003.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np array1 = np.ones(4);array1 array2 = np.arange(15);array2 array_conc = np.concatenate((array1, array2));array_conc # + #join de arrays a=np.ones((3,3)) b=np.zeros((3,3)) # - print(a);print(b) np.vstack((a,b)) np.hstack((a,b)) a=np.array([0,1,2]) b=np.array([3,4,5]) c=np.array([6,7,8]) np.column_stack((a,b,c)) np.row_stack((a,b,c)) array3 = np.arange(16).reshape((4,4));array3 [array3_pl,array3_p2]= np.hsplit(array3,2) array3_pl array3_p2 [array3_pl,array3_p2]= np.vsplit(array3,2) array3_pl array3_p2 #Matrizes Numpy mat1 = np.matrix("1,2,3;4,5,6");mat1 mat2 = np.matrix([[1,2,3],[4,5,6]]);mat2 a= np.array([[1,2],[3,4]]);a a*a A =np.mat(a) A*A #multiplicando um array como se fosse uma mรบltiplicaรงรฃo de matrizes np.dot(a,a) #convertendo uma matriz em um array mat5 = np.asmatrix(a);mat5 mat5*mat5 #convertendo uma matriz para array array2 = np.asarray(mat5);array2 array2*array2 array1 = np.random.randint(0,50,20);array1 def calc_func(num): if num <10: return num **3 else: return num **2 #nรฃo รฉ possรญvel aplicar diretamente uma funรงรฃo a um array numpy para isso รฉ necessรกrio vetorizar a funรงรฃo calc_func(array1) v_calc_func = np.vectorize(calc_func) type (v_calc_func) v_calc_func(array1) #com a funรงรฃo mpa nรฃo รฉ necessรกrio vetorizar list(map(calc_func, array1)) #da mesma forma com list comprehension [calc_func(x) for x in array1] import pandas as pd serie1 = pd.Series(np.arange(26));serie1 import string lcase = string.ascii_lowercase ucase = string.ascii_uppercase print(lcase,ucase) #convertendo em lista lcase = list(lcase) ucase = list(ucase) print(lcase,ucase) serie1.index = lcase;serie1 #slice pelo รญndice serie1['f':'r'] import matplotlib.pyplot as plt # %matplotlib inline np.random.seed(784) array = 
np.random.randint(1,30,40)
array1

dados = pd.Series(array1);dados

# Using numpy with NaN values: np.mean propagates NaN
array1 = np.array([1,2,3,np.nan]);array1

array1.mean()

# Using pandas with NaN values: Series.mean skips NaN by default
serie1 = pd.Series([1,2,3,np.nan]);serie1

serie1.mean()

import pandas as pd

# Original data
dados1 = {'disciplina_id': ['1','2','3','4','5'],
          'nome': ['Bernardo','Alan', 'Mateus','Ivo','Gerson'],
          'sobrenome':['Anderson', 'Teixeira','Amoedo','Trindade','Vargas']}
# Build the dataframe.  Must reference dados1 here: the original used
# dados2, which is only defined further below, so this raised NameError.
df_a = pd.DataFrame(dados1,columns = ['disciplina_id','nome','sobrenome'])
df_a

# Original data
dados2 = {'disciplina_id': ['4','5','6','7','8'],
          'nome': ['Roberto','Mariana', 'Ana','Marcos','Maria'],
          'sobrenome':['Sampaio', 'Fernandes','Arantes','Menezes','Martins']}
# Build the dataframe
df_b = pd.DataFrame(dados2,columns = ['disciplina_id','nome','sobrenome'])
df_b

# +
# Original data.  All columns passed to pd.DataFrame must have equal
# length, otherwise it raises ValueError; there are 10 test scores, so
# the id column is trimmed to 10 entries (the original listed 11).
dados3 = {'disciplina_id': ['1','2','3','4','5','6','7','8','9','10'],
          'teste_id': [81,75,75,71,76,84,95,61,57,90]}
# Build the dataframe
df_n = pd.DataFrame(dados3,columns = ['disciplina_id','teste_id'])
df_n
Estudos/DSA/Meu resumo cap03.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np
import os
import sys
import pickle as pkl
import re

# Artefact locations for the trained seq2seq chatbot.
weight_path_encoder = "../chatbot/model_v1/model_2_v3/encoder/weights.ckpt"
# Fixed copy-paste bug: the decoder checkpoint lives under decoder/,
# not encoder/ -- the original path loaded encoder weights into the decoder.
weight_path_decoder = "../chatbot/model_v1/model_2_v3/decoder/weights.ckpt"
tokenizer_path = "../chatbot/model_v1/model_2_v3/tokenizer.pkl"
encoder_embedding_layer_path = "../chatbot/model_v1/model_2_v3/encoder_embedding_layer.pkl"
decoder_embedding_layer_path = "../chatbot/model_v1/model_2_v3/decoder_embedding_layer.pkl"

# Model hyper-parameters (+1 on the vocabulary for the padding index).
vocab_size = 30000 + 1
units = 1024
embedding_dim = 100
BATCH_SIZE=64

# +
# Load the fitted tokenizer and the pre-trained embedding matrices.
with open(tokenizer_path, "rb") as handle:
    tokenizer = pkl.load(handle)

with open(encoder_embedding_layer_path, "rb") as handle:
    encoder_embedding_variables = pkl.load(handle)

with open(decoder_embedding_layer_path, "rb") as handle:
    decoder_embedding_variables = pkl.load(handle)


# -

class EncoderAttention(tf.keras.Model):
    """Embedding + LSTM encoder; returns the full output sequence for attention."""

    def __init__(self, vocab_size, embedding_dims, hidden_units):
        super().__init__()
        self.hidden_units = hidden_units
        # Embedding initialised from the pre-trained matrix, still trainable.
        self.embedding_layer = tf.keras.layers.Embedding(vocab_size,
                                                         embedding_dims,
                                                         tf.keras.initializers.Constant(encoder_embedding_variables),
                                                         trainable=True)
        self.lstm_layer = tf.keras.layers.LSTM(hidden_units,
                                               return_sequences=True,
                                               return_state=True
                                               ) # We need the lstm outputs 
# to calculate attention!
def initialize_hidden_state(self): return [tf.zeros((BATCH_SIZE, self.hidden_units)), tf.zeros((BATCH_SIZE, self.hidden_units))] def call(self, inputs, hidden_state): embedding = self.embedding_layer(inputs) output, h_state, c_state = self.lstm_layer(embedding, initial_state = hidden_state) return output, h_state, c_state # + class DecoderAttention(tf.keras.Model): def __init__(self, vocab_size, embedding_dim, hidden_units): super().__init__() self.embedding_layer = self.embedding_layer = tf.keras.layers.Embedding(vocab_size, embedding_dim, tf.keras.initializers.Constant(decoder_embedding_variables), trainable=True) self.lstm_cell = tf.keras.layers.LSTMCell(hidden_units) self.sampler = tfa.seq2seq.sampler.TrainingSampler() self.attention_mechanism = tfa.seq2seq.LuongAttention(hidden_units, memory_sequence_length=BATCH_SIZE*[15]) #N self.attention_cell = tfa.seq2seq.AttentionWrapper(cell=self.lstm_cell, # N attention_mechanism=self.attention_mechanism, attention_layer_size=hidden_units) self.output_layer = tf.keras.layers.Dense(vocab_size) self.decoder = tfa.seq2seq.BasicDecoder(self.attention_cell, # N sampler=self.sampler, output_layer=self.output_layer) def build_initial_state(self, batch_size, encoder_state): #N decoder_initial_state = self.attention_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32) decoder_initial_state = decoder_initial_state.clone(cell_state=encoder_state) return decoder_initial_state def call(self, inputs, initial_state): embedding = self.embedding_layer(inputs) outputs, _, _ = self.decoder(embedding, initial_state=initial_state, sequence_length=BATCH_SIZE*[15-1]) return outputs # + example_x, example_y = tf.random.uniform((BATCH_SIZE, 15)), tf.random.uniform((BATCH_SIZE, 15)) ##ENCODER encoder = EncoderAttention(vocab_size, embedding_dim, units) # Test the encoder sample_initial_state = encoder.initialize_hidden_state() sample_output, sample_h, sample_c = encoder(example_x, sample_initial_state) print(sample_output.shape) 
print(sample_h.shape)

##DECODER
decoder = DecoderAttention(vocab_size, embedding_dim, units)
# Attention needs the encoder's output sequence as its memory.
decoder.attention_mechanism.setup_memory(sample_output)
initial_state = decoder.build_initial_state(BATCH_SIZE, [sample_h, sample_c])
sample_decoder_output = decoder(example_y, initial_state)

# Restore the trained weights (the random forward passes above built the
# variables so load_weights can match them).
encoder.load_weights(weight_path_encoder)
decoder.load_weights(weight_path_decoder)
print("All set")
# -

def decontracted(phrase):
    """Expand common English contractions in *phrase* (e.g. "can't" -> "can not")."""
    # specific
    phrase = re.sub(r"won\'t", "will not", phrase)
    phrase = re.sub(r"can\'t", "can not", phrase)

    # general
    phrase = re.sub(r"n\'t", " not", phrase)
    phrase = re.sub(r"\'re", " are", phrase)
    phrase = re.sub(r"\'s", " is", phrase)
    phrase = re.sub(r"\'d", " would", phrase)
    phrase = re.sub(r"\'ll", " will", phrase)
    phrase = re.sub(r"\'t", " not", phrase)
    phrase = re.sub(r"\'ve", " have", phrase)
    phrase = re.sub(r"\'m", " am", phrase)
    return phrase


import unicodedata

def preprocess_sentence(w):
    """Normalize a raw sentence the same way the training data was normalized,
    then wrap it in <start> ... <end> tokens."""
    w = w.lower().strip()

    # We normalize unicode data, umlauts will be converted to normal letters
    #w = w.replace("รŸ", "ss")
    #w = ''.join(c for c in unicodedata.normalize('NFD', w) if unicodedata.category(c) != 'Mn')

    # creating a space between a word and the punctuation following it
    # eg: "he is a boy." => "he is a boy ."
    # Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
    w = re.sub(r"\[\w+\]", '', w)
    w = " ".join(re.findall(r"\w+", w))
    w = re.sub(r"([?.!,ยฟ])", r" \1 ", w)
    w = re.sub(r'[" "]+', " ", w)

    # replacing everything with space except (a-z, A-Z, ".", "?", "!")
    # NOTE(review): commas are stripped here even though the padding regex
    # above handles them -- matches training-time behavior, so left as-is.
    w = re.sub(r"[^a-zA-Z?.!]+", " ", w)

    w = w.strip()
    w = decontracted(w)

    # adding a start and an end token to the sentence
    # so that the model knows when to start and stop predicting.
    w = '<start> ' + w + ' <end>'
    return w


# +
#def reply(sentence, preprocess=True):
preprocess = True
sentence = "Hi there whats up"

if preprocess:
    sentence = preprocess_sentence(sentence)
    sentence_tokens = tokenizer.texts_to_sequences([sentence])
    # Pad/truncate to the fixed model length of 15 tokens.
    input_tensor = tf.keras.preprocessing.sequence.pad_sequences(sentence_tokens,
                                                                 maxlen=15,
                                                                 padding='post')
else:
    input_tensor = sentence

input_tensor = tf.convert_to_tensor(input_tensor)
print("After if")

# Batch size 1 for single-sentence inference.
encoder_hidden = [tf.zeros((1, units)), tf.zeros((1, units))]
encoder_output, encoder_h, encoder_c = encoder(input_tensor, encoder_hidden)

start_token = tf.convert_to_tensor([tokenizer.word_index['<start>']])
end_token = tokenizer.word_index['<end>']
print("After first block")

# This time we use the greedy sampler because we want the word with the highest probability!
# We are not generating new text, where a probability sampling would be better
greedy_sampler = tfa.seq2seq.GreedyEmbeddingSampler(decoder.embedding_layer)
print("Greedy sampler set")

# Instantiate a BasicDecoder object
decoder_instance = tfa.seq2seq.BasicDecoder(cell=decoder.attention_cell,
                                            sampler=greedy_sampler,
                                            output_layer=decoder.output_layer)
print("Decoder sampler set")

# Setup Memory in decoder stack
decoder.attention_mechanism.setup_memory(encoder_output)
print("Attention mechanism up!")

# set decoder_initial_state
decoder_initial_state = decoder.build_initial_state(batch_size=1,
                                                    encoder_state=[encoder_h, encoder_c])
print("Initial state ready")

### Since the BasicDecoder wraps around Decoder's rnn cell only, you have to ensure that the inputs to BasicDecoder
### decoding step is output of embedding layer. tfa.seq2seq.GreedyEmbeddingSampler() takes care of this.
### You only need to get the weights of embedding layer, which can be done by decoder.embedding.variables[0] and pass this callable to BasicDecoder's call() function
decoder_embedding_matrix = decoder.embedding_layer.variables[0]
print("Got embedding layer")
# -

decoder_initial_state

# +
outputs, _, _ = decoder_instance(decoder_embedding_matrix,
                                 start_tokens=start_token,
                                 end_token=end_token,
                                 initial_state=decoder_initial_state)
print("Done")

result_sequence = outputs.sample_id.numpy()
# FIX: `return_sentence` was printed below but never defined (NameError).
# Decode the sampled token ids back to text first (this is exactly the
# expression that was left commented out as the intended return value).
return_sentence = tokenizer.sequences_to_texts(result_sequence)[0]
print(return_sentence)

# +
# Minimal, self-contained BasicDecoder example (untrained toy model).
import tensorflow as tf
import tensorflow_addons as tfa

batch_size = 4
hidden_size = 32
vocab_size = 64
start_token_id = 1
end_token_id = 2

embedding_layer = tf.keras.layers.Embedding(vocab_size, hidden_size)
decoder_cell = tf.keras.layers.LSTMCell(hidden_size)
output_layer = tf.keras.layers.Dense(vocab_size)

sampler = tfa.seq2seq.GreedyEmbeddingSampler(embedding_layer)
decoder = tfa.seq2seq.BasicDecoder(
    decoder_cell,
    sampler,
    output_layer,
    maximum_iterations=10
)

start_tokens = tf.fill([batch_size], start_token_id)
initial_state = decoder_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)

final_output, final_state, final_lengths = decoder(
    None,
    start_tokens=start_tokens,
    end_token=end_token_id,
    initial_state=initial_state
)

print(final_output.sample_id)
# -
notebooks/test_inference.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np

def sigmoid(x):
    """Logistic activation, element-wise."""
    return 1/(1+np.exp(-x))

def relu(x):
    # FIX: builtin max() fails on numpy arrays; np.maximum is element-wise.
    return np.maximum(0, x)

def leaky_relu(x):
    # Leaky ReLU with slope 0.2 for negative inputs.
    # FIX: np.maximum instead of builtin max() for array support.
    return np.maximum(0.2*x, x)

def tanh(x):
    # FIX: `exp` was undefined (NameError); use np.exp.
    return (np.exp(2*x)-1)/(np.exp(2*x)+1)

def d_sigmoid(x):
    """Sigmoid derivative, expressed in terms of the sigmoid OUTPUT x."""
    return (x*(1-x))

def d_tanh(x):
    """Tanh derivative, expressed in terms of the tanh OUTPUT x.
    FIX: was `x*x-1`, which is the NEGATED derivative (gradient ascent)."""
    return 1 - x*x

# XOR-style toy dataset: 4 samples, 3 features (last column is a bias of 1).
x = np.array([[0,1,1],[0,1,1],[1,0,1],[1,1,1]])
y = np.array([[0],[1],[1],[0]])

alphas = [0.001,0.01,0.1,1,10,100,1000]

# +
for alpha in alphas:
    print("\nTraining with Alpha=",str(alpha))

    synapse_0 = np.random.random((3,4))  # 3 corresponds to inputs and 4 to hidden layers
    synapse_1 = np.random.random((4,1))  # 4 corresponds to hidden layers and 1 to output

    for j in range(60000):  # 60000 epochs ie. Iterations
        # Forward pass (sigmoid activations on both layers).
        layer_0 = x
        layer_1 = sigmoid(np.dot(layer_0,synapse_0))  # dot product
        layer_2 = sigmoid(np.dot(layer_1,synapse_1))

        layer_2_error = layer_2-y

        if j%10000==0:
            print("Error after "+str(j)+" iteration "+str(np.mean(np.abs(layer_2_error))))

        # FIX: the forward pass uses sigmoid, so backprop must use the sigmoid
        # derivative. The original applied d_tanh to sigmoid outputs, which is
        # the wrong (and, with the old sign bug, negated) gradient.
        layer_2_delta = layer_2_error*d_sigmoid(layer_2)
        layer_1_error = layer_2_delta.dot(synapse_1.T)  # T = Transpose
        layer_1_delta = layer_1_error*d_sigmoid(layer_1)

        # Gradient-descent weight updates.
        synapse_1 -= alpha*(layer_1.T.dot(layer_2_delta))
        synapse_0 -= alpha*(layer_0.T.dot(layer_1_delta))
# -
Neural Network.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Beam Bending Analysis Code :D

# ## Geometry dimensions, Two setups

# #### 3-point beam bending

L3 = 0.7  # 3-point length
b3 = 0.03835  # 3-point width
h3 = 0.02574  # 3-point height

# #### 4-point beam bending

L4 = 0.7
b4 = 0.03835
h4 = 0.02574
a = 0.2
b = 0.5
c = 0.15

# #### Material Properties

E = 69000e6  # Elastic Modulus
v = 0.33  # Poisson's Ratio
Y = 275e6  # Yield Strength

# ## Calculate the second moment of inertia I

# This is a function
def areaMomentOfInertia(b, h):
    """Second moment of area of a solid rectangular section (width b, height h)."""
    I = b*h**3/12
    return I

# ### Max Deflection

# +
# 3-point max deflection
def threePointDeflection(P, E, I, L):
    """Mid-span deflection of a simply supported beam under a central load P."""
    w = P*L**3 / (48*E*I)
    return w

# 4-point max deflection
def fourPointDeflection(P, E, I, L, a):
    """Mid-span deflection for 4-point bending, loads applied a from each support."""
    w = P*a*(3*L**2 - 4*a**2)/(48*E*I)
    return w

# Calculate deflection for 3-point beam.
# Input a list of the loads in Newtons, i.e. [L0, L1, L2, ...].
# Notice that the values in the excel file are the reaction force;
# make sure the load is 2 times the reaction force.
load = [400, 800, 1200, 1600]
I3 = areaMomentOfInertia(b3, h3)  # calculate the 2nd moment of area for 3-point beam
deflection3 = [threePointDeflection(P, E, I3, L3)*1000 for P in load]  # in mm
print(deflection3)

# Calculate deflection for 4-point beam
I4 = areaMomentOfInertia(b4, h4)
deflection4 = [fourPointDeflection(P, E, I4, L4, a)*1000 for P in load]  # in mm
print(deflection4)

# +
import matplotlib.pyplot as plt

# Deflection data from measurement (mm), from the excel file.
D3 = [0.90, 1.73, 2.63, 3.4]
D4 = [0.69, 1.37, 2.05, 2.73]

# Plot all the things
fig = plt.figure(1, figsize=(10, 6), dpi=64)  # initiate the figure
ax = fig.gca()  # get the axes
ax.plot(load, deflection3, '.-', label='Theory')  # plot the theoretical deflection
ax.plot(load, D3, '*-', label='Measurement')  # plot the experimental deflection
# note: if you want to use error bars then run the following
# ax.errorbar(load, deflection3, yerr=[listOfYErrors], label='Theory')
ax.legend()
ax.set_xlabel('Load(N)')
ax.set_ylabel('Deflection(mm)')
plt.title('3-Point beam bending')

fig = plt.figure(2, figsize=(10, 6), dpi=64)
ax = fig.gca()
ax.plot(load, deflection4, '.-', label='Theory')
ax.plot(load, D4, '*-', label='Measurement')
ax.legend()
ax.set_xlabel('Load(N)')
ax.set_ylabel('Deflection(mm)')
plt.title('4-Point beam bending')
# -

# ### Here are the Functions Shear and Moment in Bending

# #### For each strain gauge rosette, we have two dimension values x and z

# +
# Shear force for 3-point bending
def shearForce3(P, L, x):
    """Internal shear force at position x for a central load P on span L."""
    if x <= L/2:
        Q = P/2
    else:
        Q = -P/2
    return Q

# Moment M for 3-points
def bendingMoment3(P, L, x):
    """Internal bending moment at position x for a central load P on span L."""
    if x <= L/2:
        M = P*x/2
    else:
        M = P*(L-x)/2
    return M

##### Write your own function for 4-point bending shear force and moment here #####

# Axial stress by moment
def axialStress(M, I, z):
    """Bending (axial) stress at height z from the neutral axis."""
    sigma = M*z/I
    return sigma

# Shear stress by shear force; since we only care about the top and bottom
# surface and the neutral axis of the beam:
def shearStress(Q, b, h, z):
    """Shear stress: 3Q/(2bh) at the neutral axis (z == 0), zero at the surfaces."""
    if z == 0:
        tau = 3*Q/(2*b*h)
    else:
        tau = 0
    return tau
# -

# ### Rotation Function for Strain

import numpy as np

def strain1Rotate(epsilon, th):
    '''Rotate a strain epsilon =[e_x,e_y,gamma_xy] by an angle th given in radians.'''
    # FIX: use np.cos/np.sin explicitly -- the bare names sin/cos were only
    # imported in a LATER cell, so calling this function before running that
    # cell raised a NameError.
    T = [np.cos(th)**2, np.sin(th)**2, np.sin(th)*np.cos(th)]
    return np.dot(T, epsilon)

# ## Calculate stresses at each strain gauge rosette

# ### Take the first strain rosette as example

# [x_coordinate, z_coordinate]; this is rosette 1 -- note that z equals zero
# at the neutral axis.
rosettePosition = [0.35, -h3/2]
P = 400  # load in newtons

# First trial of loading
Q = shearForce3(P, L3, rosettePosition[0])
M = bendingMoment3(P, L3, rosettePosition[0])
sig = axialStress(M, I3, rosettePosition[1])
tau = shearStress(Q, b3, h3, rosettePosition[1])

# +
from numpy import sin, cos, pi

# We already have stress values at each strain rosette; now compute the
# strains using Hooke's law. C maps stress -> strain (compliance matrix).
C = np.array([[1/E, -v/E, 0], [-v/E, 1/E, 0], [0, 0, 2*(1+v)/E]])

# Take strain rosette 1 as example: we need both axial strains and the shear strain.
epsilon = np.dot(C, np.array([sig, 0, tau]))  # No stress on y direction

# Now transform from coordinate strains to the direction of each strain gauge.
ros1 = [0, 45, 90]  # all three gauge directions (degrees)
e1a = strain1Rotate(epsilon, ros1[0]*pi/180)
e1b = strain1Rotate(epsilon, ros1[1]*pi/180)
e1c = strain1Rotate(epsilon, ros1[2]*pi/180)
epsilon_rosette1 = np.array([e1a, e1b, e1c])

# This can also be done using list comprehensions as:
epsilon_rosette1Alt = np.array([strain1Rotate(epsilon, th*pi/180) for th in ros1])

# Since the unit of measurement is microstrain, the results here need to be
# multiplied by 1e6.
epsilon_rosette1 *= 1e6  # this multiplies the original value by 1e6
epsilon_rosette1Alt *= 1e6

print(epsilon_rosette1)
print(epsilon_rosette1Alt)
# -
Labs/Lab 1 Beam Bending/Beam Bending Lab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.9.7 64-bit (''d2l'': conda)'
#     language: python
#     name: python3
# ---

# +
import torch
from torch import nn
from d2l import torch as d2l


def corr2d(X, K):  #@save
    """Compute the 2-D cross-correlation of input X with kernel K."""
    kh, kw = K.shape
    out = torch.zeros((X.shape[0] - kh + 1, X.shape[1] - kw + 1))
    for row in range(out.shape[0]):
        for col in range(out.shape[1]):
            # Element-wise multiply the window with the kernel, then sum.
            out[row, col] = (X[row:row + kh, col:col + kw] * K).sum()
    return out
# -

X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
K = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
corr2d(X, K)


class Conv2D(nn.Module):
    """Minimal 2-D convolution layer built on corr2d, with a scalar bias."""

    def __init__(self, kernel_size):
        super().__init__()
        self.weight = nn.Parameter(torch.rand(kernel_size))
        self.bias = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        return corr2d(x, self.weight) + self.bias


# A 6x8 image with a vertical black stripe in columns 2..5.
X = torch.ones((6, 8))
X[:, 2:6] = 0
X

# A [1, -1] kernel detects vertical edges (non-zero where color changes).
K = torch.tensor([[1.0, -1.0]])

Y = corr2d(X, K)
Y

# Transposing the image makes the edges horizontal -- this kernel misses them.
corr2d(X.t(), K)

# +
# Build a 2-D conv layer with one output channel and a (1, 2) kernel.
conv2d = nn.Conv2d(1, 1, kernel_size=(1, 2), bias=False)

# The layer works on 4-D tensors (batch, channels, height, width);
# here both batch size and channel count are 1.
X = X.reshape((1, 1, 6, 8))
Y = Y.reshape((1, 1, 6, 7))
lr = 3e-2  # learning rate

# Learn the edge-detector kernel from (X, Y) pairs by gradient descent.
for step in range(10):
    Y_hat = conv2d(X)
    loss = (Y_hat - Y) ** 2
    conv2d.zero_grad()
    loss.sum().backward()
    # Update the kernel in place.
    conv2d.weight.data[:] -= lr * conv2d.weight.grad
    if (step + 1) % 2 == 0:
        print(f'epoch {step+1}, loss {loss.sum():.3f}')
# -

conv2d.weight.data.reshape((1, 2))

# Padding and stride

# +
import torch
from torch import nn


# Helper: run conv2d on X after adding, and afterwards stripping, the
# batch and channel dimensions.
def comp_conv2d(conv2d, X):
    # (1, 1) here means batch size and channel count are both 1.
    X = X.reshape((1, 1) + X.shape)
    Y = conv2d(X)
    # Drop the first two dimensions: batch size and channels.
    return Y.reshape(Y.shape[2:])


# Note: one row/column of padding on each side, i.e. 2 extra rows/columns total.
conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)
X = torch.rand(size=(8, 8))
comp_conv2d(conv2d, X).shape
# -

conv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))
comp_conv2d(conv2d, X).shape

X.shape

conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)
comp_conv2d(conv2d, X).shape

conv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))
comp_conv2d(conv2d, X).shape
6-2&3-conv-for-images.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CARTO Data Observatory # This is a basic template notebook to start exploring your new Dataset from CARTO's Data Observatory via the Python library [CARTOframes](https://carto.com/cartoframes). # # You can find more details about how to use CARTOframes in the [Quickstart guide](https://carto.com/developers/cartoframes/guides/Quickstart/). # # Setup # ## Installation # # Make sure that you have the latest version installed. Please, find more information in the [Installation guide](https://carto.com/developers/cartoframes/guides/Installation/). # !pip install -U cartoframes # + # Note: a kernel restart is required after installing the library import cartoframes cartoframes.__version__ # - # ## Credentials # In order to be able to use the Data Observatory via CARTOframes, you need to set your CARTO account credentials first. # + from cartoframes.auth import set_default_credentials username = 'YOUR_USERNAME' api_key = 'YOUR_API_KEY' # Master API key. Do not make this file public! set_default_credentials(username, api_key) # - # **โš ๏ธ Note about credentials** # # For security reasons, we recommend storing your credentials in an external file preventing publishing them by accident within your notebook. You can get more information in the section *Setting your credentials* of the [Authentication guide](https://carto.com/developers/cartoframes/guides/Authentication/). # # Dataset operations # ## Metadata exploration # # In this section we will explore some basic information regarding the Dataset you have licensed. More information on how to explore metadata associated to a Dataset is available in the [Data discovery guide](https://carto.com/developers/cartoframes/guides/Data-discovery/). 
# # In order to access the Dataset and its associeted metadata, you need to provide the "ID" which is a unique identifier of that Dataset. The IDs of your Datasets are available from Your Subscriptions page in the CARTO Dashboard and via the Discovery methods in CARTOFrames. # + from cartoframes.data.observatory import Dataset dataset = Dataset.get('YOUR_ID') # - # Retrieve some general metadata about the Dataset dataset.to_dict() # Explore the first 10 rows of the Dataset dataset.head() # Explore the last 10 rows of the Dataset dataset.tail() # Get the geographical coverage of the data dataset.geom_coverage() # Access the list of variables in the dataset dataset.variables.to_dataframe() # Summary of some variable stats dataset.describe() # ## Access the data # # Now that we have explored some basic information about the Dataset, we will proceed to download a sample of the Dataset into a dataframe so we can operate it in Python. # # Datasets can be downloaded in full or by applying a filter with a SQL query. More info on how to download the Dataset or portions of it is available in the [Data discovery guide](https://carto.com/developers/cartoframes/guides/Data-discovery/). # + # Filter by SQL query query = "SELECT * FROM $dataset$ LIMIT 50" dataset_df = dataset.to_dataframe(sql_query=query) # - # **Note about SQL filters** # # Our SQL filtering queries allow for any PostgreSQL and PostGIS operation, so you can filter the rows (by a WHERE condition) or the columns (using the SELECT). Some common examples are filtering the Dataset by bounding box or filtering by column value: # # ``` # SELECT * FROM $dataset$ WHERE ST_IntersectsBox(geom, -74.044467,40.706128,-73.891345,40.837690) # ``` # # ``` # SELECT total_pop, geom FROM $dataset$ # ``` # # A good tool to get the bounding box details for a specific area is [bboxfinder.com](http://bboxfinder.com/#0.000000,0.000000,0.000000,0.000000). 
# First 10 rows of the Dataset sample dataset_df.head() # ## Visualization # # Now that we have downloaded some data into a dataframe we can leverage the visualization capabilities of CARTOframes to build an interactive map. # # More info about building visualizations with CARTOframes is available in the [Visualization guide](https://carto.com/developers/cartoframes/guides/Visualization/). # + from cartoframes.viz import Layer Layer(dataset_df, geom_col='geom') # - # **Note about variables** # # CARTOframes allows you to make data-driven visualizations from your Dataset variables (columns) via the Style helpers. These functions provide out-of-the-box cartographic styles, legends, popups and widgets. # # Style helpers are also highly customizable to reach your desired visualization setting simple parameters. The helpers collection contains functions to visualize by color and size, and also by type: category, bins and continuous, depending on the type of the variable. # + from cartoframes.viz import color_bins_style Layer(dataset_df, color_bins_style('YOUR_VARIABLE_ID'), geom_col='geom') # - # ## Upload to CARTO account # # In order to operate with the data in CARTO Builder or to build a CARTOFrames visualization reading the data from a table in the Cloud instead of having it in your local environment (with its benefits in performance), you can load the dataframe as a table in your CARTO account. # # More info in the [Data Management guide](https://carto.com/developers/cartoframes/guides/Data-management/). # + from cartoframes import to_carto to_carto(dataset_df, 'my_dataset', geom_col='geom') # - # Build a visualization reading the data from your CARTO account Layer('my_dataset') # ## Enrichment # # Enrichment is the process of adding variables to a geometry, which we call the target (point, line, polygonโ€ฆ) from a spatial Dataset, which we call the source. 
CARTOFrames has a set of methods for you to augment your data with new variables from a Dataset in the Data Observatory. # # In this example, you will need to load a dataframe with the geometries that you want to enrich with a variable or a group of variables from the Dataset. You can detail the variables to get from the Dataset by passing the variable's ID. You can get the variables IDs with the metadata methods. # # More info in the [Data enrichment guide](https://carto.com/developers/cartoframes/guides/Data-enrichment/). # + from cartoframes.data.observatory import Enrichment enriched_df = Enrichment().enrich_polygons( df, # Insert here the DataFrame to be enriched variables=['YOUR_VARIABLE_ID'] ) # - # ## Save to file # # Finally, you can also export the data into a CSV file. More info in the [Data discovery guide](https://carto.com/developers/cartoframes/guides/Data-discovery/). # + # Filter by SQL query query = "SELECT * FROM $dataset$ LIMIT 50" dataset.to_csv('my_dataset.csv', sql_query=query)
docs/examples/data_observatory/do_dataset_notebook_template.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# -*- coding: UTF-8 -*-
"""PyRamen Homework Starter."""

# @TODO: Import libraries
import csv
from pathlib import Path

# @TODO: Set file paths for menu_data.csv and sales_data.csv
menu_filepath = Path('./Resources/menu_data.csv')
sales_filepath = Path('./Resources/sales_data.csv')

# @TODO: Initialize list objects to hold our menu and sales data
menu = []
sales = []

# @TODO: Read in the menu data into the menu list
with open(menu_filepath, 'r') as csv_file:
    csv_reader_menu = csv.reader(csv_file)
    next(csv_reader_menu)  # skip the header row
    for row in csv_reader_menu:
        item = row[0]
        category = row[1]
        description = row[2]
        price = float(row[3])
        cost = int(row[4])
        # NOTE(review): only "entree" rows are kept -- presumably intentional
        # for this assignment; confirm whether other categories should count.
        if category == "entree":
            menu_item_list = [item, category, description, price, cost]
            menu.append(menu_item_list)

# @TODO: Read in the sales data into the sales list
with open(sales_filepath, 'r') as csv_file:
    csv_reader_sales = csv.reader(csv_file)
    next(csv_reader_sales)  # skip the header row
    for row in csv_reader_sales:
        item_id = int(row[0])
        sale_date = row[1]
        credit_card_number = row[2]
        sale_quantity = int(row[3])
        sale_menu_item = row[4]
        sales_row_list = [item_id, sale_date, credit_card_number, sale_quantity, sale_menu_item]
        sales.append(sales_row_list)

# @TODO: Initialize dict object to hold our key-value pairs of items and metrics
report = {}

# Initialize a row counter variable
row_count = 0

# @TODO: Loop over every row in the sales list object
for sale_line in sales:
    quantity = sale_line[3]
    sales_menu_item = sale_line[4]
    # Match the sold item against the menu to get its price and cost.
    for menu_item in menu:
        item = menu_item[0]
        price = menu_item[3]
        cost = menu_item[4]
        if sales_menu_item == item:
            if item not in report:
                # FIX: "04-profit" must be scaled by the quantity sold.
                # The original seeded it with the per-unit margin
                # (price - cost), so the first sale of every item
                # under-counted profit while later sales added the full
                # (price - cost) * quantity -- inconsistent totals.
                item_sales_dict = {"01-count": quantity,
                                   "02-revenue": price * quantity,
                                   "03-cogs": cost * quantity,
                                   "04-profit": (price - cost) * quantity}
                report[item] = item_sales_dict
            else:
                report[item]["01-count"] += quantity
                report[item]["02-revenue"] += price * quantity
                report[item]["03-cogs"] += cost * quantity
                report[item]["04-profit"] += (price * quantity) - (cost * quantity)

# Write the per-item metrics to a text report (and echo to stdout).
output_path = 'output.txt'
with open(output_path, 'w') as file:
    file.write(f"Menu item analysis.\n")
    for k, v in report.items():
        print(f"{k} {v}")
        file.write(f"{k} {v}\n")
# -
PyRamen/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/anik199/House_Prices_Advanced_Regression/blob/main/House_Prices_ART.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="RI1o3oRl50zH" # ## Step:1 # + id="g8AZBXck4m8v" import pandas as pd import numpy as np from sklearn.model_selection import train_test_split # Read the data X_full = pd.read_csv('/content/train.csv', index_col='Id') X_test_full = pd.read_csv('/content/test.csv', index_col='Id') # Remove rows with missing target, separate target from predictors X_full.dropna(axis=0, subset=['SalePrice'], inplace=True) y = X_full.SalePrice X_full.drop(['SalePrice'], axis=1, inplace=True) # Break off validation set from training data X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0) # "Cardinality" means the number of unique values in a column # Select categorical columns with relatively low cardinality (convenient but arbitrary) categorical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 20 and X_train_full[cname].dtype == "object"] # Select numerical columns numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']] # Keep selected columns only my_cols = categorical_cols + numerical_cols X_train = X_train_full[my_cols].copy() X_valid = X_valid_full[my_cols].copy() X_test = X_test_full[my_cols].copy() # + [markdown] id="VKUsD6bz5rM5" # ## Step:2 # + id="SoOfAczZ5bmI" from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import 
OneHotEncoder from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error from sklearn.ensemble import GradientBoostingRegressor from sklearn.ensemble import StackingRegressor from xgboost import XGBRegressor from lightgbm import LGBMRegressor from sklearn.linear_model import Lasso # Preprocessing for numerical data numerical_transformer = SimpleImputer(strategy='constant') # Preprocessing for categorical data categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore')) ]) # Bundle preprocessing for numerical and categorical data preprocessor = ColumnTransformer( transformers=[ ('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols) ]) # + [markdown] id="6MeFzNVg6Ahs" # ## Step:3 # # + [markdown] id="O1_tq7JbtxcV" # ##Model:0 # + id="TlYf_ucluX0W" pip install catboost # + id="Sw0ZbEDUst1Y" from catboost import CatBoostRegressor # Define the model my_model_0 = CatBoostRegressor() # Bundle preprocessing and modeling code in a pipeline my_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', my_model_0) ]) # Fit the model my_model_0=my_pipeline.fit(X_train, y_train) # Get predictions predictions_0 = my_pipeline.predict(X_valid) # Calculate MAE mse_0 = mean_squared_error(predictions_0, y_valid) print("RMSE:" , mse_0) # Run prediction on the Kaggle test set. preds_test_0 = my_pipeline.predict(X_test) # + [markdown] id="ccM9Z1aEBkyz" # ### Model:1 # + id="K77K88Vc6Gjs" # I found this best alpha through cross-validation. 
best_alpha = 350 # Define the model my_model_1= Lasso(alpha=best_alpha, max_iter=50000) # Bundle preprocessing and modeling code in a pipeline my_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', my_model_1) ]) # Fit the model my_model_1=my_pipeline.fit(X_train, y_train) # Get predictions predictions_1 = my_pipeline.predict(X_valid) # Calculate MAE mse_1 = mean_squared_error(predictions_1, y_valid) print("RMSE:" , mse_1) # Run prediction on the Kaggle test set. preds_test_1 = my_pipeline.predict(X_test) # + [markdown] id="UDrp3ZCR6hxC" # ### Model:2 # + id="godMOW-76lYd" # Define the model my_model_2 = XGBRegressor(learning_rate=0.01, n_estimators=3460, max_depth=3, min_child_weight=0, gamma=0, subsample=0.7, colsample_bytree=0.7, objective='reg:linear', nthread=-1, scale_pos_weight=1, seed=27, reg_alpha=0.00006) # Bundle preprocessing and modeling code in a pipeline my_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', my_model_2) ]) # Fit the model my_model_2=my_pipeline.fit(X_train, y_train) # Get predictions predictions_2 = my_pipeline.predict(X_valid) # Calculate MAE mse_2 = mean_squared_error(predictions_2, y_valid) print("RMSE:" , mse_2) # Run prediction on the Kaggle test set. 
preds_test_2 = my_pipeline.predict(X_test)

# + [markdown] id="5kdzw82Q6sTG"
# ### Model:3

# + id="Tx4kKFme6u-G"
# Define the model
my_model_3 = LGBMRegressor(objective='regression',
                           num_leaves=4,
                           learning_rate=0.01,
                           n_estimators=5000,
                           max_bin=200,
                           bagging_fraction=0.75,
                           bagging_freq=5,
                           bagging_seed=7,
                           feature_fraction=0.2,
                           feature_fraction_seed=7,
                           verbose=-1,
                           )
# Bundle preprocessing and modeling code in a pipeline
my_pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                              ('model', my_model_3)
                             ])
# Fit the model
my_model_3 = my_pipeline.fit(X_train, y_train)

# Get predictions
predictions_3 = my_pipeline.predict(X_valid)

# Calculate MSE (note: the print label says "RMSE", but
# mean_squared_error returns the MSE).
mse_3 = mean_squared_error(predictions_3, y_valid)
print("RMSE:" , mse_3)

# Run prediction on the Kaggle test set.
preds_test_3 = my_pipeline.predict(X_test)

# + [markdown] id="vRKur44k65s9"
# ### Model:4

# + id="Pe0zhPXV66VS"
# Define the model
my_model_4 = GradientBoostingRegressor(n_estimators=10000,
                                       learning_rate=0.05,
                                       max_depth=4,
                                       max_features='sqrt',
                                       min_samples_leaf=15,
                                       min_samples_split=10,
                                       loss='huber',
                                       random_state =0)
# Bundle preprocessing and modeling code in a pipeline
my_pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                              ('model', my_model_4)
                             ])
# Fit the model
my_model_4 = my_pipeline.fit(X_train, y_train)

# Get predictions
predictions_4 = my_pipeline.predict(X_valid)

# Calculate MSE
mse_4 = mean_squared_error(predictions_4, y_valid)
print("RMSE:" , mse_4)

# Run prediction on the Kaggle test set.
preds_test_4 = my_pipeline.predict(X_test)

# + [markdown] id="rGpjeGvR7PJk"
# ### Predictions

# + id="z2bsaMi27OQf"
# Combined Best Models Average
preds_test = (preds_test_0 + preds_test_2 + preds_test_3) / 3
# FIX: removed the line `preds_test = my_pipeline.predict(X_test)` that
# followed -- it immediately overwrote the ensemble average above with the
# predictions of model 4 alone (the last pipeline fitted), defeating the
# "Combined Best Models Average" this cell is meant to produce.

# + [markdown] id="pE_oP81l7YRq"
# ### Make Prediction Dataframe

# + id="nUTwV4sQ7gZo"
# Save test predictions to file
output = pd.DataFrame({'Id': X_test.index,
                       'SalePrice': preds_test})
output.to_csv('submission.csv', index=False)
House_Prices_ART.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="VtQuTUbBzPA2" executionInfo={"status": "ok", "timestamp": 1605580278192, "user_tz": 300, "elapsed": 363, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgTyKaEU-p3dKM2bTRZzJsXYAyjJRA-Zt3-CkCTLA=s64", "userId": "15852237945509464181"}} outputId="f52a2dbc-52e9-434e-ebd8-efe51aeaead0" colab={"base_uri": "https://localhost:8080/"} from google.colab import drive drive.mount('/content/drive') # + id="FRPBenhQyZYY" # %cd "/content/drive/MyDrive/Deep_Learning_course_VT/DL project/covid_xrays_model" # %pip install -e . # + id="Ncnlk4spyZYl" executionInfo={"status": "ok", "timestamp": 1605580291187, "user_tz": 300, "elapsed": 301, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgTyKaEU-p3dKM2bTRZzJsXYAyjJRA-Zt3-CkCTLA=s64", "userId": "15852237945509464181"}} import numpy as np import os import matplotlib.pyplot as plt # from covid_xrays_model import config from fastai.vision import DatasetType import pandas as pd import joblib from covid_xrays_model.processing.data_management import load_dataset from covid_xrays_model.predict import predict_dataset from covid_xrays_model.train_pipeline import run_training_sample import warnings warnings.filterwarnings('ignore') # + [markdown] id="bWWyBZG0yZYp" # # Set your data path here # + id="o1W5ZKWkyZYq" executionInfo={"status": "ok", "timestamp": 1605580294301, "user_tz": 300, "elapsed": 441, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgTyKaEU-p3dKM2bTRZzJsXYAyjJRA-Zt3-CkCTLA=s64", "userId": "15852237945509464181"}} # os.environ['DATA_DIR'] = '/home/doaa/Git/covid_xrays/data' os.environ['DATA_DIR'] = "/content/drive/MyDrive/Deep_Learning_course_VT/DL project/data" # + [markdown] 
id="RqP07KaNyZYu" # # FASTAI classification model # + id="t6l6EG_PyZYv" executionInfo={"status": "ok", "timestamp": 1605562221996, "user_tz": 300, "elapsed": 18277, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgTyKaEU-p3dKM2bTRZzJsXYAyjJRA-Zt3-CkCTLA=s64", "userId": "15852237945509464181"}} outputId="7cd3cd82-7113-4bf9-ebc2-36b9f736d027" colab={"base_uri": "https://localhost:8080/"} data = load_dataset(image_size=224, sample_size=5000) data # + id="1Db3XOQtyZY0" executionInfo={"status": "ok", "timestamp": 1605562243776, "user_tz": 300, "elapsed": 14547, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgTyKaEU-p3dKM2bTRZzJsXYAyjJRA-Zt3-CkCTLA=s64", "userId": "15852237945509464181"}} outputId="4eeeefdd-b6f8-4ddb-a34b-0a6890e488da" colab={"base_uri": "https://localhost:8080/", "height": 585} data.show_batch(rows=3, figsize=(10,8), ds_type=DatasetType.Train) # + id="kj1hyvzAyZY4" executionInfo={"status": "ok", "timestamp": 1605562251195, "user_tz": 300, "elapsed": 486, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgTyKaEU-p3dKM2bTRZzJsXYAyjJRA-Zt3-CkCTLA=s64", "userId": "15852237945509464181"}} outputId="17fa5bbd-8499-4ca8-fa25-22f686d1b3fc" colab={"base_uri": "https://localhost:8080/"} # data.train_ds data.classes, len(data.train_ds), len(data.valid_ds), data.c # + id="JPoY4KuFyZY8" # data.test_ds[0][0] # + [markdown] id="XuzD6wYjyZZB" # # Run training # + id="rhkaKwuNC2K8" executionInfo={"status": "ok", "timestamp": 1605580319666, "user_tz": 300, "elapsed": 285, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgTyKaEU-p3dKM2bTRZzJsXYAyjJRA-Zt3-CkCTLA=s64", "userId": "15852237945509464181"}} sample_size = 5001 # number of training images per class percent_gan_images = 25 # 25% confusion_matrix_filename = f'conf_mat_{sample_size}_{percent_gan_images}' # + id="F-v8S770yZZC" executionInfo={"status": 
"ok", "timestamp": 1605577115541, "user_tz": 300, "elapsed": 7634060, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgTyKaEU-p3dKM2bTRZzJsXYAyjJRA-Zt3-CkCTLA=s64", "userId": "15852237945509464181"}} outputId="45e676ea-53c9-48e1-9cbe-b9b43769b17c" colab={"base_uri": "https://localhost:8080/", "height": 1000} run_training_sample(n_cycles=10, sample_size=sample_size, #percent_gan_images=percent_gan_images, # Percent of GAN generated COVID images to add confusion_matrix_filename=confusion_matrix_filename, with_oversampling=True, # always use True with_weighted_loss=True, with_focal_loss=False) # + [markdown] id="xzxOrfE3yZZH" # ### Load training Confusion Matrix # + id="IMpVkK1NyZZH" executionInfo={"status": "ok", "timestamp": 1605580325501, "user_tz": 300, "elapsed": 539, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgTyKaEU-p3dKM2bTRZzJsXYAyjJRA-Zt3-CkCTLA=s64", "userId": "15852237945509464181"}} outputId="731ed0ef-ffbe-4e3c-dbec-431226990cb9" colab={"base_uri": "https://localhost:8080/"} train_mat = joblib.load(f'train_{confusion_matrix_filename}.pkl') # sum diagonal / all data_size *100 accuracy = np.trace(train_mat) / train_mat.sum() * 100 print(f'Accuracy: {accuracy:.2f}%') print(f'Confusion Matrix:\n {train_mat}') # + [markdown] id="CKa_PGFKyZZM" # ### For Validation confusion Matrix # + id="eXfBhG3xyZZN" executionInfo={"status": "ok", "timestamp": 1605580329014, "user_tz": 300, "elapsed": 290, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgTyKaEU-p3dKM2bTRZzJsXYAyjJRA-Zt3-CkCTLA=s64", "userId": "15852237945509464181"}} outputId="5cbdc4c5-c43f-4ce5-aeff-a3cc9377dc22" colab={"base_uri": "https://localhost:8080/"} valid_mat = joblib.load(f'valid_{confusion_matrix_filename}.pkl') # sum diagonal / all data_size *100 accuracy = np.trace(valid_mat) / valid_mat.sum() * 100 print(f'Accuracy: {accuracy:.2f}%') print(f'Confusion Matrix:\n 
{valid_mat}') # + [markdown] id="NZOIj7MYyZZQ" # # Run evaluation (model has to be already trained) - slow on CPU # + id="OBKVxDP0yZZR" executionInfo={"status": "ok", "timestamp": 1605580705240, "user_tz": 300, "elapsed": 365333, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgTyKaEU-p3dKM2bTRZzJsXYAyjJRA-Zt3-CkCTLA=s64", "userId": "15852237945509464181"}} outputId="3efa2bcd-be5c-4710-8e04-305134d43b35" colab={"base_uri": "https://localhost:8080/", "height": 592} # Evaluate test set (ds_type='test') # Will load the correct saved model based on the given parameters test_conf_mat, test_accuracy = predict_dataset(ds_type='test', cpu=False, # set to False if running on GPUs sample_size=sample_size, #percent_gan_images=percent_gan_images, with_oversampling=True, # always use True with_weighted_loss=True, with_focal_loss=False, confusion_matrix_filename=confusion_matrix_filename ) # + id="otKqjD8YyZZU" executionInfo={"status": "ok", "timestamp": 1605580727198, "user_tz": 300, "elapsed": 324, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/<KEY>", "userId": "15852237945509464181"}} outputId="dd139b38-b6fb-4178-c27e-329b957479f8" colab={"base_uri": "https://localhost:8080/"} test_conf_mat = joblib.load(f'test_{confusion_matrix_filename}.pkl') # sum diagonal / all data_size *100 test_accuracy = np.trace(test_conf_mat) / test_conf_mat.sum() * 100 print(f'Test Accuracy: {test_accuracy:.2f}%') print(f'Test Confusion Matrix:\n {test_conf_mat}') # + id="PmikOIbWyZZZ" # + [markdown] id="hcbp3yqxyZZd" # # Play with the model # + id="yvpEAoVoyZZd" outputId="e1cdcf7d-f58c-4783-9e87-7ed662b2e596" learn.predict(data.test_ds[0][0]) # + id="gGPxSNmUyZZh" # + id="52eUtVnuyZZl"
notebooks/04-train_predict.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .cpp // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: C++14 // language: C++14 // name: xcpp14 // --- // + #include "cost.h" #include <cmath> double goal_distance_cost(int goal_lane, int intended_lane, int final_lane, double distance_to_goal) { // The cost increases with both the distance of intended lane from the goal // and the distance of the final lane from the goal. The cost of being out // of the goal lane also becomes larger as the vehicle approaches the goal. int delta_d = 2.0 * goal_lane - intended_lane - final_lane; double cost = 1 - exp(-(std::abs(delta_d) / distance_to_goal)); return cost; }
behavior Planning/cost_function_1/cost.cpp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="R0iis46JultV" colab_type="code" colab={} import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression, LogisticRegression from sklearn.metrics import accuracy_score, mean_squared_error, r2_score from sklearn.preprocessing import LabelEncoder # + id="Ce0L5sPtuaMy" colab_type="code" outputId="9c6b69dd-625e-4417-ff44-70941eb670d1" executionInfo={"status": "ok", "timestamp": 1561129551790, "user_tz": 300, "elapsed": 775, "user": {"displayName": "<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": "https://localhost:8080/", "height": 34} # Load S&P 500 df #remember to fix! df_quake_sp500 = pd.read_csv("https://raw.githubusercontent.com/labs13-quake-viewer/ds-data/master/" + "S&P%20500%20Price%20Change%20by%20Earthquake(5.5+).csv", index_col=0) #df_quake_gold = pd.read_csv("Gold Price Change by Earthquake(5.5+).csv", index_col=0) df_quake_sp500.shape # + id="e2ovujy9wfcw" colab_type="code" colab={} dates = [] for i in df_quake_sp500.Date: dates.append(int(''.join(c for c in i if c.isdigit()))) # + id="a5SH-G0E1ZYb" colab_type="code" colab={} df_quake_sp500["magg"] = (df_quake_sp500["Mag"] * 10).astype(int) # + id="pcJ70PWkyKs_" colab_type="code" colab={} df_quake_sp500["dates"] = dates # + id="FP0Qss2_6bKU" colab_type="code" outputId="951c57a2-9a59-493b-b17a-b6f73559d71d" executionInfo={"status": "ok", "timestamp": 1561129562218, "user_tz": 300, "elapsed": 252, "user": {"displayName": "<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": 
"https://localhost:8080/", "height": 425} df_quake_sp500.info() # + [markdown] id="SgCP6zZy9f31" colab_type="text" # ##Linear Regression # + id="V933NzUtBNe6" colab_type="code" colab={} X = df_quake_sp500[['dates', 'Mag', 'Lat', 'Long', 'Depth']] y = df_quake_sp500['Appr_Day_30'] # + id="zf7Z1QE_Bany" colab_type="code" outputId="cd3bdefd-7998-4da4-d900-9a76eb915770" executionInfo={"status": "ok", "timestamp": 1561129574182, "user_tz": 300, "elapsed": 281, "user": {"displayName": "<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": "https://localhost:8080/", "height": 119} X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, random_state=42) print("Original shape:", X.shape, "\n") print("X_train shape:", X_train.shape) print("X_test shape:", X_test.shape) print("y_train shape:", y_train.shape) print("y_test shape:", y_test.shape) # + id="5Qwl9LtKBiKT" colab_type="code" colab={} model = LinearRegression() linear_reg = model.fit(X_train, y_train) lin_reg_score = linear_reg.score(X_train, y_train) # + id="3x1jiLB9Bsfl" colab_type="code" outputId="681eb183-6c87-4007-f26a-2eaa4a85dd81" executionInfo={"status": "ok", "timestamp": 1561129602158, "user_tz": 300, "elapsed": 261, "user": {"displayName": "<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": "https://localhost:8080/", "height": 187} beta_0 = model.intercept_ beta_i = model.coef_[0] print("Slope Coefficient: ", beta_i) print("\nIntercept Value: ", beta_0) print("\nCoefficients:") for i in range(X.shape[1]): print(X.columns[i], '\t', model.coef_[i]) # + id="E9hZ0yiYBzF6" colab_type="code" outputId="a19084df-5eea-4a6d-bc8f-66de025e0dfb" executionInfo={"status": "ok", "timestamp": 1561129723466, "user_tz": 300, "elapsed": 255, "user": {"displayName": 
"<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": "https://localhost:8080/", "height": 119} y_test_predict = model.predict(X_test) RMSE = np.sqrt(mean_squared_error(y_test, y_test_predict)) R2= r2_score(y_test, y_test_predict) print("For S&P 500, Incident Mag >= 5.5 ({} incidents)".format(df_quake_sp500.shape[0])) print("Linear Regression Model score:", lin_reg_score) print('\nLinear Regression Model Predictive Accuracy:') print('RMSE is {}'.format(RMSE)) print('R^2 is {}'.format(R2)) # + [markdown] id="_ntbyYuNFfnv" colab_type="text" # ##Logistic Regression # + id="Xh4TfOU3Dl1s" colab_type="code" colab={} df = df_quake_sp500 # + id="up3KBlBbE7hL" colab_type="code" outputId="309ade18-f3a1-4528-a68e-ed5552ee8a3f" executionInfo={"status": "ok", "timestamp": 1561130128692, "user_tz": 300, "elapsed": 362, "user": {"displayName": "<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": "https://localhost:8080/", "height": 442} #encode object columns object_columns = list(df.select_dtypes(include=['object'])) df[object_columns] = df[object_columns].apply(LabelEncoder().fit_transform) print(df.info()) # + id="cbsjN0lKyhvz" colab_type="code" outputId="d2e659de-238e-4314-fb00-cf9bf999a150" executionInfo={"status": "ok", "timestamp": 1561130133815, "user_tz": 300, "elapsed": 266, "user": {"displayName": "<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": "https://localhost:8080/", "height": 34} y = df['Appr_Day_30'].astype(str) X = df[['dates', 'Mag', 'Lat', 'Long', 'Depth', 'magType', 'Place', 'Type', 'locationSource', 'magSource']] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) 
X_train.shape, X_test.shape, y_train.shape, y_test.shape # + id="QubjVvpLzp-D" colab_type="code" outputId="191ac569-4c62-4d8d-e3bf-b86dc80f08ca" executionInfo={"status": "ok", "timestamp": 1561130135846, "user_tz": 300, "elapsed": 266, "user": {"displayName": "<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": "https://localhost:8080/", "height": 80} X_train.sample() # + id="bhdzQXjgy-Ay" colab_type="code" outputId="ad628dcc-1d6f-4a4a-b232-431f9effb956" executionInfo={"status": "ok", "timestamp": 1561131279601, "user_tz": 300, "elapsed": 1103650, "user": {"displayName": "<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": "https://localhost:8080/", "height": 51} # %%time log_reg = LogisticRegression(multi_class='ovr', solver='liblinear', max_iter=100) log_reg_fit = log_reg.fit(X_train, y_train) # + id="FfyTwiCaNAjv" colab_type="code" outputId="ef0788b2-d96a-45e8-99a4-71eb08d5ec11" executionInfo={"status": "ok", "timestamp": 1561131419731, "user_tz": 300, "elapsed": 340, "user": {"displayName": "<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": "https://localhost:8080/", "height": 102} log_reg # + id="vfarGipoJvKT" colab_type="code" outputId="c9a7e79e-0b25-47d7-c5e7-71ce5e3ba0c6" executionInfo={"status": "ok", "timestamp": 1561131427950, "user_tz": 300, "elapsed": 6284, "user": {"displayName": "<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": "https://localhost:8080/", "height": 68} print("For S&P 500, Incident Mag >= 6.7 ({} incidents)".format(df_quake_sp500.shape[0])) 
print("Logistic Regression Model score:", log_reg_fit.score(X_train, y_train)) predictions = log_reg.predict(X_test) print("Logistic Regression prediction accuracy:", accuracy_score(y_test, predictions)) # + id="J8sP1YhH6sQ6" colab_type="code" outputId="4c96a2b2-28cf-474e-d6e3-0830536ada02" executionInfo={"status": "ok", "timestamp": 1561045512153, "user_tz": 300, "elapsed": 267, "user": {"displayName": "<NAME>.", "photoUrl": "https://lh3.googleusercontent.com/-p2FrFpD_hQk/AAAAAAAAAAI/AAAAAAAAAkU/Qol50T4G-Pc/s64/photo.jpg", "userId": "11714338394019695389"}} colab={"base_uri": "https://localhost:8080/", "height": 68} log_reg.coef_[0]
financial_models/S&P500_Linear_Logistic_Regression_5.5+.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Mortal Fibonacci Rabbits # ## Problem # Recall the definition of the Fibonacci numbers from โ€œRabbits and Recurrence Relationsโ€, which followed the recurrence relation Fn=Fnโˆ’1+Fnโˆ’2 and assumed that each pair of rabbits reaches maturity in one month and produces a single pair of offspring (one male, one female) each subsequent month. # # Our aim is to somehow modify this recurrence relation to achieve a dynamic programming solution in the case that all rabbits die out after a fixed number of months. See Figure 4 for a depiction of a rabbit tree in which rabbits live for three months (meaning that they reproduce only twice before dying). # # Given: Positive integers nโ‰ค100 and mโ‰ค20. # # Return: The total number of pairs of rabbits that will remain after the n-th month if all rabbits live for m months. # # # + n , m = map(int,input().split()) dp = [0]*101 birth = [0]*101 birth[1]=1 for i in range(2,n+1) : dp[i] = birth[i-1] + dp[i-1] birth[i] = dp[i-1] if i > m : dp[i]-=birth[i-m] print(dp[n]+birth[n]) # -
Bioinformatics Stronghold/LEVEL 2/FIBD.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PySpark (Local) # language: python # name: pyspark_local # --- from pyspark.sql import SparkSession from datetime import datetime import math spark = SparkSession.builder.getOrCreate() # # RDD method # ## Input Data # + registerRDD_raw = sc.textFile("/data/students/bigdata_internet/lab3/register.csv").map(lambda row:row.split("\t")) # print(registerRDD_raw.take(3)) print("Total line of the raw register RDD = ",registerRDD_raw.count()) def regFilter(e): if(e[0]=="station"): return False if(e[2]=='0' and e[3]=='0'): return False return True registerRDD = registerRDD_raw.filter(regFilter) # print(registerRDD.take(3)) print("Lines of the filted register RDD = ", registerRDD.count()) stationsRDD= sc.textFile("/data/students/bigdata_internet/lab3/stations.csv").map(lambda row:row.split("\t")).filter(lambda row:False if row[0]=="id" else True) # stationsRDD.take(3) # - # transfrom stationsRDD to pair RDD stationsPairRDD = stationsRDD.map(lambda e:(e[0],[e[1],e[2],e[3]])) stationsPairRDD.take(3) # ## Exercise # + # calcultate the criticality def registerRemap(row): key = str(row[0])+"-"+datetime.strptime(row[1],"%Y-%m-%d %H:%M:%S").strftime("%w-%H") val=[0,1] #val[0] is the flag for cretical reading, val[1] is a counter for reading if row[3]=='0': val[0] = 1 #the station is critical for that reading return (key,val) STPairedRegisterRDD = registerRDD.map(registerRemap) creticality=STPairedRegisterRDD.reduceByKey(lambda e1,e2:[e1[0]+e2[0],e1[1]+e2[1]]).mapValues(lambda v :format(v[0]/v[1],'.3f')) creticality.take(3) # - # filter the pairs greater than the threshold threshold = 0.6 def thresholdFilter(tup): (key,val)=tup if float(val)>=threshold: return True return False creticalityFilted = creticality.filter(thresholdFilter) creticalityFilted.take(3) # order the result by increasing criticality def 
swapKeyVal(tup): (key,val)= tup return (val,key) creticalityFiltedSorted = creticalityFilted.map(swapKeyVal).sortByKey().map(swapKeyVal) creticalityFiltedSorted.take(3) # + # join the criticality with the station inforamtion and store def remapCreticality(tup): (key,val)=tup keyList = key.split("-") day = "Sunday" if keyList[1]=='1': day="Monday" elif keyList[1]=='2': day="Tuesday" elif keyList[1]=='3': day="Wednesday" elif keyList[1]=='4': day ="Thursday" elif keyList[1]=='5': day ="Friday" elif keyList[1]=='6': day = "Saturday" valList = [] valList.append(day) valList.append(keyList[2]) valList.append(val) return (keyList[0],valList) creticalityFiltedSortedJoined=creticalityFiltedSorted.map(remapCreticality).join(stationsPairRDD) def remapToResult(tup): (key,(v1,v2))=tup result = [] result.append(key) result.append(v2[0]) result.append(v2[1]) result.append(v1[0]) result.append(v1[1]) result.append(v1[2]) return result ResultRDD = creticalityFiltedSortedJoined.map(remapToResult) print("The number of obtained critical pairs is: ",ResultRDD.count()) print(ResultRDD.collect()) # - df = spark.createDataFrame(ResultRDD,["station","station longitude","station latitude",\ "day of week","hour","criticality value"]) df.write.csv("./lab3/RDD_result.csv",header=True,sep="\t") # # Dataframe Method # ## Input Data # + df_register_raw = spark.read.load("/data/students/bigdata_internet/lab3/register.csv", format="csv", header=True, inferSchema=True, sep="\t") # df_register_raw.show(3) print("The total number of data is=",df_register_raw.count()) df_stations = spark.read.load("/data/students/bigdata_internet/lab3/stations.csv", format="csv", header=True, inferSchema=True, sep="\t") # df_stations.show(3) # fitering the uncorrect data df_register = df_register_raw.filter("used_slots!=0 or free_slots!=0 ") print("Number of data after filtering=",df_register.count()) # - # ## Exercise # + spark.udf.register("critical",lambda x:1 if x==0 else 0) 
spark.udf.register("calCriticality",lambda a,b:format(a/b,'.3f')) # calculate the critiality dfRemap = df_register.selectExpr("station","date_format(timestamp,'EEEE') AS day", "date_format(timestamp,'H') As hour","critical(free_slots) AS critical" ) dfRemap.show(3) dfRemap.createOrReplaceTempView("register") dfSumCount = spark.sql("SELECT station,day,hour,sum(critical) AS numCritial,count(critical) AS totalNum FROM register GROUP BY station,day,hour") dfCriticality = dfSumCount.selectExpr("station","day","hour","calCriticality(numCritial,totalNum) AS criticality") dfCriticality.show(3) # - # filter the data greater than the threshold threshold=0.6 condition = "criticality>="+str(threshold) dfSelected=dfCriticality.filter(condition) dfSelected.show() # sorting dfSorted = dfSelected.sort("criticality") dfSorted.show() # combine two df df_result = dfSorted.join(df_stations,dfSorted.station==df_stations.id).select("station","latitude","longitude","day","hour","criticality") print("The number of obtained critical pairs is: ",df_result.count()) df_result.show() df_result.write.csv("./lab3/Dataframe_result.csv",header=True,sep="\t") # # BONUS TASK #station distance calculation def distanceToCenter(lat2,long2): toRad = 0.01745329252 lat1 = 41.386904*toRad long1 =2.169989*toRad lat2=lat2*toRad long2=long2*toRad r = 6356.725 # km temp1 =math.sin((lat2-lat1)/2)*math.sin((lat2-lat1)/2) temp2 = math.cos(lat1)*math.cos(lat2)*math.sin((long2-long1)/2)*math.sin((long2-long1)/2) return format(2*r*math.asin(math.sqrt(temp1+temp2)),'.3f') spark.udf.register("distanceCal",lambda lat,long:distanceToCenter(lat,long)) dfStationDist = df_stations.selectExpr("id","name","distanceCal(latitude,longitude) AS distance") dfStationDist.show(3) # Average among reading of used slots df_register.createOrReplaceTempView("register2") dfStationAvg = spark.sql("SELECT station, avg(used_slots) AS avg_used FROM register2 GROUP BY station") dfStationAvg.show(3) dfStationNear = 
dfStationDist.filter("distance<1.5").join(dfStationAvg,dfStationAvg.station==dfStationDist.id) dfStationNear.show(3) U1=dfStationNear.selectExpr("avg(avg_used) AS U1" ) U1.show() dfStationFar = dfStationDist.filter("distance>=1.5").join(dfStationAvg,dfStationAvg.station==dfStationDist.id) dfStationFar.show(3) U2=dfStationFar.selectExpr("avg(avg_used) AS U2" ) U2.show() # Answer: There are more used the station closer to center. U1=8.10 U2=7.89
Solutions/lab3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv # language: python # name: venv # --- # + from functools import wraps import numpy as np import tensorflow as tf from keras import backend as K from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D from keras.layers.advanced_activations import LeakyReLU from keras.layers.normalization import BatchNormalization from keras.models import Model from keras.regularizers import l2 import numpy as np from keras.layers import Input, Lambda from keras.models import Model from keras.optimizers import Adam from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss from yolo3.utils import get_random_data from yolo3.utils import compose # - def yolo_body(inputs, num_anchors, num_classes): """Create YOLO_V3 model CNN body in Keras.""" darknet = Model(inputs, darknet_body(inputs)) x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5)) x = compose( DarknetConv2D_BN_Leaky(256, (1,1)), UpSampling2D(2))(x) x = Concatenate()([x,darknet.layers[152].output]) x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5)) x = compose( DarknetConv2D_BN_Leaky(128, (1,1)), UpSampling2D(2))(x) x = Concatenate()([x,darknet.layers[92].output]) x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5)) print(y1.shape) return Model(inputs, [y1,y2,y3]) @wraps(Conv2D) def DarknetConv2D(*args, **kwargs): """Wrapper to set Darknet parameters for Convolution2D.""" darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)} darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same' darknet_conv_kwargs.update(kwargs) print(Conv2D(*args, **darknet_conv_kwargs)) return Conv2D(*args, **darknet_conv_kwargs) def DarknetConv2D_BN_Leaky(*args, 
**kwargs): """Darknet Convolution2D followed by BatchNormalization and LeakyReLU.""" no_bias_kwargs = {'use_bias': False} no_bias_kwargs.update(kwargs) return compose( DarknetConv2D(*args, **no_bias_kwargs), BatchNormalization(), LeakyReLU(alpha=0.1)) def darknet_body(x): '''Darknent body having 52 Convolution2D layers''' print(x) x = DarknetConv2D_BN_Leaky(32, (3,3))(x) print(x) # print('------------------------') # print(x) # print('------------------------') x = resblock_body(x, 64, 1) # print(x) # print('------------------------') x = resblock_body(x, 128, 2) # print(x) # print('------------------------') x = resblock_body(x, 256, 8) # print(x) # print('------------------------') x = resblock_body(x, 512, 8) # print(x) # print('------------------------') x = resblock_body(x, 1024, 4) # print(x) return x # ![title](darknet53.png) # + # from keras.layers import Input # inputs = Input(shape=(416, 416, 3)) # darknet = Model(inputs, darknet_body(inputs)) # print('Total layers: ', len(darknet.layers)) # - def get_anchors(anchors_path): '''loads the anchors from a file''' with open(anchors_path) as f: anchors = f.readline() anchors = [float(x) for x in anchors.split(',')] return np.array(anchors).reshape(-1, 2) def resblock_body(x, num_filters, num_blocks): '''A series of resblocks starting with a downsampling Convolution2D''' # Darknet uses left and top padding instead of 'same' mode x = ZeroPadding2D(((1,0),(1,0)))(x) x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x) for i in range(num_blocks): y = compose( DarknetConv2D_BN_Leaky(num_filters//2, (1,1)), DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x) # print('\n') # print('Block: ', i) # print('Res block x: ', x) # print('Res block y: ', x) x = Add()([x,y]) return x def make_last_layers(x, num_filters, out_filters): '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer''' x = compose( DarknetConv2D_BN_Leaky(num_filters, (1,1)), DarknetConv2D_BN_Leaky(num_filters*2, (3,3)), 
DarknetConv2D_BN_Leaky(num_filters, (1,1)), DarknetConv2D_BN_Leaky(num_filters*2, (3,3)), DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x) y = compose( DarknetConv2D_BN_Leaky(num_filters*2, (3,3)), DarknetConv2D(out_filters, (1,1)))(x) return x, y def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False): """Convert final layer features to bounding box parameters.""" num_anchors = len(anchors) # Reshape to batch, height, width, num_anchors, box_params. anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2]) grid_shape = K.shape(feats)[1:3] # height, width grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]), [1, grid_shape[1], 1, 1]) grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]), [grid_shape[0], 1, 1, 1]) grid = K.concatenate([grid_x, grid_y]) grid = K.cast(grid, K.dtype(feats)) feats = K.reshape( feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5]) # Adjust preditions to each spatial grid point and anchor size. 
    # Commented-out earlier version of the two lines below (without the
    # leading ellipsis on the reversed shape tensors).
    # box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    # box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    # Decode raw feature map: xy = sigmoid offset within the cell plus the grid
    # position, normalized by the grid size; wh = exp of the raw prediction
    # scaled by the anchor, normalized by the model input size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[...,::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[...,::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])   # objectness score
    box_class_probs = K.sigmoid(feats[..., 5:])   # per-class probabilities

    if calc_loss == True:
        # Training path: the loss also needs the grid and the raw predictions.
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs


def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes.

    Maps boxes predicted on the (letterboxed) model input back onto the
    original image, returning absolute (y_min, x_min, y_max, x_max)
    coordinates scaled to ``image_shape``.
    '''
    # Work in (y, x)/(h, w) order to match image_shape conventions.
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    # new_shape: size the image occupies inside the letterboxed input;
    # offset/scale undo the padding and resizing.
    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes


def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
    '''Process Conv layer output.

    Decodes one detection feature map into flat per-box coordinates and
    per-class scores: boxes has shape (num_boxes, 4), box_scores has
    shape (num_boxes, num_classes).
    '''
    box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,
        anchors, num_classes, input_shape)
    boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
    boxes = K.reshape(boxes, [-1, 4])
    # Class score = objectness * class probability.
    box_scores = box_confidence * box_class_probs
    box_scores = K.reshape(box_scores, [-1, num_classes])
    return boxes, box_scores


def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input and return filtered boxes.

    Applies score thresholding and per-class non-max suppression to the
    raw model outputs. Returns (boxes_, scores_, classes_) tensors for at
    most ``max_boxes`` detections per class.
    """
    num_layers = len(yolo_outputs)
    # Which anchors belong to which output scale (3-layer full model vs
    # 2-layer tiny model).
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting
    # Model input size is 32x the coarsest feature map.
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
            anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # TODO: use keras backend instead of tf.
        # Keep only boxes whose score for class c passes the threshold,
        # then run NMS within that class.
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_


# +
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
    '''Preprocess true boxes to training input format.

    Assigns each ground-truth box to the best-matching anchor (by IoU of
    the width/height only) and writes it into the grid cell containing
    the box center at the corresponding output scale.

    Parameters
    ----------
    true_boxes: array, shape=(m, T, 5)
        Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
    input_shape: array-like, hw, multiples of 32
    anchors: array, shape=(N, 2), wh
    num_classes: integer

    Returns
    -------
    y_true: list of array, shape like yolo_outputs, xywh are relative values
    '''
    assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'
    num_layers = len(anchors)//3 # default setting
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]

    true_boxes = np.array(true_boxes, dtype='float32')
    input_shape = np.array(input_shape, dtype='int32')
    # Convert corner coords to center/size, normalized by input size
    # (note input_shape is (h, w) so it is reversed for (x, y) order).
    boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
    boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
    true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
    true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]

    m = true_boxes.shape[0]  # batch size
    grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]
    y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),
        dtype='float32') for l in range(num_layers)]

    # Expand dim to apply broadcasting.
    anchors = np.expand_dims(anchors, 0)
    anchor_maxes = anchors / 2.
    anchor_mins = -anchor_maxes
    # A zero-width row marks a padded (absent) box.
    valid_mask = boxes_wh[..., 0]>0

    for b in range(m):
        # Discard zero rows.
        wh = boxes_wh[b, valid_mask[b]]
        if len(wh)==0: continue
        # Expand dim to apply broadcasting.
        wh = np.expand_dims(wh, -2)
        box_maxes = wh / 2.
        box_mins = -box_maxes

        # IoU between each true box and each anchor, both centered at the
        # origin (only width/height matter for anchor assignment).
        intersect_mins = np.maximum(box_mins, anchor_mins)
        intersect_maxes = np.minimum(box_maxes, anchor_maxes)
        intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
        box_area = wh[..., 0] * wh[..., 1]
        anchor_area = anchors[..., 0] * anchors[..., 1]
        iou = intersect_area / (box_area + anchor_area - intersect_area)

        # Find best anchor for each true box
        best_anchor = np.argmax(iou, axis=-1)

        for t, n in enumerate(best_anchor):
            for l in range(num_layers):
                if n in anchor_mask[l]:
                    # (i, j) = grid cell containing the box center at scale l.
                    i = np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')
                    j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')
                    k = anchor_mask[l].index(n)
                    c = true_boxes[b,t, 4].astype('int32')
                    y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]
                    y_true[l][b, j, i, k, 4] = 1       # objectness
                    y_true[l][b, j, i, k, 5+c] = 1     # one-hot class

    return y_true


def box_iou(b1, b2):
    '''Return iou tensor

    Parameters
    ----------
    b1: tensor, shape=(i1,...,iN, 4), xywh
    b2: tensor, shape=(j, 4), xywh

    Returns
    -------
    iou: tensor, shape=(i1,...,iN, j)
    '''
    # Expand dim to apply broadcasting.
    b1 = K.expand_dims(b1, -2)
    b1_xy = b1[..., :2]
    b1_wh = b1[..., 2:4]
    b1_wh_half = b1_wh/2.
    b1_mins = b1_xy - b1_wh_half
    b1_maxes = b1_xy + b1_wh_half

    # Expand dim to apply broadcasting.
    b2 = K.expand_dims(b2, 0)
    b2_xy = b2[..., :2]
    b2_wh = b2[..., 2:4]
    b2_wh_half = b2_wh/2.
    b2_mins = b2_xy - b2_wh_half
    b2_maxes = b2_xy + b2_wh_half

    # Pairwise intersection-over-union via broadcast min/max of corners.
    intersect_mins = K.maximum(b1_mins, b2_mins)
    intersect_maxes = K.minimum(b1_maxes, b2_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1_wh[..., 0] * b1_wh[..., 1]
    b2_area = b2_wh[..., 0] * b2_wh[..., 1]
    iou = intersect_area / (b1_area + b2_area - intersect_area)

    return iou


def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):
    '''Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(N, 2), wh
    num_classes: integer
    ignore_thresh: float, the iou threshold whether to ignore object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)
    '''
    num_layers = len(anchors)//3 # default setting
    # args packs the model outputs first, then the y_true placeholders.
    yolo_outputs = args[:num_layers]
    y_true = args[num_layers:]
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
    input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
    grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]
    loss = 0
    m = K.shape(yolo_outputs[0])[0] # batch size, tensor
    mf = K.cast(m, K.dtype(yolo_outputs[0]))

    for l in range(num_layers):
        object_mask = y_true[l][..., 4:5]     # 1 where a true box is assigned
        true_class_probs = y_true[l][..., 5:]

        grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
             anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)
        pred_box = K.concatenate([pred_xy, pred_wh])

        # Darknet raw box to calculate loss.
        # Invert the yolo_head decoding so targets live in raw-prediction space.
        raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid
        raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])
        raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf
        # Weight small boxes more heavily (2 - w*h).
        box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]

        # Find ignore mask, iterate over each of batch.
        # Predictions overlapping any true box above ignore_thresh do not
        # contribute a no-object confidence penalty.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')
        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])
            iou = box_iou(pred_box[b], true_box)
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))
            return b+1, ignore_mask
        # NOTE(review): K.control_flow_ops relies on the old TF1-era Keras
        # backend exposing tf.control_flow_ops — confirm against the pinned
        # Keras/TF versions.
        _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        # K.binary_crossentropy is helpful to avoid exp overflow.
        xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)
        wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])
        confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \
            (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask
        class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)

        # Normalize each term by batch size and accumulate over scales.
        xy_loss = K.sum(xy_loss) / mf
        wh_loss = K.sum(wh_loss) / mf
        confidence_loss = K.sum(confidence_loss) / mf
        class_loss = K.sum(class_loss) / mf
        loss += xy_loss + wh_loss + confidence_loss + class_loss
        if print_loss:
            loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')
    return loss
# -


def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
            weights_path='model_data/yolo_weights.h5'):
    '''create the training model

    Builds the YOLOv3 body plus a Lambda layer that computes yolo_loss,
    so the returned Model takes [image, *y_true] and outputs the loss.

    NOTE(review): load_pretrained, freeze_body and weights_path are not
    used in this body — confirm whether weight loading/freezing was
    intentionally removed.
    '''
    K.clear_session() # get a new session
    # NOTE(review): input is hard-coded to 416x416 even though input_shape
    # is a parameter — confirm this is intended.
    image_input = Input(shape=(416, 416, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    # One y_true placeholder per output scale (strides 32/16/8).
    y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
        num_anchors//3, num_classes+5)) for l in range(3)]

    model_body = yolo_body(image_input, num_anchors//3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    # Wrap the loss as a layer so Keras can train with a dummy objective.
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model


# Build the training model for 8 classes and visualize its architecture.
anchors = get_anchors('model_data/train_anchors.txt')
model = create_model((416,416), anchors, 8)

from keras.utils import plot_model
plot_model(model, to_file='model.png', show_shapes=True)
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Working with Dates # # This is a tutorial on how to prepare temporal data for use with Lux. To display temporal fields in Lux, the column must be converted into Pandas's [`datetime`](https://docs.python.org/3/library/datetime.html) objects. Lux automatically detects attribute named as `date`, `month`, `year`, `day`, and `time` as a datetime field and recognizes them as temporal data types. If you're temporal attributes do not have these names, read more to find out how to work with temporal data types in Lux. import pandas as pd import lux from lux.vis.Vis import Vis # ### Converting Strings to Datetime objects # To convert column referencing dates/times into [`datetime`](https://docs.python.org/3/library/datetime.html) objects, we use [`pd.to_datetime`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html), as follows: # # # ``` # pd.to_datetime(['2020-01-01', '2020-01-15', '2020-02-01'],format="%Y-%m-%d") # ``` # As a toy example, a dataframe might contain a `record_date` attribute as strings of dates: # + df = pd.DataFrame({'record_date': ['2020-02-01', '2020-03-01', '2020-04-01', '2020-05-01','2020-06-01',], 'value': [10.5,15.2,20.3,25.2, 14.2]}) df # - # By default, the `record_date` attribute is detected as an `object` type as Pandas's data type [`dtype`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.dtypes.html): df.dtypes # Since `record_date` is detected as an object type in Pandas, the `record_date` field is recognized as a `nominal` field in Lux, instead of a `temporal` field: df.data_type # The typing has implications on the generated visualizations, since nominal chart types are displayed as bar charts, whereas temporal fields are plotted as time series line charts. 
vis = Vis(["record_date","value"],df) vis # To fix this, we can convert the `record_date` column into a datetime object by doing: df["record_date"] = pd.to_datetime(df["record_date"],format="%Y-%m-%d") df["record_date"] # After changing the Pandas data type to datetime, we see that date field is recognized as temporal fields in Lux. df.data_type vis.refresh_source(df) vis # ### Visualizing Trends across Different Timescales # # Lux automatically detects the temporal attribute and plots the visualizations across different timescales to showcase any cyclical patterns. Here, we see that the `Temporal` tab displays the yearly, monthly, and weekly trends for the number of stock records. from vega_datasets import data df = data.stocks() df.recommendation["Temporal"] # ### Advanced Date Manipulation # You might notice earlier that all the dates in our example dataset are the first of the month. In this case, there may be situations where we only want to list the year and month, instead of the full date. Here, we look at how to handle these cases. # # Below we look at an example stocks dataset that also has `month_date` field with each row representing data for the first of each month. df.dtypes vis = Vis(["monthdate","price"],df) vis # If we only want Lux to output the month and the year, we can convert the column to a [`PeriodIndex`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.PeriodIndex.html) using [`to_period`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DatetimeIndex.to_period.html). The `freq` argument specifies the granularity of the output. In this case, we are using 'M' for monthly. You can find more about how to specify time periods [here](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects). 
df["monthdate"] = pd.DatetimeIndex(df["monthdate"]).to_period(freq='M') vis.refresh_source(df) vis # ### Specifying Intents With Datetime Fields # The string representation seen in the Dataframe can be used to filter out specific dates. # # For example, in the above `stocks` dataset, we converted the date column to a `PeriodIndex`. Now the string representation only shows the granularity we want to see. We can use that string representation to filter the dataframe in Pandas: df[df["monthdate"] == '2008-11'] # We can also use the same string representation for specifying an intent in Lux. vis = Vis(["monthdate=2008-11","price","symbol"],df) vis
tutorial/5-datetime.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <i>Copyright (c) Microsoft Corporation. All rights reserved.</i> # # <i>Licensed under the MIT License.</i> # # Local Automated Machine Learning Model with ACI Deployment for Predicting Sentence Similarity # ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/nlp/examples/sentence_similarity/automl_local_deployment_aci.png) # This notebook demonstrates how to use [Azure Machine Learning Service's](https://azure.microsoft.com/en-us/services/machine-learning-service/ # ) Automated Machine Learning ([AutoML](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-automated-ml # )) locally to automate machine learning model selection and tuning and how to use Azure Container Instance ([ACI](https://azure.microsoft.com/en-us/services/container-instances/ # )) for deployment. We utilize the STS Benchmark dataset to predict sentence similarity and utilize AutoML's text preprocessing features. # ## Table of Contents # 1. [Introduction](#1.-Introduction) # [1.1 What is Azure AutoML?](#1.1-What-is-Azure-AutoML?) # [1.2 Modeling Problem](#1.2-Modeling-Problem) # 1. [Data Preparation](#2.-Data-Preparation) # 1. [Create AutoML Run](#3.-Create-AutoML-Run) # [3.1 Link to or create a Workspace](#3.1-Link-to-or-create-a-Workspace) # [3.2 Create AutoMLConfig object](#3.2-Create-AutoMLConfig-object) # [3.3 Run Experiment](#3.3-Run-Experiment) # 1. 
[Deploy Sentence Similarity Model](#4.-Deploy-Sentence-Similarity-Model) # [4.1 Retrieve the Best Model](#4.1-Retrieve-the-Best-Model) # [4.2 Register the Fitted Model for Deployment](#4.2-Register-the-Fitted-Model-for-Deployment) # [4.3 Create an Entry Script](#4.3-Create-an-Entry-Script) # [4.4 Create a YAML File for the Environment](#4.4-Create-a-YAML-File-for-the-Environment) # [4.5 Create a Container Image](#4.5-Create-a-Container-Image) # [4.6 Deploy the Image as a Web Service to Azure Container Instance](#4.6-Deploy-the-Image-as-a-Web-Service-to-Azure-Container-Instance) # [4.7 Test Deployed Model](#4.7-Test-Deployed-Model) # 1. [Clean](#5-Clean) # ### 1.1 What is Azure AutoML? # # Automated machine learning ([AutoML](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-automated-ml)) is a capability of Microsoft's [Azure Machine Learning service](https://azure.microsoft.com/en-us/services/machine-learning-service/ # ). The goal of AutoML is to improve the productivity of data scientists and democratize AI by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning. # ![](https://nlpbp.blob.core.windows.net/images/automl.PNG) # The AutoML model selection and tuning process can be easily tracked through the Azure portal or directly in python notebooks through the use of widgets. AutoML quickly selects a high quilty machine learning model tailored for your prediction problem. In this notebook, we walk through the steps of preparing data, setting up an AutoML experiment, and evaluating the results of our best model. 
More information about running AutoML experiments in Python can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train). # ### 1.2 Modeling Problem # # The regression problem we will demonstrate is predicting sentence similarity scores on the STS Benchmark dataset. The [STS Benchmark dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark#STS_benchmark_dataset_and_companion_dataset) contains a selection of English datasets that were used in Semantic Textual Similarity (STS) tasks 2012-2017. The dataset contains 8,628 sentence pairs with a human-labeled integer representing the sentences' similarity (ranging from 0, for no meaning overlap, to 5, meaning equivalence). The sentence pairs will be embedded using AutoML's built-in preprocessing, so we'll pass the sentences directly into the model. # + # Set the environment path to find NLP import sys sys.path.append("../../") import time import os import pandas as pd import shutil import numpy as np import torch import sys from scipy.stats import pearsonr from scipy.spatial import distance from sklearn.externals import joblib import json import scrapbook as sb # Import utils from utils_nlp.azureml import azureml_utils from utils_nlp.dataset import stsbenchmark from utils_nlp.dataset.preprocess import ( to_lowercase, to_spacy_tokens, rm_spacy_stopwords, ) from utils_nlp.common.timer import Timer # Tensorflow dependencies for Google Universal Sentence Encoder import tensorflow_hub as hub # AzureML packages import azureml as aml import logging from azureml.telemetry import set_diagnostics_collection set_diagnostics_collection(send_diagnostics=True) from azureml.train.automl import AutoMLConfig from azureml.core.experiment import Experiment from azureml.widgets import RunDetails from azureml.train.automl.run import AutoMLRun from azureml.core.webservice import AciWebservice, Webservice from azureml.core.image import ContainerImage from azureml.core.conda_dependencies 
import CondaDependencies print("System version: {}".format(sys.version)) print("Azure ML SDK Version:", aml.core.VERSION) print("Pandas version: {}".format(pd.__version__)) # + tags=["parameters"] BASE_DATA_PATH = "../../data" CPU_CORES = 1 MEMORY_GB = 8 # Define the settings for AutoML automl_task = "regression" automl_iteration_timeout = 15 automl_iterations = 50 automl_metric = "spearman_correlation" automl_preprocess = True automl_model_blacklist = ['XGBoostRegressor'] config_path = ( "./.azureml" ) # Path to the directory containing config.json with azureml credentials webservice_name = "aci-automl-service" #name for webservice; must be unique within your workspace # Azure resources subscription_id = "YOUR_SUBSCRIPTION_ID" resource_group = "YOUR_RESOURCE_GROUP_NAME" workspace_name = "YOUR_WORKSPACE_NAME" workspace_region = "YOUR_WORKSPACE_REGION" #Possible values eastus, eastus2 and so on. # - automl_settings = { "task": automl_task, # type of task: classification, regression or forecasting "debug_log": "automated_ml_errors.log", "path": "./automated-ml-regression", "iteration_timeout_minutes": automl_iteration_timeout, # How long each iteration can take before moving on "iterations": automl_iterations, # Number of algorithm options to try "primary_metric": automl_metric, # Metric to optimize "preprocess": automl_preprocess, # Whether dataset preprocessing should be applied "blacklist_models": automl_model_blacklist #exclude this model due to installation issues } # # 2. Data Preparation # ## STS Benchmark Dataset # As described above, the STS Benchmark dataset contains 8.6K sentence pairs along with a human-annotated score for how similiar the two sentences are. We will load the training, development (validation), and test sets provided by STS Benchmark and preprocess the data (lowercase the text, drop irrelevant columns, and rename the remaining columns) using the utils contained in this repo. 
Each dataset will ultimately have three columns: _sentence1_ and _sentence2_ which contain the text of the sentences in the sentence pair, and _score_ which contains the human-annotated similarity score of the sentence pair. # Load in the raw datasets as pandas dataframes train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split="train") dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split="dev") test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split="test") # Clean each dataset by lowercasing text, removing irrelevant columns, # and renaming the remaining columns train_clean = stsbenchmark.clean_sts(train_raw) dev_clean = stsbenchmark.clean_sts(dev_raw) test_clean = stsbenchmark.clean_sts(test_raw) # Convert all text to lowercase train = to_lowercase(train_clean) dev = to_lowercase(dev_clean) test = to_lowercase(test_clean) print("Training set has {} sentences".format(len(train))) print("Development set has {} sentences".format(len(dev))) print("Testing set has {} sentences".format(len(test))) train.head() # # 3. Create AutoML Run # AutoML can be used for classification, regression or timeseries experiments. Each experiment type has corresponding machine learning models and metrics that can be optimized (see [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train)) and the options will be delineated below. As a first step we connect to an existing workspace or create one if it doesn't exist. # ## 3.1 Link to or create a Workspace # The following cell looks to set up the connection to your [Azure Machine Learning service Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace). You can choose to connect to an existing workspace or create a new one. # # **To access an existing workspace:** # 1. 
If you have a `config.json` file, you do not need to provide the workspace information; you will only need to update the `config_path` variable that is defined above which contains the file. # 2. Otherwise, you will need to supply the following: # * The name of your workspace # * Your subscription id # * The resource group name # # **To create a new workspace:** # # Set the following information: # * A name for your workspace # * Your subscription id # * The resource group name # * [Azure region](https://azure.microsoft.com/en-us/global-infrastructure/regions/) to create the workspace in, such as `eastus2`. # # This will automatically create a new resource group for you in the region provided if a resource group with the name given does not already exist. ws = azureml_utils.get_or_create_workspace( config_path=config_path, subscription_id=subscription_id, resource_group=resource_group, workspace_name=workspace_name, workspace_region=workspace_region, ) print( "Workspace name: " + ws.name, "Azure region: " + ws.location, "Subscription id: " + ws.subscription_id, "Resource group: " + ws.resource_group, sep="\n", ) # ## 3.2 Create AutoMLConfig object # Next, we specify the parameters for the AutoMLConfig class. # **task** # AutoML supports the following base learners for the regression task: Elastic Net, Light GBM, Gradient Boosting, Decision Tree, K-nearest Neighbors, LARS Lasso, Stochastic Gradient Descent, Random Forest, Extremely Randomized Trees, XGBoost, DNN Regressor, Linear Regression. In addition, AutoML also supports two kinds of ensemble methods: voting (weighted average of the output of multiple base learners) and stacking (training a second "metalearner" which uses the base algorithms' predictions to predict the target variable). 
Specific base learners can be included or excluded in the parameters for the AutoMLConfig class (whitelist_models and blacklist_models) and the voting/stacking ensemble options can be specified as well (enable_voting_ensemble and enable_stack_ensemble) # **preprocess** # AutoML also has advanced preprocessing methods, eliminating the need for users to perform this manually. Data is automatically scaled and normalized but an additional parameter in the AutoMLConfig class enables the use of more advanced techniques including imputation, generating additional features, transformations, word embeddings, etc. (full list found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-create-portal-experiments#preprocess)). Note that algorithm-specific preprocessing will be applied even if preprocess=False. # **primary_metric** # The regression metrics available are the following: Spearman Correlation (spearman_correlation), Normalized RMSE (normalized_root_mean_squared_error), Normalized MAE (normalized_mean_absolute_error), and R2 score (r2_score) # **Constraints:** # There is a cost_mode parameter to set cost prediction modes (see options [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlconfig?view=azure-ml-py)). To set constraints on time there are multiple parameters including experiment_exit_score (target score to exit the experiment after achieving), experiment_timeout_minutes (maximum amount of time for all combined iterations), and iterations (total number of different algorithm and parameter combinations to try). # **Note**: we are directly passing in sentence pairs as data because we are relying upon AutoML's built-in preprocessing (by setting preprocess = True in the AutoMLConfig parameters) to perform the embedding step. 
# + X_train = train.drop("score", axis=1).values y_train = train["score"].values.flatten() X_validation = dev.drop("score", axis=1).values y_validation = dev["score"].values.flatten() # local compute automated_ml_config = AutoMLConfig( X=X_train, y=y_train, X_valid=X_validation, y_valid=y_validation, verbosity=logging.ERROR, **automl_settings # where the autoML main settings are defined ) # - # ## 3.3 Run Experiment # # Run the experiment locally and inspect the results using a widget experiment = Experiment(ws, "NLP-SS-automl") local_run = experiment.submit(automated_ml_config, show_output=True) # + #local_run.cancel() # - # The results of the completed run can be visualized in two ways. First, by using a RunDetails widget as shown in the cell below. Second, by accessing the [Azure portal](https://portal.azure.com), selecting your workspace, clicking on _Experiments_ and then selecting the name and run number of the experiment you want to inspect. Both these methods will show the results and duration for each iteration (algorithm tried), a visualization of the results, and information about the run including the compute target, primary metric, etc. # Inspect the run details using the provided widget RunDetails(local_run).show() # ![](https://nlpbp.blob.core.windows.net/images/autoMLwidget.PNG) # # 4. Deploy Sentence Similarity Model # Deploying an Azure Machine Learning model as a web service creates a REST API. You can send data to this API and receive the prediction returned by the model. # In general, you create a webservice by deploying a model as an image to a Compute Target. # # Some of the Compute Targets are: # 1. Azure Container Instance # 2. Azure Kubernetes Service # 3. Local web service # # The general workflow for deploying a model is as follows: # 1. Register a model # 2. Prepare to deploy # 3. Deploy the model to the compute target # 4. 
Test the deployed model (webservice) # # In this notebook, we walk you through the process of creating a webservice running on Azure Container Instance by deploying an AutoML model as an image. ACI is typically used for low scale, CPU-based workloads. (You can find more information on deploying and serving models [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where)) # # ## 4.1 Retrieve the Best Model # Now we can identify the model that maximized performance on a given metric (spearman correlation in our case) using the `get_output` method which returns the best_run (AutoMLRun object with information about the experiment) and fitted_model ([Pipeline]((https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb)) object) across all iterations. Overloads on `get_output` allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration. # # The different steps that make up the pipeline can be accessed through `fitted_model.named_steps` and information about data preprocessing is available through `fitted_model.named_steps['datatransformer'].get_featurization_summary()` best_run, fitted_model = local_run.get_output() # ## 4.2 Register the Fitted Model for Deployment # # Registering a model means registering one or more files that make up a model. The Machine Learning models are registered in your current Aure Machine Learning Workspace. The model can either come from Azure Machine Learning or another location, such as your local machine. # Below we show how a model is registered from the results of an experiment run. If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered. # # See other ways to register a model [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where). 
# + description = "AutoML Model" tags = {"area": "nlp", "type": "sentence similarity automl"} name = "automl" model = local_run.register_model(description=description, tags=tags) print(local_run.model_id) # - # ## 4.3 Create an Entry Script # In this section we show an example of an entry script, which is called from the deployed webservice. `score.py` is our entry script. The script must contain: # 1. init() - This function loads the model in a global object. # 2. run() - This function is used for model prediction. The inputs and outputs to `run()` typically use JSON for serialization and deserilization. # + # %%writefile score.py import pickle import json import numpy import azureml.train.automl from sklearn.externals import joblib from azureml.core.model import Model def init(): global model model_path = Model.get_model_path( model_name="<<modelid>>" ) # this name is model.id of model that we want to deploy # deserialize the model file back into a sklearn model model = joblib.load(model_path) def run(rawdata): try: data = json.loads(rawdata)["data"] data = numpy.array(data) result = model.predict(data) except Exception as e: result = str(e) return json.dumps({"error": result}) return json.dumps({"result": result.tolist()}) # + # Substitute the actual model id in the script file. script_file_name = "score.py" with open(script_file_name, "r") as cefr: content = cefr.read() with open(script_file_name, "w") as cefw: cefw.write(content.replace("<<modelid>>", local_run.model_id)) # - # ## 4.4 Create a YAML File for the Environment # # To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. The following cells create a file, autoenv.yml, which specifies the dependencies from the run. 
ml_run = AutoMLRun(experiment=experiment, run_id=local_run.id) best_iteration = int( best_run.id.split("_")[-1] ) # get the appended iteration number for the best model dependencies = ml_run.get_run_sdk_dependencies(iteration=best_iteration) dependencies # Add dependencies in the yaml file from the above cell. You must specify the version of "azureml-sdk[automl]" while creating the yaml file. # + myenv = CondaDependencies.create( conda_packages=["numpy", "scikit-learn==0.21.2", "py-xgboost<=0.80", "pandas==0.24.2"], pip_packages=["azureml-sdk[automl]==1.0.48.*"], python_version="3.6.8", ) conda_env_file_name = "automlenv.yml" myenv.save_to_file(".", conda_env_file_name) # - # ## 4.5 Create a Container Image # In this step we create a container image which is wrapper containing the entry script, yaml file with package dependencies and the model. The created image is then deployed as a webservice in the next step. This step can take up to 10 minutes and even longer if the model is large. # + image_config = ContainerImage.image_configuration( execution_script=script_file_name, runtime="python", conda_file=conda_env_file_name, description="Image with automl model", tags={"area": "nlp", "type": "sentencesimilarity automl"}, ) image = ContainerImage.create( name="automl-image", # this is the model object models=[model], image_config=image_config, workspace=ws, ) image.wait_for_creation(show_output=True) # - # If the above step fails, then use the below command to see logs # + # print(image.image_build_log_uri) # - # ## 4.6 Deploy the Image as a Web Service to Azure Container Instance # Azure Container Instances are mostly used for deploying your models as a web service if one or more of the following conditions are true: # 1. You need to quickly deploy and validate your model. # 2. You are testing a model that is under development. # # To set them up properly, we need to indicate the number of CPU cores and the amount of memory we want to allocate to our web service. 
# Set the web service configuration aci_config = AciWebservice.deploy_configuration( cpu_cores=CPU_CORES, memory_gb=MEMORY_GB ) # The final step to deploying our web service is to call `WebService.deploy_from_image()`. This function uses the Docker image and the deployment configuration we created above to perform the following: # 1. Deploy the docker image to an Azure Container Instance # 2. Call the init() function in our scoring file # 3. Provide an HTTP endpoint for scoring calls # # The deploy_from_image method requires the following parameters: # # 1. workspace: the workspace containing the service # 2. name: a unique name used to identify the service in the workspace # 3. image: a docker image object that contains the environment needed for scoring/inference # 4. deployment_config: a configuration object describing the compute type # # **Note:** The web service creation can take a few minutes # + # deploy image as web service aci_service = Webservice.deploy_from_image( workspace=ws, name=webservice_name, image=image, deployment_config=aci_config ) aci_service.wait_for_deployment(show_output=True) print(aci_service.state) # - # Fetch logs to debug in case of failures. # + # print(aci_service.get_logs()) # - # If you want to reuse an existing service versus creating a new one, call the webservice with the name. You can look up all the deployed webservices under deployment in the Azure Portal. Below is an example: # + # aci_service = Webservice(workspace=ws, name='<<serive-name>>') # to use the webservice # aci_service.run() # - # ## 4.7 Test Deployed Model # # Testing the deployed model means running the created webservice. <br> # The deployed model can be tested by passing a list of sentence pairs. The output will be a score between 0 and 5, with 0 indicating no meaning overlap between the sentences and 5 meaning equivalence. 
sentences = [ ["This is sentence1", "This is sentence1"], ["A hungry cat.", "A sleeping cat"], ["Its summer time ", "Winter is coming"], ] data = {"data": sentences} data = json.dumps(data) # + # Set up a Timer to see how long the model takes to predict t = Timer() t.start() score = aci_service.run(input_data=data) t.stop() print("Time elapsed: {}".format(t)) result = json.loads(score) try: output = result["result"] print("Number of samples predicted: {}".format(len(output))) print(output) except: print(result["error"]) # - # Finally, we'll calculate the Pearson Correlation on the test set. # # **What is Pearson Correlation?** # # Our evaluation metric is Pearson correlation ($\rho$) which is a measure of the linear correlation between two variables. The formula for calculating Pearson correlation is as follows: # # $$\rho_{X,Y} = \frac{E[(X-\mu_X)(Y-\mu_Y)]}{\sigma_X \sigma_Y}$$ # # This metric takes a value in [-1,1] where -1 represents a perfect negative correlation, 1 represents a perfect positive correlation, and 0 represents no correlation. We utilize the Pearson correlation metric as this is the main metric that [SentEval](http://nlpprogress.com/english/semantic_textual_similarity.html), a widely-used evaluation toolkit for evaluation sentence representations, uses for the STS Benchmark dataset. 
# + test_y = test["score"].values.flatten() test_x = test.drop("score", axis=1).values.tolist() data = {"data": test_x} data = json.dumps(data) # + # Set up a Timer to see how long the model takes to predict t = Timer() t.start() score = aci_service.run(input_data=data) t.stop() print("Time elapsed: {}".format(t)) result = json.loads(score) try: output = result["result"] print("Number of samples predicted: {}".format(len(output))) except: print(result["error"]) # + # get Pearson Correlation pearson = pearsonr(output, test_y)[0] print(pearson) sb.glue("pearson_correlation", pearson) # - # The goal of this notebook is to demonstrate how to use AutoML locally and then deploy the model to Azure Container Instance quickly. The model utilizes the built-in capabilities of AutoML to embed our sentences. The model performance on its own, without tweaking, is not very strong with this particular dataset. For a more advanced model, see [AutoML with Pipelines Deployment AKS](automl_with_pipelines_deployment_aks.ipynb) for much stronger performance on the same task. This notebook utilizes AzureML Pipelines to explicitly embed our sentences using the Google Universal Sentence Encoder (USE) model. For our dataset, the Google USE embeddings result in superior model performance. # ## 5. Clean up # Throughout the notebook, we used a workspace and Azure container instances. To get a sense of the cost we incurred, we can refer to this [calculator](https://azure.microsoft.com/en-us/pricing/calculator/). We can also navigate to the [Cost Management + Billing](https://ms.portal.azure.com/#blade/Microsoft_Azure_Billing/ModernBillingMenuBlade/Overview) pane on the portal, click on our subscription ID, and click on the Cost Analysis tab to check our credit usage. # <br><br> # In order not to incur extra costs, let's delete the resources we no longer need. # <br><br> # Once we have verified that our web service works well on ACI, we can delete it. 
This helps reduce [costs](https://azure.microsoft.com/en-us/pricing/details/container-instances/), since the container group we were paying for no longer exists, and allows us to keep our workspace clean. # + # aci_service.delete() # - # At this point, the main resource we are paying for is the Standard Azure Container Registry (ACR), which contains our Docker image. Details on pricing are available [here](https://azure.microsoft.com/en-us/pricing/details/container-registry/). # # We may decide to use our Docker image in a separate ACI or even in an AKS deployment. In that case, we should keep it available in our workspace. However, if we no longer have a use for it, we can delete it. # + # docker_image.delete() # - # If our goal is to continue using our workspace, we should keep it available. On the contrary, if we plan on no longer using it and its associated resources, we can delete it. # <br><br> # Note: Deleting the workspace will delete all the experiments, outputs, models, Docker images, deployments, etc. that we created in that workspace # + # ws.delete(delete_dependent_resources=True) # This deletes our workspace, the container registry, the account storage, Application Insights and the key vault # - # As mentioned above, Azure Container Instances tend to be used to develop and test deployments. They are typically configured with CPUs, which usually suffice when the number of requests per second is not too high. When working with several instances, we can configure them further by specifically allocating CPU resources to each of them. # # For production requirements, i.e. when > 100 requests per second are expected, we recommend deploying models to Azure Kubernetes Service (AKS). It is a convenient infrastructure as it manages hosted Kubernetes environments, and makes it easy to deploy and manage containerized applications without container orchestration expertise. 
# It also supports deployments with CPU clusters and deployments with GPU clusters. For more examples on deployment, see the [MachineLearningNotebooks](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/deployment) GitHub repository.


# ## Next Steps
#
# Check out [AutoML with Pipelines Deployment AKS](automl_with_pipelines_deployment_aks.ipynb) to see how to construct an AzureML Pipeline with an embedding step (using the Google Universal Sentence Encoder model) and an AutoMLStep, increasing our Pearson correlation score. Also, that notebook demonstrates deployment using AKS versus ACI.
examples/sentence_similarity/automl_local_deployment_aci.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.6 64-bit
#     metadata:
#       interpreter:
#         hash: 291ada4285d81b7724775da889f1025824cfcf5691e48ef89418ffd1713b1d97
#     name: Python 3.8.6 64-bit
# ---

# Delta (Widrow-Hoff / LMS) learning rule for a single linear neuron with two
# inputs.  All data is read interactively from stdin; training stops when the
# total squared error of a full epoch drops below the tolerance, or after the
# maximum number of epochs.

import numpy as np
import time


# Printing the details entered
def print_details(x, y, alpha, w, b):
    """Echo the training configuration (inputs, targets, rate, weights, bias)."""
    print("The inputs are:")
    print(x)
    print("The associated targets are:")
    print(y)
    print("The learning rate is: ")
    print(alpha)
    print("The initial weights and bias considered are:")
    print(w)
    print(b)


# Input data
def input_data():
    """Read n input vectors (space-separated floats) and their targets from stdin.

    Returns:
        (n, x, y): count, list of float64 numpy vectors, list of float targets.
    """
    n = int(input("Enter number of input vectors: "))
    x = []
    y = []
    for i in range(n):
        raw_str1 = str(input("Enter values for vector " + str(i + 1) + ": "))
        # Space-separated components -> one float64 vector.
        components = [float(ele) for ele in raw_str1.split(' ')]
        x.append(np.array(components, dtype=np.float64))
        curr_target = str(input("Enter the target for vector " + str(i + 1) + ": "))
        y.append(float(curr_target))
    return n, x, y


# Input learning rate and number of epochs
def input_alpha_error():
    """Read the learning rate, maximum epoch count and error tolerance from stdin."""
    alpha = float(input("Enter the learning rate for the model"))
    no_of_epochs = int(input("Enter the maximum number of epochs for which the model should run"))
    tolerance_error = float(input("Enter the tolerance error for the model"))
    return alpha, no_of_epochs, tolerance_error


# Input initial weights and bias
def input_weights():
    """Read the initial weight vector (space-separated floats) and bias from stdin."""
    raw_str3 = str(input("Enter initial weight vector: "))
    w = [float(ele) for ele in raw_str3.split(' ')]
    b = float(input("Enter the value of bias"))
    return w, b


number_of_inputs, input_vector, target = input_data()
weights, bias = input_weights()
alpha, max_epochs, tolerance_error = input_alpha_error()

# + tags=[]
print_details(input_vector, target, alpha, weights, bias)

# Training loop.  Update rule per sample:
#   y_in = w1*x1 + w2*x2 + b
#   w_k <- w_k + alpha * (t - y_in) * x_k ;  b <- b + alpha * (t - y_in)
#
# BUGFIX 1: the tolerance test is now applied to the *complete* epoch error
# after all samples have been seen.  Previously it was evaluated against a
# partial mid-epoch running sum, which could stop training spuriously after
# the first sample of an epoch.
# BUGFIX 2: the converged/max-epochs message now uses an explicit flag instead
# of comparing the pre-incremented counter `i` with max_epochs, which reported
# "maximum number of epochs" even when the model had actually converged.
total_error = 0.0
converged = False
for epoch in range(1, max_epochs + 1):
    print("Epoch: ", str(epoch))
    total_epoch_error = 0.0
    for j in range(number_of_inputs):
        x1 = input_vector[j][0]
        x2 = input_vector[j][1]
        current_target = target[j]
        yin = x1 * weights[0] + x2 * weights[1] + bias
        change = current_target - yin
        curr_error = change * change  # squared error for this sample
        weights[0] = weights[0] + alpha * x1 * change
        weights[1] = weights[1] + alpha * x2 * change
        bias = bias + alpha * change
        total_epoch_error = total_epoch_error + curr_error
    # The converged epoch's error is always printed and accumulated too.
    print("The total error for epoch is: ", str(total_epoch_error))
    total_error = total_error + total_epoch_error
    if total_epoch_error < tolerance_error:
        converged = True
        break

if converged:
    print("The error was reduced to be less than the tolerance error.")
else:
    print("The model was stopped because it ran for maximum number of epochs entered")

print("The final weights are: ")
print(weights)
print("The final bias is: ")
print(str(bias))
print("The total error is: ")
print(str(total_error))
# -
delta-learning/delta-learning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + tags=[]
# SAINT-style encoder/decoder transformer for knowledge tracing: predicts the
# probability that a student answers each question correctly, evaluated with
# 5-fold cross-validation over users and early stopping on validation loss.
import psutil
import joblib
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
import torch.nn as nn
from sklearn.model_selection import KFold
from torch.utils.data import Dataset, DataLoader
import gc
import os
import warnings

warnings.filterwarnings("ignore")

# + tags=[]
# Model hyper-parameters.
# NOTE(review): `data` here (and `group` / `PRACTICE_DATASET` further down) are
# not defined anywhere in this file -- they are presumably created by earlier
# notebook cells; verify before running this script standalone.
MAX_SEQ = 400   # maximum interaction-sequence length fed to the model
n_part = data['sub_chapter_id'].nunique() + 1  # sub-chapter vocabulary size (+1, presumably for a padding id -- confirm)
D_MODEL = 128   # transformer embedding width
N_LAYER = 2     # number of encoder and decoder layers
DROPOUT = 0.2

# + tags=[]
class FFN(nn.Module):
    """Position-wise two-layer feed-forward block (Linear -> ReLU -> Linear -> Dropout)."""

    def __init__(self, state_size=200):
        super(FFN, self).__init__()
        self.state_size = state_size
        self.lr1 = nn.Linear(state_size, state_size)
        self.relu = nn.ReLU()
        self.lr2 = nn.Linear(state_size, state_size)
        self.dropout = nn.Dropout(DROPOUT)

    def forward(self, x):
        x = self.lr1(x)
        x = self.relu(x)
        x = self.lr2(x)
        return self.dropout(x)


def future_mask(seq_length):
    """Return a [seq_length, seq_length] boolean mask that is True strictly above
    the diagonal, i.e. hides future positions for causal attention."""
    future_mask = np.triu(np.ones((seq_length, seq_length)), k=1).astype('bool')
    return torch.from_numpy(future_mask)


class SAINTModel(nn.Module):
    """SAINT-like model: the encoder consumes exercise/chapter/sub-chapter
    embeddings, the decoder additionally consumes past responses; output is a
    per-position correctness logit."""

    def __init__(self, n_skill, n_part, max_seq=MAX_SEQ, embed_dim= D_MODEL, elapsed_time_cat_flag = False):
        super(SAINTModel, self).__init__()

        self.n_skill = n_skill
        self.embed_dim = embed_dim
        # Hard-coded chapter count -- TODO(review): confirm against the dataset.
        self.n_chapter= 39
        self.n_sub_chapter = n_part
        # Flag is stored but never used in forward() as written.
        self.elapsed_time_cat_flag = elapsed_time_cat_flag

        self.q_embedding = nn.Embedding(self.n_skill+1, embed_dim) ## exercise
        self.c_embedding = nn.Embedding(self.n_chapter+1, embed_dim) ## category
        self.sc_embedding = nn.Embedding(self.n_sub_chapter, embed_dim) ## category
        self.pos_embedding = nn.Embedding(max_seq+1, embed_dim) ## position
        self.res_embedding = nn.Embedding(2+1, embed_dim) ## response (0/1 plus one extra id, presumably padding)

        self.transformer = nn.Transformer(nhead=8, d_model = embed_dim, num_encoder_layers= N_LAYER, num_decoder_layers= N_LAYER, dropout = DROPOUT)

        self.dropout = nn.Dropout(DROPOUT)
        self.layer_normal = nn.LayerNorm(embed_dim)
        self.ffn = FFN(embed_dim)
        self.pred = nn.Linear(embed_dim, 1)

    def forward(self, question, chapter, schapter, response):
        """Return per-position logits of shape [batch, seq_len]; inputs are
        integer id tensors of shape [batch, seq_len]."""
        device = question.device

        ## embedding layer
        question = self.q_embedding(question)
        chapter = self.c_embedding(chapter)
        schapter = self.sc_embedding(schapter)
        pos_id = torch.arange(question.size(1)).unsqueeze(0).to(device)
        pos_id = self.pos_embedding(pos_id)
        res = self.res_embedding(response)

        # Encoder input: content features; decoder input: responses plus the
        # same content features (note `enc` is summed into `dec` here).
        enc = pos_id + question + chapter + schapter
        dec = pos_id + res + enc

        # nn.Transformer expects [seq_len, batch, embed] ordering (batch_first
        # did not exist / is not used here).
        enc = enc.permute(1, 0, 2) # x: [bs, s_len, embed] => [s_len, bs, embed]
        dec = dec.permute(1, 0, 2)
        # The same causal mask is applied to src, tgt and memory attention.
        mask = future_mask(enc.size(0)).to(device)

        att_output = self.transformer(enc, dec, src_mask=mask, tgt_mask=mask, memory_mask = mask)
        att_output = self.layer_normal(att_output)
        att_output = att_output.permute(1, 0, 2) # att_output: [s_len, bs, embed] => [bs, s_len, embed]

        # FFN with a residual connection and a second layer norm.
        x = self.ffn(att_output)
        x = self.layer_normal(x + att_output)
        x = self.pred(x)

        return x.squeeze(-1)


# + tags=[]
n_skill = data['question_id'].nunique() + 1  # question vocabulary size (+1, presumably for padding id 0)

# + tags=[]
n_skill

# + tags=[]
patience = 5  # early stopping: stop after this many epochs without val-loss improvement

# + tags=[]
# 5-fold cross-validation over users.  Each held-out fold is split in half:
# first half test, second half validation (used for early stopping).
X = np.array(group.keys())
kfold = KFold(n_splits=5, shuffle=True)
train_losses = list()
train_aucs = list()
train_accs = list()
val_losses = list()
val_aucs = list()
val_accs = list()
test_losses = list()
test_aucs = list()
test_accs = list()

for train, test in kfold.split(X):
    # NOTE(review): `train`/`test` are first index arrays from KFold and are
    # then rebound to Dataset objects below -- confusing but intentional here.
    users_train, users_test = X[train], X[test]
    n = len(users_test)//2
    users_test, users_val = users_test[:n], users_test[n: ]

    train = PRACTICE_DATASET(group[users_train])
    valid = PRACTICE_DATASET(group[users_val])
    test = PRACTICE_DATASET(group[users_test])

    train_dataloader = DataLoader(train, batch_size=32, shuffle=True, num_workers=8)
    val_dataloader = DataLoader(valid, batch_size=32, shuffle=True, num_workers=8)
    test_dataloader = DataLoader(test, batch_size=32, shuffle=True, num_workers=8)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # A fresh model and optimizer per fold.
    saint = SAINTModel(n_skill, n_part)
    epochs = 100
    criterion = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(saint.parameters(), betas=(0.9, 0.999), lr = 0.0005, eps=1e-8)
    saint.to(device)
    criterion.to(device)

    def train_epoch(model=saint, train_iterator=train_dataloader, optim=optimizer, criterion=criterion, device=device):
        """One training pass over train_iterator; returns (mean loss, accuracy, AUC).

        Each batch item is (question_id, chapter, schapter, responses, label);
        positions with question_id == 0 are padding and are masked out.
        """
        model.train()

        train_loss = []
        num_corrects = 0
        num_total = 0
        labels = []
        outs = []

        tbar = tqdm(train_iterator)
        for item in tbar:
            question_id = item[0].to(device).long()
            chapter = item[1].to(device).long()
            schapter = item[2].to(device).long()
            responses = item[3].to(device).long()
            label = item[4].to(device).float()
            target_mask = (question_id!=0)

            optim.zero_grad()
            output = model(question_id, chapter, schapter, responses)
            output = torch.reshape(output, label.shape)
            # Restrict loss/metrics to non-padding positions.
            output = torch.masked_select(output, target_mask)
            label = torch.masked_select(label, target_mask)
            loss = criterion(output, label)
            loss.backward()
            optim.step()
            train_loss.append(loss.item())

            # Accuracy at the 0.5 probability threshold.
            pred = (torch.sigmoid(output) >= 0.5).long()

            num_corrects += (pred == label).sum().item()
            num_total += len(label)

            labels.extend(label.view(-1).data.cpu().numpy())
            outs.extend(output.view(-1).data.cpu().numpy())

            tbar.set_description('loss - {:.4f}'.format(loss))

        acc = num_corrects / num_total
        auc = roc_auc_score(labels, outs)
        loss = np.mean(train_loss)

        return loss, acc, auc

    def val_epoch(model=saint, val_iterator=test_dataloader, criterion=criterion, device=device):
        """One no-grad evaluation pass; returns (mean loss, accuracy, AUC).
        Default iterator is the *test* loader -- callers pass val_dataloader
        explicitly for validation, and rely on the default for the final test.
        """
        model.eval()

        train_loss = []
        num_corrects = 0
        num_total = 0
        labels = []
        outs = []

        tbar = tqdm(val_iterator)
        for item in tbar:
            question_id = item[0].to(device).long()
            chapter = item[1].to(device).long()
            schapter = item[2].to(device).long()
            responses = item[3].to(device).long()
            label = item[4].to(device).float()
            target_mask = (question_id!=0)

            with torch.no_grad():
                output = model(question_id, chapter, schapter, responses)

            output = torch.reshape(output, label.shape)
            output = torch.masked_select(output, target_mask)
            label = torch.masked_select(label, target_mask)

            loss = criterion(output, label)
            train_loss.append(loss.item())

            pred = (torch.sigmoid(output) >= 0.5).long()
            num_corrects += (pred == label).sum().item()
            num_total += len(label)

            labels.extend(label.view(-1).data.cpu().numpy())
            outs.extend(output.view(-1).data.cpu().numpy())

            tbar.set_description('valid loss - {:.4f}'.format(loss))

        acc = num_corrects / num_total
        auc = roc_auc_score(labels, outs)
        loss = np.average(train_loss)

        return loss, acc, auc

    # Early stopping bookkeeping: best validation loss seen and a counter of
    # consecutive epochs without improvement.
    MIN_VAL = 1000000000
    count = 0
    print('----------------------------------------------------------------------------')
    for epoch in range(epochs):
        train_loss, train_acc, train_auc = train_epoch(model=saint, device=device)
        print("epoch - {} train_loss - {:.2f} acc - {:.3f} auc - {:.3f}".format(epoch, train_loss, train_acc, train_auc))
        val_loss, val_acc, val_auc = val_epoch(model=saint, val_iterator= val_dataloader, device=device)
        print("epoch - {} val_loss - {:.2f} val acc - {:.3f} val auc - {:.3f}".format(epoch, val_loss, val_acc, val_auc))
        if val_loss < MIN_VAL:
            count = 0
            MIN_VAL = val_loss
        else:
            count += 1
        if count == patience:
            print('Val Loss does not improve for {} consecutive epochs'.format(patience))
            break

    # Final evaluation on the test half of the fold (val_epoch's default iterator).
    # NOTE(review): this evaluates the *last* model state, not the best-val
    # checkpoint -- no weights are saved/restored during early stopping.
    test_loss, test_acc, test_auc = val_epoch(model=saint, device=device)
    print("epoch - {} test_loss - {:.2f} acc - {:.3f} auc - {:.3f}".format(epoch, test_loss, test_acc, test_auc))
    test_losses.append(test_loss)
    test_aucs.append(test_auc)
    test_accs.append(test_acc)
    train_aucs.append(train_auc)
    train_losses.append(train_loss)
    train_accs.append(train_acc)

# + tags=[]
# Cross-fold mean and standard deviation of the test metrics.
print("test avg loss: ", np.mean(test_losses), np.std(test_losses))
print("test avg acc: ", np.mean(test_accs), np.std(test_accs))
print("test avg auc: ", np.mean(test_aucs), np.std(test_aucs))

# + tags=[]
print("train avg loss: ", np.mean(train_losses), np.std(train_losses))
print("train avg acc: ", np.mean(train_accs), np.std(train_accs))
print("train avg auc: ", np.mean(train_aucs), np.std(train_aucs))
additional_features/transformer-based/saint+/saint_plus_PF.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="qlseAUvJJDXM" # ## The Data # # ** Source: https://datamarket.com/data/set/22ox/monthly-milk-production-pounds-per-cow-jan-62-dec-75#!ds=22ox&display=line ** # # **Monthly milk production: pounds per cow. Jan 62 - Dec 75** # + [markdown] colab_type="text" id="5WTUlbh4JDXN" # ** Import numpy pandas and matplotlib ** # + colab={} colab_type="code" id="Xg2peEzGJDXO" import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # + [markdown] colab_type="text" id="hBtEtgN6JDXS" # ** Use pandas to read the csv of the monthly-milk-production.csv file and set index_col='Month' ** # + colab={"base_uri": "https://localhost:8080/", "height": 122} colab_type="code" executionInfo={"elapsed": 29701, "status": "ok", "timestamp": 1555021285621, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="FPJdTW0TJcMA" outputId="8931b8b6-ed4f-4b22-83ba-e056d7225711" from google.colab import drive drive.mount('/content/gdrive') # + colab={} colab_type="code" id="nArYXHMPJDXT" milk = pd.read_csv("gdrive/My Drive/dataML/monthly-milk-production.csv",index_col='Month') #milk = pd.read_csv("monthly-milk-production.csv",index_col='Month') # + colab={"base_uri": "https://localhost:8080/", "height": 235} colab_type="code" executionInfo={"elapsed": 30254, "status": "ok", "timestamp": 1555021286232, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="RYvb00fEJDXW" outputId="e9a410e1-9495-49fb-f98d-44a503412335" milk.head() # 
+ [markdown] colab_type="text" id="qd6qU_zOJDXb" # ** Check out the head of the dataframe** # + [markdown] colab_type="text" id="Z0aPxAkWJDXg" # ** Make the index a time series by using: ** # # milk.index = pd.to_datetime(milk.index) # + colab={} colab_type="code" id="ej2PnSz7JDXh" milk.index = pd.to_datetime(milk.index) # + colab={"base_uri": "https://localhost:8080/", "height": 235} colab_type="code" executionInfo={"elapsed": 30214, "status": "ok", "timestamp": 1555021286237, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="TuNcJhMoJDXj" outputId="60ba7680-626f-4005-a054-3e8f00373a1b" milk.head() # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 30197, "status": "ok", "timestamp": 1555021286239, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="-rgkKTs6JDXn" outputId="d0c2ed31-4719-4853-d701-07057a196c50" milk.keys() # + [markdown] colab_type="text" id="z8Hk38WeJDXq" # ** Plot out the time series data. ** # + colab={"base_uri": "https://localhost:8080/", "height": 290} colab_type="code" executionInfo={"elapsed": 30575, "status": "ok", "timestamp": 1555021286647, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="qXNiSsZUJDXq" outputId="5ccde4fd-6ca2-4814-8c3b-5aea28a11776" milk.plot() # + [markdown] colab_type="text" id="Fxbb9fhP1rsU" # ### Train Test Split # # ** Let's attempt to predict a year's worth of data. 
(12 months or 12 steps into the future) ** # # + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" executionInfo={"elapsed": 30554, "status": "ok", "timestamp": 1555021286649, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="TSYpC-nu1rsW" outputId="90d4c5be-de98-4817-c3af-0281811c8963" milk.info() # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 30545, "status": "ok", "timestamp": 1555021286657, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="00vc2Jms1rsc" outputId="0b5d35bc-954b-438f-a98e-360412250e15" len(milk) # + colab={} colab_type="code" id="F_PjB7rq1rsf" train_set= milk.head((1976-1962-1)*12) # + colab={} colab_type="code" id="C6632CCp1rsi" test_set = milk.tail(12) # + [markdown] colab_type="text" id="_9PtX6Um1rsl" # ### Scale the Data # # ** Use sklearn.preprocessing to scale the data using the MinMaxScaler. Remember to only fit_transform on the training data, then transform the test data. 
You shouldn't fit on the test data as well, otherwise you are assuming you would know about future behavior!** # + colab={} colab_type="code" id="czuAVyRj1rsl" from sklearn.preprocessing import MinMaxScaler # + colab={} colab_type="code" id="UFn7xXzj1rsp" scaler = MinMaxScaler() # + colab={} colab_type="code" id="hu0-OYDX1rsq" train_scaled = scaler.fit_transform(train_set) # + colab={} colab_type="code" id="O2uDgWsI1rsu" test_scaled = scaler.transform(test_set) # + [markdown] colab_type="text" id="EiI-RuSPKC7v" # ## Build Training Data # # * X_train: Past 12 monthes productions, shape: (-1,12) # * Y_train: Future 1 month productions, shape:(-1, 1) # * Shift the window to get more trainning data # + colab={} colab_type="code" id="6YaF3wjKKB4Y" def build_train_data(data, past_monthes = 12, future_monthes = 1): X_train, Y_train = [],[] for i in range(data.shape[0] + 1 - past_monthes - future_monthes): X_train.append(np.array(data[i:i + past_monthes])) Y_train.append(np.array(data[i + past_monthes:i + past_monthes + future_monthes])) return np.array(X_train).reshape([-1,12]), np.array(Y_train).reshape([-1,1]) # + colab={} colab_type="code" id="t_z8xNdaQjvH" x, y = build_train_data(train_scaled) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 31204, "status": "ok", "timestamp": 1555021287363, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="DiLmBOy4Qq3j" outputId="1fac8bcf-951c-4790-dfcd-6481685b6d53" print('x shape;', x.shape, 'y shape: ', y.shape) # + [markdown] colab_type="text" id="JxbPdOLds9JD" # Take a look at the data # + colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" executionInfo={"elapsed": 31456, "status": "ok", "timestamp": 1555021287674, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="3_tRprRSjkYY" outputId="e7bc2d58-9de3-4b90-dcef-9ee2f9badbe6" plt.plot(x[12]) # + [markdown] colab_type="text" id="P_rsY5NQQ3op" # ## Keras # + colab={} colab_type="code" id="1GiFVMqlQ3or" from tensorflow.keras import layers, Sequential,models # + colab={} colab_type="code" id="AsBnLuapQ3ot" RNN_CELLSIZE = 10 SEQLEN = 12 BATCHSIZE = 10 # + [markdown] colab_type="text" id="j96G7Ef0k5Z-" # ### Build and Train the Model # Many to One # + colab={"base_uri": "https://localhost:8080/", "height": 343} colab_type="code" executionInfo={"elapsed": 32876, "status": "ok", "timestamp": 1555021289111, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="vwLHV-p5k5Z0" outputId="cc239d60-03a5-4b78-ddf5-226aac2506d9" model_layers = [ layers.Reshape((SEQLEN,1),input_shape=(SEQLEN,)), layers.GRU(RNN_CELLSIZE, return_sequences=True), layers.GRU(RNN_CELLSIZE), layers.Dense(1) ] model = Sequential(model_layers) model.summary() # + colab={"base_uri": "https://localhost:8080/", "height": 88} colab_type="code" executionInfo={"elapsed": 32870, "status": "ok", "timestamp": 1555021289112, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="qU4zPwkzk5Zt" outputId="1e5a1f6e-021f-4e72-fecd-6710e476f24d" model.compile( loss = 'mean_squared_error', optimizer = 'adam' ) # + colab={"base_uri": "https://localhost:8080/", "height": 34088} colab_type="code" executionInfo={"elapsed": 832811, "status": "ok", "timestamp": 1555022089082, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="6AW4Vl5-k5ZP" outputId="2cadc61b-dc26-4113-db05-8a1cf4bb7519" h = model.fit(x,y, batch_size=BATCHSIZE, epochs = 1000) model.save('gdrive/My Drive/dataML/rnn_model.h5') # + colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" executionInfo={"elapsed": 833128, "status": "ok", "timestamp": 1555022089429, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="bMwy2IIozaRt" outputId="4b971525-3745-4b88-c7a4-2341ca6042d0" plt.plot(h.history['loss']) # + [markdown] colab_type="text" id="r714-jBM1Yo-" # ### Predict Future # * Use the last 12 monthes data of the train data set to predict the future 12 monthes production # * Compare the predicted data with the real data in test data set # + colab={} colab_type="code" id="BnGC-2ohfLwz" model = models.load_model('gdrive/My Drive/dataML/rnn_model.h5') # + [markdown] colab_type="text" id="QkYcxMMgqGN0" # Get the last 12 monthes data of the train data # + colab={} colab_type="code" id="wN_zImU5_T-7" train_seed = list(train_scaled[-12:].flatten()) # + [markdown] colab_type="text" id="lJXmc5C_qPi4" # One prediction only generate one predict, we need predict 12 times to get 12 monthes data. 
# + colab={} colab_type="code" id="gPLrd2196eHx" def get_prediction(data_list): predict = [] train_seed = data_list for i in range(12): x_train = np.array(train_seed[-12:]).reshape(1,12) one_predict = model.predict(x_train)[0][0] predict.append(one_predict) train_seed.append(one_predict) return predict, train_seed # + colab={} colab_type="code" id="BT-RPo6k9XrQ" predict, train_seed = get_prediction(train_seed) # + colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" executionInfo={"elapsed": 752, "status": "ok", "timestamp": 1555022548018, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="f_p2HvS2Ce12" outputId="8556eba2-1b36-4e75-ce8e-be8ca55f893f" plt.plot(train_seed) # + colab={} colab_type="code" id="H5VAR8RQ98LN" results = scaler.inverse_transform(np.array(predict).reshape(12,1)) # + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" executionInfo={"elapsed": 873, "status": "ok", "timestamp": 1555022549636, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="bJgzXnlWDMp3" outputId="6d6ffed5-0e92-47b4-bd35-710fcd0dee35" test_set['Generated'] = results # + [markdown] colab_type="text" id="omtVz1Crqo9f" # Compare the generated data with the real data # + colab={"base_uri": "https://localhost:8080/", "height": 452} colab_type="code" executionInfo={"elapsed": 436, "status": "ok", "timestamp": 1555022550715, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="bPZAXKyMDVAD" outputId="18a1a768-b04c-4a3f-9931-a459eceb33f3" test_set # + colab={"base_uri": 
"https://localhost:8080/", "height": 298} colab_type="code" executionInfo={"elapsed": 901, "status": "ok", "timestamp": 1555022552978, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="6v-Ot8ZxDZvi" outputId="656d72cf-d691-48ef-c6cb-2328ce54b10b" test_set.plot() # + [markdown] colab_type="text" id="WkrgRIqzkXgq" # ### Predict more # Use the first months data to predict the future 13 years' milk productions and compare with the real data # + colab={} colab_type="code" id="sbRNA4ERklYA" train_seed = list(train_scaled[:12].flatten()) # + colab={} colab_type="code" id="F91e0IzvklYE" def get_prediction(data_list): predict = [] train_seed = data_list for i in range(12*13): x_train = np.array(train_seed[-12:]).reshape(1,12) one_predict = model.predict(x_train)[0][0] predict.append(one_predict) train_seed.append(one_predict) return predict, train_seed # + colab={} colab_type="code" id="BNW_8HJ2klYG" predict, train_seed = get_prediction(train_seed) # + colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" executionInfo={"elapsed": 3615, "status": "ok", "timestamp": 1555023270581, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="TvgurkwHklYI" outputId="fefd099b-b47a-4ad7-93cd-522994912571" plt.plot(train_seed) # + colab={} colab_type="code" id="pcNqRCbiklYN" results = scaler.inverse_transform(np.array(train_seed).reshape(-1,1)) # + colab={} colab_type="code" id="K-8x-sOFmBav" milk_predict = milk # + colab={} colab_type="code" id="387zn4SBmKsl" milk_predict['Generated'] = results # + colab={"base_uri": "https://localhost:8080/", "height": 290} colab_type="code" executionInfo={"elapsed": 664, "status": "ok", "timestamp": 1555023317996, "user": 
{"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-XK0YfSQSDhA/AAAAAAAAAAI/AAAAAAAAIoM/eIY5kmxK00c/s64/photo.jpg", "userId": "14551110502303433991"}, "user_tz": -60} id="PDvQXKtjklYX" outputId="b6bc252d-d54c-47fd-b875-da4a9e4cc2da" milk_predict.plot() # + colab={} colab_type="code" id="zIAwIfd6JDa8"
5_Prediction_MilkProdction/1_RNN_Many_to_One_Keras.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Let TensorFlow grab GPU memory incrementally instead of all at once.
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# -

# https://www.youtube.com/watch?v=1-NYPQw5THU&feature=youtu.be

import pandas as pd
import numpy as np
import datetime
from pandas_summary import DataFrameSummary

# Pre-normalized Rossmann train/test sets (feather format).
df = pd.read_feather('train_normalized_data.fth')
df_test = pd.read_feather('test_normalized_data.fth')

# +
# Full categorical feature list kept for reference; the second assignment
# deliberately narrows this run down to six features.
cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday',
            'CompetitionMonthsOpen', 'Promo2Weeks', 'StoreType', 'Assortment',
            'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear',
            'State', 'Week', 'Events', 'Promo_fw', 'Promo_bw',
            'StateHoliday_fw', 'StateHoliday_bw',
            'SchoolHoliday_fw', 'SchoolHoliday_bw']
cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'State']
# -

# Continuous feature list, likewise disabled for this run.
contin_vars = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC',
               'Min_TemperatureC', 'Max_Humidity', 'Mean_Humidity',
               'Min_Humidity', 'Max_Wind_SpeedKm_h', 'Mean_Wind_SpeedKm_h',
               'CloudCover', 'trend', 'trend_DE', 'AfterStateHoliday',
               'BeforeStateHoliday', 'Promo', 'SchoolHoliday']
contin_vars = []

from lightgbm import LGBMRegressor

y_out_columns = ['Sales']

# Hold out everything from July 2015 onwards for validation.
df_train = df[df.Date < datetime.datetime(2015, 7, 1)]
df_val = df[df.Date >= datetime.datetime(2015, 7, 1)]

print(f'Cantidad en val: {len(df_val)}, porcentaje: {len(df_train)/(len(df_train) + len(df_val))}')

X_train = df_train[cat_vars + contin_vars]
X_val = df_val[cat_vars + contin_vars]
X_test = df_test[cat_vars + contin_vars]

X_train.shape, X_val.shape

# +
log_output = True

if log_output:
    # Log scale, normalized by the maximum of log(Sales).
    max_log_y = np.max(np.log(df[y_out_columns])).values
    y_train = np.log(df_train[y_out_columns].values)/max_log_y
    y_val = np.log(df_val[y_out_columns].values)/max_log_y
else:
    # Plain standardization with train-set statistics.
    y_mean = df_train[y_out_columns].mean().values
    y_std = df_train[y_out_columns].std().values
    y_train = (df_train[y_out_columns].values - y_mean)/y_std
    y_val = (df_val[y_out_columns].values - y_mean)/y_std
# -

min_child_samples = 5
n_estimators = 4000
learning_rate = 0.05

model = LGBMRegressor(min_child_samples=min_child_samples,
                      n_estimators=n_estimators,
                      learning_rate=learning_rate)

fit_params = {
    "early_stopping_rounds": 100,
    "eval_metric": 'l2',
    "eval_set": [(X_val, y_val.reshape(-1))],
    'eval_names': ['valid'],
    'verbose': 100,
    'feature_name': 'auto',  # that's actually the default
    'categorical_feature': cat_vars,
}

model.fit(X_train, y_train.reshape(-1), **fit_params)

# # Métrica
# $$
# \textrm{RMSE} = \sqrt{\frac{1}{n} \sum_{i=1}^{n} \left(\frac{\hat{y}_i - y_i}{y_i}\right)^2}
# $$

model.score(X_val, y_val)

# Undo the target transform so errors are measured in sales units.
if log_output:
    y_pred_train = np.exp(model.predict(X_train, verbose=1)*max_log_y)
    y_pred = np.exp(model.predict(X_val, verbose=1)*max_log_y)
    y_pred_test = np.exp(model.predict(X_test, verbose=1)*max_log_y)
else:
    y_pred_train = model.predict(X_train, verbose=1)*y_std + y_mean
    y_pred = model.predict(X_val, verbose=1)*y_std + y_mean
    y_pred_test = model.predict(X_test, verbose=1)*y_std + y_mean

# Train
np.sqrt((((df_train['Sales'].values - y_pred_train)/df_train['Sales'].values)**2).sum()/len(y_pred_train))

# Validación
np.sqrt((((df_val['Sales'].values - y_pred)/df_val['Sales'].values)**2).sum()/len(y_pred))

# # Baseline

import pandas as pd

sample_csv = pd.read_csv('dataset/sample_submission.csv')

# Per-store mean of non-zero sales as the naive prediction.
stores_mean = {store: g_df[g_df['Sales'] > 0]['Sales'].mean()
               for store, g_df in df.groupby('Store')}

df_test['Sales'] = df_test['Store'].apply(stores_mean.get)
df_test.loc[df_test['Open'] == 0, 'Sales'] = 0  # closed stores sell nothing

df_test[['Store', 'Sales']].head(10)

df_test[df_test['Open'] == 0][['Store', 'Sales']].head()

sample_csv['Sales'] = df_test['Sales']
sample_csv.to_csv(f'submision_baseline.csv', index=False)
sample_csv.head()

# # Submit a la competición

# +
sample_csv = pd.read_csv('dataset/sample_submission.csv')
sample_csv['Sales'] = y_pred_test
sample_csv.head()
sample_csv.to_csv(f'submision2_{log_output}-{min_child_samples}-{n_estimators}-{learning_rate}.csv', index=False)
# -
lab-Kaggle2-lightGBM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Grove_Buzzer using pynqmicroblaze library
# ---
# ## Aim
#
# * This notebook shows how the PYNQ microblaze infrastructure can be used to access the Grove_Buzzer from various interfaces without changing the driver code written in C and compiled using the MicroBlaze compiler available in the image.
#
# ## References
# * [Grove buzzer](https://www.seeedstudio.com/Grove-buzzer.html)
# * [PYNQ Grove Adapter](https://store.digilentinc.com/pynq-grove-system-add-on-board/)
#
# ## Revision History
#
# * Initial Release
#
# ---

# ## Load _base_ Overlay

from pynq.lib import MicroblazeLibrary
from pynq.overlays.base import BaseOverlay

base = BaseOverlay('base.bit')

# ## Using Grove buzzer with PL Grove connector

# <div class="alert alert-box alert-warning"><ul>
# <h4 class="alert-heading">Make Physical Connections</h4>
# <li>Connect the Grove_Buzzer module to the PL-GC0 connector.</li>
# </ul>
# </div>

# ### Library compilation

# Compile the C buzzer driver together with the PL Grove connector support.
lib = MicroblazeLibrary(base.GC, ['grove_buzzer', 'gc'])

# Inspect the functions the compiled library exposes.
dir(lib)

# ### Create _buzzer_ device

# Only one signal line is used, so a single pin needs initialization.
# In case PL-GC1 is being used, change GC_0_A to GC_1_A.
buzzer = lib.grove_buzzer_init_pins(lib.GC_0_A)

# ### Play pre-defined melody
buzzer.melody_demo()

# ### Play tone of 2000 uSec for 500 times
buzzer.playTone(2000, 500)

# ## Using Grove buzzer with the PYNQ_Grove_Adapter (PMOD)

# <div class="alert alert-box alert-warning"><ul>
# <h4 class="alert-heading">Make Physical Connections</h4>
# <li>Connect the PYNQ Grove Adapter to PMODB connector. Connect the Grove_Buzzer module to the G1 connector of the Adapter.</li>
# </ul>
# </div>

# ### Library compilation

# Same driver source, recompiled against the PMOD Grove adapter interface.
lib = MicroblazeLibrary(base.PMODB, ['grove_buzzer', 'pmod_grove'])

# ### Create _buzzer_ device
buzzer = lib.grove_buzzer_init_pins(lib.PMOD_G1_A)

# ### Play pre-defined melody
buzzer.melody_demo()

# ### Play tone of 2000 uSec for 500 times
buzzer.playTone(2000, 500)

# Copyright (C) 2021 Xilinx, Inc

# ---
# ---
Pynq-ZU/base/notebooks/microblaze/grove_buzzer_using_pynqmicroblaze_library.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # Run in python console import nltk; nltk.download('stopwords') # Run in terminal or command prompt #python3 -m spacy download en # - pip install pyLDAvis pip install gensim # + #pip install spacy==2.2.0 # + import re import numpy as np import pandas as pd from pprint import pprint # Gensim import gensim import gensim.corpora as corpora from gensim.utils import simple_preprocess from gensim.models import CoherenceModel import nltk from nltk.stem import WordNetLemmatizer # spacy for lemmatization import spacy # Plotting tools import pyLDAvis import pyLDAvis.gensim_models # don't skip this import matplotlib.pyplot as plt # %matplotlib inline # Enable logging for gensim - optional import logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR) import warnings warnings.filterwarnings("ignore",category=DeprecationWarning) # - # NLTK Stop words from nltk.corpus import stopwords stop_words = stopwords.words('english') stop_words.extend(['from', 'subject', 're', 'edu', 'use']) #import data df_id = pd.read_csv("dataset/nonprofit.txt", sep = "|", encoding = "cp1252") df_text = pd.read_csv("dataset/nonprofit_text.txt", sep = "|", encoding = "cp1252") #merge sets df_merge = pd.merge(df_id, df_text, how='inner', on='nonprofit_id') #filter by TN entries df_text = df_merge.loc[(df_merge["stabbrv"]=='TN')] # + # removes the empty descriptions df_text = df_text[df_text["description"].str.len() > 0] # combines all by nonprofit_id to a single description df_text = df_text.groupby(['nonprofit_id'], as_index = False).agg({'description': ' '.join}) # - df_text = df_text[df_text["description"].str.len() > 10] df_text.info() # + #create sample set #df_text = df_text.sample(n = 250000) #df_text.head() # - data = 
df_text.description.values.tolist() # + def sent_to_words(sentences): for sentence in sentences: yield(gensim.utils.simple_preprocess(str(sentence).encode('utf-8'), deacc=True)) # deacc=True removes punctuations data_words = list(sent_to_words(data)) print(data_words[:1]) # + # Build the bigram and trigram models bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases. trigram = gensim.models.Phrases(bigram[data_words], threshold=100) # Faster way to get a sentence clubbed as a trigram/bigram bigram_mod = gensim.models.phrases.Phraser(bigram) trigram_mod = gensim.models.phrases.Phraser(trigram) # See trigram example print(trigram_mod[bigram_mod[data_words[0]]]) # + # Define functions for stopwords, bigrams, trigrams and lemmatization def remove_stopwords(texts): return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts] def make_bigrams(texts): return [bigram_mod[doc] for doc in texts] def make_trigrams(texts): return [trigram_mod[bigram_mod[doc]] for doc in texts] def lemmatization(texts): """https://spacy.io/api/annotation""" texts_out = [] for sent in texts: doc = nlp(" ".join(sent)) texts_out.append([token.lemma_ for token in doc if(len(token) > 3)]) return texts_out # + #python3 -m spacy download en # + # Remove Stop Words data_words_nostops = remove_stopwords(data_words) print(data_words_nostops[:1]) # - print(data_words_nostops[7]) # + # Form Bigrams data_words_bigrams = make_bigrams(data_words_nostops) print(data_words_bigrams[:1]) # - print(data_words_bigrams[7]) # + # Initialize spacy 'en' model, keeping only tagger component (for efficiency) # python3 -m spacy download en nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner']) # Do lemmatization keeping only noun, adj, vb, adv data_lemmatized = lemmatization(data_words_bigrams) print(data_lemmatized[:1]) # - print(data_lemmatized[7]) # + # Create Dictionary id2word = corpora.Dictionary(data_lemmatized) # Create 
Corpus texts = data_lemmatized # Term Document Frequency corpus = [id2word.doc2bow(text) for text in texts] # View print(corpus[:2]) # - # Build LDA model lda_model = gensim.models.ldamulticore.LdaMulticore(workers = 3, corpus=corpus, id2word=id2word, num_topics=12, random_state=42, chunksize=100, passes=10, per_word_topics=True) # Print the Keyword in the 10 topics pprint(lda_model.print_topics()) doc_lda = lda_model[corpus] # + # Compute Perplexity print('\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better. # Compute Coherence Score coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v') coherence_lda = coherence_model_lda.get_coherence() print('\nCoherence Score: ', coherence_lda) # - # Visualize the topics pyLDAvis.enable_notebook() vis = pyLDAvis.gensim_models.prepare(lda_model, corpus, id2word) vis # + def get_readable_topic(input_doc): if len(input_doc) > 1: doc = lda_model.id2word.doc2bow(input_doc) doc_topics, word_topics, phi_values = lda_model.get_document_topics(doc, per_word_topics=True) return lda_model.id2word[doc_topics[0][0]] else: return None index = 0 for doc in data_lemmatized: df_text.loc[index,"topic_model_result"] = get_readable_topic(doc) index = index + 1 # - df_text.head(10) df_text.to_csv('output.csv') df_text.nunique()
model_notebooks/hackathon_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <table> # <tr><td align="right" style="background-color:#ffffff;"> # <img src="../images/logo.jpg" width="20%" align="right"> # </td></tr> # <tr><td align="right" style="color.:#777777;background-color:#ffffff;font-size:12px;"> # Prepared by <NAME> and <NAME><br> # <NAME> | December 4, 2019 (updated) # </td></tr> # <tr><td align="right" style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;"> # This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. # </td></tr> # </table> # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\dot}[2]{ #1 \cdot #2} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ # $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # 
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ # <h2> <font color="blue"> Solutions for </font> Controlled Operators</h2> # <a id="task1"></a> # <h3>Task 1</h3> # # Our task is to learn the behavior of the following quantum circuit by doing experiments. # # Our circuit has two qubits. # <ul> # <li> Apply Hadamard to both qubits. # <li> Apply CNOT(qreg[1] is the control,qreg[0] is the target). # <li> Apply Hadamard to both qubits. # <li> Measure the circuit. # </ul> # # Iteratively initialize the qubits to $ \ket{00} $, $ \ket{01} $, $ \ket{10} $, and $ \ket{11} $. # # Execute your program 100 times for each iteration, and then check the outcomes for each iteration. # # Observe that the overall circuit implements CNOT(qreg[0] is the control, qreg[1] is the target). 
# <h3> Solution </h3>

# +
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

# try each two-qubit basis state as the input
for bits in ['00', '01', '10', '11']:
    qreg1 = QuantumRegister(2)    # quantum register with 2 qubits
    creg1 = ClassicalRegister(2)  # classical register with 2 bits
    mycircuit1 = QuantumCircuit(qreg1, creg1)

    # prepare the input: bits[0] sets qreg1[1], bits[1] sets qreg1[0]
    if bits[0] == '1':
        mycircuit1.x(qreg1[1])
    if bits[1] == '1':
        mycircuit1.x(qreg1[0])

    # H on both qubits, CNOT(control=qreg1[1], target=qreg1[0]), H on both:
    # this sandwich swaps the roles of control and target
    mycircuit1.h(qreg1[0])
    mycircuit1.h(qreg1[1])
    mycircuit1.cx(qreg1[1], qreg1[0])
    mycircuit1.h(qreg1[0])
    mycircuit1.h(qreg1[1])

    # measure both qubits
    mycircuit1.measure(qreg1, creg1)

    # execute the circuit 100 times in the local simulator
    job = execute(mycircuit1, Aer.get_backend('qasm_simulator'), shots=100)
    counts = job.result().get_counts(mycircuit1)
    for outcome in counts:
        print("our input is", bits, ": ", outcome, "is observed", counts[outcome], "times")
# -

# <a id="task2"></a>
# <h3>Task 2</h3>
#
# Our task is to learn the behavior of the following quantum circuit by doing experiments.
#
# Our circuit has two qubits.
# <ul>
#     <li> Apply CNOT(qreg[1] is the control, qreg[0] is the target).
#     <li> Apply CNOT(qreg[0] is the control, qreg[1] is the target).
#     <li> Apply CNOT(qreg[1] is the control, qreg[0] is the target).
# </ul>
#
# Iteratively initialize the qubits to $ \ket{00} $, $ \ket{01} $, $ \ket{10} $, and $ \ket{11} $.
#
# Execute your program 100 times for each iteration, and then check the outcomes for each iteration.
#
# Observe that the overall circuit swaps the values of the two qubits:
# <ul>
#     <li> $\ket{00} \rightarrow \ket{00} $ </li>
#     <li> $\ket{01} \rightarrow \ket{10} $ </li>
#     <li> $\ket{10} \rightarrow \ket{01} $ </li>
#     <li> $\ket{11} \rightarrow \ket{11} $ </li>
# </ul>

# <h3> Solution </h3>

# +
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

for bits in ['00', '01', '10', '11']:
    qreg2 = QuantumRegister(2)    # quantum register with 2 qubits
    creg2 = ClassicalRegister(2)  # classical register with 2 bits
    mycircuit2 = QuantumCircuit(qreg2, creg2)

    # prepare the input basis state
    if bits[0] == '1':
        mycircuit2.x(qreg2[1])
    if bits[1] == '1':
        mycircuit2.x(qreg2[0])

    # three alternating CNOTs implement a SWAP of the two qubits
    mycircuit2.cx(qreg2[1], qreg2[0])
    mycircuit2.cx(qreg2[0], qreg2[1])
    mycircuit2.cx(qreg2[1], qreg2[0])

    mycircuit2.measure(qreg2, creg2)

    # execute the circuit 100 times in the local simulator
    job = execute(mycircuit2, Aer.get_backend('qasm_simulator'), shots=100)
    counts = job.result().get_counts(mycircuit2)
    for outcome in counts:
        print("our input is", bits, ": ", outcome, "is observed", counts[outcome], "times")
# -

# <a id="task3"></a>
# <h3> Task 3 [Extra]</h3>
#
# Create a quantum circuit with $ n=5 $ qubits.
#
# Set each qubit to $ \ket{1} $.
#
# Repeat 4 times:
# <ul>
#     <li>Randomly pick a pair of qubits, and apply cx-gate (CNOT operator) on the pair.</li>
# </ul>
#
# Draw your circuit, and execute your program 100 times.
#
# Verify your measurement results by checking the diagram of the circuit.
# <h3> Solution </h3>

# +
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
# import randrange for random choices
from random import randrange

n = 5  # number of qubits
m = 4  # number of random CNOTs

states_of_qubits = []  # classical shadow of every qubit's state, tracked by hand

qreg3 = QuantumRegister(n)    # quantum register with n qubits
creg3 = ClassicalRegister(n)  # classical register with n bits
mycircuit3 = QuantumCircuit(qreg3, creg3)

# start every qubit in |1>
for i in range(n):
    mycircuit3.x(qreg3[i])
    states_of_qubits.append(1)

# randomly pick m pairs of qubits
for i in range(m):
    controller_qubit = randrange(n)
    target_qubit = randrange(n)
    # re-draw the target until it differs from the controller
    while controller_qubit == target_qubit:
        target_qubit = randrange(n)

    print("the indices of the controller and target qubits are", controller_qubit, target_qubit)

    mycircuit3.cx(qreg3[controller_qubit], qreg3[target_qubit])

    # mirror the CNOT classically: flip the target iff the controller is 1
    # (1 - x gives the negation of a bit x)
    if states_of_qubits[controller_qubit] == 1:
        states_of_qubits[target_qubit] = 1 - states_of_qubits[target_qubit]

# measure the quantum register
mycircuit3.measure(qreg3, creg3)

print("Everything looks fine, let's continue ...")
# -

# +
# draw the circuit
mycircuit3.draw(output='mpl')
# re-execute this cell if you DO NOT see the circuit diagram
# -

# <a id="task4"></a>
# <h3> Task 4 </h3>
#
# In this task, our aim is to create an operator which will apply the NOT operator to the target qubit qreg[0] when the control qubit qreg[1] is in state $\ket{0}$.
# In other words, we want to obtain the following operator:
#
# $\mymatrix{cccc}{0 & 1 & 0 & 0 \\ 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1}$.
#
# We can summarize its effect as follows:
# <ul>
#     <li>$ \ket{00} \rightarrow \ket{01} $, </li>
#     <li>$ \ket{01} \rightarrow \ket{00} $, </li>
#     <li>$ \ket{10} \rightarrow \ket{10} $, </li>
#     <li>$ \ket{11} \rightarrow \ket{11} $. </li>
# </ul>
#
# Write a function named c0x which takes the circuit name and the register as parameters to implement the operator and check using the code given below.
# <ul>
#     <li>Apply NOT operator to qreg[1];</li>
#     <li>Apply CNOT operator, where qreg[1] is control and qreg[0] is target;</li>
#     <li>Apply NOT operator to qreg[1] - to revert it to the initial state.</li>
# </ul>
# <b>Idea:</b> We can use our regular CNOT operator, and to change the condition for the control qubit we can apply NOT operator to it before the CNOT - this way the NOT operator will be applied to the target qubit when initially the state of the control qubit was $\ket{0}$.
#
# Although this trick is quite simple, this approach is important and will be very useful in our following implementations.

# <h3>Solution:</h3>

def c0x(mycircuit, qreg):
    """Zero-controlled NOT: flip qreg[0] iff qreg[1] is in state |0>."""
    mycircuit.x(qreg[1])            # invert the control ...
    mycircuit.cx(qreg[1], qreg[0])  # ... so a plain CNOT fires on the |0> case
    mycircuit.x(qreg[1])            # restore the control qubit

# +
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

qreg4 = QuantumRegister(2)
creg4 = ClassicalRegister(2)
mycircuit4 = QuantumCircuit(qreg4, creg4)

# We apply the operator c0x by calling the function
c0x(mycircuit4, qreg4)

# print the real part of the resulting unitary, right-aligned in 5-wide columns
job = execute(mycircuit4, Aer.get_backend('unitary_simulator'))
u = job.result().get_unitary(mycircuit4, decimals=3)
for row in u:
    print("".join(str(entry.real).rjust(5) for entry in row))

mycircuit4.draw(output="mpl")
# -

# <a id="task5"></a>
# <h3>Task 5</h3>
#
# You have a circuit with three qubits.
# Apply NOT operator to qreg[1] if qreg[0] is in state 0 and qreg[2] is in state 1. Check its effect on different inputs.

# <h3>Solution</h3>

# +
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

for bits in ['000', '001', '010', '011', '100', '101', '110', '111']:
    qreg5 = QuantumRegister(3)    # quantum register with 3 qubits
    creg5 = ClassicalRegister(3)  # classical register with 3 bits
    mycircuit5 = QuantumCircuit(qreg5, creg5)

    # prepare the input: bits[0] -> qreg5[2], bits[1] -> qreg5[1], bits[2] -> qreg5[0]
    if bits[0] == '1':
        mycircuit5.x(qreg5[2])
    if bits[1] == '1':
        mycircuit5.x(qreg5[1])
    if bits[2] == '1':
        mycircuit5.x(qreg5[0])

    # invert qreg5[0] so the Toffoli fires when qreg5[0] was 0 and qreg5[2] is 1
    mycircuit5.x(qreg5[0])
    mycircuit5.ccx(qreg5[2], qreg5[0], qreg5[1])
    mycircuit5.x(qreg5[0])  # set qreg5[0] back to its initial value

    # measure the qubits
    mycircuit5.measure(qreg5, creg5)

    # execute the circuit 100 times in the local simulator
    job = execute(mycircuit5, Aer.get_backend('qasm_simulator'), shots=100)
    counts = job.result().get_counts(mycircuit5)
    for outcome in counts:
        print("our input is", bits, ": ", outcome, "is observed", counts[outcome], "times")
# -

# <a id="task6"></a>
# <h3> Task 6 </h3>
#
# Implement the NOT operator controlled by 4 qubits where qreg[0] is the target and apply it iteratively to all possible states. Note that you will need additional qubits.
# <h3>Solution</h3>

def ccccx(mycircuit, qreg):
    """Apply NOT to qreg[0] controlled by the four qubits qreg[1]..qreg[4].

    qreg[5] and qreg[6] are ancilla qubits that must start in |0>; they are
    uncomputed at the end, so they return to |0>.
    """
    mycircuit.ccx(qreg[4], qreg[3], qreg[5])  # qreg[5] = qreg[4] AND qreg[3]
    mycircuit.ccx(qreg[2], qreg[1], qreg[6])  # qreg[6] = qreg[2] AND qreg[1]
    mycircuit.ccx(qreg[5], qreg[6], qreg[0])  # flip the target iff all four controls are 1
    # Return the additional (ancilla) qubits to their initial state
    mycircuit.ccx(qreg[2], qreg[1], qreg[6])
    mycircuit.ccx(qreg[4], qreg[3], qreg[5])

# +
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

# All 32 five-bit inputs.  (The original hand-written list accidentally
# repeated '00000' and '10000', so '01000' and '11000' were never tested.)
all_inputs = [format(i, '05b') for i in range(32)]

for bits in all_inputs:
    qreg6 = QuantumRegister(7)    # 5 data qubits + 2 ancilla qubits
    creg6 = ClassicalRegister(7)  # classical register with 7 bits
    mycircuit6 = QuantumCircuit(qreg6, creg6)

    # prepare the input: bits[0] -> qreg6[4], ..., bits[4] -> qreg6[0]
    if bits[0] == '1':
        mycircuit6.x(qreg6[4])
    if bits[1] == '1':
        mycircuit6.x(qreg6[3])
    if bits[2] == '1':
        mycircuit6.x(qreg6[2])
    if bits[3] == '1':
        mycircuit6.x(qreg6[1])
    if bits[4] == '1':
        mycircuit6.x(qreg6[0])

    ccccx(mycircuit6, qreg6)

    mycircuit6.measure(qreg6, creg6)

    job = execute(mycircuit6, Aer.get_backend('qasm_simulator'), shots=10000)
    counts = job.result().get_counts(mycircuit6)
    for outcome in counts:
        print("our input is", bits, ": ", outcome, "is observed", counts[outcome], "times")
# -

# <a id="task7"></a>
# <h3>Task 7</h3>
#
# Implement the following control: the NOT operator is applied to the target qubit qreg[0] if 5 control qubits qreg[5] to qreg[1] are initially in the state $\ket{10101}$. Check your operator by trying different initial states.
# You may define a function or write your code directly.

# <h3>Solution</h3>

# +
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

# Try different initial states
for bits in ['101010', '101011', '100000', '111111']:
    qreg7 = QuantumRegister(9)    # 6 circuit qubits + 3 ancilla qubits
    creg7 = ClassicalRegister(9)  # classical register with 9 bits
    mycircuit7 = QuantumCircuit(qreg7, creg7)

    # prepare the input: bits[0] -> qreg7[5], ..., bits[5] -> qreg7[0]
    if bits[0] == '1':
        mycircuit7.x(qreg7[5])
    if bits[1] == '1':
        mycircuit7.x(qreg7[4])
    if bits[2] == '1':
        mycircuit7.x(qreg7[3])
    if bits[3] == '1':
        mycircuit7.x(qreg7[2])
    if bits[4] == '1':
        mycircuit7.x(qreg7[1])
    if bits[5] == '1':
        mycircuit7.x(qreg7[0])

    # the pattern 10101 has zeros at qreg7[4] and qreg7[2]; invert them so a
    # plain 5-fold AND over the controls can be used
    mycircuit7.x(qreg7[4])
    mycircuit7.x(qreg7[2])

    # AND the five controls together via the ancillas, then flip the target
    mycircuit7.ccx(qreg7[1], qreg7[2], qreg7[6])
    mycircuit7.ccx(qreg7[3], qreg7[4], qreg7[7])
    mycircuit7.ccx(qreg7[6], qreg7[7], qreg7[8])
    mycircuit7.ccx(qreg7[5], qreg7[8], qreg7[0])

    # Return the additional and control qubits to their initial states
    mycircuit7.ccx(qreg7[6], qreg7[7], qreg7[8])
    mycircuit7.ccx(qreg7[3], qreg7[4], qreg7[7])
    mycircuit7.ccx(qreg7[1], qreg7[2], qreg7[6])
    mycircuit7.x(qreg7[4])
    mycircuit7.x(qreg7[2])

    mycircuit7.measure(qreg7, creg7)

    job = execute(mycircuit7, Aer.get_backend('qasm_simulator'), shots=10000)
    counts = job.result().get_counts(mycircuit7)
    for outcome in counts:
        print("our input is", bits, ": ", outcome, "is observed", counts[outcome], "times")
# -

# <a id="task8"></a>
# <h3>Task 8 (Optional)</h3>
#
# Implement the parametrized controlled NOT operator with 4 control qubits, where parameter will be the state of control qubits
# for which NOT operator will be applied to the target qubit.
#
# As a result you need to define the following function: <i>control(circuit,quantum_reg,number)</i>, where:
# <ul>
#     <li><i>circuit</i> allows to pass the quantum circuit;</li>
#     <li><i>quantum_reg</i> allows to pass the quantum register;</li>
#     <li><i>state</i> is the state of control qubits, between 0 and 15, where 0 corresponds to 0000 and 15 corresponds to 1111 (like binary numbers :) ).</li>
# </ul>

# <h3>Solution</h3>

def control(circuit, quantum_reg, state):
    """Flip quantum_reg[0] iff the controls quantum_reg[4..1] encode `state`.

    `state` is between 0 and 15; bit k of `state` is the required value of
    quantum_reg[k+1].  quantum_reg[5] and quantum_reg[6] are ancilla qubits
    that start and end in |0>.
    """
    # controls whose required bit is 0 are inverted, so one 4-fold AND suffices
    zero_positions = [k + 1 for k in range(4) if not (state >> k) & 1]
    for pos in zero_positions:
        circuit.x(quantum_reg[pos])

    circuit.ccx(quantum_reg[1], quantum_reg[2], quantum_reg[5])
    circuit.ccx(quantum_reg[3], quantum_reg[4], quantum_reg[6])
    circuit.ccx(quantum_reg[5], quantum_reg[6], quantum_reg[0])
    # uncompute the ancillas
    circuit.ccx(quantum_reg[3], quantum_reg[4], quantum_reg[6])
    circuit.ccx(quantum_reg[1], quantum_reg[2], quantum_reg[5])

    # restore the inverted control qubits
    for pos in reversed(zero_positions):
        circuit.x(quantum_reg[pos])

# You can try different inputs to see that your function is implementing the mentioned control operation.
# + #Try different initial states all_inputs=['01010','01011','10000','11111'] for input in all_inputs: qreg8 = QuantumRegister(7) # quantum register with 7 qubits creg8 = ClassicalRegister(7) # classical register with 7 bits mycircuit8 = QuantumCircuit(qreg8,creg8) # quantum circuit with quantum and classical registers #initialize the inputs if input[0]=='1': mycircuit8.x(qreg8[4]) # set the state of the first qubit to |1> if input[1]=='1': mycircuit8.x(qreg8[3]) # set the state of the second qubit to |1> if input[2]=='1': mycircuit8.x(qreg8[2]) # set the state of the third qubit to |1> if input[3]=='1': mycircuit8.x(qreg8[1]) # set the state of the fourth qubit to |1> if input[4]=='1': mycircuit8.x(qreg8[0]) # set the state of the fifth qubit to |1> control(mycircuit8,qreg8,5) mycircuit8.measure(qreg8,creg8) job = execute(mycircuit8,Aer.get_backend('qasm_simulator'),shots=10000) counts = job.result().get_counts(mycircuit8) for outcome in counts: # print the reverse of the outcomes print("our input is",input,": ",outcome,"is observed",counts[outcome],"times")
bronze/B39_Controlled_Operations_Solutions.ipynb
# <a href="https://colab.research.google.com/github/dmlc/gluon-cv/blob/onnx/scripts/onnx/notebooks/segmentation/deeplab_v3b_plus_wideresnet_citys.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # !pip3 install --upgrade onnxruntime # + import numpy as np import onnxruntime as rt import urllib.request import os.path from PIL import Image # + def fetch_model(): if not os.path.isfile("deeplab_v3b_plus_wideresnet_citys.onnx"): urllib.request.urlretrieve("https://apache-mxnet.s3-us-west-2.amazonaws.com/onnx/models/gluoncv-deeplab_v3b_plus_wideresnet_citys-f54693e5.onnx", filename="deeplab_v3b_plus_wideresnet_citys.onnx") return "deeplab_v3b_plus_wideresnet_citys.onnx" def prepare_img(img_path, input_shape): # input_shape: BHWC height, width = input_shape[1], input_shape[2] img = Image.open(img_path).convert('RGB') img = img.resize((width, height)) img = np.asarray(img) plt_img = img img = np.expand_dims(img, axis=0).astype('float32') return plt_img, img # - # **Make sure to replace the image you want to use** # + model = fetch_model() img_path = 'Your image' plt_img, img = prepare_img(img_path, (1, 480, 480, 3)) # + # Create a onnx inference session and get the input name onnx_session = rt.InferenceSession(model, None) input_name = onnx_session.get_inputs()[0].name # + raw_result = onnx_session.run([], {input_name: img})[0] # - # # (Optional) We use mxnet and gluoncv to visualize the result. # # Feel free to visualize the result your own way # # !pip3 install --upgrade mxnet gluoncv # + import mxnet as mx from gluoncv import utils from matplotlib import pyplot as plt from gluoncv.utils.viz import get_color_pallete # Make prediction predict = mx.nd.squeeze(mx.nd.argmax(mx.nd.array(raw_result), 1)).asnumpy() mask = get_color_pallete(predict, 'citys') mask.save('output.png') # + # Plot the result plt.imshow(mask) plt.show()
scripts/onnx/notebooks/segmentation/deeplab_v3b_plus_wideresnet_citys.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #default_exp core # - # # Core XLA extensions #exporti #colab IN_COLAB = True #hide #colab try: from google.colab import drive drive.mount('/content/drive') except ImportError: IN_COLAB = False #hide #colab import os if IN_COLAB: assert os.environ['COLAB_TPU_ADDR'], 'Make sure to select TPU from Edit > Notebook settings > Hardware accelerator' #hide #colab ![ -d /content ] && [ ! -d /content/data ] && curl -s https://course19.fast.ai/setup/colab | bash # ## Install fastai # # Use latest fastai and fastcore versions #hide_output #colab ![ -d /content ] && pip install -Uqq fastcore --upgrade ![ -d /content ] && pip install -Uqq fastai --upgrade # ## Setup torch XLA # # This is the official way to install Pytorch-XLA 1.7 [instructions here](https://colab.research.google.com/github/pytorch/xla/blob/master/contrib/colab/getting-started.ipynb#scrollTo=CHzziBW5AoZH) #hide_output #colab ![ -d /content ] && pip install -Uqq cloud-tpu-client==0.10 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.7-cp36-cp36m-linux_x86_64.whl # + #hide #colab # use this for getting pytorch XLA nightly version # VERSION = "20200707" #@param ["1.5" , "20200325","20200707", "nightly"] # # !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py # # !python pytorch-xla-env-setup.py --version $VERSION # - #hide # !pip freeze | grep torch # !pip freeze | grep fast #hide #colab # %cd /content/drive/MyDrive/fastai_xla_extensions # ## Check if XLA is available #exporti DEBUG = False # set to false for prod release TRACE = False # set to false for prod release #exporti if DEBUG: from pdb import set_trace else: from fastcore.imports import noop set_trace = noop # # + #export #hide_output import importlib import 
os import sys def xla_imported(): return 'torch_xla' in sys.modules # currently unused, might be deleted later? def xla_available_config(): return os.environ.get("XRT_DEVICE_MAP", False) and os.environ.get("XRT_WORKERS", False) def xla_module_exist(): return importlib.util.find_spec('torch_xla') # - # This next code routine handles the possibility of running the package when the environment does not provide a TPU (e.g. CI env, local, etc) by providing mock implementations #exporti import warnings try: import torch_xla except ImportError as e: if DEBUG: warnings.warn('TPU environment not available') #exporti if not xla_imported(): from types import SimpleNamespace from typing import Union,BinaryIO import os import pickle import torch.cuda def fake_opt_step(opt,barrier=False): opt.step() def fake_device(n=None, devkind=None): gpu_available = torch.cuda.is_available() return torch.device(torch.cuda.current_device()) if gpu_available else torch.device('cpu') def fake_save(obj, f: Union[str, os.PathLike, BinaryIO], master_only=True, global_master=False): return torch.save(obj,f,pickle_module=pickle, pickle_protocol=2, _use_new_zipfile_serialization=True) xm = SimpleNamespace( optimizer_step = fake_opt_step, xla_device = fake_device, save = fake_save ) else: import torch_xla.core.xla_model as xm # ## Patching BaseOptimizer to be Pickable # Patching Base Optimizer `__getstate__` and `__setstate__` whichi is used in pickling # the optimizer which should fix the bug in running the learner in multiple TPU cores # in XLA by which the `def _fetch_gradients(optimizer)` in `for param_group in optimizer.__getstate__()['param_groups']:` fails, and this patch fixes the "copy constructor" to include the param_groups. 
# + #export # from fastcore.foundation import GetAttr # from fastai.optimizer import Optimizer # from copy import deepcopy # Right now deciding to patch BaseOptimizer instead of add with a PickableOpt(Optimizer) class like in previous versions from fastcore.basics import patch_to from fastai.optimizer import _BaseOptimizer @patch_to(_BaseOptimizer) def __getstate__(self): # https://github.com/pytorch/pytorch/blob/46b252b83a97bba0926cead050d76fcef129cb6b/torch/optim/optimizer.py#L54 d = { 'defaults': self.defaults, 'state': self.state_dict(), 'param_groups': self.param_groups, } return d @patch_to(_BaseOptimizer) def __setstate__(self, data): # https://github.com/pytorch/pytorch/blob/46b252b83a97bba0926cead050d76fcef129cb6b/torch/optim/optimizer.py#L61 self.defaults = data['defaults'] self.load_state_dict(data['state']) self.param_groups = data['param_groups'] # - # ## XLA Optim Proxy # `XLAOptimProxy` is a class which has overridden the `step` method to call the Pytorch-XLA function `xm.optimizer_step` which synchronizes the XLA graph. All other calls to `XLAOptimProxy` just forward it to the internal `self.opt` instance. # + #export #colab # import torch_xla.core.xla_model as xm # + #export from fastcore.foundation import GetAttr class XLAOptimProxy(GetAttr): _default='opt' "Proxy optimizer to override `opt.step` with Pytorch XLA sync method `xm.optimizer_step` " def __init__(self,opt, barrier): self.opt = opt # because not using PickableOpt(opt) for the moment self._barrier = barrier def step(self): xm.optimizer_step(self.opt,barrier=self._barrier) @property def barrier(self): return self._barrier @barrier.setter def barrier(self,v): self._barrier = v # - # ## DeviceMoverTransform # `DeviceMoverTransform` is a simple transform that moves the batch input from the CPU to the XLA device. 
# # This is in lieu of the normal mechanism of the DataLoader implementation where the dls.device is set to the XLA device before the start of any batch transformations in the dataloaders. # # Unfortunately, the AffineCoordTfm which is used for data augmentation (all the batch Zoom, Warp, Rotate augmentations) cause a problem when run on the TPU due to some affine operations not currently implemented in the Pytorch XLA) which triggers a lowering of the XLA Tensors to the CPU to perform the affine operation and causes a massive slowdown, even much slower than just doing the affine transform in the CPU in the first place. # # The solution is then to postpone the moving of the input batch to TPU after the affine transformation, by setting the dls.device to None, which is done in the before_fit method of the XLAOptCallback. # #export from fastcore.transform import DisplayedTransform from fastcore.basics import store_attr from torch import Tensor import torch class DeviceMoverTransform(DisplayedTransform): "Transform to move input to new device and reverse to cpu" def __init__(self, device_to, device_from=torch.device('cpu')): store_attr('device_to,device_from') def encodes(self, o:Tensor): return o.to(self.device_to) def decodes(self, o:Tensor): return o.to(self.device_from) # The following functions are for the purpose of modifying the batch_transforms pipeline to add a device mover transform that moves the batch input sample to the TPU since this step has been disabled (by setting the dls.device to None) so that all batch transforms prior to the device mover transform are by default executed on the CPU. 
# + #export from fastcore.transform import Transform from fastai.vision.augment import AffineCoordTfm, RandomResizedCropGPU from fastai.data.core import DataLoaders,DataLoader def _isAffineCoordTfm(o:Transform): return isinstance(o,(AffineCoordTfm,RandomResizedCropGPU)) def _isDeviceMoverTransform(o:Transform): return isinstance(o,DeviceMoverTransform) def has_affinecoord_tfm(dls: DataLoaders) -> bool: "returns true if train dataloader has an AffineCoordTfm in the batch_tfms" idxs = dls.train.after_batch.fs.argwhere(_isAffineCoordTfm) return len(idxs) > 0 def has_devicemover_tfm(dl: DataLoader) -> bool: "returns true if train dataloader has a DeviceMoverTransform in the batch_tfms" idxs = dl.after_batch.fs.argwhere(_isDeviceMoverTransform) return len(idxs) > 0 def get_last_affinecoord_tfm_idx(dl:DataLoader)-> int: # -1 if none "returns index of last AffineCoordTfm if it exists, otherwise returns -1" idxs = dl.after_batch.fs.argwhere(_isAffineCoordTfm) return -1 if len(idxs) == 0 else idxs[-1] # - # This inserts a batch transform for a dataloader at the index location `idx`. #export def insert_batch_tfm(dl:DataLoader, batch_tfm:Transform, idx:int): "adds a batch_tfm in the batch_tfms for the dataloader at idx location" dl.after_batch.fs.insert(idx, batch_tfm) # This will add a device mover transform to the batch transforms if any of them trigger a lowering from the TPU to CPU. Currently identified transforms that cause this are the 'AffineCoordTfm` and `RandomResizeCropGPU` transforms. # # If none of the transforms are present, the dls.device is set to XLA so that when the `TrainEvalCallback.before_fit` is called, the model is also moved to the TPU. 
# + #export from fastai.learner import Learner def setup_input_device_mover(learn: Learner, new_device): "setup batch_tfms to use cpu if dataloader batch_tfms has AffineCoordTfms" if not has_affinecoord_tfm(learn.dls): learn.dls.device = new_device return learn.dls.device = None if has_devicemover_tfm(learn.dls.train): return # skip adding device mover if already added dm_tfm = DeviceMoverTransform(new_device) for dl in learn.dls.loaders: if not has_devicemover_tfm(dl): idx = get_last_affinecoord_tfm_idx(dl) if DEBUG: print(f'setup device mover dl: {dl} idx: {idx}') if idx != -1: insert_batch_tfm(dl, dm_tfm, idx+1) # - # ## XLA Opt Callback # This callback replaces the learner's `opt` with an instance of `XLAOptimProxy` that proxies the original `opt` during the beginning of the `fit` method and restores the original `opt` after the `fit`. # # It also sets the `dataloaders.device` and the `learn.model` to use a TPU core using the device returned by the `xm.xla_device()` method. # + #export from fastai.callback.core import Callback from fastai.data.core import DataLoaders from fastai.vision.all import to_device from fastai.callback.core import TrainEvalCallback from fastai.learner import Recorder class XLAOptCallback(Callback): 'Callback to replace `opt.step` with `xm.optimizer_step(opt)` as required to run on TPU' run_after,run_before = TrainEvalCallback,Recorder def __init__(self, barrier=True): self._barrier = barrier def before_fit(self): 'replace opt with proxy which calls `xm.optimizer_step` instead of `opt.step` and set `dls.device` and model to `xla_device`' # set dls device to none so prevent trigger of moving to batch input to XLA device # as this move will be done by the DeviceMoverTransform which has been added to the dls after_batch tfms if has_affinecoord_tfm(self.dls): self.dls.device = None if self.learn.opt is not None: if not isinstance(self.learn.opt,XLAOptimProxy): # force opt to reinitialize its parameters and make sure its parameters opt = 
self.learn.opt self.learn.opt = XLAOptimProxy(opt, barrier=self._barrier) def after_fit(self): 'restore original opt ' if isinstance(self.learn.opt, XLAOptimProxy): opt = self.learn.opt.opt self.learn.opt = opt @property def barrier(self): return self._barrier @barrier.setter def barrier(self,v): self._barrier = v # - # * Make sure the model and dataloader has been moved to the xla device prior to creating the optimizer by setting the opt to None which will force a call to create_opt in the fit methods after already moving the model to the TPU device in this method. #export from fastcore.foundation import patch @patch def to_xla(self:Learner, new_device=None): self.add_cb(XLAOptCallback()) if new_device is None: new_device = xm.xla_device() self.model.to(new_device) setup_input_device_mover(self, new_device) self.opt = None return self #export @patch def detach_xla(self:Learner): self.remove_cb(XLAOptCallback) self.dls.device = torch.device('cpu') self.model.to(self.dls.device) self.opt = None return self # ## Export (to be fixed) # # Still needs work #hide # import pickle # @patch # def export(self:Learner, fname='export.pkl', pickle_module=pickle, pickle_protocol=2): # "Export the content of `self` without the items and the optimizer state for inference" # if rank_distrib(): return # don't export if child proc # self._end_cleanup() # old_dbunch = self.dls # self.dls = self.dls.new_empty() # state = self.opt.state_dict() if self.opt is not None else None # self.opt = None # with warnings.catch_warnings(): # #To avoid the warning that come from PyTorch about model not being checked # warnings.simplefilter("ignore") # xm.save(self, self.path/fname) # self.create_opt() # if state is not None: self.opt.load_state_dict(state) # self.dls = old_dbunch # ## Example: Create an MNIST classifier # This is an example of the fastai_xla_extensions library # in action. # # First, we import fastai libraries. 
#hide #colab # %cd /content from fastai.vision.all import * from fastai.callback.training import GradientAccumulation # Load data path = untar_data(URLs.MNIST_TINY) Path.BASE_PATH = path # Create datablock # datablock = DataBlock( blocks=(ImageBlock,CategoryBlock), get_items=get_image_files, get_y=parent_label, splitter=GrandparentSplitter(), item_tfms=Resize(28), batch_tfms=aug_transforms(do_flip=False, min_scale=0.8) # trigger usage of RandomResizedCropGPU # batch_tfms=[] ) # Set dataloader to load the batches to the cpu dls = datablock.dataloaders(path) dls.device dls.train.after_batch.fs dls.show_batch() # Create the Learner # learner = cnn_learner(dls, resnet18, metrics=accuracy) # learner = cnn_learner(dls, resnet18, pretrained=False, metrics=accuracy) # see current bug re pretrained https://github.com/butchland/fastai_xla_extensions/issues/14 learner.dls.train.after_batch.fs DEBUG = False #colab learner.to_xla(xm.xla_device()); #colab learner.dls.device is None learner.dls.train.after_batch.fs learner.dls.valid.after_batch.fs # The `learner` object should have an `xla_opt` attribute which confirms that `XLAOptCallback` has been added to the list of callbacks for this learner. 
#colab learner.xla_opt #colab learner.xla_opt.barrier #colab learner.dls.device is None #colab learner.opt is None one_param(learner.model).device #colab has_affinecoord_tfm(learner.dls) #colab has_devicemover_tfm(learner.dls.train) TRACE = False # + #colab # currently an unrelated bug : https://github.com/fastai/fastai/issues/3011 # learner.summary() # + #colab # learner.show_training_loop() # - #colab learner.opt is None learner.dls.device #colab class CheckXLADeviceCallback(Callback): def before_fit(self): if self.dls.device is not None: print(f'dls device: {self.dls.device} model device: {one_param(self.learn.model).device}') else: print(f'dls device: None model device: {one_param(self.learn.model).device}') if self.learn.opt is not None: param = first(self.learn.opt.all_params())[0] print(f'opt param device: {param.device}') def before_epoch(self): if self.dls.device is not None: print(f'dls device: {self.dls.device} model device: {one_param(self.learn.model).device}') else: print(f'dls device: None model device: {one_param(self.learn.model).device}') # Run `fit` to train the model. 
#colab learner.fine_tune(6,freeze_epochs=4, cbs=CheckXLADeviceCallback()) #colab learner.dls.train.after_batch.fs #colab learner.detach_xla() #colab learner.dls.device is None #colab learner.opt is None one_param(learner.model).device learner.save('stage-1') learner = cnn_learner(dls,resnet18, metrics=accuracy) learner.load('stage-1') #colab learner.to_xla() learner.dls.device is None #colab learner.fit_flat_cos(1,cbs=CheckXLADeviceCallback()) one_param(learner.model).device learner.save('stage-2') learner.load('stage-2') #colab learner.fine_tune(6, freeze_epochs=4, cbs=CheckXLADeviceCallback()) learner.dls.train.after_batch.fs learner.save('stage-4') learner.lr_find() # + # from IPython.core.debugger import set_trace # def call_fit(learner,epochs=1): # set_trace() # learner.fit(epochs, cbs=[CheckXLADeviceCallback()]) # - TRACE = False learner.fit_one_cycle(5, lr_max=slice(2e-2)) learner.save('stage-5') # Gradient Accum callback (which calls CancelBatchException) should still work. # # An alternative design for the XLA Opt Callback which raises the CancelBatchException in the `after_backward` method (after executing `xm.optimizer_step` and `opt.zero_grad`) would interfere with the Gradient Accum callback (which raises `CancelBatchException` in the `after_backward` method to [skip the gradient updates](https://github.com/fastai/fastai/blob/master/fastai/callback/training.py#L22) in order to accumulate the gradients). # # The current design (add/remove `XLAOptimProxy` during `before_fit` and `after_fit` callback lifecycle methods) is less disruptive and more compatible with other callbacks. #colab learner.fit_one_cycle(4,cbs=[GradientAccumulation(n_acc=2),]) # Valid loss has kind of plateaued so this look ok. 
#colab learner.recorder.plot_loss() # Plot moms and lr across batches/epochs #colab learner.recorder.plot_sched() # Get Classification Interpretation for more details on model performance #colab interp = ClassificationInterpretation.from_learner(learner) # Plot confusion matrix #colab interp.plot_confusion_matrix() # Samples where model was most confused #colab interp.plot_top_losses(12) # **End of Notebook**
nbs/00_core.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="u6MTimx2mvKo" outputId="e3119c4e-ba86-40b6-d991-c6c047603ba5" import os import sys import json import datetime import pprint import tensorflow as tf # Authenticate, so we can access storage bucket and TPU from google.colab import auth auth.authenticate_user() # If you want to use TPU, first switch to tpu runtime in colab USE_TPU = True #@param{type:"boolean"} # We will use base uncased bert model, you can give try with large models # For large model TPU is necessary BERT_MODEL = 'uncased_L-12_H-768_A-12' #@param {type:"string"} # BERT checkpoint bucket BERT_PRETRAINED_DIR = 'gs://cloud-tpu-checkpoints/bert/' + BERT_MODEL print('***** BERT pretrained directory: {} *****'.format(BERT_PRETRAINED_DIR)) # !gsutil ls $BERT_PRETRAINED_DIR # Bucket for saving checkpoints and outputs BUCKET = 'quorabert' #@param {type:"string"} if BUCKET!="": OUTPUT_DIR = 'gs://{}/outputs'.format(BUCKET) tf.gfile.MakeDirs(OUTPUT_DIR) elif USE_TPU: raise ValueError('Must specify an existing GCS bucket name for running on TPU') else: OUTPUT_DIR = 'out_dir' os.mkdir(OUTPUT_DIR) print('***** Model output directory: {} *****'.format(OUTPUT_DIR)) if USE_TPU: # getting info on TPU runtime assert 'COLAB_TPU_ADDR' in os.environ, 'ERROR: Not connected to a TPU runtime; Change notebook runtype to TPU' TPU_ADDRESS = 'grpc://' + os.environ['COLAB_TPU_ADDR'] print('TPU address is', TPU_ADDRESS) # + [markdown] colab_type="text" id="ONIXa1_Pr1xX" # ## Clone BERT Repo and Download Quora Questions Pairs Dataset # # + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="0dTKAzm1k5BE" outputId="b57db3b8-e4c4-434d-c62a-e41048b6b695" # Clone BERT repo and add bert in system path # 
!test -d bert || git clone -q https://github.com/google-research/bert.git if not 'bert' in sys.path: sys.path += ['bert'] # Download QQP Task dataset present in GLUE Tasks. TASK_DATA_DIR = 'glue_data/QQP' # !test -d glue_data || git clone https://gist.github.com/60c2bdb54d156a41194446737ce03e2e.git glue_data # !test -d $TASK_DATA_DIR || python glue_data/download_glue_data.py --data_dir glue_data --tasks=QQP # !ls -als $TASK_DATA_DIR # + [markdown] colab_type="text" id="3nGLW4s-L6ws" # ## Model Configs and Hyper Parameters # # + colab={} colab_type="code" id="xUNH1_-zHJIH" import modeling import optimization import tokenization import run_classifier # Model Hyper Parameters TRAIN_BATCH_SIZE = 32 # For GPU, reduce to 16 EVAL_BATCH_SIZE = 8 PREDICT_BATCH_SIZE = 8 LEARNING_RATE = 2e-5 NUM_TRAIN_EPOCHS = 2.0 WARMUP_PROPORTION = 0.1 MAX_SEQ_LENGTH = 200 # Model configs SAVE_CHECKPOINTS_STEPS = 1000 ITERATIONS_PER_LOOP = 1000 NUM_TPU_CORES = 8 VOCAB_FILE = os.path.join(BERT_PRETRAINED_DIR, 'vocab.txt') CONFIG_FILE = os.path.join(BERT_PRETRAINED_DIR, 'bert_config.json') INIT_CHECKPOINT = os.path.join(BERT_PRETRAINED_DIR, 'bert_model.ckpt') DO_LOWER_CASE = BERT_MODEL.startswith('uncased') # + colab={} colab_type="code" id="5RvBsrOrKLJN" class QQPProcessor(run_classifier.DataProcessor): """Processor for the Quora Question pair data set.""" def get_train_examples(self, data_dir): """Reading train.tsv and converting to list of InputExample""" return self._create_examples( self._read_tsv(os.path.join(data_dir,"train.tsv")), 'train') def get_dev_examples(self, data_dir): """Reading dev.tsv and converting to list of InputExample""" return self._create_examples( self._read_tsv(os.path.join(data_dir,"dev.tsv")), 'dev') def get_test_examples(self, data_dir): """Reading train.tsv and converting to list of InputExample""" return self._create_examples( self._read_tsv(os.path.join(data_dir,"test.tsv")), 'test') def get_predict_examples(self, sentence_pairs): """Given question pairs, 
conevrting to list of InputExample""" examples = [] for (i, qpair) in enumerate(sentence_pairs): guid = "predict-%d" % (i) # converting questions to utf-8 and creating InputExamples text_a = tokenization.convert_to_unicode(qpair[0]) text_b = tokenization.convert_to_unicode(qpair[1]) # We will add label as 0, because None is not supported in converting to features examples.append( run_classifier.InputExample(guid=guid, text_a=text_a, text_b=text_b, label=0)) return examples def _create_examples(self, lines, set_type): """Creates examples for the training, dev and test sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%d" % (set_type, i) if set_type=='test': # removing header and invalid data if i == 0 or len(line)!=3: print(guid, line) continue text_a = tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) label = 0 # We will use zero for test as convert_example_to_features doesn't support None else: # removing header and invalid data if i == 0 or len(line)!=6: continue text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) label = int(line[5]) examples.append( run_classifier.InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): "return class labels" return [0,1] # + colab={} colab_type="code" id="8GMeF2pc7igA" # Instantiate an instance of QQPProcessor and tokenizer processor = QQPProcessor() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer(vocab_file=VOCAB_FILE, do_lower_case=DO_LOWER_CASE) # + colab={"base_uri": "https://localhost:8080/", "height": 1278} colab_type="code" id="OdMc4HkJ7ljr" outputId="4c2ee1db-0da1-4ca2-dd03-7865563bcb37" # Converting training examples to features print("################ Processing Training Data #####################") TRAIN_TF_RECORD = os.path.join(OUTPUT_DIR, "train.tf_record") train_examples = processor.get_train_examples(TASK_DATA_DIR) 
num_train_examples = len(train_examples) num_train_steps = int( num_train_examples / TRAIN_BATCH_SIZE * NUM_TRAIN_EPOCHS) num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION) run_classifier.file_based_convert_examples_to_features(train_examples, label_list, MAX_SEQ_LENGTH, tokenizer, TRAIN_TF_RECORD) # + [markdown] colab_type="text" id="1uq8zO7O5Dnq" # ## Creating Classification Model # + colab={} colab_type="code" id="6_aUbDJA1N7w" def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model.""" # Bert Model instant model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) # Getting output for last layer of BERT output_layer = model.get_pooled_output() # Number of outputs for last layer hidden_size = output_layer.shape[-1].value # We will use one layer on top of BERT pretrained for creating classification model output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) # Calcaulte prediction probabilites and loss logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) # + [markdown] colab_type="text" id="gxTo8jgbuRoG" # ## Model Function Builder for Estimator # 
# Based on mode, We will create optimizer for training, evaluation metrics for evaluation and estimator spec

# + colab={} colab_type="code" id="An2DFEqX2yDJ"
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator.

    The closure builds a BERT classification model and returns a
    `TPUEstimatorSpec` appropriate for the current mode (TRAIN / EVAL / PREDICT).
    `create_model`, `modeling` and `optimization` come from the BERT codebase
    imported elsewhere in this notebook.
    """

    def model_fn(features, labels, mode, params):
        """The `model_fn` for TPUEstimator."""
        # reading features input
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"]
        # `is_real_example` marks padding examples (added to fill TPU batches)
        # with weight 0 so they can be excluded from weighted metrics.
        is_real_example = None
        if "is_real_example" in features:
            is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
        else:
            is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

        # checking if training mode
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        # create simple classification model
        (total_loss, per_example_loss, logits, probabilities) = create_model(
            bert_config, is_training, input_ids, input_mask, segment_ids,
            label_ids, num_labels, use_one_hot_embeddings)

        # getting variables for initialization and using pretrained init checkpoint
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:
                # On TPU the checkpoint restore must happen inside a Scaffold.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            # defining optimizer function
            train_op = optimization.create_optimizer(
                total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            # Training estimator spec
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.EVAL:
            # accuracy, loss, auc, F1, precision and recall metrics for evaluation
            def metric_fn(per_example_loss, label_ids, logits, is_real_example):
                predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
                accuracy = tf.metrics.accuracy(
                    labels=label_ids, predictions=predictions, weights=is_real_example)
                # NOTE(review): f1_score, auc, recall and precision below do not
                # pass `weights=is_real_example`, so TPU padding examples are
                # counted in these metrics; also AUC is computed on hard class
                # predictions rather than probabilities — confirm whether this
                # matches the intended evaluation protocol.
                f1_score = tf.contrib.metrics.f1_score(
                    label_ids,
                    predictions)
                auc = tf.metrics.auc(
                    label_ids,
                    predictions)
                recall = tf.metrics.recall(
                    label_ids,
                    predictions)
                precision = tf.metrics.precision(
                    label_ids,
                    predictions)
                return {
                    "eval_accuracy": accuracy,
                    "eval_loss": loss,
                    "f1_score": f1_score,
                    "auc": auc,
                    "precision": precision,
                    "recall": recall
                }

            eval_metrics = (metric_fn,
                            [per_example_loss, label_ids, logits, is_real_example])
            # estimator spec for evaluation
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        else:
            # estimator spec for predictions
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                predictions={"probabilities": probabilities},
                scaffold_fn=scaffold_fn)
        return output_spec

    return model_fn


# + [markdown] colab_type="text" id="elGbiKDlamy6"
# ## Creating TPUEstimator

# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="ZPuYpqW97vMf" outputId="cf38c1db-a1bf-4ea8-e006-2d5a0dc7dbab"
# Define TPU configs
if USE_TPU:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(TPU_ADDRESS)
else:
    tpu_cluster_resolver = None

run_config = tf.contrib.tpu.RunConfig(
    cluster=tpu_cluster_resolver,
    model_dir=OUTPUT_DIR,
    save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,
    tpu_config=tf.contrib.tpu.TPUConfig(
        iterations_per_loop=ITERATIONS_PER_LOOP,
        num_shards=NUM_TPU_CORES,
        per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2))

# + colab={} colab_type="code" id="f5qq6yfS7yOw"
# create model function for estimator using model function builder
model_fn = model_fn_builder(
    bert_config=modeling.BertConfig.from_json_file(CONFIG_FILE),
    num_labels=len(label_list),
    init_checkpoint=INIT_CHECKPOINT,
    learning_rate=LEARNING_RATE,
    num_train_steps=num_train_steps,
    num_warmup_steps=num_warmup_steps,
    use_tpu=USE_TPU,
    use_one_hot_embeddings=True)

# + colab={"base_uri": "https://localhost:8080/", "height": 241} colab_type="code" id="T6D-ZzhlfeGx" outputId="f6ff8829-b75b-4c04-b4de-dff9c05969ff"
# Defining TPU Estimator
estimator = tf.contrib.tpu.TPUEstimator(
    use_tpu=USE_TPU,
    model_fn=model_fn,
    config=run_config,
    train_batch_size=TRAIN_BATCH_SIZE,
    eval_batch_size=EVAL_BATCH_SIZE,
    predict_batch_size=PREDICT_BATCH_SIZE)

# + [markdown] colab_type="text" id="l1eH8gUIZ9gD"
# ## Finetune Training
#
#

# + colab={"base_uri": "https://localhost:8080/", "height": 3641} colab_type="code" id="Rk4PXAdnjW_N" outputId="85ee1d1f-1bcd-4a00-a778-e91db6ba2ab5"
# Train the model.
print('QQP on BERT base model normally takes about 1 hour on TPU and 15-20 hours on GPU. Please wait...')
print('***** Started training at {} *****'.format(datetime.datetime.now()))
print(' Num examples = {}'.format(num_train_examples))
print(' Batch size = {}'.format(TRAIN_BATCH_SIZE))
tf.logging.info(" Num steps = %d", num_train_steps)

# we are using `file_based_input_fn_builder` for creating input function from TF_RECORD file
train_input_fn = run_classifier.file_based_input_fn_builder(TRAIN_TF_RECORD,
                                                            seq_length=MAX_SEQ_LENGTH,
                                                            is_training=True,
                                                            drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
print('***** Finished training at {} *****'.format(datetime.datetime.now()))

# + [markdown] colab_type="text" id="BcSOEcxdZo3B"
# ## Evaluate FineTuned model
# First we will evaluate on Train set and Then on Dev set

# + colab={"base_uri": "https://localhost:8080/", "height": 887} colab_type="code" id="ne6yR18I3T09" outputId="38b1e414-6d46-471f-d2f1-07010ce3fae0"
# eval the model on train set.
print('***** Started Train Set evaluation at {} *****'.format(datetime.datetime.now()))
print(' Num examples = {}'.format(num_train_examples))
print(' Batch size = {}'.format(EVAL_BATCH_SIZE))

# eval input function for train set
train_eval_input_fn = run_classifier.file_based_input_fn_builder(TRAIN_TF_RECORD,
                                                                 seq_length=MAX_SEQ_LENGTH,
                                                                 is_training=False,
                                                                 drop_remainder=True)
# evaluate on train set
# NOTE(review): with drop_remainder=True, int(num_train_examples/EVAL_BATCH_SIZE)
# steps skips the final partial batch — examples in it are not evaluated.
result = estimator.evaluate(input_fn=train_eval_input_fn,
                            steps=int(num_train_examples/EVAL_BATCH_SIZE))
print('***** Finished evaluation at {} *****'.format(datetime.datetime.now()))

print("***** Eval results *****")
for key in sorted(result.keys()):
    print(' {} = {}'.format(key, str(result[key])))

# + colab={"base_uri": "https://localhost:8080/", "height": 734} colab_type="code" id="CAZ_611y7owV" outputId="e96efaf0-7699-410a-b77c-0c42db0c4c17"
# Converting eval examples to features
print("################ Processing Dev Data #####################")
EVAL_TF_RECORD = os.path.join(OUTPUT_DIR, "eval.tf_record")
eval_examples = processor.get_dev_examples(TASK_DATA_DIR)
num_eval_examples = len(eval_examples)
# serialize dev examples as TF records for the file-based input function
run_classifier.file_based_convert_examples_to_features(eval_examples,
                                                       label_list,
                                                       MAX_SEQ_LENGTH,
                                                       tokenizer,
                                                       EVAL_TF_RECORD)

# + colab={"base_uri": "https://localhost:8080/", "height": 785} colab_type="code" id="rTT5RTAlkCO5" outputId="bbab37cf-c319-426e-bd21-85a9aa57589a"
# Eval the model on Dev set.
print('***** Started Dev Set evaluation at {} *****'.format(datetime.datetime.now()))
print(' Num examples = {}'.format(num_eval_examples))
print(' Batch size = {}'.format(EVAL_BATCH_SIZE))

# eval input function for dev set
eval_input_fn = run_classifier.file_based_input_fn_builder(EVAL_TF_RECORD,
                                                           seq_length=MAX_SEQ_LENGTH,
                                                           is_training=False,
                                                           drop_remainder=True)
# evaluate on dev set
result = estimator.evaluate(input_fn=eval_input_fn,
                            steps=int(num_eval_examples/EVAL_BATCH_SIZE))
print('***** Finished evaluation at {} *****'.format(datetime.datetime.now()))

print("***** Eval results *****")
for key in sorted(result.keys()):
    print(' {} = {}'.format(key, str(result[key])))

# + [markdown] colab_type="text" id="zBdomz-EyZKk"
#
# ## Evaluation Results
#
#
# ---
# Evaluation results are on BERT base uncased model. For reproducing similar results, train for 3 epochs.
#
#
#
# |**Metrics** | **Train Set** | **Dev Set** |
# |---|---|---|
# |**Loss**|0.150|0.497|
# |**Accuracy**|0.969|0.907|
# |**F1**|0.959|0.875|
# |**AUC**|0.969|0.902|
# |**Precision**|0.949|0.864|
# |**Recall**|0.969|0.886|
#

# + [markdown] colab_type="text" id="iOP8xA32CBjE"
# ## Predictions on Model
#
# First We will predict on custom examples.
#
# For test set, We will get predictions and save in file.
# + colab={} colab_type="code" id="AhvA3hEL-2Xt"
# examples sentences, feel free to change and try
sent_pairs = [("how can i improve my english?", "how can i become fluent in english?"),
              ("How can i recover old gmail account ?", "How can i delete my old gmail account ?"),
              ("How can i recover old gmail account ?", "How can i access my old gmail account ?")]

# + colab={"base_uri": "https://localhost:8080/", "height": 1176} colab_type="code" id="_CXSUjvgMucd" outputId="43d926d7-11e1-45f0-ac2e-ae980c628516"
print("******* Predictions on Custom Data ********")
# create `InputExample` for custom examples
predict_examples = processor.get_predict_examples(sent_pairs)
num_predict_examples = len(predict_examples)

# For TPU, We will append `PaddingExample` for maintaining batch size
# FIX: pad up to PREDICT_BATCH_SIZE (was EVAL_BATCH_SIZE). Prediction runs with
# predict_batch_size=PREDICT_BATCH_SIZE, so the example count must be divisible
# by that batch size; padding to EVAL_BATCH_SIZE breaks TPU prediction whenever
# the two sizes differ.
if USE_TPU:
    while(len(predict_examples)%PREDICT_BATCH_SIZE!=0):
        predict_examples.append(run_classifier.PaddingInputExample())

# Converting to features
predict_features = run_classifier.convert_examples_to_features(predict_examples,
                                                               label_list,
                                                               MAX_SEQ_LENGTH,
                                                               tokenizer)

print(' Num examples = {}'.format(num_predict_examples))
print(' Batch size = {}'.format(PREDICT_BATCH_SIZE))

# Input function for prediction
predict_input_fn = run_classifier.input_fn_builder(predict_features,
                                                   seq_length=MAX_SEQ_LENGTH,
                                                   is_training=False,
                                                   drop_remainder=True)
result = list(estimator.predict(input_fn=predict_input_fn))
print(result)
# only the first num_predict_examples results are real; the rest are padding
for ex_i in range(num_predict_examples):
    print("****** Example {} ******".format(ex_i))
    print("Question1 :", sent_pairs[ex_i][0])
    print("Question2 :", sent_pairs[ex_i][1])
    # probabilities[1] is the score of the positive ("duplicate") class
    print("Prediction :", result[ex_i]['probabilities'][1])

# + colab={"base_uri": "https://localhost:8080/", "height": 751} colab_type="code" id="6TOq8o9aTMzk" outputId="312bd5c4-aad1-4fa5-8d39-3aa00770b883"
# Converting test examples to features
print("################ Processing Test Data #####################")
TEST_TF_RECORD = os.path.join(OUTPUT_DIR, "test.tf_record")
test_examples = processor.get_test_examples(TASK_DATA_DIR)
num_test_examples = len(test_examples)
run_classifier.file_based_convert_examples_to_features(test_examples,
                                                       label_list,
                                                       MAX_SEQ_LENGTH,
                                                       tokenizer,
                                                       TEST_TF_RECORD)

# + colab={} colab_type="code" id="-5ohC_ab0i8I"
# Predictions on test set.
print('***** Started Prediction at {} *****'.format(datetime.datetime.now()))
print(' Num examples = {}'.format(num_test_examples))
print(' Batch size = {}'.format(PREDICT_BATCH_SIZE))

# predict input function for test set
test_input_fn = run_classifier.file_based_input_fn_builder(TEST_TF_RECORD,
                                                           seq_length=MAX_SEQ_LENGTH,
                                                           is_training=False,
                                                           drop_remainder=True)
tf.logging.set_verbosity(tf.logging.ERROR)
# predict on test set
result = list(estimator.predict(input_fn=test_input_fn))
print('***** Finished Prediction at {} *****'.format(datetime.datetime.now()))

# saving test predictions: one "<guid> , <positive-class probability>" line per example
output_test_file = os.path.join(OUTPUT_DIR, "test_predictions.txt")
with tf.gfile.GFile(output_test_file, "w") as writer:
    for (example_i, predictions_i) in enumerate(result):
        writer.write("%s , %s\n" % (test_examples[example_i].guid,
                                    str(predictions_i['probabilities'][1])))
upload code final/BERT.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Evaluate ALL features
#
# Similar to notebook 3 but we package everything inside a for loop to evaluate all the features.

# +
# TO REMOVE when notebook is stable
# %load_ext autoreload
# %autoreload 2
# -

# ### Common Imports

# ### Decide how to filter the anndata object

from anndata import read_h5ad
from tissue_purifier.genex import *

# +
# filter cells parameters
fc_bc_min_umi = 200  # filter cells with too few UMI
fc_bc_max_umi = 3000  # filter cells with too many UMI
fc_bc_min_n_genes_by_counts = 10  # filter cells with too few GENES
fc_bc_max_n_genes_by_counts = 2500  # filter cells with too many GENES
fc_bc_max_pct_counts_mt = 5  # filter cells with mitochondrial fraction too high

# filter genes parameters
fg_bc_min_cells_by_counts = 3000  # filter genes which appear in too few CELLS

# filter rare cell types parameters
fctype_bc_min_cells_absolute = 100  # filter cell-types which are too RARE in absolute number
fctype_bc_min_cells_frequency = 0.01  # filter cell-types which are too RARE in relative abundance
# -

# ### Open the first annotated anndata

adata = read_h5ad(filename="./testis_anndata_annotated/anndata_sick3.h5ad")
adata

# ### compute few metrics

# +
import scanpy as sc

cell_type_key = "cell_type"

# mitochondria metrics
adata.var['mt'] = adata.var_names.str.startswith('mt-')  # annotate the group of mitochondrial genes as 'mt'
sc.pp.calculate_qc_metrics(adata, qc_vars=['mt'], percent_top=None, log1p=False, inplace=True)

# counts cells frequency (Categorical.describe gives "counts" and "freqs" per category)
tmp = adata.obs[cell_type_key].values.describe()
print(tmp)
mask1 = (tmp["counts"] > fctype_bc_min_cells_absolute)
mask2 = (tmp["freqs"] > fctype_bc_min_cells_frequency)
mask = mask1 * mask2
cell_type_keep = set(tmp[mask].index.values)
adata.obs["keep_ctype"] = adata.obs["cell_type"].apply(lambda x: x in cell_type_keep)

# Note that adata has extra annotation now
adata
# -

# ### Filter out cells, genes and cell-type

adata = adata[adata.obs["total_counts"] > fc_bc_min_umi, :]
adata = adata[adata.obs["total_counts"] < fc_bc_max_umi, :]
adata = adata[adata.obs["n_genes_by_counts"] > fc_bc_min_n_genes_by_counts, :]
adata = adata[adata.obs["n_genes_by_counts"] < fc_bc_max_n_genes_by_counts, :]
adata = adata[adata.obs["pct_counts_mt"] < fc_bc_max_pct_counts_mt, :]
adata = adata[adata.obs["keep_ctype"] == True, :]
adata = adata[:, adata.var["n_cells_by_counts"] > fg_bc_min_cells_by_counts]

# # Loop to train multiple gene_regression models

from tissue_purifier.genex import *
import os
import numpy
import matplotlib
import matplotlib.pyplot as plt
import seaborn

# +
gr_ckpt_dir = "gr_ckpt"
filename_no_covariate = os.path.join(gr_ckpt_dir, "gr_no_covariate.pt")

# pair up each "*dataset.pt" file with its model checkpoint
ckpt_list, dataset_list = [], []
for f in os.listdir(gr_ckpt_dir):
    if not f.endswith("no_covariate.pt") and not f.endswith("dataset.pt"):
        ckpt_list.append(f)
    if f.endswith("dataset.pt"):
        dataset_list.append(f)
dataset_list.sort()
ckpt_list.sort()

for a, b in zip(dataset_list, ckpt_list):
    print(a, "--->", b)
# -

# # check the loss functions

# +
gr = GeneRegression()

nmax = len(ckpt_list)
ncols = 4
nrows = int(numpy.ceil(float(nmax)/ncols))
fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=(6*ncols, 6*nrows))
fig.suptitle("Loss History")

for n, ckpt_file in enumerate(ckpt_list):
    r, c = n//ncols, n%ncols
    ax_cur = axes[r, c]
    gr.load_ckpt(os.path.join(gr_ckpt_dir, ckpt_file))
    gr.show_loss(ax=ax_cur, logy=False, logx=False)
    _ = ax_cur.set_title(ckpt_file)
# -

# ### check the overdispersion parameter

# +
nmax = len(ckpt_list)
ncols = 4
nrows = int(numpy.ceil(float(nmax)/ncols))
fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=(6*ncols, 6*nrows))
fig.suptitle("Distribution on gene-overdispersion params, i.e. eps")

for n, ckpt_file in enumerate(ckpt_list):
    r, c = n//ncols, n%ncols
    ax_cur = axes[r, c]
    gr.load_ckpt(os.path.join(gr_ckpt_dir, ckpt_file))
    df_param = gr.get_params()
    _ = seaborn.histplot(data=df_param, x="eps", hue="cell_type", bins=200, ax=ax_cur, multiple="layer")
    _ = ax_cur.set_title(ckpt_file)
    _ = ax_cur.set_xlim(gr._train_kargs["eps_range"])
# -

# ### Compute the baseline metrics (i.e. the case with no covariates).

# +
filename_no_covariate_ckpt = os.path.join(gr_ckpt_dir, "gr_no_covariate.pt")
filename_no_covariate_dataset = os.path.join(gr_ckpt_dir, "gr_no_covariate_dataset.pt")

import torch
import seaborn

gr.load_ckpt(filename_no_covariate_ckpt)
train_dataset, test_dataset, val_dataset = torch.load(filename_no_covariate_dataset)

# FIX: the train-set baseline was previously computed on `test_dataset`;
# it must use `train_dataset`.
df_metric_no_cov_train, df_count_no_cov_train = gr.predict(
    train_dataset,
    num_samples=100,
    subsample_size_cells=200,
    subsample_size_genes=None)

df_metric_no_cov_test, df_count_no_cov_test = gr.predict(
    test_dataset,
    num_samples=100,
    subsample_size_cells=200,
    subsample_size_genes=None)

# FIX: the cells below referenced `df_baseline_train` / `df_baseline_test`,
# which were never defined (NameError). Define them here as per-(cell_type, gene)
# means so they align with the grouped metrics computed in the covariate loop.
df_baseline_train = df_metric_no_cov_train.groupby(["cell_type", "gene"]).mean()
df_baseline_test = df_metric_no_cov_test.groupby(["cell_type", "gene"]).mean()
# -

# ### plot the ratio of q_train_no_cov over q_test_no_cov

df_baseline_train["q_train_over_test"] = df_baseline_train["q_dist"] / df_baseline_test["q_dist"]
# reset_index so "cell_type" is a column seaborn can use for hue
_ = seaborn.histplot(data=df_baseline_train.reset_index(), x="q_train_over_test", hue="cell_type", bins=200)
#_ = ax_cur.set_title("{} score= {}".format(ckpt_file, score))

# ### compute the ratio w.r.t the baseline

result_dict = dict()
for n, (data_file, ckpt_file) in enumerate(zip(dataset_list, ckpt_list)):
    gr.load_ckpt(os.path.join(gr_ckpt_dir, ckpt_file))
    train_dataset, test_dataset, _ = torch.load(os.path.join(gr_ckpt_dir, data_file))

    df_metric_tmp_train, df_count_tmp_train = gr.predict(
        train_dataset,
        num_samples=100,
        subsample_size_cells=200,
        subsample_size_genes=None)

    df_metric_tmp_test, df_count_tmp_test = gr.predict(
        test_dataset,
        num_samples=100,
        subsample_size_cells=200,
        subsample_size_genes=None)

    df_tmp_test = df_metric_tmp_test.groupby(["cell_type", "gene"]).mean()
    df_tmp_test["q_ratio"] = df_tmp_test["q_dist"] / df_baseline_test["q_dist"]
    result_dict["test_"+ckpt_file] = df_tmp_test

    df_tmp_train = df_metric_tmp_train.groupby(["cell_type", "gene"]).mean()
    df_tmp_train["q_ratio"] = df_tmp_train["q_dist"] / df_baseline_train["q_dist"]
    # FIX: this previously wrote to "test_"+ckpt_file again, silently
    # overwriting the test-set entry with the train-set one.
    result_dict["train_"+ckpt_file] = df_tmp_train

# # plot the distribution of Q

# +
nmax = len(result_dict.keys())
ncols = 3
nrows = int(numpy.ceil(float(nmax)/ncols))
fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=(6*ncols, 6*nrows))
fig.suptitle("Distribution of q_with_cov / q_no_cov")

for n, key in enumerate(result_dict.keys()):
    print(key)
    r, c = n//ncols, n%ncols
    ax_cur = axes[r, c]
    # reset_index so "cell_type" is a column seaborn can use for hue
    _ = seaborn.histplot(data=result_dict[key].reset_index(), x="q_ratio", hue="cell_type", bins=200, ax=ax_cur)
    score = result_dict[key]["q_ratio"].mean()
    _ = ax_cur.set_title("{} score= {}".format(key, score))
# -

# From these plots I see that all ncv are roughly equivalent to predict gene expression. \
# Maybe only some genes in some cell_type can be predicted using ncv. \
# We explore this possibility below

# # FROM HERE

# ### Find most predictable genes. Maybe only few genes can be predicted from micro-environment

for key in result_dict.keys():
    print(key)
notebooks/notebook3_all_part2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0 # --- # # Working with Hugging Face models # <img align="left" width="130" src="https://raw.githubusercontent.com/PacktPublishing/Amazon-SageMaker-Cookbook/master/Extra/cover-small-padded.png"/> # # This notebook contains the code to help readers work through one of the recipes of the book [Machine Learning with Amazon SageMaker Cookbook: 80 proven recipes for data scientists and developers to perform ML experiments and deployments](https://www.amazon.com/Machine-Learning-Amazon-SageMaker-Cookbook/dp/1800567030) # ### How to do it... # !mkdir -p scripts # + g = "raw.githubusercontent.com" p = "PacktPublishing" a = "Amazon-SageMaker-Cookbook" mc = "master/Chapter09" path = f"https://{g}/{p}/{a}/{mc}/scripts" # - # !wget -P scripts {path}/setup.py # !wget -P scripts {path}/train.py # !wget -P scripts {path}/inference.py # !wget -P scripts {path}/requirements.txt # !mkdir -p tmp # + g = "raw.githubusercontent.com" p = "PacktPublishing" a = "Amazon-SageMaker-Cookbook" mc = "master/Chapter09" path = f"https://{g}/{p}/{a}/{mc}/files" # - # !wget -P tmp {path}/synthetic.train.txt # !wget -P tmp {path}/synthetic.validation.txt s3_bucket = "<insert S3 bucket name here>" prefix = "chapter09" s3_train_data = 's3://{}/{}/input/{}'.format( s3_bucket, prefix, "synthetic.train.txt" ) s3_validation_data = 's3://{}/{}/input/{}'.format( s3_bucket, prefix, "synthetic.validation.txt" ) # !aws s3 cp tmp/synthetic.train.txt {s3_train_data} # !aws s3 cp tmp/synthetic.validation.txt {s3_validation_data} # + import sagemaker from sagemaker import Session role = sagemaker.get_execution_role() session = sagemaker.Session() # + from sagemaker.huggingface import HuggingFace 
hyperparameters = { 'epochs': 1, 'train_batch_size': 32, 'model_name':'distilbert-base-uncased' } # - estimator = HuggingFace( entry_point='train.py', source_dir='./scripts', instance_type='ml.p3.2xlarge', instance_count=1, role=role, transformers_version='4.4', pytorch_version='1.6', py_version='py36', hyperparameters=hyperparameters ) # + from sagemaker.inputs import TrainingInput train_data = TrainingInput(s3_train_data) validation_data = TrainingInput(s3_validation_data) data_channels = { 'train': train_data, 'valid': validation_data } # + # %%time estimator.fit(data_channels) # + from sagemaker.pytorch.model import PyTorchModel model_data = estimator.model_data model = PyTorchModel( model_data=model_data, role=role, source_dir="scripts", entry_point='inference.py', framework_version='1.6.0', py_version="py3" ) # + # %%time predictor = model.deploy( instance_type='ml.m5.xlarge', initial_instance_count=1 ) # + from sagemaker.serializers import JSONSerializer from sagemaker.deserializers import JSONDeserializer predictor.serializer = JSONSerializer() predictor.deserializer = JSONDeserializer() # + test_data = { "text": "This tastes bad. I hate this place." } predictor.predict(test_data) # + test_data = { "text": "Very delicious. I would recommend this to my friends" } predictor.predict(test_data) # - predictor.delete_endpoint()
Chapter09/01 - Working with Hugging Face models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # A Gentle Introduction to Programming Concepts - Using Python # ## Introduction # ### Play along at home # # You can follow along and through the notebooks that we will be working through by going to the GitHub repository that we manage our content in. # # * Repository: https://github.com/unmrds/cc-python # * Introduction/Concepts (this notebook): # * http://tinyurl.com/cc-python-intro # * https://github.com/unmrds/cc-python/blob/master/Programming%20Concepts.ipynb # * Aboutness demo: # * http://tinyurl.com/cc-python-aboutness # * https://github.com/unmrds/cc-python/blob/master/IR%20Keywords%20Versus%20IR%20%22Aboutness%22.ipynb # * Space Analysis: # * http://tinyurl.com/cc-python-space # * https://github.com/unmrds/cc-python/blob/master/Space%20Analysis%20.ipynb # # You can practice and play with code in our playground Jupyter Notebook platform - http://cc-playground.unmrds.net. We routinely reboot and clean out this system so don't do anything here (without downloading what you've done) that you want to keep. # # ### Why learn the basic principles of programming? # # * Thinking algorithmically (a key element in the process used in developing programming solutions) is a powerful problem solving skill that is reinforeced with practice. Practicing programming is great practice. 
# * Defining a problem with sufficient specificity that a solution can be effectively developed
# * Defining what the end-product of the process should be
# * Breaking a problem down into smaller components that interact with each other
# * Identifying the objects/data and actions that are needed to meet the requirements of each component
# * Linking components together to solve the defined problem
# * Identifying potential expansion points to reuse the developed capacity for solving related problems
#
# ![Problem decomposition illustration](problemDecomposition.png)
#
# * Capabilities to streamline and automate routine processes through scripting are ubiquitous
# * Query languages built into existing tools (e.g. Excel, ArcGIS, Word)
# * Specialized languages for specific tasks (e.g. R, Pandoc template language, PHP)
# * General purpose languages for solving many problems (e.g. Bash shell, Perl, Python, C#)
#
# * Repeatability with documentation
# * Scalability
# * Portability

# ### Why Python?
#
# * It is available as a free and [Open Source](https://opensource.org/osd-annotated) programming language that can be
# installed on numerous computer systems, including Windows, Linux and the Mac OS. It can even be edited and run 
# * It includes a standard library of functions that support significant programming capabilities including: # * Handling email # * Interacting with and publishing web and other online resources # * Connecting with a wide variety of databases # * Executing operating system commands # * Developing graphical user interfaces # * It is relatively easy to start to become productive in Python, though it still takes time and practice to become # an expert (as is the case with any programming language). # # The primary downside that is mentioned when discussing the choice of Python as a programming language is that as # an interpreted language it can execute more slowly than traditional compiled languages such as C or C++. # ### Can I Play at Home? # # There are a variety of ways to run Python on your computer: # # * You may already have a version of Python installed. Many operating systems have a version of Python installed that is used for routine processes within the operating system. You can easily check to see what version of Python might already be on your computer by typing `python` at the `Command Prompt` (Windows) or in the `Terminal` (Mac OS) and seeing what response you get. If Python is installed you will typically see information about the currently installed version and then be taken to the Python command prompt where you can start typing commands. # * You can install one of the available versions directly from the Python project site: https://www.python.org/downloads/. Following this installation you will be able to execute commands from the *interactive command prompt* or you can start the *IDLE* integrated development environment (IDE). # * You can install a pre-packaged python system such as the Anaconda release of Python (https://www.continuum.io/downloads) that has both Python 2.x and 3.x versions available for download. 
I prefer this method as it installs a copy of Python that is separate from any previous ones on your system, and allows you to execute the (enhanced) interactive Python command prompt, **and** run the Jupyter Notebook web-based environment for writing and executing Python code. The examples that we will go through today will be executed in the Jupyter Notebook environment. # ## Running a Python Environment # # Once Python is installed on your computer you have a number of options for how you start up an environment where you can execute Python commands/code. # # 1. The most simple method is to just type `python` at the *Command Prompt* (Windows) or *Terminal* (Mac OS and Linux). If you installation was successful you will be taken to the interactive prompt. For example: # # UL0100MAC:~ kbene$ python # Python 2.7.10 |Anaconda 2.3.0 (x86_64)| (default, May 28 2015, 17:04:42) # [GCC 4.2.1 (Apple Inc. build 5577)] on darwin # Type "help", "copyright", "credits" or "license" for more information. # Anaconda is brought to you by Continuum Analytics. # Please check out: http://continuum.io/thanks and https://binstar.org # >>> # # 2. If you would like to run the IDLE IDE you should be able to find the executable file in the folder where the Python executable installed on your system. # # 3. If you installed the Anaconda release of Python you can type `ipython` at the *Command Prompt* (Windows) or *Terminal* (Mac OS and Linux). If you installation was successful you will be taken to an enhanced (compared with the basic Python prompt) interactive prompt. For example: # # UL0100MAC:~ kbene$ ipython # Python 2.7.10 |Anaconda 2.3.0 (x86_64)| (default, May 28 2015, 17:04:42) # Type "copyright", "credits" or "license" for more information. # # IPython 3.2.0 -- An enhanced Interactive Python. # Anaconda is brought to you by Continuum Analytics. # Please check out: http://continuum.io/thanks and https://anaconda.org # ? -> Introduction and overview of IPython's features. 
# # %quickref -> Quick reference. # help -> Python's own help system. # object? -> Details about 'object', use 'object??' for extra details. # # In [1]: # # 4. If you installed the Anaconda release of Python you can type `jupyter notebook` at the *Command Prompt* (Windows) or *Terminal* (Mac OS and Linux). If you installation was successful you should see some startup messages in the terminal window and your browser should open up and display the *Jupyter Notebook* interface from where you can navigate through your system's folder structure (starting in the folder that you ran the `ipython notebook` command from), and load existing notebooks or create new ones in which you can enter and execute Python commands. You can also start a local Jupyter Notebook instance through the *Anaconda Navigator* application that is included with recent releases of the Anaconda Python distribution. In more recent releases of the Anaconda Python distribution you can run the *Anaconda Navigator* from which you can run *Jupyter Notebooks* and other applications. *This is the interface that we are using for today's workshop**. # # **You can experiment with the examples we are using today in your own Jupyter notebook at [http://cc-playground.unmrds.net](http://cc-playground.unmrds.net) . (password will be provided in the workshop)** # ## Getting Help # # There are a number of strategies that you can use for getting help with specific Python commands and syntax. First and foremost you can access the Python [documentation](https://docs.python.org/3/index.html) which will default to the most recent Python 3.x version that is in production, but from which (in the upper left corner of the page) you can select other Python versions if you are not using the version referenced by the page. Looking at and working through some of the materials in the Python [tutorial](https://docs.python.org/3/tutorial/) is also a great way to see the core Python capabilities in action. 
# # In some cases you can find quite a few useful and interesting resources through a resonably crafted Google search: e.g. for [`python create list`](https://www.google.com/search?client=safari&rls=en&q=python+create+list&ie=UTF-8&oe=UTF-8). # # You can also get targeted help some specific commands or objects from the command prompt by just using the `help()` function. Where you put the name of the command or object between the parentheses `()`. # # For example: # # >>>help(print) # # and # # >>>help(str) # # and # # >>>myVar = [1,2,3,4,5] # >>>help(myVar) # ### Try It Yourself # # Type in the help command in a code box in Jupyter Notebook for a few of the following commands/objects and take a look at the information you get: # # * `dict` - e.g. `help(dict)` # * `print` # * `sorted` # * `float` # # For some commands/functions you need to import the module that that command belongs to. For example: # # import os # help(os.path) # # Try this pair of commands in a code window in your Jupyter Notebook or interactive terminal. # + # type your help commands in the box and # execute the code in the box by typing shift-enter # (hold down the shift key while hitting the enter/return key) # - # ## The Basics # # At the core of Python (and any programming language) there are some key characteristics of how a program is structured that enable the proper execution of that program. These characteristics include the structure of the code itself, the core data types from which others are built, and core operators that modify objects or create new ones. From these raw materials more complex commands, functions, and modules are built. # For guidance on recommended Python structure refer to the [Python Style Guide](https://www.python.org/dev/peps/pep-0008). # # # Examples: Variables and Data Types # # ## The Interpreter # + # The interpreter can be used as a calculator, and can also echo or concatenate strings. 
3 + 3 # - 3 * 3 3 ** 3 3 / 2 # classic division - output is a floating point number # + # Use quotes around strings 'dogs' # + # # + operator can be used to concatenate strings 'dogs' + "cats" # - print('Hello World!') # ### Try It Yourself # # Go to the section _4.4. Numeric Types_ in the Python 3 documentation at <https://docs.python.org/3.4/library/stdtypes.html>. The table in that section describes different operators - try some! # # What is the difference between the different division operators (`/`, `//`, and `%`)? # # ## Variables # # Variables allow us to store values for later use. a = 5 b = 10 a + b # Variables can be reassigned: b = 38764289.1097 a + b # The ability to reassign variable values becomes important when iterating through groups of objects for batch processing or other purposes. In the example below, the value of `b` is dynamically updated every time the `while` loop is executed: a = 5 b = 10 while b > a: print("b="+str(b)) b = b-1 # Variable data types can be inferred, so Python does not require us to declare the data type of a variable on assignment. a = 5 type(a) # is equivalent to a = int(5) type(a) # + c = 'dogs' print(type(c)) c = str('dogs') print(type(c)) # - # There are cases when we may want to declare the data type, for example to assign a different data type from the default that will be inferred. Concatenating strings provides a good example. customer = 'Carol' pizzas = 2 print(customer + ' ordered ' + pizzas + ' pizzas.') # Above, Python has inferred the type of the variable `pizza` to be an integer. Since strings can only be concatenated with other strings, our print statement generates an error. There are two ways we can resolve the error: # # 1. Declare the `pizzas` variable as type string (`str`) on assignment or # 2. Re-cast the `pizzas` variable as a string within the `print` statement. 
customer = 'Carol' pizzas = str(2) print(customer + ' ordered ' + pizzas + ' pizzas.') customer = 'Carol' pizzas = 2 print(customer + ' ordered ' + str(pizzas) + ' pizzas.') # Given the following variable assignments: # # ``` # x = 12 # y = str(14) # z = donuts # ``` # # Predict the output of the following: # # 1. `y + z` # 2. `x + y` # 3. `x + int(y)` # 4. `str(x) + y` # # Check your answers in the interpreter. # # ### Variable Naming Rules # # Variable names are case senstive and: # # 1. Can only consist of one "word" (no spaces). # 2. Must begin with a letter or underscore character ('\_'). # 3. Can only use letters, numbers, and the underscore character. # # We further recommend using variable names that are meaningful within the context of the script and the research. # ### Structure # # #### Blocks # # The structure of a Python program is pretty simple: # Blocks of code are defined using indentation. Code that is at a lower level of indentation is not considerd part of a block. Indentation can be defined using spaces or tabs (spaces are recommended by the style guide), but be consistent (and prepared to defend your choice). As we will see, code blocks define the boundaries of sets of commands that fit within a given section of code. This indentation model for defining blocks of code significantly increases the readabiltiy of Python code. # # For example: # # >>>a = 5 # >>>b = 10 # >>>while b > a: # ... print("b="+str(b)) # ... b = b-1 # >>>print("I'm outside the block") # #### Comments & Documentation # # You can (and should) also include documentation and comments in the code your write - both for yourself, and potential future users (including yourself). Comments are pretty much any content on a line that follows a `#` symbol (unless it is between quotation marks. 
For example: # # >>># we're going to do some math now # >>>yae = 5 # the number of votes in favor # >>>nay = 10 # the number of votes against # >>>proportion = yae / nay # the proportion of votes in favor # >>>print(proportion) # # When you are creating functions or classes (a bit more on what these are in a bit) you can also create what are called *doc strings* that provide a defined location for content that is used to generate the `help()` information highlighted above and is also used by other systems for the automatic generation of documentation for packages that contain these *doc strings*. Creating a *doc string* is simple - just create a single or multi-line text string (more on this soon) that starts on the first indented line following the start of the definition of the function or class. For example: # # >>># we're going to create a documented function and then access the information about the function # >>>def doc_demo(some_text="Ill skewer yer gizzard, ye salty sea bass"): # ... """This function takes the provided text and prints it out in Pirate # ... # ... If a string is not provided for `some_text` a default message will be displayed # ... """ # ... out_string = "Ahoy Matey. " + some_text # ... print(out_string) # >>>help(doc_demo) # >>>doc_demo() # >>>doc_demo("Sail ho!") # ### Standard Objects # # Any programming language has at its foundation a collection of *types* or in Python's terminology *objects*. The standard objects of Python consist of the following: # # * **Numbers** - integer, floating point, complex, and multiple-base defined numeric values # * **Strings** - **immutable** strings of characters, numbers, and symbols that are bounded by single- or double-quotes # * **Lists** - an ordered collection of objects that is bounded by square-brackets - `[]`. Elements in lists are extracted or referenced by their position in the list. 
For example, `my_list[0]` refers to the first item in the list, `my_list[5]` the sixth, and `my_list[-1]` to the last item in the list. # * **Dictionaries** - an unordered collection of objects that are referenced by *keys* that allow for referring to those objexts by reference to those keys. Dictionaryies are bounded by curley-brackets - `{}` with each element of the dictionary consisting of a *key* (string) and a *value* (object) separated by a colon `:`. Elements of a dictionary are extracted or referenced using their keys. for example: # # my_dict = {"key1":"value1", "key2":36, "key3":[1,2,3]} # my_dict['key1'] returns "value1" # my_dict['key3'] returns [1,2,3] # # * **Tuples** - **immutable** lists that are bounded by parentheses = `()`. Referencing elements in a tuple is the same as referencing elements in a list above. # * **Files** - objects that represent external files on the file system. Programs can interact with (e.g. read, write, append) external files through their representative file objects in the program. # * **Sets** - unordered, collections of **immutable** objects (i.e. ints, floats, strings, and tuples) where membership in the set and uniqueness within the set are defining characteristics of the member objects. Sets are created using the `set` function on a sequence of objects. A specialized list of operators on sets allow for identifying *union*, *intersection*, and *difference* (among others) between sets. # * **Other core types** - Booleans, types, `None` # * **Program unit types** - *functions*, *modules*, and *classes* for example # * **Implementation-related types** (not covered in this workshop) # # These objects have their own sets of related methods (as we saw in the `help()` examples above) that enable their creation, and operations upon them. # >>># Fun with types # >>> # >>>this = 12 # >>>that = 15 # >>>the_other = "27" # >>>my_stuff = [this,that,the_other,["a","b","c",4]] # >>>more_stuff = { # ... "item1": this, # ... 
"item2": that, # ... "item3": the_other, # ... "item4": my_stuff # ...} # >>>this + that # >>> # >>># this won't work ... # >>>this + that + the_other # >>> # >>># ... but this will ... # >>>this + that + int(the_other) # >>> # >>># ...and this too # >>>str(this) + str(that) + the_other # ## Lists # # <https://docs.python.org/3/library/stdtypes.html?highlight=lists#list> # # Lists are a type of collection in Python. Lists allow us to store sequences of items that are typically but not always similar. All of the following lists are legal in Python: # + # Separate list items with commas! number_list = [1, 2, 3, 4, 5] string_list = ['apples', 'oranges', 'pears', 'grapes', 'pineapples'] combined_list = [1, 2, 'oranges', 3.14, 'peaches', 'grapes', 99.19876] # Nested lists - lists of lists - are allowed. list_of_lists = [[1, 2, 3], ['oranges', 'grapes', 8], [['small list'], ['bigger', 'list', 55], ['url_1', 'url_2']]] # - # There are multiple ways to create a list: # + # Create an empty list empty_list = [] # As we did above, by using square brackets around a comma-separated sequence of items new_list = [1, 2, 3] # Using the type constructor constructed_list = list('purple') # Using a list comprehension result_list = [i for i in range(1, 20)] # - # We can inspect our lists: empty_list new_list result_list constructed_list # The above output for `typed_list` may seem odd. Referring to the documentation, we see that the argument to the type constructor is an _iterable_, which according to the documentation is "An object capable of returning its members one at a time." In our construtor statement above # # ``` # # Using the type constructor # # constructed_list = list('purple') # ``` # # the word 'purple' is the object - in this case a word - that when used to construct a list returns its members (individual letters) one at a time. 
# # Compare the outputs below: constructed_list_int = list(123) constructed_list_str = list('123') constructed_list_str # Lists in Python are: # # * mutable - the list and list items can be changed # * ordered - list items keep the same "place" in the list # # _Ordered_ here does not mean sorted. The list below is printed with the numbers in the order we added them to the list, not in numeric order: ordered = [3, 2, 7, 1, 19, 0] ordered # + # There is a 'sort' method for sorting list items as needed: ordered.sort() ordered # - # Info on additional list methods is available at <https://docs.python.org/3/library/stdtypes.html?highlight=lists#mutable-sequence-types> # # Because lists are ordered, it is possible to access list items by referencing their positions. Note that the position of the first item in a list is 0 (zero), not 1! string_list = ['apples', 'oranges', 'pears', 'grapes', 'pineapples'] string_list[0] # + # We can use positions to 'slice' or selection sections of a list: string_list[3:] # - string_list[:3] string_list[1:4] # + # If we don't know the position of a list item, we can use the 'index()' method to find out. # Note that in the case of duplicate list items, this only returns the position of the first one: string_list.index('pears') # - string_list.append('oranges') string_list string_list.index('oranges') # + # one more time with lists and dictionaries list_ex1 = my_stuff[0] + my_stuff[1] + int(my_stuff[2]) print(list_ex1) list_ex2 = ( str(my_stuff[0]) + str(my_stuff[1]) + my_stuff[2] + my_stuff[3][0] ) print(list_ex2) dict_ex1 = ( more_stuff['item1'] + more_stuff['item2'] + int(more_stuff['item3']) ) print(dict_ex1) dict_ex2 = ( str(more_stuff['item1']) + str(more_stuff['item2']) + more_stuff['item3'] ) print(dict_ex2) # + # Now try it yourself ... 
# print out the phrase "The answer: 42" using the following # variables and one or more of your own and the 'print()' function # (remember spaces are characters as well) start = "The" answer = 42 # - # ### Operators # # If *objects* are the nouns, operators are the verbs of a programming language. We've already seen examples of some operators: *assignment* with the `=` operator, *arithmetic* addition *and* string concatenation with the `+` operator, *arithmetic* division with the `/` and `-` operators, and *comparison* with the `>` operator. Different object types have different operators that may be used with them. The [Python Documentation](https://docs.python.org/3/library/stdtypes.html) provides detailed information about the operators and their functions as they relate to the standard object types described above. # # ### Flow Control and Logical Tests # # Flow control commands allow for the dynamic execution of parts of the program based upon logical conditions, or processing of objects within an *iterable* object (like a list or dictionary). Some key flow control commands in python include: # # * `while-else` loops that continue to run until the termination test is `False` or a `break` command is issued within the loop: # # done = False # i = 0 # while not done: # i = i+1 # if i > 5: done = True # # * `if-elif-else` statements defined alternative blocks of code that are executed if a test condition is met: # # do_something = "what?" # if do_something == "what?": # print(do_something) # elif do_something == "where?": # print("Where are we going?") # else: # print("I guess nothing is going to happen") # # * `for` loops allow for repeated execution of a block of code for each item in a python sequence such as a list or dictionary. 
For example: # # my_stuff = ['a', 'b', 'c'] # for item in my_stuff: # print(item) # # a # b # c # # ### Functions # # Functions represent reusable blocks of code that you can reference by name and pass informatin into to customize the exectuion of the function, and receive a response representing the outcome of the defined code in the function. # # ### Putting it all together # # An example of reading a data file and doing basic work with it illustrates all of these concepts. This also illustrates the concept of writing a script that combines all of your commands into a file that can be run. `eggs.py` in this case. # # # #!/usr/bin/env python # # import csv # # # create an empty list that will be filled with the rows of data from the CSV as dictionaries # csv_content = [] # # # open and loop through each line of the csv file to populate our data file # with open('aaj1945_DataS1_Egg_shape_by_species_v2.csv') as csv_file: # csv_reader = csv.DictReader(csv_file) # lineNo = 0 # for row in csv_reader: # process each row of the csv file # csv_content.append(row) # if lineNo < 3: # print out a few lines of data for our inspection # print(row) # lineNo += 1 # # # create some empty lists that we will fill with values for each column of data # order = [] # family = [] # species = [] # asymmetry = [] # ellipticity = [] # avglength = [] # # # for each row of data in our dataset write a set of values into the lists of column values # for item in csv_content: # order.append(item['\ufeffOrder']) # family.append(item['Family']) # species.append(item['Species']) # # # deal with issues # try: # asymmetry.append(float(item['Asymmetry'])) # except: # asymmetry.append(-9999) # # try: # ellipticity.append(float(item['Ellipticity'])) # except: # ellipticity.append(-9999) # # try: # avglength.append(float(item['AvgLength (cm)'])) # except: # avglength.append(-9999) # # print() # print() # # # Calculate and print some statistics # mean_asymmetry = sum(asymmetry)/len(asymmetry) # 
print("Mean Asymmetry: ", str(mean_asymmetry)) # mean_ellipticity = sum(ellipticity)/len(ellipticity) # print("Mean Ellipticity: ", str(mean_ellipticity)) # mean_avglength = sum(avglength)/len(avglength) # print("Mean Average Length: ", str(mean_avglength)) # # # What's wrong with these results? What would you do next to fix the problem? # ## Going beyond the *Standard Library* # # While Python's *Standard Library* of modules is very powerful and diverse, you will encounter times when you need functionality that is not included in the base installation of Python. *Fear not*, there are over 100,000 additional packages that have been developed to extend the capabilities of Python beyond those provided in the default installation. The central repository for Python packages is the [*Python Package Index*](https://pypi.python.org/pypi) that can be browsed on the web, or can be programmatically interacted with using the [PIP](https://docs.python.org/3/tutorial/venv.html#managing-packages-with-pip) utility. # # Once installed, the functionality of a module (standard or not) is added to a script using the `import` command. # ## Resources # # * [*Computational Thinking for Teacher Education*](https://cacm.acm.org/magazines/2017/4/215031-computational-thinking-for-teacher-education/fulltext?mobile=false) # * [Python Project Site](https://www.python.org/downloads/) # * [Anaconda Python Site](https://www.continuum.io/downloads) # * [Python Documentation](https://docs.python.org/3/index.html) # * [Python 3.6 Tutorial](https://docs.python.org/3/tutorial/) # # Some book-length resources: # # * *Python 3 for Absolute Beginners*. 2009. <NAME> and <NAME>. http://library.books24x7.com.libproxy.unm.edu/toc.aspx?bookid=33297 # * *Learning Python* 5th ed. 2013. <NAME>. https://www.amazon.com/Learning-Python-5th-Mark-Lutz/dp/1449355730/ref=sr_1_1?ie=UTF8&qid=1497019326&sr=8-1&keywords=%22learning+python%22 # * *The Quick Python Book* 2010. <NAME>. 
https://www.amazon.com/Quick-Python-Book-Second/dp/193518220X/ref=sr_1_1?s=books&ie=UTF8&qid=1497019549&sr=1-1&keywords=%22quick+python%22 # #
.ipynb_checkpoints/Programming Concepts-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" # <a href="https://colab.research.google.com/github/murthylab/sleap/blob/main/docs/notebooks/Training_and_inference_using_Google_Drive.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Training and inference on your own data using Google Drive # + [markdown] id="K5xp-A8Oc80Q" # In this notebook we'll install SLEAP, import training data into Colab using [Google Drive](https://www.google.com/drive), and run training and inference. # + [markdown] id="yX9noEb8m8re" # ## Install SLEAP # Note: Before installing SLEAP check [SLEAP releases](https://github.com/murthylab/sleap/releases) page for the latest version. # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="DUfnkxMtLcK3" outputId="988097ae-e996-4b81-eb06-ec85aa0b2d9d" # !pip install sleap==1.1.3 # + [markdown] id="iq7jrgUksLtR" # ## Import training data into Colab with Google Drive # We'll first prepare and export the training data from SLEAP, then upload it to Google Drive, and then mount Google Drive into this Colab notebook. # + [markdown] id="GwpEwrxYdLMR" # ### Create and export the training job package # A self-contained **training job package** contains a .slp file with labeled data and images which will be used for training, as well as .json training configuration file(s). # # A training job package can be exported in the SLEAP GUI fron the "Run Training.." dialog under the "Predict" menu. # + [markdown] id="ApaDWxW4dLMS" # ### Upload training job package to Google Drive # To be consistent with the examples in this notebook, name the SLEAP project `colab` and create a directory called `sleap` in the root of your Google Drive. 
Then upload the exported training job package `colab.slp.training_job.zip` into `sleap` directory. # # If you place your training pckage somewhere else, or name it differently, adjust the paths/filenames/parameters below accordingly. # + [markdown] id="3HCGgy4kdLMS" tags=[] # ### Mount your Google Drive # Mounting your Google Drive will allow you to accessed the uploaded training job package in this notebook. When prompted to log into your Google account, give Colab access and the copy the authorization code into a field below (+ hit 'return'). # + colab={"base_uri": "https://localhost:8080/"} id="GBWjF4jpMG2N" outputId="612b3674-60b4-4cd3-d61b-90ab56312293" from google.colab import drive drive.mount('/content/drive/') # + [markdown] id="KQhv_gsdJzaq" # Let's set your current working directory to the directory with your training job package and unpack it there. Later on the output from training (i.e., the models) and from interence (i.e., predictions) will all be saved in this directory as well. # + colab={"base_uri": "https://localhost:8080/"} id="9umui-gI2rBz" outputId="a0c208c2-edaf-41cb-ad03-34c4c0e25770" import os os.chdir("/content/drive/My Drive/sleap") # !unzip colab.slp.training_job.zip # !ls # + [markdown] id="xZ-sr67av5uu" # ## Train a model # # Let's train a model with the training profile (.json file) and the project data (.slp file) you have exported from SLEAP. # # # ### Note on training profiles # Depending on the pipeline you chose in the training dialog, the config filename(s) will be: # # - for a **bottom-up** pipeline approach: `multi_instance.json` (this is the pipeline we assume here), # # - for a **top-down** pipeline, you'll have a different profile for each of the models: `centroid.json` and `centered_instance.json`, # # - for a **single animal** pipeline: `single_instance.json`. 
# # # ### Note on training process # When you start training, you'll first see the training parameters and then the training and validation loss for each training epoch. # # As soon as you're satisfied with the validation loss you see for an epoch during training, you're welcome to stop training by clicking the stop button. The version of the model with the lowest validation loss is saved during training, and that's what will be used for inference. # # If you don't stop training, it will run for 200 epochs or until validation loss fails to improve for some number of epochs (controlled by the early_stopping fields in the training profile). # + id="QKf6qzMqNBUi" # !sleap-train multi_instance.json colab.pkg.slp # + [markdown] id="whOf8PaFxYbt" # If instead of bottom-up you've chosen the top-down pipeline (with two training configs), you would need to invoke two separate training jobs in sequence: # # - `!sleap-train centroid.json colab.pkg.slp` # - `!sleap-train centered_instance.json colab.pkg.slp` # # + [markdown] id="nIsKUX661xFK" # ## Run inference to predict instances # # Once training finishes, you'll see a new directory (or two new directories for top-down training pipeline) containing all the model files SLEAP needs to use for inference. # # Here we'll use the created model files to run inference in two modes: # # - predicting instances in suggested frames from the exported .slp file # # - predicting and tracking instances in uploaded video # # You can also download the trained models for running inference from the SLEAP GUI on your computer (or anywhere else). # # ### Predicting instances in suggested frames # This mode of predicting instances is useful for accelerating the manual labeling work; it allows you to get early predictions on suggested frames and merge them back into the project for faster labeling. 
# # Here we assume you've trained a bottom-up model and that the model files were written in directory named `colab_demo.bottomup`; later in this notebook we'll also show how to run inference with the pair of top-down models instead. # - # !sleap-track \ # -m colab_demo.bottomup \ # --only-suggested-frames \ # -o colab.predicted_suggestions.slp \ # colab.pkg.slp # + [markdown] id="nIsKUX661xFK" # Now, you can download the generated `colab.predicted_suggestions.slp` file and merge it into your labeling project (**File -> Merge into Project...** from the GUI) to get new predictions for your suggested frames. # + [markdown] id="nIsKUX661xFK" # ### Predicting and tracking instances in uploaded video # Let's first upload the video we want to run inference on and name it `colab_demo.mp4`. (If your video is not named `colab_demo.mp4`, adjust the names below accordingly.) # # For this demo we'll just get predictions for the first 200 frames (or you can adjust the --frames parameter below or remove it to run on the whole video). # + colab={"base_uri": "https://localhost:8080/"} id="CLtjtq9E1Znr" outputId="7c6da613-08b5-4c79-8eeb-7b785774417c" # !sleap-track colab_demo.mp4 \ # --frames 0-200 \ # --tracking.tracker simple \ # -m colab_demo.bottomup # + [markdown] id="nzObCUToEqwA" # When inference is finished, it will save the predictions in a file which can be opened in the GUI as a SLEAP project file. The file will be in the same directory as the video and the filename will be `{video filename}.predictions.slp`. # # Let's inspect the predictions file: # + id="nPfmNMSt-vS7" # !sleap-inspect colab_demo.mp4.predictions.slp # + [markdown] id="LoJ2kNBK-w6k" # You can copy this file from your Google Drive to a local drive and open it in the SLEAP GUI app (or open it directly if you have your Google Drive mounted on your local machine). 
If the video is in the same directory as the predictions file, SLEAP will automatically find it; otherwise, you'll be prompted to locate the video (since the path to the video on your local machine will be different than the path to the video on Colab). # + [markdown] id="qW-NoJOFvYHM" # ### Inference with top-down models # # If you trained the pair of models needed for top-down inference, you can call `sleap-track` with `-m path/to/model` for each model, like so: # + id="QPKnMc1qvim7" # !sleap-track colab_demo.mp4 \ # --frames 0-200 \ # --tracking.tracker simple \ # -m colab_demo.centered_instance \ # -m colab_demo.centroid
docs/notebooks/Training_and_inference_using_Google_Drive.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tensorflow as tf

tf.__version__

# +
# Two parallel input branches, each a sequence of 10 timesteps x 100 features.
left_input = tf.keras.Input(shape = (10, 100))
right_input = tf.keras.Input(shape = (10, 100))

# Run a 100-unit LSTM over each branch, keeping the full per-timestep outputs.
left_seq = tf.keras.layers.LSTM(100, return_sequences = True)(left_input)
right_seq = tf.keras.layers.LSTM(100, return_sequences = True)(right_input)

# Join the two branches along the feature axis, then map to one sigmoid unit.
merged = tf.keras.layers.concatenate([left_seq, right_seq], axis = -1)
prediction = tf.keras.layers.Dense(1, activation = 'sigmoid')(merged)

model = tf.keras.Model([left_input, right_input], prediction)
# -

tf.keras.utils.plot_model(model, show_shapes = True)
model_poc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.4 64-bit (''python39'': conda)' # name: python3 # --- # # Diagnosing Coronary Artery Disease # # **Data Set Information:** # # This dataset contains 76 attributes, but all published experiments refer to using a subset of 14 of them. In particular, the Cleveland database is the only one that has been used by ML researchers to this date. The "goal" field refers to the presence of heart disease in the patient. It is integer valued from 0 (no presence) to 4. Experiments with the Cleveland database have concentrated on simply attempting to distinguish presence (values 1,2,3,4) from absence (value 0). # # The names and social security numbers of the patients were recently removed from the database, replaced with dummy values. # # One file has been "processed", that one containing the Cleveland database. All four unprocessed files also exist in this directory. # **Attribute Information:** # # Only 14 attributes used: # 1. #3 (age) # 2. #4 (sex) # 3. #9 (cp) # 4. #10 (trestbps) # 5. #12 (chol) # 6. #16 (fbs) # 7. #19 (restecg) # 8. #32 (thalach) # 9. #38 (exang) # 10. #40 (oldpeak) # 11. #41 (slope) # 12. #44 (ca) # 13. #51 (thal) # 14. 
#58 (num) (the predicted attribute) # ## Import Libraries # + import sys # Data Science Tools import numpy as np import pandas as pd import matplotlib.pyplot as plt from pandas.plotting import scatter_matrix # Machine Learning Tools import sklearn from sklearn import model_selection from sklearn.metrics import classification_report, accuracy_score # Deep Learning Tools import keras from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.layers import Dense from tensorflow.keras.optimizers import Adam # - # ## Loading data # + # Import the heart disease dataset url = "http://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.cleveland.data" # the names will be the names of each column in our pandas DataFrame names = ["age", "sex", "cp", "trestbps", "chol", "fbs", "restecg", "thalach", "exang", "oldpeeak", "slope", "ca", "thal", "class"] # - # read the csv cleveland_data = pd.read_csv(url, names = names) cleveland_data.head() # + # print the shape of the Dataframe, so we can see how many examples we have print("Shape of Dataframe: {}".format(cleveland_data.shape)) print(cleveland_data.loc[1]) # - # print the lsat twenty or so data points cleveland_data.loc[280:] # ## Data Preparation # remove missing data (indicated with a "?") data = cleveland_data[~cleveland_data.isin(['?'])] data.loc[280:] # drop rows with NaN values from DataFrame data = data.dropna(axis=0) data.loc[280:] # print the shape and data type of the dataframe print(data.shape) print(data.dtypes) # transform data to numeric to enable further analysis data = data.apply(pd.to_numeric) data.dtypes # print data characteristics, usings pandas built-in describe() function data.describe() # plot histogram for each variables data.hist(figsize=(12, 12)) plt.show() # ## Data Splitting (Train / Test) # # Now that we have preprocessed the data appropriately, we can split it into training and testings datasets. 
# We will use Sklearn's train_test_split() function to generate a training dataset (80 percent of the total data) and testing dataset (20 percent of the total data).
#
# Furthermore, the class values in this dataset contain multiple types of heart disease with values ranging from 0 (healthy) to 4 (severe heart disease). Consequently, we will need to convert our class data to categorical labels. For example, the label 2 will become [0, 0, 1, 0, 0].

# +
# create X and y datasets for training
# NOTE: pass axis as a keyword — the positional form data.drop([...], 1)
# was deprecated and then removed in pandas 2.0.
X = np.array(data.drop(["class"], axis=1))
y = np.array(data["class"])

X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)

# +
# convert the data to categorical labels (one-hot rows, one column per class)
Y_train = to_categorical(y_train, num_classes=None)
Y_test = to_categorical(y_test, num_classes = None)

print(Y_train.shape)
print(Y_train[:10])
# -

# ## Modelling

# +
# define a function to build the keras model
def create_model():
    """Build and compile a small dense network for 5-class classification.

    Returns:
        A compiled ``Sequential`` model expecting 13 input features and
        emitting a softmax distribution over the 5 disease classes (0-4).
    """
    # create model
    model = Sequential()
    model.add(Dense(8, input_dim = 13, kernel_initializer = "normal", activation= "relu"))
    model.add(Dense(4, kernel_initializer= "normal", activation = "relu"))
    model.add(Dense(5, activation = "softmax"))

    # compile model
    adam = Adam(learning_rate = 0.001)
    model.compile(optimizer = adam, loss = "categorical_crossentropy", metrics = ["accuracy"])
    return model

model = create_model()

print(model.summary())
# -

# fit the model to the training data
model.fit(X_train, Y_train, epochs=100, batch_size=10, verbose=1)

# +
# convert into binary classification problem - heart disease or no heart disease
Y_train_binary = y_train.copy()
Y_test_binary = y_test.copy()

Y_train_binary[Y_train_binary > 0] = 1
Y_test_binary[Y_test_binary > 0] = 1

print(Y_train_binary[:20])

# +
# define a new keras model for binary classification
def create_binary_model():
    """Build and compile a dense network for binary (disease / no disease) classification.

    Returns:
        A compiled ``Sequential`` model expecting 13 input features and
        emitting a single sigmoid probability.
    """
    # create model
    model = Sequential()
    model.add(Dense(8, input_dim=13, kernel_initializer='normal', activation='relu'))
    model.add(Dense(4, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    # Compile model
    # use learning_rate: the `lr` alias is deprecated (removed in Keras 3),
    # and this matches create_model() above
    adam = Adam(learning_rate=0.001)
    model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model

binary_model = create_binary_model()

print(binary_model.summary())
# -

# fit the binary model on the training data
binary_model.fit(X_train, Y_train_binary, epochs=100, batch_size=10, verbose=1)

# +
# generate classification report using predictions for categorical model
categorical_pred = np.argmax(model.predict(X_test), axis=1)

print('Results for Categorical Model')
print(accuracy_score(y_test, categorical_pred))
print(classification_report(y_test, categorical_pred))

# +
# generate classification report using predictions for binary model
binary_pred = np.round(binary_model.predict(X_test)).astype(int)

print('Results for Binary Model')
print(accuracy_score(Y_test_binary, binary_pred))
print(classification_report(Y_test_binary, binary_pred))
Diagnosing Coronary Artery Disease Project/Diagnosing Coronary Artery Disease.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import PyPDF2 import os import sys import re import textract import docx rootdir = '/Users/eddieandress/development/forms/live_forms/' extraction_file_path = '/Users/eddieandress/development/forms/live_forms_extracted/' os.chdir(rootdir) # + reader_fail = [] pagenum_fail = [] for subdir, dirs, files in os.walk(rootdir): for file in files: text = "" if '.pdf' in file: pdfFileObj = open(file, 'rb') try: pdfReader = PyPDF2.PdfFileReader(pdfFileObj) except: reader_fail.append(file) continue try: num_pages = pdfReader.numPages except: pagenum_fail.append(file) continue for page in range(num_pages): page = pdfReader.getPage(page) text = text + page.extractText().lower().encode('ascii', 'ignore') text_file_name = file.split('.')[0] text_file = open(extraction_file_path + text_file_name + '.txt', 'w') text_file.write(text) text_file.close() # 7 failures # - reader_fail pagenum_fail # + pdf = [] word = [] other = [] for subdir, dirs, files in os.walk(rootdir): for file in files: if '.pdf' in file: pdf.append(file) elif '.doc' in file: word.append(file) elif '.docx' in file: word.append(file) else: other.append(file) print "%d PDF" % len(pdf) print "%d Word" % len(word) print "%d Other" % len(other) print "\nFiles in other formats:" other # - # approximate number of duplicates in word and pdf list pdf_codes = [name.split('-')[0].lower() for name in pdf] word_codes = [name.split('-')[0].lower() for name in word] len(set(pdf_codes).intersection(word_codes)) # + # extract text from Word docs rootdir = '/Users/eddieandress/development/forms/live_forms/' extraction_file_path = '/Users/eddieandress/development/forms/live_forms_extracted/' os.chdir(rootdir) textract_fail = [] for subdir, dirs, files in os.walk(rootdir): for file in files: if '.doc' in file: try: 
text = textract.process(file).lower() except: textract_fail.append(file) continue text_file_name = file.split('.')[0] text_file = open(extraction_file_path + text_file_name + '_doc.txt', 'w') text_file.write(text) text_file.close() # - len(textract_fail)
text_extraction_and_file_format_investigation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Y4lHQ2ja4Ve2" # # **NOISE REMOVAL FOR IMAGE PROCESSING** # # ## WHAT IS IMAGE PROCESSING ? # Image processing is a method to perform some operations on an image, in order to get an enhanced image or to extract some useful information from it. It is a type of signal processing in which input is an image and output may be image or characteristics/features associated with that image. # # <p align="center"> # <img width="450" height="200" src="https://assets.skyfilabs.com/images/blog/latest-image-processing-mini-projects.webp"> # </p> # # ## WHY IS IT IMPORTANT ? # * Importance and necessity of digital image processing stems from two principal application areas: # * First being the Improvement of pictorial information for human interpretation. # * Second being the Processing of a scene data for an autonomous machine perception. # * Digital image processing has a broad range of applications such as remote sensing, image and data storage for transmission in business applications, medical imaging, acoustic imaging, Forensic sciences and industrial automation. # *Images acquired by satellites are useful in tracking of earth resources, geographical mapping, and prediction of agricultural crops, urban population, weather forecasting, flood and fire control. # *Space imaging applications include recognition and analyzation of objects contained in images obtained from deep space-probe missions. There are also medical applications such as processing of X-Rays, Ultrasonic scanning, Electron micrographs, Magnetic Resonance Imaging etc. # # # # + [markdown] id="b3JUI1J55-Zf" # ### **Denoising Using cv2.fastNlMeansDenoising()** # Steps- # # # 1. Import the required libraries # 2. Load the image. # 3. Denoise the third frame considering all the 5 frames. # 4. 
Set the plot size as required. # 5. Show the subplot. # # # + colab={"base_uri": "https://localhost:8080/", "height": 432} id="9qD3TJ3ItcnG" outputId="9776afba-b3f9-48b9-87fe-20cf10e7b7c7" import numpy as np import cv2 from matplotlib import pyplot as plt img = cv2.imread('test_image.jpg') dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21) plt.figure(figsize=(15, 10)) plt.subplot(121),plt.imshow(img) plt.subplot(122),plt.imshow(dst) plt.show() # + [markdown] id="Kp5v5UI84WVV" # ### **Denoising Using cv2.fastNlMeansDenoisingMulti()** # We can also apply the same denoising method to a video. # # The first argument is the list of noisy frames. Second argument imgToDenoiseIndex specifies which frame we need to denoise, for that we pass the index of frame in our input list. Third is the temporalWindowSize which specifies the number of nearby frames to be used for denoising. It should be odd. In that case, a total of temporalWindowSize frames are used where central frame is the frame to be denoised. For example, you passed a list of 5 frames as input. Let imgToDenoiseIndex = 2 and temporalWindowSize = 3. Then frame-1, frame-2 and frame-3 are used to denoise frame-2. # # **CODING:** # # # 1. Create a list of first 5 frames. # 2. Convert all to float64. # 3. Create a noise of a specific variance. # 4. Add noise to the images. # 5. Denoise 3rd frame considering all the 5 frames. 
# # # + colab={"base_uri": "https://localhost:8080/", "height": 190} id="llZuUhcWu4Hc" outputId="bf9c7083-e593-4d7c-f555-806e0a92abf2" import numpy as np import cv2 from matplotlib import pyplot as plt cap = cv2.VideoCapture('test_video.mp4') # create a list of first 5 frames img = [cap.read()[1] for i in range(5)] # convert all to grayscale gray = [cv2.cvtColor(i, cv2.COLOR_BGR2GRAY) for i in img] # convert all to float64 gray = [np.float64(i) for i in gray] # create a noise of variance 25 noise = np.random.randn(*gray[1].shape)*10 # Add this noise to images noisy = [i+noise for i in gray] # Convert back to uint8 noisy = [np.uint8(np.clip(i,0,255)) for i in noisy] # Denoise 3rd frame considering all the 5 frames dst = cv2.fastNlMeansDenoisingMulti(noisy, 2, 5, None, 4, 7, 35) plt.figure(figsize=(15, 10)) plt.subplot(131),plt.imshow(gray[2],'gray') plt.subplot(132),plt.imshow(noisy[2],'gray') plt.subplot(133),plt.imshow(dst,'gray') plt.show()
Datascience_With_Python/Computer Vision/Projects/Noise Removal for Image Processing/noise_removal_for_image_processing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Arabic text preprocessing with NLTK: stop-word listing/removal, sentence
# tokenization, punctuation filtering, lemmatization/stemming and POS tagging.

# +
from nltk.corpus import stopwords

# List the Arabic stop words
print('mots vides en arabic')
print(stopwords.words("arabic"))

# +
# Remove stop words from an Arabic text.
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

text = '''
ุฑุจู…ุง ูƒุงู†ุช ุฃุญุฏ ุฃู‡ู… ุงู„ุชุทูˆุฑุงุช ุงู„ุชูŠ ู‚ุงู…ุช ุจู‡ุง ุงู„ุฑูŠุงุถูŠุงุช ุงู„ุนุฑุจูŠุฉ ุงู„ุชูŠ ุจุฏุฃุช ููŠ ู‡ุฐุง ุงู„ูˆู‚ุช ุจุนู…ู„ ุงู„ุฎูˆุงุฑุฒู…ูŠ ูˆู‡ูŠ ุจุฏุงูŠุงุช ุงู„ุฌุจุฑ, ูˆู…ู† ุงู„ู…ู‡ู… ูู‡ู… ูƒูŠู ูƒุงู†ุช ู‡ุฐู‡ ุงู„ููƒุฑุฉ ุงู„ุฌุฏูŠุฏุฉ ู…ู‡ู…ุฉ, ูู‚ุฏ ูƒุงู†ุช ุฎุทูˆุฉ ู†ูˆุฑูŠุฉ ุจุนูŠุฏุง ุนู† ุงู„ู…ูู‡ูˆู… ุงู„ูŠูˆู†ุงู†ูŠ ู„ู„ุฑูŠุงุถูŠุงุช ุงู„ุชูŠ ู‡ูŠ ููŠ ุฌูˆู‡ุฑู‡ุง ู‡ู†ุฏุณุฉ, ุงู„ุฌุจุฑ ฺฉุงู† ู†ุธุฑูŠุฉ ู…ูˆุญุฏุฉ ุชุชูŠุญ ุงู„ุฃุนุฏุงุฏ ุงู„ูƒุณุฑูŠุฉ ูˆุงู„ุฃุนุฏุงุฏ ุงู„ู„ุง ูƒุณุฑูŠุฉ, ูˆุงู„ู…ู‚ุงุฏูŠุฑ ุงู„ู‡ู†ุฏุณูŠุฉ ูˆุบูŠุฑู‡ุง, ุฃู† ุชุชุนุงู…ู„ ุนู„ู‰ ุฃู†ู‡ุง ุฃุฌุณุงู… ุฌุจุฑูŠุฉ, ูˆุฃุนุทุช ุงู„ุฑูŠุงุถูŠุงุช ูƒูƒู„ ู…ุณุงุฑุง ุฌุฏูŠุฏุง ู„ู„ุชุทูˆุฑ ุจู…ูู‡ูˆู… ุฃูˆุณุน ุจูƒุซูŠุฑ ู…ู† ุงู„ุฐูŠ ูƒุงู† ู…ูˆุฌูˆุฏุง ู…ู† ู‚ุจู„, ูˆู‚ู… ูˆุณูŠู„ุฉ ู„ู„ุชู†ู…ูŠุฉ ููŠ ู‡ุฐุง ุงู„ู…ูˆุถูˆุน ู…ุณุชู‚ุจู„ุง.
ูˆุฌุงู†ุจ ุขุฎุฑ ู…ู‡ู… ู„ุฅุฏุฎุงู„ ุฃููƒุงุฑ ุงู„ุฌุจุฑ ูˆู‡ูˆ ุฃู†ู‡ ุณู…ุญ ุจุชุทุจูŠู‚ ุงู„ุฑูŠุงุถูŠุงุช ุนู„ู‰ ู†ูุณู‡ุง ุจุทุฑูŠู‚ุฉ ู„ู… ุชุญุฏุซ ู…ู† ู‚ุจู„
'''

# BUG FIX: the original used stopwords.words('english'), which removes nothing
# from an Arabic text; use the Arabic stop-word list instead.
stop_words = set(stopwords.words('arabic'))
words = word_tokenize(text)
for word in words:
    if word not in stop_words:
        print(word)
# -

# Tokenizing sentences
from nltk.tokenize import sent_tokenize, word_tokenize
print(sent_tokenize(text))

# +
# Remove punctuation tokens.
# (An unused English stop-word set was also built here in the original; it was
# never read, so it has been removed.)
import string
from nltk.tokenize import word_tokenize

words = word_tokenize(text)
for word in words:
    if word not in string.punctuation:
        print(word)

# +
import string
import nltk
from nltk.stem import WordNetLemmatizer

wordnet_lemmatizer = WordNetLemmatizer()
sentence_words = nltk.word_tokenize(text)
# BUG FIX: the original removed items from the list while iterating over it,
# which silently skips the token right after every removed one; build a
# filtered list instead.
sentence_words = [word for word in sentence_words if word not in string.punctuation]
sentence_words
print("{0:20}{1:20}".format("Word","Lemmatization"))
print()
# NOTE(review): WordNetLemmatizer is an English resource; on Arabic tokens it
# generally returns the token unchanged — confirm whether an Arabic stemmer
# (e.g. nltk's ISRIStemmer) was intended.
for word in sentence_words:
    print ("{0:20}{1:20}".format(word,wordnet_lemmatizer.lemmatize(word)))

# +
# Compare Porter / Lancaster stemming and WordNet lemmatization on the tokens.
tokens = nltk.word_tokenize(text)
porter = nltk.PorterStemmer()
[porter.stem(t) for t in tokens]
lancaster = nltk.LancasterStemmer()
[lancaster.stem(t) for t in tokens]
wnl = nltk.WordNetLemmatizer()
[wnl.lemmatize(t) for t in tokens]

# +
# Part-of-speech tagging with NLTK's default (English-trained) tagger.
import nltk
from nltk import pos_tag
t = nltk.word_tokenize(text)
print(nltk.pos_tag(t))
# -
TP1_NLP_NLTK_arabic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ๋จธ์‹ ๋Ÿฌ๋‹ ๋ง›๋ณด๊ธฐ 1ํŽธ # __์ฐธ๊ณ :__ ์˜ค๋ ๋ฆฌ์•™ ์ œ๋กฑ์˜ [<ํ•ธ์ฆˆ์˜จ ๋จธ์‹ ๋Ÿฌ๋‹(2ํŒ)>](https://github.com/ageron/handson-ml2) # 4์žฅ 1์ ˆ์˜ ์†Œ์Šค์ฝ”๋“œ ์ผ๋ถ€๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค. # ## ์ฃผ์š” ๋‚ด์šฉ # ๋จธ์‹ ๋Ÿฌ๋‹์€ ํŠน์ • ๊ฐ’์„ ์˜ˆ์ธกํ•˜๋Š” ๋ชจ๋ธ(์ผ์ข…์˜ ํ•จ์ˆ˜)์„ ๊ตฌํ˜„ํ•˜๋Š” ์ปดํ“จํ„ฐ ํ”„๋กœ๊ทธ๋ž˜๋ฐ ๋ถ„์•ผ์ด๋ฉฐ # ์ฃผ์–ด์ง„ ๋ฌธ์ œ์— ๋Œ€ํ•œ ์ ์ ˆํ•œ ๋ชจ๋ธ์„ ์ฐพ๋Š” ์ผ์ด ํ•ต์‹ฌ ๊ณผ์ œ์ด๋‹ค. # ๋จธ์‹ ๋Ÿฌ๋‹์€ ๋ฐ์ดํ„ฐ ๋ถ„์„์˜ ์ฃผ์š” ํ™œ์šฉ๋ถ„์•ผ๊ฐ€ ๋˜์—ˆ์œผ๋ฉฐ ์ง€๊ธˆ๊นŒ์ง€ ์‚ดํŽด ๋ณธ ํŒŒ์ด์ฌ ๋ฐ์ดํ„ฐ ๋ถ„์„์˜ ๋‹ค์–‘ํ•œ ๊ฐœ๋…๊ณผ ๋„๊ตฌ๋“ค์ด # ์œ ์šฉํ•˜๊ฒŒ ํ™œ์šฉ๋œ๋‹ค. # ์—ฌ๊ธฐ์„œ๋Š” ์„ ํ˜•ํšŒ๊ท€ ๋ชจ๋ธ์˜ ๊ฐœ๋…๊ณผ ํ™œ์šฉ์„ ๊ฐ„๋‹จํ•œ ์˜ˆ์ œ๋ฅผ ์ด์šฉํ•˜์—ฌ # ๋จธ์‹ ๋Ÿฌ๋‹์˜ ๊ธฐ๋ณธ ์•„์ด๋””์–ด๋ฅผ ์ „๋‹ฌํ•œ๋‹ค. # ## ๊ธฐ๋ณธ ์„ค์ • # - ํ•„์ˆ˜ ๋ชจ๋“ˆ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ # - ๊ทธ๋ž˜ํ”„ ์ถœ๋ ฅ ๊ด€๋ จ ๊ธฐ๋ณธ ์„ค์ • ์ง€์ • # + import numpy as np # ๋…ธํŠธ๋ถ ์‹คํ–‰ ๊ฒฐ๊ณผ๋ฅผ ๋™์ผํ•˜๊ฒŒ ์œ ์ง€ํ•˜๊ธฐ ์œ„ํ•ด np.random.seed(42) # ๊น”๋”ํ•œ ๊ทธ๋ž˜ํ”„ ์ถœ๋ ฅ์„ ์œ„ํ•ด # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # - # ## ๋จธ์‹ ๋Ÿฌ๋‹ ๋ชจ๋ธ ํ›ˆ๋ จ # ๋จธ์‹ ๋Ÿฌ๋‹ ๋ชจ๋ธ์˜ ํ›ˆ๋ จ ๊ณผ์ •์— ํ•„์š”ํ•œ ์š”์†Œ๋Š” __๋ชจ๋ธ__๊ณผ __ํ›ˆ๋ จ ๋ฐ์ดํ„ฐ์…‹__์ด๋‹ค. # ๊ทธ๋ฆฌ๊ณ  ๋ชจ๋ธ๊ณผ ํ›ˆ๋ จ ๋ฐ์ดํ„ฐ์…‹์— ๋”ฐ๋ผ ํ›ˆ๋ จ ๋ฐฉ์‹์ด ๊ฒฐ์ •๋˜๋ฉฐ, ๋จธ์‹ ๋Ÿฌ๋‹๊ณผ ๋”ฅ๋Ÿฌ๋‹ ๋ถ„์•ผ์—์„œ ๋‹ค์–‘ํ•œ ํ›ˆ๋ จ ๊ธฐ๋ฒ•์„ ์—ฐ๊ตฌํ•œ๋‹ค. # ๋˜ํ•œ ์–ด๋–ค ๋ชจ๋ธ๊ณผ ์–ด๋–ค ํ›ˆ๋ จ ๋ฐ์ดํ„ฐ์…‹์„ ์‚ฌ์šฉํ•˜๋А๋ƒ์— ๋”ฐ๋ผ ๋ชจ๋ธ ํ›ˆ๋ จ์˜ ๊ฒฐ๊ณผ๊ฐ€ ๋งค์šฐ ๋‹ฌ๋ผ์งˆ ์ˆ˜ ์žˆ๋‹ค. # ์—ฌ๊ธฐ์„œ๋Š” ๊ฐ„๋‹จํ•œ ์„ ํ˜•ํšŒ๊ท€ ๋ชจ๋ธ์˜ ํ›ˆ๋ จ๊ณผ์ •์„ ๊ตฌ์ฒด์ ์œผ๋กœ ์‚ดํŽด๋ณด๋ฉด์„œ ๋จธ์‹ ๋Ÿฌ๋‹ ๋ชจ๋ธ ํ›ˆ๋ จ์„ ์†Œ๊ฐœํ•œ๋‹ค. 
# ## ์„ ํ˜•ํšŒ๊ท€ ๋ชจ๋ธ ํ›ˆ๋ จ # ๋จผ์ € ๋จธ์‹ ๋Ÿฌ๋‹ ๋ชจ๋ธ ํ›ˆ๋ จ์— ์‚ฌ์šฉ๋˜๋Š” ๋ฐ์ดํ„ฐ ํ›ˆ๋ จ ์„ธํŠธ๋Š” $m \times n$ ๋ชจ์–‘์˜ 2์ฐจ์› ์–ด๋ ˆ์ด๋กœ ํ‘œํ˜„๋จ์„ ๊ธฐ์–ตํ•ด์•ผ ํ•œ๋‹ค. # # - $m$: ํ›ˆ๋ จ ์„ธํŠธ ํฌ๊ธฐ, ์ฆ‰ ํ›ˆ๋ จ ๋ฐ์ดํ„ฐ ์ƒ˜ํ”Œ์˜ ๊ฐœ์ˆ˜. # - $n$: ํ›ˆ๋ จ ๋ฐ์ดํ„ฐ ์ƒ˜ํ”Œ์˜ ํŠน์„ฑ ์ˆ˜. ์ฆ‰, ํ›ˆ๋ จ ๋ฐ์ดํ„ฐ ์ƒ˜ํ”Œ์„ ํ‘œํ˜„ํ•˜๋Š” 1์ฐจ์› ์–ด๋ ˆ์ด์˜ ๊ธธ์ด. # ์–ด๋ ˆ์ด ๊ฐ๊ฐ์˜ ํ•ญ๋ชฉ์„ ํŠน์„ฑ์ด๋ผ ๋ถ€๋ฆ„. # - $\mathbf{x}_{j}^{(i)}$: $i$ ๋ฒˆ์งธ ํ›ˆ๋ จ ์ž…๋ ฅ ๋ฐ์ดํ„ฐ ์ƒ˜ํ”Œ์˜ $j$๋ฒˆ์งธ ํŠน์„ฑ๊ฐ’. # # $$ # \mathbf{X}_{\textit{train}} = # \begin{bmatrix} # \mathbf{x}_{1}^{(1)} & \mathbf{x}_{2}^{(1)} & \cdots & \mathbf{x}_{n}^{(1)}\\ # \mathbf{x}_{1}^{(2)} & \mathbf{x}_{2}^{(2)} & \cdots & \mathbf{x}_{n}^{(2)}\\ # & \vdots & \\ # \mathbf{x}_{1}^{(m)} & \mathbf{x}_{2}^{(m)} & \cdots & \mathbf{x}_{n}^{(m)} # \end{bmatrix} # $$ # ์•„๋ž˜ ์ฝ”๋“œ๋Š” ์„ ํ˜•ํšŒ๊ท€ ํ•™์Šต๊ณผ์ •์„ ์„ค๋ช…ํ•˜๊ธฐ ์œ„ํ•ด ์‚ฌ์šฉ๋˜๋Š” ํ•˜๋‚˜์˜ ํŠน์„ฑ์„ ์‚ฌ์šฉํ•˜๋Š” # ๊ฐ„๋‹จํ•œ ํ›ˆ๋ จ ๋ฐ์ดํ„ฐ๋ฅผ ์ƒ์„ฑํ•œ๋‹ค. # # * `X`: ํ›ˆ๋ จ ์„ธํŠธ. ํ•˜๋‚˜์˜ ํŠน์„ฑ `x1`์„ ๊ฐ–๋Š” 100๊ฐœ์˜ ๋ฐ์ดํ„ฐ. ์ฆ‰, $m=100$, $n=1$. # * `y`: 100 ๊ฐœ์˜ ๋ ˆ์ด๋ธ”. ๊ธฐ๋ณธ์ ์œผ๋กœ `4 + 3 * x`์˜ ํ˜•์‹์„ ๋”ฐ๋ฅด๋‚˜ ํ›ˆ๋ จ์„ ์œ„ํ•ด ์žก์Œ(noise)๋ฅผ ์ถ”๊ฐ€ ํ–ˆ์Œ. # # __์ฐธ๊ณ :__ ์ •๊ทœ๋ถ„ํฌ๋ฅผ ๋”ฐ๋ฅด๋Š” ๋ถ€๋™์†Œ์ˆ˜์  100๊ฐœ๋ฅผ ๋ฌด์ž‘์œ„๋กœ ์ƒ์„ฑํ•˜์—ฌ ์žก์Œ์œผ๋กœ ์‚ฌ์šฉํ•˜์˜€๋‹ค. # + import numpy as np X = 2 * np.random.rand(100, 1) y = 4 + 3 * X + np.random.randn(100, 1) # X์™€ y์˜ ๊ด€๊ณ„๋ฅผ 1์ฐจ ํ•จ์ˆ˜๋กœ ํ‘œํ˜„ # - X.shape y.shape # ํŠน์„ฑ `x1`๊ณผ ๋ ˆ์ด๋ธ” `y`์˜ ๊ด€๊ณ„๋ฅผ ๊ทธ๋ฆฌ๋ฉด ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค. # ๊ธฐ๋ณธ์ ์œผ๋กœ `y = 4 + 3 * x` ์˜ ์„ ํ˜•๊ด€๊ณ„๋ฅผ ๊ฐ–์ง€๋งŒ ์žก์Œ์œผ๋กœ ์ธํ•ด ๋ฐ์ดํ„ฐ๊ฐ€ ํผ์ ธ ์žˆ๋‹ค. 
plt.plot(X, y, "b.") # ํŒŒ๋ž‘ ์ : ํ›ˆ๋ จ ์„ธํŠธ ์‚ฐ์ ๋„ plt.xlabel("$x_1$", fontsize=18) # x์ถ• ํ‘œ์‹œ plt.ylabel("$y$", rotation=0, fontsize=18) # y์ถ• ํ‘œ์‹œ plt.axis([0, 2, 0, 15]) # x์ถ•, y์ถ• ์Šค์ผ€์ผ ์ง€์ • plt.show() # ### ์ •๊ทœ ๋ฐฉ์ •์‹ # ์ด์ œ $x_1$๊ณผ $y$๊ฐ€ ์–ด๋–ป๊ฒŒ ์ƒ์„ฑ๋˜์—ˆ๋Š”์ง€ ๋ชจ๋ฅธ๋‹ค๋Š” ๊ฐ€์ • ํ•˜์— ๋‘ ๋ณ€์ˆ˜ ์‚ฌ์ด์˜ ๊ด€๊ณ„๋ฅผ ์ฐพ๋Š” ๊ฒŒ ๋ชฉํ‘œ์ด๋‹ค. # ์ฆ‰, ์•„๋ž˜ ์‹์„ ๋งŒ์กฑ์‹œํ‚ค๋Š” ์ตœ์ ์˜ $\theta_0$, $\theta_1$์„ ์ฐพ์•„์•ผ ํ•œ๋‹ค. # $$ # \begin{align*} # y^{(1)} &= \theta_0 + \theta_1 \cdot x_1^{(1)} \\ # y^{(2)} &= \theta_0 + \theta_1 \cdot x_1^{(2)} \\ # &= \cdots \\ # y^{(100)} &= \theta_0 + \theta_1 \cdot x_1^{(100)} # \end{align*} # $$ # ์ด๋ฅผ ํ–‰๋ ฌ์‹์œผ๋กœ ํ‘œํ˜„ํ•˜๋ฉด ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค. # $$ # \begin{bmatrix} # y^{(1)} \\ # y^{(2)} \\ # \vdots \\ # y^{(100)} # \end{bmatrix} # = # \begin{bmatrix} # 1 & \mathbf{x}_{1}^{(1)} \\ # 1 & \mathbf{x}_{1}^{(2)} \\ # & \vdots \\ # 1 & \mathbf{x}_{1}^{(m)} # \end{bmatrix} # \cdot # \begin{bmatrix} # \theta_0 \\ # \theta_1 # \end{bmatrix} # $$ # ์ผ๋ฐ˜์ ์œผ๋กœ๋Š” ํ›ˆ๋ จ ๋ฐ์ดํ„ฐ์˜ ๊ฐ ์ƒ˜ํ”Œ์ด $n \ge 1$ ๊ฐœ์˜ ํŠน์„ฑ์„ ๊ฐ€์ง€๋ฉฐ, ๋”ฐ๋ผ์„œ ์•„๋ž˜ ์‹์„ # ๋งŒ์กฑํ•˜๋Š” $\theta_0, \theta_1, \dots, \theta_n$์„ ์ฐพ์•„์•ผ ํ•œ๋‹ค. # $$ # \begin{bmatrix} # y^{(1)} \\ # y^{(2)} \\ # \vdots \\ # y^{(100)} # \end{bmatrix} # = # \begin{bmatrix} # 1 & \mathbf{x}_{1}^{(1)} & \mathbf{x}_{2}^{(1)} & \cdots & \mathbf{x}_{n}^{(1)}\\ # 1 & \mathbf{x}_{1}^{(2)} & \mathbf{x}_{2}^{(2)} & \cdots & \mathbf{x}_{n}^{(2)}\\ # & & \vdots & \\ # 1 & \mathbf{x}_{1}^{(m)} & \mathbf{x}_{2}^{(m)} & \cdots & \mathbf{x}_{n}^{(m)} # \end{bmatrix} # \cdot # \begin{bmatrix} # \theta_0 \\ # \theta_1\\ # \theta_2 \\ # \vdots \\ # \theta_n \\ # \end{bmatrix} # $$ # ์ด ๊ณผ์ •์„ ๋ณด๋‹ค ๋‹จ์ˆœํ•˜๊ฒŒ ํ‘œํ˜„ํ•˜๋ฉด ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค. 
# # ๊ธธ์ด๊ฐ€ $m$์ธ 1์ฐจ์› ์–ด๋ ˆ์ด $\mathbf{y}$์™€ $(m, n+1)$ ๋ชจ์–‘์˜ 2์ฐจ์› ์–ด๋ ˆ์ด $\mathbf{X}$๊ฐ€ ์ฃผ์–ด์กŒ์„ ๋•Œ, # ์•„๋ž˜ ์‹์„ ๋งŒ์กฑ์‹œํ‚ค๋ฉด์„œ ๊ธธ์ด๊ฐ€ $n+1$์ธ 1์ฐจ์› ์–ด๋ ˆ์ด $\hat{\boldsymbol{\theta}}$๋ฅผ ๊ตฌํ•ด์•ผ ํ•œ๋‹ค. # # $$ # \mathbf{y} = \mathbf{X} \,\hat{\boldsymbol{\theta}} # $$ # # ๋งŒ์•ฝ์— $(\mathbf{X}^T \mathbf{X})$์˜ ์—ญํ–‰๋ ฌ $(\mathbf{X}^T \mathbf{X})^{-1}$์ด ์กด์žฌํ•˜๊ณ  # ์‹ค์ œ๋กœ ์ผ์ • ์‹œ๊ฐ„ ๋‚ด์— ๊ณ„์‚ฐ์ด ๊ฐ€๋Šฅํ•˜๋‹ค๋ฉด ์ตœ์ ์˜ # ํŒŒ๋ผ๋ฏธํ„ฐ ์กฐํ•ฉ $\boldsymbol{\hat\theta}$์„ ์•„๋ž˜ __์ •๊ทœ ๋ฐฉ์ •์‹__์œผ๋กœ ์ง์ ‘ ๊ตฌํ•  ์ˆ˜ ์žˆ๋‹ค. # # $$ # \hat{\boldsymbol{\theta}} = (\mathbf{X}^T \mathbf{X})^{-1} \mathbf{X}^T \mathbf{y} # $$ # ### ์ •๊ทœ ๋ฐฉ์ •์‹ ํ™œ์šฉ # ์•„๋ž˜ ์ฝ”๋“œ๋Š” ์œ„ ์ •๊ทœ ๋ฐฉ์ •์‹์„ ์ด์šฉํ•˜์—ฌ ์œ„์—์„œ ์ƒ์„ฑํ•œ 100๊ฐœ์˜ ํ›ˆ๋ จ ์ƒ˜ํ”Œ์ด ํฌํ•จ๋œ # ํ›ˆ๋ จ ์„ธํŠธ์— ๋Œ€ํ•œ ์ตœ์ ์˜ $\theta_0, \theta_1$์„ ๊ณ„์‚ฐํ•œ๋‹ค. # # __์ฃผ์˜์‚ฌํ•ญ:__ # # * `np.ones((100, 1))`: ์ ˆํŽธ $\theta_0$๋ฅผ ๊ณ ๋ คํ•˜๊ธฐ ์œ„ํ•ด ํ›ˆ๋ จ ์„ธํŠธ์˜ 0๋ฒˆ ์—ด์— ์ถ”๊ฐ€๋˜๋Š” 1๋กœ ์ด๋ฃจ์–ด์ง„ ๋ฒกํ„ฐ. # * `X_b`: ๋ชจ๋“  ์ƒ˜ํ”Œ์— ๋Œ€ํ•ด `1`์ด 0๋ฒˆ ์ธ๋ฑ์Šค์ฒด ์ถ”๊ฐ€๋œ ํ›ˆ๋ จ ์„ธํŠธ๋ฅผ ๋‚˜ํƒ€๋‚ด๋Š” 2์ฐจ์› ์–ด๋ ˆ์ด X_b = np.c_[np.ones((100, 1)), X] # ๋ชจ๋“  ์ƒ˜ํ”Œ์— x0 = 1 ์ถ”๊ฐ€ X_b[:5] # ์ด์ œ ์ •๊ทœ ๋ฐฉ์ •์‹์„ ํ™œ์šฉํ•  ์ˆ˜ ์žˆ๋‹ค. theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y) # ๊ณ„์‚ฐ๋œ $\boldsymbol{\hat\theta} = [\theta_0, \theta_1]$ ์€ ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค. theta_best # ## ์‚ฌ์ดํ‚ท๋Ÿฐ์˜ `LinearRegression` ๋ชจ๋ธ # ์‚ฌ์ดํ‚ท๋Ÿฐ(scikit-learn) ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋Š” ๋จธ์‹ ๋Ÿฌ๋‹์—์„œ ์‚ฌ์šฉ๋˜๋Š” ๋‹ค์–‘ํ•œ ๋ชจ๋ธ์˜ ๊ธฐ๋ณธ์ ์ธ ํ‹€(basic models)๋“ค์„ ์ œ๊ณตํ•œ๋‹ค. # ์„ ํ˜•ํšŒ๊ท€์˜ ๊ฒฝ์šฐ `LinearRegression` ํด๋ž˜์Šค์˜ ๊ฐ์ฒด๋ฅผ ์ƒ์„ฑํ•˜์—ฌ ํ›ˆ๋ จ์‹œํ‚ค๋ฉด # ์ตœ์ ์˜ ์ ˆํŽธ๊ณผ ๊ธฐ์šธ๊ธฐ๋ฅผ ๊ณ„์‚ฐํ•ด์ค€๋‹ค. # ๋ชจ๋ธ์„ ์ง€์ •ํ•˜๊ณ  ํ›ˆ๋ จ์‹œํ‚ค๋Š” ๊ณผ์ •์€ ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค. # # ๋จผ์ € ์„ ํ˜•ํšŒ๊ท€ ๋ชจ๋ธ์˜ ๊ฐ์ฒด๋ฅผ ์ƒ์„ฑํ•œ๋‹ค. 
# +
from sklearn import linear_model

lin1 = linear_model.LinearRegression()
# -

# Scikit-learn models take care of adding the ones vector (the vector made only
# of 1s) automatically. However, both the input data and the target data must
# be given as 2-D arrays.

X_train = np.c_[X]
y_train = np.c_[y]

# Now call the `fit()` method to train the model.

lin1.fit(X_train, y_train)

# The optimal $\theta_0, \theta_1$ found by the trained model are identical to
# the result obtained earlier with the normal equation.

# +
t0, t1 = lin1.intercept_[0], lin1.coef_[0][0]

# BUG FIX: the original f-strings were f"...:\t {t0}t1" / f"...:\t {t1}t1",
# which appended a stray literal "t1" after each printed number.
print(f"์ ˆํŽธ:\t {t0}")
print(f"๊ธฐ์šธ๊ธฐ:\t {t1}")
# -

# ### The trained model

# The trained prediction model corresponds to the straight line with intercept
# $\theta_0$ and slope $\theta_1$.
# To draw the graph of the linear function representing the model, first pick
# two points located on the line.

X_new = np.array([[0], [2]])

# Compute the predictions for the two points.

y_predict = lin1.predict(X_new)

# Plotting the training-set scatter together with the line through those two
# points — i.e. the graph of the prediction model — gives the figure below.

# +
# training-set scatter: blue dots
plt.plot(X, y, "b.")

# prediction model (degree-1 function): red line
plt.plot(X_new, y_predict, "r-", linewidth=2, label="Predictions")

plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.legend(loc="upper left", fontsize=14)   # legend position
plt.axis([0, 2, 0, 15])                     # x/y axis scale
plt.show()
# -

# ## The Moore-Penrose pseudo-inverse

# The normal equation introduced above can only be used when the inverse
# $(\mathbf{X}^T \mathbf{X})^{-1}$ of $(\mathbf{X}^T \mathbf{X})$ exists.
# When it does not, an approximation of $\boldsymbol{\hat\theta}$ can still be
# computed with the Moore-Penrose inverse $\mathbf{X}^+$ of $\mathbf{X}$,
# obtained via the singular value decomposition (SVD) of the matrix.
#
# $$
# \hat{\boldsymbol{\theta}} = \mathbf{X}^+ \mathbf{y}
# $$
#
# Scikit-learn's `LinearRegression` model uses exactly this Moore-Penrose inverse.
#
# __Note:__ the Moore-Penrose inverse is also called the __pseudo-inverse__.

# ### Limits of computing the Moore-Penrose inverse

# The algorithm computing the Moore-Penrose inverse has computational
# complexity $O(n^2)$: the computation time grows with the square of the
# number of features.
#
# For example, computing the Moore-Penrose inverse of an identity matrix with
# 10,000 rows takes on the order of minutes; with 100,000 rows it would take
# about 100 times longer — several hours.
#
# __Warning:__ depending on the machine, the code below can take several minutes.

# +
import time

# NOTE: use a fresh name here — the original reused `X`, silently clobbering
# the training set defined earlier in the notebook.
X_eye = np.eye(10000)
start = time.time()            # start timing
np.linalg.pinv(X_eye)
end = time.time()              # done
duration = end - start         # elapsed time
# -

print(f"{duration:.1f} ์ดˆ")

# Hence, for data with a large number of features, scikit-learn's
# `LinearRegression` model cannot realistically be used.
# The alternative is gradient descent, and implementing the many models that
# apply gradient descent is a central topic of machine learning and deep learning.
notebooks/pydata09_machine_learning_preview_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <span style="color:orange"> Exercise 10.1 </span> # # ## <span style="color:green"> Task </span> # By adapting your Genetic Algorithm code, developed during the Numerical Exercise 9, write a C++ code to solve the TSP with a Simulated Annealing (SA) algorithm. Apply your code to the optimization of a path among # - 30 cities randomly placed on a circumference # - 30 cities randomly placed inside a square # # Show your results via: # # - a picture of the length of the best path as a function of the iteration of your algorithm # - a picture of the best path # # ## <span style="color:green"> Solution </span> # In order to complete the given task, the code inherits most functionalities from the Ex9 Genetic Algorithm class and adds a Simulated Annealing algorithm. This package also contains a citygen.hpp file that generates the city positions according to a specific configuration, a genetic.hpp file that contains the class for the Genetic Algorithm, a main.cpp file where the simulation is defined and ran, and various files with useful objects (population,individual,etc..).<br> # The parameters that are initialized from a configuration file are: # - layout(str): city arrangement # - ncities(int): number of cities # - box(float): size of city layout # - metric(str): type of metric for calculating distances # - iterations(int): number of iterations for the genetic algorithm # - N(int): number of explored temperatures # - step(double): increment of beta at each temperature value # # The main function executes the Genetic Algorithm using a Simulated Annealing algorithm, that initially proposed a path that connects all cities, according to the appropriate boundary conditions. 
The search for the optimal path consists of suggesting a change in the proposed path using the Mutation function; the Metropolis algorithm then decides whether to accept or reject the newly proposed path. The transition probability is:
# $$ \Large{P = e^{-\beta(F_{new} - F_{old})}} $$
# After a certain number of iterations at a fixed value of $\beta$, the latter is multiplied by a step given in the settings; since $\beta$ is an inverse temperature, this makes the temperature decrease slowly enough to explore numerous configurations in the earliest stages.
#
# The program saves the results of each simulation in a dedicated SQL database that contains the parameters of the simulation, the path with the lowest fitness value, the fitness value for each beta value, and the city arrangement.<br>
# The notebook below explores in detail the dependencies of the fitness value of the best individual, such as the dependency on the increment step of $\beta$, on the number of iterations for each $\beta$ value, on the metric, and more.
# For this purpose, various values of each parameter are explored and fed to the simulation.
# A temporary configuration file is created with the right values and given to the main script as an input parameter for each iteration (see second cell).<br>
# The two main sections are dedicated to a city arrangement on a circumference and on a square.<br><br>

# ## <span style="color:red; display: block; text-align: center"> Circle </span>

import numpy as np
import sqlite3
import sys, os
sys.path.append("10.1/")
import utility
import time
import logging
import subprocess
logging.disable(sys.maxsize)   # silence all logging emitted by the helper modules
import datetime
import inspect
# NOTE(review): sqlite3, inspect and numba.jit are imported but never used in
# this notebook — presumably leftovers; confirm before removing.
from numba import jit
from matplotlib import pyplot as plt

# Global city layout; run() and the DatabaseHandler readers below read this.
LAYOUT = "circle"


# +
def chop_microseconds(delta):
    """
    Remove microseconds from datetime object
    """
    return delta - datetime.timedelta(microseconds=delta.microseconds)

def run():
    """Sweep every (ntemp, beta_step, iterations, box, metric) combination and
    launch one external `./main` simulation per combination for the current
    global LAYOUT, writing each run's results into a "results<index>" database.

    Progress is printed roughly every 10% of the sweep.
    """
    global LAYOUT
    ncities = 32
    metrics = ["L1","L2"]
    boxes = [1]
    ntemps = [1000,2000,5000]
    iters = [100,500,1000,2000,5000]
    beta_steps = [1.0001, 1.001, 1.01, 1.1]
    # total number of simulations in the sweep
    total = len(metrics)*len(boxes)*len(iters)*len(beta_steps)*len(ntemps)
    # simulation indices at which a progress message is printed (~every 10%)
    percentages = [int(i*total/100) for i in np.arange(0,110,10)]
    index=1
    utility.clear_previous_databases(LAYOUT)
    print("----> Running a total of {} simulations\n".format(total))
    time_start = time.time()
    subprocess.run(["make"])   # (re)build the C++ simulator once, before the sweep
    for ntemp in ntemps:
        for step in beta_steps:
            for _iter in iters:
                for box in boxes:
                    for metric in metrics:
                        if index in percentages:
                            print("Executing simulation ",index,"/",total)
                            time_temp = time.time()
                            elapsed = time_temp - time_start
                            date = str(chop_microseconds(datetime.timedelta(seconds=elapsed)))
                            print("Time elapsed {} ({:.2f} seconds)\n".format(date,elapsed))
                        # write the temporary .ini file consumed by ./main for this combination
                        utility.create_temp_ini(LAYOUT,ncities,box,metric,_iter,ntemp,step,"results"+str(index))
                        p = subprocess.Popen("./main tempfile.ini",shell=True,stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)
                        # NOTE(review): stdin is not a pipe here, so the bytes passed to
                        # communicate() are effectively discarded; communicate() only
                        # serves to wait for the process — confirm before simplifying.
                        output, err = p.communicate(b"input data that is passed to subprocess' stdin")
                        rc = p.returncode
                        index+=1
    time_end = time.time()
    date = str(chop_microseconds(datetime.timedelta(seconds=time_end - time_start)))
    print("Total time elapsed: {}\n".format(date))

run()
# -

print(f"Cities generated on a {LAYOUT}")
cityi, cityx, cityy = np.loadtxt(f"./10.1/outputs/{LAYOUT}/cityposition.dat",unpack=True)
s = [4 for k in range(len(cityi))]   # marker size for every city
fig, ax = plt.subplots(figsize=(14,6))
ax.scatter(cityx, cityy,color="blue",s=s)
for index, txt in enumerate(cityi):
    ax.annotate(int(txt), (cityx[index], cityy[index]))   # label each city with its index

# ### Dependency on $\beta$ increment
# This section aims to analyze the dependency on beta of the fitness level of the best individual.

#explore beta values
reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=5000,iterations=2000,metric="L2")
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = [q.get_fitness() for q in queries]
fig, ax = plt.subplots(1,1,figsize=(12,5))
for i,res in enumerate(results):
    fitness = [j[-1] for j in res]   # last column of each fitness record
    plt.plot(fitness,linewidth=2,label="Beta: "+str(queries[i].get_data()["beta_step"]),zorder=10/(i+1))
plt.title("Fitness trend for different beta increment step")
plt.xlabel("Step")
plt.ylabel("Fitness")
plt.grid(True)
plt.legend()
plt.show()
print("\n")

betas = sorted([[j[1] for j in res] for res in results])   # beta trajectory of each run
fig, ax = plt.subplots(1,len(betas),figsize=(14,6))
plt.suptitle("Beta values for different beta increment step")
# NOTE(review): enumerate starts at 0, so ax[index-1]/queries[index-1] put the
# first series into the last axes via negative indexing; the title/data pairing
# is consistent, only the subplot order is rotated.
for index, beta in enumerate(betas):
    ax[index-1].set_title("Beta: "+str(queries[index-1].get_data()["beta_step"]))
    ax[index-1].plot(betas[index-1],linewidth=2)
    ax[index-1].set_xlabel("Step")
    ax[index-1].set_ylabel("Beta")
    ax[index-1].grid(True)
plt.show()
print("")

# ### Dependency on number of iterations
# The number of iterations is the number of times a new path is searched for a fixed value of $\beta$.
# The expectation for this dependency is a faster convergence of the fitness value, because more possible paths are discovered in a shorter amount of time, therefore finding the optimal path in an early iteration.

reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=5000,metric="L2",beta_step=1.01)
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = [q.get_fitness() for q in queries]
fig, ax = plt.subplots(1,1,figsize=(12,5))
for i,res in enumerate(results):
    fitness = [j[-1] for j in res]
    plt.plot(fitness,linewidth=1,label="Iterations: "+str(queries[i].get_data()["iterations"]))
plt.title("Fitness trend for different numbers of iterations")
plt.xlabel("Step")
plt.ylabel("Fitness")
plt.grid(True)
plt.legend()
plt.show()
print("")

# ### Dependency on metric

reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=5000,iterations=5000,beta_step=1.01)
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = [q.get_fitness() for q in queries]
fig, ax = plt.subplots(1,1,figsize=(12,5))
for i,res in enumerate(results):
    fitness = [j[-1] for j in res]
    plt.plot(fitness,linewidth=1,label="Metric: "+str(queries[i].get_data()["metric"]))
plt.title("Fitness trend for different metrics")
plt.xlabel("Step")
plt.ylabel("Fitness")
plt.grid(True)
plt.legend()
plt.show()

# ## Best paths
# In the current section a graph of the best path is shown for a given configuration. The criteria for selecting the parameters are the following:
# - Fixed beta step of 1.01, which has one of the best convergences as shown above
# - Fixed metric L2
# - Variation of ```number of explored temperatures``` and ```number of iterations per population```.
# The values for both quantities are 1000 and 5000, for a total of 4 configurations, because the goal is to show that a good balance of those values is necessary for a good convergence (e.g. a very high iteration number and number of temperatures, $1^{st}$ path, overfits the optimal path).

fig, ax = plt.subplots(figsize=(14,6))
ax.scatter(cityx, cityy,color="blue",s=s)
for index, txt in enumerate(cityi):
    ax.annotate(int(txt), (cityx[index], cityy[index]))
reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=5000,iterations=5000,beta_step=1.01,metric="L2")
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = queries[0].get_best()
fitnessvalue = queries[0].get_fitness()[-1][2]
plt.plot([cityx[gene] for gene in results],[cityy[gene] for gene in results])
plt.show()
print("Fitness: {}\n\n".format(fitnessvalue))

fig, ax = plt.subplots(figsize=(14,6))
ax.scatter(cityx, cityy,color="blue",s=s)
for index, txt in enumerate(cityi):
    ax.annotate(int(txt), (cityx[index], cityy[index]))
reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=5000,iterations=1000,beta_step=1.01,metric="L2")
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = queries[0].get_best()
fitnessvalue = queries[0].get_fitness()[-1][2]
plt.plot([cityx[gene] for gene in results],[cityy[gene] for gene in results])
plt.show()
print("Fitness: {}\n\n".format(fitnessvalue))

fig, ax = plt.subplots(figsize=(14,6))
ax.scatter(cityx, cityy,color="blue",s=s)
for index, txt in enumerate(cityi):
    ax.annotate(int(txt), (cityx[index], cityy[index]))
reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=1000,iterations=5000,beta_step=1.01,metric="L2")
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = queries[0].get_best()
fitnessvalue = queries[0].get_fitness()[-1][2]
plt.plot([cityx[gene] for gene in results],[cityy[gene] for gene in results])
plt.show()
print("Fitness: {}\n\n".format(fitnessvalue))

fig, ax = plt.subplots(figsize=(14,6))
ax.scatter(cityx, cityy,color="blue",s=s)
for index, txt in enumerate(cityi):
    ax.annotate(int(txt), (cityx[index], cityy[index]))
reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=1000,iterations=1000,beta_step=1.01,metric="L2")
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = queries[0].get_best()
fitnessvalue = queries[0].get_fitness()[-1][2]
plt.plot([cityx[gene] for gene in results],[cityy[gene] for gene in results])
plt.show()
print("Fitness: {}\n\n\n".format(fitnessvalue))

# ## <span style="color:red; display: block; text-align: center"> Square </span>

# BUG FIX: the global LAYOUT must be switched to "square" BEFORE calling run();
# run() reads LAYOUT (clear_previous_databases / create_temp_ini), so the
# original order (run() first, then LAYOUT = "square") re-ran the circle
# simulations and only relabelled the database readers afterwards.
LAYOUT = "square"
run()

print(f"Cities generated on a {LAYOUT}")
cityi, cityx, cityy = np.loadtxt(f"./10.1/outputs/{LAYOUT}/cityposition.dat",unpack=True)
s = [4 for k in range(len(cityi))]
fig, ax = plt.subplots(figsize=(14,6))
ax.scatter(cityx, cityy,color="blue",s=s)
for index, txt in enumerate(cityi):
    ax.annotate(int(txt), (cityx[index], cityy[index]))

# ### Dependency on $\beta$ increment
# This section aims to analyze the dependency on beta of the fitness level of the best individual.

#explore beta values
reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=5000,iterations=2000,metric="L2")
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = [q.get_fitness() for q in queries]
fig, ax = plt.subplots(1,1,figsize=(12,5))
for i,res in enumerate(results):
    fitness = [j[-1] for j in res]
    plt.plot(fitness,linewidth=2,label="Beta: "+str(queries[i].get_data()["beta_step"]),zorder=10/(i+1))
plt.title("Fitness trend for different beta increment step")
plt.xlabel("Step")
plt.ylabel("Fitness")
plt.grid(True)
plt.legend()
plt.show()
print("\n")

betas = sorted([[j[1] for j in res] for res in results])
fig, ax = plt.subplots(1,len(betas),figsize=(14,6))
plt.suptitle("Beta values for different beta increment step")
for index, beta in enumerate(betas):
    ax[index-1].set_title("Beta: "+str(queries[index-1].get_data()["beta_step"]))
    ax[index-1].plot(betas[index-1],linewidth=2)
    ax[index-1].set_xlabel("Step")
    ax[index-1].set_ylabel("Beta")
    ax[index-1].grid(True)
plt.show()
print("")

# ### Dependency on number of iterations
# The number of iterations is the number of times a new
# path is searched for a fixed value of $\beta$. The expectation for this dependency is a faster convergence of the fitness value, because more possible paths are discovered in a shorter amount of time, therefore finding the optimal path in an early iteration.

reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=5000,metric="L2",beta_step=1.01)
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = [q.get_fitness() for q in queries]
fig, ax = plt.subplots(1,1,figsize=(12,5))
for i,res in enumerate(results):
    fitness = [j[-1] for j in res]
    plt.plot(fitness,linewidth=1,label="Iterations: "+str(queries[i].get_data()["iterations"]))
plt.title("Fitness trend for different numbers of iterations")
plt.xlabel("Step")
plt.ylabel("Fitness")
plt.grid(True)
plt.legend()
plt.show()
print("")

# ### Dependency on metric

reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=5000,iterations=5000,beta_step=1.01)
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = [q.get_fitness() for q in queries]
fig, ax = plt.subplots(1,1,figsize=(12,5))
for i,res in enumerate(results):
    fitness = [j[-1] for j in res]
    plt.plot(fitness,linewidth=1,label="Metric: "+str(queries[i].get_data()["metric"]))
plt.title("Fitness trend for different metrics")
plt.xlabel("Step")
plt.ylabel("Fitness")
plt.grid(True)
plt.legend()
plt.show()

# ## Best paths
# In the current section, as for the circular configuration, a graph of the best path is shown for a given configuration. The criteria for selecting the parameters are still the following:
# - Fixed beta step of 1.01, which has one of the best convergences as shown above
# - Fixed metric L2
# - Variation of ```number of explored temperatures``` and ```number of iterations per population```.
# The values for both quantities are 1000 and 5000, for a total of 4 configurations, because the goal is to show that a good balance of those values is necessary for a good convergence (e.g. a very high iteration number and number of temperatures, $1^{st}$ path, overfits the optimal path).

fig, ax = plt.subplots(figsize=(14,6))
ax.scatter(cityx, cityy,color="blue",s=s)
for index, txt in enumerate(cityi):
    ax.annotate(int(txt), (cityx[index], cityy[index]))
reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=5000,iterations=5000,beta_step=1.01,metric="L2")
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = queries[0].get_best()
fitnessvalue = queries[0].get_fitness()[-1][2]
plt.plot([cityx[gene] for gene in results],[cityy[gene] for gene in results])
plt.show()
print("Fitness: {}\n\n".format(fitnessvalue))

fig, ax = plt.subplots(figsize=(14,6))
ax.scatter(cityx, cityy,color="blue",s=s)
for index, txt in enumerate(cityi):
    ax.annotate(int(txt), (cityx[index], cityy[index]))
reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=5000,iterations=1000,beta_step=1.01,metric="L2")
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = queries[0].get_best()
fitnessvalue = queries[0].get_fitness()[-1][2]
plt.plot([cityx[gene] for gene in results],[cityy[gene] for gene in results])
plt.show()
print("Fitness: {}\n\n".format(fitnessvalue))

fig, ax = plt.subplots(figsize=(14,6))
ax.scatter(cityx, cityy,color="blue",s=s)
for index, txt in enumerate(cityi):
    ax.annotate(int(txt), (cityx[index], cityy[index]))
reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=1000,iterations=5000,beta_step=1.01,metric="L2")
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = queries[0].get_best()
fitnessvalue = queries[0].get_fitness()[-1][2]
plt.plot([cityx[gene] for gene in results],[cityy[gene] for gene in results])
plt.show()
print("Fitness: {}\n\n".format(fitnessvalue))

# BUG FIX: the original repeated the (ntemp=1000, iterations=5000) query three
# times; the fourth intended configuration is (ntemp=1000, iterations=1000),
# mirroring the circle section ("4 configurations" per the markdown above),
# and the redundant duplicate cell has been removed.
fig, ax = plt.subplots(figsize=(14,6))
ax.scatter(cityx, cityy,color="blue",s=s)
for index, txt in enumerate(cityi):
    ax.annotate(int(txt), (cityx[index], cityy[index]))
reader = utility.DatabaseHandler(layout=LAYOUT,ntemp=1000,iterations=1000,beta_step=1.01,metric="L2")
print(reader)
queries = reader.search(False)
print("Found ",len(queries)," queries")
results = queries[0].get_best()
fitnessvalue = queries[0].get_fitness()[-1][2]
plt.plot([cityx[gene] for gene in results],[cityy[gene] for gene in results])
plt.show()
print("Fitness: {}\n\n".format(fitnessvalue))

# ## <span style="color:green"> Results </span>

# The fitness value of the best individual found has been explored by tweaking numerous parameters. For both city arrangements, the best convergence is obtained for a $\beta$ increment step of 1.01, while a faster (but poorer) convergence is obtained for a step of 1.1. The latter is considered too large because it quickly converges to a value that lies above the best possible one.<br>
# Both graphs regarding the different numbers of iterations demonstrate that the value ought to be neither too small nor too large. In the first case an optimal solution for a given value of $\beta$ may not be found due to the small number of iterations, while in the second case a non-optimal solution may be found too early. The graphs showing the best paths also show that the best fitness values are achieved for mixed values of temperatures and iterations (1000 and 5000 or vice versa), meaning that balanced values should be sought for these parameters.
# # <span style="color:orange"> Exercise 10.2 </span>
# ## <span style="color:green"> Task </span>
# Parallelize the Simulated Annealing algorithm with MPI
#
# ## <span style="color:green"> Solution </span>

# +
import sys, os
sys.path.append("./10.2/")
import utils
from matplotlib import pyplot as plt

# Run configuration of the parallel simulated-annealing experiment.
basedir = "./10.2/outputs/"
ncities = 32
iters = 2000
ntemps = 1000
box = 1
betastep = 1.004
processes = 5
# -

# +
# Best fitness reached for each total number of MPI processes; also count,
# per process rank, how often that rank produced the overall minimum.
fig, ax = plt.subplots(figsize=(12, 6))
plt.suptitle("Best fitness value as a function of the total number of processes")
mins = list()
# One counter per possible rank (the original also built a dead literal dict
# that was immediately overwritten; removed).
indexes = {str(i): 0 for i in range(processes)}
for p in range(2, processes + 1):
    reader = utils.DatabaseHandler(processes=p, layout="circle")
    queries = reader.search(False)
    results = queries[0].get_result("FITNESS")
    print(p, " ", results)
    at = results.index(min(results))
    indexes[str(at)] += 1
    plt.bar(p, min(results), label=f"Min at process {at}")
    mins.append(min(results))
plt.xlabel("Number of processes")
plt.ylabel("Best fitness")
plt.legend()
plt.show()
# -

# Execution time as a function of the total number of processes.
fig, ax = plt.subplots(figsize=(12, 6))
plt.suptitle("Execution time as a function of the total number of processes")
for p in range(2, processes + 1):
    reader = utils.DatabaseHandler(processes=p, layout="circle")
    queries = reader.search(False)
    results = queries[0].get_result("TIME")
    print(p, " ", results)
    plt.bar(p, min(results), width=0.9)
plt.xticks([i for i in range(2, processes + 1)])
plt.xlabel("Number of processes")
# Fixed: this chart shows execution time, not fitness (copy-paste label bug).
plt.ylabel("Execution time")
plt.show()

# Pie chart: how often each rank was the one that minimized the fitness.
sizes = list(indexes.values())
labels = indexes.keys()
fig1, ax1 = plt.subplots(figsize=(10, 6))
plt.suptitle("Frequency of process that minimizes fitness value")
explode = [0.0 for i in range(processes)]
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
        shadow=True, startangle=90)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.legend()
plt.show()
es10/.ipynb_checkpoints/Exercise10-checkpoint.ipynb