text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
import seaborn as sns
import numpy as np
import json
from pprint import pprint
import matplotlib.pyplot as plt
def read_performances(path_prefix, run_dirs, filename, num_goals):
    """Pool success-rate records for one goal count across several runs.

    For each run directory, reads the JSON file at
    ``path_prefix + run_dir + filename`` and concatenates the
    ``success_rate`` list stored under the ``num_goals`` key
    (a string such as "5") into a single flat list.
    """
    pooled = []
    for run_dir in run_dirs:
        record_path = path_prefix + run_dir + filename
        with open(record_path, "r") as record_file:
            records = json.load(record_file)
        pooled.extend(records[num_goals]["success_rate"])
    return pooled
# Five independent training runs; results are pooled across all of them.
run_dirs = ["run_1/", "run_2/", "run_3/", "run_4/", "run_5/"]
# Per-run result files: agent fine-tuned from a pretrained model vs. trained from scratch.
pretrain_filename = "testing_records_pretrain.json"
scratch_filename = "testing_records_scratch.json"
```
# Testing Performances over the user goal portions
## Testing Records for 5 User Goals
```
# Success rates for the 5-user-goal condition, pooled over the five runs.
path_prefix_5_goals = "../5_goals/50_episodes_1_9/"
testing_records_pretrain_5_goals = read_performances(path_prefix_5_goals, run_dirs, pretrain_filename, "5")
testing_records_scratch_5_goals = read_performances(path_prefix_5_goals, run_dirs, scratch_filename, "5")
# Sanity check: number of pooled success-rate samples per condition.
pprint(len(testing_records_pretrain_5_goals))
pprint(len(testing_records_scratch_5_goals))
```
## Testing Records for 10 User Goals
```
# Success rates for the 10-user-goal condition, pooled over the five runs.
path_prefix_10_goals = "../10_goals/50_episodes_1_9/"
testing_records_pretrain_10_goals = read_performances(path_prefix_10_goals, run_dirs, pretrain_filename, "10")
testing_records_scratch_10_goals = read_performances(path_prefix_10_goals, run_dirs, scratch_filename, "10")
# Sanity check: number of pooled success-rate samples per condition.
pprint(len(testing_records_pretrain_10_goals))
pprint(len(testing_records_scratch_10_goals))
```
## Testing Records for 20 User Goals
```
# Success rates for the 20-user-goal condition, pooled over the five runs.
path_prefix_20_goals = "../20_goals/50_episodes_1_9/"
testing_records_pretrain_20_goals = read_performances(path_prefix_20_goals, run_dirs, pretrain_filename, "20")
testing_records_scratch_20_goals = read_performances(path_prefix_20_goals, run_dirs, scratch_filename, "20")
# Sanity check: number of pooled success-rate samples per condition.
pprint(len(testing_records_pretrain_20_goals))
pprint(len(testing_records_scratch_20_goals))
```
## Testing Records for 30 User Goals
```
# Success rates for the 30-user-goal condition, pooled over the five runs.
path_prefix_30_goals = "../30_goals/50_episodes_1_9/"
testing_records_pretrain_30_goals = read_performances(path_prefix_30_goals, run_dirs, pretrain_filename, "30")
testing_records_scratch_30_goals = read_performances(path_prefix_30_goals, run_dirs, scratch_filename, "30")
# Sanity check: number of pooled success-rate samples per condition.
pprint(len(testing_records_pretrain_30_goals))
pprint(len(testing_records_scratch_30_goals))
```
## Testing Records for 50 User Goals
```
# Success rates for the 50-user-goal condition, pooled over the five runs.
path_prefix_50_goals = "../50_goals/50_episodes_1_9/"
testing_records_pretrain_50_goals = read_performances(path_prefix_50_goals, run_dirs, pretrain_filename, "50")
testing_records_scratch_50_goals = read_performances(path_prefix_50_goals, run_dirs, scratch_filename, "50")
# Sanity check: number of pooled success-rate samples per condition.
pprint(len(testing_records_pretrain_50_goals))
pprint(len(testing_records_scratch_50_goals))
```
## Testing Records for 120 User Goals
```
# Success rates for the 120-user-goal condition, pooled over the five runs.
path_prefix_120_goals = "../120_goals/50_episodes_1_9/"
testing_records_pretrain_120_goals = read_performances(path_prefix_120_goals, run_dirs, pretrain_filename, "120")
testing_records_scratch_120_goals = read_performances(path_prefix_120_goals, run_dirs, scratch_filename, "120")
# Sanity check: number of pooled success-rate samples per condition.
pprint(len(testing_records_pretrain_120_goals))
pprint(len(testing_records_scratch_120_goals))
# Stack conditions row-wise: row 0 is an all-zero baseline (0 training goals),
# then one row per goal count. Each row is truncated to the first 189 samples
# so the array is rectangular (a shorter source list would make it ragged --
# the pprint length checks above are the guard for that).
testing_records_pretrain = np.array([[0]*189,
testing_records_pretrain_5_goals[0:189],
testing_records_pretrain_10_goals[0:189],
testing_records_pretrain_20_goals[0:189],
testing_records_pretrain_30_goals[0:189],
testing_records_pretrain_50_goals[0:189],
testing_records_pretrain_120_goals[0:189]])
pprint(testing_records_pretrain.T)
testing_records_scratch = np.array([[0]*189,
testing_records_scratch_5_goals[0:189],
testing_records_scratch_10_goals[0:189],
testing_records_scratch_20_goals[0:189],
testing_records_scratch_30_goals[0:189],
testing_records_scratch_50_goals[0:189],
testing_records_scratch_120_goals[0:189]])
pprint(testing_records_scratch.T)
plt.figure(figsize=(15, 9))
sns.set(font_scale=3)
sns.set_style("whitegrid")
# NOTE(review): sns.tsplot was deprecated in seaborn 0.8 and removed in 0.9;
# this cell requires an older pinned seaborn (or a port to sns.lineplot) -- confirm.
data = testing_records_pretrain.T
ax = sns.tsplot(data=data, ci=[90], color='blue', marker='o', markersize=15, linewidth=3.0, legend=True, condition="transfer learning")
data = testing_records_scratch.T
ax = sns.tsplot(data=data, ci=[90], color='red', marker='v', markersize=15, linewidth=3.0, legend=True, condition ="no transfer learning")
# Relabel x ticks with the number of training user goals per condition (0 = baseline row).
pts_seq = [0, 5, 10, 20, 30, 50, 120]
ax.set(xticklabels=pts_seq)
ax.set_xlabel('Number of training user goals', weight='bold', size=35)
ax.set_ylabel('Success Rate', weight='bold', size=35)
# NOTE(review): sns.plt was removed from modern seaborn; plt.title is the usual call.
sns.plt.title('Testing Performance Over User Goals', weight='bold', size=35)
plt.setp(ax.get_legend().get_texts(), fontsize="35")
# NOTE(review): the filename says 95_confidence but ci=[90] is used above -- confirm which is intended.
plt.savefig("testing_over_user_goal_portions_50_episodes_200_repetitions_95_confidence_color.png", dpi=200, bbox_inches="tight", pad_inches=0)
plt.show()
```
| github_jupyter |
# Exploration: Linear Regression and Classification
A fundamental component of mastering data science concepts is applying and practicing them. This exploratory notebook is designed to provide you with a semi-directed space to do just that with the Python, linear regression, and ML-based classification skills that you either covered in an in-person workshop or through Microsoft Learn. The specific examples in this notebook apply NumPy and pandas concepts in a life-sciences context, but they are applicable across disciplines and industry verticals.
This notebook is divided into different stages of exploration. Initial suggestions for exploration are more structured than later ones and can provide some additional concepts and skills for tackling data-science challenges with real-world data. However, this notebook is designed to provide you with a launchpad for your personal experimentation with data science, so feel free to add cells and run your own experiments beyond those suggested here. That is the power and the purpose of a platform like Jupyter Notebook!
## Setup and Refresher on Notebooks
Before we begin, you will need to import the principal libraries used to explore and manipulate data in Python: NumPy, pandas, and scikit-learn. The cell below also imports Matplotlib, the main visualization library in Python. For simplicity and consistency with prior instruction, industry-standard aliases are applied to these imported libraries. The cell below also runs the `%matplotlib inline` magic command, which instructs Jupyter to display Matplotlib output directly in the notebook. This cell also imports many of the specific functions from scikit-learn that you will need, but feel free to import others as you see fit in the course of your exploration.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
from sklearn import metrics
from sklearn.metrics import r2_score
from sklearn.datasets import load_iris
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
```
As it might have been a while since you last worked with Jupyter notebooks, here is a quick refresher on efficiently using them.
### Notebook Cells
Notebook cells are divided into Markdown text cells and interactive code cells. You can easily recognize code cells by the `[-]:` to the left of them.
Code in a code cell has been executed -- and is thus available for use in other code cells in the notebook -- only if there is a number beside the code cell (for example, `[1]:`).
To run the code in a cell, you can click the **Run** icon at the top left of a code cell or press **`Ctrl` + `Enter`**.
### Instructor's Note
Open-ended questions for students and groups in this notebook are followed by italicized explanations in your copy of this notebook to more easily help you guide group discussions or answer student questions.
### Documentation and Help
Documentation for Python objects and functions is available directly in Jupyter notebooks. In order to access the documentation, simply put a question mark in front of the object or function in a code cell and execute the cell (for example, `?print`). A window containing the documentation will then open at the bottom of the notebook.
On to exploration!
## Section 1: Guided Exploration
For the first part of this workshop, you will step into the role of a data scientist examining some raw biological statistics. The dataset provided is in the `mammals.csv` file, which documents the body weight (in kilograms) and the brain weight (in grams) of 62 mammals. (Source: Rogel-Salazar, Jesus (2015): Mammals Dataset. figshare. Dataset. https://doi.org/10.6084/m9.figshare.1565651.v1. Drawn from Allison, T. and Cicchetti, D. V. (1976). Sleep in mammals: ecological and constitutional correlates. *Science, 194*, 732–734.)
Specifically, your task is to evaluate the relationship between mammalian body weight and brain weight. Even without domain expertise, it seems logical that some relationship should exist (after all, it seems safe to assume that larger animals would have proportionally larger brains). But what is the exact relationship? And how strong is that relationship? Determining those details is useful in fields such as evolutionary biology and doing so will be your job today.
### Import and Investigate the Data
Use `pd.read_csv()` to import `mammals.csv` and perform any other initial investigation you feel necessary in order to become familiar with the dataset. (For a refresher on importing data into pandas, see the Reactors modules on Manipulating and Cleaning Data or pandas or refer to the [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html).)
```
# Import the data from mammals.csv into a DataFrame.
mammals = pd.read_csv('Data/mammals.csv')
# mammals = pd.read_csv('https://raw.githubusercontent.com/microsoft/Reactors/main/workshop-resources/data-science-and-machine-learning/Data_Science_2/bioscience-project/Data/mammals.csv')
mammals.head()
```
### Plot the Data
Often the best way to get a sense of your data is to do so visually. Because you have two numerical features, a scatter plot would be most appropriate for this dataset. pandas DataFrames have [two](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html) [methods](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.scatter.html) that can be used to create scatter plots.
```
# Create a scatterplot of mammalian body and brain masses.
# Students might also opt to use the following, alternative code:
# mammals.plot(x='body', y='brain', kind='scatter');
# (The trailing semicolon suppresses the Axes repr in the notebook output.)
mammals.plot.scatter(x='body', y='brain');
```
Don't worry: your scatter plot should be hard to read. Most of the values are clustered at the tiny end with two very large mammals in particular (the Asian and African elephants) skewing the scale.
### Transform Your Data
Because of the decidedly non-linear dispersion of mammalian size, you will need to transform your data in order to more clearly see the relationships in it.
**Group or Partner Discussion**
- Which feature in the dataset should you transform? Should you transform both? In either case, why? What do you predict you might see after the transformation?
- *Because both body weight and brain weight exhibit the same exponential growth in the dataset, students will need to transform both features.*
- What transformation do you think you should use? What key words do you see in the documentation for the pandas plot method that might give a hint?
- *Because the increase in body weight and brain weight of the mammals in this dataset is exponential, students should take the logarithms of both features. And while any base logarithm will do, interpretation will likely be easier for students if they use base-10 logarithms.*
```
# Create a scatterplot of mammalian body and brain masses with logarithmic
# scaling applied to both axes (loglog=True sets both axes to log scale).
# Students might also opt to use the following, alternative code:
# mammals.plot(x='body', y='brain', kind='scatter', logx=True, logy=True);
# mammals.plot.scatter(x='body', y='brain', logx=True, logy=True);
mammals.plot.scatter(x='body', y='brain', loglog=True);
```
### Fit and Plot a Linear Regression on Your Data
Transformed, the data presents an elegant linear relationship; just looking at the scatterplot, the line practically draws itself. But how tight is that linear relationship? Put another way, how far away from the line of best fit are the points of your dataset on average? Fit and plot a simple linear regression model for the data to find out. If you are unsure about how to do this, refer to the Reactors module on Machine Learning Models for a reminder. You will need to perform the following steps to fit the model:
1. Split your dataset into the predictor variable (`X` is a common name for this variable) and the response variable (`y` is a common variable name for this). (Remember to transform your data at this stage in the same way that you transformed it when you plotted it.)
2. Further divide your data into training and test subsets. (There is a scikit-learn function for this.)
3. Create the linear regression model object.
4. Fit the model to the training data.
**Note:** You will get an error when you try to fit the data. Refer to https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.to_numpy.html and to https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.reshape.html for documentation on how to reshape your data.
```
# You will need to reshape and transform your predictor (body) in order to fit your model.
X = np.log(mammals['body']).to_numpy().reshape(-1,1)
# Use brain as your response.
y = np.log(mammals['brain'])
# Split the predictor and response into training and test subsets.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# Create a linear-regression object.
reg = LinearRegression()
# Fit the linear-regression model on the training data.
reg.fit(X_train, y_train)
```
Now plot your model with your data. Run [`matplotlib.pyplot.plot`](https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html) along with your transformed scatter plot of the data to do so. (Using a different color for the model will help you see it more clearly.)
**Group or Partner Discussion**
- Does your model plot as you expected it to? Does your linear model show up as a line? Remember that you are modeling log-transformed data; what complementary transformations do you need to run on it to produce the linear plot you expect?
- *If students are stuck, you can explain that, becuase they have transformed their $X$ and $y$ values by taking their logarithms, they must plot the predictors and predicitons values as exponents in order to produce a straight line (as shown in the code cell below).*
**Note:** Python has functions that can perform the necessary transformations and the dataset is small enough that you will likely not notice a lag in performance by using the native Python functions. That said, it is a good habit to develop to use the NumPy ufuncs for when you deal with larger datasets. For a reminder on these, refer to the Reactors moduls on NumPy.
```
# Plot the body and brain masses with logarithmic scaling applied to both axes.
mammals.plot.scatter(x='body', y='brain', logx=True, logy=True);
# Remember to account for the transformation of your data when you plot your model:
# np.exp undoes the np.log applied when X and y were built, so the fitted model
# plots as a straight line against the untransformed data on log-scaled axes.
plt.plot(np.exp(X), np.exp(reg.predict(X)), color='red');
```
If your transformations were successful, you should see a gratifyingly tight line with your points of data closely clustered about it. But looks can be subjective. How good is the relation in reality? Use the [$R^2$ score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html) to find out.
```
# Print out the R-squared value for your model on the held-out test set.
r2_score(y_test, reg.predict(X_test))
```
## Section 2: Intermediate Exploration
As your investigation showed, there is a profound relationship between the size of mammals and the size of their brains: your $R^2$ score indicates that almost 88% of the proportional, average weight of a mammal's brain can be explained by its body weight alone. (The task of evolutionary biologists is to then examine what accounts for the other 12% of the variance.) A good model, to be sure, but in practice, how good is 'good'? Plot the linear model against the untransformed data to see this more clearly.
To create this plot, you will again use the pandas `plot` (or `plot.scatter`) method in conjunction with `matplotlib.pyplot.plot`. However, you will need to create Numpy array to supply inputs along the x-axis for `matplotlib.pyplot.plot`. (See the Reactors Numpy module for a refresher on how to do that.) You will also need to transform that array when you input it to you model and further transform the output of your model to plot it accurately.
**Group or Partner Discussion**
- What errors are you getting? Do you need to reshape any of the data? Are zero values causing trouble for any of your transformations? How should you best deal with those?
- *Students will likely encounter this error when they attempt this visualization: `ValueError: Expected 2D array, got 1D array instead`. In order to avoid this, they will need to use the `reshape(-1,1)` method as shown in the code cell below.*
```
# Plot the body and brain masses without the log transformations.
mammals.plot.scatter(x='body', y='brain')
# Create an array of x-axis values to use in plotting the model.
x_array = np.arange(1, max(mammals['body']))
# Remember in plotting this that your model was fitted using transformed data:
# log-transform the inputs for the model, then exponentiate its predictions.
plt.plot(x_array, np.exp(reg.predict(np.log(x_array).reshape(-1,1))), color='red');
# Mathematically inclined students might also come up with this alternative code:
# plt.plot(x_array, np.exp(np.add(np.multiply(np.log(x_array), reg.coef_), reg.intercept_)), color='red');
```
Even with a good model, the noise inherent in real data can mean that experimental values still diverge from your predicted ones by more than 20% in some cases. For this reason, it can be valuable to remember the maxim of statistician George Box that "[all models are wrong, but some are useful](https://en.wikipedia.org/wiki/All_models_are_wrong)."
**Note:** Another way to approach this challenge is to deal with the model coefficient and intercept directly. Recall that linear models take the form of `Y`$ = $`intercept`$ + $`coefficient`$ * $`x`. Check the documentation for your model object to see how to access those values in your model.
```
# Print out the coefficient (slope) and intercept of the fitted log-log model.
print(reg.coef_, reg.intercept_)
```
### When Linear Regression is Less Helpful
As you develop your data-science skills, it is natural to want to apply the new tools that you learn to use on a variety of problems. Thus when learning about new algorithms, it can sometimes be as valuable to learn about when not to apply them as when to use them.
For example, consider a case where linear regression might not provide the insight that you would like. To investigate this, import and plot the `lynx.csv` dataset, which contains annual numbers of lynx trappings for 1821–1934 in the Mackenzie River area of Canada. (Source: Campbell, M. J. and Walker, A. M. (1977). A Survey of statistical work on the Mackenzie River series of annual Canadian lynx trappings for the years 1821–1934 and a new analysis. *Journal of the Royal Statistical Society series A, 140*, 411–431. doi: [10.2307/2345277](http://doi.org/10.2307/2345277).)
```
# Import the data from lynx.csv and plot it.
# Note: The plot of this data should produce a cyclical pattern.
lynx = pd.read_csv('Data/lynx.csv')
# Alternative (matches the mammals cell above): read the same file from GitHub.
# The original code executed both reads, silently discarding the local one.
# lynx = pd.read_csv('https://raw.githubusercontent.com/microsoft/Reactors/main/workshop-resources/data-science-and-machine-learning/Data_Science_2/bioscience-project/Data/lynx.csv')
lynx.plot(x="Year", y="Lynx");
```
Your plot should show an interesting cyclical pattern in the data. A high peak in lynx numbers is followed by three smaller peaks every 9-10 years and then the pattern repeats itself. Ample food supply enables Mackenzie River lynx to reproduce to high numbers, after which the population plummets due to lack of food. The food supply gradually builds back up, enabling a repeat of the boom-and-bust population growth of the lynx.
**Group or Partner Discussion**
- Is a linear model appropriate for data like this? Why or why not? What do you suspect you might see if you attempt to fit a linear model to this data?
- *To an extent, the answer to this question rests on defining what is appropriate. If the goal is to predict future lynx numbers, the following exercise will illustrate why a linear model is inappropriate for this task. If the goal is to evaluate the overall trend in the lynx population, a linear model can be appropriate.*
Go ahead and fit and plot this data as you did for the mammals dataset above.
```
# Plot the lynx data and the fitted linear model.
# Remember to reshape your data in order to fit the regression model.
X = lynx['Year'].to_numpy().reshape(-1,1)
y = lynx['Lynx']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
lr = LinearRegression()
lr.fit(X_train,y_train)
lynx.plot(x="Year", y="Lynx")
plt.plot(X, lr.predict(X), color='red');
```
Now check the $R^2$ score for this model.
```
# Print out the R-squared score for this model on the held-out test set.
# Bug fix: score the lynx model (lr) fitted in the previous cell, not the
# mammals model (reg) from earlier in the notebook.
r2_score(y_test, lr.predict(X_test))
```
**Group or Partner Discussion**
- What does this $R^2$ score mean? What interpretation might it carry? (This [essay](https://web.maths.unsw.edu.au/~adelle/Garvan/Assays/GoodnessOfFit.html) from the University of New South Wales might help your discussion.)
- *The $R^2$ score quantifies the proportion of variance explained by a model. For this reason, a model that just predicted the mean value of the training set regardless of the input would have an $R^2$ score of zero: it encapsulates none of the variance in the linear relationship being modeled.*
- *In the example above, the negative $R^2$ score means that the model does a worse job than just predicting the mean value. In other words, while the linear model does show an up trend in the number of lynx, year-to-year it does a worse job of predicting how many lynx there will be than just predicting the yearly average.*
- Even if your $R^2$ score cannot explain the proportion of variance explained by your model, what information might it nonetheless provide?
- *Even though the linear model is actually worse at prediction than just drawing a horizontal line at the mean, the slope of the model gives an indication of a rising trend in the lynx population.*
### Classification
Some practitioners say that 70% of problems in data science involve classification. This figure is probably higher still in life sciences. In order to explore ML-based classification, let's return to a dataset you already encountered in the Reactors Manipulating and Cleaning Data module. Import the Python scikit-learn library and use an iconic dataset that every data scientist has seen hundreds of times: British biologist Ronald Fisher's *Iris* data set used in his 1936 paper "The use of multiple measurements in taxonomic problems."
You have already imported the scikit-learn library containing the `iris` dataset; you can access it using the `load_iris()` function. (Look at the `?load_iris` documentation for more information about this function; the data and target information is stored separately.) You might also find it helpful to create a DataFrame with the iris information in order to investigate it. (Check the Reactors pandas module for a refresher on how to load data into DataFrames.)
```
# Load the Fisher iris data.
iris = load_iris()
X = iris.data
y = iris.target
# Now create a DataFrame of the data.
iris_df = pd.DataFrame(data=iris['data'], columns=iris['feature_names'])
```
In the Reactors Machine Learning module, you used the logistic regression and decision tree algorithms to classify observations into two categories, but there are many other kinds of classification algorithms that you will explore in this module.
#### $K$-means Clustering
$K$-means clustering is an example of unsupervised machine learning. Rather than having to train a model, the $k$-means algorithm examines all of the data to make a determination of which category to assign a particular observation. All that you have to do is supply the algorithm with the number of categories into which you want observations classified.
Based on the number of species in the `iris` dataset, what is the most appropriate number of clusters to submit to the algorithm? Fit a $k$-means model for that number of clusters and measure its accuracy. (Consult the ?metrics.accuracy_score documentation for information on how to do this.)
```
# Create a k-means clustering object with 3 clusters.
kmeans = KMeans(n_clusters=3, random_state=0)
# Fit the k-means model.
kmeans = kmeans.fit(X)
# Find predicted values for all values of X.
y_pred = kmeans.predict(X)
# Print out the accuracy score for the fitted model.
metrics.accuracy_score(y, y_pred)
```
**Group or Partner Discussion**
- Is this accuracy surprising? Try different values for the `random_state` parameter in the `KMeans` function (such as 0, 1, 2). Why the large disparities in accuracy based on the random state of the algorithm? (In the Individual Exploration section below, you can explore some of the structure of the `iris` dataset that can help generate these disparities.)
- *The $k$-means algorithm can converge on different centroids the clusters depending on the initial centroid seeds. The disparities in accuracy for different `random_state` parameters reflects that some clusters are more accurate than others for classification.*
#### $K$-nearest Neighbors
A classification algorithm that might work better on the `iris` dataset is the $k$-nearest neighbors algorithm (abbreviated $k$-NN). It works by comparing an observation to its $k$ nearest training observations in feature space (where $k$ is a parameter supplied by the user). $k$-NN is a supervised algorithm, so it does need to be supplied with a *response*--the correct classifications that you are looking for (often referred to as $y$)--in order to classify new observations. Fit a $k$-NN model and use `metrics.accuracy_score` to examine its accuracy.
```
# Split the data into training and test subsets.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Create a 5-nearest neighbors object.
knn = KNeighborsClassifier(n_neighbors=5)
# Fit the model.
knn.fit(X_train, y_train)
# Find predicted values for all values of X.
y_pred = knn.predict(X_test)
# Print out the accuracy score for the fitted model.
metrics.accuracy_score(y_test, y_pred)
```
**Group or Partner Discussion**
- Try supplying different `random_state` parameters to the `train_test_split` function (such as `random_state=0` and `random_state=2`). What causes the differences in accuracy?
- *The `random_state` parameter determines on which subset of data points will be used to train the $k$-NN model. Some students might quickly grasp that the $k$-NN algorithm does not necessarily need a fitting step: it can consider the entirety of the data set to classify data points. This is true but can be enormously expensive computationally speaking. For this reason, actual implementations of $k$-NN ofter use data structures such as [$k$-d trees](https://en.wikipedia.org/wiki/K-d_tree) and [ball trees](https://en.wikipedia.org/wiki/Ball_tree) to speed up classification; these data structures can be fitted once and used again, which is what happens with the `fit` method.*
- *Interested students or groups can also refer to this post on Stack Exchange for more information: https://stats.stackexchange.com/questions/349842/why-do-we-need-to-fit-a-k-nearest-neighbors-classifier*
One way to mitigate the luck of the draw inherent in training/test splitting is to do so repeatedly. Testing your models in this way is called cross validation (or $k$-fold cross validation after the number of times you resplit the data, the folds). Scikit-learn has a [good page](https://scikit-learn.org/stable/modules/cross_validation.html) in its documentation on the concept.
Use the `cross_val_score` model to perform a 10-fold cross validation on your $k$-NN model and take the mean of the accuracy scores.
```
# Run 10-fold cross-validation on the 5-nearest neighbor model you have already fitted.
scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy')
# Print out the mean score for the cross-validation.
scores.mean()
```
## Section 3: Individual Exploration
Here are some ideas to continue your exploration of classification and predictive ML algorithms:
- What number of nearest neighbors provides the highest average accuracy for your $k$-NN model? While it's true that with the $k$-NN algorithm you don't have to worry about feature engineering or selection, the selection of $k$ can play a big role in the algorithm's accuracy for you dataset and must be tuned. The better that you can tune your algorithm for accuracy, the better you can help domain experts solve problems, be they correctly identifying irises (in this example) or sequencing genes in bioinformatics or correctly identifying diseased cells in a medical application.
(Hint: Try referring back to the Reactors Python module for a refresher on loops and data structures to see how you could automate this comparison over many several different values of $k$.)
```
# Create a list of values of k from 1 to 25.
k_range = list(range(1, 26))
# Create a dictionary of k values and associated mean cross-validation accuracy for each value of k.
k_dict = {}
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy')
k_dict[k] = scores.mean()
# Find and print out the value of k that produced the highest accuracy and what that score is.
score_max = max(k_dict, key=k_dict.get)
print(score_max, k_dict[score_max])
```
- Visualize the `iris` dataset to see why the $k$-means algorithm produced the accuracy that it did.
The `iris` dataset has four features, which means that a true scatter plot of all of the observations in it would require four dimensions, which is impossible to visualize directly. However, you can use a technique called principle component analysis (PCA) to reduce this to three dimensions with minimal loss of information. (Don't worry about the details of how PCA works; you will cover it in another session in the Reactors PCA module.)
To visualized a "flattened" `iris` dataset, you will need to fit a PCA transformation with the data from the dataset. You can get some ideas about what code to use from this [page](https://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html) from the scikit-learn documentation.
```
# Perform the PCA dimension-reduction on the iris data.
X_reduced = PCA(n_components=3).fit_transform(iris.data)
# Create the 3D plot of the "flattened" iris data set.
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y, cmap='viridis', edgecolor='k', s=40)
```
With this visualization in hand, why might $k$-means clustering not be a good classification algorithm to use with the `iris` dataset? (This [Wikipedia article](https://en.wikipedia.org/wiki/K-means_clustering) on $k$-means clustering might help your discussion.)
- *In this lower-dimensional projection of the data, the three types of iris appear in parallel bands, which also play a role with the $k$-means algorithm being able to cleanly draw boundaries between them in higher dimensions.*
| github_jupyter |
```
import pandas as pd
from collections import Counter
from langdetect import detect
import langdetect
import numpy as np
import importlib
current_dir = os.getcwd()
%cd ..
import textmining.text_miner
import textmining.topic_modeler as tm
importlib.reload(textmining.text_miner)
importlib.reload(textmining.topic_modeler)
os.chdir(current_dir)
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
```
# Mallet LDA train
```
# Load the preprocessed English README corpus, indexed by file id.
data = pd.read_csv('../../data/processed/final_repo_english_whatwhy.csv', index_col='file_id')
# Preprocessing placeholder tokens plus generic web words to drop from topics.
# NOTE(review): 'abstr_image' and 'abstr_number' appear twice; harmless but redundant.
stop_words_extension = ['abstr_hyperlink', 'https', 'abstr_number', 'abstr_image',
                        'abstr_mailto', 'abstr_code_section', 'abstr_image', 'abstr_number', 'http', 'www', 'please']
mallet_lda_modeler = tm.topic_modeler(data['content_clean'].values.tolist(),
                                      stopwords_extension= stop_words_extension)
len(data)
```
## Plotting coherence for each number of topics K
```
# Sweep the number of topics from `start` up to (but excluding) `limit`,
# computing a coherence score per candidate model.
start= 40#125## 85 # 45
step=1
limit=146 # not included
_, coherence_values = mallet_lda_modeler.compute_coherence_values(limit, start=start, step=step, model_type='mallet')
## Saving results
# The modeler keeps the best (highest-coherence) model; record its topic count.
n = mallet_lda_modeler.num_topics
print('best model has {} topics'.format(n))
coherence_vals_df = pd.DataFrame({'Number of Topics':range(start, limit, step), 'Coherence Value':coherence_values} )
coherence_vals_df.to_csv('../../data/feature_extraction/lda/ldamallet_coherence_values_{}_{}.csv'.format(start, (limit-1)))
def save_model_info(modeler):
    """Persist the fitted topic model's main artifacts to CSV and return them.

    Writes (and returns, in this order): the per-document dominant-topic table,
    the topic->top-terms table, and the full document-topic matrix. Output file
    names embed the model's number of topics.
    """
    n = modeler.num_topics
    # Renamed locals from nmf_* to lda_*: this helper is used with the Mallet
    # LDA model, and the old prefix was misleading.
    print('get_doc_dominant_topic')
    lda_readable_document_topics_df = modeler.get_doc_dominant_topic(save_path= "../../data/feature_extraction/lda/ldamallet_readable_document_topics_{}.csv".format(n))
    print('get_topics_terms')
    lda_topic_words_df = modeler.get_topics_terms(save_path= "../../data/feature_extraction/lda/ldamallet_topic_words_{}.csv".format(n))
    print('get_doc_topic_matrix')
    lda_document_topics_df = modeler.get_doc_topic_matrix(save_path= "../../data/feature_extraction/lda/ldamallet_document_topics_{}.csv".format(n))
    return lda_readable_document_topics_df, lda_topic_words_df, lda_document_topics_df
lda_readable_document_topics_df, lda_topic_words_df, lda_document_topics_df = save_model_info(mallet_lda_modeler)
```
## Topics Exploration
#### Topics Keywords
```
# Top-10 keywords for the first 40 topics as (topic id, weighted keyword string) rows.
topics_with_perc_df = pd.DataFrame(mallet_lda_modeler.model.print_topics(num_topics=40, num_words=10),
                                   columns=['Topic', 'Keywords'])
#topics_with_perc_df.reset_index().to_csv('../../data/feature_extraction/lda_topic_words_45.csv')
# Unformatted view: list of (topic id, [(word, weight), ...]) tuples.
mallet_lda_modeler.model.show_topics(num_topics=45, num_words=10, formatted=False)
```
#### Docs dominant Topics
#### Topic Distribution - See below (Word cloud for topic top words)
```
# How many documents have each topic as their dominant topic.
topic_dstr = mallet_lda_modeler.get_topic_distr()
topic_dstr['Dominant_Topic'].value_counts().plot(kind='bar')
# NOTE(review): `n` is the best-model topic count computed in the training cell above.
topic_dstr.to_csv('../../data/feature_extraction/lda/ldamallet_topics_distribution_{}.csv'.format(n))
```
#### Word Cloud for each Topic
```
## word cloud
from nltk.corpus import stopwords
from matplotlib import pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import matplotlib.colors as mcolors
def show_word_cloud(model, topics, limit=10, stopwords_extension=(), topic_index=range(1, 11)):
    """Draw one word cloud per topic on a 2-column grid of subplots.

    Args:
        model: unused here; kept for interface compatibility with callers.
        topics: list of (topic id, [(word, weight), ...]) tuples as returned
            by gensim's ``show_topics(formatted=False)``.
        limit: maximum number of topics to draw.
        stopwords_extension: extra stopwords added to NLTK's English list.
            (Changed from a mutable ``[]`` default to a tuple.)
        topic_index: labels used in each subplot title.
    """
    cols = [color for name, color in mcolors.TABLEAU_COLORS.items()]  # 10 colors, cycled per topic
    stop_words = stopwords.words('english')
    stop_words.extend(stopwords_extension)
    cloud = WordCloud(stopwords=stop_words,
                      background_color='white',
                      width=2500,
                      height=1800,
                      max_words=10,
                      colormap='tab10',
                      # Late-bound on purpose: picks up the loop index `i` at
                      # generate time so each topic gets its own color.
                      color_func=lambda *args, **kwargs: cols[i % len(cols)],
                      prefer_horizontal=1.0)
    # FIX: ceil division so an odd `limit` still gets enough axes
    # (the old int(limit/2) dropped a row, e.g. limit=5 only showed 4 topics).
    n_rows = (limit + 1) // 2
    fig, axes = plt.subplots(n_rows, 2, figsize=(10, 10), sharex='all', sharey='all')
    for i, ax in enumerate(axes.flatten()):
        # FIX: guard against running past the available topics instead of the
        # old dead `if i > limit` check (which could never trigger and left
        # topics[i] open to IndexError when axes outnumber topics).
        if i >= limit or i >= len(topics):
            ax.axis('off')
            continue
        fig.add_subplot(ax)
        topic_words = dict(topics[i][1])
        cloud.generate_from_frequencies(topic_words, max_font_size=300)
        plt.gca().imshow(cloud)
        plt.gca().set_title('Topic ' + str(topic_index[i]), fontdict=dict(size=16))
        plt.gca().axis('off')
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.axis('off')
    plt.margins(x=0, y=0)
    plt.tight_layout()
    plt.show()
# Render all 35 topics, ten per figure.
topics = LDA_MODEL.show_topics(num_topics=35, num_words=10, formatted=False)
show_word_cloud(LDA_MODEL, topics[:10], limit=10, stopwords_extension=stop, topic_index=range(0, 10))
show_word_cloud(LDA_MODEL, topics[10:20], limit=10, stopwords_extension=stop, topic_index=range(10, 20))
show_word_cloud(LDA_MODEL, topics[20:30], limit=10, stopwords_extension=stop, topic_index=range(20, 30))
# FIX: these are topics 30-34, so label them range(30, 35) — the original
# range(20, 35) repeated the previous figure's labels.
show_word_cloud(LDA_MODEL, topics[30:35], limit=5, stopwords_extension=stop, topic_index=range(30, 35))
```
| github_jupyter |
# Name Classifier
http://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html
```
import glob
import unicodedata
import string
# Return all paths matching a glob pattern, e.g. 'data/names/*.txt'.
def findFiles(path): return glob.glob(path)
print(findFiles('data/names/*.txt'))
# Model vocabulary: ASCII letters plus a few punctuation characters seen in names.
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """ASCII-fold `s`: NFD-decompose, drop combining marks, keep vocab letters only."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = (ch for ch in decomposed
            if unicodedata.category(ch) != 'Mn' and ch in all_letters)
    return ''.join(kept)
print(unicodeToAscii('Ślusàrski'))
# Build the category_lines dictionary, a list of names per language
category_lines = {}
all_categories = []
# Read a file and split into lines
def readLines(filename):
    # NOTE(review): the file handle is never closed; `with open(...)` would be
    # safer, though behavior is unchanged for this short script.
    lines = open(filename, encoding='utf-8').read().strip().split('\n')
    return [unicodeToAscii(line) for line in lines]
for filename in findFiles('data/names/*.txt'):
    # Language name = file basename without extension.
    # NOTE(review): splitting on '/' assumes POSIX paths; os.path.basename
    # would be portable.
    category = filename.split('/')[-1].split('.')[0]
    all_categories.append(category)
    lines = readLines(filename)
    category_lines[category] = lines
n_categories = len(all_categories)
print(n_categories)
import torch
# Find letter index from all_letters, e.g. "a" = 0
def letterToIndex(letter):
    # str.find returns -1 for unknown letters; inputs here are pre-filtered
    # by unicodeToAscii, so every letter is in the vocabulary.
    return all_letters.find(letter)
# Just for demonstration, turn a letter into a <1 x n_letters> Tensor
def letterToTensor(letter):
    # One-hot row vector over the letter vocabulary.
    tensor = torch.zeros(1, n_letters)
    tensor[0][letterToIndex(letter)] = 1
    return tensor
# Turn a line into a <line_length x 1 x n_letters>,
# or an array of one-hot letter vectors
def lineToTensor(line):
    # Middle dimension of 1 is the batch dimension expected by the RNN.
    tensor = torch.zeros(len(line), 1, n_letters)
    for li, letter in enumerate(line):
        tensor[li][0][letterToIndex(letter)] = 1
    return tensor
print(letterToTensor('J'))
print(lineToTensor('Jones').size())
import torch.nn as nn
from torch.autograd import Variable
class RNN(nn.Module):
    """Minimal character-level RNN: one linear layer each for the next hidden
    state and for the (log-softmaxed) class scores."""

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # Both layers consume the concatenation [input, previous hidden state].
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        """One time step: returns (log-probabilities, new hidden state)."""
        combined = torch.cat((input, hidden), 1)
        new_hidden = self.i2h(combined)
        scores = self.softmax(self.i2o(combined))
        return scores, new_hidden

    def initHidden(self):
        # Fresh all-zero hidden state for the start of a sequence.
        return Variable(torch.zeros(1, self.hidden_size))
n_hidden = 128
rnn = RNN(n_letters, n_hidden, n_categories)
# Smoke test: push a single character of an encoded name through the
# untrained network.
input = Variable(lineToTensor('Albert'))
hidden = Variable(torch.zeros(1, n_hidden))
output, next_hidden = rnn(input[0], hidden)
print(output)
def categoryFromOutput(output):
    """Return (category name, category index) of the highest-scoring class."""
    _, best = output.data.topk(1)  # .data unwraps the Variable to a plain Tensor
    idx = best[0][0]
    return all_categories[idx], idx
print(categoryFromOutput(output))
import random
def randomChoice(l):
    """Return a uniformly random element of the non-empty sequence `l`."""
    return l[random.randrange(len(l))]
def randomTrainingExample():
    """Sample a random (category, name) pair plus the tensors training needs."""
    category = randomChoice(all_categories)
    line = randomChoice(category_lines[category])
    # Target is the category's index; input is the one-hot-encoded name.
    category_tensor = Variable(torch.LongTensor([all_categories.index(category)]))
    line_tensor = Variable(lineToTensor(line))
    return category, line, category_tensor, line_tensor
# Show a few random samples to sanity-check the sampling pipeline.
for i in range(10):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    print('category =', category, '/ line =', line)
## Training
```
criterion = nn.NLLLoss()  # negative log-likelihood pairs with the LogSoftmax output
learning_rate = 0.005 # If you set this too high, it might explode. If too low, it might not learn
def train(category_tensor, line_tensor):
    """One manual-SGD step on a single (name, language) example.

    Returns the network's final output and the scalar loss value.
    """
    state = rnn.initHidden()
    rnn.zero_grad()
    # Feed the name one character at a time, threading the hidden state through.
    seq_len = line_tensor.size()[0]
    for step in range(seq_len):
        output, state = rnn(line_tensor[step], state)
    loss = criterion(output, category_tensor)
    loss.backward()
    # Add parameters' gradients to their values, multiplied by learning rate
    for param in rnn.parameters():
        param.data.add_(-learning_rate, param.grad.data)
    return output, loss.data[0]
import time
import math
n_iters = 100000
print_every = 5000  # console progress interval
plot_every = 1000   # loss-averaging window for the learning curve
# Keep track of losses for plotting
current_loss = 0
all_losses = []
def timeSince(since):
    """Elapsed wall-clock time since the timestamp `since`, as 'Xm Ys'."""
    elapsed = time.time() - since
    minutes = math.floor(elapsed / 60)
    seconds = elapsed - minutes * 60
    return '%dm %ds' % (minutes, seconds)
start = time.time()
for iter in range(1, n_iters + 1):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    output, loss = train(category_tensor, line_tensor)
    current_loss += loss
    # Print iter number, loss, name and guess
    if iter % print_every == 0:
        guess, guess_i = categoryFromOutput(output)
        # ✓ marks a correct guess; otherwise show the true category in parentheses.
        correct = '✓' if guess == category else '✗ (%s)' % category
        print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct))
    # Add current loss avg to list of losses
    if iter % plot_every == 0:
        all_losses.append(current_loss / plot_every)
        current_loss = 0
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Learning curve: average loss per plot_every-iteration window.
plt.figure()
plt.plot(all_losses)
plt.show()
```
## Evaluating the Results
```
# Keep track of correct guesses in a confusion matrix
# Rows = true category, columns = predicted category.
confusion = torch.zeros(n_categories, n_categories)
n_confusion = 10000  # number of random samples used to fill the matrix
# Just return an output given a line
def evaluate(line_tensor):
    """Forward-pass one encoded name through the RNN (no gradient step)."""
    state = rnn.initHidden()
    for pos in range(line_tensor.size()[0]):
        result, state = rnn(line_tensor[pos], state)
    return result
# Go through a bunch of examples and record which are correctly guessed
for i in range(n_confusion):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    output = evaluate(line_tensor)
    guess, guess_i = categoryFromOutput(output)
    category_i = all_categories.index(category)
    confusion[category_i][guess_i] += 1
# Normalize by dividing every row by its sum
# NOTE(review): a category never drawn in the 10k samples would yield a 0/0 row.
for i in range(n_categories):
    confusion[i] = confusion[i] / confusion[i].sum()
# Set up plot
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion.numpy())
fig.colorbar(cax)
# Set up axes
# The leading '' aligns labels with matshow's tick positions.
ax.set_xticklabels([''] + all_categories, rotation=90)
ax.set_yticklabels([''] + all_categories)
# Force label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
# sphinx_gallery_thumbnail_number = 2
plt.show()
```
| github_jupyter |
# Tensorflowing (small stream)
```
%matplotlib inline
import tensorflow as tf
from skimage import data
from matplotlib import pyplot as plt
import numpy as np
# create a tf Tensor that holds 100 values evenly spaced from -3 to 3
x = tf.linspace(-3.0, 3.0, 100)
print(x)
# create a graph (holds the theory of the computation)
# NOTE(review): TF1-style graph/session API — tensors are symbolic until run.
g = tf.get_default_graph()
[op.name for op in g.get_operations()]
sess = tf.Session()
computed_x = sess.run(x)
print(computed_x)
sess.close()
```
# Vermessung der Welt
```
mean = 0
sigma = 1.0
# Gaussian probability density over x:
#   exp(-(x - mean)^2 / (2 * sigma^2)) / (sigma * sqrt(2 * pi))
# NOTE(review): tf.neg was renamed tf.negative in TF 1.0 — this cell assumes a
# pre-1.0 TensorFlow; confirm the environment before rerunning.
z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                   (2.0 * tf.pow(sigma, 2.0)))) *
     (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
sess = tf.Session()
graph = sess.run(z)
plt.plot(graph)
# what exactly is this? the shape dimensions of the gaussian curve
# (number of samples along the curve, i.e. 100)
ksize = z.get_shape().as_list()[0]
ksize
# Outer product of the 1-D curve with itself -> a 2-D Gaussian kernel.
z_2d = tf.matmul(tf.reshape(z, [ksize, 1]), tf.reshape(z, [1, ksize]))
# run the session with the new operations (graph?) fed to it
graph_2d = sess.run(z_2d)
# display the 2D gaussian as an image
plt.imshow(graph_2d)
```
# Creating a Collection
## at first, a detour
http://sipi.usc.edu/database/database.php/n/database.php?volume=misc
```
dir(data)
# or do the tab trick
# get a list of all the actual imgs available as attributes
img_list = [i for i in dir(data) if not i.startswith("_")]
non_imgs = ['use_plugin', 'deprecated', 'binary_blobs', 'data_dir', 'imread', 'load', 'lena']
for ni in non_imgs:
    img_list.remove(ni)
img_list
# Show each sample image in turn (successive imshow calls overwrite the same axes).
for i in img_list:
    # FIX: getattr instead of eval — same effect, no string execution.
    img = getattr(data, i)().astype(np.float32)
    plt.imshow(img, cmap="gray")
```
okay, that's the end of the detour. :) haha, I just did things one shouldn't do. back to normality.
## now the real thing
```
img = data.moon().astype(np.float32)
plt.imshow(img, cmap="gray")
img.shape
# seems we need a 4D tensor for fun times, so let's do it
# conv2d expects [batch, height, width, channels].
img_4d = tf.reshape(img, [1, img.shape[0], img.shape[1], 1])
img_4d.get_shape()
# reshaping the kernel works somewhat differently: [Kh, Kw, C, NK]
kernel_height, kernel_width = ksize, ksize
channels, num_kernels = 1, 1
z_4d = tf.reshape(z_2d, [kernel_height, kernel_width, channels, num_kernels])
print(z_4d.get_shape().as_list())
# Convolve the image with the 2-D Gaussian kernel (a blur);
# SAME padding keeps the output the same height/width as the input.
convolved = tf.nn.conv2d(img_4d, z_4d, strides=[1, 1, 1, 1], padding="SAME")
res_4d = sess.run(convolved)
print(res_4d.shape)
# matplotlib can't visualize 4d images, so we need to convert it back to original height and width
plt.imshow(np.squeeze(res_4d), cmap="gray")
```
^ it seems that I surprised the moon! : o
| github_jupyter |
# Configuring Sonnet's BatchNorm Module
This colab walks you through Sonnet's BatchNorm module's different modes of operation.
The module's behaviour is determined by three main parameters: One constructor argument (```update_ops_collection```) and two arguments that are passed to the graph builder (```is_training``` and ```test_local_stats```).
```python
bn = BatchNorm(update_ops_collection)
bn(inputs, is_training, test_local_stats)
```
The following diagram visualizes how different parameter settings lead to different modes of operation. Bold arrows mark the current default values of the arguments.
```
#@title Decision tree
%%svg
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink" width="971px" height="384px" version="1.1" content="<mxfile userAgent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36" version="6.7.3" editor="www.draw.io"><diagram name="Page-1">7Vptc5s4EP41fLQHkMHxx9pNrh96mZtJb3r3ySODDLoKxAnhl/76roR4C7bHl5CEXOuZTORHy2q1++yuZNtCq+Twm8BZ/DsPCbNcOzxY6KPlwstz4Z9CjiUyd+wSiAQNS8hpgAf6nRiwEitoSPKOoOScSZp1wYCnKQlkB8NC8H1XbMtZd9UMR6QHPASY9dGvNJRxid64foN/IjSKq5Udf1HObHDwLRK8SM16lou2+lVOJ7jSZTaaxzjk+xaEbi20EpzLcpQcVoQp31ZuK5+7OzNb2y1IKq95wDNmyGO1dW08UdO2hZaxTBgMHRiSA5V/KXjqmXd/m5l/iJRHE0RcSA4QFzLmEU8x+8x5VmkIwclmpUbgtkGXQSF2em0lnkvBv9X+R4CUxio1Z7droJwXIjBShosSi4gYKVS7GihMeEKkOIKIIAxLuutqx4ZLUS3X+BMGxqWn3TsvVewwK4zSLwJGj30uyUF2nS1ITr/jjRZQccg4TaU2xFta3kdAMKNRCkAAWycCgB0RkgKFP5iJhIahdirDG8KWNTFXnHGh162oiZZbnso7nFCm8nUFvqOg0rXvyd5MVmEzPlFrkcOpVDQ2NwxvB8c77XajaGJPnTlC5WPHjvKrA2OU/6Hc1RLh220OsX8cudqGq4Lp9+L2Hwn9gqk1WGbMXikzbnqZcYdZ/t5S41nZ4F/MBkgGuwrQKOjv9iJmuT5TwckExM2P1JDmaykwTWkaVbOgtyXQzyHGoKurYOxjKslDhjU793Cw6Mb9rK97JD/rUxfZnepSRWXf9HjHNljc6u+e/XzCO05/66MpH0O1WtQvKI59OiSDVxSnT9AHKTQR31VNGa7dOpezAfqt20mHyZjKjYNGnC+DZcfitZJj1kuOe57+VO3WOXPor/uta89nnXwY0+ETXdN9KyiAC3mFFVmIJVnzLF8HnDHwEuVpqze3ZUfYsmezbst25id69uKlevZ8xDWo27OfXpVmJ3r2mYva8GVp0aP1T3cNKEl28VaMZotOEoypT9efZI0/R55+rj2VI2eub4PnSKXj14dIbcJdujePKDv6x64r27YkuVwzDqFY5xJD1N5Nx57M37Bj9/tJ5aGCPfYZox0n+f8WvPT8QU5MYnwACUa2spmtlNxzkYAQVCDX5oXMCkWdItfXTVvHzVLfBsgg1osAE3NIrHYY1fIDGvSnPufBdMJ3pRUYwogjkiv3pKroYW3Mlos9FqGOaX7ZIADbbntEqqZuO69CLM/vEgvZfWLV3yC1ieUPcRS0fzEL7hAlf1QrjM9xDas6ZGcMmBBW1MPwl+JEA80tZGqpWl5auKlW+xKTY60l5Wrz5ECCQuqHVbNPsG5R7Dht7XlzgrtjJzSyb55G6PpDymcx2hsFo3PJhQ5tj0vTl6P0PVcWdHldMi4QBMDw8trjopHnvWFZ9EdBorcoi/8rDk1c+/WaK7xtfuFQHqqbn5Gg2x8=</diagram></mxfile>" style="background-color: rgb(255, 255, 255);">
<defs/>
<g transform="translate(0.5,0.5)">
<path d="M 480 50 Q 480 100 607.5 100 Q 735 100 735 139.9" fill="none" stroke="#000000" stroke-width="1" stroke-miterlimit="10" pointer-events="none"/>
<path d="M 735 146.65 L 730.5 137.65 L 735 139.9 L 739.5 137.65 Z" fill="#000000" stroke="#000000" stroke-width="1" stroke-miterlimit="10" pointer-events="none"/>
<g transform="translate(562.5,93.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="29" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: "Courier New"; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">True
</div>
</div>
</foreignObject>
<text x="15" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Courier New">True</text>
</switch>
</g>
<path d="M 480 50 Q 480 100 352.5 100 Q 225 100 225 143.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/>
<path d="M 225 148.88 L 221.5 141.88 L 225 143.63 L 228.5 141.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/>
<g transform="translate(316.5,94.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="36" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: "Courier New"; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">False
</div>
</div>
</foreignObject>
<text x="18" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Courier New">False</text>
</switch>
</g>
<ellipse cx="480" cy="25" rx="50" ry="25" fill="#ffffff" stroke="#000000" pointer-events="none"/>
<g transform="translate(439.5,6.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="80" height="36" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 80px; white-space: nowrap; word-wrap: normal; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">
<pre>is_training</pre>
</div>
</div>
</foreignObject>
<text x="40" y="24" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica"><pre>is_training</pre></text>
</switch>
</g>
<path d="M 735 200 Q 735 240 674 240 Q 613 240 613 269.9" fill="none" stroke="#000000" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/>
<path d="M 613 276.65 L 608.5 267.65 L 613 269.9 L 617.5 267.65 Z" fill="#000000" stroke="#000000" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/>
<g transform="translate(672.5,232.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="43" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: "Courier New"; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; font-weight: bold; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">String
</div>
</div>
</foreignObject>
<text x="22" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Courier New" font-weight="bold">String</text>
</switch>
</g>
<path d="M 735 200 Q 735 240 800 240 Q 865 240 865 273.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/>
<path d="M 865 278.88 L 861.5 271.88 L 865 273.63 L 868.5 271.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/>
<g transform="translate(807.5,233.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="29" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: "Courier New"; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">None
</div>
</div>
</foreignObject>
<text x="15" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Courier New">None</text>
</switch>
</g>
<ellipse cx="735" cy="175" rx="95" ry="25" fill="#ffffff" stroke="#000000" pointer-events="none"/>
<g transform="translate(658.5,156.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="152" height="36" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 152px; white-space: nowrap; word-wrap: normal; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">
<pre>update_ops_collection</pre>
</div>
</div>
</foreignObject>
<text x="76" y="24" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica"><pre><code>update_ops_collection</code></pre></text>
</switch>
</g>
<path d="M 225 200 Q 225 240 292.5 240 Q 360 240 360 273.63" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/>
<path d="M 360 278.88 L 356.5 271.88 L 360 273.63 L 363.5 271.88 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/>
<g transform="translate(260.5,232.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="36" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: "Courier New"; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">False
</div>
</div>
</foreignObject>
<text x="18" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Courier New">False</text>
</switch>
</g>
<path d="M 225 200 Q 225 240 165 240 Q 105 240 105 269.9" fill="none" stroke="#000000" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/>
<path d="M 105 276.65 L 100.5 267.65 L 105 269.9 L 109.5 267.65 Z" fill="#000000" stroke="#000000" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/>
<g transform="translate(140.5,234.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="29" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: "Courier New"; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; font-weight: bold; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">True
</div>
</div>
</foreignObject>
<text x="15" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Courier New" font-weight="bold">True</text>
</switch>
</g>
<ellipse cx="225" cy="175" rx="95" ry="25" fill="#ffffff" stroke="#000000" pointer-events="none"/>
<g transform="translate(166.5,156.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="116" height="36" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 116px; white-space: nowrap; word-wrap: normal; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">
<pre>test_local_stats</pre>
</div>
</div>
</foreignObject>
<text x="58" y="24" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica"><pre><code>test_local_stats</code></pre></text>
</switch>
</g>
<rect x="760" y="280" width="210" height="60" rx="9" ry="9" fill="#ffffff" stroke="#000000" pointer-events="none"/>
<g transform="translate(761.5,270.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="206" height="78" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 206px; white-space: normal; word-wrap: normal; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">
<ul>
<li style="text-align: left">Normalize output using local batch statistics</li>
<li style="text-align: left">Update moving averages in each forward pass</li>
</ul>
</div>
</div>
</foreignObject>
<text x="103" y="45" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text>
</switch>
</g>
<rect x="508" y="280" width="210" height="100" rx="15" ry="15" fill="#ffffff" stroke="#000000" pointer-events="none"/>
<g transform="translate(509.5,276.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="206" height="106" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 206px; white-space: normal; word-wrap: normal; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">
<ul>
<li style="text-align: left">Normalize output using local batch statistics</li>
<li style="text-align: left">Update ops for the moving averages are placed in a named collection.
<b>They are not executed automatically.</b>
</li>
</ul>
</div>
</div>
</foreignObject>
<text x="103" y="59" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text>
</switch>
</g>
<rect x="255" y="280" width="210" height="60" rx="9" ry="9" fill="#ffffff" stroke="#000000" pointer-events="none"/>
<g transform="translate(256.5,277.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="206" height="64" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 206px; white-space: normal; word-wrap: normal; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">
<ul>
<li style="text-align: left">Normalize output using stored moving averages.</li>
<li style="text-align: left">No update ops are created.</li>
</ul>
</div>
</div>
</foreignObject>
<text x="103" y="38" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text>
</switch>
</g>
<rect x="0" y="280" width="210" height="60" rx="9" ry="9" fill="#ffffff" stroke="#000000" pointer-events="none"/>
<g transform="translate(1.5,277.5)">
<switch>
<foreignObject style="overflow:visible;" pointer-events="all" width="206" height="64" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 206px; white-space: normal; word-wrap: normal; text-align: center;">
<div
xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">
<ul>
<li style="text-align: left">Normalize output using local batch statistics</li>
<li style="text-align: left">No update ops are created.</li>
</ul>
</div>
</div>
</foreignObject>
<text x="103" y="38" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text>
</switch>
</g>
</g>
</svg>
#@title Setup
import numpy as np
import tensorflow as tf
import sonnet as snt
import matplotlib.pyplot as plt
from matplotlib import patches
%matplotlib inline
def run_and_visualize(inputs, outputs, bn_module):
    """Run the (inputs -> outputs) graph 1000 times and plot the distributions.

    Scatter-plots the raw inputs (red) and the batch-normalized outputs (blue),
    and draws the module's moving mean/variance as a green ellipse so the effect
    of each BatchNorm configuration is visible. Also prints how many update ops
    were placed in tf.GraphKeys.UPDATE_OPS and the empirical input moments.
    """
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        inputs_collection = []
        outputs_collection = []
        # 1000 forward passes; each pass may or may not touch the moving
        # statistics depending on how bn_module was configured.
        for i in range(1000):
            current_inputs, current_outputs = sess.run([inputs, outputs])
            inputs_collection.append(current_inputs)
            outputs_collection.append(current_outputs)
        # Read the module's internal moving statistics (private attributes).
        bn_mean, bn_var = sess.run([bn_module._moving_mean,
                                    bn_module._moving_variance])
        inputs_collection = np.concatenate(inputs_collection, axis=0)
        outputs_collection = np.concatenate(outputs_collection, axis=0)
        print("Number of update ops in collection: {}".format(
            len(tf.get_collection(tf.GraphKeys.UPDATE_OPS))))
        print("Input mean: {}".format(np.mean(inputs_collection, axis=0)))
        print("Input variance: {}".format(np.var(inputs_collection, axis=0)))
        print("Moving mean: {}".format(bn_mean))
        print("Moving variance: {}".format(bn_var))
        plt.figure()
        # Plot the learned Gaussian distribution.
        ellipse = patches.Ellipse(xy=bn_mean[0], width=bn_var[0, 0],
                                  height=bn_var[0, 1], angle=0, edgecolor='g',
                                  fc='None', zorder=1000, linestyle='solid',
                                  linewidth=2)
        # Plot the input distribution.
        input_ax = plt.scatter(inputs_collection[:, 0], inputs_collection[:, 1],
                               c='r', alpha=0.1, zorder=1)
        # Plot the output distribution.
        output_ax = plt.scatter(outputs_collection[:, 0], outputs_collection[:, 1],
                                c='b', alpha=0.1, zorder=1)
        ax = plt.gca()
        ellipse_ax = ax.add_patch(ellipse)
        plt.legend((input_ax, output_ax, ellipse_ax),
                   ("Inputs", "Outputs", "Aggregated statistics"),
                   loc="lower right")
        plt.axis("equal")
def get_inputs():
    """A batch of 10 2-D points: dim 0 ~ N(10, 1), dim 1 ~ N(10, 2)."""
    first_dim = tf.random_normal((10, 1), 10, 1)
    second_dim = tf.random_normal((10, 1), 10, 2)
    return tf.concat([first_dim, second_dim], axis=1)
```
# Examples
## Default mode
```
# Default construction: is_training=True normalizes with batch statistics and
# creates moving-average update ops in a collection — they are NOT run here.
tf.reset_default_graph()
inputs = get_inputs()
bn = snt.BatchNorm()
outputs = bn(inputs, is_training=True)
run_and_visualize(inputs, outputs, bn)
```
**Results**
1. The outputs have been normalized. This is indicated by the blue isotropic Gaussian distribution.
1. Update ops have been created and placed in a collection.
1. No moving statistics have been collected. The green circle shows the learned Gaussian distribution. It is initialized to have mean 0 and standard deviation 1. Because the update ops were created but not executed, these statistics have not been updated.
1. The "boxy" shape of the normalized data points comes from the rather small batch size of 10. Because the batch statistics are only computed over 10 data points, they are very noisy.
## Collecting statistics during training
### First option: Update statistics automatically on every forward pass
```
# update_ops_collection=None: the moving averages are updated automatically on
# every forward pass instead of being parked in a collection.
tf.reset_default_graph()
inputs = get_inputs()
bn = snt.BatchNorm(update_ops_collection=None)
outputs = bn(inputs, is_training=True)
run_and_visualize(inputs, outputs, bn)
```
**Results**
1. The outputs have been normalized as we can tell from the blue isotropic Gaussian distribution.
1. Update ops have been created and executed. We can see that the moving statistics no longer have their default values (i.e. the green ellipse has changed). The aggregated statistics don't represent the input distribution yet because we only ran 1000 forward passes.
### Second option: Explicitly add update ops as control dependencies
```
tf.reset_default_graph()
inputs = get_inputs()
# FIX: use the default update_ops_collection so the update ops actually land
# in tf.GraphKeys.UPDATE_OPS. With update_ops_collection=None (as before),
# updates run automatically on every forward pass and the explicit control
# dependency below grouped an empty collection — a no-op.
bn = snt.BatchNorm()
outputs = bn(inputs, is_training=True)
# Add the update ops as control dependencies
# This can usually be done when defining the gradient descent
# ops
update_ops = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
with tf.control_dependencies([update_ops]):
    outputs = tf.identity(outputs)
run_and_visualize(inputs, outputs, bn)
```
**Results**
The actual results are identical to the previous run. However, this time, the update ops have not been executed automatically whenever we did a forward pass. We have to explicitly make the updates a dependency of our output by using ```tf.control_dependencies```. Usually, we would add the dependencies to our learning ops.
# Using statistics at test time
## Default mode
```
# is_training=False with the default test_local_stats=True: normalize with the
# current batch's statistics; no update ops are created.
tf.reset_default_graph()
inputs = get_inputs()
bn = snt.BatchNorm()
outputs = bn(inputs, is_training=False)
run_and_visualize(inputs, outputs, bn)
```
**Results**
1. No update ops have been created and the moving statistics still have their initial values (mean 0, standard deviation 1).
2. The inputs have been normalized using the batch statistics as we can tell from the blue isotropic Gaussian distribution.
This means: In the default testing mode, the inputs are normalized using the batch statistics and the aggregated statistics are ignored.
## Using moving averages at test time
```
def hacky_np_initializer(array):
    """Allows us to initialize a tf variable with a numpy array."""

    def _init(shape, dtype, partition_info):
        # The initializer protocol hands us shape/dtype/partition_info, but we
        # deliberately ignore them: always return the captured values as a
        # float32 constant.
        as_float32 = np.asarray(array, dtype='float32')
        return tf.constant(as_float32)

    return _init
tf.reset_default_graph()
inputs = get_inputs()
# We initialize the moving mean and variance to non-standard values
# so we can see the effect of this setting
bn = snt.BatchNorm(initializers={
"moving_mean": hacky_np_initializer([[10, 10]]),
"moving_variance": hacky_np_initializer([[1, 4]])
})
outputs = bn(inputs, is_training=False, test_local_stats=False)
run_and_visualize(inputs, outputs, bn)
```
**Results**
We have now manually initialized the moving statistics to the moments of the input distribution. We can see that the inputs have been normalized according to our stored statistics.
| github_jupyter |
In this python script, I have done:
- EDA
- Data collection
- Checking null and inf in the data
- Drop all the null data
- Visualization
    - Plot data distribution
    - Plot candle stick
    - Plot volume
- Model training
- Xg-boosting
- Loop over all assets (training)
- Model Prediction
- Score: 0.3550
- Ranking: 385/830
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
import time
import xgboost as xgb
import gresearch_crypto
import traceback
import keras
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, RepeatedKFold, KFold
from sklearn.metrics import mean_squared_error
from matplotlib.colors import to_rgba
```
## <center>**EDA**</center>
### **train.csv - Column Description**
- **timestamp**: All timestamps are returned as second Unix timestamps (the number of seconds elapsed since 1970-01-01 00:00:00.000 UTC). Timestamps in this dataset are multiple of 60, indicating minute-by-minute data.
- **Asset_ID**: The asset ID corresponding to one of the crypto currencies (e.g. Asset_ID = 1 for Bitcoin). The mapping from Asset_ID to crypto asset is contained in asset_details.csv.
- **Count**: Total number of trades in the time interval (last minute).
- **Open**: Opening price of the time interval (in USD).
- **High**: Highest price reached during time interval (in USD).
- **Low**: Lowest price reached during time interval (in USD).
- **Close**: Closing price of the time interval (in USD).
- **Volume**: Quantity of asset bought or sold, displayed in base currency USD.
- **VWAP**: The average price of the asset over the time interval, weighted by volume. VWAP is an aggregated form of trade data.
- **Target**: Residual log-returns for the asset over a 15 minute horizon.
```
# Read Data
def read_data(nrows=None):
    """Load the competition training data and the asset metadata.

    Parameters
    ----------
    nrows : int | None
        Optional row cap for quick experiments; None loads the full file.

    Returns
    -------
    (pd.DataFrame, pd.DataFrame)
        The minute-level training rows and the asset-details table.
    """
    train_path = '../input/g-research-crypto-forecasting/train.csv'
    details_path = '../input/g-research-crypto-forecasting/asset_details.csv'
    data = pd.read_csv(train_path, nrows=nrows)
    asset_details = pd.read_csv(details_path)
    return data, asset_details
```
### **Checking Nulls And Inf**
```
def check_null_and_inf(data):
    """Print the per-column null counts and the row indices holding +/-inf.

    Purely diagnostic: prints to stdout and returns None.
    """
    print("Numbers of Nulls in Data:")
    print(data.isnull().sum(), end='\n\n')
    print("Inf in Data:")
    # np.isinf already yields a boolean mask; comparing it to True was redundant.
    print(np.where(np.isinf(data))[0])
# Drop Infinite and Nan
def drop_inf_and_nan(data):
    """Remove every row containing +/-inf or NaN, mutating *data* in place.

    Infinities are first converted to NaN so a single ``dropna`` pass removes
    both kinds of bad rows.  The mutated frame is also returned so the
    function supports assignment style: ``data = drop_inf_and_nan(data)``.
    """
    data.replace([np.inf, -np.inf], np.nan, inplace=True)
    data.dropna(axis=0, inplace=True)
    # (the original also called data.isnull().sum() here and discarded the
    # result — removed as a no-op)
    return data
```
## <center>**DataSet**</center>
### Hyperparameters
```
# Raw input columns fed to the models; the candle-shadow features are added
# on top of these later in get_features().
FEATURES = ['Count', 'Close','High', 'Low', 'Open', 'VWAP', 'Volume']
# Hyper-parameter search space consumed by RandomizedSearchCV in
# get_xgb_model_cv().
PARAMS = {
    'colsample_bytree': [0.5, 0.7],
    'n_estimators': range(520, 600, 40),
    'learning_rate': [0.01, 0.03, 0.05],
    'max_depth': range(11, 14, 1),
}
def crypto_df(asset_id, data):
    """Return the rows belonging to one asset, indexed by timestamp."""
    asset_rows = data[data["Asset_ID"] == asset_id]
    return asset_rows.set_index("timestamp")
# Two new features from the competition tutorial
def upper_shadow(df):
    """Length of the candle's upper wick: High minus the top of the body."""
    body_top = np.maximum(df['Close'], df['Open'])
    return df['High'] - body_top
def lower_shadow(df):
    """Length of the candle's lower wick: bottom of the body minus Low."""
    body_bottom = np.minimum(df['Close'], df['Open'])
    return body_bottom - df['Low']
def get_features(df):
    """Return a copy of the FEATURES columns plus the two shadow features."""
    selected = df[FEATURES].copy()
    selected['Upper_Shadow'] = upper_shadow(selected)
    selected['Lower_Shadow'] = lower_shadow(selected)
    return selected
```
## <center>**Visualization**</center>
### **DATA DISTRIBUTION**
Training Data Distribution among different Assets (Crypto Currencies)
```
def plot_dis(data):
    """Bar plot of the number of training rows available per Asset_ID.

    Parameters
    ----------
    data : pd.DataFrame
        Training data containing an ``Asset_ID`` column.
    """
    # seaborn's countplot tallies the rows per category itself, so the manual
    # per-asset counting loop the original built (and never used) is gone.
    fig = sns.countplot(x="Asset_ID", data=data)
    fig.ticklabel_format(style='sci', axis='y')
    # fig.set_xticklabels(asset_details.sort_values("Asset_ID")["Asset_Name"].tolist(), rotation=-30, horizontalalignment='left')
    fig.set(xlabel='Assets', ylabel='Number of Rows')
```
### **CANDLESTICK CHARTS**
```
def candelstick_chart(data,title):
    """Build a plotly candlestick figure for one asset's OHLC rows.

    Parameters
    ----------
    data : pd.DataFrame
        Minute-level rows (indexed by timestamp) with Open/High/Low/Close.
    title : str
        Asset name shown in the chart title.
    """
    trace = go.Candlestick(
        x=data.index,
        open=data['Open'],
        high=data['High'],
        low=data['Low'],
        close=data['Close'],
    )
    candlestick = go.Figure(data=[trace])
    candlestick.update_xaxes(title_text='Minutes', rangeslider_visible=True)
    candlestick.update_layout(
        title={
            'text': '{:} Candelstick Chart'.format(title),
            'y': 0.90,
            'x': 0.5,
            'xanchor': 'center',
            'yanchor': 'top',
        })
    candlestick.update_yaxes(title_text='Price in USD', ticksuffix='$')
    return candlestick
```
### **AREA PLOT**
```
# Volume traded
def vol_traded(data):
    """Area (stack) plot of the traded volume over the rows in *data*.

    NOTE(review): ``plt.stackplot`` documents a ``colors`` keyword; ``color``
    here is forwarded through **kwargs to the underlying fill call — confirm
    the intended keyword.
    """
    fig = plt.stackplot(data.index, data.Volume, color='thistle')
    return fig
```
## <center>**XG-Boosting**</center>
```
def xgb_cv(X, Y, params):
    """Run 3-fold XGBoost cross-validation on (X, Y) and time it.

    Returns the per-round CV metrics (DataFrame) and the wall-clock runtime
    in seconds.
    """
    dmatrix = xgb.DMatrix(data=X, label=Y)
    started = time.time()
    cv_results = xgb.cv(
        dtrain=dmatrix,
        params=params,
        nfold=3,
        num_boost_round=50,
        early_stopping_rounds=10,
        metrics="rmse",
        as_pandas=True,
        seed=123,
    )
    run_time = time.time() - started
    return cv_results, run_time
```
## <center>**Pipeline**</center>
### Data (Training, Testing)
```
data, asset_details = read_data()
data.head()
data.shape
asset_details
check_null_and_inf(data)
data = drop_inf_and_nan(data)
check_null_and_inf(data)
data['Asset_ID'].unique()
```
### Visualization
```
# Plot data adistribution
plot_dis(data)
btc = crypto_df(1, data)
eth = crypto_df (6, data)
# Plot candle stick
btc_plot = candelstick_chart(btc[-100:], "Bitcoin")
btc_plot.show()
# Plot volumn
vol_traded(btc[-50:])
```
### Model Training
```
def get_xgb_model(X, Y):
    """Fit a GPU XGBoost regressor on a chronological 80/20 split.

    Returns the fitted estimator and the training wall-clock time (seconds).
    """
    model = xgb.XGBRegressor(
        n_estimators=500,
        max_depth=11,
        learning_rate=0.05,
        subsample=0.9,
        colsample_bytree=0.7,
        missing=-999,
        random_state=2020,
        tree_method='gpu_hist'
    )
    # shuffle=False keeps the time ordering: validation is the most recent 20%.
    X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2, shuffle=False)
    started = time.time()
    model.fit(X_train, Y_train,
              eval_set=[(X_train, Y_train), (X_val, Y_val)],
              early_stopping_rounds=20)
    elapsed = round(time.time() - started)
    return model, elapsed
def get_xgb_model_cv(X_train, Y_train):
    """Randomized hyper-parameter search over PARAMS, then refit the best
    configuration on the full training data.

    Returns the refitted best estimator and the search time in seconds.
    """
    estimator = xgb.XGBRegressor(
        objective = "reg:squarederror",
        nthread = 4,
    )
    # NOTE(review): this initial fit looks redundant — RandomizedSearchCV
    # clones and fits the estimator itself; confirm before removing.
    estimator.fit(X_train, Y_train)
    cv = RepeatedKFold(n_splits=10, n_repeats=1, random_state=1)
    random_search = RandomizedSearchCV(
        estimator = estimator,
        param_distributions = PARAMS,
        n_jobs = -1,
        cv = cv,
        random_state=1
    )
    start_time = time.time()
    random_search.fit(X_train, Y_train)
    end_time = time.time()
    # Only the search itself is timed; the final refit below is not included.
    time_elapsed = round(end_time - start_time)
    best_estimator = random_search.best_estimator_.get_params()
    # Rebuild a fresh regressor from the winning parameters, adding the
    # GPU/missing-value settings that were not part of the search space.
    xgb_best = xgb.XGBRegressor(
        objective = "reg:squarederror",
        nthread = 4,
        colsample_bytree = best_estimator['colsample_bytree'],
        n_estimators = best_estimator['n_estimators'],
        learning_rate = best_estimator['learning_rate'],
        max_depth = best_estimator['max_depth'],
        subsample = 0.9,
        random_state = 1,
        missing = -999,
        tree_method='gpu_hist'
    )
    xgb_best.fit(X_train, Y_train)
    return xgb_best, time_elapsed
# Loop over all assets
def loop_over():
    """Train one XGBoost model per asset listed in asset_details.

    Returns three dicts, all keyed by Asset_ID (NOT by position):
    the per-asset feature frames, targets, and fitted models.
    Relies on the module-level ``data`` and ``asset_details`` frames.
    """
    Xs_train = {}
    ys_train = {}
    models = {}
    time_total = 0
    for asset_id, asset_name in zip(asset_details['Asset_ID'], asset_details['Asset_Name']):
        # Slice this asset's rows; features and target come from the same mask.
        X_train, Y_train = data[data["Asset_ID"] == asset_id][FEATURES], data[data["Asset_ID"] == asset_id]['Target']
        X_train = get_features(X_train)
        Xs_train[asset_id], ys_train[asset_id] = X_train.reset_index(drop=True), Y_train.reset_index(drop=True)
        # print('Training model for "{}":'.format(asset_details[asset_details['Asset_ID'] == asset_id]['Asset_Name'].iloc[0]))
        # models[asset_id] = get_xgb_model_cv(X_train.iloc[:10], Y_train.iloc[:10])
        print(asset_name, asset_id)
        model, time_elapsed = get_xgb_model(X_train, Y_train)
        models[asset_id] = model
        # print("Exicuted time: {} seconds.\n".format(time_elapsed))
        time_total += time_elapsed
        # print('Total time elapsed:', time_total)
    return Xs_train, ys_train, models
Xs_train, ys_train, models = loop_over()
# NOTE(review): `models` is a dict keyed by Asset_ID, not by position, so
# `range(len(models))` only works if the asset ids happen to be exactly
# 0..len(models)-1 — confirm against asset_details before relying on this.
for i in range(len(models)):
    # evals_result() holds the per-round RMSE for the two eval_set entries
    # passed in get_xgb_model: validation_0 = train, validation_1 = validation.
    results = models[i].evals_result()
    plt.figure(figsize=(10,7))
    plt.plot(results["validation_0"]["rmse"], label="Training loss")
    plt.plot(results["validation_1"]["rmse"], label="Validation loss")
    plt.xlabel("Iter")
    plt.ylabel("Loss")
    plt.legend()
```
fitting model
```
## Save the models
# import pickle
# for index, asset_id in enumerate(asset_details['Asset_ID']):
# filename = str(asset_id) + '.pkl'
# with open(filename, 'wb') as file:
# pickle.dump(models[index], file)
# # Load the models
# load_models = {}
# for index, asset_id in enumerate(asset_details['Asset_ID']):
# filename = str(asset_id) + '.pkl'
# with open(filename, 'rb') as file:
# load_models[index] = pickle.load(file)
# load_models
# for i in range(len(asset_details)):
# predicted = models[i].predict(Xs_test[i])
# print(mean_squared_error(ys_test[i], predicted))
### Submit
# Iterate the competition's test API: for each test batch, predict a Target
# per row with that asset's model and hand the frame back to the environment.
env = gresearch_crypto.make_env()
iter_test = env.iter_test()
for i, (df_test, df_pred) in enumerate(iter_test):
    for j , row in df_test.iterrows():
        # NOTE(review): this lookup is OUTSIDE the try block, so an Asset_ID
        # missing from `models` raises KeyError instead of falling back to 0
        # — confirm every test asset id was trained.
        if models[row['Asset_ID']] is not None:
            try:
                model = models[row['Asset_ID']]
                # `row` is a Series; get_features/the shadow helpers operate
                # on its scalar fields, and the result is re-wrapped as a
                # one-row DataFrame for predict().
                x_test = get_features(row)
                y_pred = model.predict(pd.DataFrame([x_test]))[0]
                df_pred.loc[df_pred['row_id'] == row['row_id'], 'Target'] = y_pred
            # Bare except: any failure (bad features, predict error) logs a
            # traceback and falls back to a neutral 0 prediction.
            except:
                df_pred.loc[df_pred['row_id'] == row['row_id'], 'Target'] = 0
                traceback.print_exc()
        else:
            df_pred.loc[df_pred['row_id'] == row['row_id'], 'Target'] = 0
    env.predict(df_pred)
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
np.random.seed(41)
sns.set()
df = pd.read_csv('https://huseinhouse.com/dataset/mall-customer.csv')
df.head()
X = df.iloc[:, -2:].values
X.shape
plt.figure(figsize = (7, 5))
plt.scatter(X[:,0], X[:, 1])
plt.ylabel('Spending Score (1-100)')
plt.xlabel('Annual Income (k$)')
plt.show()
K = 5
r_centroids = X[np.random.randint(0, X.shape[0], size=(5))]
plt.figure(figsize = (7, 5))
plt.scatter(X[:,0], X[:, 1], c = 'b')
plt.scatter(r_centroids[:,0], r_centroids[:,1], color='r',marker='X', s=200)
plt.ylabel('Spending Score (1-100)')
plt.xlabel('Annual Income (k$)')
plt.title('random centroids')
plt.show()
# k-means++ seeding: after a random first centroid, each new centroid is
# sampled with probability proportional to D(x)^2, where D(x) is the distance
# from point x to its NEAREST already-chosen centroid.
pp_centroids = np.array([X[np.random.randint(0, X.shape[0])]])
for i in range(1, K, 1):
    # Distance to the closest existing centroid.  (The original summed the
    # squared distances to ALL centroids before the sqrt, and weighted by D
    # rather than D^2 — neither matches the k-means++ scheme.)
    D = np.array([np.min(np.sqrt(np.sum((x - pp_centroids) ** 2, axis=1))) for x in X])
    prob = (D ** 2) / np.sum(D ** 2)
    cummulative_prob = np.cumsum(prob)
    # Inverse-CDF sampling: pick the first index whose cumulative mass
    # exceeds a uniform draw.
    r = np.random.random()
    s = np.where(cummulative_prob > r)[0][0]
    pp_centroids = np.append(pp_centroids, [X[s]], axis=0)
pp_centroids
plt.figure(figsize = (7, 5))
plt.scatter(X[:,0], X[:, 1], c = 'b')
plt.scatter(pp_centroids[:,0], pp_centroids[:,1], color='r',marker='X', s=200)
plt.ylabel('Spending Score (1-100)')
plt.xlabel('Annual Income (k$)')
plt.title('++ centroids')
plt.show()
from matplotlib import animation
CENTROID = r_centroids.astype('float')
def training(epoch):
euclidean_distance = np.array([np.sqrt(np.sum((X-CENTROID[k,:])**2,axis=1)) for k in range(K)]).T
C = np.argmin(euclidean_distance,axis=1)
for k in range(K):
mean = X[C == k].mean(axis = 0)
CENTROID[k] = mean
line.set_offsets(CENTROID)
ax.set_title('random centroids, iteration %d'%(epoch))
return line, ax
fig = plt.figure(figsize = (7, 5))
ax = plt.axes()
ax.scatter(X[:,0], X[:, 1], c = 'b')
line = ax.scatter([],[], color='r',marker='X', s=200)
ax.set_xlabel('Annual Income (k$)')
ax.set_ylabel('Spending Score (1-100)')
ax.set_title('random centroids')
anim = animation.FuncAnimation(fig, training, frames=20, interval=200)
anim.save('random-centroids.gif', writer='imagemagick', fps=5)
CENTROID = pp_centroids.astype('float')
def training(epoch):
    """One k-means iteration (++-seeded run): assign points to the nearest
    centroid, then move each centroid to its cluster mean.

    Mutates the module-level CENTROID array and the matplotlib artists.
    """
    # (n_samples, K) matrix of euclidean distances to each centroid.
    euclidean_distance = np.array([np.sqrt(np.sum((X-CENTROID[k,:])**2,axis=1)) for k in range(K)]).T
    C = np.argmin(euclidean_distance,axis=1)
    for k in range(K):
        members = X[C == k]
        # Guard: the mean of an empty cluster is NaN and would permanently
        # corrupt that centroid; keep the old position instead.
        if len(members) > 0:
            CENTROID[k] = members.mean(axis=0)
    line.set_offsets(CENTROID)
    ax.set_title('++ centroids, iteration %d'%(epoch))
    return line, ax
fig = plt.figure(figsize = (7, 5))
ax = plt.axes()
ax.scatter(X[:,0], X[:, 1], c = 'b')
line = ax.scatter([],[], color='r',marker='X', s=200)
ax.set_xlabel('Annual Income (k$)')
ax.set_ylabel('Spending Score (1-100)')
ax.set_title('++ centroids')
anim = animation.FuncAnimation(fig, training, frames=20, interval=200)
anim.save('pp-centroids.gif', writer='imagemagick', fps=5)
```
| github_jupyter |
# Rapid Eye Movements (REMs) detection
This notebook demonstrates how to use YASA to automatically detect rapid eye movements (REMs) on EOG data.
Please make sure to install the latest version of YASA first by typing the following line in your terminal or command prompt:
`pip install --upgrade yasa`
```
import yasa
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from mne.filter import filter_data
sns.set(font_scale=1.2)
```
## Data loading
Let's load 50 seconds of EOG data (LOC and ROC) during REM sleep, sampled at 256 Hz.
```
# Load data: the .npz bundle holds the two EOG traces plus the sampling
# rate and channel names.
file = np.load('data_EOGs_REM_256Hz.npz')
data, sf, chan = file['data'], file['sf'], file['chan']
# Keep only 50 seconds of data (12801 samples at 256 Hz, from sample 77000)
loc = data[0, 77000:89801]
roc = data[1, 77000:89801]
# Define sampling frequency and time vector
times = np.arange(loc.size) / sf
# Plot the signal
fig, ax = plt.subplots(1, 1, figsize=(16, 4))
plt.plot(times, loc, label='LOC', lw=1.5)
plt.plot(times, roc, label='ROC', lw=1.5)
plt.xlabel('Time (seconds)')
plt.ylabel('Amplitude (uV)')
plt.xlim([times.min(), times.max()])
plt.title('REM sleep EOG data')
plt.legend(loc='best', frameon=False)
sns.despine()
```
## Apply the detection
We use the [rem_detect](https://raphaelvallat.com/yasa/build/html/generated/yasa.rem_detect.html#yasa.rem_detect) function to apply the detection. The output of the detection is a [REMResults](file:///C:/Users/Raphael/Desktop/yasa/docs/build/html/generated/yasa.REMResults.html#yasa.REMResults) class, which comes with some pre-compiled functions (also called methods). For instance, the [summary](file:///C:/Users/Raphael/Desktop/yasa/docs/build/html/generated/yasa.REMResults.html#yasa.REMResults.summary) method returns a [pandas DataFrame](http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe) with all the detected REMs and their properties.
The algorithm is based on an amplitude thresholding of the negative product of the LOC and ROC filtered signal. As such, this function requires BOTH the LOC and ROC EOG data, it does NOT work with a single EOG.
```
from yasa import rem_detect
# Short version
# rem = rem_detect(loc, roc, sf)
# Long version (with all optional, implicit, options spelled out)
rem = rem_detect(loc, roc, sf, hypno=None, include=4, amplitude=(50, 325),
duration=(0.3, 1.5), freq_rem=(0.5, 5), remove_outliers=False,
verbose=False)
# Get the detection dataframe
events = rem.summary()
events.round(3)
```
### Plot the detected REMs
```
# Let's get a boolean mask of the REMs in data (one row per EOG channel)
mask = rem.get_mask()
mask
# Multiplying by the mask zeroes out every non-REM sample...
loc_highlight = loc * mask[0, :]
roc_highlight = roc * mask[1, :]
# ...and zeros become NaN so matplotlib leaves gaps instead of drawing them.
# NOTE(review): a genuine 0 uV sample inside a detected REM is also blanked
# by this trick — fine for plotting, confirm before reusing elsewhere.
loc_highlight[loc_highlight == 0] = np.nan
roc_highlight[roc_highlight == 0] = np.nan
plt.figure(figsize=(16, 4.5))
plt.plot(times, loc, 'slategrey', label='LOC')
plt.plot(times, roc, 'grey', label='ROC')
plt.plot(times, loc_highlight, 'indianred')
plt.plot(times, roc_highlight, 'indianred')
plt.xlabel('Time (seconds)')
plt.ylabel('Amplitude (uV)')
plt.xlim([0, times[-1]])
plt.title('REM sleep EOG data')
plt.legend()
sns.despine()
rem.plot_average(center="Peak", time_before=0.4, time_after=0.4);
```
### Restraining the detection to REM sleep
```
# Artificial hypnogram labelling every sample as REM (stage 4), so the
# detector's stage filter (include=4) keeps the whole recording.
hypno = 4 * np.ones_like(loc)
rem = rem_detect(loc, roc, sf, hypno=hypno, include=4)
rem.summary().round(3)
# Aggregate the detected-event properties per sleep stage
rem.summary(grp_stage=True)
```
### Computation time
```
%timeit rem_detect(loc, roc, sf)
# Line profiling
# %load_ext line_profiler
# %lprun -f rem_detect rem_detect(loc, roc, sf)
```
| github_jupyter |
2017
Machine Learning Practical
University of Edinburgh
Georgios Pligoropoulos - s1687568
Coursework 4 (part 5a)
### Imports, Inits, and helper functions
```
jupyterNotebookEnabled = True
plotting = True
saving = True
coursework, part = 4, "5a"
if jupyterNotebookEnabled:
#%load_ext autoreload
%reload_ext autoreload
%autoreload 2
import sys
import os
mlpdir = os.path.expanduser(
'~/pligor.george@gmail.com/msc_Artificial_Intelligence/mlp_Machine_Learning_Practical/mlpractical'
)
sys.path.append(mlpdir)
import pickle
import skopt
from skopt.plots import plot_convergence
import datetime
import time
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mylibs.tf_helper import tfRMSE, tfMSE, fully_connected_layer, trainEpoch, validateEpoch
from mylibs.py_helper import merge_dicts
from mylibs.dropout_helper import constructProbs
from skopt.space.space import Real, Integer
from skopt import gp_minimize
from mylibs.jupyter_notebook_helper import show_graph
from mlp.data_providers import DataProvider, \
MSD10GenreDataProvider, MSD25GenreDataProvider,\
MSD10Genre_Autoencoder_DataProvider, MSD10Genre_StackedAutoEncoderDataProvider
from mylibs.batch_norm import batchNormWrapper_byExponentialMovingAvg, fully_connected_layer_with_batch_norm
from mylibs.batch_norm import fully_connected_layer_with_batch_norm_and_l2
from mylibs.stacked_autoencoder_pretrainer import \
constructModelFromPretrainedByAutoEncoderStack,\
buildGraphOfStackedAutoencoder, executeNonLinearAutoencoder
from mylibs.jupyter_notebook_helper import getRunTime, getTrainWriter, getValidWriter,\
plotStats, initStats, gatherStats, renderStatsCollection
if jupyterNotebookEnabled:
%matplotlib inline
seed = 16011984
rng = np.random.RandomState(seed=seed)
config = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)
config.gpu_options.allow_growth = True
figcount = 0
tensorboardLogdir = 'tf_cw{}_{}'.format(coursework, part)
curDtype = tf.float32
reluBias = 0.1
batch_size = 50
numTestSongs = 9950
numClasses = 10
```
### MSD 10 genre task
```
segment_count = 120
segment_len = 25
from models.teacher_student_nn import MSD10Genre_Teacher_DataProvider, StudentNN
from rnn.manual_rnn import ManualRNN
dataset_filename = 'msd-10-genre-train_valid.npz'
logits_filename = 'rnn_logits.npy'
```
### Validating with Teacher
```
manualRNN = ManualRNN(batch_size=batch_size, rng = rng, dtype = curDtype, config=config,
segment_count = segment_count, segment_len = segment_len)
best_params_filename = 'best_params_rnn.npy'
(state_size, num_steps) = np.load(best_params_filename)
(state_size, num_steps)
data_provider = manualRNN.get_dp("train_valid", num_steps = num_steps)
graph = manualRNN.getGraph(num_steps=num_steps, state_size=state_size)
epochs = 20
%%time
stats, keys = manualRNN.validate(data_provider, state_size, graph, epochs=epochs, verbose=True)
times = [46.634,
46.672,
46.347,
46.027,
45.852,
45.920,
45.909,
45.864,
46.181,
45.946,
46.052,
45.538,
45.458,
45.520,
45.849,
45.888,
45.531,
45.583,
45.557,
45.947,]
np.mean(times)
np.min(times)
np.max(times)
np.var(times)
```
### Running full training of shallow neural net
```
studentNN = StudentNN(batch_size=batch_size, rng=rng, dtype=curDtype, config=config)
learning_rate = "1e-4"
best_params_student_teacher_filename = 'student_teacher/{}/student_teacher_best_params.npy'.\
format(learning_rate)
best_params_student_teacher = np.load(best_params_student_teacher_filename)
best_params_student_teacher
%%time
input_keep_prob, hidden_keep_prob, hidden_dim, lamda2 = best_params_student_teacher
hidden_dim = int(hidden_dim)
learning_rate = float(learning_rate)
graph = studentNN.loadAndGetGraph(hidden_dim=hidden_dim, lamda2=lamda2, learningRate=learning_rate)
data_provider = MSD10GenreDataProvider('train_valid', batch_size=batch_size, rng=rng)
epochs = 20
stats, keys, runTimes = studentNN.validate(
data_provider = data_provider,
graph = graph,
epochs = epochs,
)
np.mean(runTimes)
np.min(runTimes)
np.max(runTimes)
np.var(runTimes)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/1_getting_started_roadmap/6_hyperparameter_tuning/1)%20Analyse%20Learning%20Rates.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Goals
### Learn how to use hyper parameter analyser for learning rates
# Table of Contents
## [Install](#0)
## [Load experiment in default mode](#1)
## [Run Analyser](#2)
<a id='0'></a>
# Install Monk
## Using pip (Recommended)
- colab (gpu)
    - All backends: `pip install -U monk-colab`
- kaggle (gpu)
- All backends: `pip install -U monk-kaggle`
- cuda 10.2
- All backends: `pip install -U monk-cuda102`
    - Gluon backend: `pip install -U monk-gluon-cuda102`
- Pytorch backend: `pip install -U monk-pytorch-cuda102`
- Keras backend: `pip install -U monk-keras-cuda102`
- cuda 10.1
- All backend: `pip install -U monk-cuda101`
    - Gluon backend: `pip install -U monk-gluon-cuda101`
- Pytorch backend: `pip install -U monk-pytorch-cuda101`
- Keras backend: `pip install -U monk-keras-cuda101`
- cuda 10.0
- All backend: `pip install -U monk-cuda100`
    - Gluon backend: `pip install -U monk-gluon-cuda100`
- Pytorch backend: `pip install -U monk-pytorch-cuda100`
- Keras backend: `pip install -U monk-keras-cuda100`
- cuda 9.2
- All backend: `pip install -U monk-cuda92`
    - Gluon backend: `pip install -U monk-gluon-cuda92`
- Pytorch backend: `pip install -U monk-pytorch-cuda92`
- Keras backend: `pip install -U monk-keras-cuda92`
- cuda 9.0
- All backend: `pip install -U monk-cuda90`
    - Gluon backend: `pip install -U monk-gluon-cuda90`
- Pytorch backend: `pip install -U monk-pytorch-cuda90`
- Keras backend: `pip install -U monk-keras-cuda90`
- cpu
- All backend: `pip install -U monk-cpu`
    - Gluon backend: `pip install -U monk-gluon-cpu`
- Pytorch backend: `pip install -U monk-pytorch-cpu`
- Keras backend: `pip install -U monk-keras-cpu`
## Install Monk Manually (Not recommended)
### Step 1: Clone the library
- git clone https://github.com/Tessellate-Imaging/monk_v1.git
### Step 2: Install requirements
- Linux
- Cuda 9.0
- `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt`
- Cuda 9.2
- `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt`
- Cuda 10.0
- `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt`
- Cuda 10.1
- `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt`
- Cuda 10.2
- `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt`
- CPU (Non gpu system)
- `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt`
- Windows
- Cuda 9.0 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt`
- Cuda 9.2 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt`
- Cuda 10.0 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt`
- Cuda 10.1 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt`
- Cuda 10.2 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt`
- CPU (Non gpu system)
- `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt`
- Mac
- CPU (Non gpu system)
- `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt`
- Misc
- Colab (GPU)
- `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt`
- Kaggle (GPU)
- `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt`
### Step 3: Add to system path (Required for every terminal or kernel run)
- `import sys`
- `sys.path.append("monk_v1/");`
## Dataset - Caltech-256
- https://www.kaggle.com/jessicali9530/caltech256
```
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1Lltrl2U4L8WJkyBjMBFHSaoK8dLhoItl' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1Lltrl2U4L8WJkyBjMBFHSaoK8dLhoItl" -O caltech256.zip && rm -rf /tmp/cookies.txt
! unzip -qq caltech256.zip
```
# Imports
```
#Using gluon backend
# When installed using pip
from monk.gluon_prototype import prototype
# When installed manually (Uncomment the following)
#import os
#import sys
#sys.path.append("monk_v1/");
#sys.path.append("monk_v1/monk/");
#from monk.gluon_prototype import prototype
```
<a id='1'></a>
# Load experiment in default mode
```
gtf = prototype(verbose=1);
gtf.Prototype("Project", "analyser_lr");
gtf.Default(dataset_path="caltech256/train",
model_name="resnet18_v1",
freeze_base_network=False,
num_epochs=5);
#Read the summary generated once you run this cell.
```
<a id='2'></a>
# Analyse Learning Rates
```
# Analysis Project Name
analysis_name = "analyse_learning_rates"
# Learning rates to explore
lrs = [0.1, 0.05, 0.01, 0.005, 0.0001];
# Num epochs for each sub-experiment to run
epochs=10
# Percentage of original dataset to take in for experimentation
percent_data=10
# "keep_all" - Keep all the sub experiments created
# "keep_non" - Delete all sub experiments created
analysis = gtf.Analyse_Learning_Rates(analysis_name, lrs, percent_data,
num_epochs=epochs, state="keep_none");
```
## Analysis
- LR as 0.1 doesnt work
- Same is the case with 0.0001
- Of the other's lr as 0.01 produces least validation loss
## Update learning rate
```
gtf.update_learning_rate(0.01);
# Very important to reload post updates
gtf.Reload();
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
```
# Goals Completed
### Learn how to use hyper parameter analyser for learning rates
| github_jupyter |
In this tutorial, you will learn what a **categorical variable** is, along with three approaches for handling this type of data.
# Introduction
A **categorical variable** takes only a limited number of values.
- Consider a survey that asks how often you eat breakfast and provides four options: "Never", "Rarely", "Most days", or "Every day". In this case, the data is categorical, because responses fall into a fixed set of categories.
- If people responded to a survey about which brand of car they owned, the responses would fall into categories like "Honda", "Toyota", and "Ford". In this case, the data is also categorical.
You will get an error if you try to plug these variables into most machine learning models in Python without preprocessing them first. In this tutorial, we'll compare three approaches that you can use to prepare your categorical data.
# Three Approaches
### 1) Drop Categorical Variables
The easiest approach to dealing with categorical variables is to simply remove them from the dataset. This approach will only work well if the columns did not contain useful information.
### 2) Ordinal Encoding
**Ordinal encoding** assigns each unique value to a different integer.

This approach assumes an ordering of the categories: "Never" (0) < "Rarely" (1) < "Most days" (2) < "Every day" (3).
This assumption makes sense in this example, because there is an indisputable ranking to the categories. Not all categorical variables have a clear ordering in the values, but we refer to those that do as **ordinal variables**. For tree-based models (like decision trees and random forests), you can expect ordinal encoding to work well with ordinal variables.
### 3) One-Hot Encoding
**One-hot encoding** creates new columns indicating the presence (or absence) of each possible value in the original data. To understand this, we'll work through an example.

In the original dataset, "Color" is a categorical variable with three categories: "Red", "Yellow", and "Green". The corresponding one-hot encoding contains one column for each possible value, and one row for each row in the original dataset. Wherever the original value was "Red", we put a 1 in the "Red" column; if the original value was "Yellow", we put a 1 in the "Yellow" column, and so on.
In contrast to ordinal encoding, one-hot encoding *does not* assume an ordering of the categories. Thus, you can expect this approach to work particularly well if there is no clear ordering in the categorical data (e.g., "Red" is neither _more_ nor _less_ than "Yellow"). We refer to categorical variables without an intrinsic ranking as **nominal variables**.
One-hot encoding generally does not perform well if the categorical variable takes on a large number of values (i.e., you generally won't use it for variables taking more than 15 different values).
# Example
As in the previous tutorial, we will work with the [Melbourne Housing dataset](https://www.kaggle.com/dansbecker/melbourne-housing-snapshot/home).
We won't focus on the data loading step. Instead, you can imagine you are at a point where you already have the training and validation data in `X_train`, `X_valid`, `y_train`, and `y_valid`.
```
import pandas as pd
from sklearn.model_selection import train_test_split
# Read the data
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
# Separate target from predictors
y = data.Price
X = data.drop(['Price'], axis=1)
# Divide data into training and validation subsets
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2,
random_state=0)
# Drop columns with missing values (simplest approach)
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
# "Cardinality" means the number of unique values in a column
# Select categorical columns with relatively low cardinality (convenient but arbitrary)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and
X_train_full[cname].dtype == "object"]
# Select numerical columns
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
# Keep selected columns only
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
```
We take a peek at the training data with the `head()` method below.
```
X_train.head()
```
Next, we obtain a list of all of the categorical variables in the training data.
We do this by checking the data type (or **dtype**) of each column. The `object` dtype indicates a column has text (there are other things it could theoretically be, but that's unimportant for our purposes). For this dataset, the columns with text indicate categorical variables.
```
# Get list of categorical variables
# `s` is a boolean Series indexed by column name; `s[s]` keeps only the True
# entries, i.e. the object-dtype (text) columns.
s = (X_train.dtypes == 'object')
object_cols = list(s[s].index)
print("Categorical variables:")
print(object_cols)
```
### Define Function to Measure Quality of Each Approach
We define a function `score_dataset()` to compare the three different approaches to dealing with categorical variables. This function reports the [mean absolute error](https://en.wikipedia.org/wiki/Mean_absolute_error) (MAE) from a random forest model. In general, we want the MAE to be as low as possible!
```
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

# Function for comparing different approaches
def score_dataset(X_train, X_valid, y_train, y_valid):
    """Fit a fixed random forest on the training split and return the
    mean absolute error of its predictions on the validation split.

    A fixed random_state keeps scores comparable across approaches;
    lower MAE is better.
    """
    forest = RandomForestRegressor(n_estimators=100, random_state=0)
    forest.fit(X_train, y_train)
    predictions = forest.predict(X_valid)
    return mean_absolute_error(y_valid, predictions)
```
### Score from Approach 1 (Drop Categorical Variables)
We drop the `object` columns with the [`select_dtypes()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.select_dtypes.html) method.
```
# Approach 1: simply drop every categorical (object-dtype) column
drop_X_train = X_train.select_dtypes(exclude=['object'])
drop_X_valid = X_valid.select_dtypes(exclude=['object'])
print("MAE from Approach 1 (Drop categorical variables):")
print(score_dataset(drop_X_train, drop_X_valid, y_train, y_valid))
```
### Score from Approach 2 (Ordinal Encoding)
Scikit-learn has a [`OrdinalEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html) class that can be used to get ordinal encodings. We loop over the categorical variables and apply the ordinal encoder separately to each column.
```
from sklearn.preprocessing import OrdinalEncoder
# Make copy to avoid changing original data
label_X_train = X_train.copy()
label_X_valid = X_valid.copy()
# Apply ordinal encoder to each column with categorical data.
# Fit on the training split only, then reuse the fitted mapping on validation.
# NOTE(review): transform() raises on categories unseen during fit —
# presumably the splits share all category levels here; confirm for new data.
ordinal_encoder = OrdinalEncoder()
label_X_train[object_cols] = ordinal_encoder.fit_transform(X_train[object_cols])
label_X_valid[object_cols] = ordinal_encoder.transform(X_valid[object_cols])
print("MAE from Approach 2 (Ordinal Encoding):")
print(score_dataset(label_X_train, label_X_valid, y_train, y_valid))
```
In the code cell above, for each column, we randomly assign each unique value to a different integer. This is a common approach that is simpler than providing custom labels; however, we can expect an additional boost in performance if we provide better-informed labels for all ordinal variables.
### Score from Approach 3 (One-Hot Encoding)
We use the [`OneHotEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html) class from scikit-learn to get one-hot encodings. There are a number of parameters that can be used to customize its behavior.
- We set `handle_unknown='ignore'` to avoid errors when the validation data contains classes that aren't represented in the training data, and
- setting `sparse=False` ensures that the encoded columns are returned as a numpy array (instead of a sparse matrix).
To use the encoder, we supply only the categorical columns that we want to be one-hot encoded. For instance, to encode the training data, we supply `X_train[object_cols]`. (`object_cols` in the code cell below is a list of the column names with categorical data, and so `X_train[object_cols]` contains all of the categorical data in the training set.)
```
from sklearn.preprocessing import OneHotEncoder
# Apply one-hot encoder to each column with categorical data.
# handle_unknown='ignore' encodes unseen validation categories as all-zeros.
# NOTE(review): the `sparse` keyword was renamed `sparse_output` in
# scikit-learn 1.2 — confirm the pinned sklearn version before upgrading.
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(X_train[object_cols]))
OH_cols_valid = pd.DataFrame(OH_encoder.transform(X_valid[object_cols]))
# One-hot encoding removed index; put it back so concat aligns rows correctly
OH_cols_train.index = X_train.index
OH_cols_valid.index = X_valid.index
# Remove categorical columns (will replace with one-hot encoding)
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
# Add one-hot encoded columns to numerical features.
# NOTE(review): the new columns get integer names; recent sklearn estimators
# warn/fail on mixed str/int column names — may need .columns.astype(str).
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1)
print("MAE from Approach 3 (One-Hot Encoding):")
print(score_dataset(OH_X_train, OH_X_valid, y_train, y_valid))
```
# Which approach is best?
In this case, dropping the categorical columns (**Approach 1**) performed worst, since it had the highest MAE score. As for the other two approaches, since the returned MAE scores are so close in value, there doesn't appear to be any meaningful benefit to one over the other.
In general, one-hot encoding (**Approach 3**) will typically perform best, and dropping the categorical columns (**Approach 1**) typically performs worst, but it varies on a case-by-case basis.
# Conclusion
The world is filled with categorical data. You will be a much more effective data scientist if you know how to use this common data type!
# Your Turn
Put your new skills to work in the **[next exercise](https://www.kaggle.com/kernels/fork/3370279)**!
---
*Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/intermediate-machine-learning/discussion) to chat with other learners.*
| github_jupyter |
# NVE
## Phase Space, Liouville's Theorem and Ergodicity Ideas
Conservative systems are governed by Hamilton's equations of motion. That is, changes in position and momenta stay on the constant-energy surface $H(p,q)=E$
$$\dot{q} = \frac{\partial H}{\partial p}$$
$$\dot{p} = -\frac{\partial H}{\partial q}$$
To see how an ensemble of N-body conservative mechanical systems evolves, we introduce a probability distribution of classical trajectories in phase space
$$\rho(p,q,t)dq dp $$
### Continuity equation and Liouville's theorem
$$\frac{\partial \rho(p,q,t)}{\partial t} = -\nabla J = - \nabla(\rho \vec{v})$$
Where the flux $J = \rho \vec{v}$ is defined in terms of the velocity of points in phase space, $v = (\dot{q},\dot{p})$. Combining the continuity expression with Hamilton's equations of motion:
$$\dot{p_i} = -\partial_{q_i} H \,\,\,\,\ \dot{q_i} = \partial_{p_i} H$$
$$\frac{\partial \rho(p,q,t)}{\partial t} + \sum_i \Big [ \frac{\partial \rho}{\partial q_i}\dot{q_i}+\frac{\partial \rho}{\partial p_i} \dot{p_i} \Big] + \rho \sum_i \Big [ \frac{\partial \dot{q_i}}{\partial q_i}+ \frac{\partial \dot{p_i}}{\partial p_i} \Big]=0$$
Where the last term is zero once we plug in Hamilton's equations. We thus arrive at a crucial conclusion: the phase space volume is preserved during conservative dynamics
$$\frac{\partial \rho(p,q,t)}{\partial t} + \sum_i \Big [ \frac{\partial \rho}{\partial q_i}\dot{q_i}+\frac{\partial \rho}{\partial p_i} \dot{p_i} \Big]=\frac{d \rho}{ dt} = 0$$
Furthermore, we see that the time dependence of the phase space probability density vanishes if it is a function of the Hamiltonian, $\rho = f(H)$
$$\frac{\partial \rho}{\partial t} = -\sum_i \Big [ \frac{\partial \rho}{\partial q_i}\dot{q_i}+\frac{\partial \rho}{\partial p_i}\dot{p_i} \Big] = -\{\rho, H\}$$
### Liouville's theorem illustrated
According to Liouville's theorem, a small phase space area element gets deformed under time evolution but preserves its volume. For example, assume the initial distribution is a rectangle in phase space (x, v)
$$x_0 - dx \le x \le x_0 + dx$$
$$v_0 - dv \le v \le v_0 + dv$$
As time progresses this rectangle will deform, but its area will not change (assuming $dx$ and $dv$ are sufficiently small, which ensures energy conservation)
```
import matplotlib.pyplot as plt
import numpy as np
import scipy as sci
from matplotlib.patches import Polygon # for making rectangles from four points
a = 1.0 # acceleration
x0, v0 = 0., 0. # center of initial phase space element
dx, dv = 0.1, 0.1 # (half of) width of initial phase space element
# Four corners of the initial rectangle in (x, v), listed counter-clockwise
p0 = np.array(((x0-dx,v0-dv),(x0-dx,v0+dv),(x0+dx,v0+dv),(x0+dx,v0-dv))) # initial phase space element
def propagate(p0, t, accel=None):
    """Propagate a phase space patch under constant acceleration.

    Args:
        p0: array of shape (n_points, 2) holding (x, v) phase space points.
        t: propagation time.
        accel: constant acceleration. Defaults to the module-level ``a`` so
            existing two-argument calls behave exactly as before; passing it
            explicitly makes the function self-contained and reusable.

    Returns:
        Array of shape (n_points, 2) with the propagated (x, v) points.
    """
    if accel is None:
        accel = a  # backward-compatible fallback to the notebook global
    x0, v0 = p0.T
    # Constant-acceleration kinematics: x(t) = x0 + v0*t + a*t^2/2, v(t) = v0 + a*t
    x = x0 + v0 * t + 0.5 * accel * t**2
    v = v0 + accel * t
    return np.column_stack((x, v))
fig, ax = plt.subplots(figsize=(9,3))
# Draw the propagated patch at t = 0, 1, 2, 3; the polygon shears over time
# but (by Liouville's theorem) its area stays constant.
for t in np.arange(4):
    p = propagate(p0,t)
    x, y = np.mean(p,axis=0)  # centroid of the patch, used to place the label
    ax.add_patch(Polygon(p))
    ax.text(x, y-0.3, f"t={t}")
ax.set_xlabel("Position x", fontsize=15)
ax.set_ylabel("Velocity v", fontsize=15)
ax.set_xlim(-0.5,5.5)
ax.set_ylim(-0.5,3.5)
```
### Hamiltonian, conservative dynamics in phase space
```
# --- Phase portrait 1: harmonic oscillator, H = (p^2 + q^2)/2 ---
# range of x and y grid
xmax = 5
ymax = 5
# make a grid of x and y values, Y = dot X
X, Y = np.meshgrid(np.arange(-xmax,xmax,.1), np.arange(-ymax,ymax,.1) )
H = 0.5*Y*Y +0.5*X*X #here is the Hamiltonian
#cs = plt.contour(X,Y,H,20,cmap='inferno')
#plt.clabel(cs,inline=1,fontsize=10)
plt.xlabel('q')
plt.ylabel('dq/dt')
plt.axis([-1.1*xmax, 1.1*xmax, -1.1*ymax, 1.1*ymax])
# Hamilton's equations define a vector field U,V: (q_dot, p_dot) = (p, -q)
U = Y
V = - X
Q = plt.streamplot(X,Y, U, V,density=1)
# --- Phase portrait 2: pendulum, H = p^2/2 - eps*cos(x) ---
# range of x and y grid
xmax = np.pi*2.0
ymax = 2
# make a grid of x and y values, Y = dot X
X, Y = np.meshgrid(np.arange(-xmax,xmax,.1),np.arange(-ymax,ymax,.1) )
epsilon=0.3
H = 0.5*Y*Y - epsilon*np.cos(X) #here is the Hamiltonian
# Hamilton's equations define a vector field U,V: (x_dot, p_dot) = (p, -eps*sin(x))
U = Y
V = -epsilon*np.sin(X)
#cs =plt.contour(X,Y,H,10,cmap='inferno')
#plt.clabel(cs,inline=1,fontsize=10)
plt.xlabel('x')
plt.ylabel('dx/dt')
plt.axis([-xmax, xmax, -ymax, ymax])
Q = plt.streamplot(X,Y, U, V,density=1) # plot the vector field
```
| github_jupyter |
# Accessing the Youtube API
This Notebook explores convenience functions for accessing the Youtube API.
Written by Leon Yin and Megan Brown
```
import os
import sys
import json
import datetime
import pandas as pd
# this is to import youtube_api from the py directory
sys.path.append(os.path.abspath('../'))
import youtube_api
from youtube_api import youtube_api_utils as utils
from runtimestamp.runtimestamp import runtimestamp
runtimestamp()  # print when/by whom this notebook was last run
# API key is read from the environment; returns None if YT_KEY is unset —
# presumably required by YoutubeDataApi below, so export it before running.
key = os.environ.get('YT_KEY')
```
We can go from Username to `channel_id`. `channel_id` is required to get uploaded videos, user metadata, and relationships like subscriptions and featured channels.
```
# Create the API client, then resolve a username to its channel id.
yt = youtube_api.YoutubeDataApi(key)
yt.get_channel_id_from_user('munchies')
channel_id = 'UCaLfMkkHhSA_LaCta0BzyhQ'  # the id returned above, hard-coded
```
We can collect channel-level metrics and metadata:
```
channel_meta = yt.get_channel_metadata(channel_id)
channel_meta
```
Note that `topic_ids` is a json serialized list.
```
channel_meta['topic_ids']
```
Note for some API calls that require a "playlist ID", you need to use the playlist id (from uploads or likes) rather than the `channel_id`.
```
playlist_id = channel_meta['playlist_id_uploads']
playlist_id
channel_id == playlist_id
```
For user uploads the channel ID's first two letters are replaced with "UU" (User Upload) and "LL" (Likes), <br>these relationships are captured in two helper-functions.<br> `yt.get_upload_playlist_id()` and `yt.get_liked_playlist_id()`
```
utils.get_upload_playlist_id(channel_id)
```
We can use the following function to get all the video IDs from any playlist id.<br>
`cutoff_date` can be used to filter out videos after a certain date and `stop_after_n_iterations` can be used for testing to return the first N * 50 video ids.
```
# All videos in the uploads playlist published after Jan 1, 2017,
# loaded into a DataFrame for inspection.
video_ids = yt.get_videos_from_playlist_id(playlist_id, published_after=datetime.datetime(2017,1,1))
df = pd.DataFrame(video_ids)
df.head()
```
Let's look at the data we can collect on a video level...
```
video_id = df['video_id'].tolist()
video_id[:2]
yt.get_video_metadata(video_id[0])
```
The function also works for a list of up to 50 video ids
```
# Batch metadata request (the API accepts up to 50 ids per call).
video_meta = yt.get_video_metadata(video_id)
len(video_id)
df_video_meta = pd.DataFrame(video_meta)
df_video_meta.head(2)
```
For establishing relationships you can list featured channels and subscriptions
```
yt.get_featured_channels(channel_id)
```
You can save on time by using a list of inputs for some api calls:
```
channel_ids = ['UCaLfMkkHhSA_LaCta0BzyhQ', 'UC6MFZAOHXlKK1FI7V0XQVeA']
yt.get_featured_channels(channel_ids)
```
Subscriptions can only done one channel at a time:
```
yt.get_subscriptions(channel_id)
```
Subscriptions can be more descriptive by setting the `descriptive` flag as True.
```
yt.get_subscriptions(channel_id)[:2]
```
You can also get the comments for a given video
```
yt.get_video_comments(video_id[0], max_results=2)
```
For more text we can get closed captions!
```
video_id = 'wmxDZeh8W34'
captions = yt.get_captions(video_id)
captions
```
You can also get the recommended videos for any given video
```
recommended_vids = yt.get_recommended_videos(video_id)
recommended_vids[:2]
```
| github_jupyter |
```
import h5py
import numpy as np
from sklearn import model_selection
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import tensorflow as tf
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Conv2D, UpSampling2D, MaxPooling2D, AveragePooling2D, Attention
from tensorflow.keras.layers import ELU, BatchNormalization, Reshape, Concatenate, Dropout, Add, Multiply
from utils import SeizureState, setup_tf, AttentionPooling, BiasedConv
val_path = 'PATH_TO_DATASET.h5'           # input EEG recordings (HDF5)
saved_predictions = 'PATH_TO_PREDICTIONS.h5'  # where per-sample probabilities are written
network_path = 'PATH_TO_NETWORK_WEIGHTS.h5'   # pre-trained U-Net weights
fs = 200          # sampling frequency in Hz
n_channels = 18   # EEG channels per recording
seizure = 'seiz'
background = 'bckg'
setup_tf()
# Load every recording and z-score it per channel.
with h5py.File(val_path, 'r') as f:
    file_names_test = []
    signals_test = []
    file_names_ds = f['filenames']
    signals_ds = f['signals']
    for i in range(len(signals_ds)):
        file_names_test.append(file_names_ds[i])
        # Stack to (time, channels) as float32
        data = np.asarray(np.vstack(signals_ds[i]).T, dtype=np.float32)
        mean = np.mean(data, axis=0)
        std = np.std(data, axis=0)
        # epsilon guards against division by zero on flat channels
        signals_test.append((data-mean)/(std+1e-8))
```
# Seizure detection
### Building U-Net
```
n_filters = 8
# Input: (time, channels, 1); time length is free (None) but must be a
# multiple of the total temporal downsampling factor (see prediction step).
input_seq = Input(shape=(None, n_channels, 1))
# --- Encoder: conv/BN/ELU stages, each followed by 4x temporal max-pooling;
# --- the pre-pool activations (lvl0..lvl4) are kept for skip connections.
x = Conv2D(filters=n_filters, kernel_size=(15, 1), strides=(1, 1), padding='same', activation=None)(input_seq)
x = BatchNormalization()(x)
lvl0 = ELU()(x)
x = MaxPooling2D(pool_size=(4, 1), padding='same')(lvl0)
x = Conv2D(filters=2*n_filters, kernel_size=(15, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
lvl1 = ELU()(x)
x = MaxPooling2D(pool_size=(4, 1), padding='same')(lvl1)
x = Conv2D(filters=4*n_filters, kernel_size=(15, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
lvl2 = ELU()(x)
x = MaxPooling2D(pool_size=(4, 1), padding='same')(lvl2)
x = Conv2D(filters=4*n_filters, kernel_size=(7, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
lvl3 = ELU()(x)
x = MaxPooling2D(pool_size=(4, 1), padding='same')(lvl3)
x = Conv2D(filters=8*n_filters, kernel_size=(3, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
lvl4 = ELU()(x)
x = MaxPooling2D(pool_size=(4, 1), padding='same')(lvl4)
x = Conv2D(filters=8*n_filters, kernel_size=(3, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
x = ELU()(x)
lvl5 = x
# --- Bottleneck: collapse the channel axis (pool over up to 20 channels),
# --- then two conv blocks with dropout.
x = MaxPooling2D(pool_size=(1, 20), padding='same')(lvl5)
x = Conv2D(filters=4*n_filters, kernel_size=(3, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
x = ELU()(x)
x = Dropout(rate=0.5)(x)
x = Conv2D(filters=4*n_filters, kernel_size=(3, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
x = ELU()(x)
x = Dropout(rate=0.5)(x)
# --- Decoder: 4x temporal upsampling; each skip connection is fused via the
# --- project-local AttentionPooling layer before concatenation.
up4 = UpSampling2D(size=(4, 1))(x)
att4 = AttentionPooling(filters=4*n_filters, channels=n_channels)([up4, lvl4])
x = Concatenate(axis=-1)([up4, att4])
x = Conv2D(filters=4*n_filters, kernel_size=(3, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
x = ELU()(x)
up3 = UpSampling2D(size=(4, 1))(x)
att3 = AttentionPooling(filters=4*n_filters, channels=n_channels)([up3, lvl3])
x = Concatenate(axis=-1)([up3, att3])
x = Conv2D(filters=4*n_filters, kernel_size=(7, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
x = ELU()(x)
up2 = UpSampling2D(size=(4, 1))(x)
att2 = AttentionPooling(filters=4*n_filters, channels=n_channels)([up2, lvl2])
x = Concatenate(axis=-1)([up2, att2])
x = Conv2D(filters=4*n_filters, kernel_size=(15, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
x = ELU()(x)
up1 = UpSampling2D(size=(4, 1))(x)
att1 = AttentionPooling(filters=4*n_filters, channels=n_channels)([up1, lvl1])
x = Concatenate(axis=-1)([up1, att1])
x = Conv2D(filters=4*n_filters, kernel_size=(15, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
x = ELU()(x)
up0 = UpSampling2D(size=(4, 1))(x)
att0 = AttentionPooling(filters=4*n_filters, channels=n_channels)([up0, lvl0])
x = Concatenate(axis=-1)([up0, att0])
x = Conv2D(filters=4*n_filters, kernel_size=(15, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
x = ELU()(x)
x = Conv2D(filters=4*n_filters, kernel_size=(15, 1), strides=(1, 1), padding='same', activation=None)(x)
x = BatchNormalization()(x)
x = ELU()(x)
# Head: per-timestep seizure probability in [0, 1]
output = Conv2D(filters=1, kernel_size=(15, 1), strides=(1, 1), padding='same', activation='sigmoid')(x)
unet = Model(input_seq, output)
unet.load_weights(network_path)
unet.summary()
```
### Prediction step
```
y_probas = []
# 4096//4 = 1024 = 4^5: the network applies five 4x temporal max-pools, so the
# input length must be a multiple of 1024 for pool/upsample shapes to match.
reduction = 4096//4
with tf.device('cpu:0'):
    for signal in signals_test:
        # Truncate each recording to the largest multiple of `reduction`
        signal = signal[:len(signal)//reduction*reduction, :]
        # Add batch and "image channel" axes; squeeze back to a 1-D
        # per-timestep probability trace
        prediction = unet.predict(signal[np.newaxis, :, :, np.newaxis])[0, :, 0, 0]
        y_probas.append(prediction)
```
# Saving predictions
```
# Variable-length dtypes: each recording's probability trace has its own length.
dt_fl = h5py.vlen_dtype(np.dtype('float32'))
dt_str = h5py.special_dtype(vlen=str)
with h5py.File(saved_predictions, 'w') as f:
    dset_signals = f.create_dataset('signals', (len(file_names_test),), dtype=dt_fl)
    dset_file_names = f.create_dataset('filenames', (len(file_names_test),), dtype=dt_str)
    # Store traces and filenames index-aligned so they can be joined later
    for i in range(len(file_names_test)):
        dset_signals[i] = y_probas[i]
        dset_file_names[i] = file_names_test[i]
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Recurrent Neural Networks (RNN) with Keras
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/keras/rnn">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/keras/rnn.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/keras/rnn.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
<td>
<a target="_blank" href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/keras/rnn.ipynb">
<img src="https://www.tensorflow.org/images/download_logo_32px.png" />
Download notebook</a>
</td>
</table>
Recurrent neural networks (RNN) are a class of neural networks that is powerful for modeling sequence data such as time series or natural language.
Schematically, a RNN layer uses a `for` loop to iterate over the timesteps of a sequence, while maintaining an internal state that encodes information about the timesteps it has seen so far.
The Keras RNN API is designed with a focus on:
- **Ease of use**: the built-in `tf.keras.layers.RNN`, `tf.keras.layers.LSTM`, `tf.keras.layers.GRU` layers enable you to quickly build recurrent models without having to make difficult configuration choices.
- **Ease of customization**: You can also define your own RNN cell layer (the inner part of the `for` loop) with custom behavior, and use it with the generic `tf.keras.layers.RNN` layer (the `for` loop itself). This allows you to quickly prototype different research ideas in a flexible way with minimal code.
## Setup
```
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import matplotlib.pyplot as plt
import numpy as np
try:
  # %tensorflow_version only exists in Colab.
  %tensorflow_version 2.x
except Exception:
  # Outside Colab the magic is undefined; a regular TF install is used instead.
  pass
import tensorflow as tf
from tensorflow.keras import layers
```
## Build a simple model
There are three built-in RNN layers in Keras:
1. `tf.keras.layers.SimpleRNN`, a fully-connected RNN where the output from previous timestep is to be fed to next timestep.
2. `tf.keras.layers.GRU`, first proposed in [Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation](https://arxiv.org/abs/1406.1078).
3. `tf.keras.layers.LSTM`, first proposed in [Long Short-Term Memory](https://www.bioinf.jku.at/publications/older/2604.pdf).
In early 2015, Keras had the first reusable open-source Python implementations of LSTM and GRU.
Here is a simple example of a `Sequential` model that processes sequences of integers, embeds each integer into a 64-dimensional vector, then processes the sequence of vectors using a `LSTM` layer.
```
# Minimal sequence model: embed integer tokens, summarize with an LSTM,
# classify into 10 classes.
model = tf.keras.Sequential()
# Add an Embedding layer expecting input vocab of size 1000, and
# output embedding dimension of size 64.
model.add(layers.Embedding(input_dim=1000, output_dim=64))
# Add a LSTM layer with 128 internal units.
model.add(layers.LSTM(128))
# Add a Dense layer with 10 units and softmax activation.
model.add(layers.Dense(10, activation='softmax'))
model.summary()
```
## Outputs and states
By default, the output of a RNN layer contain a single vector per sample. This vector is the RNN cell output corresponding to the last timestep, containing information about the entire input sequence. The shape of this output is `(batch_size, units)` where `units` corresponds to the `units` argument passed to the layer's constructor.
A RNN layer can also return the entire sequence of outputs for each sample (one vector per timestep per sample), if you set `return_sequences=True`. The shape of this output is `(batch_size, timesteps, units)`.
```
# Demonstrates return_sequences=True: the GRU emits one vector per timestep,
# which the following SimpleRNN consumes and reduces to its last output.
model = tf.keras.Sequential()
model.add(layers.Embedding(input_dim=1000, output_dim=64))
# The output of GRU will be a 3D tensor of shape (batch_size, timesteps, 256)
model.add(layers.GRU(256, return_sequences=True))
# The output of SimpleRNN will be a 2D tensor of shape (batch_size, 128)
model.add(layers.SimpleRNN(128))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
```
In addition, a RNN layer can return its final internal state(s). The returned states can be used to resume the RNN execution later, or [to initialize another RNN](https://arxiv.org/abs/1409.3215). This setting is commonly used in the encoder-decoder sequence-to-sequence model, where the encoder final state is used as the initial state of the decoder.
To configure a RNN layer to return its internal state, set the `return_state` parameter to `True` when creating the layer. Note that `LSTM` has 2 state tensors, but `GRU` only has one.
To configure the initial state of the layer, just call the layer with additional keyword argument `initial_state`.
Note that the shape of the state needs to match the unit size of the layer, like in the example below.
```
# Encoder-decoder example: the encoder LSTM's final (h, c) states seed the
# decoder LSTM via initial_state.
encoder_vocab = 1000
decoder_vocab = 2000
encoder_input = layers.Input(shape=(None, ))
encoder_embedded = layers.Embedding(input_dim=encoder_vocab, output_dim=64)(encoder_input)
# Return states in addition to output (LSTM has two state tensors: h and c)
output, state_h, state_c = layers.LSTM(
    64, return_state=True, name='encoder')(encoder_embedded)
encoder_state = [state_h, state_c]
decoder_input = layers.Input(shape=(None, ))
decoder_embedded = layers.Embedding(input_dim=decoder_vocab, output_dim=64)(decoder_input)
# Pass the 2 states to a new LSTM layer, as initial state
decoder_output = layers.LSTM(
    64, name='decoder')(decoder_embedded, initial_state=encoder_state)
output = layers.Dense(10, activation='softmax')(decoder_output)
model = tf.keras.Model([encoder_input, decoder_input], output)
model.summary()
```
## RNN layers and RNN cells
In addition to the built-in RNN layers, the RNN API also provides cell-level APIs. Unlike RNN layers, which processes whole batches of input sequences, the RNN cell only processes a single timestep.
The cell is the inside of the `for` loop of a RNN layer. Wrapping a cell inside a `tf.keras.layers.RNN` layer gives you a layer capable of processing batches of sequences, e.g. `RNN(LSTMCell(10))`.
Mathematically, `RNN(LSTMCell(10))` produces the same result as `LSTM(10)`. In fact, the implementation of this layer in TF v1.x was just creating the corresponding RNN cell and wrapping it in a RNN layer. However, using the built-in `GRU` and `LSTM` layers enables the use of CuDNN and you may see better performance.
There are three built-in RNN cells, each of them corresponding to the matching RNN layer.
- `tf.keras.layers.SimpleRNNCell` corresponds to the `SimpleRNN` layer.
- `tf.keras.layers.GRUCell` corresponds to the `GRU` layer.
- `tf.keras.layers.LSTMCell` corresponds to the `LSTM` layer.
The cell abstraction, together with the generic `tf.keras.layers.RNN` class, make it very easy to implement custom RNN architectures for your research.
## Cross-batch statefulness
When processing very long sequences (possibly infinite), you may want to use the pattern of **cross-batch statefulness**.
Normally, the internal state of a RNN layer is reset every time it sees a new batch (i.e. every sample seen by the layer is assumed to be independent from the past). The layer will only maintain a state while processing a given sample.
If you have very long sequences though, it is useful to break them into shorter sequences, and to feed these shorter sequences sequentially into a RNN layer without resetting the layer's state. That way, the layer can retain information about the entirety of the sequence, even though it's only seeing one sub-sequence at a time.
You can do this by setting `stateful=True` in the constructor.
If you have a sequence `s = [t0, t1, ... t1546, t1547]`, you would split it into e.g.
```
s1 = [t0, t1, ... t100]
s2 = [t101, ... t201]
...
s16 = [t1501, ... t1547]
```
Then you would process it via:
```python
lstm_layer = layers.LSTM(64, stateful=True)
for s in sub_sequences:
output = lstm_layer(s)
```
When you want to clear the state, you can use `layer.reset_states()`.
> Note: In this setup, sample `i` in a given batch is assumed to be the continuation of sample `i` in the previous batch. This means that all batches should contain the same number of samples (batch size). E.g. if a batch contains `[sequence_A_from_t0_to_t100, sequence_B_from_t0_to_t100]`, the next batch should contain `[sequence_A_from_t101_to_t200, sequence_B_from_t101_to_t200]`.
Here is a complete example:
```
# Cross-batch statefulness: with stateful=True the LSTM carries its internal
# state from one call to the next instead of resetting per batch.
paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)
lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)
output = lstm_layer(paragraph3)
# reset_states() will reset the cached state to the original initial_state.
# If no initial_state was provided, zero-states will be used by default.
lstm_layer.reset_states()
```
## Bidirectional RNNs
For sequences other than time series (e.g. text), it is often the case that a RNN model can perform better if it not only processes sequence from start to end, but also backwards. For example, to predict the next word in a sentence, it is often useful to have the context around the word, not only just the words that come before it.
Keras provides an easy API for you to build such bidirectional RNNs: the `tf.keras.layers.Bidirectional` wrapper.
```
# Bidirectional wrapper: runs the LSTM forward and backward and (by default)
# concatenates the two outputs, doubling the feature dimension.
model = tf.keras.Sequential()
model.add(layers.Bidirectional(layers.LSTM(64, return_sequences=True),
                               input_shape=(5, 10)))
model.add(layers.Bidirectional(layers.LSTM(32)))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
```
Under the hood, `Bidirectional` will copy the RNN layer passed in, and flip the `go_backwards` field of the newly copied layer, so that it will process the inputs in reverse order.
The output of the `Bidirectional` RNN will be, by default, the sum of the forward layer output and the backward layer output. If you need a different merging behavior, e.g. concatenation, change the `merge_mode` parameter in the `Bidirectional` wrapper constructor. For more details about `Bidirectional`, please check [the API docs](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/Bidirectional).
## Performance optimization and CuDNN kernels in TensorFlow 2.0
In Tensorflow 2.0, the built-in LSTM and GRU layers have been updated to leverage CuDNN kernels by default when a GPU is available. With this change, the prior `keras.layers.CuDNNLSTM/CuDNNGRU` layers have been deprecated, and you can build your model without worrying about the hardware it will run on.
Since the CuDNN kernel is built with certain assumptions, this means the layer **will not be able to use the CuDNN kernel if you change the defaults of the built-in LSTM or GRU layers**. E.g.:
- Changing the `activation` function from `tanh` to something else.
- Changing the `recurrent_activation` function from `sigmoid` to something else.
- Using `recurrent_dropout` > 0.
- Setting `unroll` to True, which forces LSTM/GRU to decompose the inner `tf.while_loop` into an unrolled `for` loop.
- Setting `use_bias` to False.
- Using masking when the input data is not strictly right padded (if the mask corresponds to strictly right padded data, CuDNN can still be used. This is the most common case).
For the detailed list of constraints, please see the documentation for the [LSTM](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/LSTM) and [GRU](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/GRU) layers.
### Using CuDNN kernels when available
Let's build a simple LSTM model to demonstrate the performance difference.
We'll use as input sequences the sequence of rows of MNIST digits (treating each row of pixels as a timestep), and we'll predict the digit's label.
```
batch_size = 64
# Each MNIST image batch is a tensor of shape (batch_size, 28, 28).
# Each input sequence will be of size (28, 28) (height is treated like time).
input_dim = 28
units = 64
output_size = 10  # labels are from 0 to 9
# Build the RNN model
def build_model(allow_cudnn_kernel=True):
    """Build an LSTM classifier over MNIST rows.

    allow_cudnn_kernel=True uses the fused `LSTM` layer (CuDNN-eligible on GPU);
    False wraps an LSTMCell in a generic RNN layer, which never uses CuDNN.
    Both variants have identical weights/architecture, so weights can be
    copied between them for the speed comparison below.
    """
    # CuDNN is only available at the layer level, and not at the cell level.
    # This means `LSTM(units)` will use the CuDNN kernel,
    # while RNN(LSTMCell(units)) will run on non-CuDNN kernel.
    if allow_cudnn_kernel:
        # The LSTM layer with default options uses CuDNN.
        lstm_layer = tf.keras.layers.LSTM(units, input_shape=(None, input_dim))
    else:
        # Wrapping a LSTMCell in a RNN layer will not use CuDNN.
        lstm_layer = tf.keras.layers.RNN(
            tf.keras.layers.LSTMCell(units),
            input_shape=(None, input_dim))
    model = tf.keras.models.Sequential([
        lstm_layer,
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dense(output_size, activation='softmax')]
    )
    return model
```
### Load MNIST dataset
```
# Load MNIST and scale pixel values to [0, 1]
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
sample, sample_label = x_train[0], y_train[0]  # kept for the inference demo below
```
### Create a model instance and compile it
We choose `sparse_categorical_crossentropy` as the loss function for the model. The output of the model has shape of `[batch_size, 10]`. The target for the model is a integer vector, each of the integer is in the range of 0 to 9.
```
# Train the CuDNN-eligible variant; sparse labels (ints 0-9), softmax outputs.
model = build_model(allow_cudnn_kernel=True)
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
model.fit(x_train, y_train,
          validation_data=(x_test, y_test),
          batch_size=batch_size,
          epochs=5)
```
### Build a new model without CuDNN kernel
```
# Same architecture without CuDNN; reuse the trained weights so the only
# difference measured is kernel speed.
slow_model = build_model(allow_cudnn_kernel=False)
slow_model.set_weights(model.get_weights())
slow_model.compile(loss='sparse_categorical_crossentropy',
                   optimizer='sgd',
                   metrics=['accuracy'])
slow_model.fit(x_train, y_train,
               validation_data=(x_test, y_test),
               batch_size=batch_size,
               epochs=1)  # We only train for one epoch because it's slower.
```
As you can see, the model built with CuDNN is much faster to train compared to the model that use the regular TensorFlow kernel.
The same CuDNN-enabled model can also be use to run inference in a CPU-only environment. The `tf.device` annotation below is just forcing the device placement. The model will run on CPU by default if no GPU is available.
You simply don't have to worry about the hardware you're running on anymore. Isn't that pretty cool?
```
# The CuDNN-trained weights also run on CPU; tf.device just forces placement.
with tf.device('CPU:0'):
    cpu_model = build_model(allow_cudnn_kernel=True)
    cpu_model.set_weights(model.get_weights())
    result = tf.argmax(cpu_model.predict_on_batch(tf.expand_dims(sample, 0)), axis=1)
    print('Predicted result is: %s, target result is: %s' % (result.numpy(), sample_label))
    plt.imshow(sample, cmap=plt.get_cmap('gray'))
```
## RNNs with list/dict inputs, or nested inputs
Nested structures allow implementers to include more information within a single timestep. For example, a video frame could have audio and video input at the same time. The data shape in this case could be:
`[batch, timestep, {"video": [height, width, channel], "audio": [frequency]}]`
In another example, handwriting data could have both coordinates x and y for the current position of the pen, as well as pressure information. So the data representation could be:
`[batch, timestep, {"location": [x, y], "pressure": [force]}]`
The following code provides an example of how to build a custom RNN cell that accepts such structured inputs.
### Define a custom cell that support nested input/output
```
# Named structures for the cell's nested per-timestep input and state
NestedInput = collections.namedtuple('NestedInput', ['feature1', 'feature2'])
NestedState = collections.namedtuple('NestedState', ['state1', 'state2'])

class NestedCell(tf.keras.layers.Layer):
    """RNN cell whose input, state, and output are nested structures.

    feature1 is a vector (batch, input_1); feature2 is a matrix
    (batch, input_2, input_3). The matching states accumulate the per-step
    outputs (state = previous state + output).
    """
    def __init__(self, unit_1, unit_2, unit_3, **kwargs):
        self.unit_1 = unit_1
        self.unit_2 = unit_2
        self.unit_3 = unit_3
        # state_size / output_size declare the nested shapes to the RNN wrapper
        self.state_size = NestedState(state1=unit_1,
                                      state2=tf.TensorShape([unit_2, unit_3]))
        self.output_size = (unit_1, tf.TensorShape([unit_2, unit_3]))
        super(NestedCell, self).__init__(**kwargs)

    def build(self, input_shapes):
        """Create one kernel per input branch.

        expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]
        """
        input_1 = input_shapes.feature1[1]
        input_2, input_3 = input_shapes.feature2[1:]
        self.kernel_1 = self.add_weight(
            shape=(input_1, self.unit_1), initializer='uniform', name='kernel_1')
        self.kernel_2_3 = self.add_weight(
            shape=(input_2, input_3, self.unit_2, self.unit_3),
            initializer='uniform',
            name='kernel_2_3')

    def call(self, inputs, states):
        """One timestep: linear transform each branch, add to its state.

        inputs should be in [(batch, input_1), (batch, input_2, input_3)]
        state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]
        """
        input_1, input_2 = tf.nest.flatten(inputs)
        s1, s2 = states
        output_1 = tf.matmul(input_1, self.kernel_1)
        # einsum contracts the (i, j) input axes against the 4-D kernel
        output_2_3 = tf.einsum('bij,ijkl->bkl', input_2, self.kernel_2_3)
        state_1 = s1 + output_1
        state_2_3 = s2 + output_2_3
        output = [output_1, output_2_3]
        new_states = NestedState(state1=state_1, state2=state_2_3)
        return output, new_states
```
### Build a RNN model with nested input/output
Let's build a Keras model that uses a `tf.keras.layers.RNN` layer and the custom cell we just defined.
```
# Output/state dimensions for the nested cell.
unit_1 = 10
unit_2 = 20
unit_3 = 30

# Per-timestep input sizes: feature1 is a vector, feature2 a 2-D plane.
input_1 = 32
input_2 = 64
input_3 = 32

batch_size = 64
num_batch = 100
timestep = 50

# Wrap the custom cell in an RNN layer and build a two-input functional model.
cell = NestedCell(unit_1, unit_2, unit_3)
rnn = tf.keras.layers.RNN(cell)

# None = variable number of timesteps.
inp_1 = tf.keras.Input((None, input_1))
inp_2 = tf.keras.Input((None, input_2, input_3))

outputs = rnn(NestedInput(feature1=inp_1, feature2=inp_2))
model = tf.keras.models.Model([inp_1, inp_2], outputs)

model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
```
### Train the model with randomly generated data
Since there isn't a good candidate dataset for this model, we use random Numpy data for demonstration.
```
# Random Numpy data just for demonstration; shapes follow the model definition
# above (batch*num_batch samples, `timestep` steps per sequence).
input_1_data = np.random.random((batch_size * num_batch, timestep, input_1))
input_2_data = np.random.random((batch_size * num_batch, timestep, input_2, input_3))
target_1_data = np.random.random((batch_size * num_batch, unit_1))
target_2_data = np.random.random((batch_size * num_batch, unit_2, unit_3))
input_data = [input_1_data, input_2_data]
target_data = [target_1_data, target_2_data]
model.fit(input_data, target_data, batch_size=batch_size)
```
With the Keras `tf.keras.layers.RNN` layer, you are only expected to define the math logic for each individual step within the sequence, and the `tf.keras.layers.RNN` layer will handle the sequence iteration for you. It's an incredibly powerful way to quickly prototype new kinds of RNNs (e.g. an LSTM variant).
For more details, please visit the [API docs](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/RNN).
| github_jupyter |
# Daqss API Tutorial
## Overview
An application program interface (API) is a set of routines, protocols, and tools for building software applications. A good API makes it easier to develop a program by providing the building blocks. A programmer then puts those blocks together.
The Daqss API provides an easy way to read and write data to/from the remote server. It is protected by key/password combinations and allows Users read-only access. The following describes how to use the Python wrapper that is built on top of the Daqss API.
## Contents
1. [Installation](#installation)
2. [API Credentials](#api-credentials)
3. [API Endpoints](#api-endpoints)
4. [Using the Python API Wrapper](#how-to-use)
<a id="#installation"></a>
## Installation
To install the python-daqss python API wrapper, download from the Github repository here, or clone the repository to your computer.
### Download and unzip
>>> wget https://github.com/dhhagan/python-daqss/archive/master.zip
>>> unzip master.zip
>>> cd python-daqss/
>>> sudo python setup.py install
### Install via git
>>> git clone https://github.com/dhhagan/python-daqss.git
>>> cd python-daqss/
>>> sudo python setup.py install
You should now have the python API wrapper installed on your computer.
<a id="#api-credentials"></a>
## API Credentials
The Daqss API is protected using a key, password system that prevents unwanted people from injecting data into the database, or deleting entries. In order to use the API to communicate with the database, you must first request your own set of API credentials.
As of right now, you must email david@davidhhagan.com for API credentials!
## API Endpoints
API endpoints at their most basic level, share a common structure; essentially, you send a url that is formatted in a specific way, accompanied by your credentials, and receive data (or an error!) in return in [json](http://json.org/) format. All of the available API endpoints are laid out in detail in the [full documentation](#).
For example, say we wanted to request the data for one of the alphasense gas sensors. The documentation looks something like this:
### Resource URL
* **GET api/< api-version >/alphasense/< sn >/data/date/< basedate >/< period >**
* **GET api/< api-version >/alphasense/< sn >/data/date/< basedate >/< enddate >**
| api-version | The API version. Currently 1.0 |
|:-|:-|
| sn | The serial number for the alphasense sensor of interest|
| basedate | The beginning date for the data you want to receive in the format **YYYY-MM-DD** |
| period | The period (length) for which you want data beginning at the basedate. One of **1d, 2w, 4m, 1y** |
| enddate | The ending date for the data you want to receive in the format **YYYY-MM-DD** |
<br />
Thus, once you build the resource url, you might end up with something like this: ** api/v1.0/alphasense/A123/data/date/2015-01-01/1w**
## Using the python API wrapper
Luckily, a python wrapper has been written around the Daqss API to make things easy for everyone!
```
# Client library wrapping the Daqss REST API.
import daqss
from daqss import Daqss

import pandas as pd

# NOTE(review): API credentials are hard-coded in a shared notebook; treat them
# as compromised and load from an environment variable or config file instead.
key = 'DHDNAG3E43AV'
pswd = 'S9L5NNDLKAJU'

print ("daqss library version: {0}".format(daqss.__version__))
```
### Set up the class instance of Daqss
```
# Set up an instance of the class Daqss with keyword arguments key and password
api = Daqss(key, pswd)

# Ping the remote server to confirm the credentials/connection work
api.ping()
```
### Your first API call
To begin, let's try grabbing all of the nodes from the database. Instead of worrying about the API endpoint, we can just call `api.get_nodes()` and you will get what you desire!
Everything is in json format, so you will need to learn how to deal with that..don't worry, it's easy!
```
# Fetch all nodes from the database; returns (HTTP status code, json body).
status, req = api.get_nodes()
print ("The Status Code: {0}".format(status))
print ("The response: \n")

# Display just the list of nodes from the json response.
req['nodes']
```
### Making the data more usable
Let's take advantage of the amazing library [pandas]() and DataFrame's so we can easily manipulate the data and export to csv if we want to.
We will do so by reading the `nodes` portion of the json response into a pandas DataFrame. All of the column names and everything should be automatically set.
```
# Read the json 'nodes' list into a DataFrame (column names inferred from keys)
df = pd.DataFrame.from_dict(req['nodes'])

# Print the info
df.info()
```
### Exporting
Now we are going to export this data to a csv for whatever reason!
```
df.to_csv("Nodes.csv")
```
## Getting the Data!
Now, let's download some data and export it. We are going to download one month's worth of data for the alphasense sensor associated with Node 3. From the config file, we know that the SN for this O3 sensor is **203490202**. According to the documentation for the API wrapper, we can get the alphasense data using the following api call:
**get_alphasense_data(self, sn, basedate = None, enddate = None, period = None)**
Let's grab the O3 data for the time period of 1 month beginning on May 1, 2015.
```
# Download alphasense data for the Node 3 O3 sensor (SN 203490202).
# NOTE(review): the surrounding text says "1 month" from 2015-05-01, but the
# period here is '6m' — and '6m' is not in the documented set (1d, 2w, 4m, 1y).
# Confirm the intended period.
status, data = api.get_alphasense_data('203490202', basedate = '2015-05-01', period='6m')

# read into a dataframe
df = pd.DataFrame.from_dict(data['data'])

# Re-index so we have a nice datetime index to work with
df.index = pd.to_datetime(df['timestamp'])

# Export to csv!
df.to_csv("O3 Node 3 May 2015.csv")

# Print the info
df.info()

# Same workflow for one week of RHT (relative humidity / temperature) data.
status, data = api.get_rht_data('RHT001', basedate = '2015-05-01', period = '1w')

# read into a dataframe
df = pd.DataFrame.from_dict(data['data'])

# Re-index so we have a nice datetime index to work with
df.index = pd.to_datetime(df['timestamp'])

# Print the info
df.info()
```
| github_jupyter |
# Interpret Models
You can use Azure Machine Learning to interpret a model by using an *explainer* that quantifies the amount of influence each feature contributes to the predicted label. There are many common explainers, each suitable for different kinds of modeling algorithm; but the basic approach to using them is the same.
## Install SDK packages
Let's start by ensuring that you have the latest version of the Azure ML SDK installed, including the *explain* optional package. In addition, you'll install the Azure ML Interpretability library. You can use this to interpret many typical kinds of model, even if they haven't been trained in an Azure ML experiment or registered in an Azure ML workspace.
```
# Latest Azure ML SDK with the optional 'explain' extras, plus the standalone
# interpretability package used later in the notebook.
!pip install --upgrade azureml-sdk[notebooks,explain]
!pip install --upgrade azureml-interpret
```
## Explain a model
Let's start with a model that is trained outside of Azure Machine Learning - Run the cell below to train a decision tree classification model.
```
# Train a baseline decision-tree diabetes classifier (outside Azure ML on
# purpose) and report hold-out accuracy and AUC.
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve

# load the diabetes dataset
print("Loading Data...")
data = pd.read_csv('data/diabetes.csv')

# Feature columns and human-readable class names.
features = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']
labels = ['not-diabetic', 'diabetic']

# Separate features and labels
X = data[features].values
y = data['Diabetic'].values

# Hold out 30% of the rows for evaluation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)

# Train a decision tree model
print('Training a decision tree model')
model = DecisionTreeClassifier().fit(X_train, y_train)

# calculate accuracy: fraction of correct hold-out predictions
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)

# calculate AUC from the positive-class probabilities
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test, y_scores[:, 1])
print('AUC: ' + str(auc))

print('Model trained.')
```
The training process generated some model evaluation metrics based on a hold-back validation dataset, so you have an idea of how accurately it predicts; but how do the features in the data influence the prediction?
### Get an explainer for the model
Let's get a suitable explainer for the model from the Azure ML interpretability library you installed earlier. There are many kinds of explainer. In this example you'll use a *Tabular Explainer*, which is a "black box" explainer that can be used to explain many kinds of model by invoking an appropriate [SHAP](https://github.com/slundberg/shap) model explainer.
```
from interpret.ext.blackbox import TabularExplainer

# Black-box SHAP-based explainer over the trained model and its training data;
# "features" and "classes" fields are optional
tab_explainer = TabularExplainer(model,
                                 X_train,
                                 features=features,
                                 classes=labels)
print(tab_explainer, "ready!")
```
### Get *global* feature importance
The first thing to do is try to explain the model by evaluating the overall *feature importance* - in other words, quantifying the extent to which each feature influences the prediction based on the whole training dataset.
```
# Explain the model globally: rank features by overall importance.
# you can use the training data or the test data here
global_tab_explanation = tab_explainer.explain_global(X_train)

# Print features from most to least important.
global_tab_feature_importance = global_tab_explanation.get_feature_importance_dict()
for feature_name, weight in global_tab_feature_importance.items():
    print(feature_name, ":", weight)
```
The feature importance is ranked, with the most important feature listed first.
### Get *local* feature importance
So you have an overall view, but what about explaining individual observations? Let's generate *local* explanations for individual predictions, quantifying the extent to which each feature influenced the decision to predict each of the possible label values. In this case, it's a binary model, so there are two possible labels (non-diabetic and diabetic); and you can quantify the influence of each feature for each of these label values for individual observations in a dataset. You'll just evaluate the first two cases in the test dataset.
```
# Get the observations we want to explain (the first two)
X_explain = X_test[0:2]

# Get predictions
predictions = model.predict(X_explain)

# Get local explanations
local_tab_explanation = tab_explainer.explain_local(X_explain)

# Get feature names and importance for each possible label
local_tab_features = local_tab_explanation.get_ranked_local_names()
local_tab_importance = local_tab_explanation.get_ranked_local_values()

# Outer loop: one block per class label; then per observation, per feature.
for l in range(len(local_tab_features)):
    print('Support for', labels[l])
    label = local_tab_features[l]
    for o in range(len(label)):
        print("\tObservation", o + 1)
        feature_list = label[o]
        total_support = 0
        for f in range(len(feature_list)):
            print("\t\t", feature_list[f], ':', local_tab_importance[l][o][f])
            total_support += local_tab_importance[l][o][f]
        # Per-feature supports sum to the overall support for this prediction.
        print("\t\t ----------\n\t\t Total:", total_support, "Prediction:", labels[predictions[o]])
```
## Adding explainability to a model training experiment
As you've seen, you can generate explanations for models trained outside of Azure Machine Learning; but when you use experiments to train and register models in your Azure Machine Learning workspace, you can generate model explanations and log them.
Run the code in the following cell to connect to your workspace.
> **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.
```
import azureml.core
from azureml.core import Workspace

# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
```
### Train and explain a model using an experiment
OK, let's create an experiment and put the files it needs in a local folder - in this case we'll just use the same CSV file of diabetes data to train the model.
```
import os, shutil
from azureml.core import Experiment

# Create a folder for the experiment files
experiment_folder = 'diabetes_train_and_explain'
os.makedirs(experiment_folder, exist_ok=True)

# Copy the data file into the experiment folder so the remote run can read it
shutil.copy('data/diabetes.csv', os.path.join(experiment_folder, "diabetes.csv"))
```
Now we'll create a training script that looks similar to any other Azure ML training script except that it includes the following features:
- The same libraries to generate model explanations we used before are imported and used to generate a global explanation
- The **ExplanationClient** library is used to upload the explanation to the experiment output
```
%%writefile $experiment_folder/diabetes_training.py
# Import libraries
import pandas as pd
import numpy as np
import joblib
import os
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# Import Azure ML run library
from azureml.core.run import Run
# Import libraries for model explanation
from azureml.interpret import ExplanationClient
from interpret.ext.blackbox import TabularExplainer
# Get the experiment run context
run = Run.get_context()
# load the diabetes dataset
print("Loading Data...")
data = pd.read_csv('diabetes.csv')
features = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']
labels = ['not-diabetic', 'diabetic']
# Separate features and labels
X, y = data[features].values, data['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a decision tree model
print('Training a decision tree model')
model = DecisionTreeClassifier().fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
run.log('AUC', np.float(auc))
os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=model, filename='outputs/diabetes.pkl')
# Get explanation
explainer = TabularExplainer(model, X_train, features=features, classes=labels)
explanation = explainer.explain_global(X_test)
# Get an Explanation Client and upload the explanation
explain_client = ExplanationClient.from_run(run)
explain_client.upload_model_explanation(explanation, comment='Tabular Explanation')
# Complete the run
run.complete()
```
Now you can run the experiment. Note that the **azureml-interpret** library is included in the training environment so the script can create a **TabularExplainer** and use the **ExplainerClient** class.
```
from azureml.core import Experiment, ScriptRunConfig, Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.widgets import RunDetails

# Create a Python environment for the experiment
explain_env = Environment("explain-env")

# Create a set of package dependencies (including the azureml-interpret package
# the training script needs to build and upload explanations)
packages = CondaDependencies.create(conda_packages=['scikit-learn','pandas','pip'],
                                    pip_packages=['azureml-defaults','azureml-interpret'])
explain_env.python.conda_dependencies = packages

# Create a script config
script_config = ScriptRunConfig(source_directory=experiment_folder,
                                script='diabetes_training.py',
                                environment=explain_env)

# submit the experiment and block until it finishes
experiment_name = 'diabetes_train_and_explain'
experiment = Experiment(workspace=ws, name=experiment_name)
run = experiment.submit(config=script_config)
RunDetails(run).show()
run.wait_for_completion()
```
## Retrieve the feature importance values
With the experiment run completed, you can use the **ExplanationClient** class to retrieve the feature importance from the explanation registered for the run.
```
from azureml.interpret import ExplanationClient

# Get the feature explanations uploaded by the training script
client = ExplanationClient.from_run(run)
engineered_explanations = client.download_model_explanation()
feature_importances = engineered_explanations.get_feature_importance_dict()

# Overall feature importance (ranked, most important first)
print('Feature\tImportance')
for key, value in feature_importances.items():
    print(key, '\t', value)
```
## View the model explanation in Azure Machine Learning studio
You can also click the **View run details** link in the Run Details widget to see the run in Azure Machine Learning studio, and view the **Explanations** tab. Then:
1. Select the **Tabular Explanation** explainer.
2. View the **Global Importance** chart, which shows the overall global feature importance.
3. View the **Summary Importance** chart, which shows each data point from the test data in a *swarm*, *violin*, or *box* plot.
4. Select an individual point to see the **Local Feature Importance** for the individual prediction for the selected data point.
**More Information**: For more information about using explainers in Azure ML, see [the documentation](https://docs.microsoft.com/azure/machine-learning/how-to-machine-learning-interpretability).
| github_jupyter |
## Pipelines
Pipeline can be used to chain multiple estimators into one. This is useful as there is often a fixed sequence of steps in processing the data, for example feature selection, normalization and classification. Pipeline serves two purposes here:
* Convenience: You only have to call fit and predict once on your data to fit a whole sequence of estimators.
* Joint parameter selection: You can grid search over parameters of all estimators in the pipeline at once.
All estimators in a pipeline, except the last one, must be transformers (i.e. must have a transform method). The last estimator may be any type (transformer, classifier, etc.).
```
from sklearn.pipeline import Pipeline

# IPython help syntax (not plain Python): displays the Pipeline docstring.
Pipeline?

from sklearn.svm import SVC
from sklearn.decomposition import PCA

# Each step is a (name, estimator) pair; all but the last must be transformers.
estimators = [('reduce_dim', PCA(n_components=2)), ('clf', SVC())]
pipe = Pipeline(estimators)
pipe

from sklearn.datasets import load_iris
X, y = load_iris(return_X_y=True)

# Notice no need to PCA the Xs in the score!
pipe.fit(X, y).score(X, y)
```
The utility function make_pipeline is a shorthand for constructing pipelines; it takes a variable number of estimators and returns a pipeline, filling in the names automatically:
```
from sklearn.pipeline import make_pipeline
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import Binarizer

# make_pipeline names the steps automatically ('binarizer', 'multinomialnb').
# NOTE(review): the result is only displayed by the notebook, not assigned.
make_pipeline(Binarizer(), MultinomialNB())

# Steps can be inspected by position or by name.
pipe.steps[0]
pipe.named_steps['reduce_dim']

# Nested parameters use the <step>__<param> syntax.
pipe.set_params(clf__C=10)

from sklearn.model_selection import GridSearchCV
params = dict(reduce_dim__n_components=[2, 5, 10],
              clf__C=[0.1, 10, 100])
grid_search = GridSearchCV(pipe, param_grid=params)

# Whole steps can also be swapped out (or disabled with None) in a grid search.
# NOTE(review): this rebinds grid_search, discarding the search defined above.
from sklearn.linear_model import LogisticRegression
params = dict(reduce_dim=[None, PCA(5), PCA(10)],
              clf=[SVC(), LogisticRegression()],
              clf__C=[0.1, 10, 100])
grid_search = GridSearchCV(pipe, param_grid=params)
```
## Feature Union
FeatureUnion combines several transformer objects into a new transformer that combines their output. A FeatureUnion takes a list of transformer objects. During fitting, each of these is fit to the data independently. For transforming data, the transformers are applied in parallel, and the sample vectors they output are concatenated end-to-end into larger vectors.
FeatureUnion serves the same purposes as Pipeline - convenience and joint parameter estimation and validation.
FeatureUnion and Pipeline can be combined to create complex models.
(A FeatureUnion has no way of checking whether two transformers might produce identical features. It only produces a union when the feature sets are disjoint; making sure they are is the caller's responsibility.)
```
from sklearn.pipeline import FeatureUnion
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA

# Fit a linear PCA and a kernel PCA in parallel; their outputs are concatenated.
estimators = [('linear_pca', PCA()), ('kernel_pca', KernelPCA())]
combined = FeatureUnion(estimators)
combined

combined.fit_transform(X).shape

# Individual transformers can be disabled by setting them to None.
combined.set_params(kernel_pca=None)
combined.fit_transform(X).shape
```
| github_jupyter |

# PyTorch
PyTorch is an open source machine learning library for Python, based on Torch, used for applications such as natural language processing. It is primarily developed by Facebook's artificial-intelligence research group, and Uber's "Pyro" software for probabilistic programming is built on it.
- https://pytorch.org/
- https://github.com/pytorch/pytorch

PyTorch's predecessor was Torch, a long-established tensor library for manipulating multidimensional matrix data, widely used in machine learning and other math-intensive applications. Because Torch used the Lua language, its user base remained very small; PyTorch's return with a Python interface quickly won it a large number of users. PyTorch offers two high-level features:
- Using powerful GPU-accelerated Tensor calculations (like numpy)
- Built on deep neural networks based on the autograd system,
there are usually two reasons for using PyTorch:
- As an alternative to numpy, Use powerful GPU acceleration;
- Use it as a deep learning research platform that provides maximum flexibility and speed
### As a Python-first dynamic graph framework, PyTorch has the following features:
* PyTorch doesn't simply bind Python to the overall C++ framework. It's built on top of Python. You can use PyTorch as easily as numpy/scipy/scikit-learn, or you can use your favorite libraries and packages in PyTorch.
* PyTorch's design philosophy is linear, intuitive and easy to use. When you need to execute a line of code, it will execute faithfully. PyTorch has no asynchronous worldview. When you open the debugger, or receive error codes and stack traces, you will find it easy to understand the information. The Stack-trace point will point directly to the exact location of the code definition. We don't want you to waste time when debugging, because of incorrect pointing.
* PyTorch has a lightweight framework that integrates various acceleration libraries such as Intel MKL, NVIDIA's CuDNN and NCCL to optimize speed. At its core, its CPU and GPU Tensor and neural network backends (TH, THC, THNN, THCUNN) are written as stand-alone libraries with the C99 API.
### Installation
PyTorch is very easy to install and can be installed with Anaconda or with pip.
* Install using conda
<br><code>conda install pytorch torchvision cuda80 -c pytorch</code>
* Using Pip
<br><code>pip3 install http://download.pytorch.org/whl/cu80/torch-0.4.1-cp36-cp36m-linux_x86_64.whl
pip3 install torchvision</code>
| github_jupyter |
# Multi-model metadata generation
> experiment in combining text and tabular models to generate web archive metadata
- toc: true
- badges: false
- comments: true
- categories: [metadata, multi-model]
- search_exclude: false
# Learning from multiple input types
Deep learning models usually take one type of input (image, text etc.) to predict output labels (category, entities etc). This usually makes sense if the data you are using to make predictions contains a lot of information. i.e. a chunk of text from a movie review or an image.
Recently I have been playing around with a Website Classification Dataset from the UK web archive. The dataset is derived from a manually curated web archive which contains a primary and secondary category for each web page. The UK web archive has made a [dataset](https://data.webarchive.org.uk/opendata/ukwa.ds.1/classification/) available based on this archive which contains the manually classified subject categories alongside the page URL and the page title.
As part of playing around with this dataset I was keen to see if a multi-input model would work well. In this case exploring a model that takes both text and tabular data as input. A preview of the data:
```
#hide_input
import pandas as pd

tsv ='https://gist.githubusercontent.com/davanstrien/5e22b725046eddc2f1ee06b108f27e48/raw/71426e6b92c7fa98140a95728a5ea55171b948cd/classification.tsv'
# `error_bad_lines` was deprecated in pandas 1.3 and removed in 2.0;
# on_bad_lines='skip' keeps the old "silently drop malformed rows" behaviour.
df = pd.read_csv(tsv, on_bad_lines='skip', index_col=0)
df.head()
```
Based on this data the UK web archive are interested:
>"in understanding whether high-level metadata like this can be used to train an appropriate automatic classification system so that we might use this manually generated dataset to partially automate the categorisation of our larger archives."
This is going to be fairly tricky but offers a nice excuse to try to use models with multiple inputs to predict our categories.
## Looking at the data
Taking a closer look at the data:
```
#hide_input
tsv = 'https://gist.githubusercontent.com/davanstrien/5e22b725046eddc2f1ee06b108f27e48/raw/71426e6b92c7fa98140a95728a5ea55171b948cd/classification.tsv'
# `error_bad_lines` was deprecated in pandas 1.3 and removed in 2.0;
# on_bad_lines='skip' keeps the old "silently drop malformed rows" behaviour.
df = pd.read_csv(tsv, on_bad_lines='skip')
```
### Unique primary categories
```
len(df['Primary Category'].unique())
```
### Unique secondary categories
```
len(df['Secondary Category'].unique())
```
Predicting 104 different labels is going to be pretty difficult so I've only used 'Primary Category' as the ```y``` target. What is the distribution of these categories like?
```
#hide_input
# Class distribution of the target -- heavily skewed towards a few categories.
df['Primary Category'].value_counts()
```
😬 We also have a fairly skewed datasets. I could drop some of rows which don't occur often but since the main objective here is to see if we can use a multi-input model we'll leave the data as it is for now.
# Multi-input model
The rest of the notebook will describe some experiments with using [fastai](https://docs.fast.ai/) to create a model which takes tabular and text data as an input. The aim here wasn't for me to create the best model but get my head around how to combine models. I heavily relied on some existing [notebooks](https://nbviewer.jupyter.org/gist/joshfp/b62b76eae95e6863cb511997b5a63118/5.full-deep-learning.ipynb), kaggle [writeup](https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/89491) and forum posts on the [fastai forums](forums.fast.ai/).
## Tabular model
In the dataset above we start of with two columns of data which can be used as inputs for the model. The title is fairly obviously something which we can treat like other text inputs. The URL is a little less obvious. It could be treated as a text input but an alternative is to treat a URL as parts which each contain some information which could be useful for our model.
```
#hide_input
# Sample once and print distinct rows. The original called .sample(10) for each
# print (nondeterministic per line) and reused index [3] twice, so the "three"
# example URLs could repeat.
example_urls = df.URL.sample(10).to_list()
print(example_urls[3])
print(example_urls[4])
print(example_urls[5])
```
Each part of the URL could be split into smaller parts
```
#hide_input
# Splitting on '.' breaks a URL into components (host, domain, TLD, ...).
print(df.URL.sample(10).to_list()[3].split('.'))
```
Whether a url has '.org' or '.uk' or '.com' could be meaningful for predicting our categories (it might also not be meaningful). It also offers us a way of taking the URLs and composing it into a format which looks more tabular.
```
#hide_input
# Pre-split URL columns prepared offline; preview the tabularised URL parts.
csv ='https://gist.githubusercontent.com/davanstrien/5e22b725046eddc2f1ee06b108f27e48/raw/4c2a27772bf4d959bf3e58cfa8de9e0b9be69ca7/03_classification_valid_train.csv'
df = pd.read_csv(csv, index_col=0)
df[['scheme','url1','url3','url4','url5']].sample(5)
```
So far I've only done this very crudely. I suspect tidying up this part of the data will help improve things. At this point though we have something which is a little more tabular looking we can pass to ```fastai.tabular``` learner. Now we have some 'categories' rather than unique urls.
```
# Cardinality of the split URL components (how many distinct values each has).
for column in ('url3', 'url4'):
    print(len(df[column].unique()))
```
## How does this tabular model do?
Once some preprocessing of the url has been done we train a model using the tabular learner. I didn't do much to try to optimize this model. Tracking best ```f2``` score we end up with:
```Better model found at epoch 36 with f_beta value: 0.17531482875347137``` and an accuracy of ```0.334121```
## How well does a text model do?
Next I tried training using the title field in a NLP model. I tried a few things here.
### SentencePiece tokenization
By default fastai uses SpaCy to do tokenization with a few additional special tokens added by fastai. I wanted to see if using [SentencePiece](https://github.com/google/sentencepiece) would work better for processing title fields. SentencePiece allows for various sub-word tokenizations. This can be useful for agglutinative languages but could also be useful when you have a lot of out-of-vocabulary words in your corpus. I wanted to see if this was also useful for processing titles since these may contain domain-specific terms. I only tried using SentencePiece with 'unigram' tokenization. The best score I got for this was:
```Better model found at epoch 1 with f_beta value: 0.21195338666439056.```
### Default SpaCy tokenization
I compared the above to using the default fastai tokenizer which uses SpaCy. In this case the default approach worked better. This is probably because we didn't have a large pre-trained model using the SentencePiece tokenization to use as a starting point. The best score I got for this model was:
```Better model found at epoch 27 with f_beta value: 0.33327043056488037.```
### Using the URL as text input
I wanted to do a quick comparison to the tabular model and use the URL as a text input instead. In this case I used SentencePiece with byte-pair-encoding (BPE). The best score in this case was:
```Better model found at epoch 3 with f_beta value: 0.2568161189556122.```
This might end up being a better approach compared to the tabular approach described above.
# Combining inputs
Neither of these models is doing super well but my main question was whether combining the two would improve things at all. There are different approaches to combining these models. I followed existing examples and removed some layers from the text and tabular models which are then combined in a concat model. I won't cover all the steps here but all the notebooks can be found in this [GitHub repo](https://github.com/davanstrien/Website-Classification).
```
#hide
from fastai.tabular import *
from pathlib import Path
import pandas as pd
from fastai import *
from fastai.tabular import *
from fastai.callbacks import *
from fastai.text import *
from fastai.metrics import accuracy, MultiLabelFbeta
```
One of the things we need to do to create a model with multiple inputs is to create a new PyTorch dataset which combines our text and tabular ```x``` inputs with our target. This is pretty straightforward:
```
#collapse_show
class ConcatDataset(Dataset):
    """Dataset pairing two parallel inputs (tabular and text) with a shared target.

    Indexing returns ``((x1[i], x2[i]), y[i])`` so a downstream model can
    consume both inputs at once.
    """

    def __init__(self, x1, x2, y):
        self.x1 = x1
        self.x2 = x2
        self.y = y

    def __len__(self):
        return len(self.y)

    def __getitem__(self, i):
        inputs = (self.x1[i], self.x2[i])
        return inputs, self.y[i]
```
One of the other pieces was creating a ```ConcatModel```
```
#collapse_show
class ConcatModel(nn.Module):
    """Joint model: concatenates tabular and NLP encoder outputs, then applies a head.

    `layers` gives the sizes of the head's linear layers; `drops` the dropout
    probability per layer. ReLU is used between hidden layers and no activation
    on the final (output) layer.
    """

    def __init__(self, model_tab, model_nlp, layers, drops):
        super().__init__()
        self.model_tab = model_tab
        self.model_nlp = model_nlp
        # No activation after the last linear layer; ReLU everywhere else.
        activations = [nn.ReLU(inplace=True)] * (len(layers) - 2) + [None]
        head = []
        for n_in, n_out, p, actn in zip(layers[:-1], layers[1:], drops, activations):
            # fastai helper: BatchNorm -> Dropout -> Linear (+ optional activation)
            # https://docs.fast.ai/layers.html#bn_drop_lin
            head += bn_drop_lin(n_in, n_out, p=p, actn=actn)
        self.layers = nn.Sequential(*head)

    def forward(self, *x):
        tab_features = self.model_tab(*x[0])
        nlp_features = self.model_nlp(x[1])[0]
        combined = torch.cat([tab_features, nlp_features], dim=1)
        return self.layers(combined)
```
```lst_layer``` is dependent on the layers from the tabular and nlp models. This layer is manually defined at the moment, so if changes are made to the number of layers in the tab model this needs to be manually changed.
```bn_drop_lin``` is a fastai helper function that returns a sequence of batch normalization, dropout and a linear layer, which forms the final layer of the model.
## How does this combined model do? 🤷♂️
The best result I got was``` f_beta value: 0.39341238141059875``` with an accuracy of ```0.595348```. A summary of the scores for each models:
| Model | F2 score |
|-------|--------|
|SentencePiece text | 0.211 |
| Spacy text | 0.333|
| Tabular | 0.175 |
|Concat| **0.393** |
This provides some improvement on the tabular or nlp models on their own. I found the combined model was fairly tricky to train and suspect that there could be some improvements in how the model is set up that might improve its performance. I am keen to try a similar approach with a dataset where there is more abundant information available to train with.
# tl;dr
It wasn't possible to get a very good f2 score on this website classification dataset. As the UK web archive say:
> We expect that a appropriate classifier might require more information about each site in order to produce reliable results, and are looking at augmenting this dataset with further information in the future. Options include:
For each site, make the titles of every page on that site available.
For each site, extract a set of keywords that summarise the site, via the full-text index.
I suspect that having either of these additional components would help improve the performance of the classifier.
| github_jupyter |
```
# from google.colab import drive
# drive.mount('/content/drive')
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from matplotlib import pyplot as plt
import copy
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# Normalize CIFAR-10 images to [-1, 1] per channel.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
foreground_classes = {'plane', 'car', 'bird'}
background_classes = {'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'}
fg1, fg2, fg3 = 0, 1, 2  # CIFAR label ids of the three foreground classes
dataiter = iter(trainloader)
background_data = []
background_label = []
foreground_data = []
foreground_label = []
batch_size = 10
# Split all 50000 training images (5000 batches x 10) into foreground
# (plane/car/bird -> 15000 images) and background (the other seven
# classes -> 35000 images) pools used to assemble the mosaics below.
for i in range(5000):
    # BUG FIX: `dataiter.next()` was removed from DataLoader iterators in
    # modern PyTorch; the builtin next() is the supported protocol.
    images, labels = next(dataiter)
    for j in range(batch_size):
        if classes[labels[j]] in background_classes:
            background_data.append(images[j].tolist())
            background_label.append(labels[j])
        else:
            foreground_data.append(images[j].tolist())
            foreground_label.append(labels[j])
# Convert to tensors so the mosaic builder can fancy-index them.
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
def create_mosaic_img(bg_idx, fg_idx, fg):
    """
    Build one 9-image mosaic: 8 background images plus 1 foreground image.

    bg_idx : list of indexes of background_data[] to be used as background images in mosaic
    fg_idx : index of image to be used as foreground image from foreground data
    fg : at what position/index foreground image has to be stored out of 0-8

    Returns (image_list, label): a stacked (9, 3, 32, 32) double tensor and the
    foreground class label remapped into 0-2.
    """
    image_list = []
    j = 0  # walks through bg_idx; only advances on background slots
    for i in range(9):
        if i != fg:
            image_list.append(background_data[bg_idx[j]].type("torch.DoubleTensor"))
            j += 1
        else:
            image_list.append(foreground_data[fg_idx].type("torch.DoubleTensor"))
    # Foreground CIFAR labels are already 0,1,2 (plane/car/bird) and fg1 == 0,
    # so this keeps the mosaic label in 0-2.  (The previous "minus 7" comment
    # was stale — the foreground classes here are not 7,8,9.)
    label = foreground_label[fg_idx] - fg1
    #image_list = np.concatenate(image_list, axis=0)
    image_list = torch.stack(image_list)
    return image_list, label
desired_num = 30000  # number of training mosaics to synthesize
mosaic_list_of_images = []  # list of mosaic images, each mosaic image is saved as list of 9 images
fore_idx = []  # position (0-8) at which the foreground image sits in each mosaic
mosaic_label = []  # label of mosaic image = foreground class present in that mosaic
for i in range(desired_num):
    bg_idx = np.random.randint(0, 35000, 8)  # 8 random background images
    fg_idx = np.random.randint(0, 15000)     # 1 random foreground image
    fg = np.random.randint(0, 9)             # random cell for the foreground
    fore_idx.append(fg)
    image_list, label = create_mosaic_img(bg_idx, fg_idx, fg)
    mosaic_list_of_images.append(image_list)
    mosaic_label.append(label)
class MosaicDataset(Dataset):
    """Dataset over synthesized mosaics.

    Each item is ``(mosaic, label, fore_idx)``: the stack of 9 images, the
    foreground class label, and the position (0-8) of the foreground image.
    """

    def __init__(self, mosaic_list_of_images, mosaic_label, fore_idx):
        """
        Args:
            mosaic_list_of_images: list of mosaics (each a stack of 9 images).
            mosaic_label: foreground class label per mosaic.
            fore_idx: foreground position (0-8) per mosaic.
        """
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        return self.mosaic[idx], self.label[idx], self.fore_idx[idx]
batch = 250  # mini-batch size used by both the train and test loaders
msd = MosaicDataset(mosaic_list_of_images, mosaic_label, fore_idx)
train_loader = DataLoader(msd, batch_size=batch, shuffle=True)
class Focus(nn.Module):
    """Attention network over the 9 cells of a mosaic.

    ``forward`` scores each of the 9 images with a small CNN (`helper`),
    softmaxes the scores into weights (alphas), and returns those weights
    together with the alpha-weighted average image.
    """

    def __init__(self):
        super(Focus, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, padding=0)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=6, kernel_size=3, padding=0)
        # After conv1(32->30) + pool(->15) + conv2(->13): 6 * 13 * 13 = 1014 features.
        self.fc1 = nn.Linear(1014, 512)
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 1)  # one scalar score per image

    def forward(self, z):
        """z: batch of mosaics, shape (B, 9, 3, 32, 32), dtype float64.

        Returns (alphas, weighted_avg): (B, 9) softmax weights and the
        (B, 3, 32, 32) weighted average image.
        """
        # BUG FIX: the batch size was hard-coded from the global `batch`,
        # which breaks whenever the last batch is smaller; derive it (and the
        # device) from the input instead.
        b = z.shape[0]
        scores = torch.zeros([b, 9], dtype=torch.float64, device=z.device)
        for i in range(9):
            scores[:, i] = self.helper(z[:, i])[:, 0]
        alphas = F.softmax(scores, dim=1)
        # (A dead `x1 = x[:,0]; torch.mul(...)` pair whose result was discarded
        # has been removed.)
        y = torch.zeros([b, 3, 32, 32], dtype=torch.float64, device=z.device)
        for i in range(9):
            # Broadcast each weight over its image's (C, H, W) dims.
            y = y + torch.mul(alphas[:, i][:, None, None, None], z[:, i])
        return alphas, y

    def helper(self, x):
        """Score a single (B, 3, 32, 32) image batch -> (B, 1) raw logits."""
        x = self.pool(F.relu(self.conv1(x)))
        x = F.relu(self.conv2(x))
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
focus_net = Focus().double()  # double precision to match the float64 mosaic tensors
focus_net = focus_net.to("cuda")
class Classification(nn.Module):
    """CNN that maps the attention-averaged (3, 32, 32) image to 3 class logits."""

    def __init__(self):
        super(Classification, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, padding=0)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=6, kernel_size=3, padding=0)
        # 6 channels * 13 * 13 spatial positions after the conv/pool stack.
        self.fc1 = nn.Linear(1014, 512)
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 3)

    def forward(self, x):
        hidden = self.pool(F.relu(self.conv1(x)))
        hidden = F.relu(self.conv2(hidden))
        flat = hidden.view(hidden.size(0), -1)
        flat = F.relu(self.fc1(flat))
        flat = F.relu(self.fc2(flat))
        # Raw logits; CrossEntropyLoss applies log-softmax downstream.
        return self.fc3(flat)
classify = Classification().double()
classify = classify.to("cuda")
# Build a held-out set of 10000 mosaics with the same recipe as the training set.
test_images = []      # list of mosaic images, each saved as a stack of 9 images
fore_idx_test = []    # position (0-8) of the foreground image in each mosaic
test_label = []       # label of mosaic image = foreground class present in that mosaic
for i in range(10000):
    bg_idx = np.random.randint(0, 35000, 8)
    fg_idx = np.random.randint(0, 15000)
    fg = np.random.randint(0, 9)
    fore_idx_test.append(fg)
    image_list, label = create_mosaic_img(bg_idx, fg_idx, fg)
    test_images.append(image_list)
    test_label.append(label)
test_data = MosaicDataset(test_images, test_label, fore_idx_test)
test_loader = DataLoader(test_data, batch_size=batch, shuffle=False)
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
# Separate optimizers so each network's parameters are stepped independently.
optimizer_classify = optim.Adam(classify.parameters(), lr=0.001)#, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
optimizer_focus = optim.Adam(focus_net.parameters(), lr=0.001)#, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
# Bookkeeping columns: col1 = epoch, col2-col7 = train-set stats
# (argmax>0.5, argmax<0.5, FTPT, FFPT, FTPF, FFPF), col8-col13 = the same
# six stats on the test set.
col1=[]
col2=[]
col3=[]
col4=[]
col5=[]
col6=[]
col7=[]
col8=[]
col9=[]
col10=[]
col11=[]
col12=[]
col13=[]
correct = 0
total = 0
count = 0
flag = 1
# FTPT/FFPT/FTPF/FFPF: cross-tabulation of "focus on the right cell" x
# "classifier prediction correct".
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
# Pre-training baseline: evaluate the untrained networks on the TRAIN set and
# cross-tabulate focus correctness vs. prediction correctness.
with torch.no_grad():
    for data in train_loader:
        inputs, labels , fore_idx = data
        inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        alphas, avg_images = focus_net(inputs)   # attention weights + weighted image
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            count += 1
            focus = torch.argmax(alphas[j])      # cell the network attends to most
            if alphas[j][focus] >= 0.5 :
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
print(count)
print("="*100)
# Row 0 of the train table: metrics before any training (epoch label 0).
col1.append(0)
col2.append(argmax_more_than_half)
col3.append(argmax_less_than_half)
col4.append(focus_true_pred_true)
col5.append(focus_false_pred_true)
col6.append(focus_true_pred_false)
col7.append(focus_false_pred_false)
# Reset counters, then repeat the same baseline evaluation on the TEST set.
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
    for data in test_loader:
        inputs, labels , fore_idx = data
        inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            focus = torch.argmax(alphas[j])
            if alphas[j][focus] >= 0.5 :
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
print("total correct", correct)
print("total train set images", total)  # NOTE(review): label says "train" but this is the test total
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
# Row 0 of the test table.
col8.append(argmax_more_than_half)
col9.append(argmax_less_than_half)
col10.append(focus_true_pred_true)
col11.append(focus_false_pred_true)
col12.append(focus_true_pred_false)
col13.append(focus_false_pred_false)
nos_epochs = 200
# Agreement counters; re-initialized at the top of each epoch below.
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
for epoch in range(nos_epochs): # loop over the dataset multiple times
    focus_true_pred_true =0
    focus_false_pred_true =0
    focus_true_pred_false =0
    focus_false_pred_false =0
    argmax_more_than_half = 0
    argmax_less_than_half =0
    running_loss = 0.0
    epoch_loss = []
    cnt=0
    iteration = desired_num // batch  # batches per epoch (not used below)
    #training data set
    for i, data in enumerate(train_loader):
        inputs , labels , fore_idx = data
        inputs, labels = inputs.to("cuda"), labels.to("cuda")
        # zero the parameter gradients
        optimizer_focus.zero_grad()
        optimizer_classify.zero_grad()
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        # The classification loss back-propagates through BOTH networks, so the
        # focus net is trained only via the classifier's gradient signal.
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer_focus.step()
        optimizer_classify.step()
        running_loss += loss.item()
        mini = 60
        if cnt % mini == mini-1:    # print every `mini` mini-batches
            print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / mini))
            epoch_loss.append(running_loss/mini)
            running_loss = 0.0
        cnt=cnt+1
        # Every 5th epoch, accumulate focus/prediction agreement over the batch.
        # NOTE(review): original indentation was lost; this block is assumed to
        # sit inside the batch loop (so all batches contribute) — confirm
        # against the source notebook.
        if epoch % 5 == 0:
            for j in range (batch):
                focus = torch.argmax(alphas[j])
                if(alphas[j][focus] >= 0.5):
                    argmax_more_than_half +=1
                else:
                    argmax_less_than_half +=1
                if(focus == fore_idx[j] and predicted[j] == labels[j]):
                    focus_true_pred_true += 1
                elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                    focus_false_pred_true +=1
                elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                    focus_true_pred_false +=1
                elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                    focus_false_pred_false +=1
    # Early stop once the mean mini-batch loss is essentially zero.
    if(np.mean(epoch_loss) <= 0.005):
        break;
    if epoch % 5 == 0:
        # focus_net.eval()
        # classify.eval()
        # Record train-set stats for this epoch.
        col1.append(epoch+1)
        col2.append(argmax_more_than_half)
        col3.append(argmax_less_than_half)
        col4.append(focus_true_pred_true)
        col5.append(focus_false_pred_true)
        col6.append(focus_true_pred_false)
        col7.append(focus_false_pred_false)
        #************************************************************************
        #testing data set
        with torch.no_grad():
            focus_true_pred_true =0
            focus_false_pred_true =0
            focus_true_pred_false =0
            focus_false_pred_false =0
            argmax_more_than_half = 0
            argmax_less_than_half =0
            for data in test_loader:
                inputs, labels , fore_idx = data
                inputs, labels = inputs.to("cuda"), labels.to("cuda")
                alphas, avg_images = focus_net(inputs)
                outputs = classify(avg_images)
                _, predicted = torch.max(outputs.data, 1)
                for j in range (batch):
                    focus = torch.argmax(alphas[j])
                    if(alphas[j][focus] >= 0.5):
                        argmax_more_than_half +=1
                    else:
                        argmax_less_than_half +=1
                    if(focus == fore_idx[j] and predicted[j] == labels[j]):
                        focus_true_pred_true += 1
                    elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                        focus_false_pred_true +=1
                    elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                        focus_true_pred_false +=1
                    elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                        focus_false_pred_false +=1
            # Record test-set stats for this epoch.
            col8.append(argmax_more_than_half)
            col9.append(argmax_less_than_half)
            col10.append(focus_true_pred_true)
            col11.append(focus_false_pred_true)
            col12.append(focus_true_pred_false)
            col13.append(focus_false_pred_false)
print('Finished Training')
# torch.save(focus_net.state_dict(),"/content/drive/My Drive/Research/Cheating_data/16_experiments_on_cnn_3layers/"+name+"_focus_net.pt")
# torch.save(classify.state_dict(),"/content/drive/My Drive/Research/Cheating_data/16_experiments_on_cnn_3layers/"+name+"_classify.pt")
# Assemble the recorded per-epoch counters into train/test tables.
columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ]
df_train = pd.DataFrame()
df_test = pd.DataFrame()
df_train[columns[0]] = col1
df_train[columns[1]] = col2
df_train[columns[2]] = col3
df_train[columns[3]] = col4
df_train[columns[4]] = col5
df_train[columns[5]] = col6
df_train[columns[6]] = col7
df_test[columns[0]] = col1  # same epoch axis for both tables
df_test[columns[1]] = col8
df_test[columns[2]] = col9
df_test[columns[3]] = col10
df_test[columns[4]] = col11
df_test[columns[5]] = col12
df_test[columns[6]] = col13
df_train
# plt.figure(12,12)
# Train-set plots: attention confidence counts, then the FTPT/FFPT/FTPF/FFPF curves.
plt.plot(col1,col2, label='argmax > 0.5')
plt.plot(col1,col3, label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.title("On Training set")
plt.show()
plt.plot(col1,col4, label ="focus_true_pred_true ")
plt.plot(col1,col5, label ="focus_false_pred_true ")
plt.plot(col1,col6, label ="focus_true_pred_false ")
plt.plot(col1,col7, label ="focus_false_pred_false ")
plt.title("On Training set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.savefig("train_ftpt.pdf", bbox_inches='tight')
plt.show()
df_test
# plt.figure(12,12)
# Test-set versions of the same two plots.
plt.plot(col1,col8, label='argmax > 0.5')
plt.plot(col1,col9, label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("Testing data")
plt.title("On Testing set")
plt.show()
plt.plot(col1,col10, label ="focus_true_pred_true ")
plt.plot(col1,col11, label ="focus_false_pred_true ")
plt.plot(col1,col12, label ="focus_true_pred_false ")
plt.plot(col1,col13, label ="focus_false_pred_false ")
plt.title("On Testing set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("Testing data")
plt.savefig("test_ftpt.pdf", bbox_inches='tight')
plt.show()
# Post-training evaluation, mirroring the pre-training baseline: first the
# full focus/prediction cross-tabulation on train then test, then plain
# accuracy-only passes over both loaders.
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
    for data in train_loader:
        inputs, labels , fore_idx = data
        inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            focus = torch.argmax(alphas[j])
            if alphas[j][focus] >= 0.5 :
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %d %%' % (
    100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
# Same evaluation on the test set.
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
    for data in test_loader:
        inputs, labels , fore_idx = data
        inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            focus = torch.argmax(alphas[j])
            if alphas[j][focus] >= 0.5 :
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
print("total correct", correct)
print("total train set images", total)  # NOTE(review): mislabeled; this is the test total
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
# Accuracy-only pass over the train loader.
correct = 0
total = 0
with torch.no_grad():
    for data in train_loader:
        inputs, labels , fore_idx = data
        inputs, labels = inputs.to("cuda"), labels.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
# Accuracy-only pass over the test loader.
correct = 0
total = 0
with torch.no_grad():
    for data in test_loader:
        inputs, labels , fore_idx = data
        inputs, labels = inputs.to("cuda"), labels.to("cuda")
        alphas, avg_images = focus_net(inputs)
        outputs = classify(avg_images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)  # NOTE(review): mislabeled; this is the test total
# Distribution of the maximum attention weight (alpha) over the test set,
# overall and restricted to FTPT samples (focus correct AND prediction correct).
max_alpha = []
alpha_ftpt = []
argmax_more_than_half = 0
argmax_less_than_half = 0
for i, data in enumerate(test_loader):
    inputs, labels, fore_idx = data
    inputs = inputs.double()
    inputs, labels = inputs.to("cuda"), labels.to("cuda")
    alphas, avg = focus_net(inputs)
    outputs = classify(avg)
    # BUG FIX: `predicted` was read in the loop below but never computed here —
    # it silently reused a stale global left over from an earlier cell.
    _, predicted = torch.max(outputs.data, 1)
    mx, _ = torch.max(alphas, 1)
    max_alpha.append(mx.cpu().detach().numpy())
    for j in range(labels.size(0)):
        focus = torch.argmax(alphas[j])
        if alphas[j][focus] >= 0.5:
            argmax_more_than_half += 1
        else:
            argmax_less_than_half += 1
        if focus == fore_idx[j] and predicted[j] == labels[j]:
            alpha_ftpt.append(alphas[j][focus].item())
max_alpha = np.concatenate(max_alpha, axis=0)
print(max_alpha.shape)
plt.figure(figsize=(6, 6))
_, bins, _ = plt.hist(max_alpha, bins=50, color="c")
plt.title("alpha values histogram")
plt.savefig("alpha_hist.pdf")
plt.figure(figsize=(6, 6))
_, bins, _ = plt.hist(np.array(alpha_ftpt), bins=50, color="c")
plt.title("alpha values in ftpt")
plt.savefig("alpha_hist_ftpt.pdf")
```
| github_jupyter |
# Supervised Contrastive Learning
**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)<br>
**Date created:** 2020/11/30<br>
**Last modified:** 2020/11/30<br>
**Description:** Using supervised contrastive learning for image classification.
```
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
import keras
from keras import layers
```
## Prepare the data
```
num_classes = 10
input_shape = (32, 32, 3)
# Load the train and test data splits; labels come back as (n, 1) integer ids.
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# Display shapes of train and test datasets
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
```
## Using image data augmentation
```
# Augmentation pipeline applied inside the encoder; the Normalization layer's
# statistics are fitted from the training data via adapt() below.
data_augmentation = keras.Sequential(
    [
        layers.experimental.preprocessing.Normalization(),
        layers.experimental.preprocessing.RandomFlip("horizontal"),
        layers.experimental.preprocessing.RandomRotation(0.02),
        layers.experimental.preprocessing.RandomWidth(0.2),
        layers.experimental.preprocessing.RandomHeight(0.2),
    ]
)
# Setting the state of the normalization layer.
data_augmentation.layers[0].adapt(x_train)
```
## Build the encoder model
The encoder model takes the image as input and turns it into a 2048-dimensional
feature vector.
```
def create_encoder():
    """Return a ResNet50V2 encoder mapping an augmented image to a feature vector.

    Uses random (untrained) weights and global average pooling, so the output
    is the backbone's pooled feature vector.
    """
    backbone = keras.applications.ResNet50V2(
        include_top=False, weights=None, input_shape=input_shape, pooling="avg"
    )
    inputs = keras.Input(shape=input_shape)
    # Augmentation runs inside the model so it is applied during training.
    outputs = backbone(data_augmentation(inputs))
    return keras.Model(inputs=inputs, outputs=outputs, name="cifar10-encoder")
encoder = create_encoder()
encoder.summary()
# Hyperparameters shared by both experiments.
learning_rate = 0.001
batch_size = 265
hidden_units = 512
projection_units = 128  # output size of the contrastive projection head
num_epochs = 50
dropout_rate = 0.5
temperature = 0.05      # temperature for the supervised contrastive loss
```
## Build the classification model
The classification model adds a fully-connected layer on top of the encoder,
plus a softmax layer with the target classes.
```
def create_classifier(encoder, trainable=True):
    """Stack a dropout/dense/softmax head on `encoder` and compile it.

    When ``trainable`` is False the encoder layers are frozen so only the
    head's weights are optimized.
    """
    for layer in encoder.layers:
        layer.trainable = trainable

    inputs = keras.Input(shape=input_shape)
    x = encoder(inputs)
    x = layers.Dropout(dropout_rate)(x)
    x = layers.Dense(hidden_units, activation="relu")(x)
    x = layers.Dropout(dropout_rate)(x)
    outputs = layers.Dense(num_classes, activation="softmax")(x)

    model = keras.Model(inputs=inputs, outputs=outputs, name="cifar10-classifier")
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate),
        loss=keras.losses.SparseCategoricalCrossentropy(),
        metrics=[keras.metrics.SparseCategoricalAccuracy()],
    )
    return model
```
## Experiment 1: Train the baseline classification model
In this experiment, a baseline classifier is trained as usual, i.e., the
encoder and the classifier parts are trained together as a single model
to minimize the crossentropy loss.
```
# Baseline: train the encoder and classifier end-to-end with crossentropy.
encoder = create_encoder()
classifier = create_classifier(encoder)
classifier.summary()
history = classifier.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=num_epochs)
accuracy = classifier.evaluate(x_test, y_test)[1]  # index 1 = the accuracy metric
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
```
We get to ~78.4% test accuracy.
## Experiment 2: Use supervised contrastive learning
In this experiment, the model is trained in two phases. In the first phase,
the encoder is pretrained to optimize the supervised contrastive loss,
described in [Prannay Khosla et al.](https://arxiv.org/abs/2004.11362).
In the second phase, the classifier is trained using the trained encoder with
its weights frozen; only the weights of the fully-connected layers with the
softmax are optimized.
### 1. Supervised contrastive learning loss function
```
class SupervisedContrastiveLoss(keras.losses.Loss):
    """Supervised contrastive loss (Khosla et al., 2020).

    Treats the pairwise cosine similarities of L2-normalized feature vectors,
    divided by `temperature`, as logits for an N-pairs loss over the batch
    labels.
    """

    def __init__(self, temperature=1, name=None):
        super(SupervisedContrastiveLoss, self).__init__(name=name)
        self.temperature = temperature

    def __call__(self, labels, feature_vectors, sample_weight=None):
        # Normalize feature vectors so the dot products are cosine similarities.
        feature_vectors_normalized = tf.math.l2_normalize(feature_vectors, axis=1)
        # Compute logits
        logits = tf.divide(
            tf.matmul(
                feature_vectors_normalized, tf.transpose(feature_vectors_normalized)
            ),
            # BUG FIX: was the bare name `temperature`, which resolved to the
            # notebook-level global and silently ignored the value passed to
            # the constructor.
            self.temperature,
        )
        return tfa.losses.npairs_loss(tf.squeeze(labels), logits)
def add_projection_head(encoder):
    """Wrap `encoder` with a dense projection head for contrastive pretraining."""
    inputs = keras.Input(shape=input_shape)
    projected = layers.Dense(projection_units, activation="relu")(encoder(inputs))
    return keras.Model(
        inputs=inputs, outputs=projected, name="cifar-encoder_with_projection-head"
    )
```
### 2. Pretrain the encoder
```
# Phase 1: pretrain the encoder (with a projection head) on the contrastive loss.
encoder = create_encoder()
encoder_with_projection_head = add_projection_head(encoder)
encoder_with_projection_head.compile(
    optimizer=keras.optimizers.Adam(learning_rate),
    loss=SupervisedContrastiveLoss(temperature),
)
encoder_with_projection_head.summary()
history = encoder_with_projection_head.fit(
    x=x_train, y=y_train, batch_size=batch_size, epochs=num_epochs
)
```
### 3. Train the classifier with the frozen encoder
```
# Phase 2: freeze the pretrained encoder and train only the classifier head.
classifier = create_classifier(encoder, trainable=False)
history = classifier.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=num_epochs)
accuracy = classifier.evaluate(x_test, y_test)[1]  # index 1 = the accuracy metric
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
```
We get to ~82.6% test accuracy.
## Conclusion
As shown in the experiments, using the supervised contrastive learning technique
outperformed the conventional technique in terms of the test accuracy. Note that
the same training budget (i.e., number of epochs) was given to each technique.
Supervised contrastive learning pays off when the encoder involves a complex
architecture, like ResNet, and multi-class problems with many labels.
In addition, large batch sizes and multi-layer projection heads
improve its effectiveness. See the [Supervised Contrastive Learning](https://arxiv.org/abs/2004.11362)
paper for more details.
| github_jupyter |
# Intro to Seismology: Programming for Homework 3
## Name:
## Introduction
The goal of this assignment is to locate an earthquake based on travel times. To do this will require three ingredients
1. A function that generates travel times from any point in the media to all receivers.
- We will use a closed-form solution to the eikonal equation in a homogeneous medium.
2. A way to compare the estimated and observed travel times.
- We will use least-squares.
3. A strategy for finding a model that better predicts the correct observed travel times.
- We will use gradient-based nonlinear optimization (Geiger's method) and grid-search.
```
# includes and convenience functions
import matplotlib.pyplot as plt
from numpy import amin, amax
from numpy import argmin, argmax, unravel_index
from numpy import zeros
from numpy import median
from numpy import linspace
from numpy import exp
from numpy import meshgrid
from numpy import average
from numpy import copy
from numpy import sqrt
from numpy.linalg import lstsq
def unpackModel(model):
    """
    Split a packed model vector into its named components.

    Parameters
    ----------
    model : array_like
        Packed model ``[xs, ys, t0]`` as produced by :func:`packModel`.

    Returns
    -------
    xs : double
        Source x position in meters.
    ys : double
        Source y position in meters.
    t0 : double
        Origin time in seconds.
    """
    return model[0], model[1], model[2]
def packModel(xs, ys, t0):
    """
    Pack the source parameters into a single model vector.

    Parameters
    ----------
    xs : double
        Source x position in meters.
    ys : double
        Source y position in meters.
    t0 : double
        Origin time in seconds.

    Returns
    -------
    model : array_like
        Length-3 vector ``[xs, ys, t0]``; the inverse of :func:`unpackModel`.
    """
    model = zeros(3)
    model[:] = (xs, ys, t0)
    return model
```
## Modeling Travel Times
In a homogeneous medium the estimated travel time, $t_{est}$, is simply the distance, $d$, divided by the medium velocity, $v$, i.e., $t_{est} = \frac{d}{v}$. Moreover, observe that earthquakes have unknown origin times, $t_0$. Thus, the estimated travel time function must add some bias corresponding to the origin time, i.e., $t_{est} = t_0 + \frac{d}{v}$. For ease of implementation, we will consider only a $2D$ media, so that distance between the source, $\textbf{x}_s$, and receiver, $\textbf{x}_j$, is $d = \sqrt{ (x_j - x_s)^2 + (y_j - y_s)^2}$. Therefore, our travel time function modeled at the $j$'th receiver is
\begin{equation}
T_{est}^{(j)}(t_0, \textbf{x}_s)
=t_0 + \frac{\sqrt{ (x_j - x_s)^2 + (y_j - y_s)^2}}{v}
\end{equation}
In general it will be more convenient to define the model as $\textbf{m} = \{x_s, y_s, t_0 \}$ so that
\begin{equation}
T_{est}^{(j)}(\textbf{m})
=t_0 + \frac{\sqrt{ (x_j - x_s)^2 + (y_j - y_s)^2}}{v}
\end{equation}
```
def computeTravelTimes(xr, yr, vel, model):
    """
    Computes travel times in a 2D homogeneous medium with constant velocity.

    The estimated travel time at the j'th receiver is

        T_j = t0 + sqrt((x_j - xs)**2 + (y_j - ys)**2)/vel

    i.e., origin time plus straight-ray distance over velocity.

    Parameters
    ----------
    xr : array_like
        The x positions of the receivers (meters). This has dimension [nrec]
        where nrec is the number of receivers.
    yr : array_like
        The y positions of the receivers (meters). This has dimension [nrec]
        where nrec is the number of receivers.
    vel : double
        The constant velocity in the medium (meters/second).
    model : array_like
        The model containing the source position (x, y) and origin time,
        packed as [xs, ys, t0].

    Returns
    -------
    test : ndarray
        The estimated travel times from the source to each receiver (seconds).
    """
    # Unpack [xs, ys, t0]; same layout as packModel/unpackModel.
    xs, ys, t0 = model[0], model[1], model[2]
    nrec = len(xr)
    test = zeros(nrec)
    for j in range(nrec):
        # Euclidean distance from the trial source to receiver j
        d = sqrt((xr[j] - xs)**2 + (yr[j] - ys)**2)
        test[j] = t0 + d/vel
    return test
```
## Measuring Misfit
In all inverse problems a measure of how well the model explains the data is required. Such measures are typically called objective or cost funtcions. Many objective functions exist and each has its own merits and drawbacks. Arguably, the most common objective function is the sum of the residuals squared. Least-squares typically yields quick convergence in non-linear inversions and results in simple very algebra. Unfortunately, outliers can have an undo influence on the solution and therefore, least-squares is not robust. To mitigate the last point, it is generally intelligent to screen data and manually remove outlier data prior to inversion. That said, the misfit (cost) function to minimize looks like
\begin{equation}
\mathcal{C}(\textbf{m}) = \sum_{j=1}^{n_{obs}} (T_{obs}^{(j)} - T_{est}^{(j)}(\textbf{m}) )^2
\end{equation}
where we are simply squaring and summing the residual travel times for all $n_{obs}$ observations. If only locating with $P$ waves then the number of observations would be the number of receivers in the network. Notice, the residual is the difference between the observed travel time, $T_{obs}^{(j)}$, and estimate travel time for the $j$'th observation.
```
def computeResiduals(tobs, test):
    """
    Computes the travel time residuals, r_j = T_obs^(j) - T_est^(j).

    Parameters
    ----------
    tobs : array_like
        The observations in seconds. This has dimension [nrec] which is
        the number of receivers.
    test : array_like
        The estimates in seconds. This has dimension [nrec] which is the
        number of receivers.

    Returns
    -------
    res : ndarray
        The travel time residuals (observed minus estimated) in seconds.
    """
    res = zeros(len(tobs))
    for j in range(len(tobs)):
        res[j] = tobs[j] - test[j]
    return res
def computeCostFunction(model, tobs, xr, yr, vel):
    """
    Computes the cost (objective) function corresponding to the current model,
    i.e., the sum of the squared travel time residuals.

    Parameters
    ----------
    model : array_like
        Contains the current source position and origin time.
    tobs : array_like
        The observed travel times (seconds). This has dimension [nrec] which is
        the number of receivers (observations).
    xr : array_like
        The receiver positions in x in meters. This has dimension [nrec].
    yr : array_like
        The receiver positions in y in meters. This has dimension [nrec].
    vel : double
        The constant velocity in the medium in meters/second.

    Returns
    -------
    cost : double
        The sum of the squared residuals (seconds^2).
    """
    # Forward model the travel times for this candidate source.
    test = computeTravelTimes(xr, yr, vel, model)
    # Residuals: observed minus estimated.
    res = computeResiduals(tobs, test)
    # Least-squares misfit: sum of the squared residuals.
    cost = sum(res*res)
    return cost
```
## Geiger's Method
The iterative solution to minimizing the previous cost function was developed well before the advent of computers, in 1910, by Geiger. In principle the proposed solution is no different than iteratively minimizing a function in Calculus I. The difference between Geiger's method and a Calculus I optimization problem is that there are several parameters (here, a 2D epicenter and an origin time) comprising an optimum instead of one.
Ideally, we desire an optimal model, $\textbf{m}^*$, that produces a cost function whose every residual is identically $0$
\begin{equation}
\mathbf{r}(\textbf{m}^*) = \textbf{0}
\end{equation}
Here each row of the residual vector $\mathbf{r}(\textbf{m}^*) = \textbf{0}$ is an equation of the form
$T_{obs}^{(j)} - T_{est}^{(j)} = 0$. By inspection, we note that $\mathcal{C} = \mathbf{r}^T \cdot \mathbf{r}$ is indeed the sum of the residuals squared.
In general, we don't know $\textbf{m}^*$. Instead, we guess it to be some initial model, $\textbf{m}_0$. Then, we look for a perturbation, $\textbf{m}_1 \leftarrow \textbf{m}_0 + \alpha \textbf{p}$, so that $\textbf{m}_1$ is a better approximation to $\textbf{m}^*$. Here $\alpha$ is a unitless 'step-length' that scales the search-direction, $\textbf{p}$. The search direction necessarily has units equivalent to the model, i.e., $\textbf{p} = \{\Delta x, \Delta y, \Delta t\}$. For the time being, let's ignore $\alpha$ though in practice it is a very important parameter whose estimation requires a difficult class of algorithms called line-search methods. Thus, we seek a solution to the over-determined system
\begin{equation}
\mathbf{r}(\textbf{m}_0 + \textbf{p})
=\textbf{0}
\end{equation}
Such a formulation is of little practical value so we linearize the above with a Taylor expansion about $ \textbf{p}$
\begin{align}
\mathbf{r}(\textbf{m}_0 + \textbf{p})
&=\mathbf{r}(\textbf{m}_0)
+ \nabla_{\textbf{m}} \mathbf{r}(\textbf{m}_0) \textbf{p}
+\cdots \\
&\approx
\mathbf{r}(\textbf{m}_0)
+\nabla_{\textbf{m}} \mathbf{r}(\textbf{m}_0) \textbf{p}
%+\frac{1}{2} \alpha^2 \textbf{p}^T \nabla \mathbf{r}(\textbf{m}_0)^T \cdot \nabla \mathbf{r}(\textbf{m}_0) \textbf{p}
\end{align}
Recalling that the above should equal $\textbf{0}$ we can solve for the search direction
\begin{equation}
\mathbf{r}(\textbf{m}_0) + \nabla_{\textbf{m}} \mathbf{r}(\textbf{m}_0) \cdot \textbf{p} = 0
\end{equation}
Note, that the gradient of a vector involves computing an outer-product. This yields a matrix. The matrix is known as the Jacobian matrix, $\nabla_{\textbf{m}} r(\textbf{m}) = J(\textbf{m})$ and is of great consequence to inverse problems. To obtain a search direction we solve a system of over-determined equations
\begin{equation}
J(\textbf{m}_0) \cdot \textbf{p} =-\mathbf{r}(\textbf{m}_0)
\end{equation}
whose solution is conventionally written as the normal equations
\begin{equation}
\textbf{p} =-(J^T J)^{-1} J^T \textbf{r}(\textbf{m}_0)
\end{equation}
The term $J^T \textbf{r}$ is the gradient of the objective function, $\nabla_{\textbf{m}} \mathcal{C}$. Indeed, if we approximate $(J^T J)^{-1}$ as the identity matrix, then we are left with something of the form $\textbf{p} =-J^T \textbf{r} =-\textbf{g}$. This says that the update direction is the negative of the gradient. This result is very intuitive and indicates that to optimize this nonlinear function we should simply march down the gradient of the objective function. Recall, that the gradient points in the direction of maximum increase so a negative gradient points in the direction of maximum decrease. This particular optimization scheme is steepest descent.
Finally, the location algorithm proceeds as follows:
1. Specify an initial model $\textbf{m}_0 = (x_s, y_s, t_0)$.
2. Compute the objective function, $\mathcal{C}(\textbf{m}_0)$. If the objective function is small enough then stop.
3. Compute the search direction either by solving $J \textbf{p} =-\textbf{r}$ (Gauss-Newton) or $\textbf{p} =-J^T \textbf{r}$ (steepest-descent).
4. Update the model $\textbf{m}_1 = \textbf{m}_0 - \alpha \textbf{p}$ where we will take $\alpha = 1$ for Gauss-Newton or some small number $\ll 1$ for steepest-descent.
5. Iterate
```
def computeGaussNewtonSearchDirection(model, tobs, xr, yr, vel):
    """
    Computes the Gauss-Newton search direction from the Jacobian and residual
    in a numerically stable way by solving the overdetermined system
    J p = -r with a least-squares solver (SVD-based) instead of explicitly
    forming the normal equations.

    Parameters
    ----------
    model : Array of length 3
        This is an array containing the model [xs, ys, t0].
    tobs : array_like
        These are the observed travel times in seconds. This has dimension
        [nrec] which corresponds to the number of observations.
    xr : array_like
        These are the x positions (meters) of each receiver. This has
        dimension [nrec].
    yr : array_like
        These are the y positions (meters) of each receiver. This has
        dimension [nrec].
    vel : double
        This is the constant velocity in the medium.

    Returns
    -------
    p : array_like
        The Gauss-Newton search direction. This has dimension [3] and is
        packed in the same order as the model so that m1 = m0 + alpha*p.
    """
    # Sensitivities of the residuals w.r.t. the model at the current model.
    J = computeJacobian(model, xr, yr, vel)
    # Forward model the travel times and form the residual vector.
    test = computeTravelTimes(xr, yr, vel, model)
    res = computeResiduals(tobs, test)
    # Solving J p = -r in the least-squares sense is equivalent to the
    # normal equations p = -(J^T J)^{-1} J^T r but better conditioned.
    p = lstsq(J, -res, rcond=-1)[0]
    return p
def computeSteepestDescentSearchDirection(model, tobs, xr, yr, vel):
    """
    Computes the steepest-descent search direction, p = -J^T r, i.e., the
    negative gradient of the objective function.

    Parameters
    ----------
    model : Array of length 3
        This is an array containing the model [xs, ys, t0].
    tobs : array_like
        These are the observed travel times in seconds. This has dimension
        [nrec] which corresponds to the number of observations.
    xr : array_like
        These are the x positions (meters) of each receiver. This has
        dimension [nrec].
    yr : array_like
        These are the y positions (meters) of each receiver. This has
        dimension [nrec].
    vel : double
        This is the constant velocity in the medium.

    Returns
    -------
    p : array_like
        The steepest-descent search direction. This has dimension [3] and is
        packed in the same order as the model so that m1 = m0 + alpha*p.
    """
    # Sensitivities of the residuals w.r.t. the model at the current model.
    J = computeJacobian(model, xr, yr, vel)
    # Forward model the travel times and form the residual vector.
    # (The original skeleton used `res` below without ever assigning it.)
    test = computeTravelTimes(xr, yr, vel, model)
    res = computeResiduals(tobs, test)
    p = -J.T@res  # negative of gradient
    return p
```
### Computing Derivatives
Geiger's method is a gradient-descent technique that naturally requires derivatives of the cost function. Recall, our objective function looks like
\begin{equation}
\mathcal{C}(t_0, \textbf{x}_s) = \sum_{j=1}^{n_{obs}} (T_{obs}^{(j)} - T_{est}^{(j)}(\textbf{m}) )^2 = \sum_{r=1}^{n_{rec}} \left (T_{obs}^{(r)} - \left ( t_0 + \frac{\sqrt{ (x_j - x_s)^2 + (y_j - y_s)^2}}{v} \right ) \right )^2
\end{equation}
Here what is required are the derivatives of the cost-function that comprise the gradient:
\begin{equation}
\frac{\partial \mathcal{C}}{\partial x_s}
=\sum_{j=1}^{n_{obs}} -2(T_{obs}^{(j)} - T_{est}^{(j)}) \frac{\partial T_{est}^{(j)}}{\partial x_s}
= 2 \sum_{j=1}^{n_{obs}} (T_{obs}^{(j)} - T_{est}^{(j)}) \frac{(x_j - x_s)}{v \sqrt{(x_j-x_s)^2 + (y_j-y_s)^2}}
\end{equation}
\begin{equation}
\frac{\partial \mathcal{C}}{\partial y_s}
=\sum_{j=1}^{n_{obs}} -2(T_{obs}^{(j)} - T_{est}^{(j)}) \frac{\partial T_{est}^{(j)}}{\partial y_s}
= 2 \sum_{j=1}^{n_{obs}} (T_{obs}^{(j)} - T_{est}^{(j)}) \frac{(y_j - y_s)}{v \sqrt{(x_j-x_s)^2 + (y_j-y_s)^2}}
\end{equation}
\begin{equation}
\frac{\partial \mathcal{C}}{\partial t_0}
=\sum_{j=1}^{n_{obs}} -2(T_{obs}^{(j)} - T_{est}^{(j)}) \frac{\partial T_{est}^{(j)}}{\partial t_0}
=-2 \sum_{j=1}^{n_{obs}} (T_{obs}^{(j)} - T_{est}^{(j)})
\end{equation}
Defining the residual, $r_j$, for the $j$'th observation as $r_j \equiv T_{obs}^{(j)} - T_{est}^{(j)}$, and the distance between the $j$'th source and receiver, $d_j$, as $d_j \equiv \sqrt{ (x_j - x_s)^2 + (y_j - y_s)^2}$ allows us to write the compactly as
\begin{equation}
\frac{\partial \mathcal{C}}{\partial x_s}
= 2 \sum_{j=1}^{n_{obs}} r_j \frac{(x_j - x_s)}{v d_j}
\end{equation}
\begin{equation}
\frac{\partial \mathcal{C}}{\partial y_s}
= 2 \sum_{j=1}^{n_{obs}} r_j \frac{(y_j - y_s)}{v d_j}
\end{equation}
\begin{equation}
\frac{\partial \mathcal{C}}{\partial t_0}
=-2 \sum_{j=1}^{n_{obs}} r_j
\end{equation}
### The Jacobian Matrix
Notice that the gradient has a common $\sum_{j=1}^{n_{obs}} r_j$ term scaled by some coefficient. We can rewrite the above has a matrix-vector multiplication
\begin{equation}
\nabla \mathcal{C}
=
\left [
\begin{array}{cccc}
\frac{2(x_1 - x_s)}{v d_1} & \frac{2(x_2 - x_s)}{v d_2} & \cdots & \frac{2(x_{n_{obs}} - x_s)}{v d_{n_{obs}}} \\
\frac{2(y_1 - y_s)}{v d_1} & \frac{2(y_2 - y_s)}{v d_2} & \cdots & \frac{2(y_{n_{obs}} - y_s)}{v d_{n_{obs}}} \\
-2 & -2 & \cdots & -2
\end{array}
\right ]
\left \{
\begin{array}{c}
T_{obs}^{(1)} - T_{est}^{(1)} \\
T_{obs}^{(2)} - T_{est}^{(2)} \\
\vdots \\
T_{obs}^{(n_{rec})} - T_{est}^{(n_{obs})} \\
\end{array}
\right \}
=\left [
\begin{array}{cccc}
\frac{2(x_1 - x_s)}{v d_1} & \frac{2(x_2 - x_s)}{v d_2} & \cdots & \frac{2(x_{n_{obs}} - x_s)}{v d_{n_{obs}}} \\
\frac{2(y_1 - y_s)}{v d_1} & \frac{2(y_2 - y_s)}{v d_2} & \cdots & \frac{2(y_{n_{obs}} - y_s)}{v d_{n_{obs}}} \\
-2 & -2 & \cdots & -2
\end{array}
\right ]
\left \{
\begin{array}{c}
r_1 \\
r_2 \\
\vdots \\
r_{n_{obs}} \\
\end{array}
\right \}\end{equation}
This form has a well-known form and is compactly written as something we've already seen
\begin{equation}
\nabla \mathcal{C} = J^T \textbf{r}
\end{equation}
Again, the matrix, $J$, is the Jacobian matrix and gives us insights into the sensitivity of the model parameters to different observations. The Jacobian matrix is essential in higher-order methods with faster convegence like Gauss-Newton optimization are second order. Recall, the Gauss-Newton search direction, $\textbf{p}$, is found by computing
\begin{equation}
J^T J \textbf{p} =-J^T \textbf{r}
\end{equation}
or
\begin{equation}
\textbf{p} =-(J^T J)^{-1} J^T \textbf{r}
\end{equation}
with model update given by $\textbf{m}_{k+1} = \textbf{m}_k + \alpha \textbf{p}$. For (Gauss)-Newton steps $\alpha$ is typically $1$. The algorithm should try small values of $\alpha$ should $\alpha=1$ prove to not reduce the objective function - i.e., we've stepped too far. Something to keep in mind, $J$ is usually ill-conditioned. Computing $J^T J$ then squares the condition number of $J$. Hence, inverting $J^T J$ is very difficult. It is advised to use the QR decomposition or SVD instead of the Cholesky decomposition of the normal equations. Shifting the small singular values (eigenvalues) of $J^T J$ away from zero by adding a diagonal regularization matrix like $J^T J + \gamma I$ is a common strategy to mitigate the dramatic effect that small singular values can have on the solution.
As shown previously, if we set $J^T J$ to the identity matrix, $I$, so that $(J^T J)^{-1} = I^{-1} = I$, then we obtain the steepest-descent method from the Gauss-Newton method as the search direction is simply
\begin{equation}
\textbf{p} =-J^T \textbf{r}
\end{equation}
Note, for problems of appreciable size the Jacobian matrix is expensive to store and explicity compute. For this reason, like in full-waveform inversion, we typically compute the action of $J$ on a vector of residuals and avoid its explicit construction. More sophisticated algorithms like conjugate-gradient or the the limited memory BFGS algorithm can approximate the inverse of $J^T J$ and considerably increase convergence rates for a very small cost in system random access memory (RAM). For this reasion l-BFGS and conjugate gradient are typically used for large-scale optimization problems in seismology as they balance low-memory footprint of steepest-descent with the convergence rate of Gauss-Newton.
```
def computeJacobian(model, xr, yr, vel):
    """
    Computes the Jacobian for the 2D travel time location in a homogeneous
    medium. The derivatives in the Jacobian are analytically computed and
    scaled so that the gradient of the objective function is grad = J^T r
    (the factor of 2 from differentiating the squared residuals is folded
    into J, matching the derivation above).

    Parameters
    ----------
    model : Array of length 3
        This is an array containing the model [xs, ys, t0].
    xr : array_like
        These are the x positions (meters) of each receiver. This has
        dimension [nrec].
    yr : array_like
        These are the y positions (meters) of each receiver. This has
        dimension [nrec].
    vel : double
        This is the constant velocity in the medium.

    Returns
    -------
    J : Matrix
        The [nobs x 3] Jacobian matrix, where nobs equals the number of
        receivers. The gradient is readily obtained by computing J^T r.
    """
    # Unpack [xs, ys, t0]; same layout as packModel/unpackModel.
    xs, ys, t0 = model[0], model[1], model[2]
    # The number of rows is the number of observations; the number of
    # columns is the dimension of the model space.
    nrows = len(xr)
    ncols = len(model)
    J = zeros([nrows, ncols])
    for irow in range(nrows):
        # It's possible to get a division by zero when the trial source sits
        # exactly on a receiver. Shift the denominator away from zero; the
        # numerators (x - xs), (y - ys) evaluate to 0 there anyway.
        d = max(1.e-14, sqrt( (xr[irow] - xs)**2 + (yr[irow] - ys)**2 ))
        # dC_{irow}/dx = 2 r (x - xs)/(v d); the residual factor is applied
        # later via J^T r.
        J[irow,0] = 2.0*(xr[irow] - xs)/(vel*d)
        # dC_{irow}/dy = 2 r (y - ys)/(v d)
        J[irow,1] = 2.0*(yr[irow] - ys)/(vel*d)
        # dC_{irow}/dt = -2 r
        J[irow,2] = -2.0
    return J
def computeGradient(model, tobs, xr, yr, vel):
    """
    Computes the gradient of the objective function for the 2D
    homogeneous velocity model via grad = J^T r.

    Parameters
    ----------
    model : Array of length 3
        This is an array containing the model [xs, ys, t0].
    tobs : array_like
        These are the observed travel times in seconds. This has dimension
        [nrec] which corresponds to the number of observations.
    xr : array_like
        These are the x positions (meters) of each receiver. This has
        dimension [nrec].
    yr : array_like
        These are the y positions (meters) of each receiver. This has
        dimension [nrec].
    vel : double
        This is the constant velocity in the medium.

    Returns
    -------
    grad : array_like
        The gradient of the objective function. This has dimension [3] and is
        packed in the same order as the model so that m1 = m0 - alpha*grad.
    """
    # Forward model the travel times for the current model.
    test = computeTravelTimes(xr, yr, vel, model)
    # Residuals: observed minus estimated.
    res = computeResiduals(tobs, test)
    # Sensitivities of the residuals w.r.t. the model.
    J = computeJacobian(model, xr, yr, vel)
    # The `at' symbol in numpy is shorthand for matrix-vector multiplication.
    grad = J.T@res
    return grad
```
Below are a series of unit tests that you should pass before proceeding to the next section. Realize that there is a much more general way to compute gradient entries without explicitly calculating derivatives. To do this we appeal to finite differencing the cost function so that the gradient can be approximated by
\begin{equation}
\nabla \mathcal{C}
\approx
\left \{
\begin{array}{c}
\frac{\mathcal{C}(\textbf{m} + \delta \textbf{e}_1) - \mathcal{C}(\textbf{m})}{\delta} \\
\frac{\mathcal{C}(\textbf{m} + \delta \textbf{e}_2) - \mathcal{C}(\textbf{m})}{\delta} \\
\frac{\mathcal{C}(\textbf{m} + \delta \textbf{e}_3) - \mathcal{C}(\textbf{m})}{\delta}
\end{array}
\right \}
\end{equation}
where $\textbf{e}_i$ is a unit vector in the $i$'th component of the model and $\delta$ some perturbation. Realize, that choosing $\delta$ requires some trial and error. There is a strange tension between a $\delta$ that is too large and does not well represent the concept of a derivative and a $\delta$ too small that succumbs to cancellation error (a form of numerical error).
```
def unit_test():
    """
    Sanity checks for the student-implemented functions: travel times,
    residuals, cost function, and the analytic gradient (verified against
    a forward finite difference of the cost function).
    """
    # Test the travel time function calculator
    xs = 2
    ys = 1
    t0 =-1
    # Distance of a (3,4,5) triangle is 5
    xr = [5, 5, 5]
    yr = [5, 5, 5]
    # All times should be -1 + 5/5 = 0
    vel = 5
    model = packModel(xs, ys, t0)
    test = computeTravelTimes(xr, yr, vel, model)
    assert( max(abs(test)) < 1.e-14 ), "Failed travel time calculation"
    print("Passed travel time calculation test")
    # Test the residual calculation: tobs - test = 1 - 0 = 1
    tobs = zeros(4) + 1
    test = zeros(4)
    res = computeResiduals(tobs, test)
    for i in range(len(res)):
        assert( abs(res[i] - 1.0) < 1.e-14), 'Failed residual test'
    print("Passed residual test")
    # Begin making a little more substantial test - emplace a real source
    xs_true = 1200
    ys_true = 1300
    t0_true = 5.0
    model_true = packModel(xs_true, ys_true, t0_true) # pack the model
    # Put the estimate source in the ballpark of the true source
    xs_est = 1100
    ys_est = 900
    ts_est = 0.0
    model_est = packModel(xs_est, ys_est, ts_est) # pack the model
    # Surround the source with receivers (3x3 grid)
    xr = [0, 1000, 2000, 0, 1000, 2000, 0, 1000, 2000]
    yr = [0, 0, 0, 1000, 1000, 1000, 2000, 2000, 2000]
    vel = 5000.0
    # "Observed" times are synthesized from the true model; estimates come
    # from the perturbed starting model.
    tobs = computeTravelTimes(xr, yr, vel, model_true)
    test = computeTravelTimes(xr, yr, vel, model_est)
    cost = computeCostFunction(model_est, tobs, xr, yr, vel)
    # Recompute the sum of squared residuals by hand and compare.
    r2 = 0
    for i in range(len(tobs)):
        r2 = r2 + (tobs[i] - test[i])**2
    assert(abs(cost - r2) < 1.e-10), "Failed to compute cost function"
    # Check that computeGradient agrees with the explicit product J^T r.
    J = computeJacobian(model_est, xr, yr, vel)
    res = computeResiduals(tobs, test)
    grad = computeGradient(model_est, tobs, xr, yr, vel)
    assert(max(abs(J.T@res - grad)) < 1.e-14), "Strange algebraic mistake"
    print("Passed grad = J^T*res test")
    # For more general cases where the travel times have no analytic solution
    # we would use a finite difference. Here, compute the trusty finite
    # difference to verify our algebra.
    cost = computeCostFunction(model_est, tobs, xr, yr, vel)
    pert = 1.e-7
    model_pert_x = model_est + [pert, 0, 0]
    model_pert_y = model_est + [0, pert, 0]
    model_pert_t = model_est + [0, 0, pert]
    # compute perturbed cost functions
    cost_pert_x = computeCostFunction(model_pert_x, tobs, xr, yr, vel)
    cost_pert_y = computeCostFunction(model_pert_y, tobs, xr, yr, vel)
    cost_pert_t = computeCostFunction(model_pert_t, tobs, xr, yr, vel)
    # compute the forward finite differences
    dcdx = (cost_pert_x - cost)/pert
    dcdy = (cost_pert_y - cost)/pert
    dcdt = (cost_pert_t - cost)/pert
    grad_fd = [dcdx, dcdy, dcdt]
    # Forward differences are never that accurate - use a loose tolerance.
    assert(max(abs(grad_fd - grad)) < 1.e-4), "Gradient is wrong; Jacobian is incorrect"
    print("Passed residual test")
    return True
# Run the unit tests immediately so later cells fail fast when the
# implementations above are incorrect. NOTE(review): unit_test() either
# returns True or raises an AssertionError, so the "Failed" branch is
# effectively unreachable.
if (not unit_test()):
    print("Failed unit tests")
else:
    print("Passed unit tests")
```
## Implement Geiger's Method
Here, the nonlinear inversion strategy is implemented with the gradient-descent technique and a more sophisticated Gauss-Newton step.
```
# Now implement the optimization
def geiger(tobs, xr, yr, vel, maxit=100, alpha=1, tol=5.e-5, lgn = False, model_specified = None):
    """
    Locates the event with Geiger's iterative method.

    Parameters
    ----------
    tobs : array_like
        Observed travel times (seconds), dimension [nrec].
    xr, yr : array_like
        Receiver x and y positions (meters), dimension [nrec].
    vel : double
        Constant medium velocity (meters/second).
    maxit : int, optional
        Maximum number of iterations.
    alpha : double, optional
        Step length scaling the search direction (1 for Gauss-Newton,
        << 1 for steepest descent).
    tol : double, optional
        Convergence tolerance on the objective function (seconds^2).
    lgn : bool, optional
        If True use Gauss-Newton steps, otherwise steepest descent.
    model_specified : array_like, optional
        Starting model [xs, ys, t0]; if None a default near the network
        centroid is used.

    Returns
    -------
    models : list
        The model at every iteration (the convergence history).
    """
    # Initialize a solution
    if (model_specified is None):
        # Default start: slightly offset network centroid, mean arrival time.
        xs0 = average(xr) + 20
        ys0 = average(yr) + 20
        ts0 = average(tobs)
        model0 = packModel(xs0, ys0, ts0)
    else:
        model0 = copy(model_specified)
    models = []
    lconv = False
    # Begin iterative method
    for k in range(maxit):
        # Save the current model
        models.append(model0)
        # Evaluate the misfit and check for convergence.
        cost = computeCostFunction(model0, tobs, xr, yr, vel)
        print("Iteration %d, Objective function: %f (s^2), (xs,ys,ts)=(%f,%f,%f)"%(k, cost, model0[0], model0[1], model0[2]))
        if (cost < tol):
            print("Convergence achieved; terminating", cost)
            lconv = True
            break
        # Compute the new search direction either with a Gauss-Newton or gradient step
        if (lgn):
            p = computeGaussNewtonSearchDirection(model0, tobs, xr, yr, vel)
        else:
            p = computeSteepestDescentSearchDirection(model0, tobs, xr, yr, vel)
        # This is where the line search would go. For now, recommend alpha=1
        # for Gauss-Newton and alpha << 1 for gradient-descent.
        model1 = model0 + alpha*p
        # Save model for final iteration
        model0 = copy(model1)
    # Check final convergence
    if (not lconv):
        cost = computeCostFunction(model0, tobs, xr, yr, vel)
        if (cost < tol):
            print("Convergence achieved", cost)
        else:
            print("Method did not converge to given tolerance")
    return models
# Begin making a little more substantial test - emplace a real source
xs_true = 1200
ys_true = 1300
t0_true = 5.0
model_true = packModel(xs_true, ys_true, t0_true) # pack the model
# Make up some initial source estimate (deliberately far from the truth)
xs_est = 400 #1100
ys_est = 400# 900
ts_est = 0.0
model_est = packModel(xs_est, ys_est, ts_est) # pack the model
# Surround the source with receivers (3x3 grid spanning 2 km)
xr = [0, 1000, 2000, 0, 1000, 2000, 0, 1000, 2000]
yr = [0, 0, 0, 1000, 1000, 1000, 2000, 2000, 2000]
vel = 5000.0
# Compute the observed travel times (synthetic, noise-free data)
tobs = computeTravelTimes(xr, yr, vel, model_true)
# Apply Geiger's method with a gradient descent step (small alpha)
print("Geiger Gradient Descent")
models_grad = geiger(tobs, xr, yr, vel, maxit=20, alpha=1.e-1, lgn=False, model_specified=model_est)
# And with a full Gauss-Newton step (alpha = 1)
print("Geiger Gauss Newton")
models_gn = geiger(tobs, xr, yr, vel, maxit=20, alpha=1, lgn=True, model_specified=model_est)
```
# Question
The gradient descent method does not converge to a satisfactory solution while Gauss-Newton converges very quickly. Both methods use the same Jacobian. Why is gradient descent having difficulty?
__TYPE YOUR ANSWER HERE__
## Grid Search
An alternative strategy for locating an earthquake is to exhaustively search every point in a grid and tabulate the cost function. Then, the earthquake location is assigned to the grid whose point yields the smallest objective function. With modern computers exhaustive search strategies have replaced gradient-based non-linear optimization strategies because exhaustive searches do not fall into local minima and exhaustive searches naturally lead to uncertainty quantification.
### Optimizing For An Origin Time In The Grid Search
In a conventional grid-search earthquake location we look to minimize a likelihood function of the form
\begin{equation}
\mathcal{L}(\textbf{m})
\propto e^{-\sum_{j=1}^{n_{obs}} (T_{obs}^{(j)} - T_{est}^{(j)})^2 }
\end{equation}
Note that the argument of the exponential is the sum-squared residuals. Additionally, $e^{-r^2}$ is simply a Gaussian. This indicates that maximizing the likelihood of an earthquake location corresponds to minimizing the argument of the exponential - i.e., minimizing the residual.
In maximizing the likelihood function recall that the estimate travel times are a function of the origin time and the candidate source position. In this instance we will simply specify source positions (i.e., search through a grid of source positions) and at each source position optimize for an origin time. Thus, we optimize the likelihood function by computing the origin time, $t_0$, that corresponds to
\begin{equation}
\frac{\partial \mathcal{L}(t_0, \textbf{x}_s)}{\partial t_0} = 0
\end{equation}
To simplify the algebra, let's call the estimate travel time $T_{est}^{(j)} = t_0 + \mathcal{T}^{(j)}$ where $\mathcal{T}^{(j)}$, is a solution to the eikonal equation. In this instance,
$\mathcal{T}^{(j)} = \frac{\sqrt{ (x_j - x_s)^2 + (y_j - y_s)^2 }}{v}$,
\begin{align*}
\frac{\partial \mathcal{L}(t_0, \textbf{x}_s)}{\partial t_0}
&=\frac{\partial }{\partial t_0}
\left [
e^{-\sum_{j=1}^{n_{obs}} \left (T_{obs}^{(j)} - (t_0 + \mathcal{T}^{(j)}) \right )^2 }
\right ] \\
&=-e^{-\sum_{j=1}^{n_{obs}} \left (T_{obs}^{(j)} - (t_0 + \mathcal{T}^{(j)}) \right )^2 }
\frac{\partial }{\partial t_0}
\left [
\left (T_{obs}^{(j)} - (t_0 + \mathcal{T}^{(j)}) \right )^2
\right ] \\
&=2e^{-\sum_{j=1}^{n_{obs}} \left (T_{obs}^{(j)} - (t_0 + \mathcal{T}^{(j)}) \right )^2 } \cdot
\left (
\sum_{j=1}^{n_{obs}} \left (T_{obs}^{(j)} - (t_0 + \mathcal{T}^{(j)}) \right )
\right )
\end{align*}
Next, the above is set to $0$,
\begin{equation}
2e^{-\sum_{j=1}^{n_{obs}} \left (T_{obs}^{(j)} - (t_0 + \mathcal{T}^{(j)}) \right )^2 } \cdot
\left (
\sum_{j=1}^{n_{obs}} \left (T_{obs}^{(j)} - (t_0 + \mathcal{T}^{(j)}) \right )
\right ) = 0
\end{equation}
The exponential is always greater than $0$, so we instead solve for $t_0$ in the second term
\begin{equation}
\sum_{j=1}^{n_{obs}} \left (T_{obs}^{(j)} - (t_0 + \mathcal{T}^{(j)}) \right ) = 0
\end{equation}
Using linearity of the summation operator,
\begin{align*}
0&=\sum_{j=1}^{n_{obs}} \left (T_{obs}^{(j)} - (t_0 + \mathcal{T}^{(j)}) \right ) \\
&=\sum_{j=1}^{n_{obs}} \left (T_{obs}^{(j)} - \mathcal{T}^{(j)} \right ) - \sum_{j=1}^{n_{obs}} t_0 \\
&=\sum_{j=1}^{n_{obs}} \left (T_{obs}^{(j)} - \mathcal{T}^{(j)} \right ) - n_{obs} t_0 \\
\end{align*}
Finally, the optimal origin time is found by solving for $t_0$ so that
\begin{equation}
t_0 = \frac{\sum_{j=1}^{n_{obs}} \left (T_{obs}^{(j)} - \mathcal{T}^{(j)} \right )}{n_{obs}}
\end{equation}
This says that the origin time is simply the average of the residual travel times. The algorithm is then as follows,
1. Define a grid of locations (candidate sources).
2. At each grid point compute the estimate travel times with a $0$ origin time.
3. Optimize for the origin time by computing the average residual.
4. Add the origin time to the travel time estimates and tabulate the sum of the squared residuals.
```
def gridSearch(tobs, xr, yr, x, y, vel, objfn = 'l2'):
    """Grid-search earthquake location.

    For every candidate source on the (x, y) grid the origin time is first
    optimized analytically (mean residual for L2, median residual for L1),
    then the misfit of the origin-time-corrected travel times is tabulated.

    Parameters
    ----------
    tobs : observed travel times at the receivers
    xr, yr : receiver coordinates
    x, y : 1D arrays defining the candidate-source grid
    vel : velocity model passed through to computeTravelTimes
    objfn : 'l2' for least squares, anything else selects the L1 misfit

    Returns
    -------
    t0s : optimal origin time at each grid point
    costfn : misfit value at each grid point
    likelihood : normalized likelihood surface exp(-costfn)/sum(exp(-costfn))
    """
    # Tabulate the x's and y's in the grid
    nx = len(x)
    ny = len(y)
    xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
    # Set space for the result
    # (previously t0s was allocated twice; one allocation suffices)
    t0s = zeros(xv.shape)
    costfn = zeros(xv.shape)
    for ix in range(nx):
        for iy in range(ny):
            # Pack the model with a 0 initial origin time
            model = packModel(xv[ix,iy], yv[ix,iy], t0s[ix,iy])
            # Compute the travel times
            test = computeTravelTimes(xr, yr, vel, model)
            # Optimize for the origin time from the residuals
            res = computeResiduals(tobs, test)
            if (objfn == 'l2'):
                t0s[ix,iy] = average(res)
            else:
                # L1: the optimal origin time is the median residual
                t0s[ix,iy] = median(res)
            # Shift the predicted travel times by the optimal origin time
            testWithOT = test + t0s[ix,iy]
            # Tabulate the cost function
            res = computeResiduals(tobs, testWithOT)
            if (objfn == 'l2'):
                costfn[ix,iy] = sum(res**2)
            else:
                # L1 objective function
                costfn[ix,iy] = sum(abs(res))
        # Loop on y grid points
    # Loop on x grid points
    # This is a likelihood function so the area under the curve should sum to unity.
    # NOTE(review): exponentiating -costfn directly can underflow for large misfits;
    # a log-likelihood formulation is numerically safer.
    norm = sum(exp(-costfn.flatten()))
    likelihood = exp(-costfn)/norm
    return t0s, costfn, likelihood
# Candidate-source grid: 301 x 301 points spanning -500 m to 2500 m
x = linspace(-500,2500,301)
y = linspace(-500,2500,301)
t0s, costfn, likelihood = gridSearch(tobs, xr, yr, x, y, vel, objfn='l2')
# Get the optimum location (argmin of the misfit surface).
# FIX: unravel_index's keyword is 'shape'; the old 'dims' keyword was
# deprecated and then removed from NumPy.
[ixopt, iyopt] = unravel_index(argmin(costfn), shape=[len(x), len(y)])
# FIX: label now matches the three printed values (x, y, t0)
print("Estimate optimum (x,y,t0)=(%f,%f,%f); true optimum (x,y,t0)=(%f,%f,%f)"%
      (x[ixopt], y[iyopt], t0s[ixopt,iyopt], xs_true, ys_true, t0_true))
# Plot the results from the Gauss-Newton and grid-search optimization
plt.figure(figsize=(10,10))
plt.imshow(likelihood.T, extent=[min(x), max(x), min(y), max(y)], origin='lower')
plt.xlabel('x offset (m)')
plt.ylabel('y offset (m)')
plt.title("Grid Search Location and Gauss-Newton Convergence History")
plt.grid(True)
plt.colorbar()
# We typically plot stations as (inverted) triangles
plt.scatter(xr, yr, c='red', marker='v', s=60)
# Extract the Gauss-Newton convergence history (x, y per iteration)
niter = len(models_gn)
xconv = zeros(niter)
yconv = zeros(niter)
for i in range(niter):
    xconv[i] = models_gn[i][0]
    yconv[i] = models_gn[i][1]
# Plot the convergence history
plt.plot(xconv, yconv, c='black', linewidth=0.5)
plt.scatter(xconv, yconv, c='blue', marker='o', s=10)
# Plot the true source location as a black star
plt.scatter(xs_true, ys_true, c='black', marker='*', s=120)
plt.show()
```
# Questions
Does the Gauss-Newton method or grid-search take less computational time?
In this instance, both methods identified the true source location. Do you find the Gauss-Newton method more informative than the result of the grid-search?
Notice that the Gauss-Newton method quickly converges on a minimum. For more interesting acquisition geometries and velocity models the objective function can have multiple minima. In general, would you prefer Gauss-Newton or grid-search?
## Graduate Students
Rewrite the grid-search function to optimize an $L_1$ misfit function. In this case the origin time optimization amounts to computing the median residual travel time. Plot the result. Comment on the robustness of $L_1$ vs $L_2$ norms.
| github_jupyter |
```
# Kernel PCA + logistic regression on the Social Network Ads data.
# (The dimensionality reduction below is KernelPCA, not plain PCA.)
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
dataset.head()
# X is created by extracting the Age and Estimated Salary fields from the dataset
X[0:3]
# y is the purchased field (binary target)
y[0:3]
# Dataset split into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 1)
# Feature Scaling (required before the RBF-kernel transform)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Applying Kernel PCA
from sklearn.decomposition import KernelPCA
kpca = KernelPCA(n_components = 2, kernel = 'rbf')
X_train = kpca.fit_transform(X_train)
X_test = kpca.transform(X_test)
# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# Visualising the Training set results.
# FIX: after the KernelPCA transform the plotted axes are the extracted
# components, not the raw Age / Estimated Salary features, so the axis
# labels below were corrected (the originals were misleading).
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('Kernel PCA component 1')
plt.ylabel('Kernel PCA component 2')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('Kernel PCA component 1')
plt.ylabel('Kernel PCA component 2')
plt.legend()
plt.show()
```
| github_jupyter |
# CT-LTI: Multi-sample Training and Eval
In this notebook we train over different graphs and initial-target state pairs.
We change parametrization slightly from the single sample, using Xavier normal instead of Kaiming initialization and a higher deceleration rate for training. Preliminary results on a few runs indicated the above choices would lead to faster convergence on BA and Tree graphs. Still, extensive hyper-parameter optimization would be preferable in the future, especially to optimize performance further.
Please make sure that the required data folder is available at the paths used by the script.
You may generate the required data by running the python script
```nodec_experiments/ct_lti/gen_parameters.py```.
This notebook takes around 15 hours per graph on an RTX TITAN GPU, so please be patient when you generate new results.
## Imports
```
# %load_ext autoreload
# %autoreload 2
import os
os.sys.path.append('../../../')
import torch
from torchdiffeq import odeint
import numpy as np
import pandas as pd
import networkx as nx
from tqdm.cli import tqdm
from nnc.controllers.baselines.ct_lti.dynamics import ContinuousTimeInvariantDynamics
from nnc.controllers.baselines.ct_lti.optimal_controllers import ControllabiltyGrammianController
from nnc.helpers.torch_utils.graphs import adjacency_tensor, drivers_to_tensor
from nnc.helpers.graph_helper import load_graph
from nnc.helpers.torch_utils.evaluators import FixedInteractionEvaluator
from nnc.helpers.torch_utils.losses import FinalStepMSE
from nnc.helpers.torch_utils.trainers import NODECTrainer
from nnc.controllers.neural_network.nnc_controllers import NNCDynamics
from nnc.helpers.torch_utils.nn_architectures.fully_connected import StackedDenseTimeControl
from plotly import graph_objects as go
from plotly.subplots import make_subplots
```
## Load graph and dynamics parameters
```
# Root folder with the pre-generated CT-LTI parameters
# (created by nodec_experiments/ct_lti/gen_parameters.py).
experiment_data_folder = '../../../../data/parameters/ct_lti/'
graph='tree' # please use one of the following: lattice, ba, tree
device = 'cuda:0'
# Per-graph output folder for evaluator logs
results_data_folder = '../../../../results/ct_lti/multi_sample/'+graph + '/'
os.makedirs(results_data_folder, exist_ok=True)
# load graph data
graph_folder = experiment_data_folder+graph+'/'
adj_matrix = torch.load(graph_folder+'adjacency.pt').to(dtype=torch.float, device=device)
n_nodes = adj_matrix.shape[0]
drivers = torch.load(graph_folder + 'drivers.pt')
n_drivers = len(drivers)
# node positions for plotting ('index' column -> coordinate rows)
pos = pd.read_csv(graph_folder + 'pos.csv').set_index('index').values
driver_matrix = drivers_to_tensor(n_nodes, drivers).to(device)
# select dynamics type and initial-target states
dyn = ContinuousTimeInvariantDynamics(adj_matrix, driver_matrix)
target_states = torch.load(graph_folder+'target_states.pt').to(device)
initial_states = torch.load(experiment_data_folder+'init_states.pt').to(device)
# total time for control
total_time=0.5
```
## Train and evaluate all baselines
```
# For all sample indices: for each (initial, target) state pair, evaluate the
# optimal-control (Gramian) baseline and then train and evaluate NODEC.
for i in tqdm(range(initial_states.shape[0])):
    current_sample_id = i
    # load current sample (unsqueeze adds the batch dimension of 1)
    x0 = initial_states[current_sample_id].unsqueeze(0)
    xstar = target_states[current_sample_id].unsqueeze(0)
    # calculate optimal control
    oc = ControllabiltyGrammianController(
        adj_matrix,
        driver_matrix,
        total_time,
        x0,
        xstar,
        simpson_evals=100,
        progress_bar=tqdm,
        use_inverse=False,
    )
    # OC evaluations for different interaction intervals.
    loss_fn = FinalStepMSE(xstar, total_time=total_time)
    all_n_interactions = [50, 500, 5000]
    for n_interactions in all_n_interactions:
        oc_evaluator = FixedInteractionEvaluator(
            'oc_sample'+str(current_sample_id)+'_ninter_' + str(n_interactions),
            log_dir=results_data_folder,
            n_interactions=n_interactions,
            loss_fn=loss_fn,
            ode_solver=None,
            ode_solver_kwargs={'method' : 'dopri5'},
            preserve_intermediate_states=False,
            preserve_intermediate_controls=True,
            preserve_intermediate_times=False,
            preserve_intermediate_energies=False,
            preserve_intermediate_losses=False,
            preserve_params=False,
        )
        oc_res = oc_evaluator.evaluate(dyn, oc, x0, total_time, epoch=0)
        oc_evaluator.write_to_file(oc_res)
    # neural network controller
    # prepare neural network (re-seeded so every sample starts identically).
    torch.manual_seed(1)
    nn = StackedDenseTimeControl(n_nodes,
                                 n_drivers,
                                 n_hidden=0,#1,
                                 hidden_size=15,#*n_nodes,
                                 activation=torch.nn.functional.elu,
                                 use_bias=True
                                 ).to(x0.device)
    nndyn = NNCDynamics(dyn, nn).to(x0.device)
    # NOTE(review): LBFGS with max_iter=1/max_eval=1 takes a single step per
    # epoch — confirm this is the intended training granularity.
    nn_trainer = NODECTrainer(
        nndyn,
        x0,
        xstar,
        total_time,
        obj_function=None,
        optimizer_class = torch.optim.LBFGS,
        optimizer_params=dict(lr=1.2,
                              #momentum =0.5
                              max_iter=1,
                              max_eval=1,
                              history_size=100
                              ),
        ode_solver_kwargs=dict(method='dopri5'),
        logger=None,
        closure=None,
        use_adjoint=False,
    )
    # here we initialize with Xavier which seemed to help NODEC converge faster for tree/ba graphs
    for name, param in nn.named_parameters():
        if len(param.shape) > 1:
            torch.nn.init.xavier_normal_(param)
    # here we use a higher deceleration rate, which seemed to help NODEC converge faster for tree/ba graphs
    # train for 100 epochs
    nndyn = nn_trainer.train_best(epochs=100,
                                  lr_acceleration_rate=0,
                                  lr_deceleration_rate=0.99,
                                  loss_variance_tolerance=10,
                                  verbose=True
                                  )
    # Evaluate after 100 epochs of training for 50 interactions.
    nn_logger_50 = FixedInteractionEvaluator('nn_sample_'+str(current_sample_id)+'_train_50',
                                             log_dir=results_data_folder,
                                             n_interactions=50,
                                             loss_fn=loss_fn,
                                             ode_solver=None,
                                             ode_solver_kwargs={'method' : 'dopri5'},
                                             preserve_intermediate_states=False,
                                             preserve_intermediate_controls=False,
                                             preserve_intermediate_times=False,
                                             preserve_intermediate_energies=False,
                                             preserve_intermediate_losses=False,
                                             preserve_params=True,
                                             )
    nn_res = nn_logger_50.evaluate(dyn, nndyn.nnc, x0, total_time, epoch=100)
    nn_logger_50.write_to_file(nn_res)
    # keep training for 2400 epochs (2500 total)
    nndyn = nn_trainer.train_best(epochs=2400,
                                  lr_acceleration_rate=0,
                                  lr_deceleration_rate=0.99,
                                  loss_variance_tolerance=10,
                                  verbose=True)
    # evaluate for 500 interactions
    nn_logger_500 = FixedInteractionEvaluator(
        'nn_sample_'+str(current_sample_id)+'_train_500',
        log_dir=results_data_folder,
        n_interactions=500,
        loss_fn=loss_fn,
        ode_solver=None,
        ode_solver_kwargs={'method' : 'dopri5'},
        preserve_intermediate_states=False,
        preserve_intermediate_controls=False,
        preserve_intermediate_times=False,
        preserve_intermediate_energies=False,
        preserve_intermediate_losses=False,
        preserve_params=False,
    )
    nn_res = nn_logger_500.evaluate(dyn, nndyn.nnc, x0, total_time, epoch=2500)
    nn_logger_500.write_to_file(nn_res)
    # evaluate for 5000 interactions
    nn_logger_5000= FixedInteractionEvaluator(
        'nn_sample_'+str(current_sample_id)+'_train_5000',
        log_dir=results_data_folder,
        n_interactions=5000,
        loss_fn=loss_fn,
        ode_solver=None,
        ode_solver_kwargs={'method' : 'dopri5'},
        preserve_intermediate_states=False,
        preserve_intermediate_controls=False,
        preserve_intermediate_times=False,
        preserve_intermediate_energies=False,
        preserve_intermediate_losses=False,
        preserve_params=True,
    )
    nn_res = nn_logger_5000.evaluate(dyn, nndyn.nnc, x0, total_time, epoch=2500)
    nn_logger_5000.write_to_file(nn_res)
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.plotting.register_matplotlib_converters.html
# Register converters for handling timestamp values in plots
```
<h2>Kaggle Bike Sharing Demand Dataset</h2>
<h4>To download dataset, sign-in and download from this link: https://www.kaggle.com/c/bike-sharing-demand/data</h4>
<br>
Input Features:<br>
['season', 'holiday', 'workingday', 'weather', 'temp', 'atemp', 'humidity', 'windspeed', 'year', 'month', 'day', 'dayofweek','hour']<br>
Target:<br>
['count']<br>
Objective:
You are provided hourly rental data spanning two years.
For this competition, the training set is comprised of the first 19 days of each month, while the test set is the 20th to the end of the month.
You must predict the total count of bikes rented during each hour covered by the test set, using only information available prior to the rental period
Reference: https://www.kaggle.com/c/bike-sharing-demand/data
```
# Column order for the exported CSVs: the target ('count') first, then features.
columns = ['count', 'season', 'holiday', 'workingday', 'weather', 'temp',
           'atemp', 'humidity', 'windspeed', 'year', 'month', 'day', 'dayofweek','hour']
# Parse 'datetime' and use it as the index so time-based slicing works below
df = pd.read_csv('train.csv', parse_dates=['datetime'],index_col=0)
df_test = pd.read_csv('test.csv', parse_dates=['datetime'],index_col=0)
df.head()
def add_features(df):
    """Expand the DataFrame's DatetimeIndex into numeric calendar columns, in place.

    Adds 'year', 'month', 'day', 'dayofweek' and 'hour' columns so that
    the timestamp information becomes usable by numeric models.
    """
    idx = df.index
    for column in ('year', 'month', 'day', 'dayofweek', 'hour'):
        df[column] = getattr(idx, column)
# Add New Features (calendar columns extracted from the DatetimeIndex)
add_features(df)
add_features(df_test)
df.head()
# Need to predict the missing data (the test period leaves visible gaps)
plt.title('Rental Count - Gaps')
df['2011-01':'2011-02']['count'].plot()
plt.show()
# Rentals change hourly!
plt.plot(df['2011-01-01']['count'])
plt.xticks(fontsize=14, rotation=45)
plt.xlabel('Date')
plt.ylabel('Rental Count')
plt.title('Hourly Rentals for Jan 01, 2011')
plt.show()
# Seasonal
plt.plot(df['2011-01']['count'])
plt.xticks(fontsize=14, rotation=45)
plt.xlabel('Date')
plt.ylabel('Rental Count')
plt.title('Jan 2011 Rentals (1 month)')
plt.show()
# Average rentals by hour of day
group_hour = df.groupby(['hour'])
average_by_hour = group_hour['count'].mean()
plt.plot(average_by_hour.index,average_by_hour)
plt.xlabel('Hour')
plt.ylabel('Rental Count')
plt.xticks(np.arange(24))
plt.grid(True)
plt.title('Average Hourly Rental Count')
# Year to year trend
plt.plot(df['2011']['count'],label='2011')
plt.plot(df['2012']['count'],label='2012')
plt.xticks(fontsize=14, rotation=45)
plt.xlabel('Date')
plt.ylabel('Rental Count')
plt.title('2011 and 2012 Rentals (Year to Year)')
plt.legend()
plt.show()
# Monthly averages, one line per year
group_year_month = df.groupby(['year','month'])
average_year_month = group_year_month['count'].mean()
average_year_month
for year in average_year_month.index.levels[0]:
    plt.plot(average_year_month[year].index,average_year_month[year],label=year)
plt.legend()
plt.xlabel('Month')
plt.ylabel('Count')
plt.grid(True)
plt.title('Average Monthly Rental Count for 2011, 2012')
plt.show()
# Hourly averages, one line per year
group_year_hour = df.groupby(['year','hour'])
average_year_hour = group_year_hour['count'].mean()
for year in average_year_hour.index.levels[0]:
    #print (year)
    #print(average_year_month[year])
    plt.plot(average_year_hour[year].index,average_year_hour[year],label=year)
plt.legend()
plt.xlabel('Hour')
plt.ylabel('Count')
plt.xticks(np.arange(24))
plt.grid(True)
plt.title('Average Hourly Rental Count - 2011, 2012')
# Hourly averages split by working day vs non-working day
group_workingday_hour = df.groupby(['workingday','hour'])
average_workingday_hour = group_workingday_hour['count'].mean()
for workingday in average_workingday_hour.index.levels[0]:
    #print (year)
    #print(average_year_month[year])
    plt.plot(average_workingday_hour[workingday].index,average_workingday_hour[workingday],
             label=workingday)
plt.legend()
plt.xlabel('Hour')
plt.ylabel('Count')
plt.xticks(np.arange(24))
plt.grid(True)
plt.title('Average Hourly Rental Count by Working Day')
plt.show()
# Let's look at correlation between features and target
df.corr()['count']
# Any relation between temperature and rental count?
plt.scatter(x=df.temp,y=df["count"])
plt.grid(True)
plt.xlabel('Temperature')
plt.ylabel('Count')
plt.title('Temperature vs Count')
plt.show()
# Any relation between humidity and rental count?
plt.scatter(x=df.humidity,y=df["count"],label='Humidity')
plt.grid(True)
plt.xlabel('Humidity')
plt.ylabel('Count')
plt.title('Humidity vs Count')
plt.show()
# Save all data
df.to_csv('bike_all.csv',index=True,index_label='datetime',columns=columns)
```
## Training and Validation Set
### Target Variable as first column followed by input features
### Training, Validation files do not have a column header
```
# Training = 70% of the data
# Validation = 30% of the data
# Shuffle the dataset deterministically so the split is random but reproducible
np.random.seed(5)
shuffled_index = list(df.index)
np.random.shuffle(shuffled_index)
df = df.loc[shuffled_index]
n_rows = df.shape[0]
n_train = int(.7 * n_rows)
n_validation = n_rows - n_train
n_rows, n_train, n_validation
columns
# Write Training Set (target first, no header/index)
df.iloc[:n_train].to_csv('bike_train.csv', index=False, header=False, columns=columns)
# Write Validation Set
df.iloc[n_train:].to_csv('bike_validation.csv', index=False, header=False, columns=columns)
# Test Data has only input features
df_test.to_csv('bike_test.csv',index=True,index_label='datetime')
print(','.join(columns))
# Write Column List
with open('bike_train_column_list.txt','w') as f:
    f.write(','.join(columns))
```
| github_jupyter |
# Ballot-polling SPRT
This notebook explores the ballot-polling SPRT we've developed.
```
%matplotlib inline
from __future__ import division
import math
import numpy as np
import numpy.random
import scipy as sp
import scipy.stats
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
from sprt import ballot_polling_sprt
from hypergeometric import trihypergeometric_optim, simulate_ballot_polling_power
```
The proportion of votes for each candidate in the sample is exactly those in the population, except the population is 50 times larger. The sample of votes is made up of 2000 votes for candidate $w$, 1800 votes for candidate $\ell$, and 500 invalid votes.
Candidate $w$ earned $46.5\%$ of the votes and candidate $\ell$ earned $41.9\%$ of the votes, corresponding to a difference of about $4.6\%$. We will test the null hypothesis that they received the same proportion of votes overall against the alternative that the reported vote totals are correct.
## Trinomial SPRT without replacement
First, suppose we don't know the number of invalid ballots. Minimize the LR over possible values.
```
alpha = 0.05  # risk limit
# Sample: 2000 votes for w, 1800 for l, 500 invalid (np.nan); the population
# has the same proportions but is 50 times larger.
sample = [1]*2000 + [0]*1800 + [np.nan]*500
popsize = 50*len(sample)
# Reported totals Vw, Vl scale the sample counts; the number of invalid
# ballots is left unspecified, so the SPRT minimizes the LR over it.
res = ballot_polling_sprt(sample, popsize, alpha, Vw=2000*50, Vl=1800*50)
print(res)
```
The optimization does the right thing: if we did know that there were $500 \times 50$ invalid votes in the population, we'd get the same result!
```
# Same test, but with the number of invalid ballots supplied explicitly;
# per the surrounding text this matches the optimized result.
res = ballot_polling_sprt(sample, popsize, alpha, Vw=2000*50, Vl=1800*50, number_invalid=500*50)
print(res)
```
## What happens when the reported outcome is wrong
In 100 replicates, we draw samples of 50 ballots and conduct the SPRT using the reported results as the alternative hypothesis. We never reject the null.
We do the same for samples of size 100.
Candidate | Reported | Actual
---|---|---
A | 750 | 600
B | 150 | 200
Ballots | 1,000 | 1,000
Diluted margin | 60% | 40%
```
np.random.seed(8062018)
alpha = 0.05
# Actual tallies: 600 for A, 200 for B, 200 invalid -- the reported margin
# (750 vs 150) is wrong, and the true margin (400) is within the null margin.
population = [1]*600 + [0]*200 + [np.nan]*200
popsize = len(population)
reps = 100

rejects_sprt = 0
rejects_trihyper = 0
for i in range(reps):
    sample = np.random.choice(population, replace=False, size=50)
    res = ballot_polling_sprt(sample, popsize, alpha, Vw=750, Vl=150, null_margin=500)
    if res['decision'] == 1:
        rejects_sprt += 1
    res2 = trihypergeometric_optim(sample, popsize, null_margin=500)
    if res2 <= alpha:
        rejects_trihyper += 1
print("Samples of size 50, SPRT rejection rate:", rejects_sprt/reps)
print("Samples of size 50, fixed n trihypergeometric rejection rate:", rejects_trihyper/reps)

rejects_sprt = 0
rejects_trihyper = 0
for i in range(reps):
    sample = np.random.choice(population, replace=False, size=100)
    res = ballot_polling_sprt(sample, popsize, alpha, Vw=750, Vl=150, null_margin=500)
    if res['decision'] == 1:
        rejects_sprt += 1
    res2 = trihypergeometric_optim(sample, popsize, null_margin=500)
    if res2 <= alpha:
        # BUG FIX: previously incremented the undefined name 'rejects_hyper',
        # which raised a NameError on the first trihypergeometric rejection.
        rejects_trihyper += 1
print("Samples of size 100, SPRT rejection rate:", rejects_sprt/reps)
print("Samples of size 100, fixed n trihypergeometric rejection rate:", rejects_trihyper/reps)
```
# Another example where the reported results are wrong and consistent with the null.
The null hypothesis is that $N_w - N_\ell \leq 5$: this is true.
The alternative is that the reported results are correct: $V_w = 80$ and $V_\ell = 70$.
Candidate | Reported | Actual
---|---|---
A | 80 | 80
B | 70 | 75
Ballots | 165 | 165
Diluted margin | 6% | 3%
```
np.random.seed(8062018)
alpha = 0.05
# Actual tallies per the table above: 80 for A, 75 for B, 10 invalid,
# so N_w - N_l = 5 and the null N_w - N_l <= 5 is TRUE.
# BUG FIX: the population previously read [1]*80 + [0]*70 + [np.nan]*15,
# which contradicts the stated scenario (that gives margin 10, null false).
population = [1]*80 + [0]*75 + [np.nan]*10
popsize = len(population)
reps = 100

rejects_sprt = 0
rejects_trihyper = 0
rejects_trihyper_red = 0
for i in range(reps):
    sample = np.random.choice(population, replace=False, size=100)
    res = ballot_polling_sprt(sample, popsize, alpha, Vw=80, Vl=70, null_margin=5)
    if res['decision'] == 1:
        rejects_sprt += 1
    res2 = trihypergeometric_optim(sample, popsize, null_margin=5)
    if res2 <= alpha:
        rejects_trihyper += 1
    if res2 <= alpha/2:
        rejects_trihyper_red += 1
# Labels fixed: the draws above use n=100, not n=1000.
print("n=100, SPRT rejection rate:", rejects_sprt/reps)
print("n=100, fixed n trihypergeometric rejection rate:", rejects_trihyper/reps)
print("n=100, fixed n trihypergeometric rejection rate with alpha/2:", rejects_trihyper_red/reps)
```
# The reported results are wrong and inconsistent with the null.
The null hypothesis is that $N_w - N_\ell \leq 200$: this is false.
The alternative is that the reported results are correct: $V_w = 8,500$ and $V_\ell = 7,000$.
The truth is somewhere in the middle, with $N_w - N_\ell = 1,000$.
Power is not great, even for the larger sample sizes considered below ($n$ up to $3{,}000$, nearly a fifth of the population).
Candidate | Reported | Actual
---|---|---
A | 8,500 | 8,000
B | 7,000 | 7,000
Ballots | 16,500 | 16,500
Diluted margin | 9% | 6%
```
np.random.seed(8062018)
alpha = 0.05
population = [1]*8000 + [0]*7000 + [np.nan]*1500
popsize = len(population)
reps = 100

# Run the same power simulation at increasing sample sizes. The draws
# consume the RNG stream in the same order as running each size in turn.
for size in (1000, 2000, 3000):
    rejects_sprt = 0
    sprt_pvalues = []
    for i in range(reps):
        sample = np.random.choice(population, replace=False, size=size)
        res = ballot_polling_sprt(sample, popsize, alpha, Vw=8500, Vl=7000, null_margin=200)
        if res['decision'] == 1:
            rejects_sprt += 1
        sprt_pvalues.append(res['pvalue'])
    print("n=%d, SPRT rejection rate:" % size, rejects_sprt/reps)
    print("n=%d, median p-value:" % size, np.median(sprt_pvalues))
```
# The reported results are correct and inconsistent with the null.
The null hypothesis is that $N_w - N_\ell \leq 200$: this is false.
The alternative is that the reported results are correct: $V_w = 8,500$ and $V_\ell = 7,000$.
Power is improved.
Candidate | Reported | Actual
---|---|---
A | 8,500 | 8,500
B | 7,000 | 7,000
Ballots | 16,500 | 16,500
Diluted margin | 9% | 6%
```
np.random.seed(8062018)
alpha = 0.05
population = [1]*8500 + [0]*7000 + [np.nan]*1000
popsize = len(population)
reps = 100

# Power simulation with a correct report (alternative true), at two sample
# sizes; the draws consume the RNG stream in the same order as before.
for size in (500, 1000):
    rejects_sprt = 0
    sprt_pvalues = []
    for i in range(reps):
        sample = np.random.choice(population, replace=False, size=size)
        res = ballot_polling_sprt(sample, popsize, alpha, Vw=8500, Vl=7000, null_margin=200)
        if res['decision'] == 1:
            rejects_sprt += 1
        sprt_pvalues.append(res['pvalue'])
    print("n=%d, SPRT rejection rate:" % size, rejects_sprt/reps)
    print("n=%d, median p-value:" % size, np.median(sprt_pvalues))
```
| github_jupyter |
# Preprocessing for simulation 5
## Effects at phylum level and order level with Mis-specified tree information
#### Method comparison based on MSE and Pearson correlation coefficient
#### for outcome associated taxa clustering at phylum & order level under regression design when using a mis-specified phylogenetic tree in model learning
@ Aug. 16, Youngwon (youngwon08@gmail.com)
```
import pandas as pd
import numpy as np
from sklearn.utils.extmath import softmax as softmax
from scipy.special import expit as sigmoid
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
%matplotlib inline
# Output folder for simulation-5 artifacts and location of the genus-48 data
path = "./data/simulation/s5"
path_genus = "./data/genus48"
def relu(x):
    """Element-wise rectified linear unit: max(x, 0).

    Returns a new array and leaves the input untouched. (The previous
    implementation zeroed negative entries of ``x`` in place, mutating the
    caller's array as a hidden side effect.)
    """
    return np.maximum(x, 0.)
```
## Mis-specified Phylogenetic Tree information
```
## TODO: Should we start with genus48.csv?
# Mis-specified phylogenetic tree dictionary (genus -> ... -> phylum) with
# deliberately wrong assignments, used to study robustness to tree error.
phylogenetic_tree_info = pd.read_csv('%s/genus48_dic_misspecify.csv' % path_genus)
# tree_level_list = ['Genus', 'Family', 'Order', 'Class', 'Phylum']
# phylogenetic_tree_info = phylogenetic_tree_info.iloc[:,::-1]
tree_level_list = phylogenetic_tree_info.columns[:-1].tolist()
phylogenetic_tree_info
print('------------------------------------------------------------------------------------------')
print('Phylogenetic tree level list: %s' % tree_level_list)
print('------------------------------------------------------------------------------------------')
# NOTE(review): per-level category names are loaded from a precomputed file
# rather than this CSV's unique values (see the commented-out line below) --
# presumably to keep indices aligned with the true tree; confirm.
lvl_category_dict = np.load('data/genus48/lvl_category.npy', allow_pickle=True)
phylogenetic_tree_dict = {'Number':{}}
for i, tree_lvl in enumerate(tree_level_list):
    # lvl_category = phylogenetic_tree_info[tree_lvl].unique()
    lvl_category = lvl_category_dict[i]
    lvl_num = lvl_category.shape[0]
    print('%6s: %d' % (tree_lvl, lvl_num))
    # name -> integer index mapping for this level
    phylogenetic_tree_dict[tree_lvl] = dict(zip(lvl_category, np.arange(lvl_num)))
    phylogenetic_tree_dict['Number'][tree_lvl]=lvl_num
print('------------------------------------------------------------------------------------------')
print('Phylogenetic_tree_dict info: %s' % list(phylogenetic_tree_dict.keys()))
print('------------------------------------------------------------------------------------------')
# Encode the tree as an integer matrix (one row per genus, one column per level)
phylogenetic_tree = phylogenetic_tree_info.iloc[:,:-1]
for tree_lvl in tree_level_list:
    phylogenetic_tree[tree_lvl] = phylogenetic_tree[tree_lvl].map(phylogenetic_tree_dict[tree_lvl])
phylogenetic_tree = np.array(phylogenetic_tree)
phylogenetic_tree
# Build edge-weight matrices between adjacent levels; the "noise" variant
# uses 0.01 instead of 0 for absent edges and is saved as miss_mw<i>.csv.
tree_weight_list = []
tree_weight_noise_list = []
num_dict = phylogenetic_tree_dict['Number']
for i in range(len(tree_level_list)-1):
    print('Build edge weights between [%6s, %6s]'%(tree_level_list[i],tree_level_list[i+1]))
    lower = phylogenetic_tree[:,i]
    upper = phylogenetic_tree[:,i+1]
    n_lower = num_dict[tree_level_list[i]]
    n_upper = num_dict[tree_level_list[i+1]]
    tree_w = np.zeros((n_lower,n_upper))
    tree_w_n = np.zeros_like(tree_w) + 0.01
    for j in range(n_upper):
        # rows (lower-level nodes) whose parent at the upper level is j
        tree_w[lower[j==upper],j] = 1.
        tree_w_n[lower[j==upper],j] = 1.
    tree_weight_list.append(tree_w)
    tree_weight_noise_list.append(tree_w_n)
    pd.DataFrame(tree_weight_noise_list[-1]).to_csv('%s/miss_mw%d.csv' % (path, i+1), index=False)
```
## True Phylogenetic Tree information
```
## TODO: Should we start with genus48.csv?
# True phylogenetic tree dictionary (genus -> family -> order -> class -> phylum).
phylogenetic_tree_info = pd.read_csv('%s/genus48_dic.csv' % path_genus)
# tree_level_list = ['Genus', 'Family', 'Order', 'Class', 'Phylum']
tree_level_list = phylogenetic_tree_info.columns[:-1].tolist()
phylogenetic_tree_info
# Collect the category names observed at each level of the true tree
lvl_category_dict = []
for tree_lvl in tree_level_list:
    lvl_category = phylogenetic_tree_info[tree_lvl].unique()
    lvl_category_dict.append(lvl_category)
print('------------------------------------------------------------------------------------------')
print('Phylogenetic tree level list: %s' % tree_level_list)
print('------------------------------------------------------------------------------------------')
phylogenetic_tree_dict = {'Number':{}}
for tree_lvl in tree_level_list:
    lvl_category = phylogenetic_tree_info[tree_lvl].unique()
    lvl_num = lvl_category.shape[0]
    print('%6s: %d' % (tree_lvl, lvl_num))
    # name -> integer index mapping for this level
    phylogenetic_tree_dict[tree_lvl] = dict(zip(lvl_category, np.arange(lvl_num)))
    phylogenetic_tree_dict['Number'][tree_lvl]=lvl_num
print('------------------------------------------------------------------------------------------')
print('Phylogenetic_tree_dict info: %s' % list(phylogenetic_tree_dict.keys()))
print('------------------------------------------------------------------------------------------')
# Encode the tree as an integer matrix (one row per genus, one column per level)
phylogenetic_tree = phylogenetic_tree_info.iloc[:,:-1]
for tree_lvl in tree_level_list:
    phylogenetic_tree[tree_lvl] = phylogenetic_tree[tree_lvl].map(phylogenetic_tree_dict[tree_lvl])
phylogenetic_tree = np.array(phylogenetic_tree)
phylogenetic_tree
# Build edge-weight matrices between adjacent levels; the "noise" variant
# uses 0.01 instead of 0 for absent edges and is saved as mw<i>.csv.
tree_weight_list = []
tree_weight_noise_list = []
num_dict = phylogenetic_tree_dict['Number']
for i in range(len(tree_level_list)-1):
    print('Build edge weights between [%6s, %6s]'%(tree_level_list[i],tree_level_list[i+1]))
    lower = phylogenetic_tree[:,i]
    upper = phylogenetic_tree[:,i+1]
    n_lower = num_dict[tree_level_list[i]]
    n_upper = num_dict[tree_level_list[i+1]]
    tree_w = np.zeros((n_lower,n_upper))
    tree_w_n = np.zeros_like(tree_w) + 0.01
    for j in range(n_upper):
        # rows (lower-level nodes) whose parent at the upper level is j
        tree_w[lower[j==upper],j] = 1.
        tree_w_n[lower[j==upper],j] = 1.
    tree_weight_list.append(tree_w)
    tree_weight_noise_list.append(tree_w_n)
    pd.DataFrame(tree_weight_noise_list[-1]).to_csv('%s/mw%d.csv' % (path, i+1), index=False)
```
# Regression effected with order level and phylum level selections
Aug. 10, 2019
## Experiments with 1000 replications
## Generating X: microbiome abundance data
* Using the data that Jing made, described in detail by Zhai et al. (2018a,b).
* $n \times p$ OTU count matrix
* Aggregating $p_0=2964$ OTUs to $p=48$ genus
* Sample size for each replication: $n = 1000$
* Training $75%$; Test $25%$
* Phylogenetic tree levels:
1. Genus: $m^0 = 48$
1. Family: $m^1 = 40$
1. Order: $m^2 = 23$
1. Class: $m^3 = 17$
1. Phylum: $m^4 = 9$
## Generation Y: a forward propagation approach
### Ver 0: same weights with same true connection for each repetition
#### (similar to the original version)
$x^0 = x^{input} \in \mathbb{R}^{n\times p}$ (input genus abundance data)
#### Before the repetitions,
* Select the true connection to disease (based on the phylogenetic tree information)
1. Choose 4 indexes from the order nodes
1. Construct the disease path according to the chosen order nodes.
* Construct the true weights.
1. For $i = 1,2,...,4$, $w^{i} \in \mathbb{R}^{m^{i-1} \times m^{i}}, b^{i} \in \mathbb{R}^{m^{i} \times 1},$
$$w^{i}_{j,k} \sim \left\{\begin{matrix}
\text{Uniform}(-4.5,3) \quad\quad \text{associated with the disease} \\
% \mathcal{N}(0,0.0001) \quad\quad \text{not associated with the disease} \\
0 \quad\quad \text{not associated with the disease} \\
0 \quad\quad\quad \text{not associated with the phylogenetic trees}
\end{matrix}\right.$$
$$b^{i}_{k} \sim \mathcal{N}(0,4)$$
#### For each repetition,
* For $i = 1,2,...,4$,
1. $h^i = w^{i} x^{i-1} + b^i$
1. $x^{i} = \text{ReLU}(h^i) \in \mathbb{R}^{m^{i}}$
* For the last layer:
1. $y=x^{5} = w^{5}x^{4}+b^{5} + \epsilon$ where $\epsilon \sim \mathcal{N}(0, 1)$
```
# Simulation setup: pick the "true" disease pathway through the phylogenetic
# tree, draw the true network weights once, then forward-propagate each
# replication's abundance matrix to produce the response y.
verbose=False
count_path = 'data/simulation/count/'
# File names of the per-replication genus count matrices.
x_list = np.array(pd.read_csv('data/simulation/gcount_list.csv', header=None)[0])
np.random.seed(10)
print('-------------------------------------------------------------------------------')
print('Generation True Connection to disease')
# Hand-picked nodes carrying the disease signal at the order / phylum levels.
order_idx = np.array([5,4,1,9]) # 5(0) and 4(2) are effect at order level, 1(1) and 9(1) are effects at phylum level
phylum_idx = np.array([1,0,2]) # 1(1, 9) are effect at phylum level, 1(6) and 3(5) are effects at order level
true_tree_idx_list = []
for i in range(5):
    # Nodes at level i lying under the chosen order nodes (tree column 2)...
    idx_order = np.unique(np.concatenate([phylogenetic_tree[:,i][phylogenetic_tree[:,2]==k] for k in order_idx]))
    # idx_class = np.unique(np.concatenate([phylogenetic_tree[:,i][phylogenetic_tree[:,3]==k] for k in class_idx]))
    # ...and under the chosen phylum nodes (column 4); keep the intersection.
    idx_phylum = np.unique(np.concatenate([phylogenetic_tree[:,i][phylogenetic_tree[:,4]==k] for k in phylum_idx]))
    idx = np.intersect1d(idx_order, idx_phylum)
    print("%6s idx: %s"% (tree_level_list[i], idx))
    true_tree_idx_list.append(idx)
print('------------------------------------------------------------------------------------------')
# 0/1 masks marking the true (disease-associated) edges between consecutive levels.
true_tree_weight_list = []
num_dict = phylogenetic_tree_dict['Number']
for i in range(len(tree_level_list)-1):
    print('Build true edge weights between [%6s, %6s]'%(tree_level_list[i],tree_level_list[i+1]))
    lower = phylogenetic_tree[:,i]
    upper = phylogenetic_tree[:,i+1]
    n_lower = num_dict[tree_level_list[i]]
    n_upper = num_dict[tree_level_list[i+1]]
    tree_w = np.zeros((n_lower,n_upper), dtype=np.float32)
    for j in true_tree_idx_list[i]:
        # mark the edge from true node j to its parent at the next level
        tree_w[j,upper[lower==j]] = 1.
    true_tree_weight_list.append(tree_w)
    # Replicate the mask once per data replication before saving.
    np.save('%s/ver0/tw_%d.npy'%(path,i+1), np.repeat([tree_w], x_list.shape[0], axis=0))
    print('Saved true edge weights between [%6s, %6s] to %s/ver0/tw_%d.npy'%(tree_level_list[i],tree_level_list[i+1],path,i+1))
print('-------------------------------------------------------------------------------')
print('Generation y')
# Draw the true layer weights/biases (Ver 0: shared across all replications).
yweight_list = []
ybias_list = []
for i in range(len(true_tree_weight_list)):
    yw = np.random.uniform(-4.5,3., true_tree_weight_list[i].shape).astype(np.float32) * true_tree_weight_list[i] # left only the weights on the true connections to the disease
    yb = np.random.normal(0,4, true_tree_weight_list[i].shape[-1]).astype(np.float32)
    yw = np.repeat([yw], x_list.shape[0], axis=0)
    yb = np.repeat([yb], x_list.shape[0], axis=0)
    yweight_list.append(yw)
    ybias_list.append(yb)
    # NOTE(review): saved under index i, whereas tw_* above uses i+1 — TODO confirm numbering.
    np.save('%s/ver0/solw_%d.npy'%(path,i), yw)
    np.save('%s/ver0/solb_%d.npy'%(path,i), yb)
# Final (phylum -> scalar) layer: the first 3 phylum nodes get a weight...
ywc = np.zeros((true_tree_weight_list[3].shape[-1],1), dtype=np.float32)
ywc[:3,0] = np.random.uniform(-4.5,3., 3).astype(np.float32)
# ywc = np.expand_dims(np.array([-0.025, 0.15, -0.35, 0, 0, 0, 0, 0, 0]), axis=-1)
# ...and it is kept only where the node lies on the true path.
ywc[:,0] = ywc[:,0] * (np.sum(true_tree_weight_list[-1], axis=0) > 0).astype(np.float32)
ywc = np.repeat([ywc], x_list.shape[0], axis=0)
ybc = np.random.normal(0,4, ywc.shape[-1]).astype(np.float32)
ybc = np.repeat([ybc], x_list.shape[0], axis=0)
np.save('%s/ver0/solw_%d.npy'%(path,len(true_tree_weight_list)), ywc)
# np.save('%s/ver0/solb_%d.npy'%(path,len(true_tree_weight_list)), ybc)
# Forward-propagate each replication's (min-max scaled) counts to produce y.
newy_all = []
for fold in range(x_list.shape[0]):
    x = pd.read_csv('%s/%s'%(count_path, x_list[fold])) # input x
    mat = np.matrix(x)
    prepro = MinMaxScaler()
    prepro.fit(mat)
    x = pd.DataFrame(prepro.transform(mat), columns = list(x.columns))
    h = np.array(x, dtype=np.float32)
    for i, (yw, yb) in enumerate(zip(yweight_list,ybias_list)):
        yw_noise = yw[fold]
        # yw_noise += np.random.normal(0,0.0001, true_tree_weight_list[i].shape) \
        # *(1.-true_tree_weight_list[i])*(tree_weight_list[i]) # add noise on the tree
        h = relu(np.dot(h, yw_noise) + np.repeat([yb[fold]], h.shape[0], axis=0))
    h = np.dot(h, ywc[fold])
    p = h + np.repeat([ybc[fold]], h.shape[0], axis=0)
    # NOTE(review): this draws ONE scalar noise value shared by all n samples of the
    # fold; the write-up above states eps ~ N(0,1) per observation — TODO confirm.
    p += np.random.normal(0,1)
    newy = p[:,0]
    newy_all.append(newy)
newy_all = pd.DataFrame(np.stack(newy_all).T)  # rows: samples, cols: replications
newy_all.to_csv('%s/ver0/y.csv'%path, index=False)
np.save('%s/ver0/solb_%d.npy'%(path,len(true_tree_weight_list)), ybc)
newy_all  # notebook display
plt.hist(np.array(newy_all).flatten())  # sanity check on the response distribution
```
| github_jupyter |
```
import json
from dataclasses import dataclass
from typing import Any, Dict, List
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import resample
from sklearn.metrics import accuracy_score
from sklearn.linear_model import Perceptron
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from shared import dataset_local_path, TODO
# Load the poetry-identification data: one JSON record per line. Keep only the
# numeric feature dict per record and the boolean "poetry" label.
with open(dataset_local_path("poetry_id.jsonl")) as fp:
    records = [json.loads(line) for line in fp]
examples = [record["features"] for record in records]
ys = [record["poetry"] for record in records]

# Assign every feature name a stable column index and vectorize in one step.
feature_numbering = DictVectorizer(sort=True)
X = feature_numbering.fit_transform(examples)
print("Features as {} matrix.".format(X.shape))

RANDOM_SEED = 999999
y = np.array(ys)

# Split off a held-out test set, then carve the remainder into train/validation.
X_tv, X_test, y_tv, y_test = train_test_split(
    X, y, train_size=0.75, shuffle=True, random_state=RANDOM_SEED
)
X_train, X_vali, y_train, y_vali = train_test_split(
    X_tv, y_tv, train_size=0.66, shuffle=True, random_state=RANDOM_SEED
)

# Decision-tree hyperparameters shared by every model below.
params = {
    "criterion": "gini",
    "splitter": "best",
    "max_depth": 5,
}
N_MODELS = 100   # number of differently-seeded trees
N_SAMPLES = 100  # number of bootstrap resamples of one tree's predictions

# Variance source #1: retrain the same model with a different seed each time.
seed_based_accuracies = [
    DecisionTreeClassifier(random_state=RANDOM_SEED + offset, **params)
    .fit(X_train, y_train)
    .score(X_vali, y_vali)
    for offset in range(N_MODELS)
]

# Variance source #2: a single fixed tree, bootstrap-resampling its validation
# predictions together with the matching truth labels.
f_single = DecisionTreeClassifier(random_state=RANDOM_SEED, **params)
f_single.fit(X_train, y_train)
y_pred = f_single.predict(X_vali)

def _bootstrap_accuracy(trial):
    # Resample predictions and labels in lockstep, then score the sample.
    sample_pred, sample_truth = resample(y_pred, y_vali, random_state=RANDOM_SEED + trial)
    return accuracy_score(y_true=sample_truth, y_pred=sample_pred)

bootstrap_based_accuracies = [_bootstrap_accuracy(trial) for trial in range(N_SAMPLES)]

# Compare both uncertainty estimates side by side.
plt.boxplot([seed_based_accuracies, bootstrap_based_accuracies])
plt.xticks(ticks=[1, 2], labels=["Seed-Based", "Bootstrap-Based"])
plt.xlabel("Sampling Method")
plt.ylabel("Accuracy")
plt.ylim([0.8, 1.0])
plt.show()
```
1. The bounds created by the seed-based approach represent the range of accuracy scores across the
entire collection of models. The bounds created by the bootstrap approach show the distribution of the
data within a single model and demonstrate the range of accuracy scores of subsets of that model.
2. a) Since the variance in the seed-based approach is rather small, we can infer that the difference
between the data of the models is not large, which suggests that the overall variance is close
to that of one model with K samples
| github_jupyter |

# Graded Assignment: Machine Learning
### Lenin Escobar - Real-time Data Analysis
<h1 style="background-color:powderblue;">Setting Virtual Env</h1>
```
#General
import sys
import os
import subprocess
import time
import random
import numpy as np
#from math import log
#Plotting
import matplotlib.pyplot as plt
#Pyspark
from pyspark.sql import *
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.ml.feature import *
from pyspark.ml import Pipeline
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import LinearRegression
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.stat import Correlation
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator, TrainValidationSplit
#from pyspark.sql.functions import isnan, when, count, col, row_number, regexp_extract, regexp_replace, trim, split, to_timestamp, date_format
#from pyspark.sql.types import StructType, StructField, LongType, StringType, TimestampType, IntegerType
class CustomEnv():
    """Helper that provisions a minimal Python virtual environment.

    Checks whether the notebook already runs inside a venv rooted at
    ``virtual_env_dir`` and, if not, best-effort installs the tooling and
    creates one via shell commands.
    """
    def __init__(self, virtual_env_dir = "python-virtual-environments"):
        # Directory expected to contain the virtual environment.
        self.virtual_env_dir = virtual_env_dir
    def in_virtualenv(self):
        """Return True iff running inside a venv AND ``virtual_env_dir`` exists."""
        is_venv = False
        if ( hasattr(sys, 'prefix') and hasattr(sys, 'base_prefix') ):
            # Inside a venv, sys.prefix points at the env while base_prefix
            # points at the base interpreter; they differ only in a venv.
            if (sys.prefix != sys.base_prefix):
                if ( os.path.exists(self.virtual_env_dir) ):
                    is_venv = True
        return is_venv
    def exec_shell_cmd(self, prg_name, prg_args):
        """Run ``prg_name`` with the argument list ``prg_args``; return captured stdout (bytes)."""
        command = [prg_name]
        command.extend(prg_args)
        output = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0]
        return output
    def create_minimum_venv(self):
        """Create the virtual environment when not already inside one.

        Returns "Done" on success, or an "Error:<message>" string on failure.
        """
        try:
            if not self.in_virtualenv():
                #Upgrading pip first
                print("cmd_1")
                # BUG FIX: each argv token must be its own list element;
                # ["install --upgrade", "pip"] passed a single fused argument.
                cmd_output = self.exec_shell_cmd("pip", ["install", "--upgrade", "pip"])
                print(cmd_output)
                #Installing venv
                print("cmd_2")
                cmd_output = self.exec_shell_cmd("pip", ["install", "virtualenv"])
                print(cmd_output)
                #Creating directory
                print("cmd_3")
                cmd_output = self.exec_shell_cmd("mkdir", ["python-virtual-environments"])
                print(cmd_output)
                #Creating venv (shell string: needs `cd` + `&&`)
                print("cmd_4")
                command = "cd python-virtual-environments && python3 -m venv env"
                process = subprocess.run(command, capture_output=True, shell=True)
                print(process.stdout.decode())
        except Exception as e:
            # BUG FIX: the old `finally: return "Done"` overrode this return,
            # so callers could never see the error string.
            return 'Error:' + str(e)
        return "Done"
# Build the environment helper and (best-effort) provision the virtualenv.
customEnv = CustomEnv()
customEnv.create_minimum_venv()
# IPython shell escapes: activate the venv and install the Google API clients.
# NOTE(review): `!source ...` runs in a throwaway subshell, so the activation
# does not persist into this notebook's Python process — TODO confirm intent.
!source python-virtual-environments/env/bin/activate
!pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib
```
<h1 style="background-color:powderblue;">Google drive Conn Class</h1>
```
# According to Github best practices
# (https://docs.github.com/en/github/managing-large-files/working-with-large-files):
# GitHub limits the size of files allowed in repositories,
# and will block a push to a repository if the files are larger than the maximum file limit.
# So, I better use google drive for these cases
# Plus, I'm working on databrick community edition. So, I cannot use API Token (just to use something like s3)
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
from google.oauth2 import service_account
import io
import os
import sys
from IPython.utils.capture import CapturedIO
from enum import Enum
class Environments(Enum):
    """Where the notebook is currently executing."""

    UNDEFINED = 0   # not yet detected
    LOCAL = 1       # plain local Python session
    DATABRICKS = 2  # Databricks cluster
class ConfEnvironment(Enum):
    """Whether the filesystem environment has been configured yet."""

    PENDING = 0  # configuration not done
    READY = 1    # directories/credentials in place
class FSManagement():
    """File system management: create the auth/data directories and move the
    service-account credentials file to where the notebook can read it.

    NOTE(review): `dbutils` exists only inside Databricks; the Databricks
    branch of conf_fs_environment cannot run anywhere else.
    """
    def __init__(self,
                 path_authentication_dbfs_dir="dbfs:/FileStore/Authentication",
                 path_data_dbfs_dir="dbfs:/FileStore/Data",
                 path_authentication_dbfs_file="dbfs:/FileStore/python_service.json",
                 path_authentication_dir="file:/dbfs/FileStore/Authentication",
                 path_data_dir="file:/dbfs/FileStore/Data"):
        self.environment = Environments.LOCAL.value       # current environment
        self.conf_df_env = ConfEnvironment.PENDING.value  # configuration state
        self.path_authentication_dbfs_dir = path_authentication_dbfs_dir
        self.path_data_dbfs_dir = path_data_dbfs_dir
        self.path_authentication_dbfs_file = path_authentication_dbfs_file
        self.path_authentication_dir = path_authentication_dir
        self.path_data_dir = path_data_dir
    def is_running_in_databricks(self):
        """Return Environments.DATABRICKS.value on Databricks (SPARK_ENV_LOADED set), else Environments.LOCAL.value."""
        return (Environments.DATABRICKS.value if os.environ.get('SPARK_ENV_LOADED') else Environments.LOCAL.value)
    def conf_fs_environment(self):
        """Create the required directories for the detected environment.

        Returns an 'Error:<message>' string on failure, otherwise None.
        """
        # BUG FIX: is_running_in_databricks() returns 1 (LOCAL) or 2 (DATABRICKS),
        # both truthy, so `if self.is_running_in_databricks():` always took the
        # Databricks branch. Compare against the enum value explicitly.
        if self.is_running_in_databricks() == Environments.DATABRICKS.value:
            if self.conf_df_env == ConfEnvironment.PENDING.value:
                try:
                    #Creating directories in default storage (root) DBFS
                    dbutils.fs.mkdirs(self.path_authentication_dbfs_dir)
                    dbutils.fs.mkdirs(self.path_data_dbfs_dir)
                    #Creating directories in default local storage (filesystem)
                    dbutils.fs.mkdirs(self.path_authentication_dir)
                    dbutils.fs.mkdirs(self.path_data_dir)
                    #Moving the credentials file from DBFS to local storage
                    dbutils.fs.mv(self.path_authentication_dbfs_file, self.path_authentication_dir)
                except Exception as e:
                    return 'Error:' + str(e)
                # BUG FIX: READY was previously set in a `finally`, i.e. even
                # after a failure; only mark READY once setup succeeded.
                self.conf_df_env = ConfEnvironment.READY.value
        else:
            #Safely create directories on a plain filesystem
            if not os.path.exists(self.path_data_dbfs_dir):
                os.makedirs(self.path_data_dbfs_dir)
            if not os.path.exists(self.path_authentication_dbfs_dir):
                os.makedirs(self.path_authentication_dbfs_dir)
            self.conf_df_env = ConfEnvironment.READY.value
# Every argument passed previously matched FSManagement's own defaults,
# so rely on the defaults directly.
fs_management = FSManagement()
fs_management.conf_fs_environment()
class GoogleDriveFiles():
    """Thin wrapper around the Google Drive v3 API for downloading files,
    authenticated with a service-account JSON key file.
    """
    def __init__(self, credentials_file_name):
        #https://console.cloud.google.com/iam-admin/serviceaccounts/details/113530738473514992506/keys?project=feisty-dolphin-313318
        #https://console.cloud.google.com/apis/credentials?organizationId=0&project=feisty-dolphin-313318&supportedpurview=project
        #https://console.cloud.google.com/apis/library/drive.googleapis.com?project=feisty-dolphin-313318
        # Path to the service-account key managed in the consoles linked above.
        self.credentials_file_name = credentials_file_name
        print(self.credentials_file_name)
        self.credentials = service_account.Credentials.from_service_account_file(self.credentials_file_name)
    def download_file_from_gdrive(self, file_id, downloaded_file_name, verbose = False):
        """Download one Drive file to disk, chunk by chunk.
        :return:boolean (True once the download completed)
        """
        service = build('drive', 'v3', credentials=self.credentials)
        media_request = service.files().get_media(fileId=file_id)
        # Stream straight to disk instead of buffering the file in memory.
        target = io.FileIO(downloaded_file_name, 'wb')
        chunk_downloader = MediaIoBaseDownload(target, media_request)
        done = False
        while not done:
            status, done = chunk_downloader.next_chunk()
            if verbose:
                print (f'%{int(status.progress() * 100)} downloaded file: {downloaded_file_name}')
        return done
#Google Drive - download every dataset artefact we need
google_drive = GoogleDriveFiles("/dbfs/FileStore/Authentication/python_service.json")
# (Drive file id, local destination) pairs
_downloads = [
    ('1wNQXEkJl_CaYNk0Hl_gKaKP71Rsle3h7', "/dbfs/FileStore/Data/feature.name"),
    ('1F5x1y1ulpII1Zb96p76NwPtg7EppxOhV', "/dbfs/FileStore/Data/test.data"),
    ('1ZIoE3DU1_NxqIemHpwfr4vYJeaw9j7iB', "/dbfs/FileStore/Data/train.data"),
    ('1aoY92XlzY7TkX9GGtSwY2TlEXgzucdxj', "/dbfs/FileStore/Data/train.solution"),
    ('1989M5OYFs7wb-YG3XQSFDlKVgHV-uJGT', "/dbfs/FileStore/Data/validation.data"),
]
for file_id, file_path in _downloads:
    download_status = google_drive.download_file_from_gdrive(file_id, file_path, verbose = True)
    print(download_status)
```
<h5 style="background-color:blue;">Reading pre-downloaded files</h5>
```
# List the files that were just downloaded into DBFS-backed local storage.
os.listdir("/dbfs/FileStore/Data/")
# Quick sanity peek at each raw file (IPython shell escapes).
!cat /dbfs/FileStore/Data/feature.name
!head -5 /dbfs/FileStore/Data/train.data
!head -5 /dbfs/FileStore/Data/train.solution
!head -5 /dbfs/FileStore/Data/test.data
```
<h5 style="background-color:blue;">Preprocessing</h5>
```
class TimerError(Exception):
    """Raised when a Timer is started or stopped in the wrong state."""
    # BUG FIX: this exception was referenced below but never defined,
    # so every misuse raised NameError instead of TimerError.

class Timer():
    """Custom Timer Class: wall-clock a labelled process via perf_counter."""
    def __init__(self):
        self._start_time = None  # perf_counter value at start(); None when idle
        self._id = None          # label of the process currently being timed
    def start(self, _id):
        """Start a new timer for the process labelled ``_id``."""
        if self._start_time is not None:
            raise TimerError(f"Timer is running. Use .stop() to stop it, so you can use it this process:{self._id}")
        # BUG FIX: the label was never stored, so messages printed "None".
        self._id = _id
        self._start_time = time.perf_counter()
    def stop(self, _id):
        """Stop the timer, and report the elapsed time"""
        if self._start_time is None:
            raise TimerError(f"Timer is not running. Use .start() to start it, so you can use it this process:{self._id}")
        elapsed_time = time.perf_counter() - self._start_time
        self._start_time = None
        print(f"Process {self._id} elapsed time: {elapsed_time:0.4f} seconds")
#Testind Timer Class
#t = Timer()
#t.start(_id = "random")
#lst_test = []
#for i in range(0,100):
# x = random.randint(1,10)
# lst_test.append(x)
#t.stop(_id = "random")
#I decided to work with Spark dataframes based on parquet files instead of CSV
class Ingestion():
    """Ingest the tab-separated raw files into Spark DataFrames backed by parquet.

    NOTE(review): relies on the notebook-session globals `spark`, `dbutils`,
    `Window`, `row_number`, `lit` being available.
    """
    def __init__(self,
                 csv_file_path = "file:/dbfs/FileStore/Data/",
                 parquet_file_path = "dbfs:/FileStore/Data/"):
        # Source CSV directory (driver-local) and target parquet directory (DBFS).
        self.csv_file_path = csv_file_path
        self.parquet_file_path = parquet_file_path
    def parquet_file_exists(self, file_name):
        """Return the number of entries at the parquet path (0 when absent).
        :return:list_len
        """
        lst_len = 0
        try:
            lst_len = len(dbutils.fs.ls(self.parquet_file_path + file_name))
        except Exception:
            # BUG FIX: dbutils.fs.ls raises when the path is missing, so this
            # branch means "not present" — the old message claimed the opposite,
            # and a `return` inside `finally` made the except's return dead code.
            print("Parquet file does not exist yet!")
        return lst_len
    def parquet_file_del(self, file_name):
        """Delete the parquet file if it exists.
        :return:boolean — True on success, False on failure
        """
        try:
            dbutils.fs.rm(self.parquet_file_path + file_name, recurse = True)
        except Exception:
            print("Something wrong happened!")
            # BUG FIX: the old `finally: return True` overrode this False,
            # so the method reported success even when the delete failed.
            return False
        return True
    def df_from_csv(self, file_name, schema):
        """Read a headerless, tab-delimited file into a DataFrame with `schema`.
        :return:dataframe
        """
        input_file = self.csv_file_path + file_name
        df = spark.read.schema(schema) \
            .option("header", "false") \
            .option("delimiter", "\t") \
            .csv(input_file)
        return df
    def df_to_parquet(self, df, file_name):
        """Write `df` as parquet.
        :return:output_file (full parquet path)
        """
        output_file = self.parquet_file_path + file_name
        df.write.parquet(output_file)
        return output_file
    def df_from_parquet(self, _file):
        """Load a DataFrame back from a parquet file.
        :return:dataframe
        """
        df = spark.read.parquet(_file)
        return df
    def join_train_db(self, df_train_data, df_train_solution):
        """Zip the data and solution DataFrames row-by-row via a synthetic row number.

        NOTE(review): Window.orderBy(lit(1)) gives an arbitrary but
        session-consistent ordering; both inputs must be row-aligned.
        :return:dataframe
        """
        window = Window.orderBy(lit(1))
        df_tmp_data = df_train_data.withColumn("rownumber", row_number().over(window)-1)
        df_tmp_solution = df_train_solution.withColumn("rownumber", row_number().over(window)-1)
        df_train_data_joined = df_tmp_data.join(df_tmp_solution, ["rownumber"]).drop("rownumber")
        return df_train_data_joined
#Read train.data file into a Dataframe, refreshing any stale parquet copy first.
ingestion = Ingestion()
csv_file_name = "train.data"
parquet_file_name = "train-data.parquet"
# Proceed only after any existing parquet output has been removed.
_continue = False
if ingestion.parquet_file_exists(file_name=parquet_file_name) > 0:
    if ingestion.parquet_file_del(file_name=parquet_file_name):
        _continue = True
else:
    _continue = True
if _continue:
    # Explicit schema for the headerless, tab-separated tweet dump.
    schema = StructType([
        StructField(name = "tweet_id", dataType = LongType(), nullable = False),
        StructField(name = "username", dataType = StringType(), nullable = False),
        StructField(name = "timestamp", dataType = StringType(), nullable = False),
        StructField(name = "#followers", dataType = IntegerType(), nullable = True),
        StructField(name = "#friends", dataType = IntegerType(), nullable = True),
        StructField(name = "#favorites", dataType = IntegerType(), nullable = True),
        StructField(name = "entities", dataType = StringType(), nullable = True),
        StructField(name = "sentiment", dataType = StringType(), nullable = True),
        StructField(name = "mentions", dataType = StringType(), nullable = True),
        StructField(name = "hashtags", dataType = StringType(), nullable = True),
        StructField(name = "urls", dataType = StringType(), nullable = True)
    ])
    # CSV -> DataFrame -> parquet -> DataFrame, timing each stage.
    t = Timer()
    t.start(_id="train.data_start_df_from_csv")
    df_from_csv = ingestion.df_from_csv(file_name=csv_file_name, schema=schema)
    t.stop(_id="train.data_stop_df_from_csv")
    t.start(_id="train.data_start_df_to_parquet")
    parquet_file = ingestion.df_to_parquet(df = df_from_csv, file_name = parquet_file_name)
    t.stop(_id="train.data_stop_df_to_parquet")
    t.start(_id="train.data_start_df_from_parquet")
    df_train_data = ingestion.df_from_parquet(_file = parquet_file)
    t.stop(_id="train.data_start_df_from_parquet")
#Read train.solution file into a Dataframe (same refresh-then-load pattern).
ingestion = Ingestion()
csv_file_name = "train.solution"
parquet_file_name = "train-solution.parquet"
_continue = False
if ingestion.parquet_file_exists(file_name=parquet_file_name) > 0:
    if ingestion.parquet_file_del(file_name=parquet_file_name):
        _continue = True
else:
    _continue = True
if _continue:
    # The solution file holds a single integer column (the prediction target).
    schema = StructType([
        StructField(name = "tweet_number", dataType = IntegerType(), nullable = False)
    ])
    t = Timer()
    t.start(_id="train.solution_start_df_from_csv")
    df_from_csv = ingestion.df_from_csv(file_name=csv_file_name, schema=schema)
    t.stop(_id="train.solution_stop_df_from_csv")
    t.start(_id="train.solution_start_df_to_parquet")
    parquet_file = ingestion.df_to_parquet(df = df_from_csv, file_name = parquet_file_name)
    t.stop(_id="train.solution_stop_df_to_parquet")
    t.start(_id="train.solution_start_df_from_parquet")
    df_train_solution = ingestion.df_from_parquet(_file = parquet_file)
    t.stop(_id="train.solution_start_df_from_parquet")
# Sanity checks on both DataFrames before joining.
df_train_data.filter( col("tweet_id") == 1178798309491822592).show(n=5, vertical=False, truncate = False)
df_train_solution.show(n=5, vertical=False, truncate = False)
print(df_train_data.count(), df_train_solution.count())
# Row-align data and solution into one training DataFrame.
df_train_data_joined = ingestion.join_train_db(df_train_data=df_train_data,df_train_solution=df_train_solution)
df_train_data_joined.show(n=5, vertical=False, truncate = False)
print(df_train_data_joined.count())
#Converting timestamp col type from string to timestamp:
# drop the leading weekday token, then parse "MMM dd HH:mm:ss Z yyyy".
df_train_data_joined = df_train_data_joined.withColumn("timestamp", to_timestamp(substring("timestamp", 5, 30), "MMM dd HH:mm:ss Z yyyy") )
#Splitting the original space-separated sentiment col into two integer scores
df_train_data_joined = df_train_data_joined.withColumn('pos_sentiment', split(df_train_data_joined['sentiment'], ' ').getItem(0)) \
    .withColumn('neg_sentiment', split(df_train_data_joined['sentiment'], ' ').getItem(1))
df_train_data_joined = df_train_data_joined.withColumn("pos_sentiment", df_train_data_joined["pos_sentiment"].cast(IntegerType()))
df_train_data_joined = df_train_data_joined.withColumn("neg_sentiment", df_train_data_joined["neg_sentiment"].cast(IntegerType()))
df_train_data_joined.show(n=5, vertical=False, truncate = False)
df_train_data_joined.dtypes
#df_train_data_joined.select("#Followers", "#Friends").show(n=5, vertical=False, truncate = False)
```
<h5 style="background-color:blue;">Feature engineering</h5>
```
#df_train_data_feat_eng = df_train_data_joined.withColumn("date_tweet", to_date(col("timestamp") ) )
# Feature-engineering working copy plus two aggregates:
# tweets per day, and tweets per day per user.
df_train_fe = df_train_data_joined
df_train_fe_tweets_date = df_train_fe.groupBy(to_date(col("timestamp")).alias("date_tweet")).count().select( "date_tweet", col("count").alias("count")).orderBy("count",ascending=False)
df_train_fe_tweets_date_usr = df_train_fe.groupBy(to_date(col("timestamp")).alias("date_tweet"), col("username")).count().select( "date_tweet", "username", col("count").alias("count")).orderBy("username", "count", ascending=False)
display(df_train_fe_tweets_date)
df_train_fe_tweets_date.show(n=5, vertical=False, truncate = False)
#Casting BigInt to Int (earlier attempts, kept for reference)
#df_train_fe_tweets_date = df_train_fe_tweets_date.withColumn("count", df_train_fe_tweets_date["count"].cast(LongType()))
#df_train_fe_tweets_date.withColumn( "count", col("count").cast("integer") )
df_train_fe_tweets_date.printSchema()
class Tranformation():
    """Column-wise feature scaling helpers for Spark DataFrames."""
    def __init__(self):
        print("init")
    def trans_MinMaxScaler(self, df, cols):
        """Min-max scale every column in ``cols``, adding a ``<col>_Scaled`` column.

        Per column: assemble into a one-element vector, run MinMaxScaler, then
        unwrap the vector back to a plain float and drop the helper column.
        :return:dataframe
        """
        # UDF that unwraps a one-element vector into a float
        first_as_float = udf(lambda v: float(v[0]), FloatType())
        for column in cols:
            vect_col = column + "_Vect"
            scaled_col = column + "_Scaled"
            stages = [
                VectorAssembler(inputCols=[column], outputCol=vect_col),
                MinMaxScaler(inputCol=vect_col, outputCol=scaled_col),
            ]
            fitted = Pipeline(stages=stages).fit(df)
            df = fitted.transform(df)
            df = df.withColumn(scaled_col, first_as_float(scaled_col)).drop(vect_col)
        return df
# Scale the per-day tweet counts into [0, 1].
tranformation = Tranformation()
df_train_fe_tweets_date = tranformation.trans_MinMaxScaler(df=df_train_fe_tweets_date, cols=["count"])
print("After Scaling :")
df_train_fe_tweets_date.show(n=5, vertical=False, truncate = False)
#df_train_fe_tweets_date.select("count").rdd.flatMap(lambda x: x).histogram(20)
df_train_fe_tweets_date_usr.printSchema()
#tranformation = Tranformation()
# Same scaling for the per-day-per-user counts.
df_train_fe_tweets_date_usr = tranformation.trans_MinMaxScaler(df=df_train_fe_tweets_date_usr, cols=["count"])
print("After Scaling :")
df_train_fe_tweets_date_usr.show(n=5, vertical=False, truncate = False)
df_train_fe_tweets_date_usr.filter( col("count") > 1).orderBy("count", ascending=False).show(n=5, vertical=False, truncate = False)
df_train_fe.filter( col("username") == 'ffff643ced69a69fca8cfc93e4125bcc').show(n=5, vertical=False, truncate = False)
print( df_train_fe_tweets_date.count(), df_train_fe_tweets_date_usr.count() )
# Join the two scaled aggregates on date:
#   count_td  = scaled tweets/day, count_tdu = scaled tweets/day/user.
df_train_fe_tweets_date_usr_join = df_train_fe_tweets_date_usr.withColumn("count_tdu",col("count_Scaled")).alias("A").join(df_train_fe_tweets_date.withColumn("count_td",col("count_Scaled")).alias("B"), on=["date_tweet"], how ="inner")\
    .select("A.date_tweet", "A.username", "B.count_td", "A.count_tdu" )
df_train_fe_tweets_date_usr_join.show(n=5, vertical=False, truncate = False)
df_train_fe_tweets_date_usr_join.filter( col("count_tdu") > 0).orderBy("count_tdu", ascending=False).show(n=5, vertical=False, truncate = False)
# Attach both aggregate features back onto the main training DataFrame.
df_train_fe = df_train_fe.withColumn("date_tweet",to_date(col("timestamp"))).alias("A").join(df_train_fe_tweets_date_usr_join.alias("B"), on=["date_tweet", "username"], how ="inner" )\
    .select("A.*", "B.count_td", "B.count_tdu")
df_train_fe.show(n=5, vertical=False, truncate = False)
print( df_train_fe.count() )
# UDF for converting column type from vector to double type
#unlist = udf(lambda v: float(v[0]),FloatType())
#dayofweek(date) - Returns the day of the week for date/timestamp (1 = Sunday, 2 = Monday, ..., 7 = Saturday).
df_train_fe = df_train_fe.withColumn("weekend", when( dayofweek(col("date_tweet")) == 1, 1).when( dayofweek(col("date_tweet")) == 7, 1 ).otherwise(0) )
df_train_fe.show(n=5, vertical=False, truncate = False)
#https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
# One-hot AM/PM flags derived from the timestamp.
df_train_fe = df_train_fe.withColumn("am_of_day", when( date_format(col("timestamp"), "a") == "AM", 1 ).otherwise(0) )
df_train_fe = df_train_fe.withColumn("pm_of_day", when( date_format(col("timestamp"), "a") == "PM", 1 ).otherwise(0) )
df_train_fe.show(n=5, vertical=False, truncate = False)
#https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.functions.split.html
#df_train_fe_tmp = df_train_fe
# Count the items embedded in each delimited text column
# (entities use ';', urls use ':-:', mentions/hashtags are space-separated).
df_train_fe = df_train_fe.withColumn('number_entities', when (length(trim(col("entities"))) == 0,0).otherwise(size(split(trim(col("entities")), r";")) -1))
df_train_fe = df_train_fe.withColumn('number_mentions', when (length(trim(col("mentions"))) == 0,0).otherwise(size(split(trim(col("mentions")), r"\s+"))))
df_train_fe = df_train_fe.withColumn('number_hashtags', when (length(trim(col("hashtags"))) == 0,0).otherwise(size(split(trim(col("hashtags")), r"\s+"))))
df_train_fe = df_train_fe.withColumn('number_urls', when (length(trim(col("urls"))) == 0,0).otherwise(size(split(trim(col("urls")), r":-:")) -1))
df_train_fe.show(n=5, vertical=False, truncate = False)
#data_test = [("James","","Smith Smith","36636","M",3000),
# ("Michael","https://twitter.com/tconnellyrte/status/1178760377678618626:-:","","40288","M",4000),
# ("Robert","","Williams","42114","M",4000),
# ("Maria","Anne","Jones","39192","F",4000),
# ("original_text:annotated_entity:score;","Mary","Brown","","F",-1)
# ]
#schema_test = StructType([ \
# StructField("firstname",StringType(),True), \
# StructField("middlename",StringType(),True), \
# StructField("lastname",StringType(),True), \
# StructField("id", StringType(), True), \
# StructField("gender", StringType(), True), \
# StructField("salary", IntegerType(), True) \
# ])
#df_xy = spark.createDataFrame(data=data_test,schema=schema_test)
#https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.functions.split.html
#df_xy = df_xy.withColumn('number_lastname', when (length(trim(col("lastname"))) == 0,0).otherwise( size(split( trim(col("lastname") ), r"\s+")) ) )
#df_xy = df_xy.withColumn('number_middlename', when (length(trim(col("middlename"))) == 0,0).otherwise( size(split( trim(col("middlename") ), r":-:")) -1) )
#df_xy = df_xy.withColumn('number_firstname', when (length(trim(col("firstname"))) == 0,0).otherwise( size(split( trim(col("firstname") ), r";")) -1) )
#df_xy.show(n=5, vertical=False, truncate = False)
#https://arxiv.org/pdf/1709.02984.pdf
#the positive polarity score (from 2 to 5), the negative polarity score (from -2 to -5) and scores (1 and -1) are considered to be neutral.
##overall trinary score, i.e. the overall positive (score = 1), negative (score = -1) and neutral (score = 0)
#df_train_fe_tmp = df_train_fe
df_train_fe = df_train_fe.withColumn("sentiment_overall_trinary_Score", when( col("pos_sentiment") + col("neg_sentiment") > 0, 1 ).when( col("pos_sentiment") + col("neg_sentiment") < 0, -1 ). otherwise(0) )
df_train_fe.show(n=5, vertical=False, truncate = False)
df_train_fe.printSchema()
#print( df_train_fe_tweets_date_usr_join.count() )
#Just like a custom checkpoint
df_train_fe.write.parquet("dbfs:/FileStore/Data/df_train_fe.parquet")
# Numeric columns to feed into the correlation analysis below.
columnAssembler = ['tweet_number',
                   '#followers',
                   '#friends',
                   '#favorites',
                   'pos_sentiment',
                   'neg_sentiment',
                   'count_td',
                   'count_tdu',
                   'weekend',
                   'am_of_day',
                   'pm_of_day',
                   'number_entities',
                   'number_mentions',
                   'number_hashtags',
                   'number_urls',
                   'sentiment_overall_trinary_Score']
vectorAssembler = VectorAssembler(
    inputCols = columnAssembler
    , outputCol = "features"
)
vectorAssembler = vectorAssembler.transform(df_train_fe)
# Pearson correlation matrix over the assembled feature vector.
pearson_corr = Correlation.corr(dataset=vectorAssembler, column= "features", method="pearson")
corr_list = pearson_corr.head()[0].toArray().tolist()
pearson_corr_df = spark.createDataFrame(corr_list, columnAssembler)
#pearson_corr_df.show(truncate=False)
display(pearson_corr_df)
data_array = np.array(pearson_corr_df.collect())
corrmatrix = data_array.tolist()
#print(corrmatrix)
def plot_corr_matrix(correlations, attr, fig_no):
    """Render a square correlation matrix as a heatmap with one tick per attribute.

    correlations -- square list-of-lists (or array) of correlation values in [-1, 1]
    attr         -- attribute names, one per row/column of `correlations`
    fig_no       -- matplotlib figure number (lets callers reuse/replace a figure)
    """
    fig = plt.figure(fig_no, figsize=(50., 10.))
    ax = fig.add_subplot(111)
    cax = ax.matshow(correlations, vmax=1, vmin=-1)
    # BUG FIX: pin the tick positions explicitly before labelling them.
    # Calling set_xticklabels alone (with a padded '' entry) relies on matplotlib's
    # auto-chosen tick locations and misaligns labels when there are many attributes.
    ticks = list(range(len(attr)))
    ax.set_xticks(ticks)
    ax.set_yticks(ticks)
    ax.set_xticklabels(attr, rotation=10)
    ax.set_yticklabels(attr)
    ax.set_title("Correlation Matrix for Specified Attributes")
    fig.colorbar(cax)
    plt.show()
# Draw the correlation heatmap for all modeling columns (234 is an arbitrary figure number).
plot_corr_matrix(corrmatrix, columnAssembler, 234)
#log(x-min(x)+1) where min(x) == 1; and we are adding 1 because log needs positive values
#We apply a log transformation so the distribution becomes more approximately normal and the effect of outliers decreases.
#X -Xmin / Xmax - Xmin
#df_train_fe_tweets_date_usr = df_train_fe_tweets_date_usr.withColumn("count_1", df_train_fe_tweets_date_usr["count"].cast(IntegerType()))
# Normalize each Vector using $L^1$ norm.
#normalizer = Normalizer(inputCol="count_1", outputCol="normCount", p=1.0)
#l1NormData = normalizer.transform(df_train_fe_tweets_date_usr)
#We need to normalize the data
#feature engineering ideas:
#User-based --> Average tweets per day (and popularity), Average tweets per day per user and tweets per day ratio
#Time-based --> is_public_holiday, is_weekend, is_noon, is_eve, etc
#Content-based --> num_and_length_url_hashtags_mentions, contain_exclamation_or_anyNumber, average_sentiment_analysis. Finally, consider TFIDF in categorical features
```
<h5 style="background-color:blue;">Data Exploration</h5>
```
# describe() gives most of the basic statistical summaries per column.
df_train_fe.describe().show(n=5, vertical=True, truncate = False)
# Count the null values in each column in a single aggregation pass.
df_train_fe_agg = df_train_fe.agg(*[count(when(isnull(c), c)).alias(c) for c in df_train_fe.columns])
df_train_fe_agg.show(n=5, vertical=True, truncate = False)
# Histogram of tweet_number on a small sample of rows.
npts = 400
df_train_fe_plot = df_train_fe.limit(num=npts)
df_train_fe_plot = df_train_fe_plot.withColumn( "tweet_number", col("tweet_number").cast("float") )
# Log of the target -- presumably pyspark.sql.functions.log (natural log); confirm import source.
df_train_fe_plot = df_train_fe_plot.withColumn("tweet_number_log", log(col("tweet_number")).cast("float") )
tweet_number = df_train_fe_plot.select('tweet_number').take(num=npts)
tweet_number_log = df_train_fe_plot.select('tweet_number_log').take(num=npts)
f, axes = plt.subplots(1,2)
f.tight_layout()
# Left: raw values on a log-scaled count axis; right: log-transformed values on a linear axis.
axes[0].hist(tweet_number, bins=20, log=True)
axes[0].set_title('log-Histogram of tweet_number')
axes[1].hist(tweet_number_log, bins=20, log=False)
axes[1].set_title('Histogram of log(tweet_number)')
display(f)
tweet_number
# Double-log inspection: log(log(tweet_number)).
# BUG FIX: the second withColumn previously started again from df_train_fe_plot,
# which made the first assignment dead code; chain through `aa` so the
# double-log intent is explicit (result is unchanged).
aa = df_train_fe_plot.withColumn("tweet_number_log", log(col("tweet_number")) )
aa = aa.withColumn("tweet_number_log", log(col("tweet_number_log")) )
aa.select('tweet_number_log').take(num=npts)
df_train_fe_plot.printSchema()
```
<h5 style="background-color:blue;">Transformers and Estimators</h5>
<h6 style="background-color:blue;">Linear Regression - The simple one</h6>
* I use only original numerical features
```
# Baseline model: linear regression on the three original numeric features only.
df_train_fe_num = df_train_fe.select('tweet_number',
'#followers',
'#friends',
'#favorites')
# Cast everything to float so the assembler and estimator get numeric input.
df_train_fe_num = df_train_fe_num.withColumn("#followers", df_train_fe_num["#followers"].cast("float"))
df_train_fe_num = df_train_fe_num.withColumn("#friends", df_train_fe_num["#friends"].cast("float"))
df_train_fe_num = df_train_fe_num.withColumn("#favorites", df_train_fe_num["#favorites"].cast("float"))
df_train_fe_num = df_train_fe_num.withColumn("tweet_number", df_train_fe_num["tweet_number"].cast("float"))
df_train_fe_num.printSchema()
# Note: the label 'tweet_number' is correctly kept OUT of the feature vector here.
vecAssembler = VectorAssembler(inputCols=["#favorites", "#followers", "#friends"], outputCol="features")
lr= LinearRegression(labelCol = "tweet_number")
pipeline = Pipeline(stages=[vecAssembler, lr])
# 70/30 train/test split, fixed seed for reproducibility.
trainDF, testDF = df_train_fe_num.randomSplit([.7, .3], seed=42)
pipelineModel = pipeline.fit(trainDF)
predDF = pipelineModel.transform(testDF)
predDF.select("#favorites", "#followers", "#friends", "tweet_number", "prediction").show(5)
#Evaluator
predDF = predDF.withColumn( "tweet_number", col("tweet_number").cast("float") )
evaluator = RegressionEvaluator()
evaluator.setLabelCol("tweet_number")
evaluator.setPredictionCol("prediction")
# Standard regression metrics on the held-out split.
rmse_ = evaluator.evaluate(predDF, {evaluator.metricName: "rmse"})
print(f"rmse is {rmse_}")
mse_ = evaluator.evaluate(predDF, {evaluator.metricName: "mse"})
print(f"mse is {mse_}")
r2_ = evaluator.evaluate(predDF, {evaluator.metricName: "r2"})
print(f"r2 is {r2_}")
mae_ = evaluator.evaluate(predDF, {evaluator.metricName: "mae"})
print(f"mae is {mae_}")
var_ = evaluator.evaluate(predDF, {evaluator.metricName: "var"})
print(f"var is {var_}")
# Pipeline variant of the baseline linear regression.
# BUG FIX: 'tweet_number' (the label) was previously included in the feature
# vector, leaking the target into the model and making the fit look artificially good.
columnAssembler = ['#followers',
                   '#friends',
                   '#favorites']
vectorAssembler = VectorAssembler().setInputCols(columnAssembler).setOutputCol('features')
lr = LinearRegression(labelCol="tweet_number")
pipeline = Pipeline(stages=[vectorAssembler, lr])
# seed = YYYYMMDD
trainDF, testDF = df_train_fe_num.randomSplit([.7, .3], seed=20200618)
pipelineModel = pipeline.fit(trainDF)
# Save the fitted pipeline to disk (checkpoint: reload later without refitting).
pipelineModel.write().overwrite().save(path="dbfs:/FileStore/Data/df_train_fe.parquet/spark-lr-simplest-pipelineModel")
predDF = pipelineModel.transform(testDF)
predDF.select('tweet_number',
              '#followers',
              '#friends',
              '#favorites',
              'prediction').show(n=5, vertical=False, truncate=False)
# Evaluate on the held-out split with the standard regression metrics.
predDF = predDF.withColumn("tweet_number", col("tweet_number").cast("float"))
evaluator = RegressionEvaluator()
evaluator.setLabelCol("tweet_number")
evaluator.setPredictionCol("prediction")
for metric in ("rmse", "mse", "r2", "mae", "var"):
    score = evaluator.evaluate(predDF, {evaluator.metricName: metric})
    print(f"{metric} is {score}")
# Define the numeric modeling frame once: select every engineered column and
# cast each one to float so downstream assemblers/estimators get numeric input.
numeric_columns = ['tweet_number',
                   '#followers',
                   '#friends',
                   '#favorites',
                   'pos_sentiment',
                   'neg_sentiment',
                   'count_td',
                   'count_tdu',
                   'weekend',
                   'am_of_day',
                   'pm_of_day',
                   'number_entities',
                   'number_mentions',
                   'number_hashtags',
                   'number_urls',
                   'sentiment_overall_trinary_Score']
df_train_fe_num = df_train_fe.select(*numeric_columns)
# One cast per column, driven by the same list (replaces 16 repeated withColumn lines).
for column_name in numeric_columns:
    df_train_fe_num = df_train_fe_num.withColumn(column_name, df_train_fe_num[column_name].cast("float"))
df_train_fe_num.printSchema()
```
<h6 style="background-color:blue;">Linear Regression without Regularization</h6>
* I use all numerical features, including feature engineered features
```
# Linear regression WITHOUT regularization over all engineered numeric features.
# BUG FIX: the label 'tweet_number' was previously part of the feature vector
# (target leakage); it is now excluded from the assembler inputs.
columnAssembler = ['#followers',
                   '#friends',
                   '#favorites',
                   'pos_sentiment',
                   'neg_sentiment',
                   'count_td',
                   'count_tdu',
                   'weekend',
                   'am_of_day',
                   'pm_of_day',
                   'number_entities',
                   'number_mentions',
                   'number_hashtags',
                   'number_urls',
                   'sentiment_overall_trinary_Score']
vectorAssembler = VectorAssembler().setInputCols(columnAssembler).setOutputCol('features')
# Hyper-parameters (chosen by trial and error).
niter = 500        # max iterations
reg = 0.0          # no regularization for this baseline
elastic_reg = 0.0  # L1/L2 mixing ratio (irrelevant while regParam is 0)
tolerance = 1e-5   # convergence tolerance
intercept = True   # fit an intercept term
# BUG FIX: `intercept` was defined but never passed to the estimator.
lr = LinearRegression(labelCol="tweet_number",
                      maxIter=niter,
                      regParam=reg,
                      elasticNetParam=elastic_reg,
                      tol=tolerance,
                      fitIntercept=intercept)
pipeline = Pipeline(stages=[vectorAssembler, lr])
# seed = YYYYMMDD
trainDF, testDF = df_train_fe_num.randomSplit([.7, .3], seed=20200618)
pipelineModel = pipeline.fit(trainDF)
# Save the fitted pipeline to disk.
pipelineModel.write().overwrite().save(path="dbfs:/FileStore/Data/df_train_fe.parquet/spark-lr-noreg-pipelineModel")
predDF = pipelineModel.transform(testDF)
predDF.select('tweet_number',
              '#followers',
              '#friends',
              '#favorites',
              'pos_sentiment',
              'neg_sentiment',
              'count_td',
              'count_tdu',
              'weekend',
              'am_of_day',
              'pm_of_day',
              'number_entities',
              'number_mentions',
              'number_hashtags',
              'number_urls',
              'prediction').show(n=5, vertical=True, truncate=False)
# Evaluate on the held-out split with the standard regression metrics.
predDF = predDF.withColumn("tweet_number", col("tweet_number").cast("float"))
evaluator = RegressionEvaluator()
evaluator.setLabelCol("tweet_number")
evaluator.setPredictionCol("prediction")
for metric in ("rmse", "mse", "r2", "mae", "var"):
    score = evaluator.evaluate(predDF, {evaluator.metricName: metric})
    print(f"{metric} is {score}")
# Scatter plot of regression error vs true label on a sample of predictions.
npts = 2000
labels_preds = predDF.select('tweet_number', 'prediction').take(npts)
truth = [labpred['tweet_number'] for labpred in labels_preds]
reg_err = [labpred['prediction'] - labpred['tweet_number'] for labpred in labels_preds]
f = plt.figure()
plot_reg = plt.scatter(truth, reg_err, s=8, c='blue', linewidth=0, alpha=0.7)
# BUG FIX: matplotlib's keyword is 'linestyle' (all lowercase); 'lineStyle'
# is rejected as an unknown Line2D property.
plt.plot((0, 100), (0, 0), linestyle='-', c='black')
plt.xlim((0, 100))
plt.ylim((-100, 100))
plt.xlabel('shares')
plt.ylabel('error')
plt.title('Prediction error vs label value (validation set)')
display(f)
```
<h6 style="background-color:blue;">Linear Regression with Regularization</h6>
* I use all numerical features, including feature engineered features
```
# Linear regression WITH elastic-net regularization over all engineered features.
# BUG FIX: the label 'tweet_number' was previously part of the feature vector
# (target leakage); it is now excluded from the assembler inputs.
columnAssembler = ['#followers',
                   '#friends',
                   '#favorites',
                   'pos_sentiment',
                   'neg_sentiment',
                   'count_td',
                   'count_tdu',
                   'weekend',
                   'am_of_day',
                   'pm_of_day',
                   'number_entities',
                   'number_mentions',
                   'number_hashtags',
                   'number_urls',
                   'sentiment_overall_trinary_Score']
vectorAssembler = VectorAssembler().setInputCols(columnAssembler).setOutputCol('features')
# Hyper-parameters (chosen by trial and error).
niter = 500        # max iterations
reg = 0.2          # regularization strength
elastic_reg = 0.8  # mostly-L1 elastic-net mix
tolerance = 1e-5   # convergence tolerance
intercept = True   # fit an intercept term
# BUG FIX: `intercept` was defined but never passed to the estimator.
lr = LinearRegression(labelCol="tweet_number",
                      maxIter=niter,
                      regParam=reg,
                      elasticNetParam=elastic_reg,
                      tol=tolerance,
                      fitIntercept=intercept)
pipeline = Pipeline(stages=[vectorAssembler, lr])
# seed = YYYYMMDD
trainDF, testDF = df_train_fe_num.randomSplit([.7, .3], seed=20200618)
pipelineModel = pipeline.fit(trainDF)
# Save the fitted pipeline to disk.
pipelineModel.write().overwrite().save(path="dbfs:/FileStore/Data/df_train_fe.parquet/spark-lr-reg-pipelineModel")
predDF = pipelineModel.transform(testDF)
predDF.select('tweet_number',
              '#followers',
              '#friends',
              '#favorites',
              'pos_sentiment',
              'neg_sentiment',
              'count_td',
              'count_tdu',
              'weekend',
              'am_of_day',
              'pm_of_day',
              'number_entities',
              'number_mentions',
              'number_hashtags',
              'number_urls',
              'prediction').show(n=5, vertical=True, truncate=False)
# Evaluate on the held-out split with the standard regression metrics.
predDF = predDF.withColumn("tweet_number", col("tweet_number").cast("float"))
evaluator = RegressionEvaluator()
evaluator.setLabelCol("tweet_number")
evaluator.setPredictionCol("prediction")
for metric in ("rmse", "mse", "r2", "mae", "var"):
    score = evaluator.evaluate(predDF, {evaluator.metricName: metric})
    print(f"{metric} is {score}")
```
<h6 style="background-color:blue;">Linear Regression with Cross-Validated Hyper-parameter Search</h6>
* 5-fold cross validation over a grid of regParam, elasticNetParam and maxIter values
* Selecting hyper-parameters on held-out folds helps prevent overfitting
```
# Hyper-parameter search: 5-fold cross-validated grid over LinearRegression.
# Preparing VectorAssembler
df_train_fe_num_cv = df_train_fe_num
# BUG FIX: the label 'tweet_number' was previously included among the features
# (target leakage); it is now excluded from the assembler inputs.
columnAssembler = ['#followers',
                   '#friends',
                   '#favorites',
                   'pos_sentiment',
                   'neg_sentiment',
                   'count_td',
                   'count_tdu',
                   'weekend',
                   'am_of_day',
                   'pm_of_day',
                   'number_entities',
                   'number_mentions',
                   'number_hashtags',
                   'number_urls',
                   'sentiment_overall_trinary_Score']
vectorAssembler = VectorAssembler().setInputCols(columnAssembler).setOutputCol('features')
df_train_fe_num_assembler = vectorAssembler.transform(df_train_fe_num_cv)
df_train_fe_num_assembler.show(n=5, vertical=True, truncate=False)
# Create the initial LinearRegression model to tune.
lr = LinearRegression(labelCol="tweet_number", featuresCol="features")
# ParamGrid for cross validation: 6 * 5 * 5 = 150 parameter combinations.
lrparamGrid = (ParamGridBuilder()
               .addGrid(lr.regParam, [0.001, 0.01, 0.1, 0.5, 1.0, 2.0])
               .addGrid(lr.elasticNetParam, [0.0, 0.25, 0.5, 0.75, 1.0])
               .addGrid(lr.maxIter, [1, 5, 10, 20, 50])
               .build())
# Model-selection metric.
lrevaluator = RegressionEvaluator(predictionCol="prediction", labelCol="tweet_number", metricName="rmse")
# Create 5-fold CrossValidator.
lrcv = CrossValidator(estimator=lr,
                      estimatorParamMaps=lrparamGrid,
                      evaluator=lrevaluator,
                      numFolds=5)
# Splitting DataFrame; seed = YYYYMMDD
trainDF, testDF = df_train_fe_num_assembler.randomSplit([.7, .3], seed=20200618)
# Run cross validations.
lrcvModel = lrcv.fit(trainDF)
print(lrcvModel)
# BUG FIX: this used to save the stale `pipelineModel` left over from an earlier
# cell; save the cross-validated model that was actually fitted here.
lrcvModel.write().overwrite().save(path="dbfs:/FileStore/Data/df_train_fe.parquet/spark-lrcvModel")
# Get model summary statistics for the best model found by the grid search.
lrcvSummary = lrcvModel.bestModel.summary
#print("Coefficient Standard Errors: " + str(lrcvSummary.coefficientStandardErrors))
# java.lang.UnsupportedOperationException: No Std. Error of coefficients available for this LinearRegressionModel
#print("P Values: " + str(lrcvSummary.pValues)) # Last element is the intercept
# Use the test set so we measure accuracy of the best model on new data.
lrpredictions = lrcvModel.transform(testDF)
print('RMSE:', lrevaluator.evaluate(lrpredictions))
# Full metric sweep on the test predictions.
evaluator = RegressionEvaluator()
evaluator.setLabelCol("tweet_number")
evaluator.setPredictionCol("prediction")
for metric in ("rmse", "mse", "r2", "mae", "var"):
    score = evaluator.evaluate(lrpredictions, {evaluator.metricName: metric})
    print(f"{metric} is {score}")
```
| github_jupyter |
# Character level language model - Dinosaurus land
Welcome to Dinosaurus Island! 65 million years ago, dinosaurs existed, and in this assignment they are back. You are in charge of a special task. Leading biology researchers are creating new breeds of dinosaurs and bringing them to life on earth, and your job is to give names to these dinosaurs. If a dinosaur does not like its name, it might go berserk, so choose wisely!
<table>
<td>
<img src="images/dino.jpg" style="width:250;height:300px;">
</td>
</table>
Luckily you have learned some deep learning and you will use it to save the day. Your assistant has collected a list of all the dinosaur names they could find, and compiled them into this [dataset](dinos.txt). (Feel free to take a look by clicking the previous link.) To create new dinosaur names, you will build a character level language model to generate new names. Your algorithm will learn the different name patterns, and randomly generate new names. Hopefully this algorithm will keep you and your team safe from the dinosaurs' wrath!
By completing this assignment you will learn:
- How to store text data for processing using an RNN
- How to synthesize data, by sampling predictions at each time step and passing it to the next RNN-cell unit
- How to build a character-level text generation recurrent neural network
- Why clipping the gradients is important
We will begin by loading in some functions that we have provided for you in `rnn_utils`. Specifically, you have access to functions such as `rnn_forward` and `rnn_backward` which are equivalent to those you've implemented in the previous assignment.
```
import numpy as np
from utils import *
import random
```
## 1 - Problem Statement
### 1.1 - Dataset and Preprocessing
Run the following cell to read the dataset of dinosaur names, create a list of unique characters (such as a-z), and compute the dataset and vocabulary size.
```
# Load the dinosaur-name corpus and derive the character vocabulary.
# FIX: use a context manager so the file handle is closed deterministically
# (the original `open(...).read()` never closed the file).
with open('dinos.txt', 'r') as corpus_file:
    data = corpus_file.read().lower()
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print('There are %d total characters and %d unique characters in your data.' % (data_size, vocab_size))
```
The characters are a-z (26 characters) plus the "\n" (or newline character), which in this assignment plays a role similar to the `<EOS>` (or "End of sentence") token we had discussed in lecture, only here it indicates the end of the dinosaur name rather than the end of a sentence. In the cell below, we create a python dictionary (i.e., a hash table) to map each character to an index from 0-26. We also create a second python dictionary that maps each index back to the corresponding character. This will help you figure out what index corresponds to what character in the probability distribution output of the softmax layer. Below, `char_to_ix` and `ix_to_char` are the python dictionaries.
```
# Bidirectional lookup tables between characters and indices, assigned in
# sorted (alphabetical) order so the mapping is deterministic.
sorted_chars = sorted(chars)
char_to_ix = {ch: i for i, ch in enumerate(sorted_chars)}
ix_to_char = dict(enumerate(sorted_chars))
print(ix_to_char)
```
### 1.2 - Overview of the model
Your model will have the following structure:
- Initialize parameters
- Run the optimization loop
- Forward propagation to compute the loss function
- Backward propagation to compute the gradients with respect to the loss function
- Clip the gradients to avoid exploding gradients
- Using the gradients, update your parameter with the gradient descent update rule.
- Return the learned parameters
<img src="images/rnn.png" style="width:450;height:300px;">
<caption><center> **Figure 1**: Recurrent Neural Network, similar to what you had built in the previous notebook "Building a RNN - Step by Step". </center></caption>
At each time-step, the RNN tries to predict what is the next character given the previous characters. The dataset $X = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, ..., x^{\langle T_x \rangle})$ is a list of characters in the training set, while $Y = (y^{\langle 1 \rangle}, y^{\langle 2 \rangle}, ..., y^{\langle T_x \rangle})$ is such that at every time-step $t$, we have $y^{\langle t \rangle} = x^{\langle t+1 \rangle}$.
## 2 - Building blocks of the model
In this part, you will build two important blocks of the overall model:
- Gradient clipping: to avoid exploding gradients
- Sampling: a technique used to generate characters
You will then apply these two functions to build the model.
### 2.1 - Clipping the gradients in the optimization loop
In this section you will implement the `clip` function that you will call inside of your optimization loop. Recall that your overall loop structure usually consists of a forward pass, a cost computation, a backward pass, and a parameter update. Before updating the parameters, you will perform gradient clipping when needed to make sure that your gradients are not "exploding," meaning taking on overly large values.
In the exercise below, you will implement a function `clip` that takes in a dictionary of gradients and returns a clipped version of gradients if needed. There are different ways to clip gradients; we will use a simple element-wise clipping procedure, in which every element of the gradient vector is clipped to lie between some range [-N, N]. More generally, you will provide a `maxValue` (say 10). In this example, if any component of the gradient vector is greater than 10, it would be set to 10; and if any component of the gradient vector is less than -10, it would be set to -10. If it is between -10 and 10, it is left alone.
<img src="images/clip.png" style="width:400;height:150px;">
<caption><center> **Figure 2**: Visualization of gradient descent with and without gradient clipping, in a case where the network is running into slight "exploding gradient" problems. </center></caption>
**Exercise**: Implement the function below to return the clipped gradients of your dictionary `gradients`. Your function takes in a maximum threshold and returns the clipped versions of your gradients. You can check out this [hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.clip.html) for examples of how to clip in numpy. You will need to use the argument `out = ...`.
```
### GRADED FUNCTION: clip
def clip(gradients, maxValue):
'''
Clips the gradients' values between minimum and maximum.
Arguments:
gradients -- a dictionary containing the gradients "dWaa", "dWax", "dWya", "db", "dby"
maxValue -- everything above this number is set to this number, and everything less than -maxValue is set to -maxValue
Returns:
gradients -- a dictionary with the clipped gradients.
'''
dWaa, dWax, dWya, db, dby = gradients['dWaa'], gradients['dWax'], gradients['dWya'], gradients['db'], gradients['dby']
### START CODE HERE ###
# clip to mitigate exploding gradients, loop over [dWax, dWaa, dWya, db, dby]. (≈2 lines)
for gradient in [dWax, dWaa, dWya, db, dby]:
np.clip(gradient, -maxValue, maxValue, out=gradient)
### END CODE HERE ###
gradients = {"dWaa": dWaa, "dWax": dWax, "dWya": dWya, "db": db, "dby": dby}
return gradients
# Smoke test for clip(): random gradients scaled by 10 so some entries fall
# outside [-10, 10], then clipped with maxValue=10.
np.random.seed(3)
dWax = np.random.randn(5,3)*10
dWaa = np.random.randn(5,5)*10
dWya = np.random.randn(2,5)*10
db = np.random.randn(5,1)*10
dby = np.random.randn(2,1)*10
gradients = {"dWax": dWax, "dWaa": dWaa, "dWya": dWya, "db": db, "dby": dby}
gradients = clip(gradients, 10)
# Spot-check a few entries; values that were clipped print as exactly +/-10.0.
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
```
** Expected output:**
<table>
<tr>
<td>
**gradients["dWaa"][1][2] **
</td>
<td>
10.0
</td>
</tr>
<tr>
<td>
**gradients["dWax"][3][1]**
</td>
<td>
-10.0
</td>
</td>
</tr>
<tr>
<td>
**gradients["dWya"][1][2]**
</td>
<td>
0.29713815361
</td>
</tr>
<tr>
<td>
**gradients["db"][4]**
</td>
<td>
[ 10.]
</td>
</tr>
<tr>
<td>
**gradients["dby"][1]**
</td>
<td>
[ 8.45833407]
</td>
</tr>
</table>
### 2.2 - Sampling
Now assume that your model is trained. You would like to generate new text (characters). The process of generation is explained in the picture below:
<img src="images/dinos3.png" style="width:500;height:300px;">
<caption><center> **Figure 3**: In this picture, we assume the model is already trained. We pass in $x^{\langle 1\rangle} = \vec{0}$ at the first time step, and have the network then sample one character at a time. </center></caption>
**Exercise**: Implement the `sample` function below to sample characters. You need to carry out 4 steps:
- **Step 1**: Pass the network the first "dummy" input $x^{\langle 1 \rangle} = \vec{0}$ (the vector of zeros). This is the default input before we've generated any characters. We also set $a^{\langle 0 \rangle} = \vec{0}$
- **Step 2**: Run one step of forward propagation to get $a^{\langle 1 \rangle}$ and $\hat{y}^{\langle 1 \rangle}$. Here are the equations:
$$ a^{\langle t+1 \rangle} = \tanh(W_{ax} x^{\langle t \rangle } + W_{aa} a^{\langle t \rangle } + b)\tag{1}$$
$$ z^{\langle t + 1 \rangle } = W_{ya} a^{\langle t + 1 \rangle } + b_y \tag{2}$$
$$ \hat{y}^{\langle t+1 \rangle } = softmax(z^{\langle t + 1 \rangle })\tag{3}$$
Note that $\hat{y}^{\langle t+1 \rangle }$ is a (softmax) probability vector (its entries are between 0 and 1 and sum to 1). $\hat{y}^{\langle t+1 \rangle}_i$ represents the probability that the character indexed by "i" is the next character. We have provided a `softmax()` function that you can use.
- **Step 3**: Carry out sampling: Pick the next character's index according to the probability distribution specified by $\hat{y}^{\langle t+1 \rangle }$. This means that if $\hat{y}^{\langle t+1 \rangle }_i = 0.16$, you will pick the index "i" with 16% probability. To implement it, you can use [`np.random.choice`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.random.choice.html).
Here is an example of how to use `np.random.choice()`:
```python
np.random.seed(0)
p = np.array([0.1, 0.0, 0.7, 0.2])
index = np.random.choice([0, 1, 2, 3], p = p.ravel())
```
This means that you will pick the `index` according to the distribution:
$P(index = 0) = 0.1, P(index = 1) = 0.0, P(index = 2) = 0.7, P(index = 3) = 0.2$.
- **Step 4**: The last step to implement in `sample()` is to overwrite the variable `x`, which currently stores $x^{\langle t \rangle }$, with the value of $x^{\langle t + 1 \rangle }$. You will represent $x^{\langle t + 1 \rangle }$ by creating a one-hot vector corresponding to the character you've chosen as your prediction. You will then forward propagate $x^{\langle t + 1 \rangle }$ in Step 1 and keep repeating the process until you get a "\n" character, indicating you've reached the end of the dinosaur name.
```
# GRADED FUNCTION: sample
def sample(parameters, char_to_ix, seed):
"""
Sample a sequence of characters according to a sequence of probability distributions output of the RNN
Arguments:
parameters -- python dictionary containing the parameters Waa, Wax, Wya, by, and b.
char_to_ix -- python dictionary mapping each character to an index.
seed -- used for grading purposes. Do not worry about it.
Returns:
indices -- a list of length n containing the indices of the sampled characters.
"""
# Retrieve parameters and relevant shapes from "parameters" dictionary
Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']
vocab_size = by.shape[0]
n_a = Waa.shape[1]
### START CODE HERE ###
# Step 1: Create the one-hot vector x for the first character (initializing the sequence generation). (≈1 line)
x = np.zeros((vocab_size, 1))
# Step 1': Initialize a_prev as zeros (≈1 line)
a_prev = np.zeros((n_a, 1))
# Create an empty list of indices, this is the list which will contain the list of indices of the characters to generate (≈1 line)
indices = []
# Idx is a flag to detect a newline character, we initialize it to -1
idx = -1
# Loop over time-steps t. At each time-step, sample a character from a probability distribution and append
# its index to "indices". We'll stop if we reach 50 characters (which should be very unlikely with a well
# trained model), which helps debugging and prevents entering an infinite loop.
counter = 0
newline_character = char_to_ix['\n']
while (idx != newline_character and counter != 50):
# Step 2: Forward propagate x using the equations (1), (2) and (3)
a = np.tanh(np.dot(Wax, x) + np.dot(Waa, a_prev) + b)
z = np.dot(Wya, a) + by
y = softmax(z)
# for grading purposes
np.random.seed(counter+seed)
# Step 3: Sample the index of a character within the vocabulary from the probability distribution y
idx = np.random.choice(vocab_size, p = y.ravel())
# Append the index to "indices"
indices.append(idx)
# Step 4: Overwrite the input character as the one corresponding to the sampled index.
x = np.zeros((vocab_size, 1))
x[idx] = 1
# Update "a_prev" to be "a"
a_prev = a
# for grading purposes
seed += 1
counter +=1
### END CODE HERE ###
if (counter == 50):
indices.append(char_to_ix['\n'])
return indices
# Smoke test for sample(): random parameters over the dinos vocabulary.
np.random.seed(2)
_, n_a = 20, 100  # hidden-state size; the first value is discarded
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}
indices = sample(parameters, char_to_ix, 0)
print("Sampling:")
print("list of sampled indices:", indices)
print("list of sampled characters:", [ix_to_char[i] for i in indices])
```
** Expected output:**
<table>
<tr>
<td>
**list of sampled indices:**
</td>
<td>
[12, 17, 24, 14, 13, 9, 10, 22, 24, 6, 13, 11, 12, 6, 21, 15, 21, 14, 3, 2, 1, 21, 18, 24, <br>
7, 25, 6, 25, 18, 10, 16, 2, 3, 8, 15, 12, 11, 7, 1, 12, 10, 2, 7, 7, 11, 5, 6, 12, 25, 0, 0]
</td>
</tr><tr>
<td>
**list of sampled characters:**
</td>
<td>
['l', 'q', 'x', 'n', 'm', 'i', 'j', 'v', 'x', 'f', 'm', 'k', 'l', 'f', 'u', 'o', <br>
'u', 'n', 'c', 'b', 'a', 'u', 'r', 'x', 'g', 'y', 'f', 'y', 'r', 'j', 'p', 'b', 'c', 'h', 'o', <br>
'l', 'k', 'g', 'a', 'l', 'j', 'b', 'g', 'g', 'k', 'e', 'f', 'l', 'y', '\n', '\n']
</td>
</tr>
</table>
## 3 - Building the language model
It is time to build the character-level language model for text generation.
### 3.1 - Gradient descent
In this section you will implement a function performing one step of stochastic gradient descent (with clipped gradients). You will go through the training examples one at a time, so the optimization algorithm will be stochastic gradient descent. As a reminder, here are the steps of a common optimization loop for an RNN:
- Forward propagate through the RNN to compute the loss
- Backward propagate through time to compute the gradients of the loss with respect to the parameters
- Clip the gradients if necessary
- Update your parameters using gradient descent
**Exercise**: Implement this optimization process (one step of stochastic gradient descent).
We provide you with the following functions:
```python
def rnn_forward(X, Y, a_prev, parameters):
""" Performs the forward propagation through the RNN and computes the cross-entropy loss.
It returns the loss' value as well as a "cache" storing values to be used in the backpropagation."""
....
return loss, cache
def rnn_backward(X, Y, parameters, cache):
""" Performs the backward propagation through time to compute the gradients of the loss with respect
to the parameters. It returns also all the hidden states."""
...
return gradients, a
def update_parameters(parameters, gradients, learning_rate):
""" Updates parameters using the Gradient Descent Update Rule."""
...
return parameters
```
```
# GRADED FUNCTION: optimize
def optimize(X, Y, a_prev, parameters, learning_rate = 0.01):
    """
    Run a single step of stochastic gradient descent on one training example.

    Arguments:
    X -- list of integers, each mapping to a character in the vocabulary.
    Y -- list of integers, identical to X but shifted one position to the left.
    a_prev -- previous hidden state.
    parameters -- python dictionary containing Wax, Waa, Wya, b, by.
    learning_rate -- step size for the gradient-descent update.

    Returns:
    loss -- value of the cross-entropy loss for this example
    gradients -- python dictionary containing dWax, dWaa, dWya, db, dby
    a[len(X)-1] -- the last hidden state, of shape (n_a, 1)
    """
    # Forward pass: compute the loss and cache activations for backprop.
    loss, fwd_cache = rnn_forward(X, Y, a_prev, parameters)
    # Backward pass through time: gradients w.r.t. every parameter,
    # plus the full sequence of hidden states.
    gradients, hidden_states = rnn_backward(X, Y, parameters, fwd_cache)
    # Clamp every gradient component into [-5, 5] to prevent exploding updates.
    gradients = clip(gradients, 5)
    # Apply one gradient-descent step in place.
    parameters = update_parameters(parameters, gradients, learning_rate)
    # Return the final hidden state so the caller can carry it forward.
    return loss, gradients, hidden_states[len(X) - 1]
# Deterministic smoke test for optimize() with random parameters.
np.random.seed(1)
vocab_size, n_a = 27, 100
a_prev = np.random.randn(n_a, 1)
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}
# Toy input/target index sequences for one optimization step.
X = [12,3,5,11,22,3]
Y = [4,14,11,22,25, 26]
loss, gradients, a_last = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)
print("Loss =", loss)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("np.argmax(gradients[\"dWax\"]) =", np.argmax(gradients["dWax"]))
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
print("a_last[4] =", a_last[4])
```
**Expected output:**
<table>
<tr>
<td>
**Loss **
</td>
<td>
126.503975722
</td>
</tr>
<tr>
<td>
**gradients["dWaa"][1][2]**
</td>
<td>
0.194709315347
</td>
<tr>
<td>
**np.argmax(gradients["dWax"])**
</td>
<td> 93
</td>
</tr>
<tr>
<td>
**gradients["dWya"][1][2]**
</td>
<td> -0.007773876032
</td>
</tr>
<tr>
<td>
**gradients["db"][4]**
</td>
<td> [-0.06809825]
</td>
</tr>
<tr>
<td>
**gradients["dby"][1]**
</td>
<td>[ 0.01538192]
</td>
</tr>
<tr>
<td>
**a_last[4]**
</td>
<td> [-1.]
</td>
</tr>
</table>
### 3.2 - Training the model
Given the dataset of dinosaur names, we use each line of the dataset (one name) as one training example. Every 2000 steps of stochastic gradient descent, you will sample 7 names from the model to see how the algorithm is doing. Remember to shuffle the dataset, so that stochastic gradient descent visits the examples in random order.
**Exercise**: Follow the instructions and implement `model()`. When `examples[index]` contains one dinosaur name (string), to create an example (X, Y), you can use this:
```python
index = j % len(examples)
X = [None] + [char_to_ix[ch] for ch in examples[index]]
Y = X[1:] + [char_to_ix["\n"]]
```
Note that we use: `index= j % len(examples)`, where `j = 1....num_iterations`, to make sure that `examples[index]` is always a valid statement (`index` is smaller than `len(examples)`).
The first entry of `X` being `None` will be interpreted by `rnn_forward()` as setting $x^{\langle 0 \rangle} = \vec{0}$. Further, this ensures that `Y` is equal to `X` but shifted one step to the left, and with an additional "\n" appended to signify the end of the dinosaur name.
```
# GRADED FUNCTION: model
def model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27):
"""
Trains the model and generates dinosaur names.
Arguments:
data -- text corpus
ix_to_char -- dictionary that maps the index to a character
char_to_ix -- dictionary that maps a character to an index
num_iterations -- number of iterations to train the model for
n_a -- number of units of the RNN cell
dino_names -- number of dinosaur names you want to sample at each iteration.
vocab_size -- number of unique characters found in the text, size of the vocabulary
Returns:
parameters -- learned parameters
"""
# Retrieve n_x and n_y from vocab_size (inputs and outputs are one-hot characters)
n_x, n_y = vocab_size, vocab_size
# Initialize parameters
parameters = initialize_parameters(n_a, n_x, n_y)
# Initialize loss (this is required because we want to smooth our loss, don't worry about it)
loss = get_initial_loss(vocab_size, dino_names)
# Build list of all dinosaur names (training examples), lowercased and stripped.
with open("dinos.txt") as f:
examples = f.readlines()
examples = [x.lower().strip() for x in examples]
# Shuffle list of all dinosaur names (fixed seed keeps grading reproducible)
np.random.seed(0)
np.random.shuffle(examples)
# Initialize the hidden state of the RNN (note: a basic RNN, not an LSTM)
a_prev = np.zeros((n_a, 1))
# Optimization loop
for j in range(num_iterations):
### START CODE HERE ###
# Use the hint above to define one training example (X,Y) (≈ 2 lines)
# index cycles through the shuffled examples
index = j % len(examples)
# The leading None is read by rnn_forward as the zero vector x<0>
X = [None] + [char_to_ix[ch] for ch in examples[index]]
# Y is X shifted one step left, with "\n" appended to mark the end of the name
Y = X[1:] + [char_to_ix["\n"]]
# Perform one optimization step: Forward-prop -> Backward-prop -> Clip -> Update parameters
# Choose a learning rate of 0.01
curr_loss, gradients, a_prev = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)
### END CODE HERE ###
# Exponentially smooth the reported loss so the printed curve is readable.
loss = smooth(loss, curr_loss)
# Every 2000 iterations, sample a few names to check if the model is learning properly
if j % 2000 == 0:
print('Iteration: %d, Loss: %f' % (j, loss) + '\n')
# The number of dinosaur names to print
seed = 0
for name in range(dino_names):
# Sample indices and print them
sampled_indices = sample(parameters, char_to_ix, seed)
print_sample(sampled_indices, ix_to_char)
seed += 1 # To get the same result for grading purposes, increment the seed by one.
print('\n')
return parameters
```
Run the following cell, you should observe your model outputting random-looking characters at the first iteration. After a few thousand iterations, your model should learn to generate reasonable-looking names.
```
# Train the character-level model on the dinosaur dataset (takes a while on CPU).
parameters = model(data, ix_to_char, char_to_ix)
```
## Conclusion
You can see that your algorithm has started to generate plausible dinosaur names towards the end of the training. At first, it was generating random characters, but towards the end you could see dinosaur names with cool endings. Feel free to run the algorithm even longer and play with hyperparameters to see if you can get even better results. Our implementation generated some really cool names like `maconucon`, `marloralus` and `macingsersaurus`. Your model hopefully also learned that dinosaur names tend to end in `saurus`, `don`, `aura`, `tor`, etc.
If your model generates some non-cool names, don't blame the model entirely--not all actual dinosaur names sound cool. (For example, `dromaeosauroides` is an actual dinosaur name and is in the training set.) But this model should give you a set of candidates from which you can pick the coolest!
This assignment used a relatively small dataset, so that you could train an RNN quickly on a CPU. Training a model of the English language requires a much bigger dataset, usually needs much more computation, and could run for many hours on GPUs. We ran our dinosaur name model for quite some time, and so far our favorite name is the great, undefeatable, and fierce: Mangosaurus!
<img src="images/mangosaurus.jpeg" style="width:250;height:300px;">
## 4 - Writing like Shakespeare
The rest of this notebook is optional and is not graded, but we hope you'll do it anyway since it's quite fun and informative.
A similar (but more complicated) task is to generate Shakespeare poems. Instead of learning from a dataset of dinosaur names you can use a collection of Shakespearian poems. Using LSTM cells, you can learn longer-term dependencies that span many characters in the text--e.g., where a character appearing somewhere in a sequence can influence what a different character should be much later in the sequence. These long-term dependencies were less important with dinosaur names, since the names were quite short.
<img src="images/shakespeare.jpg" style="width:500;height:400px;">
<caption><center> Let's become poets! </center></caption>
We have implemented a Shakespeare poem generator with Keras. Run the following cell to load the required packages and models. This may take a few minutes.
```
from __future__ import print_function
from keras.callbacks import LambdaCallback
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking
from keras.layers import LSTM
from keras.utils.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
from shakespeare_utils import *
import sys
import io
```
To save you some time, we have already trained a model for ~1000 epochs on a collection of Shakespearian poems called [*"The Sonnets"*](shakespeare.txt).
Let's train the model for one more epoch. When it finishes training for an epoch---this will also take a few minutes---you can run `generate_output`, which will prompt asking you for an input (`<`40 characters). The poem will start with your sentence, and our RNN-Shakespeare will complete the rest of the poem for you! For example, try "Forsooth this maketh no sense " (don't enter the quotation marks). Depending on whether you include the space at the end, your results might also differ--try it both ways, and try other inputs as well.
```
# Fine-tune the pretrained Shakespeare model for one more epoch;
# on_epoch_end (from shakespeare_utils) prints generated text after the epoch.
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
model.fit(x, y, batch_size=128, epochs=1, callbacks=[print_callback])
# Run this cell to try with different inputs without having to re-train the model
generate_output()
```
The RNN-Shakespeare model is very similar to the one you have built for dinosaur names. The only major differences are:
- LSTMs instead of the basic RNN to capture longer-range dependencies
- The model is a deeper, stacked LSTM model (2 layer)
- Using Keras instead of python to simplify the code
If you want to learn more, you can also check out the Keras Team's text generation implementation on GitHub: https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py.
Congratulations on finishing this notebook!
**References**:
- This exercise took inspiration from Andrej Karpathy's implementation: https://gist.github.com/karpathy/d4dee566867f8291f086. To learn more about text generation, also check out Karpathy's [blog post](http://karpathy.github.io/2015/05/21/rnn-effectiveness/).
- For the Shakespearian poem generator, our implementation was based on the implementation of an LSTM text generator by the Keras team: https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py
| github_jupyter |
# Unsplash Image Search
Using this notebook you can search for images from the [Unsplash Dataset](https://unsplash.com/data) using natural language queries. The search is powered by OpenAI's [CLIP](https://github.com/openai/CLIP) neural network.
This notebook uses the precomputed feature vectors for almost 2 million images from the full version of the [Unsplash Dataset](https://unsplash.com/data). If you want to compute the features yourself, see [here](https://github.com/haltakov/natural-language-image-search#on-your-machine).
This project was created by [Vladimir Haltakov](https://twitter.com/haltakov) and the full code is open-sourced on [GitHub](https://github.com/haltakov/natural-language-image-search).
## Setup Environment
In this section we will setup the environment.
First we need to install CLIP and then upgrade the version of torch to 1.7.1 with CUDA support (by default CLIP installs torch 1.7.1 without CUDA). Google Colab currently has torch 1.7.0 which doesn't work well with CLIP.
```
!pip install git+https://github.com/openai/CLIP.git
!pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 -f https://download.pytorch.org/whl/torch_stable.html
```
We can now load the pretrained public CLIP model.
```
import clip
import torch
# Load the open CLIP model
# Use the GPU when available; inference falls back to CPU otherwise.
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
```
## Download the Precomputed Data
In this section the precomputed feature vectors for all photos are downloaded.
In order to compare the photos from the Unsplash dataset to a text query, we need to compute the feature vector of each photo using CLIP. This is a time consuming task, so you can use the feature vectors that I precomputed and uploaded to Google Drive (with the permission from Unsplash). If you want to compute the features yourself, see [here](https://github.com/haltakov/natural-language-image-search#on-your-machine).
We need to download two files:
* `photo_ids.csv` - a list of the photo IDs for all images in the dataset. The photo ID can be used to get the actual photo from Unsplash.
* `features.npy` - a matrix containing the precomputed 512 element feature vector for each photo in the dataset.
The files are available on [Google Drive](https://drive.google.com/drive/folders/1WQmedVCDIQKA2R33dkS1f980YsJXRZ-q?usp=sharing).
```
from pathlib import Path
# Colab shell commands: fetch the precomputed CLIP feature files from Google Drive.
# Create a folder for the precomputed features
!mkdir unsplash-dataset
# Download the photo IDs and the feature vectors
!gdown --id 1FdmDEzBQCf3OxqY9SbU-jLfH_yZ6UPSj -O unsplash-dataset/photo_ids.csv
!gdown --id 1L7ulhn4VeN-2aOM-fYmljza_TQok-j9F -O unsplash-dataset/features.npy
# Download from alternative source, if the download doesn't work for some reason (for example download quota limit exceeded)
if not Path('unsplash-dataset/photo_ids.csv').exists():
!wget https://transfer.army/api/download/TuWWFTe2spg/EDm6KBjc -O unsplash-dataset/photo_ids.csv
if not Path('unsplash-dataset/features.npy').exists():
!wget https://transfer.army/api/download/LGXAaiNnMLA/AamL9PpU -O unsplash-dataset/features.npy
```
After the files are downloaded we need to load them using `pandas` and `numpy`.
```
import pandas as pd
import numpy as np
# Load the photo IDs
photo_ids = pd.read_csv("unsplash-dataset/photo_ids.csv")
photo_ids = list(photo_ids['photo_id'])
# Load the features vectors
photo_features = np.load("unsplash-dataset/features.npy")
# Convert features to Tensors: Float32 on CPU and Float16 on GPU
# NOTE(review): this implies the .npy stores half precision — confirm against the file's dtype.
if device == "cpu":
photo_features = torch.from_numpy(photo_features).float().to(device)
else:
photo_features = torch.from_numpy(photo_features).to(device)
# Print some statistics
print(f"Photos loaded: {len(photo_ids)}")
```
## Define Functions
Some important functions for processing the data are defined here.
The `encode_search_query` function takes a text description and encodes it into a feature vector using the CLIP model.
```
def encode_search_query(search_query):
    """Embed a free-text query into an L2-normalized CLIP text feature vector."""
    with torch.no_grad():
        # Tokenize the query and run it through CLIP's text encoder.
        tokens = clip.tokenize(search_query).to(device)
        features = model.encode_text(tokens)
        # Unit-normalize so dot products against photo features are cosine similarities.
        features /= features.norm(dim=-1, keepdim=True)
    return features
```
The `find_best_matches` function compares the text feature vector to the feature vectors of all images and finds the best matches. The function returns the IDs of the best matching photos.
```
def find_best_matches(text_features, photo_features, photo_ids, results_count=3):
    """Return the IDs of the `results_count` photos most similar to the query.

    Both feature matrices are expected to be L2-normalized, so the matrix
    product below directly yields cosine similarities.
    """
    # One similarity score per photo: (n_photos, 1) -> (n_photos,)
    scores = (photo_features @ text_features.T).squeeze(1)
    # argsort is ascending; negate the scores to put the best matches first.
    ranking = (-scores).argsort()
    return [photo_ids[idx] for idx in ranking[:results_count]]
```
The `display_photo` function displays a photo from Unsplash given its ID.
This function needs to call the Unsplash API to get the URL of the photo and some metadata about the photographer. Since I'm [not allowed](https://help.unsplash.com/en/articles/2511245-unsplash-api-guidelines) to share my Unsplash API access key publicly, I created a small proxy that queries the Unsplash API and returns the data (see the code [here](https://github.com/haltakov/natural-language-image-search/tree/main/unsplash-proxy)). In this way you can play around without creating a developer account at Unsplash, while keeping my key private. I hope I don't hit the API rate limit.
If you already have an Unsplash developer account, you can uncomment the relevant code and plugin your own access key.
```
from IPython.display import Image
from IPython.core.display import HTML
from urllib.request import urlopen
import json
def display_photo(photo_id):
"""Fetch a photo's metadata through the author's proxy and render the image plus attribution."""
# Proxy for the Unsplash API so that I don't expose my access key
unsplash_api_url = f"https://haltakov.net/unsplash-proxy/{photo_id}"
# Alternatively, you can use your own Unsplash developer account with this code
# unsplash_api_url = f"https://api.unsplash.com/photos/{photo_id}?client_id=YOUR_UNSPLASH_ACCESS_KEY"
# Fetch the photo metadata from the Unsplash API
photo_data = json.loads(urlopen(unsplash_api_url).read().decode("utf-8"))
# Get a resized URL for the photo
# NOTE(review): the original comment said 480px but the query requests w=320 — confirm intended width.
photo_image_url = photo_data["urls"]["raw"] + "&w=320"
# Display the photo
display(Image(url=photo_image_url))
# Display the attribution text
display(HTML(f'Photo by <a target="_blank" href="https://unsplash.com/@{photo_data["user"]["username"]}?utm_source=NaturalLanguageImageSearch&utm_medium=referral">{photo_data["user"]["name"]}</a> on <a target="_blank" href="https://unsplash.com/?utm_source=NaturalLanguageImageSearch&utm_medium=referral">Unsplash</a>'))
print()
```
Putting it all together in one function.
```
def search_unslash(search_query, photo_features, photo_ids, results_count=3):
    """End-to-end search: encode the query, rank all photos, display the winners.

    NOTE(review): the name keeps the original "unslash" spelling because
    callers elsewhere in the notebook use it.
    """
    # Turn the natural-language query into a CLIP feature vector.
    query_features = encode_search_query(search_query)
    # Rank every photo against the query and keep the best few.
    top_ids = find_best_matches(query_features, photo_features, photo_ids, results_count)
    # Render each winning photo with its attribution.
    for pid in top_ids:
        display_photo(pid)
## Search Unsplash
Now we are ready to search the dataset using natural language. Check out the examples below and feel free to try out your own queries.
> ⚠️ WARNING ⚠️
> Since many people are currently using the notebook, it seems that the Unsplash API limit is hit from time to time (even with caching in the proxy). I applied for production status which will solve the problem. In the meantime, you can just try when a new hour starts. Alternatively, you can use your own Unsplash API key.
### "Two dogs playing in the snow"
```
# Example natural-language query; displays the top 3 matches inline.
search_query = "Two dogs playing in the snow"
search_unslash(search_query, photo_features, photo_ids, 3)
```
### "The word love written on the wall"
```
# Query demonstrating that CLIP can read text inside images.
search_query = "The word love written on the wall"
search_unslash(search_query, photo_features, photo_ids, 3)
```
### "The feeling when your program finally works"
```
# Abstract/emotional query — no literal objects to match.
search_query = "The feeling when your program finally works"
search_unslash(search_query, photo_features, photo_ids, 3)
```
### "The Syndey Opera House and the Harbour Bridge at night"
```
# Landmark query. NOTE: "Syndey" is a typo for "Sydney" in the query string;
# kept as-is because it matches the section heading above.
search_query = "The Syndey Opera House and the Harbour Bridge at night"
search_unslash(search_query, photo_features, photo_ids, 3)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/ipavlopoulos/diagnostic_captioning/blob/master/DC_show_n_tell.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#Medical Image To Diagnostic Text
---
### Use the IU-Xray dataset, including radiology XRays along with their human-generated findings/impressions, to train the Show & Tell Encoder-Decoder model.
The code is based on https://machinelearningmastery.com/develop-a-caption-generation-model-in-keras/ while future work will i) add visual/semantic attention, ii) experiment with first words given, iii) decode hierarchically/consecutively.
```
# Download the dataset and put in proper folder to use it.
!git clone https://github.com/nlpaueb/bio_image_caption.git
!python bio_image_caption/SiVL19/get_iu_xray.py
# NOTE(review): copying under nbextensions presumably lets Colab serve the images — confirm.
!cp -n -r iu_xray /usr/local/share/jupyter/nbextensions/google.colab/iu_xray
# imports
from __future__ import absolute_import, division, print_function, unicode_literals
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import random
import os
import json
import numpy as np
import pandas as pd
import re
import time
from glob import glob
from PIL import Image
import pickle
from os import listdir
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu
import keras
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.text import Tokenizer
from keras.applications.vgg16 import preprocess_input
from keras.layers import Input
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import GRU, LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from keras.layers import Embedding
from keras.layers.merge import concatenate
from keras.layers.pooling import GlobalMaxPooling2D
import nltk
# Punkt models are required by nltk.tokenize.sent_tokenize used in preprocess() below.
nltk.download('punkt')
```
# `Parse the texts and any tags, even if they aren't used.`
```
# download the image tags
DATA_PATH = "iu_xray/"
DATA_IMAGES_PATH = os.path.join(DATA_PATH, "iu_xray_images")
iuxray_major_tags = json.load(open(DATA_PATH+"iu_xray_major_tags.json"))
iuxray_auto_tags = json.load(open(DATA_PATH+"iu_xray_auto_tags.json"))
iuxray_captions = json.load(open(DATA_PATH+"iu_xray_captions.json"))
# parse the captions
iuxray_ids = list(iuxray_captions.keys())
# Pre-split every caption into a token list, in the same order as iuxray_ids.
iuxray_caption_texts = [iuxray_captions[iid].split() for iid in iuxray_ids]
iuxray_captions_num = len(iuxray_ids)
print ("# texts: {0}".format(iuxray_captions_num))
# Rejoin tokens to compare total vs. unique caption counts.
raw_texts = [" ".join(t) for t in iuxray_caption_texts]
print (len(raw_texts), len(set(raw_texts)))
```
# `Map image/captions to the patient they correspond. `
```
# Group image/visit IDs by patient: the patient key is the ID after dropping
# a 3-character prefix and splitting at the first "_".
# NOTE(review): the fixed [3:] prefix-strip is assumed valid for all IDs — confirm against the data.
patient_images = {}
for visit in iuxray_ids:
patient = visit[3:].split("_")[0]
if patient in patient_images:
patient_images[patient].append(visit)
else:
patient_images[patient] = [visit]
# a model should be reading both images per patient while excluding other patients
# Below: count patients by how many images they have (only the count is used).
iuxray_ids_img1 = [patient_images[patient][0] for patient in patient_images if len(patient_images[patient])==1]
iuxray_ids_img2 = [patient_images[patient][0] for patient in patient_images if len(patient_images[patient])==2]
iuxray_ids_img3 = [patient_images[patient][0] for patient in patient_images if len(patient_images[patient])>2]
print ("#patients with 1, 2, or more images:", len(iuxray_ids_img1), len(iuxray_ids_img2), len(iuxray_ids_img3))
#print (patient_images)
```
# `Download a sentence tokenizer and preprocess the text.`
```
# Official train/test split files: tab-separated (image id, caption) with no header.
train_path = os.path.join(DATA_PATH, "train_images.tsv")
test_path = os.path.join(DATA_PATH, "test_images.tsv")
train_data = pd.read_csv(train_path, sep="\t", header=None)
test_data = pd.read_csv(test_path, sep="\t", header=None)
train_data.columns = ["id", "caption"]
test_data.columns = ["id", "caption"]
# Special tokens: sequence start/end markers and a sentence-boundary token.
start, end, sentence_token = "startsequence", "endsequence", " endofsentence "
def preprocess(text, start=start, end=end, sentence_token=sentence_token):
    """Lowercase a caption, mark sentence boundaries, and wrap it in start/end tokens."""
    if sentence_token is not None:
        # Keep only non-trivial sentences (length > 5) and join them with
        # the boundary token so the decoder can learn sentence breaks.
        kept = [s for s in nltk.tokenize.sent_tokenize(text) if len(s) > 5]
        text = sentence_token.join(kept)
    return start + ' ' + text.lower() + ' ' + end
```
***Merge image/captions of same patient and use only patients with 2 images:***
```
# use the patient DB to create the datasets (one caption per patient)
image_captions = dict(zip(train_data.id.to_list()+test_data.id.to_list(), train_data.caption.to_list()+test_data.caption.to_list()))
print ("# captions", len(image_captions))
patient_captions = {patient:[image_captions[img] for img in patient_images[patient]] for patient in patient_images}
print ("# patients", len(patient_captions))
# discard patients without both XRays
ids = list(patient_captions.keys())
for patient in ids:
if len(patient_captions[patient])!=2:
del patient_captions[patient], patient_images[patient]
else:
# Keep only the first of the two captions, preprocessed; expand image names to full paths.
patient_captions[patient] = preprocess(patient_captions[patient][0])
patient_images[patient] = [os.path.join(DATA_IMAGES_PATH, img_name) for img_name in patient_images[patient]]
# 90/10 random train/test split at the patient level (unseeded shuffle).
ids = list(patient_captions.keys())
random.shuffle(ids)
sample_size = int(len(ids)*.1)
train_ids = ids[:-sample_size]
test_ids = ids[-sample_size:]
print ("# train/test: ", len(train_ids), len(test_ids))
```
# `Set up the VGG16 encoder and encode all the images.`
```
# Encoder backbone: VGG16 without its classifier head, on 224x224 RGB inputs.
in_layer = Input(shape=(224, 224, 3))
encoder = VGG16(include_top=False, input_tensor=in_layer)
print(encoder.summary())
def encode(img_path):
    """Load one image from disk and encode it into a VGG16 feature map.

    Returns the encoder's output for a single 224x224 image (batch size 1).
    """
    image = keras.preprocessing.image.load_img(img_path, target_size=(224, 224))
    # convert the image pixels to a numpy array
    image = keras.preprocessing.image.img_to_array(image)
    # add the batch dimension the model expects
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # BUG FIX: the encoder is VGG16, so use VGG16's preprocessing. The original
    # called keras.applications.densenet.preprocess_input, whose normalization
    # does not match the VGG16 pretrained weights.
    image = keras.applications.vgg16.preprocess_input(image)
    return encoder.predict(image, verbose=0)
from tqdm import tqdm
# Precompute the VGG feature maps for every patient's image pair (slow step).
train_encoded_images = {}
for pid in tqdm(train_ids):
train_encoded_images[pid] = [encode(img_path) for img_path in patient_images[pid]]
test_encoded_images = {}
for pid in tqdm(test_ids):
test_encoded_images[pid] = [encode(img_path) for img_path in patient_images[pid]]
test_captions = {pid:patient_captions[pid] for pid in test_ids}
# Persist encodings and captions so later runs can skip re-encoding.
pickle.dump(train_encoded_images, open("train_encoded_images.pkl", "wb"))
pickle.dump(test_encoded_images, open("test_encoded_images.pkl", "wb"))
pickle.dump(patient_images, open("patient_images.pkl", "wb"))
train_captions = {pid:patient_captions[pid] for pid in train_ids}
pickle.dump(patient_captions, open("patient_captions.pkl", "wb"))
```
# `Compute the Vocabulary and make a data processing method.`
```
# Fit the tokenizer on training captions only (test-set words may be out-of-vocabulary).
tokenizer = Tokenizer()
tokenizer.fit_on_texts((train_captions.values()))
# +1 because Keras reserves index 0 for padding.
vocab_size = len(tokenizer.word_index) + 1
print('Vocabulary Size: %d' % vocab_size)
def create_sequences(tokenizer, caption, image1, image2, max_length):
    """Expand one caption into teacher-forcing pairs.

    For every prefix of the encoded caption, emit (image1, image2,
    padded prefix, one-hot next word). Returns four parallel lists.
    """
    images1, images2, prefixes, targets = [], [], [], []
    n_words = len(tokenizer.word_index) + 1
    # Integer-encode the full caption once.
    encoded = tokenizer.texts_to_sequences([caption])[0]
    # One training pair per split point: words [:cut] predict word [cut].
    for cut in range(1, len(encoded)):
        prefix = keras.preprocessing.sequence.pad_sequences([encoded[:cut]], maxlen=max_length)[0]
        target = keras.utils.to_categorical([encoded[cut]], num_classes=n_words)[0]
        images1.append(image1)
        images2.append(image2)
        prefixes.append(prefix)
        targets.append(target)
    return [images1, images2, prefixes, targets]
```
# `Core methods for setting up the model.`
```
# define the captioning model
def define_model(vocab_size, max_length, loss="categorical_crossentropy"):
"""Build the two-image encoder + GRU decoder captioning network."""
# feature extractor (encoder): one (7, 7, 512) VGG feature map per image
inputs1 = Input(shape=(7, 7, 512))
inputs2 = Input(shape=(7, 7, 512))
# Global max-pool each feature map down to a 512-d vector
fe1 = GlobalMaxPooling2D()(inputs1)
fe2 = GlobalMaxPooling2D()(inputs2)
fe3 = Dense(128, activation='relu')(fe1)
fe4 = Dense(128, activation='relu')(fe2)
fe5 = concatenate([fe3,fe4])
# Repeat the fused image vector across every caption timestep
fe6 = RepeatVector(max_length)(fe5)
# embedding (mask_zero=True ignores the padding index 0)
inputs3 = Input(shape=(max_length,))
emb2 = Embedding(vocab_size, 50, mask_zero=True)(inputs3)
emb3 = GRU(128, return_sequences=True)(emb2)
emb4 = TimeDistributed(Dense(128, activation='relu'))(emb3)
# merge image and text streams per timestep
merged = concatenate([fe6, emb4])
# language model (decoder): predicts the next word over the vocabulary
lm2 = GRU(256)(merged)
lm3 = Dense(128, activation='relu')(lm2)
outputs = Dense(vocab_size, activation='softmax')(lm3)
# tie it together [image, image, seq] -> [word]
model = Model(inputs=[inputs1,inputs2, inputs3], outputs=outputs)
model.compile(loss=loss, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
# loss could also be e.g. nltk.translate.bleu_score.sentence_bleu
print(model.summary())
plot_model(model, show_shapes=True, to_file='plot.png')
return model
# map an integer back to its vocabulary word
def word_for_id(integer, tokenizer):
    """Return the word whose tokenizer index equals `integer`, or None if absent."""
    matches = (word for word, index in tokenizer.word_index.items() if index == integer)
    return next(matches, None)
# generate a description for an image
def generate_desc(model, tokenizer, photos, max_length):
"""Greedy decoding: repeatedly feed the partial caption back in and take the argmax word."""
# seed the generation process with the start token
in_text = start
# iterate over the whole length of the sequence
for i in range(max_length):
# integer encode input sequence
sequence = tokenizer.texts_to_sequences([in_text])[0]
# pad input
sequence = keras.preprocessing.sequence.pad_sequences([sequence], maxlen=max_length)
# predict next word
yhat = model.predict([photos[0],photos[1],sequence], verbose=0)
# convert probability distribution to an integer (greedy argmax)
yhat = np.argmax(yhat)
# map integer to word
word = word_for_id(yhat, tokenizer)
# stop if we cannot map the word
if word is None:
break
# append as input for generating the next word
in_text += ' ' + word
# stop if we predict the end of the sequence
if word == end:
break
return in_text
# generate a description for an image using beam search
def generate_desc_beam(model, tokenizer, photos, max_length, beam_size=10):
"""Beam-search decoding: keep the `beam_size` highest-scoring partial captions each step.

NOTE(review): the loop condition measures len() of the partial caption STRING
(characters), not its token count — confirm this matches max_length's intent.
NOTE(review): scores are summed raw probabilities, not log-probabilities, which
biases toward shorter captions — verify this is intended.
"""
in_text = [start]
# Each beam entry is [partial caption string, cumulative score].
start_word = [[start, 0.0]]
while len(start_word[0][0]) < max_length:
tmp = []
for s in start_word:
# Encode and pad the candidate's partial caption.
sequence = tokenizer.texts_to_sequences(s[0])
sequence = keras.preprocessing.sequence.pad_sequences([sequence[0]], maxlen=max_length)
yhat = model.predict([photos[0],photos[1], np.array(sequence)], verbose=0)
# Top beam_size next-word indices for this candidate.
word_yhat = np.argsort(yhat[0])[-beam_size:]
for w in word_yhat:
nextcap, prob = s[0], s[1]
nextcap+= ' ' + word_for_id(w, tokenizer)
prob += yhat[0][w]
#print (nextcap, prob)
tmp.append([nextcap, prob])
start_word = tmp
# Keep the beam_size best candidates (ascending sort, take the tail).
start_word = sorted(start_word, reverse=False, key=lambda l: l[1])
start_word = start_word[-beam_size:]
# After the loop, collapse to the single best caption string.
start_word = start_word[-1][0]
intermediate_caption = start_word
# Truncate at the end token and drop the leading start token.
final_caption = []
for i in intermediate_caption.split():
if i != end: final_caption.append(i)
else: break
final_caption = ' '.join(final_caption[1:])
return final_caption
#bleu = evaluate_n_visualise(loaded_model, test_captions, test_encoded_images, tokenizer, max_length, beam=10)
# evaluate the skill of the model
def evaluate_model(model, descriptions, photos, tokenizer, max_length):
"""Greedy-decode a caption for every patient and return the corpus BLEU score."""
actual, predicted = list(), list()
# step over the whole set
for key, desc in descriptions.items():
# generate description
yhat = generate_desc(model, tokenizer, photos[key], max_length)
# store actual and predicted (corpus_bleu expects token lists)
actual.append([desc.split()])
predicted.append(yhat.split())
# calculate BLEU score
bleu = corpus_bleu(actual, predicted)
return bleu
# data generator, intended to be used in a call to model.fit_generator()
def data_generator(captions, image_tuples, tokenizer, max_length, n_step, validation=False, validation_num=None):
    """Endlessly yield batches of ([img1s, img2s, text prefixes], next words).

    Arguments:
    captions -- dict mapping patient id -> preprocessed caption string
    image_tuples -- dict mapping patient id -> [encoded_img1, encoded_img2]
    tokenizer -- fitted Keras Tokenizer
    max_length -- padded caption length (in tokens)
    n_step -- number of patients per yielded batch
    validation -- if True, iterate only the last `validation_num` patients
    validation_num -- size of the held-out validation tail (None/0 disables the split)
    """
    while True:
        # iterate over patients - hold the tail back for validation
        patients = list(captions.keys())
        if validation:
            assert validation_num and validation_num > 0
            patients = patients[-validation_num:]
        elif validation_num:
            # BUG FIX: the original evaluated `validation_num > 0` even when the
            # default None was passed, which raises TypeError on Python 3.
            patients = patients[:-validation_num]
        for i in range(0, len(patients), n_step):
            Ximages1, Ximages2, XSeq, y = [], [], [], []
            for j in range(i, min(len(patients), i + n_step)):
                patient_id = patients[j]
                # retrieve text input
                caption = captions[patient_id]
                # generate input-output pairs (each patient expands to many samples)
                img1 = image_tuples[patient_id][0][0]
                img2 = image_tuples[patient_id][1][0]
                in_img1, in_img2, in_seq, out_word = create_sequences(tokenizer, caption, img1, img2, max_length)
                for k in range(len(in_img1)):
                    Ximages1.append(in_img1[k])
                    Ximages2.append(in_img2[k])
                    XSeq.append(in_seq[k])
                    y.append(out_word[k])
            # yield this batch of samples to the model
            yield [np.array(Ximages1), np.array(Ximages2), np.array(XSeq)], np.array(y)
# evaluate the skill of the model on a random sample, printing examples
def evaluate_n_visualise(model, descriptions, photos, tokenizer, max_length, size=5, beam=0):
    """Print and BLEU-score up to `size` randomly sampled predictions.

    Roughly one in five items is sampled; when beam > 0, beam-search
    decoding is used, otherwise greedy decoding. Returns the corpus BLEU
    over the sampled pairs.
    """
    references, hypotheses = [], []
    for key, reference_text in descriptions.items():
        # sample ~20% of the items
        if random.random() <= 0.8:
            continue
        if beam > 0:
            generated = generate_desc_beam(model, tokenizer, photos[key], max_length, beam_size=beam)
        else:
            generated = generate_desc(model, tokenizer, photos[key], max_length)
        # show this actual/predicted pair
        print('Actual: %s' % reference_text)
        print('Predicted: %s\n' % generated)
        references.append([reference_text.split()])
        hypotheses.append(generated.split())
        if len(references) >= size:
            break
    # calculate BLEU score over the sample
    return corpus_bleu(references, hypotheses)
```
# `Run the model.`
```
# define experiment hyper-parameters
verbose = 1
n_epochs = 300
max_length = 60  # maximum caption length in words
n_patients_per_update = 16
# hold out ~1% of the training patients for validation
val_len = int(.01 * len(train_ids))
train_len = len(train_ids) - val_len
train_steps = int(train_len / n_patients_per_update)
val_steps = int(val_len / n_patients_per_update)
# encode the experiment settings into the saved-model filename
model_name = 'show_n_tell.e'+str(n_epochs)+'.ml'+str(max_length) + '.ppu'+str(n_patients_per_update) + '.val'+str(val_steps)
print (train_steps, val_steps, model_name)
show_n_tell = define_model(vocab_size, max_length, loss="categorical_crossentropy")
# Train: separate generators over the same data for train and validation splits
train_gen = data_generator(train_captions, train_encoded_images, tokenizer, max_length, n_patients_per_update, validation=False, validation_num=val_len)
val_gen = data_generator(train_captions, train_encoded_images, tokenizer, max_length, n_patients_per_update, validation=True, validation_num=val_len)
# stop when validation accuracy plateaus for 10 epochs; restore the best weights
early = keras.callbacks.EarlyStopping(monitor='val_acc', min_delta=0.001, patience=10, verbose=1, mode='auto', restore_best_weights=True)
show_n_tell.fit_generator(train_gen, validation_data=val_gen, steps_per_epoch=train_steps, validation_steps=val_steps, epochs=n_epochs, verbose=verbose, callbacks=[early])
# Save & download
show_n_tell.save(model_name+'.h5')
#from google.colab import files
#files.download(model_name+'.h5')
```
# `See an example text generation:`
```
# Load & evaluate
#loaded_model = keras.models.load_model(model_name+'.h5')
# NOTE(review): assumes `loaded_model` already exists in the session;
# uncomment the line above to load the saved model from disk instead.
loaded_model.summary()
plot_model(loaded_model, show_shapes=True, to_file='model.png')
! mv model.png /usr/local/share/jupyter/nbextensions/google.colab/model.png
%%html
<img src='/nbextensions/google.colab/model.png' width=1024/>
# greedy decoding (beam=0) BLEU on a small random sample of the test set
b4 = evaluate_n_visualise(loaded_model, test_captions, test_encoded_images, tokenizer, max_length, beam=0)
print ("BLEU (@sample):", b4)
```
# `See the diversity & evaluate.`
```
# Generate a caption for every test image and collect the gold references
predicted = list()
actual = list()
for key in test_encoded_images:
    caption = generate_desc(loaded_model, tokenizer, test_encoded_images[key], max_length)
    gold = test_captions [key]
    predicted.append(caption.split())
    actual.append([gold.split()])
# diversity = fraction of unique captions among all generated / gold captions
denom = len(predicted)
nom_pred = len(set(map(lambda x: " ".join(x), predicted)))
nom_actual = len(set(map(lambda x: " ".join(x[0]), actual)))
print ("diversity@predicted:", nom_pred/denom)
print ("diversity@actual:", nom_actual/denom)
# call METEOR, ROUGE, CIDEr, SPICE,
! git clone https://github.com/salaniz/pycocoevalcap.git || true
from pycocoevalcap.rouge import rouge
from pycocoevalcap.meteor import meteor
from pycocoevalcap.bleu import bleu
from pycocoevalcap.cider import cider
# Per-pair metric accumulators.
# NOTE(review): "MET" and "SPI" are never appended to (METEOR is commented
# out below as too slow), so their means print as nan.
measures = {"ROU":[], "MET":[], "BLU1":[], "BLU2":[], "BLU3":[], "BLU4":[], "SPI":[], "CID":[]}
for pred,act in zip(predicted, actual):
    # join token lists back into strings for the pycocoevalcap scorers
    pred, act = " ".join(pred), " ".join(act[0])
    rou = rouge.Rouge().calc_score([pred], [act])
    blu1, _ = bleu.BleuScorer(n=1, test=pred, refs=[act]).compute_score(option='closest', verbose=0)
    blu2, _ = bleu.BleuScorer(n=2, test=pred, refs=[act]).compute_score(option='closest', verbose=0)
    blu3, _ = bleu.BleuScorer(n=3, test=pred, refs=[act]).compute_score(option='closest', verbose=0)
    blu4, _ = bleu.BleuScorer(n=4, test=pred, refs=[act]).compute_score(option='closest', verbose=0)
    # blu4, _ = bleu.Bleu(4).compute_score({1:[pred]}, {1:[act]})
    cid, _ = cider.CiderScorer(test=pred, refs=[act]).compute_score(option='closest', verbose=0)
    #met, _ = meteor.Meteor().compute_score({1:[pred]}, {1:[act]}) # too slow
    measures["ROU"].append(rou)
    measures["BLU1"].append(blu1)
    measures["BLU2"].append(blu2)
    measures["BLU3"].append(blu3)
    measures["BLU4"].append(blu4)
    measures["CID"].append(cid)
# report corpus-level means of the per-pair scores
print ("ROUGE:", np.mean(measures["ROU"]))
print ("METEOR:", np.mean(measures["MET"]))
print ("BLEU1:", np.mean(measures["BLU1"]))
print ("BLEU2:", np.mean(measures["BLU2"]))
print ("BLEU3:", np.mean(measures["BLU3"]))
print ("BLEU4:", np.mean(measures["BLU4"]))
print ("CIDEr:", np.mean(measures["CID"]))
```
# `Now see what happens per epoch:`
```
# See what the model produces in each epoch:
show_n_tell = define_model(vocab_size, max_length, loss="categorical_crossentropy")
for i in range(n_epochs):
    print ("### Running epoch ", i, " ###")
    # fresh generators each epoch, since the previous ones may be mid-iteration
    train_gen = data_generator(train_captions, train_encoded_images, tokenizer, max_length, n_patients_per_update, validation=False, validation_num=val_len)
    val_gen = data_generator(train_captions, train_encoded_images, tokenizer, max_length, n_patients_per_update, validation=True, validation_num=val_len)
    show_n_tell.fit_generator(train_gen, validation_data=val_gen, steps_per_epoch=train_steps, validation_steps=val_steps, epochs=1, verbose=verbose)
    # sample three random test captions after the epoch
    for _ in range(3):
        test_id = random.choice(test_ids)
        generation = generate_desc(show_n_tell, tokenizer, test_encoded_images[test_id], max_length)
        print ("Predicted:", generation)
        print ("Actual :", test_captions[test_id])
        print ()
# run experiment with repetitions
n_repeats = 1
train_results, test_results = list(), list()
for i in range(n_repeats):
    model = define_model(vocab_size, max_length)
    # NOTE(review): n_photos_per_update and n_batches_per_epoch are not defined
    # in this cell — presumably set elsewhere in the notebook; verify before running
    model.fit_generator(data_generator(train_captions, train_encoded_images, tokenizer, max_length, n_photos_per_update), steps_per_epoch=n_batches_per_epoch, epochs=n_epochs, verbose=verbose)
    train_score = evaluate_model(model, train_captions, train_encoded_images, tokenizer, max_length)
    train_results.append(train_score)
    print('>%d: train=%f' % ((i+1), train_score))
# summarize the repeated runs and persist them to CSV
df = DataFrame()
df['train'] = train_results
print(df.describe())
df.to_csv(model_name+'.csv', index=False)
```
| github_jupyter |
# <center>Introduction to Python Programming and Best Practices</center>
## <center>Instructors: Matt Slivinski and Andras Zsom</center>
### <center>[Center for Computation and Visualization](https://ccv.brown.edu/)</center>
### <center>Sponsored by the [Data Science Initiative](https://www.brown.edu/initiatives/data-science/home)</center>
## <center>Overview of the workshop</center>
Learning objectives of the first two weeks:
- describe and work with container types such as variables, lists, dictionaries, and arrays,
- describe and write short code snippets using control flow techniques such as if statements, for loops, and list comprehensions,
- write functions to manipulate data.
## <center>Overview of the workshop</center>
Learning objectives of the third week:
- work with python collections and itertools,
- manipulate date-time objects,
- interact with the operating system using os, sys, shutil,
- parse JSON data with python,
- parse CSV and SQL data with python.
## <center>Overview of the workshop</center>
Learning objectives of the fourth week:
- use pandas to read in tabular data (CSV, excel, and SQL databases)
- filter and modify the data
- visualize the data using pandas and matplotlib
- calculate summary statistics
## <center>What to expect</center>
- learning to code is tough but we will do our best to guide you through it
- always feel free to ask questions! there are no stupid questions, only stupid answers!
- lots of time to practice and try things yourself
- you will spend most of your time debugging your code or trying to understand other people's code
- this can be a very frustrating experience and you will make a lot of mistakes, especially in the beginning
- we will equip you with tools to improve your debugging skills
- you won't become proficient in python in a month if you start with zero or very little experience
- but you'll have strong basics to build upon!
- I hope you'll feel more empowered by the end of the workshop!
- you'll be able to automate things that would take you way too much time to do manually
- you will be able to do things with tabular data that you can't do in excel
- you'll be able to prepare publication-ready figures
## <center>Goal for today: set up and get familiar with your coding environment</center>
- download the workshop materials from github
- install conda
- create the python environment using conda
- walk through jupyter-lab
### 1. Download the workshop materials
- go to https://github.com/brown-ccv/workshop-python-2021
- click on 'Code'
- download zip
- unzip the file wherever you'd like the workshop material to be (maybe the Documents folder?)
If you are familiar with git, simply clone the repo. Type in the terminal:
*git clone https://github.com/brown-ccv/workshop-python-2021.git*
## 2. Install conda
- https://www.anaconda.com/products/individual
## 3. Create the python environment
- python and the packages we will use change over time
- we will show you how to create a reproducible python environment
- you can recreate this environment on any machine years down the road and you'll be able to run the notebooks of the workshop
- open a terminal/console/command line tool
- make sure you are in the same folder where the material is
- if you type *ls* or *dir*, you should see the folder *workshop-python-2021*
- type the following commands in the terminal
*conda env create -n workshop_env -f workshop-python-2021/workshop.yml*
*ipython kernel install --name "workshop" --user*
*conda activate workshop_env*
- then you can type *jupyter-lab* and the jupyter lab window should open up in your browser
## 4. Jupyter lab walk through
## Plan B
If the installation is unsuccessful, you can also use the jupyter hub:
http://ccv.jupyter.brown.edu/
This is not our default method during the workshop because the hub is a cloud-based service, it might be temporary, it's not easy to change the coding environment if you need to.
| github_jupyter |
## GANs
```
%matplotlib inline
from fastai.gen_doc.nbdoc import *
from fastai.vision import *
from fastai.vision.gan import *
```
GAN stands for [Generative Adversarial Nets](https://arxiv.org/pdf/1406.2661.pdf) and were invented by Ian Goodfellow. The concept is that we will train two models at the same time: a generator and a critic. The generator will try to make new images similar to the ones in our dataset, and the critic's job will try to classify real images from the fake ones the generator does. The generator returns images, the discriminator a feature map (it can be a single number depending on the input size). Usually the discriminator will be trained to return 0. everywhere for fake images and 1. everywhere for real ones.
This module contains all the necessary function to create a GAN.
We train them against each other in the sense that at each step (more or less), we:
1. Freeze the generator and train the discriminator for one step by:
- getting one batch of true images (let's call that `real`)
- generating one batch of fake images (let's call that `fake`)
- have the discriminator evaluate each batch and compute a loss function from that; the important part is that it rewards positively the detection of real images and penalizes the fake ones
- update the weights of the discriminator with the gradients of this loss
2. Freeze the discriminator and train the generator for one step by:
- generating one batch of fake images
- evaluate the discriminator on it
- return a loss that rewards the generator positively when the discriminator classifies those fake images as real; the important part is that it encourages the generator to fool the critic
- update the weights of the generator with the gradients of this loss
```
# render the fastai API docs for the GANLearner class
show_doc(GANLearner)
```
This is the general constructor to create a GAN, you might want to use one of the factory methods that are easier to use. Create a GAN from [`data`](/vision.data.html#vision.data), a `generator` and a `critic`. The [`data`](/vision.data.html#vision.data) should have the inputs the `generator` will expect and the images wanted as targets.
`gen_loss_func` is the loss function that will be applied to the `generator`. It takes three argument `fake_pred`, `target`, `output` and should return a rank 0 tensor. `output` is the result of the `generator` applied to the input (the xs of the batch), `target` is the ys of the batch and `fake_pred` is the result of the `discriminator` being given `output`. `output`and `target` can be used to add a specific loss to the GAN loss (pixel loss, feature loss) and for a good training of the gan, the loss should encourage `fake_pred` to be as close to 1 as possible (the `generator` is trained to fool the `critic`).
`crit_loss_func` is the loss function that will be applied to the `critic`. It takes two arguments `real_pred` and `fake_pred`. `real_pred` is the result of the `critic` on the target images (the ys of the batch) and `fake_pred` is the result of the `critic` applied on a batch of fake images, generated by the `generator` from the xs of the batch.
`switcher` is a [`Callback`](/callback.html#Callback) that should tell the GAN when to switch from critic to generator and vice versa. By default it does 5 iterations of the critic for 1 iteration of the generator. The model begins the training with the `generator` if `gen_first=True`. If `switch_eval=True`, the model that isn't trained is switched on eval mode (left in training mode otherwise, which means some statistics like the running mean in batchnorm layers are updated, or the dropouts are applied).
`clip` should be set to a certain value if one wants to clip the weights (see the [Wasserstein GAN](https://arxiv.org/pdf/1701.07875.pdf) for instance).
If `show_img=True`, one image generated by the GAN is shown at the end of each epoch.
### Factory methods
```
# render docs for the factory method building a GANLearner from two Learners
show_doc(GANLearner.from_learners)
```
Directly creates a [`GANLearner`](/vision.gan.html#GANLearner) from two [`Learner`](/basic_train.html#Learner): one for the `generator` and one for the `critic`. The `switcher` and all `kwargs` will be passed to the initialization of [`GANLearner`](/vision.gan.html#GANLearner) along with the following loss functions:
- `loss_func_crit` is the mean of `learn_crit.loss_func` applied to `real_pred` and a target of ones with `learn_crit.loss_func` applied to `fake_pred` and a target of zeros
- `loss_func_gen` is the mean of `learn_crit.loss_func` applied to `fake_pred` and a target of ones (to fool the discriminator) with `learn_gen.loss_func` applied to `output` and `target`. The weights of each of those contributions can be passed in `weights_gen` (default is 1. and 1.)
```
# render docs for the Wasserstein-GAN factory method
show_doc(GANLearner.wgan)
```
The Wasserstein GAN is detailed in [this article](https://arxiv.org/pdf/1701.07875.pdf). `switcher` and the `kwargs` will be passed to the [`GANLearner`](/vision.gan.html#GANLearner) init, `clip` is the weight clipping.
## Switchers
In any GAN training, you will need to tell the [`Learner`](/basic_train.html#Learner) when to switch from generator to critic and vice versa. The two following [`Callback`](/callback.html#Callback) are examples to help you with that.
As usual, don't call the `on_something` methods directly, the fastai library will do it for you during training.
```
# render docs for the two generator/critic switcher callbacks
show_doc(FixedGANSwitcher, title_level=3)
show_doc(FixedGANSwitcher.on_train_begin)
show_doc(FixedGANSwitcher.on_batch_end)
show_doc(AdaptiveGANSwitcher, title_level=3)
show_doc(AdaptiveGANSwitcher.on_batch_end)
```
## Discriminative LR
If you want to train your critic at a different learning rate than the generator, this will let you do it automatically (even if you have a learning rate schedule).
```
# render docs for the discriminative learning-rate callback
show_doc(GANDiscriminativeLR, title_level=3)
show_doc(GANDiscriminativeLR.on_batch_begin)
show_doc(GANDiscriminativeLR.on_step_end)
```
## Specific models
```
# render docs for the basic critic model constructor
show_doc(basic_critic)
```
This model contains a first 4 by 4 convolutional layer of stride 2 from `n_channels` to `n_features` followed by `n_extra_layers` 3 by 3 convolutional layer of stride 1. Then we put as many 4 by 4 convolutional layer of stride 2 with a number of features multiplied by 2 at each stage so that the `in_size` becomes 1. `kwargs` can be used to customize the convolutional layers and are passed to [`conv_layer`](/layers.html#conv_layer).
```
# render docs for the basic generator model constructor
show_doc(basic_generator)
```
This model contains a first 4 by 4 transposed convolutional layer of stride 1 from `noise_size` to the last numbers of features of the corresponding critic. Then we put as many 4 by 4 transposed convolutional layer of stride 2 with a number of features divided by 2 at each stage so that the image ends up being of height and width `in_size//2`. At the end, we add `n_extra_layers` 3 by 3 convolutional layer of stride 1. The last layer is a transpose convolution of size 4 by 4 and stride 2 followed by `tanh`. `kwargs` can be used to customize the convolutional layers and are passed to [`conv_layer`](/layers.html#conv_layer).
```
# render docs for gan_critic and the GANTrainer callback
show_doc(gan_critic)
show_doc(GANTrainer)
```
[`LearnerCallback`](/basic_train.html#LearnerCallback) that will be responsible to handle the two different optimizers (one for the generator and one for the critic), and do all the work behind the scenes so that the generator (or the critic) is in training mode with its parameters requiring gradients each time we switch.
`switch_eval=True` means that the [`GANTrainer`](/vision.gan.html#GANTrainer) will put the model that isn't training into eval mode (if it's `False` its running statistics like in batchnorm layers will be updated and dropout will be applied). `clip` is the clipping applied to the weights (if not `None`). `beta` is the coefficient for the moving averages as the [`GANTrainer`](/vision.gan.html#GANTrainer) tracks separately the generator loss and the critic loss. `gen_first=True` means the training begins with the generator (with the critic if it's `False`). If `show_img=True` we show a generated image at the end of each epoch.
```
# render docs for the GANTrainer mode-switch method
show_doc(GANTrainer.switch)
```
If `gen_mode` is left as `None`, just put the model in the other mode (critic if it was in generator mode and vice versa).
```
# render docs for the GANTrainer lifecycle callbacks
show_doc(GANTrainer.on_train_begin)
show_doc(GANTrainer.on_epoch_begin)
show_doc(GANTrainer.on_batch_begin)
show_doc(GANTrainer.on_backward_begin)
show_doc(GANTrainer.on_epoch_end)
show_doc(GANTrainer.on_train_end)
```
## Specific modules
```
# render docs for the GANModule wrapper
show_doc(GANModule, title_level=3)
```
If `gen_mode` is left as `None`, just put the model in the other mode (critic if it was in generator mode and vice versa).
```
# render docs for module switching, the loss wrappers, and the expanded accuracy metric
show_doc(GANModule.switch)
show_doc(GANLoss, title_level=3)
show_doc(AdaptiveLoss, title_level=3)
show_doc(accuracy_thresh_expand)
```
## Data Block API
```
# render docs for the data-block API items
show_doc(NoisyItem, title_level=3)
show_doc(GANItemList, title_level=3)
```
Inputs will be [`NoisyItem`](/vision.gan.html#NoisyItem) of `noise_sz` while the default class for target is [`ImageList`](/vision.data.html#ImageList).
```
# render docs for the GANItemList display helpers
show_doc(GANItemList.show_xys)
show_doc(GANItemList.show_xyzs)
```
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
# render docs for the intentionally undocumented methods
show_doc(GANLoss.critic)
show_doc(GANModule.forward)
show_doc(GANLoss.generator)
show_doc(NoisyItem.apply_tfms)
show_doc(AdaptiveLoss.forward)
show_doc(GANItemList.get)
show_doc(GANItemList.reconstruct)
show_doc(AdaptiveLoss.forward)
```
## New Methods - Please document or move to the undocumented section
| github_jupyter |
<a id=top></a>
# Pea3 smFISH Analysis
## Table of Contents
----
1. [Preparations](#prep)
2. [QC: Spot Detection](#QC_spots)
3. [QC: Cell Shape](#QC_shape)
4. [Data Visualization](#viz)
5. [Predicting Expression from Shape: Testing](#atlas_test)
6. [Predicting Expression from Shape: Running](#atlas_run)
7. [Predicting Expression from Shape: Visualization](#atlas_viz)
<a id=prep></a>
## 1. Preparations
[back to top](#top)
----
```
### Import modules
# NOTE(review): this notebook is Python 2 (print statements, integer division
# semantics softened by the __future__ import below).
# External, general
from __future__ import division
import os, sys, pickle
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# External, specific
import ipywidgets as widgets
import scipy.stats as stats
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from skimage import io
from sklearn import model_selection, metrics, multioutput
import sklearn.svm as svm
# Internal
import katachi.utilities.loading as ld
### Load general data
# Prep loader
loader = ld.DataLoaderIDR()
loader.find_imports(r"data/experimentB/extracted_measurements/", recurse=True, verbose=True)
# Import shape spaces; fspace_idx maps each cell row to its prim index
fspace_TFOR, prim_IDs, fspace_idx = loader.load_dataset("shape_TFOR_raw_measured.tsv")
fspace_CFOR, _, _ = loader.load_dataset("shape_CFOR_raw_measured.tsv", IDs=prim_IDs)
print "Imported TFOR shape space of shape:", fspace_TFOR.shape
print "Imported CFOR shape space of shape:", fspace_CFOR.shape
# Standardization and PCA (z-score each feature, then PCA per shape space)
fspace_TFOR_z = StandardScaler().fit_transform(fspace_TFOR)
pca_TFOR = PCA()
fspace_TFOR_pca = pca_TFOR.fit_transform(fspace_TFOR_z)
fspace_CFOR_z = StandardScaler().fit_transform(fspace_CFOR)
pca_CFOR = PCA()
fspace_CFOR_pca = pca_CFOR.fit_transform(fspace_CFOR_z)
# Import TFOR centroid locations
# (columns 3:6, axis order reversed; presumably ZYX -> XYZ — confirm with katachi docs)
centroids = loader.load_dataset("_other_measurements.tsv", IDs=prim_IDs)[0][:,3:6][:,::-1]
print "Imported TFOR centroids of shape:", centroids.shape
# Import & standardize engineered features (drop raw centroid columns first)
covar_df, _, _ = loader.load_dataset("_other_measurements.tsv", IDs=prim_IDs, force_df=True)
del covar_df['Centroids RAW X']; del covar_df['Centroids RAW Y']; del covar_df['Centroids RAW Z']
covar_names = list(covar_df.columns)
covar_df_z = (covar_df - covar_df.mean()) / covar_df.std()
print "Imported engineered features of shape:", covar_df.shape
### Load smFISH data
# Counts (one RNA count per cell)
rna_counts, _, _ = loader.load_dataset("pea3smFISH_RNAcounts_measured.tsv", IDs=prim_IDs)
print "Imported RNA counts data of shape:", rna_counts.shape
# Spots (per-sample coordinate arrays)
rna_spots, _, _= loader.load_dataset("pea3smFISH_RNAspot_coordinates.tsv", IDs=prim_IDs, force_dict=True)
print "Imported RNA spot coordinates for", len(rna_spots), "samples, the first having shape", rna_spots[prim_IDs[0]].shape
### Outlier removal
# Remove samples with `mean(rna_counts) <= mean_count_thresh` as a simple and helpful quality threshold
mean_count_thresh = 2
count_means = [np.mean(rna_counts[fspace_idx==prim_idx]) for prim_idx in range(len(prim_IDs))]
# True for prims (and their cells) that pass the quality threshold
rna_exclude_prim_mask = np.array(count_means) > mean_count_thresh
rna_exclude_cell_mask = rna_exclude_prim_mask[fspace_idx]
# Report (trailing commas keep the Python 2 print on one logical line)
print "Excluding", np.sum(~rna_exclude_prim_mask), "prims /", np.sum(~rna_exclude_cell_mask), "cells,",
print "resulting in", np.sum(rna_exclude_prim_mask), "prims /", np.sum(rna_exclude_cell_mask), "cells",
print "left for analysis."
```
<a id=QC_spots></a>
## 2. QC: Spot Detection
[back to top](#top)
----
```
### Boxplot of mean counts & per-cell counts
# Note:
# - Durdu et al. found a mean of ~11 spots/cell in their manually analyzed data.
#   This plot is designed to fit their way of reporting the results.
# - This is recapitulated quite well here, except for a couple of outliers with
#   unrealistically low expression.
# - However, note that the cell-level distribution is very non-normal, so the mean
#   is not a very good summary characteristic.
# Get per-prim mean RNA counts (fspace_idx maps each cell to its prim index)
count_means = np.array([np.mean(rna_counts[fspace_idx==prim_idx])
                        for prim_idx in range(len(prim_IDs))])
# Fig prep
fig, ax = plt.subplots(1, 2, figsize=(3.5, 4.5))
# Make boxplots: left = per-prim means, right = all single-cell counts
bp_m = ax[0].boxplot(count_means, widths=0.5, patch_artist=True)
bp_a = ax[1].boxplot(rna_counts, widths=0.5, patch_artist=True, showfliers=False)
def style_boxplot(bp):
for patch in bp['boxes']:
patch.set(edgecolor='black', linewidth=1.2,)
for whisker in bp['whiskers']:
whisker.set(color='black', linestyle='-')
for cap in bp['caps']:
cap.set(linewidth=1.2)
for median in bp['medians']:
median.set(color='black', linewidth=1.2)
# Style the boxplots
style_boxplot(bp_m)
style_boxplot(bp_a)
# Add jittered scatter of the raw values on top of each boxplot
ax[0].scatter(np.random.normal(1.0, 0.06, len(count_means)), count_means,
              zorder=10, s=20, alpha=0.7, c='midnightblue', edgecolor='')
ax[0].set_ylim([-2, 47])
ax[1].scatter(np.random.normal(1.0, 0.06, len(rna_counts)), rna_counts,
              zorder=10, s=2, alpha=0.1, c='midnightblue', edgecolor='')
ax[1].set_ylim([-2, 100])
# Remove ticks on the top/right spines
ax[0].yaxis.set_ticks_position('left')
ax[0].xaxis.set_ticks_position('bottom')
ax[1].yaxis.set_ticks_position('left')
ax[1].xaxis.set_ticks_position('bottom')
# Axis labels (upright math text for the italic gene name)
from matplotlib import rcParams
rcParams['mathtext.default'] = 'regular'
ax[0].set_ylabel(r'$\it{pea3}$ transcripts per cell (mean)', fontsize=12, labelpad=5)
ax[0].set_xticklabels(['WT 880'], rotation=90, fontsize=12)
ax[1].set_ylabel(r'$\it{pea3}$ transcripts per cell (all)', fontsize=12, labelpad=0)
ax[1].set_xticklabels(['WT 880'], rotation=90, fontsize=12)
plt.tight_layout()
# Show
plt.show()
### Histograms of RNA counts for each sample
# Prep a grid with one subplot per prim; switch off unused axes
n_plot_cols = 7
n_plot_rows = int(np.ceil(len(prim_IDs) / n_plot_cols))
fig, ax = plt.subplots(n_plot_rows, n_plot_cols, figsize=(1.5*n_plot_cols, 1.5*n_plot_rows),
                       sharex=True, sharey=True)
ax = ax.flatten()
[ax[i].axis('off') for i in range(len(prim_IDs), n_plot_cols*n_plot_rows)]
# For each sample... (outliers flagged by the exclusion mask are drawn in red)
for axx, prim_idx, prim_ID, is_outlier in zip(ax, range(len(prim_IDs)), prim_IDs, ~rna_exclude_prim_mask):
    # Generate the histogram
    axx.hist(rna_counts[fspace_idx==prim_idx],
             bins=40, range=(rna_counts.min(), rna_counts.max()),
             histtype='stepfilled', color='darkblue' if not is_outlier else 'darkred', alpha=0.5)
    axx.set_title(prim_ID, fontsize=9)
# Set common axis labels
fig.text(0.5, -0.01, 'RNA Counts', ha='center', va='center')
fig.text(-0.01, 0.50, 'Histogram\nof Cells', ha='center', va='center', rotation='vertical')
# Done
plt.tight_layout()
plt.show()
### Histogram of counts over all cells
# Prep
plt.figure(figsize=(5, 3))
# Make hist
plt.hist(rna_counts, bins=100, histtype='stepfilled', color='b', alpha=0.5)
# Label
plt.xlabel('RNA Count')
plt.ylabel('Histogram of Cells')
# Done
plt.show()
```
<a id=QC_shape></a>
## 3. QC: Cell Shape (Fixation Effects)
[back to top](#top)
----
```
### Load live imaging reference data
# Prep loader
ref_loader = ld.DataLoaderIDR()
ref_loader.find_imports(r"data/experimentA/extracted_measurements/", recurse=True, verbose=True)
# Use only the 24 samples that were single-color imaged
ref_IDs = ['056F63395C', '08B96BE794', '0B51F8B46C', '1C43D83E9A', '2902E38204', '4DC24FC301',
           '6F18162F4C', '8C633380D2', 'B95A4F6D95', 'CB87D7CBC9', '0E48AB134C', '3612A6CEF5',
           '8713481504', '8C83D4387F', 'AB98466077', 'C95F528559', 'E013272A99', 'E6E56C3F42',
           '22DF2AE1A0', '2B23352582', '673A65D087', '8CA33561B5', 'EC77708A51', 'FC90367714']
# Import shape spaces of the live-imaged reference samples
ref_TFOR, _, ref_idx = ref_loader.load_dataset("shape_TFOR_raw_measured.tsv", IDs=ref_IDs)
ref_CFOR, _, _ = ref_loader.load_dataset("shape_CFOR_raw_measured.tsv", IDs=ref_IDs)
print "Imported TFOR shape space of shape:", ref_TFOR.shape
print "Imported CFOR shape space of shape:", ref_CFOR.shape
# Standardization and apply PCA (fitted above on the fixed dataset,
# so reference and fixed cells land in the same PC coordinates)
ref_TFOR_z = StandardScaler().fit_transform(ref_TFOR)
ref_TFOR_pca = pca_TFOR.transform(ref_TFOR_z)
ref_CFOR_z = StandardScaler().fit_transform(ref_CFOR)
ref_CFOR_pca = pca_CFOR.transform(ref_CFOR_z)
# Import & standardize engineered features (drop raw centroid columns, as before)
ref_covar_df, _, _ = ref_loader.load_dataset("_other_measurements.tsv", IDs=ref_IDs, force_df=True)
del ref_covar_df['Centroids RAW X']; del ref_covar_df['Centroids RAW Y']; del ref_covar_df['Centroids RAW Z']
ref_covar_names = list(ref_covar_df.columns)
ref_covar_df_z = (ref_covar_df - ref_covar_df.mean()) / ref_covar_df.std()
print "Imported engineered features of shape:", ref_covar_df.shape
### Compare to reference shape spaces: overlay
# Set interactions: PC selectors over the available components
@widgets.interact(PCx=(1, fspace_TFOR_pca.shape[1], 1),
                  PCy=(1, fspace_TFOR_pca.shape[1], 1))
# Show reference (blue) vs fixed (red) cells in the chosen PC plane
def show_PCs(PCx=1, PCy=2):
    # Prep
    fig, ax = plt.subplots(1, 2, figsize=(12,5))
    # Plot TFOR
    ax[0].scatter(ref_TFOR_pca[:,PCx-1], ref_TFOR_pca[:,PCy-1],
                  c='b', cmap=plt.cm.plasma, edgecolor='',
                  s=20, alpha=0.25, label='reference')
    ax[0].scatter(fspace_TFOR_pca[:,PCx-1], fspace_TFOR_pca[:,PCy-1],
                  c='r', cmap=plt.cm.plasma, edgecolor='',
                  s=20, alpha=0.25, label='fixed')
    # Plot CFOR
    ax[1].scatter(ref_CFOR_pca[:,PCx-1], ref_CFOR_pca[:,PCy-1],
                  c='b', cmap=plt.cm.plasma, edgecolor='',
                  s=20, alpha=0.25, label='reference')
    ax[1].scatter(fspace_CFOR_pca[:,PCx-1], fspace_CFOR_pca[:,PCy-1],
                  c='r', cmap=plt.cm.plasma, edgecolor='',
                  s=20, alpha=0.25, label='fixed')
    # Cosmetics
    ax[0].legend(fontsize=8, frameon=False)
    ax[0].set_xlabel("PC "+str(PCx))
    ax[1].set_xlabel("PC "+str(PCx))
    ax[0].set_ylabel("PC "+str(PCy))
    ax[1].set_ylabel("PC "+str(PCy))
    ax[0].set_title("TFOR")
    ax[1].set_title("CFOR")
    # Done
    plt.tight_layout()
    plt.show()
### Compare to reference cell extents (Z/Y/X axis lengths, live vs fixed)
# Prep for plots
fig, ax = plt.subplots(1, 3, figsize=(6.5,3), sharey=True)
# Create plots
for i, lbl in enumerate(['Z', 'Y', 'X']):
    # Violinplot: reference (left) vs fixed (right)
    vio = ax[i].violinplot([ref_covar_df[lbl+' Axis Length'],
                            covar_df[lbl+' Axis Length']],
                           widths=0.60, showextrema=False)
    # Violinplot cosmetics
    vio['bodies'][0].set_facecolors('lightskyblue')
    vio['bodies'][1].set_facecolors('tomato')
    ax[i].set_xlim(0.3, 2.7)
    ax[i].set_xticks([1.0, 2.0])
    ax[i].set_xticklabels(["Reference", "Fixed"])
    ax[i].set_ylabel(lbl)
    # Jittered raw data points over each violin
    for j,y in enumerate([ref_covar_df[lbl+' Axis Length'], covar_df[lbl+' Axis Length']]):
        x = np.random.normal(j+1, 0.08, size=len(y))
        ax[i].plot(x, y, '.', color=['blue', 'red'][j], alpha=[0.1, 0.1][j], ms=2)
# Done
plt.tight_layout()
plt.show()
### Compare to reference cell sphericity
# Violinplot
plt.figure(figsize=(2,3))
vio = plt.violinplot([ref_covar_df['Sphericity'], covar_df['Sphericity']],
                     widths=0.60, showextrema=False)
# Violinplot cosmetics
vio['bodies'][0].set_facecolors('lightskyblue')
vio['bodies'][1].set_facecolors('tomato')
plt.xlim(0.3, 2.7)
plt.xticks([1.0, 2.0])
plt.gca().set_xticklabels(["Reference", "Fixed"])
plt.ylabel("Cell Sphericity")
# Jittered raw data points
for i,y in enumerate([ref_covar_df['Sphericity'], covar_df['Sphericity']]):
    x = np.random.normal(i+1, 0.08, size=len(y))
    plt.plot(x, y, '.', color=['blue', 'red'][i], alpha=[0.1, 0.1][i], ms=2)
# Done
plt.show()
### Compare to reference cell volume
# Violinplot
plt.figure(figsize=(2,3))
vio = plt.violinplot([ref_covar_df['Volume'], covar_df['Volume']], widths=0.60, showextrema=False)
# Violinplot cosmetics
vio['bodies'][0].set_facecolors('lightskyblue')
vio['bodies'][1].set_facecolors('tomato')
plt.xlim(0.3, 2.7)
plt.xticks([1.0, 2.0])
plt.gca().set_xticklabels(["Reference", "Fixed"])
plt.ylabel("Cell Volume")
# Jittered raw data points
for i,y in enumerate([ref_covar_df['Volume'], covar_df['Volume']]):
    x = np.random.normal(i+1, 0.08, size=len(y))
    plt.plot(x, y, '.', color=['blue', 'red'][i], alpha=[0.1, 0.1][i], ms=2)
# Done
plt.show()
### For publication: compare to diverse set of shape references
# Prep for plots
fig, ax = plt.subplots(1, 3, figsize=(8, 3.5))
# Violinplot data: three representative shape measures, live vs fixed
vio_data = [[ref_TFOR_pca[:,0], fspace_TFOR_pca[:,0]],                    # TFOR PC 1
            [ref_CFOR_pca[:,0], fspace_CFOR_pca[:,0]],                    # CFOR PC 1
            [ref_covar_df['Z Axis Length'], covar_df['Z Axis Length']]]   # Cell Height
# Create plots
for i, lbl in enumerate(['TFOR-PC1 (D-V orient.)',
                         'CFOR-PC1 (sphericity)',
                         r'Cell height $\it{[\mu m]}$']):
    # Violinplot
    vio = ax[i].violinplot(vio_data[i], widths=0.70, showextrema=False)
    # Violinplot cosmetics (white bodies; extra headroom above)
    vio['bodies'][0].set_facecolors('w')
    vio['bodies'][1].set_facecolors('w')
    ax[i].set_xlim(0.3, 2.7)
    ylims = ax[i].get_ylim()
    ax[i].set_ylim(ylims[0]-(ylims[1]-ylims[0])*0.05, ylims[1]+(ylims[1]-ylims[0])*0.2)
    ax[i].set_xticks([1.0, 2.0])
    ax[i].set_xticklabels(["Live", "Fixed"], fontsize=14)
    ax[i].set_ylabel(lbl, fontsize=14, labelpad=0)
    ax[i].set_yticklabels([int(n) for n in ax[i].get_yticks()], fontsize=14)
    # Jittered raw data points
    for j,y in enumerate(vio_data[i]):
        x = np.random.normal(j+1, 0.08, size=len(y))
        ax[i].plot(x, y, '.', color=['blue', 'midnightblue'][j], alpha=[0.1, 0.1][j], ms=2)
    # Print two-sided Mann-Whitney U p-value for live vs fixed
    print 'pMWU('+lbl+'):', stats.mannwhitneyu(*vio_data[i], alternative='two-sided')[1]
# Cosmetics
plt.tight_layout()
# Done
plt.show()
```
<a id=viz></a>
## 4. Data Visualization
[back to top](#top)
----
```
### Overlay of counts on shape spaces
# Set interactions
# ipywidgets sliders over the (1-based) PC indices and the colormap ceiling.
@widgets.interact(PCx=(1, fspace_TFOR_pca.shape[1], 1),
                  PCy=(1, fspace_TFOR_pca.shape[1], 1),
                  vmax_factor=(0.0, 1.0, 0.1))
# Show
def show_PCs(PCx=1, PCy=2, vmax_factor=0.5):
    """Scatter TFOR and CFOR shape-space PCs side by side, colored by RNA counts.

    PCx, PCy : 1-based PC indices for the x/y axes.
    vmax_factor : fraction of max(rna_counts) used as the colormap ceiling.
    """
    # Prep
    fig, ax = plt.subplots(1, 2, figsize=(12,5))
    # Plot TFOR
    # NOTE(review): edgecolor='' is rejected by newer matplotlib; 'none' is the
    # portable spelling -- confirm the matplotlib version pinned for this notebook.
    ax[0].scatter(fspace_TFOR_pca[:,PCx-1], fspace_TFOR_pca[:,PCy-1],
                  c=rna_counts, cmap=plt.cm.plasma,
                  vmax=vmax_factor*np.max(rna_counts),
                  s=20, edgecolor='', alpha=0.5)
    # Plot CFOR
    ax[1].scatter(fspace_CFOR_pca[:,PCx-1], fspace_CFOR_pca[:,PCy-1],
                  c=rna_counts, cmap=plt.cm.plasma,
                  vmax=vmax_factor*np.max(rna_counts),
                  s=20, edgecolor='', alpha=0.5)
    # Cosmetics
    ax[0].set_xlabel("PC "+str(PCx))
    ax[1].set_xlabel("PC "+str(PCx))
    ax[0].set_ylabel("PC "+str(PCy))
    ax[1].set_ylabel("PC "+str(PCy))
    ax[0].set_title("TFOR")
    ax[1].set_title("CFOR")
    # Done
    plt.tight_layout()
    plt.show()
### Tissue consensus map
# Smoothed spatial map of measured RNA counts on TFOR centroid positions,
# pooled across primordia.
# Note: This suffers a little because some prims are so weirdly angled in the images
#       that the TFOR transform didn't get them quite right.
# Settings
xlim = (-130, 8)
ylim = ( -19, 19)
# Exclude weirdly TFOR-ed prims (those with centroids of `x > 0`) for cleaner visualization
centroid_exclude_prim_mask = np.array([np.max(centroids[fspace_idx==prim_idx,-1])
                                       for prim_idx in range(len(prim_IDs))]) < 5
centroid_exclude_cell_mask = centroid_exclude_prim_mask[fspace_idx]
plot_exclude_cell_mask = rna_exclude_cell_mask & centroid_exclude_cell_mask
# Get plot values & remove outliers
plot_values = rna_counts[plot_exclude_cell_mask]
# Tools for smoothing on scatter
from katachi.utilities.pcl_helpers import pcl_gaussian_smooth
from scipy.spatial.distance import pdist, squareform
# Cut off at prim contour outline
# KDE over the (y, x) centroid positions; keep only cells above 10% of the
# density range, which trims the sparse outline of the pooled point cloud.
kernel_prim = stats.gaussian_kde(centroids[plot_exclude_cell_mask,1:].T)
f_prim = kernel_prim(centroids[plot_exclude_cell_mask,1:].T)
f_prim_mask = f_prim > f_prim.min() + (f_prim.max()-f_prim.min())*0.1
plot_values = plot_values[f_prim_mask]
plot_centroids = centroids[plot_exclude_cell_mask][f_prim_mask]
# Smoothen?
pdists = squareform(pdist(plot_centroids[:,1:]))
plot_values = pcl_gaussian_smooth(pdists, plot_values[:,np.newaxis], sg_percentile=0.5)[:,0]
# Initialize figure
fig, ax = plt.subplots(1, figsize=(8, 2.8))
# Contourf plot
cfset = ax.tricontourf(plot_centroids[:,2], plot_centroids[:,1], plot_values, 20,
                       cmap='plasma', vmax=20) # Note: vmax manually set for consistency across plots!
# Illustrative centroids from a single prim
# NOTE(review): c='' gives unfilled markers on old matplotlib but errors on
# newer versions; `prim_IDs.index(prim_IDs[12])` is just 12 unless prim_IDs
# contains duplicates -- confirm intent.
plt.scatter(centroids[fspace_idx==prim_IDs.index(prim_IDs[12]), 2],
            centroids[fspace_idx==prim_IDs.index(prim_IDs[12]), 1],
            c='', alpha=0.5)
# Cosmetics
ax.set_xlabel('TFOR x', fontsize=16)
ax.set_ylabel('TFOR y', fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=13)
plt.xlim(xlim); plt.ylim(ylim)
ax.invert_yaxis() # To match images
# Colorbar
cbar = plt.colorbar(cfset, ax=ax, pad=0.01)
cbar.set_label('RNA Counts', rotation=270, labelpad=15, fontsize=16)
cbar.ax.tick_params(labelsize=13)
# Finalize
plt.tight_layout()
# Done
plt.show()
```
<a id=atlas_test></a>
## 5. Predicting Expression from Shape: Testing
[back to top](#top)
----
```
### Settings, scoring & metrics
# General
use_PCs = 10     # number of leading PCs used as features
num_CVs = 5      # number of shuffle-split CV repeats
test_size = 0.3
# Shuffle split for CV
cv_sets = model_selection.ShuffleSplit(n_splits=num_CVs, test_size=test_size, random_state=42)
# Prepare CV scorers
scoring = {'explained_variance' : metrics.make_scorer(metrics.explained_variance_score),
           'mean_squared_error' : metrics.make_scorer(metrics.mean_squared_error),
           'r2_score' : metrics.make_scorer(metrics.r2_score)}
### Various prep of feature/target spaces
# Prepare counts by adding 2nd dim (shape (N,) -> (N, 1) for sklearn targets)
rna_counts_rdy = np.expand_dims(rna_counts, -1)
# Prepare location data by z-scoring
centroids_z = StandardScaler().fit_transform(centroids)
### Remove prims/cells that were excluded as outliers
# Prepare fspaces & counts by removing excluded prims and subselecting PCs
rna_counts_rdy = rna_counts_rdy[rna_exclude_cell_mask]
fspace_TFOR_pca_rdy = fspace_TFOR_pca[rna_exclude_cell_mask, :use_PCs]
fspace_CFOR_pca_rdy = fspace_CFOR_pca[rna_exclude_cell_mask, :use_PCs]
centroids_z_rdy = centroids_z[rna_exclude_cell_mask]
### Simple score reporting function
### Simple score reporting function
def report_score(scores, score_key):
    """Print mean +/- std of one cross-validation metric.

    Parameters
    ----------
    scores : dict
        Mapping as returned by `model_selection.cross_validate`,
        e.g. 'test_r2_score' -> array of per-fold values.
    score_key : str
        Key of the metric to summarize.
    """
    # FIX: parenthesized call form instead of the Python-2-only `print`
    # statement -- for a single argument this prints identically under
    # Python 2 and also runs on a Python 3 kernel.
    print("%s: %.3f +/- %.3f" % (score_key, np.mean(scores[score_key]), np.std(scores[score_key])))
```
#### Predicting expression from TFOR
```
### Prepare single train-test split for visualization
# Predict per-cell smFISH counts from the TFOR shape-space PCs with an RBF SVR.
# Split
out = model_selection.train_test_split(fspace_TFOR_pca_rdy, rna_counts_rdy,
                                       test_size=test_size, random_state=42)
X_train, X_test, y_train, y_test = out
# Report (Python 2 print statements)
print "Final source fspace (full, train, test):", fspace_TFOR_pca_rdy.shape, X_train.shape, X_test.shape
print "Final target fspace (full, train, test):", rna_counts_rdy.shape, y_train.shape, y_test.shape
# Hyperparameter screening for SVR
# Param grid
gd = 1.0 / X_test.shape[1]  # reference gamma: 1/n_features
param_grid = [{'C': [0.01, 0.1, 1.0, 10.0, 100.0],
               'epsilon': [0.01, 0.1, 0.5, 1.0],
               'gamma': [gd*10.0, gd, gd*0.1, gd*0.01]}]
# Prep regressor
svr = svm.SVR(kernel='rbf')
# Run grid search
clf = model_selection.GridSearchCV(svr, param_grid,
                                   cv=cv_sets, scoring=scoring['explained_variance'],
                                   n_jobs=6, verbose=2)
clf.fit(fspace_TFOR_pca_rdy, rna_counts_rdy.ravel())
# Report
print "Best estimator:", clf.best_estimator_
print "Best score:", clf.best_score_
# Use best estimator for cross validation
svr = clf.best_estimator_
scores = model_selection.cross_validate(svr, fspace_TFOR_pca_rdy, rna_counts_rdy,
                                        scoring=scoring, cv=cv_sets,
                                        return_train_score=True, n_jobs=num_CVs)
# Report CV scores
print('\nCV scores:')
report_score(scores, 'train_explained_variance')
report_score(scores, 'train_r2_score')
report_score(scores, 'train_mean_squared_error')
report_score(scores, 'test_explained_variance')
report_score(scores, 'test_r2_score')
report_score(scores, 'test_mean_squared_error')
### Regression Plot
# Single prediction on the held-out split for visualization
svr.fit(X_train, y_train.ravel())
y_train_pred = svr.predict(X_train)
y_test_pred = svr.predict(X_test)
# Prep plot
fig, ax = plt.subplots(1, 2, figsize=(6,3), sharey=True)
# Create plot
ax[0].scatter(y_train, y_train_pred, color='cyan', edgecolor='darkcyan', alpha=0.5)
ax[1].scatter(y_test, y_test_pred, color='cyan', edgecolor='darkcyan', alpha=0.5)
# Reference line (perfect-prediction diagonal)
max_count = rna_counts_rdy.max()
ax[0].plot([0,max_count], [0,max_count], '-', c='0.75', zorder=0)
ax[1].plot([0,max_count], [0,max_count], '-', c='0.75', zorder=0)
# Axis adjustments
ax[0].set_xlim([0, max_count])
ax[0].set_ylim([0, max_count])
ax[1].set_xlim([0, max_count])
ax[1].set_ylim([0, max_count])
# Labeling
ax[0].set_title('Training Data (TFOR)')
ax[0].set_xlabel('Ground Truth')
ax[0].set_ylabel('Predicted')
ax[1].set_title('Test Data (TFOR)')
ax[1].set_xlabel('Ground Truth')
# Done
plt.tight_layout()
plt.show()
```
#### Predicting expression from CFOR
```
### Prepare single train-test split for parametrization/visualization
# Same SVR pipeline as the TFOR section above, but using the CFOR shape-space PCs.
# Split
out = model_selection.train_test_split(fspace_CFOR_pca_rdy, rna_counts_rdy,
                                       test_size=test_size, random_state=42)
X_train, X_test, y_train, y_test = out
# Report (Python 2 print statements)
print "Final source fspace (full, train, test):", fspace_CFOR_pca_rdy.shape, X_train.shape, X_test.shape
print "Final target fspace (full, train, test):", rna_counts_rdy.shape, y_train.shape, y_test.shape
# Hyperparam screening for SVR
# Param grid
gd = 1.0 / X_test.shape[1]  # reference gamma: 1/n_features
param_grid = [{'C': [0.01, 0.1, 1.0, 10.0, 100.0],
               'epsilon': [0.01, 0.1, 0.5, 1.0],
               'gamma': [gd*10.0, gd, gd*0.1, gd*0.01]}]
# Prep regressor
svr = svm.SVR(kernel='rbf')
# Run grid search
clf = model_selection.GridSearchCV(svr, param_grid,
                                   cv=cv_sets, scoring=scoring['explained_variance'],
                                   n_jobs=6, verbose=2)
clf.fit(fspace_CFOR_pca_rdy, rna_counts_rdy.ravel())
# Report
print "Best estimator:", clf.best_estimator_
print "Best score:", clf.best_score_
# Use best estimator for cross validation
svr = clf.best_estimator_
scores = model_selection.cross_validate(svr, fspace_CFOR_pca_rdy, rna_counts_rdy,
                                        scoring=scoring, cv=cv_sets,
                                        return_train_score=True, n_jobs=num_CVs)
# Report CV scores
print('\nCV scores:')
report_score(scores, 'train_explained_variance')
report_score(scores, 'train_r2_score')
report_score(scores, 'train_mean_squared_error')
report_score(scores, 'test_explained_variance')
report_score(scores, 'test_r2_score')
report_score(scores, 'test_mean_squared_error')
### Regression Plot
# Single prediction on the held-out split for visualization
svr.fit(X_train, y_train.ravel())
y_train_pred = svr.predict(X_train)
y_test_pred = svr.predict(X_test)
# Prep plot
fig, ax = plt.subplots(1, 2, figsize=(6,3), sharey=True)
# Create plot
ax[0].scatter(y_train, y_train_pred, color='cyan', edgecolor='darkcyan', alpha=0.5)
ax[1].scatter(y_test, y_test_pred, color='cyan', edgecolor='darkcyan', alpha=0.5)
# Reference line (perfect-prediction diagonal)
max_count = rna_counts_rdy.max()
ax[0].plot([0,max_count], [0,max_count], '-', c='0.75', zorder=0)
ax[1].plot([0,max_count], [0,max_count], '-', c='0.75', zorder=0)
# Axis adjustments
ax[0].set_xlim([0, max_count])
ax[0].set_ylim([0, max_count])
ax[1].set_xlim([0, max_count])
ax[1].set_ylim([0, max_count])
# Labeling
ax[0].set_title('Training Data (CFOR)')
ax[0].set_xlabel('Ground Truth')
ax[0].set_ylabel('Predicted')
ax[1].set_title('Test Data (CFOR)')
ax[1].set_xlabel('Ground Truth')
# Done
plt.tight_layout()
plt.show()
```
#### Predicting expression from position
```
### Prepare single train-test split for parametrization/visualization
# Same SVR pipeline again, but with z-scored TFOR centroid positions as features.
# Split
out = model_selection.train_test_split(centroids_z_rdy, rna_counts_rdy,
                                       test_size=test_size, random_state=42)
X_train, X_test, y_train, y_test = out
# Report (Python 2 print statements)
print "Final source fspace (full, train, test):", centroids_z_rdy.shape, X_train.shape, X_test.shape
print "Final target fspace (full, train, test):", rna_counts_rdy.shape, y_train.shape, y_test.shape
# Hyperparam screening for SVR
# Param grid
gd = 1.0 / X_test.shape[1]  # reference gamma: 1/n_features
param_grid = [{'C': [0.01, 0.1, 1.0, 10.0, 100.0],
               'epsilon': [0.01, 0.1, 0.5, 1.0],
               'gamma': [gd*10.0, gd, gd*0.1, gd*0.01]}]
# Prep regressor
svr = svm.SVR(kernel='rbf')
# Run grid search
clf = model_selection.GridSearchCV(svr, param_grid,
                                   cv=cv_sets, scoring=scoring['explained_variance'],
                                   n_jobs=6, verbose=2)
clf.fit(centroids_z_rdy, rna_counts_rdy.ravel())
# Report
print "Best estimator:", clf.best_estimator_
print "Best score:", clf.best_score_
# Use best estimator for cross validation
svr = clf.best_estimator_
scores = model_selection.cross_validate(svr, centroids_z_rdy, rna_counts_rdy,
                                        scoring=scoring, cv=cv_sets,
                                        return_train_score=True, n_jobs=num_CVs)
# Report CV scores
print('\nCV scores:')
report_score(scores, 'train_explained_variance')
report_score(scores, 'train_r2_score')
report_score(scores, 'train_mean_squared_error')
report_score(scores, 'test_explained_variance')
report_score(scores, 'test_r2_score')
report_score(scores, 'test_mean_squared_error')
### Regression Plot
# Single prediction on the held-out split for visualization
svr.fit(X_train, y_train.ravel())
y_train_pred = svr.predict(X_train)
y_test_pred = svr.predict(X_test)
# Prep plot
fig, ax = plt.subplots(1, 2, figsize=(6,3), sharey=True)
# Create plot
ax[0].scatter(y_train, y_train_pred, color='cyan', edgecolor='darkcyan', alpha=0.5)
ax[1].scatter(y_test, y_test_pred, color='cyan', edgecolor='darkcyan', alpha=0.5)
# Reference line (perfect-prediction diagonal)
max_count = rna_counts_rdy.max()
ax[0].plot([0,max_count], [0,max_count], '-', c='0.75', zorder=0)
ax[1].plot([0,max_count], [0,max_count], '-', c='0.75', zorder=0)
# Axis adjustments
ax[0].set_xlim([0, max_count])
ax[0].set_ylim([0, max_count])
ax[1].set_xlim([0, max_count])
ax[1].set_ylim([0, max_count])
# Labeling
ax[0].set_title('Training Data (Location)')
ax[0].set_xlabel('Ground Truth')
ax[0].set_ylabel('Predicted')
ax[1].set_title('Test Data (Location)')
ax[1].set_xlabel('Ground Truth')
# Done
plt.tight_layout()
plt.show()
```
#### Predicting expression from TFOR+CFOR+position
```
### Prep combined data
# Concatenate TFOR PCs, CFOR PCs and z-scored centroid positions into one
# feature matrix; same SVR pipeline as the sections above.
# Combine
fspace_combined = np.concatenate([fspace_TFOR_pca_rdy, fspace_CFOR_pca_rdy, centroids_z_rdy], axis=1)
### Prepare single train-test split for parametrization/visualization
# Split
out = model_selection.train_test_split(fspace_combined, rna_counts_rdy,
                                       test_size=test_size, random_state=42)
X_train, X_test, y_train, y_test = out
# Report (Python 2 print statements)
print "Final source fspace (full, train, test):", fspace_combined.shape, X_train.shape, X_test.shape
print "Final target fspace (full, train, test):", rna_counts_rdy.shape, y_train.shape, y_test.shape
# Hyperparam screening for SVR
# Param grid
gd = 1.0 / X_test.shape[1]  # reference gamma: 1/n_features
param_grid = [{'C': [0.01, 0.1, 1.0, 10.0, 100.0],
               'epsilon': [0.01, 0.1, 0.5, 1.0],
               'gamma': [gd*10.0, gd, gd*0.1, gd*0.01]}]
# Prep regressor
svr = svm.SVR(kernel='rbf')
# Run grid search
clf = model_selection.GridSearchCV(svr, param_grid,
                                   cv=cv_sets, scoring=scoring['explained_variance'],
                                   n_jobs=6, verbose=2)
clf.fit(fspace_combined, rna_counts_rdy.ravel())
# Report
print "Best estimator:", clf.best_estimator_
print "Best score:", clf.best_score_
# Use best estimator for cross validation
svr = clf.best_estimator_
scores = model_selection.cross_validate(svr, fspace_combined, rna_counts_rdy,
                                        scoring=scoring, cv=cv_sets,
                                        return_train_score=True, n_jobs=num_CVs)
# Report CV scores
print('\nCV scores:')
report_score(scores, 'train_explained_variance')
report_score(scores, 'train_r2_score')
report_score(scores, 'train_mean_squared_error')
report_score(scores, 'test_explained_variance')
report_score(scores, 'test_r2_score')
report_score(scores, 'test_mean_squared_error')
### Regression Plot
# Single prediction on the held-out split for visualization
svr.fit(X_train, y_train.ravel())
y_train_pred = svr.predict(X_train)
y_test_pred = svr.predict(X_test)
# Prep plot
fig, ax = plt.subplots(1, 2, figsize=(6,3), sharey=True)
# Create plot
ax[0].scatter(y_train, y_train_pred, color='cyan', edgecolor='darkcyan', alpha=0.5)
ax[1].scatter(y_test, y_test_pred, color='cyan', edgecolor='darkcyan', alpha=0.5)
# Reference line (perfect-prediction diagonal)
max_count = rna_counts_rdy.max()
ax[0].plot([0,max_count], [0,max_count], '-', c='0.75', zorder=0)
ax[1].plot([0,max_count], [0,max_count], '-', c='0.75', zorder=0)
# Axis adjustments
ax[0].set_xlim([0, max_count])
ax[0].set_ylim([0, max_count])
ax[1].set_xlim([0, max_count])
ax[1].set_ylim([0, max_count])
# Labeling
ax[0].set_title('Training Data (COMBINED)')
ax[0].set_xlabel('Ground Truth')
ax[0].set_ylabel('Predicted')
ax[1].set_title('Test Data (COMBINED)')
ax[1].set_xlabel('Ground Truth')
# Done
plt.tight_layout()
plt.show()
# Pretty regression plot for publication
# Same prediction as the plot above, restyled for the figure panel.
# Single prediction
svr.fit(X_train, y_train.ravel())
y_train_pred = svr.predict(X_train)
y_test_pred = svr.predict(X_test)
# Prep plot
fig, ax = plt.subplots(1, 2, figsize=(6, 3.2), sharey=True)
# Create plot
ax[0].scatter(y_train, y_train_pred,
              color='midnightblue', edgecolor='', alpha=0.3, s=5)
ax[1].scatter(y_test, y_test_pred,
              color='midnightblue', edgecolor='', alpha=0.3, s=5)
# Reference line (perfect-prediction diagonal)
max_count = rna_counts_rdy.max()
ax[0].plot([0,max_count], [0,max_count], '-', c='0.75', zorder=0)
ax[1].plot([0,max_count], [0,max_count], '-', c='0.75', zorder=0)
# Crop off and add cropped points back as arrows
crop = 60
if np.any(y_train_pred>crop) or np.any(y_test_pred>crop):
    raise ValueError('Some predicted values are higher than `crop`!')
# FIX: use `crop` instead of the hard-coded 60 in the threshold tests below;
# the original mixed both, so changing `crop` silently broke the arrows.
ax[0].scatter([crop-0.5 for i in range(np.sum(y_train[:,0]>crop))], y_train_pred[y_train[:,0]>crop],
              color='midnightblue', edgecolor='', alpha=0.5, s=10, marker='>')
ax[1].scatter([crop-0.5 for i in range(np.sum(y_test[:,0]>crop))], y_test_pred[y_test[:,0]>crop],
              color='midnightblue', edgecolor='', alpha=0.5, s=10, marker='>')
# Axis adjustments
ax[0].set_xlim([0, crop])
ax[0].set_ylim([0, crop])
ax[1].set_xlim([0, crop])
ax[1].set_ylim([0, crop])
# Axis cosmetics
ax[0].yaxis.set_ticks_position('left')
ax[0].xaxis.set_ticks_position('bottom')
ax[1].yaxis.set_ticks_position('left')
ax[1].xaxis.set_ticks_position('bottom')
# Labeling & other cosmetics
ax[0].set_title('Training Data')
ax[0].set_xlabel('$\it{pea3}$ counts (ground truth)')
ax[0].set_ylabel('$\it{pea3}$ counts (predicted)')
ax[1].set_title('Test Data')
ax[1].set_xlabel('$\it{pea3}$ counts (ground truth)')
plt.tight_layout()
# Done
plt.show()
```
<a id=atlas_run></a>
## 6. Predicting Expression from Shape: Running
[back to top](#top)
----
```
### Load and prepare full live-imaged shape space
# Prep loader
expA_loader = ld.DataLoaderIDR()
expA_loader.find_imports(r"data/experimentA/extracted_measurements/", recurse=True, verbose=True)
# Import shape spaces
expA_TFOR_pca, expA_IDs, expA_idx = expA_loader.load_dataset("shape_TFOR_pca_measured.tsv")
expA_CFOR_pca, _, _ = expA_loader.load_dataset("shape_CFOR_pca_measured.tsv", IDs=expA_IDs)
print "Imported TFOR shape space of shape:", expA_TFOR_pca.shape
print "Imported CFOR shape space of shape:", expA_CFOR_pca.shape
# Import TFOR centroid locations
# Columns 3:6 hold the centroid; [:, ::-1] reverses the axis order --
# presumably (z, y, x) <-> (x, y, z); TODO confirm against the tsv layout.
expA_centroids = expA_loader.load_dataset("_other_measurements.tsv", IDs=expA_IDs)[0][:,3:6][:,::-1]
print "Imported TFOR centroids of shape:", expA_centroids.shape
expA_centroids_z = StandardScaler().fit_transform(expA_centroids)
# Combine: mirror the feature layout of `fspace_combined` (TFOR PCs | CFOR PCs | centroids)
expA_combined = np.concatenate([expA_TFOR_pca[:,:use_PCs], expA_CFOR_pca[:,:use_PCs], expA_centroids_z], axis=1)
# Report
print expA_TFOR_pca.shape, expA_CFOR_pca.shape, expA_centroids_z.shape, expA_combined.shape
### Run best possible smFISH count prediction for entire atlas
# Prepare the best regressor
# NOTE(review): hyperparameters are hard-coded from the earlier grid search;
# gamma depends on whichever `X_test` was assigned last above (the combined
# split, 23 features), so this cell is sensitive to execution order -- verify.
svr = svm.SVR(kernel='rbf', C=10.0, epsilon=0.01, gamma = 1.0 / X_test.shape[1] * 0.1)
# Train based on entire smFISH dataset
svr.fit(fspace_combined, rna_counts_rdy.ravel())
# Predict for entire atlas
expA_counts = svr.predict(expA_combined)
# Set the occasional negative count to zero
expA_counts[expA_counts < 0.0] = 0.0
```
<a id=atlas_viz></a>
## 7. Predicting Expression from Shape: Visualization
[back to top](#top)
----
```
### QC: Compare predicted atlas counts to measured counts
# Note:
#   This looks quite good. The prediction obviously doesn't capture the long
#   tail of the real measurements, which also pulls the overall average down
#   a bit. This was to be expected and may not even be wrong.
# Get count means
# Per-primordium means: measured (smFISH) vs predicted (atlas)
count_means = np.array([np.mean(rna_counts[fspace_idx==prim_idx])
                        for prim_idx in range(len(prim_IDs))])
expA_means = np.array([np.mean(expA_counts[expA_idx==prim_idx])
                       for prim_idx in range(len(expA_IDs))])
# Fig prep
fig, ax = plt.subplots(1, 2, figsize=(6, 4.5), sharey=True)
# Make boxplots: left panel = sample means, right panel = all single cells
bp_m = ax[0].boxplot([count_means, expA_means], widths=0.65, patch_artist=True, showfliers=False)
bp_a = ax[1].boxplot([rna_counts, expA_counts], widths=0.65, patch_artist=True, showfliers=False)
# Boxplot styling function (making it similar to Sevi's paper)
def style_boxplot(bp):
    """Restyle a matplotlib boxplot in place: black lines, 1.2 pt widths.

    bp : dict as returned by `Axes.boxplot` -- must contain the keys
         'boxes', 'whiskers', 'caps' and 'medians'.
    """
    # Table of artist-group -> properties, applied in the same order as before.
    styling = [('boxes',    dict(edgecolor='black', linewidth=1.2)),
               ('whiskers', dict(color='black', linestyle='-')),
               ('caps',     dict(linewidth=1.2)),
               ('medians',  dict(color='black', linewidth=1.2))]
    for element, props in styling:
        for artist in bp[element]:
            artist.set(**props)
# Style the boxplots
style_boxplot(bp_m)
style_boxplot(bp_a)
# Add scatter: jittered raw points on top of the boxes (zorder=10 keeps them above)
ax[0].scatter(np.random.normal(1.0, 0.06, len(count_means)), count_means,
              zorder=10, s=20, alpha=0.7, c='midnightblue', edgecolor='')
ax[0].scatter(np.random.normal(2.0, 0.08, len(expA_means)), expA_means,
              zorder=10, s=20, alpha=0.3, c='purple', edgecolor='')
ax[1].scatter(np.random.normal(1.0, 0.06, len(rna_counts)), rna_counts,
              zorder=10, s=2, alpha=0.2, c='midnightblue', edgecolor='')
ax[1].scatter(np.random.normal(2.0, 0.10, len(expA_counts)), expA_counts,
              zorder=10, s=2, alpha=0.05, c='purple', edgecolor='')
# Add arrows for outliers
# Measured counts above `crop` are re-drawn as up-arrows at the top edge;
# the predicted counts are asserted to all lie below the crop line.
crop = 50
ax[1].scatter(np.random.normal(1.0, 0.06, np.sum(rna_counts>crop)),
              [crop-0.5 for i in range(np.sum(rna_counts>crop))],
              color='midnightblue', edgecolor='', alpha=0.2, s=10, marker='^')
if np.any(expA_counts > crop): raise ValueError()
# Set axis limits (sharey=True propagates this to both panels)
ax[0].set_ylim([-2, crop])
# Remove axis ticks
ax[0].yaxis.set_ticks_position('left')
ax[0].xaxis.set_ticks_position('bottom')
ax[1].yaxis.set_ticks_position('left')
ax[1].xaxis.set_ticks_position('bottom')
# Axis labels
from matplotlib import rcParams
rcParams['mathtext.default'] = 'regular'
ax[0].set_ylabel(r'$\it{pea3}$ transcripts per cell', fontsize=16, labelpad=5)
ax[0].set_title('sample means', fontsize=16)
ax[1].set_title('all cells', fontsize=16)
ax[0].set_xticklabels(['smFISH', 'atlas'], fontsize=14)
ax[1].set_xticklabels(['smFISH', 'atlas'], fontsize=14)
ax[0].tick_params(axis='y', which='major', labelsize=14)
plt.tight_layout()
# Print stats (Python 2 print statements; two-sided Mann-Whitney U p-values)
print 'pMWU(means):', stats.mannwhitneyu(count_means, expA_means, alternative='two-sided')[1]
print 'pMWU(all):', stats.mannwhitneyu(rna_counts, expA_counts, alternative='two-sided')[1]
# Show
plt.show()
### Atlas tissue consensus map
# Same construction as the measured consensus map above, but using the
# predicted counts over the full live-imaged atlas.
# Settings
xlim = (-130, 8)
ylim = ( -19, 19)
# Get plot values & remove outliers
plot_values = expA_counts
# Tools for smoothing on scatter
from katachi.utilities.pcl_helpers import pcl_gaussian_smooth
from scipy.spatial.distance import pdist, squareform
# Cut off at prim contour outline (keep cells above 10% of the KDE density range)
kernel_prim = stats.gaussian_kde(expA_centroids[:,1:].T)
f_prim = kernel_prim(expA_centroids[:,1:].T)
f_prim_mask = f_prim > f_prim.min() + (f_prim.max()-f_prim.min())*0.1
plot_values = plot_values[f_prim_mask]
plot_centroids = expA_centroids[f_prim_mask]
# Smoothen?
pdists = squareform(pdist(plot_centroids[:,1:]))
plot_values = pcl_gaussian_smooth(pdists, plot_values[:,np.newaxis], sg_percentile=0.5)[:,0]
# Initialize figure
fig, ax = plt.subplots(1, figsize=(8, 2.8))
# Contourf plot
cfset = ax.tricontourf(plot_centroids[:,2], plot_centroids[:,1], plot_values, 20,
                       cmap='plasma', vmax=20) # NOTE: vmax set to be consistent with measured plot!
# Illustrative centroids from a single prim
# NOTE(review): c='' relies on old matplotlib accepting the empty color;
# `expA_IDs.index(expA_IDs[0])` is just 0 unless expA_IDs has duplicates.
plt.scatter(expA_centroids[expA_idx==expA_IDs.index(expA_IDs[0]), 2],
            expA_centroids[expA_idx==expA_IDs.index(expA_IDs[0]), 1],
            c='', alpha=0.5)
# Cosmetics
ax.set_xlabel('TFOR x', fontsize=16)
ax.set_ylabel('TFOR y', fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=13)
plt.xlim(xlim); plt.ylim(ylim)
ax.invert_yaxis() # To match images
# Colorbar
cbar = plt.colorbar(cfset, ax=ax, pad=0.01)
cbar.set_label('RNA Counts', rotation=270, labelpad=15, fontsize=16)
cbar.ax.tick_params(labelsize=13)
# Finalize
plt.tight_layout()
plt.show()
```
----
[back to top](#top)
| github_jupyter |
# Heart disease classification
## USING SUPPORT VECTOR MACHINE (SVM)
### IMPORTING THE LIBRARIES
```
#importing the libraries.....
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
### IMPORTING THE DATASET
```
#Reading the dataset
ds=pd.read_csv('heart.csv')
print(ds)
# NOTE(review): in a plain script these two return values are discarded;
# they only render when they are the last expression of a notebook cell.
ds.head()
ds.describe()
#splitting the dataset into independent and dependent variables
# Last column ('target') is the label; everything else is a feature.
X = ds.iloc[:,:-1].values
y = ds.iloc[:,-1].values
print(X)
print(y)
```
### FEATURE SCALING
```
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Z-score all features.
# NOTE(review): the scaler is fit on the full dataset before the train/test
# split, which leaks test-set statistics into training -- verify intent.
X = sc.fit_transform(X)
print(X)
```
### SPLITTING THE DATASET INTO TRAINING SET AND TEST SET
```
from sklearn.model_selection import train_test_split
X_train,X_test ,y_train, y_test = train_test_split(X, y, test_size=0.25,random_state=5)
```
### CREATING THE MODEL
```
from sklearn.svm import SVC
# Linear-kernel support vector classifier
classifier = SVC(kernel = 'linear' , random_state = 1)
clf = classifier.fit(X_train , y_train)
y_pred=classifier.predict(X_test)
# Show predictions next to ground truth, column-wise
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
#confusion matrix is used to check how many datapoints are predicted exactly
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test,y_pred)
print(cm)
print(round(accuracy_score(y_test,y_pred) , 2))
```
### CONFUSION MATRIX
```
# NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2;
# newer versions require ConfusionMatrixDisplay.from_estimator instead.
from sklearn.metrics import plot_confusion_matrix
a = plot_confusion_matrix(clf , X_test , y_test)
plt.show()
```
## Using K-Nearest Neighbor Classifier (K-NN)
### IMPORTING DATASET
```
X = ds.iloc[:,:-1].values
# NOTE(review): column 13 is the last column, so this matches the
# `iloc[:,-1]` used elsewhere -- confirm heart.csv has 14 columns.
y = ds.iloc[:,13].values
```
### Splitting data
```
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.25, random_state= 0)
```
### Normalize the data
```
# Fit the scaler on the training set only, then apply it to the test set
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
```
### Accuracy based on K values
```
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
# minkowski with p=2 is the Euclidean distance
classifier = KNeighborsClassifier(n_neighbors = 9, metric = 'minkowski', p = 2)
classifier = classifier.fit(X_train,y_train)
#prediction
y_pred = classifier.predict(X_test)
#check accuracy
accuracy = metrics.accuracy_score(y_test, y_pred)
print('Accuracy: {:.2f}'.format(accuracy))
```
### Confusion Matrix
```
#confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# Heatmap rendering of the confusion matrix
import seaborn as sns
sns.heatmap(cm,annot=True,cmap="YlOrRd")
plt.show()
```
## Using SVM WITH PCA
### Extracting x and y
```
X=ds.iloc[:,:-1].values
y=ds.iloc[:,-1].values
```
### STANDARDIZING 'X'
```
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
# NOTE(review): again fit on the full dataset before splitting (leakage).
X=sc.fit_transform(X)
print(X)
```
### Splitting the dataset into train and test data
```
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=4)
```
### Applying the PCA
```
from sklearn.decomposition import PCA
# Keep the first two principal components; fit on train, project test
pca=PCA(n_components=2)
X_train=pca.fit_transform(X_train)
X_test=pca.transform(X_test)
explained_variance=pca.explained_variance_ratio_
print(explained_variance)
```
### Training the svm model on training set
```
from sklearn.svm import SVC
classifier=SVC(kernel='linear',random_state=0)
classifier.fit(X_train,y_train)
```
### Predicting the Test Results
```
from sklearn import metrics
y_pred=classifier.predict(X_test)
print(y_pred)
```
### Calculating the Accuracy
```
accuracy=metrics.accuracy_score(y_test,y_pred)
print('Accuracy: {:.2f}'.format(accuracy))
```
### Creating the Confusion Matrix
```
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred)
print(cm)
```
### Plotting the Confusion Matrix
```
# Heatmap rendering of the confusion matrix
import seaborn as sns
sns.heatmap(cm,annot=True,cmap="YlOrRd")
plt.show()
```
## Using KNN with PCA
### Extracting x and y
```
X=ds.iloc[:,:-1].values
y=ds.iloc[:,13].values
```
### STANDARDIZING 'X'
```
from sklearn.preprocessing import StandardScaler
sc= StandardScaler()
X=sc.fit_transform(X)
```
## SPLITTING THE DATASET INTO TRAIN AND TEST DATA
```
from sklearn.model_selection import train_test_split
# NOTE(review): split ratios differ across sections (0.25 / 0.2 / 0.15),
# so the reported accuracies are not directly comparable.
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.15,random_state=0)
```
### APPLYING 'PCA'
```
from sklearn.decomposition import PCA
# Keep the first two principal components; fit on train, project test
pca=PCA(n_components=2)
X_train=pca.fit_transform(X_train)
X_test=pca.transform(X_test)
explained_variance=pca.explained_variance_ratio_
print(explained_variance)
```
### TRAINING THE K-NN MODEL ON TRAINING SET
```
from sklearn.neighbors import KNeighborsClassifier
# minkowski with p=2 is the Euclidean distance; k=6 here vs k=9 above
clas=KNeighborsClassifier(n_neighbors =6,metric='minkowski',p=2)
clas.fit(X_train,y_train)
```
### PREDICTING THE TEST RESULTS
```
y_pred=clas.predict(X_test)
# Show predictions next to ground truth, column-wise
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
```
### CONFUSION MATRIX AND ACCURACY
```
from sklearn.metrics import confusion_matrix,accuracy_score
cm = confusion_matrix(y_test,y_pred)
print(cm)
# NOTE(review): return value discarded in a script; only a notebook displays it.
accuracy_score(y_test,y_pred)
```
### PLOT OF CONFUSION MATRIX
```
import seaborn as sns
sns.heatmap(cm,annot=True,cmap="YlOrRd")
plt.show()
```
## BAR PLOT FOR COUNT OF PEOPLE DISEASED AND NOT DISEASED
```
# Bar chart of class balance (target: 0 = no disease, 1 = disease)
import seaborn as sns
sns.countplot(x = 'target' , data = ds)
plt.show()
```
## SCATTER PLOT BETWEEN AGE AND MAX. HEART RATE
```
# Age vs max heart rate (thalach), split by disease status;
# legend order matches the scatter call order (target==1 first).
plt.scatter(x=ds.age[ds.target==1], y=ds.thalach[(ds.target==1)], c="yellow")
plt.scatter(x=ds.age[ds.target==0], y=ds.thalach[(ds.target==0)], c = 'red')
plt.legend(["Disease", "No Disease"])
plt.xlabel("Age")
plt.ylabel("Maximum Heart Rate")
plt.show()
```
## COUNT OF MALE AND FEMALE
```
# Grouped bars: disease frequency per sex (crosstab of sex vs target)
pd.crosstab(ds.sex,ds.target).plot(kind="bar",figsize=(10,5),color=['#1CA53B','#EE0000'])
plt.title('Heart Disease Frequency for Sex')
plt.xlabel('Sex (0 = Female,1 = Male)')
plt.xticks(rotation=0 )
plt.legend(["No Disease", " Disease "])
plt.ylabel("Frequency")
plt.show()
```
| github_jupyter |
```
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import os
import csv
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
from sklearn.ensemble import ExtraTreesRegressor
import math
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import time
from sklearn.metrics import r2_score
np.random.seed(7)
from torchvision import transforms, utils
import torch.nn.functional as F
# Load simulated thermal-loss targets for the surrogate models.
solar_gains = pd.read_csv(r'C:\Users\jrv966\Documents\GitHub\surrogate_models\data\energy_demands\thermal_losses.csv')
# inputs_solar = pd.read_csv(r'C:\Users\jrv966\Documents\GitHub\surrogate_models\data\inputs\inputs_thermal.csv')
#Copying data to numpy arrays
# X_train = np.array(inputs_solar.loc[:,['height',' Ta','perimeter','area','t_sp', 'n_inf','walls_uvalue', 'floor_uvalue', 'roof_uvalue','windows_uvalue','walls_glazing_ratio']].values)
# X_train = np.array(inputs_solar.loc[:,['G_Dh','G_Bn','RR','N','height','perimeter','aspect_ratio','walls_glazing_ratio','gvalue','blinds_cutoff']].values)
y_train = np.array(solar_gains.loc[:,'Qs-Qi(Wh)':].values)
#X_train = np.array(inputs_solar.loc[:,'G_Dh':].values)
# X_train = np.array(inputs_solar.loc[:,['G_Dh','G_Bn','RR','N','height','perimeter','aspect_ratio','walls_glazing_ratio','gvalue','blinds_cutoff']].values)
# y_train = np.array(solar_gains.loc[:,'Qs-Qi(Wh)':].values)
# Release the dataframes to free memory
solar_gains = None
inputs_solar = None
# NOTE(review): every line that defines X_train is commented out above, so
# the next statement raises NameError unless X_train is created elsewhere.
#Scaling the data (substracting mean and dividing by the standard deviation)
X_train = np.divide((X_train-X_train.mean(axis=0)),(X_train.std(axis=0)))
#SPLIT DATA FROM THE ORIGINAL TRAINING DATA INTO TEST AND TRAINING SET
# NOTE(review): test_size=0 keeps everything in the train split, and
# X_test/y_test are never defined in this notebook yet are used below.
X_train, _, y_train, _ = train_test_split(X_train, y_train, test_size=0)
#LINEAR REGRESSION - USED AS BASELINE
linear_model = LinearRegression().fit(X_train,y_train)
y_pred = linear_model.predict(X_test)
mean_squared_error(y_test,y_pred)
#Extremely randomized trees
# y_train = np.reshape(y_train,(y_train.shape[0],1))
#Fit the model and keep track of the training time
# NOTE(review): max_features=15 requires the input to have >= 15 features --
# confirm against the selected input columns above.
start = time.time()
etr = ExtraTreesRegressor(n_estimators = 20,max_features = 15, min_samples_split=2, n_jobs=10).fit(X_train, y_train.ravel())
end = time.time()
print(end - start)
#Evaluate the error on the test set
y_pred_etr = etr.predict(X_test)
mse_ert = mean_squared_error(y_test,y_pred_etr)
r_score = r2_score(y_test,y_pred_etr)
from sklearn import tree
#Fit the model and keep track of the training time
start = time.time()
decision_tree = tree.DecisionTreeRegressor()
decision_tree = decision_tree.fit(X_train, y_train.ravel())
end = time.time()
print(end - start)
# treeinterpreter decomposes each prediction into bias + per-feature contributions
from treeinterpreter import treeinterpreter as ti
dt_reg_pred, dt_reg_bias, dt_reg_contrib = ti.predict(decision_tree, X_test)
#Evaluate the error on the test set
y_pred_dt = decision_tree.predict(X_test)
mse_dt = mean_squared_error(y_test,y_pred_dt)
r_score_dt = r2_score(y_test,y_pred_dt)
r_score_dt
# import pickle
# from sklearn.externals import joblib
# pkl_filename = r'C:\Users\jrv966\Documents\GitHub\surrogate_models\results\ml_models\extremely_rand_trees.pkl'
# # Save the model as a pickle in a file
# joblib.dump(etr, pkl_filename)
# Load the model from the file
# knn_from_joblib = joblib.load('filename.pkl')
# Use the loaded model to make predictions
# knn_from_joblib.predict(X_test)
class Net(nn.Module):
    """Small MLP surrogate with one residual skip over the first hidden layer.

    The skip connection (`+ x` after the first block) requires the first
    hidden width (11) to equal the number of input features, i.e.
    input_shape[1] must be 11.
    """

    def __init__(self, input_shape, output_shape):
        super(Net, self).__init__()
        # Layer sizes: in -> 11 -> 6 -> out, with batch-norm after each hidden layer.
        self.fc1 = nn.Linear(input_shape[1], 11)
        self.bn1 = nn.BatchNorm1d(num_features=11)
        self.fc2 = nn.Linear(11, 6)
        self.bn2 = nn.BatchNorm1d(num_features=6)
        self.fc3 = nn.Linear(6, output_shape[1])
        # p=0.0 makes dropout a no-op; kept as a tunable regularization knob.
        self.Dropout = nn.Dropout(p=0.0)

    def forward(self, x):
        skip = x
        hidden = self.Dropout(F.relu(self.bn1(self.fc1(x))))
        hidden = hidden + skip
        hidden = self.Dropout(F.relu(self.bn2(self.fc2(hidden))))
        return self.fc3(hidden)
# ---- Train the Net defined above with Adam + MSE loss --------------------
device = 'cpu'
net = Net(X_train.shape, y_train.shape).to(device)
optimizer = optim.Adam(net.parameters(), lr=0.02, weight_decay=1e-5)
criterion = torch.nn.MSELoss()
X_train_torch = torch.tensor(X_train, device='cpu').float()
y_train_torch = torch.tensor(y_train, device='cpu').float()
torch.manual_seed(0)
epochs = 60
iterations = 1000  # mini-batches per epoch
lambda1 = 60000    # L1 weight (regularizer currently disabled in the loss below)
batch_size = int(y_train.shape[0]/iterations)
start = time.time()
for epoch in range(epochs):
    # Fresh random permutation of all sample indices each epoch.
    batch_indexes = np.random.choice(X_train.shape[0], X_train.shape[0], replace=False)
    for i in range(iterations):
        if i == iterations-1:
            # Last batch takes every remaining sample.
            # BUG FIX: was `[i*batch_size:-1]`, which silently dropped the
            # final shuffled sample from every epoch.
            X_train_batch = X_train_torch[batch_indexes[i*batch_size:]]
            y_train_batch = y_train_torch[batch_indexes[i*batch_size:]]
        else:
            X_train_batch = X_train_torch[batch_indexes[i*batch_size:i*batch_size+batch_size]]
            y_train_batch = y_train_torch[batch_indexes[i*batch_size:i*batch_size+batch_size]]
        optimizer.zero_grad()
        y_pred = net(X_train_batch)
        # L1 norm of all parameters; computed but intentionally not added to
        # the loss (see commented term) — kept for experimentation.
        regularization_loss = 0
        for param in net.parameters():
            regularization_loss += (torch.sum(torch.abs(param)))
        loss = criterion(y_pred, y_train_batch) # + lambda1*regularization_loss
        loss.backward()
        optimizer.step()
    # Checkpoint the model after every epoch.
    PATH = r'C:\Users\jrv966\Documents\GitHub\surrogate_models\results\ml_models\thermal\dnn'+str(epoch)
    torch.save(net.state_dict(), PATH)
    print(epoch, loss.item(), r2_score(y_train_batch.data.numpy(),y_pred.data.numpy()), (time.time() - start)/60)
# Scratch estimates comparing the L1 magnitude to the MSE loss (for tuning lambda1).
regularization_loss = 0
for param in net.parameters():
    regularization_loss += (torch.sum(torch.abs(param)))
0.2*1091034752.0/regularization_loss
regularization_loss
loss
0.02*2.4482e+11/1188.43
```
| github_jupyter |
## Recurrent neural network with an LSTM unit
```
import numpy as np
import pandas as pd
import gensim
import sklearn
from keras.models import Sequential
from keras.layers import LSTM, Dense, Activation, Embedding, Input, TimeDistributed, Dropout, Masking
from keras.optimizers import RMSprop
# hyperparameters — these globals are read by generate(), the model
# definition, and lemmatize() below.
B = 50 # batch size
R = 300 # rnn size
S = 4 # max_sequence len
E = 300 # embedding size
```
#### reading and preparing data for training and evaluation
```
# retrieve fastText embeddings
# vec_model = gensim.models.fasttext.FastText.load_fasttext_format('fasttext/cc.fi.300.bin')
# Load 300-d English fastText vectors (word2vec text format); limit=250000
# caps the vocabulary to bound memory.
vec_model = gensim.models.KeyedVectors.load_word2vec_format('fasttext/crawl-300d-2M.vec', limit=250000)
# generate training and validation examples for the network
def generate(path, line_limit=13000, mode='train'):
    """Stream batches of embedded (word, lemma) pairs from a CoNLL-U file.

    Yields tuples (x, y, word_seqs, lemma_seqs): x/y are (B, S, E) arrays of
    word-form embeddings (inputs) and lemma embeddings (targets);
    word_seqs/lemma_seqs hold the corresponding surface strings for
    evaluation.  mode='train' slides a one-token overlapping window over each
    sentence; any other mode consumes disjoint S-token windows.  Reading
    stops after line_limit lines.  Relies on module globals B, S, E and
    vec_model.
    """
    with open(path, 'r', encoding='utf8') as src:
        word_count = 0
        line_number = 0
        x = np.zeros((B, S, E))
        y = np.zeros((B, S, E))
        word_seqs = [None for _ in range(B)]
        lemma_seqs = [None for _ in range(B)]
        word_seq = []
        lemma_seq = []
        x_seq = []
        y_seq = []
        i = 0
        for line in src:
            line_number += 1
            if line_number > line_limit:
                return
            if len(x_seq) == S and len(y_seq) == S:
                # A full S-token window is ready: copy it into the batch.
                x[i] = np.array(x_seq)
                y[i] = np.array(y_seq)
                word_seqs[i] = word_seq[:]
                lemma_seqs[i] = lemma_seq[:]
                if mode == 'train':
                    # slide the window forward by one token (overlapping)
                    x_seq.pop(0)
                    y_seq.pop(0)
                    word_seq.pop(0)
                    lemma_seq.pop(0)
                else:
                    # evaluation: consume disjoint windows
                    x_seq = []
                    y_seq = []
                    word_seq = []
                    lemma_seq = []
                i += 1
                if i >= B:
                    yield x, y, word_seqs, lemma_seqs
                    x = np.zeros((B, S, E))
                    y = np.zeros((B, S, E))
                    word_seqs = [None for _ in range(B)]
                    lemma_seqs = [None for _ in range(B)]
                    i = 0
                word_count += S
            if len(line) > 2 and line[0] != '#':
                values = line.split()
                if '-' not in values[0]:  # skip multiword-token range lines
                    try:
                        word = vec_model[values[1]]
                        lemma_vec = vec_model[values[2]]
                    except (KeyError, IndexError):
                        # OOV token (or malformed short line): fall back to
                        # zero vectors, which the model's Masking layer skips.
                        # BUG FIX: was a bare `except:` that also swallowed
                        # KeyboardInterrupt/SystemExit.
                        word = np.zeros(E)
                        lemma_vec = np.zeros(E)
                    x_seq.append(word)
                    y_seq.append(lemma_vec)
                    word_seq.append(values[1])
                    lemma_seq.append(values[2])
            else:
                # blank/comment line = sentence boundary: reset the window
                x_seq = []
                y_seq = []
                word_seq = []
                lemma_seq = []
# 12.2k lines amount to 10k tokens
# Materialize the generator once; training keeps only the x/y arrays.
train_set = [(X, Y) for X, Y, _, _ in generate('UD_English-EWT/en_ewt-ud-train.conllu', line_limit=12200)]
# 2.5k lines amount to 2k tokens
# Dev/test batches also keep the word/lemma strings for accuracy evaluation.
dev_batches = [(x, y, w, l) for x, y, w, l in generate('UD_English-EWT/en_ewt-ud-dev.conllu', line_limit=2500, mode='dev')]
test_batches = [(x, y, w, l) for x, y, w, l in generate('UD_English-EWT/en_ewt-ud-test.conllu', line_limit=2500, mode='dev')]
```
#### defining and training the network
```
# Sequence model: word embeddings in, predicted lemma embeddings out.
M = Sequential()
M.add(Masking(mask_value=.0, input_shape=(S, E)))  # skip all-zero timesteps
M.add(LSTM(R, return_sequences=True))
M.add(Dropout(.2))
M.add(TimeDistributed(Dense(E, activation='linear')))
# Cosine loss: only the direction of the predicted embedding matters.
M.compile(loss='cosine_proximity', optimizer='rmsprop')
for epoch in range(100):
    train_loss = 0
    train_batch_c = 0
    for X, Y in train_set:
        train_loss += M.train_on_batch(X, Y)
        train_batch_c += 1
    dev_loss = 0
    dev_batch_c = 0
    for X, Y, _, _ in dev_batches:
        dev_loss += M.test_on_batch(X, Y)
        dev_batch_c += 1
    # Report average losses on the first epoch and every 10th thereafter.
    if epoch == 0 or (epoch + 1) % 10 == 0:
        print('epoch:', epoch + 1,
              '\ttrain loss: {0:.4f}'.format(train_loss / train_batch_c),
              '\tdev loss: {0:.4f}'.format(dev_loss / dev_batch_c))
    np.random.shuffle(train_set)
```
#### evaluate on test set
```
# Lemmatization accuracy on the test set: predict an embedding per token and
# take the nearest vocabulary word as the lemma.
correct = 0
count = 0
for X, Y, W, L in test_batches:
    pred = M.predict_on_batch(X)
    for i, seq in enumerate(pred):
        for j, pred_y in enumerate(seq):
            if np.sum(X[i][j]) == 0:
                nearest = W[i][j] # identity backoff for oov tokens
            else:
                nearest = vec_model.most_similar(positive=[pred_y], topn=1)[0][0]
            if nearest == L[i][j]:
                correct += 1
            count += 1
            # print('w', W[i][j], '\tl', L[i][j], '\tpred', nearest, nearest == L[i][j])
print('final test accuracy: {0:.2f}%'.format(100 * correct / count))
print('correctly lemmatized tokens:', correct)
print('all tokens:', count)
```
#### demonstration
```
def lemmatize(tokens):
    """
    input: list of tokens
    output: list of input tokens' predicted lemmas

    Tokens are processed in windows of S. Tokens missing from vec_model are
    returned unchanged (identity backoff); all others map to the vocabulary
    word nearest the model's predicted embedding. Relies on module globals
    M, S, E and vec_model.
    """
    lemmas = []
    for i in range(0, len(tokens), S):
        x = np.zeros((1, S, E))
        oov = []
        for j, t in enumerate(tokens[i:min(i + S, len(tokens))]):
            try:
                x[0][j] = vec_model[t]
            except KeyError:
                # OOV token: leave the zero vector and remember the position
                # for identity backoff below.
                # BUG FIX: was a bare `except:` that swallowed everything,
                # including KeyboardInterrupt.
                oov.append(j)
        y = M.predict([x], batch_size=1)
        predicted_lemmas = []
        for j in range(min(i + S, len(tokens)) - i):
            if j in oov:
                predicted_lemmas.append(tokens[i + j])
            else:
                predicted_lemmas.append(vec_model.most_similar(positive=[y[0][j]], topn=1)[0][0])
        lemmas += predicted_lemmas
    return lemmas
lemmatize("I knew him because he had attended my school .".split(' '))
```
| github_jupyter |
```
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
# Create (or reuse, in a restarted kernel) a local Spark session with a
# generous driver heap.
if not 'spark' in locals():
    spark = SparkSession.builder \
        .master("local[*]") \
        .config("spark.driver.memory","64G") \
        .getOrCreate()
spark
```
# Get Data from S3
First we load the data source containing raw weather measurements from S3. Since the data doesn't follow any well-known format (like CSV or JSON), we load it as raw text data and extract all required information.
But first let's load a single year, just to get an impression of the data
```
storageLocation = "s3://dimajix-training/data/weather"
```
Read in the year 2003 as `text` using the `spark.read.text` method. The data can be found at `storageLocation + "/2003"` and should be stored in a variable called `raw_weather_2003`. Then, using `limit` and `toPandas`, retrieve the first 10 rows and display them as a Pandas DataFrame.
```
# Load a single year as raw text to inspect the fixed-width record format.
raw_weather_2003 = spark.read.text(storageLocation + "/2003")
raw_weather_2003.limit(10).toPandas()
```
## Read in all years
Now we read in all years by creating a union. We also add the year as a logical partition column, this will be used later.
```
from functools import reduce
# Read each year 2003-2019 into a Python list, tagging rows with a literal
# "year" column for later partitioned grouping.
raw_weather_per_year = [spark.read.text(storageLocation + "/" + str(i)).withColumn("year", lit(i)) for i in range(2003,2020)]
# Union all years together
raw_weather = reduce(lambda l,r: l.union(r), raw_weather_per_year)
# Display first 10 records
raw_weather.limit(10).toPandas()
```
## Extract Information
The raw data is not exactly nice to work with, so we need to extract the relevant information by using appropriate substr operations.
```
# Slice fixed-width fields out of each raw line (1-based substring offsets);
# wind speed and temperature are stored as tenths, hence the /10 scaling.
weather = raw_weather.select(
    col("year"),
    substring(col("value"),5,6).alias("usaf"),   # station id (USAF part)
    substring(col("value"),11,5).alias("wban"),  # station id (WBAN part)
    substring(col("value"),16,8).alias("date"),
    substring(col("value"),24,4).alias("time"),
    substring(col("value"),42,5).alias("report_type"),
    substring(col("value"),61,3).alias("wind_direction"),
    substring(col("value"),64,1).alias("wind_direction_qual"),
    substring(col("value"),65,1).alias("wind_observation"),
    (substring(col("value"),66,4).cast("float") / lit(10.0)).alias("wind_speed"),
    substring(col("value"),70,1).alias("wind_speed_qual"),
    (substring(col("value"),88,5).cast("float") / lit(10.0)).alias("air_temperature"),
    substring(col("value"),93,1).alias("air_temperature_qual")
)
weather.limit(10).toPandas()
```
## Read in Station Metadata
Fortunately, station metadata is stored as CSV, so we can read it directly using Spark's `spark.read.csv` mechanism. The data can be found at `storageLocation + '/isd-history'`.
You should also specify the `DataFrameReader` option `header` to be `True`, this will use the first line of the CSV for creating column names.
Store the result in a variable called `stations` and again print the first 10 lines using the `toPandas()` method.
```
# Station metadata is plain CSV; header=True uses the first line for column names.
stations = spark.read \
    .option("header", True) \
    .csv(storageLocation + "/isd-history")
# Display first 10 records
stations.limit(10).toPandas()
```
# Process Data
Now we want to perform a simple analysis on the data: Calculate minimum and maximum wind speed and air temperature per country and year. This needs to be performed in three steps:
1. Join weather data and stations on the columns 'usaf' and 'wban'. Note that column names are case sensitive!
2. Group the data by the relevant columns year and country
3. Perform min/max aggregations. Also pay attention to the fields `air_temperature_qual` and `wind_speed_qual`, where "1" means valid value
**Since processing the full date range may take a considerable amount of time, you might first want to start with a single year. This can be done by temporarily replacing `raw_weather` with `raw_weather_2003`.**
```
# Join measurements to station metadata, then aggregate per country and year.
# Quality flag "1" marks a valid measurement; when() yields NULL otherwise,
# which min/max ignore.
df = weather.join(stations, (weather.usaf == stations.USAF) & (weather.wban == stations.WBAN))
result = df.groupBy(df.CTRY, df.year).agg(
    min(when(df.air_temperature_qual == lit(1), df.air_temperature)).alias('min_temp'),
    max(when(df.air_temperature_qual == lit(1), df.air_temperature)).alias('max_temp'),
    min(when(df.wind_speed_qual == lit(1), df.wind_speed)).alias('min_wind'),
    max(when(df.wind_speed_qual == lit(1), df.wind_speed)).alias('max_wind')
)
pdf = result.toPandas()
pdf
```
| github_jupyter |
# BRAINWORKS - Generate Graph Data
[Mohammad M. Ghassemi](https://ghassemi.xyz), DATA Scholar, 2021
<hr>
## 0. Install Dependencies:
To begin, please import the following external and internal python libraries
```
import re
import pandas as pd
import os
import sys
from pprint import pprint
# Make the repository root importable so `utils` and `configuration` resolve.
currentdir = os.getcwd()
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from utils.database.database import database
db = database()
from utils.documentCollector.pubmed import pubmed
pm = pubmed()
from configuration.config import config
```
<br>
## 1. Extract Triples From Text
To begin, let's pull one abstract from the database.
```
data = db.query(f"""SELECT content, pmid, pub_date
FROM documents
WHERE content_type = 'abstract'
LIMIT 1
""")
pprint(data)
```
<br><br>
Now let's pass this abstract to the information extraction pipelines. The results will be stored in the `triples` and `concepts` tables of the database. Note that this might take some time to run locally. We **strongly** encourage you to explore the `cluster/` for practical use of the information extraction pipelines on a large volume of data.
```
# Run the information-extraction pipeline on the abstract; with db_insert=True
# the results land in the `triples` and `concepts` tables.
pm.extractInformation(paper_list = data,       # A list of dicts containing the text, id, and date
                      db_insert = True,        # Indicates if the results should be inserted into the database
                      batch_size = 1000,       # How much of the data we want to process at any one time.
                      filter_results = True    # When True, filters the triples to return only the minimum spanning set.
                     )
```
<br>
We can query the database to collect the extracted triples, as well as their topics; let's extract one triple from the previous paper (along with the topics) to illustrate.
```
triples = db.query("""WITH data AS(
SELECT *
FROM triples
WHERE pmid = 32810213
LIMIT 10
)
, results AS(
SELECT CONCAT(CONCAT('["',GROUP_CONCAT(DISTINCT cs.concept_name ORDER BY cs.concept_name ASC SEPARATOR '","' )),'"]') as subject_umls_topics,
CONCAT(CONCAT('["',GROUP_CONCAT(DISTINCT co.concept_name ORDER BY co.concept_name ASC SEPARATOR '","' )),'"]') as object_umls_topics,
lower(d.subject) as subject,
lower(d.object) as object,
lower(d.relation) as relation,
d.pmid as pmid,
d.pub_date as pub_date,
YEAR(d.pub_date) as pub_year
FROM data d
JOIN concepts cs ON cs.triple_hash = d.subject_hash AND cs.concept_type = 'subject' AND cs.concept_name NOT IN('Result','Cohort Studies','Combined','Mental Association','Conclusion','Consecutive','Author','findings aspects','evaluation','evidence','Publications','Lacking','Observational Study','Scientific Study','Potential','research','Country','Clinical Research','Patients','Cohort','week','Persons','Increase','inpatient','child','adult') AND cs.concept_name IS NOT NULL
JOIN concepts co ON co.triple_hash = d.object_hash AND co.concept_type = 'object' AND co.concept_name NOT IN('Result','Cohort Studies','Combined','Mental Association','Conclusion','Consecutive','Author','findings aspects','evaluation','evidence','Publications','Lacking','Observational Study','Scientific Study','Potential','research','Country','Clinical Research','Patients','Cohort','week','Persons','Increase','inpatient','child','adult') AND co.concept_name IS NOT NULL
group by subject, object
)
SELECT * FROM results""")
pprint(triples[4])
```
<br>
## 2. Generate Graph Data
Below we provide a function that takes a set of parameters and generates a graph object that can be passed to the graph API at `graph.scigami.org`.
```
def getGraph(params):
    """Query triples matching `params` and build a graph object for the API.

    params keys:
      table             -- name of the triples table to query
      concepts          -- list of {'term','type'} dicts; type 'LIKE' gives a
                           substring match, anything else an exact match, on
                           both subject and object
      exclude_concepts  -- substrings that disqualify a subject/object
      include_relations -- exact relation names to keep ([] = keep all)
      exclude_relations -- exact relation names to drop ([] = drop none)
      limit             -- max triples to fetch (None = no limit)

    Returns {'nodes': [...], 'edges': [...]} where node size scales with
    mention count and edge opacity encodes publication year.
    Relies on module-level `db`, `pd`, `Counter`, and `random`.
    """
    from datetime import date
    import hashlib
    def str2hex(input):
        # Deterministic hex color derived from a string.
        # NOTE(review): never called below (colors are hard-coded);
        # confirm whether it can be removed.
        hash_object = hashlib.sha256(input.encode('utf-8'))
        hex_dig = hash_object.hexdigest()
        return '#' + hex_dig[0:6]
    # --- Build the SQL filter clause from the parameters -------------------
    query = f"""WITH data AS( SELECT *
                              FROM {params['table']}
            """
    query +=""" WHERE """
    for concept in params['concepts']:
        query += f""" (subject {" LIKE '%" if concept['type'] == 'LIKE' else " = '"}{concept['term']}{"%' " if concept['type'] == 'LIKE' else "' "}
                       OR object {" LIKE '%" if concept['type'] == 'LIKE' else " = '"}{concept['term']}{"%' " if concept['type'] == 'LIKE' else "' "}
                      ) OR"""
    query = query[:-2]  # strip the trailing "OR"
    for exclude in params['exclude_concepts']:
        query += f""" AND subject not like '%{exclude}%'
                      AND object  not like '%{exclude}%' """
    if params['include_relations'] != []:
        query += f""" AND relation IN ({'"' + '","'.join(params['include_relations']) + '"'}) """
    if params['exclude_relations'] != []:
        query += f""" AND relation NOT IN ({'"' + '","'.join(params['exclude_relations']) + '"'}) """
    if params['limit'] is not None:
        query += f""" LIMIT {params['limit']} """
    query += """)
             """
    # Aggregate UMLS concept names per triple, dropping generic concepts.
    query += """, results AS(
                   SELECT
                   CONCAT(CONCAT('["',GROUP_CONCAT(DISTINCT cs.concept_name ORDER BY cs.concept_name ASC SEPARATOR '","' )),'"]') as subject_umls_topics,
                   CONCAT(CONCAT('["',GROUP_CONCAT(DISTINCT co.concept_name ORDER BY co.concept_name ASC SEPARATOR '","' )),'"]') as object_umls_topics,
                   lower(d.subject) as subject,
                   lower(d.object) as object,
                   lower(d.relation) as relation,
                   d.pmid as pmid,
                   d.pub_date as pub_date,
                   YEAR(d.pub_date) as pub_year
                   FROM data d
                   JOIN concepts cs ON cs.triple_hash = d.subject_hash AND cs.concept_type = 'subject' AND cs.concept_name NOT IN('Result','Cohort Studies','Combined','Mental Association','Conclusion','Consecutive','Author','findings aspects','evaluation','evidence','Publications','Lacking','Observational Study','Scientific Study','Potential','research','Country','Clinical Research','Patients','Cohort','week','Persons','Increase','inpatient','child','adult') AND cs.concept_name IS NOT NULL
                   JOIN concepts co ON co.triple_hash = d.object_hash AND co.concept_type = 'object' AND co.concept_name NOT IN('Result','Cohort Studies','Combined','Mental Association','Conclusion','Consecutive','Author','findings aspects','evaluation','evidence','Publications','Lacking','Observational Study','Scientific Study','Potential','research','Country','Clinical Research','Patients','Cohort','week','Persons','Increase','inpatient','child','adult') AND co.concept_name IS NOT NULL
                   GROUP BY subject, object
                 )
             """
    query += f"""SELECT * FROM results """ # where information_id IN (SELECT information_id FROM select_set_{i})"""
    all_triples = db.query(query)
    # Pre-processing data: stack subjects and objects into a single "subject"
    # column so every node can be treated uniformly.
    df = pd.DataFrame(all_triples)
    df1 = df.drop(columns=["object","object_umls_topics"])
    df2 = df.drop(columns=["subject","subject_umls_topics"]).rename(columns={"object":"subject","object_umls_topics":"subject_umls_topics"})
    df3 = pd.concat([df1,df2])
    df3.reset_index(inplace=True)
    # getting earliest data appearance of subject and converting to dict
    min_df = df3.groupby("subject")["pub_date"].min().to_frame().reset_index()
    min_dict = dict(zip(min_df["subject"], min_df["pub_date"]))
    #This gets the names of every node (with its mention count)
    raw_nodes = dict(Counter([triple['subject'] for triple in all_triples] + [triple['object'] for triple in all_triples]))
    nodes, edges = [], []
    for node, cnt in raw_nodes.items():
        # The first row mentioning this node supplies its UMLS topics.
        ind = df3.subject.isin([node])
        ii = df3[ind].index.values[0]
        topics = df3.iloc[ii]['subject_umls_topics']
        nodes.append({ 'key' : node, 'attributes':{
                           'label' : node,
                           'x' : 100*random(),  # random initial layout position
                           'y' : 100*random(),
                           'size' : cnt*10,     # size scales with mention count
                           'color' : '#008cc2',
                           'data':{'creation':min_dict[node].year + min_dict[node].month/12,
                                   'topics': topics}
                       }
                     })
    for i,triple in enumerate(all_triples):
        todays_date = date.today()
        # Hex alpha suffix per publication year: older edges render fainter.
        opacity = { todays_date.year - 15 :'11',todays_date.year - 14 :'11',todays_date.year - 13 :'11',todays_date.year - 12 :'11',todays_date.year - 11 :'11',todays_date.year - 10 :'22',
                    todays_date.year - 9 :'33',todays_date.year - 8 :'44',todays_date.year - 7 :'55',todays_date.year - 6 :'66',todays_date.year - 5 :'77',todays_date.year - 4 :'88',
                    todays_date.year - 3 :'99',todays_date.year - 2 :'AA',todays_date.year - 1 : 'BB',todays_date.year : 'CC'}
        # NOTE(review): 'label' is specified twice below (identical value).
        edges.append({ 'key' : str(i),
                       'source' : triple['subject'],
                       'target' : triple['object'],
                       'attributes' : { 'label' : triple['relation'],
                                        'type' : 'arrow',
                                        'size' : 3,
                                        'color' : '#041E42' + opacity[triple['pub_date'].year],
                                        'label' : triple['relation'],
                                        'data' :{'time':triple['pub_date'].year + triple['pub_date'].month/12,
                                                 'pmid':triple['pmid']}
                                      }
                     })
    return {'nodes':nodes, 'edges':edges}
```
<br>
We may call this function, passing in a parameter set, and receive a formatted graph data object for the API.
```
from collections import Counter
from random import random
from pprint import pprint
#----------------------------------------------
# Get the graph data object from the database.
#----------------------------------------------
graph = getGraph({'table' : 'triples',                        # The name of the table where the triples data is stored.
                  'concepts' : [{'term':'covid','type':'LIKE'}], # The term we want to search for, for instance, `covid`
                  'limit' : 5,                                # The number of triples we want to return, e.g. `5`
                  'include_relations' : [],                   # Any edges we are interested in, e.g. ['cause','caused','associated'],
                  'exclude_relations' : [],                   # Any edges we want to exclude, e.g. ['is','will be'],
                  'exclude_concepts' : []                     # Any nodes we want to exclude, e.g. ['patient','patients','participants','participant','men','women']
                 })
#-----------------------------------------------
# Adding configuration information to the graph
#-----------------------------------------------
# "maps" tells the viewer how to bind node/edge data fields to visual
# dimensions (clustering, sliders, node sizing).
json_data = {"graph":{}, "config":{}}
json_data["graph"] = graph
json_data["config"] = {"maps" : [{"dimension": "cluster",
                                  },
                                 {"dimension": "node_slider",
                                  "data" : "creation",
                                  "args" : "node slider"
                                  },
                                 {"dimension": "node_size",
                                  "data" : "degree",
                                  "args" : {"min":10, "max":40}
                                  },
                                 {"dimension": "edge_slider",
                                  "data" : "time",
                                  "args" : "edge slider"
                                  }],
                       "settings":{}
                       }
pprint(json_data)
```
<br> We can now call the API to obtain the graph
```
import requests
# Submit the graph+config payload; the service responds with a viewer URL.
url = requests.get("http://graph.scigami.org:5000/create_graph", json=json_data).content.decode()
url
```
<br>
## Appendix
Navigating Ontologies using NLM APIs.
```
import requests
import json
from pprint import pprint
# Obtain a service ticket (UMLS CAS flow: API key -> ticket-granting ticket
# -> single-use service ticket).
r = requests.post('https://utslogin.nlm.nih.gov/cas/v1/api-key', data={'apikey' : config['UMLS']['APIKey']})
tgt = 'TGT-' + r.text.split('TGT-')[1].split('-cas')[0] + '-cas'
r = requests.post(f"""https://utslogin.nlm.nih.gov/cas/v1/tickets/{tgt}""", data={'service' : 'http://umlsks.nlm.nih.gov'})
service_ticket = r.text
# Let's search for the children of Neuronal Plasticity: https://meshb-prev.nlm.nih.gov/record/ui?ui=D009473
concept_id = 'D009473'
source = 'MSH'
base_url = 'https://uts-ws.nlm.nih.gov/rest'
extention = f"""/content/current/source/{source}/{concept_id}/children"""
search = f"""{base_url}{extention}?ticket={service_ticket}"""
r = json.loads(requests.get(search).text)
pprint(r)
```
<br>
Alternative ways to navigate the ontologies
```
import requests
import json
from pprint import pprint
#from utils.generalPurpose import generalPurpose as gp
def flatten(my_dict, last_keys='', key_list=None, value_list=None):
    """Flatten a nested dict/list (JSON-LD) structure into one flat dict.

    Keys become dotted paths ('a.b') for dict nesting and '_i_' segments for
    list positions; None values are recorded as the string 'None'.

    BUG FIX: key_list/value_list previously defaulted to shared mutable lists
    ([]), so results leaked between successive top-level calls. They now
    default to None and are created fresh per call; the recursive calls still
    pass the accumulators explicitly.
    """
    if key_list is None:
        key_list = []
    if value_list is None:
        value_list = []
    if isinstance(my_dict, dict):
        for key, value in my_dict.items():
            this_key = last_keys + '.' + key
            if isinstance(value, dict):
                flatten(my_dict[key], this_key, key_list, value_list)
            elif isinstance(value, list):
                flatten(my_dict[key], this_key, key_list, value_list)
            elif value is None:
                # this_key[1:] strips the leading '.' of the dotted path.
                key_list.append(this_key[1:])
                value_list.append('None')
            else:
                key_list.append(this_key[1:])
                value_list.append(value)
    if isinstance(my_dict, list):
        for i in range(len(my_dict)):
            this_key = last_keys + '_' + str(i) + '_'
            if isinstance(my_dict[i], dict):
                flatten(my_dict[i], this_key, key_list, value_list)
            elif isinstance(my_dict[i], list):
                flatten(my_dict[i], this_key, key_list, value_list)
            elif my_dict[i] is None:
                key_list.append(this_key[1:])
                value_list.append('None')
            else:
                key_list.append(this_key[1:])
                value_list.append(my_dict[i])
    return dict(zip(key_list, value_list))
def extractFromFlatJson(flat_data, key_has = [], value_has = [], fetch_part = None ):
    """Pick entries out of a flattened JSON dict by key/value substrings.

    An entry qualifies when its key contains every substring in key_has and
    its (stringified) value contains every substring in value_has. If
    fetch_part is given, the sibling entry whose last path segment is
    fetch_part is returned instead of the matching value. Example:
    extractFromFlatJson(flat, key_has=['label','@language'],
                        value_has=['en'], fetch_part='@value')
    Returns deduplicated results (order not guaranteed).
    """
    results = []
    for element in flat_data.keys():
        # Key criteria: every requested substring must appear in the key.
        if not all(fragment in element for fragment in key_has):
            continue
        # Value criteria: every requested substring must appear in the value.
        if not all(fragment in str(flat_data[element]) for fragment in value_has):
            continue
        if fetch_part is not None:
            sibling = '.'.join(element.split('.')[:-1] + [fetch_part])
            results.append(flat_data[sibling])
        else:
            results.append(flat_data[element])
    return list(set(results))
# Get the Descriptor Info.
def getMeshInfo(id):
    """Fetch a MeSH entity from NLM's JSON-LD API and normalize key fields.

    The first letter of `id` selects which fields are extracted:
    D=descriptor, M=concept, Q=qualifier, T=term. Returns a dict whose
    values are lists with URL prefixes stripped to the final path segment.
    Performs a network request per call.
    """
    x = json.loads(requests.get(f'https://id.nlm.nih.gov/mesh/{id}.json-ld').text)
    flat_x = flatten(x)
    print(f'https://id.nlm.nih.gov/mesh/{id}.json-ld')
    r = {}
    r['id'] = x.get('@id',None)
    # Descriptor -----------------------------------------------------
    if id[0] == 'D':
        r['label'] = extractFromFlatJson(flat_x, key_has = ['label','@language'], value_has = ['en'], fetch_part = '@value')[0]
        r['treeNumber'] = x.get('treeNumber' ,None)
        r['broaderDescriptor'] = x.get('broaderDescriptor' ,None)
        r['concept'] = x.get('concept' ,None)
        r['preferredConcept'] = x.get('preferredConcept' ,None)
        r['allowableQualifier'] = x.get('allowableQualifier',None)
    # Concept ------------------------------------------------------
    if id[0] == 'M':
        r['label'] = extractFromFlatJson(flat_x, key_has = ['label','@language'], value_has = ['en'], fetch_part = '@value')[0]
        r['scopeNotes'] = extractFromFlatJson(flat_x, key_has = ['scopeNote','@language'], value_has = ['en'], fetch_part = '@value')[0]
        r['preferredTerm'] = x.get('preferredTerm' ,None)
        r['narrowerConcept'] = x.get('narrowerConcept' ,None)
        r['broaderConcept'] = x.get('broaderConcept' ,None)
        r['relatedConcept'] = x.get('relatedConcept' ,None)
    # Qualifier ------------------------------------------------------
    if id[0] == 'Q':
        r['label'] = extractFromFlatJson(flat_x, key_has = ['label','@language'], value_has = ['en'], fetch_part = '@value')[0]
        r['preferredConcept'] = x.get('preferredConcept' ,None)
        r['preferredTerm'] = x.get('preferredTerm' ,None)
        r['treeNumber'] = x.get('treeNumber' ,None)
        # NOTE(review): sourced from 'narrowerConcept' — looks like it should
        # be 'broaderQualifier'; confirm against the JSON-LD schema.
        r['broaderQualifier'] = x.get('narrowerConcept' ,None)
    # Terms ------------------------------------------------------
    if id[0] == 'T':
        r['label'] = extractFromFlatJson(flat_x, key_has = ['label','@language'], value_has = ['en'], fetch_part = '@value')[0]
    # Normalize: None -> [], scalar -> [scalar], then strip URL prefixes.
    for key, val in r.items():
        if val is None:
            r[key] = []
            continue
        r[key] = [val] if not isinstance(r[key], list) else val
        r[key] = [x.split('/')[-1] for x in r[key]]
    return r
# Demo: look up one concept and walk to its narrower/broader neighbors.
x = getMeshInfo('M0002885')
print('Concept')
print(x['label'], x['scopeNotes'], x['narrowerConcept'], x['broaderConcept'])
print('narrower')
for concept in x['narrowerConcept']:
    print(getMeshInfo(concept)['label'], getMeshInfo(concept)['id'])
print('broader')
for concept in x['broaderConcept']:
    print(getMeshInfo(concept)['label'], getMeshInfo(concept)['id'])
```
| github_jupyter |
<!--BOOK_INFORMATION-->
<img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png">
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
*The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*
<!--NAVIGATION-->
< [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb) | [Contents](Index.ipynb) | [In-Depth: Manifold Learning](05.10-Manifold-Learning.ipynb) >
<a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.09-Principal-Component-Analysis.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
# In Depth: Principal Component Analysis
Up until now, we have been looking in depth at supervised learning estimators: those estimators that predict labels based on labeled training data.
Here we begin looking at several unsupervised estimators, which can highlight interesting aspects of the data without reference to any known labels.
In this section, we explore what is perhaps one of the most broadly used of unsupervised algorithms, principal component analysis (PCA).
PCA is fundamentally a dimensionality reduction algorithm, but it can also be useful as a tool for visualization, for noise filtering, for feature extraction and engineering, and much more.
After a brief conceptual discussion of the PCA algorithm, we will see a couple examples of these further applications.
We begin with the standard imports:
```
# Notebook setup: inline plots plus the standard imports.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
```
## Introducing Principal Component Analysis
Principal component analysis is a fast and flexible unsupervised method for dimensionality reduction in data, which we saw briefly in [Introducing Scikit-Learn](05.02-Introducing-Scikit-Learn.ipynb).
Its behavior is easiest to visualize by looking at a two-dimensional dataset.
Consider the following 200 points:
```
# 200 correlated 2-D points: a fixed random linear map applied to Gaussian noise.
rng = np.random.RandomState(1)
X = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T
plt.scatter(X[:, 0], X[:, 1])
plt.axis('equal');
```
By eye, it is clear that there is a nearly linear relationship between the x and y variables.
This is reminiscent of the linear regression data we explored in [In Depth: Linear Regression](05.06-Linear-Regression.ipynb), but the problem setting here is slightly different: rather than attempting to *predict* the y values from the x values, the unsupervised learning problem attempts to learn about the *relationship* between the x and y values.
In principal component analysis, this relationship is quantified by finding a list of the *principal axes* in the data, and using those axes to describe the dataset.
Using Scikit-Learn's ``PCA`` estimator, we can compute this as follows:
```
from sklearn.decomposition import PCA
# Fit a 2-component PCA to the 2-D point cloud.
pca = PCA(n_components=2)
pca.fit(X)
```
The fit learns some quantities from the data, most importantly the "components" and "explained variance":
```
# Principal axes (unit vectors) and the variance explained along each.
print(pca.components_)
print(pca.explained_variance_)
```
To see what these numbers mean, let's visualize them as vectors over the input data, using the "components" to define the direction of the vector, and the "explained variance" to define the squared-length of the vector:
```
def draw_vector(v0, v1, ax=None):
    """Draw an arrow from point v0 to point v1 on *ax* (default: current axes)."""
    if not ax:
        ax = plt.gca()
    style = {'arrowstyle': '->', 'linewidth': 2, 'shrinkA': 0, 'shrinkB': 0}
    ax.annotate('', v1, v0, arrowprops=style)
# plot data with the principal axes overlaid; arrow length is 3 standard
# deviations along each component (sqrt of explained variance).
plt.scatter(X[:, 0], X[:, 1], alpha=0.2)
for length, vector in zip(pca.explained_variance_, pca.components_):
    v = vector * 3 * np.sqrt(length)
    draw_vector(pca.mean_, pca.mean_ + v)
plt.axis('equal');
```
These vectors represent the *principal axes* of the data, and the length of the vector is an indication of how "important" that axis is in describing the distribution of the data—more precisely, it is a measure of the variance of the data when projected onto that axis.
The projection of each data point onto the principal axes are the "principal components" of the data.
If we plot these principal components beside the original data, we see the plots shown here:

[figure source in Appendix](06.00-Figure-Code.ipynb#Principal-Components-Rotation)
This transformation from data axes to principal axes is an *affine transformation*, which basically means it is composed of a translation, rotation, and uniform scaling.
While this algorithm to find principal components may seem like just a mathematical curiosity, it turns out to have very far-reaching applications in the world of machine learning and data exploration.
### PCA as dimensionality reduction
Using PCA for dimensionality reduction involves zeroing out one or more of the smallest principal components, resulting in a lower-dimensional projection of the data that preserves the maximal data variance.
Here is an example of using PCA as a dimensionality reduction transform:
```
# Reduce to one dimension: keep only the highest-variance component.
pca = PCA(n_components=1)
pca.fit(X)
X_pca = pca.transform(X)
print("original shape: ", X.shape)
print("transformed shape:", X_pca.shape)
```
The transformed data has been reduced to a single dimension.
To understand the effect of this dimensionality reduction, we can perform the inverse transform of this reduced data and plot it along with the original data:
```
# Map the 1-D projection back to 2-D and plot it over the original data.
X_new = pca.inverse_transform(X_pca)
plt.scatter(X[:, 0], X[:, 1], alpha=0.2)
plt.scatter(X_new[:, 0], X_new[:, 1], alpha=0.8)
plt.axis('equal');
```
The light points are the original data, while the dark points are the projected version.
This makes clear what a PCA dimensionality reduction means: the information along the least important principal axis or axes is removed, leaving only the component(s) of the data with the highest variance.
The fraction of variance that is cut out (proportional to the spread of points about the line formed in this figure) is roughly a measure of how much "information" is discarded in this reduction of dimensionality.
This reduced-dimension dataset is in some senses "good enough" to encode the most important relationships between the points: despite reducing the dimension of the data by 50%, the overall relationship between the data points are mostly preserved.
### PCA for visualization: Hand-written digits
The usefulness of the dimensionality reduction may not be entirely apparent in only two dimensions, but becomes much more clear when looking at high-dimensional data.
To see this, let's take a quick look at the application of PCA to the digits data we saw in [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb).
We start by loading the data:
```
from sklearn.datasets import load_digits
digits = load_digits()
digits.data.shape
```
Recall that the data consists of 8×8 pixel images, meaning that they are 64-dimensional.
To gain some intuition into the relationships between these points, we can use PCA to project them to a more manageable number of dimensions, say two:
```
pca = PCA(2)  # project from 64 to 2 dimensions
projected = pca.fit_transform(digits.data)
print(digits.data.shape)
print(projected.shape)
digits.target
# Show one randomly chosen digit (1797 samples in the dataset) with its label.
# NOTE(review): np.random.randint(len(digits.data)) would be the direct idiom.
i=int(np.random.random()*1797)
plt.imshow(digits.data[i].reshape(8,8),cmap='Blues')
digits.target[i]
digits.data[i].reshape(8,8)
```
We can now plot the first two principal components of each point to learn about the data:
```
# Scatter the 2-D projection, coloring each point by its digit label
# using a 10-color discrete 'Spectral' colormap.
# NOTE(review): plt.cm.get_cmap was removed in matplotlib 3.9; on newer
# versions use plt.get_cmap('Spectral', 10) or matplotlib.colormaps -- confirm
# the installed matplotlib version.
plt.scatter(projected[:, 0], projected[:, 1],
c=digits.target, edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('Spectral', 10))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar();
```
Recall what these components mean: the full data is a 64-dimensional point cloud, and these points are the projection of each data point along the directions with the largest variance.
Essentially, we have found the optimal stretch and rotation in 64-dimensional space that allows us to see the layout of the digits in two dimensions, and have done this in an unsupervised manner—that is, without reference to the labels.
### What do the components mean?
We can go a bit further here, and begin to ask what the reduced dimensions *mean*.
This meaning can be understood in terms of combinations of basis vectors.
For example, each image in the training set is defined by a collection of 64 pixel values, which we will call the vector $x$:
$$
x = [x_1, x_2, x_3 \cdots x_{64}]
$$
One way we can think about this is in terms of a pixel basis.
That is, to construct the image, we multiply each element of the vector by the pixel it describes, and then add the results together to build the image:
$$
{\rm image}(x) = x_1 \cdot{\rm (pixel~1)} + x_2 \cdot{\rm (pixel~2)} + x_3 \cdot{\rm (pixel~3)} \cdots x_{64} \cdot{\rm (pixel~64)}
$$
One way we might imagine reducing the dimension of this data is to zero out all but a few of these basis vectors.
For example, if we use only the first eight pixels, we get an eight-dimensional projection of the data, but it is not very reflective of the whole image: we've thrown out nearly 90% of the pixels!

[figure source in Appendix](06.00-Figure-Code.ipynb#Digits-Pixel-Components)
The upper row of panels shows the individual pixels, and the lower row shows the cumulative contribution of these pixels to the construction of the image.
Using only eight of the pixel-basis components, we can only construct a small portion of the 64-pixel image.
Were we to continue this sequence and use all 64 pixels, we would recover the original image.
But the pixel-wise representation is not the only choice of basis. We can also use other basis functions, which each contain some pre-defined contribution from each pixel, and write something like
$$
image(x) = {\rm mean} + x_1 \cdot{\rm (basis~1)} + x_2 \cdot{\rm (basis~2)} + x_3 \cdot{\rm (basis~3)} \cdots
$$
PCA can be thought of as a process of choosing optimal basis functions, such that adding together just the first few of them is enough to suitably reconstruct the bulk of the elements in the dataset.
The principal components, which act as the low-dimensional representation of our data, are simply the coefficients that multiply each of the elements in this series.
This figure shows a similar depiction of reconstructing this digit using the mean plus the first eight PCA basis functions:

[figure source in Appendix](06.00-Figure-Code.ipynb#Digits-PCA-Components)
Unlike the pixel basis, the PCA basis allows us to recover the salient features of the input image with just a mean plus eight components!
The amount of each pixel in each component is the corollary of the orientation of the vector in our two-dimensional example.
This is the sense in which PCA provides a low-dimensional representation of the data: it discovers a set of basis functions that are more efficient than the native pixel-basis of the input data.
### Choosing the number of components
A vital part of using PCA in practice is the ability to estimate how many components are needed to describe the data.
This can be determined by looking at the cumulative *explained variance ratio* as a function of the number of components:
```
pca = PCA().fit(digits.data)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
```
This curve quantifies how much of the total, 64-dimensional variance is contained within the first $N$ components.
For example, we see that with the digits the first 10 components contain approximately 75% of the variance, while you need around 50 components to describe close to 100% of the variance.
Here we see that our two-dimensional projection loses a lot of information (as measured by the explained variance) and that we'd need about 20 components to retain 90% of the variance. Looking at this plot for a high-dimensional dataset can help you understand the level of redundancy present in multiple observations.
## PCA as Noise Filtering
PCA can also be used as a filtering approach for noisy data.
The idea is this: any components with variance much larger than the effect of the noise should be relatively unaffected by the noise.
So if you reconstruct the data using just the largest subset of principal components, you should be preferentially keeping the signal and throwing out the noise.
Let's see how this looks with the digits data.
First we will plot several of the input noise-free data:
```
def plot_digits(data):
    """Plot the first 40 rows of *data* as a 4x10 grid of 8x8 grayscale images.

    data: array of shape (n_samples, 64) with pixel values in [0, 16]
    (the clim below fixes that display range); assumes n_samples >= 40.
    """
    fig, axes = plt.subplots(4, 10, figsize=(10, 4),
                             subplot_kw={'xticks':[], 'yticks':[]},
                             gridspec_kw=dict(hspace=0.1, wspace=0.1))
    for i, ax in enumerate(axes.flat):
        ax.imshow(data[i].reshape(8, 8),
                  cmap='binary', interpolation='nearest',
                  clim=(0, 16))
plot_digits(digits.data)
```
Now let's add some random noise to create a noisy dataset, and re-plot it:
```
np.random.seed(42)
noisy = np.random.normal(digits.data, 4)
plot_digits(noisy)
```
It's clear by eye that the images are noisy, and contain spurious pixels.
Let's train a PCA on the noisy data, requesting that the projection preserve 50% of the variance:
```
pca = PCA(0.50).fit(noisy)
pca.n_components_
```
Here 50% of the variance amounts to 12 principal components.
Now we compute these components, and then use the inverse of the transform to reconstruct the filtered digits:
```
components = pca.transform(noisy)
filtered = pca.inverse_transform(components)
plot_digits(filtered)
```
This signal preserving/noise filtering property makes PCA a very useful feature selection routine—for example, rather than training a classifier on very high-dimensional data, you might instead train the classifier on the lower-dimensional representation, which will automatically serve to filter out random noise in the inputs.
## Example: Eigenfaces
Earlier we explored an example of using a PCA projection as a feature selector for facial recognition with a support vector machine (see [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb)).
Here we will take a look back and explore a bit more of what went into that.
Recall that we were using the Labeled Faces in the Wild dataset made available through Scikit-Learn:
```
from sklearn.datasets import fetch_lfw_people
faces = fetch_lfw_people(min_faces_per_person=60)
print(faces.target_names)
print(faces.images.shape)
```
Let's take a look at the principal axes that span this dataset.
Because this is a large dataset, we will use ``RandomizedPCA``—it contains a randomized method to approximate the first $N$ principal components much more quickly than the standard ``PCA`` estimator, and thus is very useful for high-dimensional data (here, a dimensionality of nearly 3,000).
We will take a look at the first 150 components:
```
# from sklearn.decomposition import RandomizedPCA
# RandomizedPCA was removed from scikit-learn; modern PCA can use a
# randomized solver internally, so alias PCA to keep the original names.
from sklearn.decomposition import PCA as RandomizedPCA
pca = RandomizedPCA(150)
pca.fit(faces.data)
```
In this case, it can be interesting to visualize the images associated with the first several principal components (these components are technically known as "eigenvectors,"
so these types of images are often called "eigenfaces").
As you can see in this figure, they are as creepy as they sound:
```
fig, axes = plt.subplots(3, 8, figsize=(9, 4),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
ax.imshow(pca.components_[i].reshape(62, 47), cmap='bone')
```
The results are very interesting, and give us insight into how the images vary: for example, the first few eigenfaces (from the top left) seem to be associated with the angle of lighting on the face, and later principal vectors seem to be picking out certain features, such as eyes, noses, and lips.
Let's take a look at the cumulative variance of these components to see how much of the data information the projection is preserving:
```
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
```
We see that these 150 components account for just over 90% of the variance.
That would lead us to believe that using these 150 components, we would recover most of the essential characteristics of the data.
To make this more concrete, we can compare the input images with the images reconstructed from these 150 components:
```
# Compute the components and projected faces
pca = RandomizedPCA(150).fit(faces.data)
components = pca.transform(faces.data)
projected = pca.inverse_transform(components)
# Plot the results
fig, ax = plt.subplots(2, 10, figsize=(10, 2.5),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i in range(10):
ax[0, i].imshow(faces.data[i].reshape(62, 47), cmap='binary_r')
ax[1, i].imshow(projected[i].reshape(62, 47), cmap='binary_r')
ax[0, 0].set_ylabel('full-dim\ninput')
ax[1, 0].set_ylabel('150-dim\nreconstruction');
```
The top row here shows the input images, while the bottom row shows the reconstruction of the images from just 150 of the ~3,000 initial features.
This visualization makes clear why the PCA feature selection used in [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb) was so successful: although it reduces the dimensionality of the data by nearly a factor of 20, the projected images contain enough information that we might, by eye, recognize the individuals in the image.
What this means is that our classification algorithm needs to be trained on 150-dimensional data rather than 3,000-dimensional data, which depending on the particular algorithm we choose, can lead to a much more efficient classification.
## Principal Component Analysis Summary
In this section we have discussed the use of principal component analysis for dimensionality reduction, for visualization of high-dimensional data, for noise filtering, and for feature selection within high-dimensional data.
Because of the versatility and interpretability of PCA, it has been shown to be effective in a wide variety of contexts and disciplines.
Given any high-dimensional dataset, I tend to start with PCA in order to visualize the relationship between points (as we did with the digits), to understand the main variance in the data (as we did with the eigenfaces), and to understand the intrinsic dimensionality (by plotting the explained variance ratio).
Certainly PCA is not useful for every high-dimensional dataset, but it offers a straightforward and efficient path to gaining insight into high-dimensional data.
PCA's main weakness is that it tends to be highly affected by outliers in the data.
For this reason, many robust variants of PCA have been developed, many of which act to iteratively discard data points that are poorly described by the initial components.
Scikit-Learn contains a couple interesting variants on PCA, including ``RandomizedPCA`` and ``SparsePCA``, both also in the ``sklearn.decomposition`` submodule.
``RandomizedPCA``, which we saw earlier, uses a non-deterministic method to quickly approximate the first few principal components in very high-dimensional data, while ``SparsePCA`` introduces a regularization term (see [In Depth: Linear Regression](05.06-Linear-Regression.ipynb)) that serves to enforce sparsity of the components.
In the following sections, we will look at other unsupervised learning methods that build on some of the ideas of PCA.
<!--NAVIGATION-->
< [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb) | [Contents](Index.ipynb) | [In-Depth: Manifold Learning](05.10-Manifold-Learning.ipynb) >
<a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.09-Principal-Component-Analysis.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
| github_jupyter |
<a href="https://colab.research.google.com/github/Saurabh-Bagchi/Traffic-Sign-Classification.keras/blob/master/Questions_Project_1_Computer_Vision_JPMC_v3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

Proprietary content. © Great Learning. All Rights Reserved. Unauthorized use or distribution prohibited.
# German Traffic Sign Recognition
Multi-class, single-image classification
### Dataset
The German Traffic Sign Benchmark is a multi-class, single-image classification challenge held at the International Joint Conference on Neural Networks (IJCNN) 2011. They cordially invite researchers from relevant fields to participate: The competition is designed to allow for participation without special domain knowledge. Their benchmark has the following properties:
- Single-image, multi-class classification problem
- More than 40 classes
- More than 50,000 images in total
- Large, lifelike database
#### Notes
- If the model is taking too much time to get trained then you can reduce the number of classes. There are around 43 classes in the dataset, model should be trained on a minimum of 15 classes.
### Initialize ImageDataGenerator (7 Marks)
- Rescale the images
- Specify value for validation_split & get 75% of the data in training and 25% in validation
### Import the necessary libraries
```
import itertools
import os
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
print("TF version:", tf.__version__)
print("Hub version:", hub.__version__)
print("GPU is", "available" if tf.config.list_physical_devices('GPU') else "NOT AVAILABLE")
# set the matplotlib backend so figures can be saved in the background
import matplotlib
matplotlib.use("Agg")
# import the necessary packages
#from pyimagesearch.resnet import ResNet
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
from imutils import paths
import matplotlib.pyplot as plt
import argparse
import cv2
import os
import time
import pandas as pd
from PIL import Image
from tensorflow import keras
from sklearn.metrics import accuracy_score
# Fix RNG seeds for (partial) reproducibility of augmentation and training
np.random.seed(42)
tf.random.set_seed(42)
# NOTE(review): the standalone-keras imports below shadow the tf.keras names
# imported above; this only works when the installed keras matches tf.keras.
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import SGD
from keras import backend as K
K.set_image_data_format('channels_last')
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
# Training-time augmentation + rescaling to [0, 1]; validation_split=0.25
# yields 75% training / 25% validation when subset= is passed later.
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
validation_split=0.25)
# Test images are only rescaled (no augmentation)
test_datagen = ImageDataGenerator(rescale=1./255)
#test_size = 0.25
random_state = 42
#x_train, x_validation, y_train, y_validation = train_test_split(x_train, y_train, test_size=0.2, random_state=42)
# Mount Google Drive and unzip the dataset into the Colab working directory
from google.colab import drive
drive.mount('/content/drive/')
project_path = '/content/drive/MyDrive/German Traffic/'
images_zip_path = project_path + "Data - German Traffic Sign Recognition-20210113T122622Z-001.zip"
from zipfile import ZipFile
with ZipFile(images_zip_path, 'r') as z:
    z.extractall()
```
### Get training data from ImageDataGenerator (5 Marks)
- Give directory path
- Give target size
- Give batch_size
- Specify classes; if you wish to use fewer classes you need to give the class names in a list (at least 15 classes should be included)
- Specify class_mode
- Specify color_mode
- Specify subset
You can get details here
https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator
```
img_rows = 32
img_cols = 32
train_data_dir_path = '/content/Data - German Traffic Sign Recognition/Train'
test_data_dir_path = '/content/Data - German Traffic Sign Recognition'
# Training subset (75% of the images, from validation_split=0.25 above):
# 32x32 RGB images with one-hot ('categorical') labels for all 43 classes.
# batch_size=1 ensures generate_data_from_set() below drops no remainder.
training_set = train_datagen.flow_from_directory(train_data_dir_path,
target_size = (img_rows, img_cols),
batch_size = 1,
classes = ['0','1','2','3','4','5',
'6','7','8','9','10','11','12','13','14','15',
'16','17','18','19','20','21','22','23','24','25',
'26','27','28','29','30','31','32','33','34','35','36','37',
'38','39','40','41','42'],
class_mode='categorical',
color_mode='rgb',
subset='training')
def generate_data_from_set(gen=None, image_target_size = 32, batch_size = 1,
                           channels = 3, class_mode = 'sparse' ):
    """Drain a Keras directory iterator into in-memory NumPy arrays.

    Parameters
    ----------
    gen : iterator, optional
        Object exposing ``n`` (total sample count) and yielding
        ``(images, labels)`` batches via ``next()``.  Defaults to the
        module-level ``training_set``; the default is resolved lazily at
        call time (the original evaluated it at definition time, which
        requires ``training_set`` to exist before this ``def`` runs).
    image_target_size, channels, class_mode :
        Unused; retained only for backward compatibility with callers.
    batch_size : int
        Batch size the iterator was built with.  Any remainder of
        ``gen.n`` not divisible by ``batch_size`` is dropped.

    Returns
    -------
    tuple of (np.ndarray, np.ndarray)
        All images stacked along axis 0, and their labels.
    """
    if gen is None:
        gen = training_set
    total_images = gen.n
    # iterations needed to cover the data, e.g. batch 5 -> total_images/5 steps
    steps = total_images // batch_size
    images, labels = [], []
    for _ in range(steps):
        batch_x, batch_y = next(gen)  # builtin next() works on any iterator
        images.extend(batch_x)
        labels.extend(batch_y)
    return np.array(images), np.array(labels)
# Materialize the full (augmented) training split into memory
x_train, y_train = generate_data_from_set()
x_train.shape
```
### Get validation data from ImageDataGenerator (5 Marks)
- Give directory path
- Give target size
- Give batch_size
- Specify classes; if you wish to use fewer classes you need to give the class names in a list (at least 15 classes should be included)
- Specify class_mode
- Specify color_mode
- Specify subset
You can get details here
https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator
```
# Validation subset: the remaining 25% of the same training directory
validation_set = train_datagen.flow_from_directory(train_data_dir_path,
target_size = (img_rows, img_cols),
batch_size = 1,
classes = ['0','1','2','3','4','5',
'6','7','8','9','10','11','12','13','14','15',
'16','17','18','19','20','21','22','23','24','25',
'26','27','28','29','30','31','32','33','34','35','36','37',
'38','39','40','41','42'],
class_mode='categorical',
color_mode='rgb',
subset='validation')
import contextlib
import os
#os.mkdir("frivolous_directory")
#with contextlib.suppress(UnidentifiedImageError):
x_val, y_val = generate_data_from_set(gen=validation_set)
# "Test" data here is the Meta folder of reference sign images (one per class)
testing_set = test_datagen.flow_from_directory(test_data_dir_path,
target_size = (img_rows, img_cols),
batch_size = 1,
classes = ['Meta'],
class_mode='categorical',
color_mode='rgb')
x_test, y_test = generate_data_from_set(gen=testing_set)
```
### Exploratory data analysis to understand German Traffic Signal Images
#### Let us check the total number of training and meta images, we have 39,209 training images and 43 reference images
```
import glob
# Collect all training images (one sub-folder per class) and the Meta
# reference images (one file per class)
train_image_names = glob.glob('/content/Data - German Traffic Sign Recognition/Train/*/*.png')
test_image_names = glob.glob('/content/Data - German Traffic Sign Recognition/Meta/*.png')
print("Total number of training images: ", len(train_image_names))
print("Total number of test images: ", len(test_image_names))
# wrap the filename lists in pandas Series objects (enables .map below)
train_image_names = pd.Series(train_image_names)
test_image_names = pd.Series(test_image_names)
```
#### Create a dataframe of training image name and class labels so that it is easier to see distribution and identify class imbalance and also plot a sample of them
```
# train_df: a dataframe with 2 field: Filename, ClassId
train_df = pd.DataFrame()
# generate Filename field
train_df['Filename'] = train_image_names.map(lambda img_name: img_name.split("/")[-1])
# generate ClassId field
train_df['ClassId'] = train_image_names.map(lambda img_name: int(img_name.split("/")[-2]))
train_df.head()
#test_image_names
```
#### Replicate the same dataframe for the reference images available in Meta folder
```
# test_df: same two fields (Filename, ClassId) for the Meta reference images
test_df = pd.DataFrame()
# Filename is the basename of the image path
test_df['Filename'] = test_image_names.map(lambda img_name: img_name.split("/")[-1])
# Meta images are named '<class_id>.png', so ClassId comes from the file stem
test_df['ClassId'] = test_image_names.map(lambda img_name: int(img_name.split(".")[0].split("/")[-1]))
test_df.head()
```
#### Plot sample images for the training dataset, we see that images are severely blurred, some are bright while others are dull, which might impact classification, the class labels are shown as image titles
```
# Show 9 random training images, with the class label as each subplot title
plot_df = train_df.sample(9).reset_index()
plt.figure(figsize=(10, 10))
for i in range(9):
    img_name = plot_df.loc[i, 'Filename']
    label_str = "%d"%(plot_df.loc[i, 'ClassId'])
    plt.subplot(3,3,i+1)
    # training images live under Train/<class_id>/<filename>
    plt.imshow(plt.imread(os.path.join('/content/Data - German Traffic Sign Recognition/Train/',label_str, img_name)))
    plt.title(label_str)
    plt.xticks([])
    plt.yticks([])
```
#### Plotting the reference images in meta data folder, we see that these are proper images
```
# Show 9 random Meta (reference) images; these live directly under Meta/
plot_df = test_df.sample(9).reset_index()
plt.figure(figsize=(10, 10))
for i in range(9):
    img_name = plot_df.loc[i, 'Filename']
    label_str = "%d"%(plot_df.loc[i, 'ClassId'])
    plt.subplot(3,3,i+1)
    plt.imshow(plt.imread(os.path.join('/content/Data - German Traffic Sign Recognition/Meta/',img_name)))
    plt.title(label_str)
    plt.xticks([])
    plt.yticks([])
```
#### We see that there is class imbalance in the training data some classes are overrepresented while some are underrepresented, so accuracy would be good if we are able to predict better in the majority class like label 38 vs label 37
```
# Per-class image counts in the training data -- visualizes class imbalance
class_id_distribution = train_df['ClassId'].value_counts()
class_id_distribution.head(10)
plt.figure(figsize=(13,5))
plt.xticks(np.arange(43))
plt.bar(class_id_distribution.index, class_id_distribution.values);
```
#### For Meta folder (reference images) we have one image for each class label
```
class_id_distribution = test_df['ClassId'].value_counts()
class_id_distribution.head(10)
plt.figure(figsize=(13,5))
plt.xticks(np.arange(43))
plt.bar(class_id_distribution.index, class_id_distribution.values);
```
### Define model (10 Marks)
- Initialize a Sequential Model
- Add Convolution, Maxpool, Dropout, Flatten & Dense layers according to your model architecture
```
# define model
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import layers
# Hyperparameters; NOTE(review): nb_epoch, n_filter, n_pool and n_conv are
# defined but never used by the architecture below.
nb_epoch = 30
rows, cols = 32, 32
n_channels = 3
batch_size = 128
n_classes = 43
n_filter = 30
n_pool = 2
n_conv = 3
from keras import backend as K
#K.set_image_dim_ordering('th')
K.set_image_data_format('channels_last')
# NOTE(review): these standalone-keras imports duplicate and shadow the
# tf.keras imports above; they only work when keras matches tf.keras.
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import SGD
from keras import backend as K
K.set_image_data_format('channels_last')
# Stacked Conv/MaxPool blocks with Dropout, then a dense softmax head
model_conv = Sequential()
## If You preprocessed with gray scaling and local histogram equivalization then input_shape = (32,32,1) else (32,32,3)
model_conv.add(Conv2D(32, kernel_size=(3, 3),activation='relu', input_shape=(32, 32, 3)))
model_conv.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
# NOTE(review): canonical spelling is padding='valid'; 'Valid' appears to be
# accepted case-insensitively -- confirm against the installed Keras version.
model_conv.add(MaxPooling2D(pool_size=(2, 2),padding='Valid'))
#model.add(BatchNormalization())
model_conv.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model_conv.add(MaxPooling2D(pool_size=(2, 2),padding='Valid'))
#model.add(BatchNormalization())
model_conv.add(Dropout(0.25))
model_conv.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model_conv.add(MaxPooling2D(pool_size=(2, 2),padding='Valid'))
#model.add(BatchNormalization())
model_conv.add(Dropout(0.5))
# flatten the conv feature maps for the dense classification head
model_conv.add(Flatten())
model_conv.add(Dense(128, activation='relu'))
model_conv.add(Dropout(0.5))
# one softmax probability per traffic-sign class (43 classes)
model_conv.add(Dense(n_classes, activation='softmax'))
```
### Compile the model (5 Marks)
- Specify optimizer, loss & metrics
```
# build the model
#model = cnn_model()
# categorical cross-entropy matches the one-hot ('categorical') labels
model_conv.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
```
### Get model summary (3 Marks)
```
model_conv.summary()
```
### Fit the model (5 Marks)
- Specify epochs
- Specify batch_size
- Give validation_data
- Validation accuracy should be more than 90%
```
from keras.callbacks import ModelCheckpoint, EarlyStopping
# Save the best model (by validation accuracy) and stop early once
# val_accuracy has not improved for 2 consecutive epochs.
filepath="/content/Data - German Traffic Sign Recognition/German_Traffic_ConvNetworkModel.hdf5"
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=2, verbose=1, mode='auto')
checkpoint_conv = ModelCheckpoint(filepath, monitor='val_accuracy',
verbose=1, save_best_only=True)
callbacks_list_conv = [checkpoint_conv,early]
training_set.n
# epochs=100 is an upper bound; EarlyStopping above ends training sooner.
history = model_conv.fit(x_train, y_train, batch_size=128, epochs=100, verbose=1,
callbacks=callbacks_list_conv,validation_data=(x_val, y_val))
```
### Draw plots (5 Marks)
- Plot training accuracy and validation accuracy with respect to epochs
- Plot training loss and validation loss with respect to epochs
```
import keras
from matplotlib import pyplot as plt
#history = model1.fit(train_x, train_y,validation_split = 0.1, epochs=50, batch_size=4)
# Accuracy curves: training vs. validation per epoch
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
```
## Future work (ungraded)
- Try to apply transfer learning and see if you can improve the performance.
- Using transfer learning with VGG-16 to see if performance can be improved
- Using imagenet weights and including input shape compatible with current problem
```
from keras.applications.vgg16 import VGG16
from keras.models import Model
# VGG-16 convolutional base with ImageNet weights; include_top=False drops
# the original 1000-class head so we can attach our own 43-class head.
vggmodel = VGG16(weights='imagenet', include_top=False, input_shape=(32,32,3))
vggmodel.summary()
```
#### The first 19 layers are not trainable, we are using the weights as such
```
# Freeze the first 19 VGG-16 layers so their ImageNet weights stay fixed
# during fine-tuning.
# Fix: the loop variable was named `layers`, which shadowed the
# `tensorflow.keras.layers` module imported earlier in the notebook;
# renamed to `layer`.
for layer in vggmodel.layers[:19]:
    #print(layer)
    layer.trainable = False
```
#### We are specifying our own output layer as well, with the number of classes and the softmax activation function
```
vggmodel.summary(line_length=150)
# Attach a new classification head: flatten the VGG feature maps and add a
# 43-way softmax output layer, then wrap input and output into a new Model.
flatten = Flatten()
new_layer2 = Dense(n_classes, activation='softmax', name='my_dense_2')
inp2 = vggmodel.input
out2 = new_layer2(flatten(vggmodel.output))
model_final = Model(inp2, out2)
model_final.summary(line_length=150)
```
#### Compiling the model and specifying optimizer and metrics as before
```
# SGD with a small learning rate and momentum for fine-tuning.
# NOTE(review): `lr` is the legacy argument spelling (newer Keras uses
# `learning_rate`), and ModelCheckpoint's `period` was replaced by
# `save_freq` -- confirm against the installed Keras version.
model_final.compile(loss = "categorical_crossentropy",
optimizer = optimizers.SGD(lr=0.0001, momentum=0.9), metrics=["accuracy"])
model_final.summary()
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_accuracy',
verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=40, verbose=1, mode='auto')
history2 = model_final.fit(x_train, y_train, batch_size=128, epochs=100, verbose=1,
callbacks=[checkpoint,early],validation_data=(x_val, y_val))
# Persist the final weights (the checkpoint above already saved the best ones)
model_final.save_weights("vgg16_1.h5")
import keras
from matplotlib import pyplot as plt
#history = model1.fit(train_x, train_y,validation_split = 0.1, epochs=50, batch_size=4)
plt.plot(history2.history['accuracy'])
plt.plot(history2.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history2.history['loss'])
plt.plot(history2.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
```
### Transfer learning using VGG-16 is not very helpful as we were able to get validation accuracy of 31.0% while in our trained model using own network we were able to achieve validation accuracy of 92.9%
```
%%shell
jupyter nbconvert --to html /content/Questions_Project_1_Computer_Vision_JPMC_v3.ipynb
```
| github_jupyter |
#準備
## Pythonのバージョンを確認しましょう
```
!python --version
```
## インストールされているパッケージを確認しましょう
```
!pip list
```
# Python 基礎
## Hello Worldと表示してみよう
```
print('Hello World')
```
## 日本語の出力
```
print('日本語')
```
## コメントの書き方
```
# ここはコメントです、プログラムの実行に影響がありません
print('コメントの書き方は #で始まります。')
```
## 演算
```
# Addition +
print(4+5)
# Subtraction -
print(1-2)
# Multiplication *
print(3*3)
# Division /
print(18/6)
# Remainder of a division
print(11%5)
# 2 to the third power
print(2**3)
```
# 変数
## 変数とは
```
# 一回目 5を入れる
a_number_in_the_box= 5
# その「変数」
print(a_number_in_the_box)
# 二回目 10を入れる
a_number_in_the_box= 10
# 上の4行目と全く同じですが、出力が違います。
print(a_number_in_the_box)
```
## 文字列の変数
```
japanese_string='こんにちは'
print(japanese_string)
japanese_string='こんばんは'
print(japanese_string)
```
## 文字列の連結
```
my_name='川島'
print('こんにちは'+my_name+'さん')
```
# Pythonの型
## 型の出力
```
amount=250
type(amount)
# int型
test_integer=256
print(type(test_integer))
# str型
test_str='文字列'
print(type(test_str))
# float型
test_float=3.1415926
print(type(test_float))
# tuple型
test_tuple=(1,2,3,4,5)
print(type(test_tuple))
# list型 他の言語にもある「配列」です
test_list=[1,2,3,4,5]
print(type(test_list))
```
## 型の変換
```
num_of_epoch=10
print('反復学習の回数:'+str(num_of_epoch)+'回です。')
```
## listの作り方
```
languages=['English','French','Japanese']
print(languages)
# 黒のRBG値
black_color_rgb=[0,0,0]
print(black_color_rgb)
# 白のRBG値
white_color_rgb=[255,255,255]
print(white_color_rgb)
# 緑のRGB値
green_color_rgb=[0,255,0]
print(green_color_rgb)
```
## 二次元配列
```
three_item_list=[1,0,1]
three_item_matrix=[three_item_list,three_item_list,three_item_list]
print(three_item_matrix)
```
## 三次元配列
```
three_item_list=[1,0,1]
three_item_matrix=[three_item_list,three_item_list,three_item_list]
three_three_matrix=[three_item_matrix,three_item_matrix,three_item_matrix]
print(three_three_matrix)
```
## 文字の多次元配列
```
string_list=['二','三','四']
three_item_matrix=[string_list,string_list,string_list]
print(three_item_matrix)
```
## リストから値の出し方
```
string_list=['〇番目の文字列','一番目の文字列','二番目の文字列']
print(string_list[0])
print(string_list[1])
print(string_list[2])
```
## リストのスライス
```
train_data=[1,2,3,4,5,6,7,8,9,10]
print(train_data[0:6])
print(train_data[0:-6])
print(train_data[:3])
print(train_data[8:])
print(train_data[0:100])
```
## リスト要素の更新
```
train_data=[1,2,3,4,5,6,7,8,9,10]
train_data[0]=999
print(train_data)
```
## リスト要素の追加
```
train_data=[1,2,3,4,5,6,7,8,9,10]
train_data=train_data+[11]
print(train_data)
train_data+=[12]
print(train_data)
train_data.append(13)
print(train_data)
```
## リスト要素の削除
```
train_data=[1,2,3,4,5,6,7,8,9,10]
del train_data[0]
print(train_data)
```
## リストの代入
```
train_data=[1,2,3,4,5,6,7,8,9,10]
train_data_new=train_data
train_data_copy=train_data[:]
# train_data_newは影響が受けます
del train_data[0]
print(train_data_new)
print(train_data_copy)
```
# 条件分岐
## if文と条件式
```
# flowerは花を入れる変数です。
flower='rose'
print(flower=='rose')
if flower=='rose':
print('花は薔薇ですね')
```
# else
```
# flowerは花を入れる変数です。
flower='tulip'
print(flower=='rose')
if flower=='rose':
print('花は薔薇ですね')
else:
print('花は薔薇ではないですね')
```
# elif
```
# flowerは花を入れる変数です。
flower='tulip'
print(flower=='rose')
if flower=='rose':
print('花は薔薇ですね')
elif flower=='tulip':
print('花はチューリップですね')
else:
print('花は薔薇でもチューリップでもないですね。')
```
# 条件式の and, not, or
```
A=True
B=True
print(A and B)
if A and B:
print('AとBが同時にTrueの場合')
A=True
B=False
print(A or B)
if A or B:
print('AかBがどっちかTrueの場合')
A=False
B=True
print(A or B)
if A or B:
print('AかBがどっちかTrueの場合')
```
# for文
```
languages={'English':'英語','French':'フランス語','Japanese':'日本語'}
for one_language in languages:
print(one_language)
train_data=[1,2,3,4,5,6,7,8,9,10]
for one_data in train_data:
print(one_data)
train_data=[1,2,3,4,5,6,7,8,9,10]
for index,one_data in enumerate(train_data):
print('index:'+str(index))
print(one_data)
```
## range()による数値シーケンスの生成
```
for number in range(0,6):
print(number)
for number in range(0,110,10):
print(number)
train_data=[1,2,3,4,5,6,7,8,9,10]
for num in range(0,len(train_data)):
print(train_data[num])
number_of_epoch=10
for epoch in range(number_of_epoch):
print('学習しました:'+str(epoch)+'回')
```
# while文
```
train_data=[1,2,3,4,5,6,7,8,9,10]
counter=0
while counter<7:
print(train_data[counter])
counter=counter+1
languages={'English':'英語','French':'フランス語','Japanese':'日本語'}
for key, value in languages.items():
print(key)
print(value)
print('--------')
```
# 関数
```
# Mapping of language names to their Japanese translations
languages = {'English': '英語', 'French': 'フランス語', 'Japanese': '日本語'}

# Define the function
def printLanguageTranslation(language_list):
    """Print each language name and its translation, followed by a divider."""
    for name, translation in language_list.items():
        print(name)
        print(translation)
        print('--------')

# Use the function
printLanguageTranslation(languages)
```
# インポート
```
import numpy as np
print(np.__version__)
```
| github_jupyter |
# Search jobs abroad
## Scrape jobs abroad from peoplenjob.com
```
from selenium import webdriver
from time import sleep
# NOTE(review): hard-coded local chromedriver path — will only work on this machine.
ch_driver = webdriver.Chrome('C:/Users/beave/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Python 3.7/chromedriver.exe')
ch_driver.implicitly_wait(5)
url = 'https://www.peoplenjob.com/'
ch_driver.get(url)
# Navigate to the job postings page ("채용공고" = job postings).
job_page = ch_driver.find_element_by_link_text("채용공고")
job_page.click()
sleep(1)
# Then open the overseas-work category ("해외근무" = overseas work).
job_abroad = ch_driver.find_element_by_link_text("해외근무")
job_abroad.click()
sleep(1)
```
### Practice
```
# Exploratory scraping of the job table: each <tr> is one job listing.
job_list = ch_driver.find_elements_by_tag_name("tr")
job_list[0].text
#we need to remove it (row 0 is the table header)
job_list[1].get_attribute("class")
#get date
job_list[6].find_element_by_class_name("date").text
#get job position and link
position_link = job_list[6].find_element_by_class_name("job-title")
position = position_link.text
link = position_link.find_element_by_tag_name("a").get_attribute("href")
print(position, link)
#get job type
job_list[6].find_element_by_class_name("job_type").text
#get company name
job_list[6].find_element_by_class_name("name").text
#get the working location
# //*[@id="content-main"]/div/table/tbody/tr[6]/td[5]/a
tds = job_list[6].find_elements_by_tag_name("td")
tds[4].find_element_by_tag_name("a").text
#get the due date
tds[5].text
# Loop over rows, skipping header/ad rows and Korea-based listings,
# then print ads whose job type mentions "무역" (trade).
for item in job_list[1:40]:
    if item.get_attribute("class") == "info":
        continue
    #get the date uploaded
    date = item.find_element_by_class_name("date").text
    #get the job position and link
    position_link = item.find_element_by_class_name("job-title")
    position = position_link.text
    link = position_link.find_element_by_tag_name("a").get_attribute("href")
    #get job type
    job_type = item.find_element_by_class_name("job_type").text
    #get company name
    name = item.find_element_by_class_name("name").text
    #get the working location
    tds = item.find_elements_by_tag_name("td")
    location = tds[4].find_element_by_tag_name("a").text
    #skip item if its working location is in Korea
    korea = ["Se", "서울", "경기", "경북"]
    loc = location[:2]
    if loc in korea:
        continue
    #get the due date
    due = tds[5].text
    #print(date, position, link, job_type, name, location, due) #for check
    if "무역" not in job_type:
        continue
    print(date, position, link, job_type, name, location, due)
```
### Start a Project
```
def get_job_ads(user_input: str):
    """Collect job ads from the currently loaded results page.

    user_input: keyword matched against the job type or the working location.
    Returns a list of [date, position, link, job_type, name, location, due]
    rows for every matching ad.
    """
    rows = ch_driver.find_elements_by_tag_name("tr")
    # Row 0 is the table header — drop it.
    del rows[0]
    matched = []
    for row in rows:
        # Skip promotional/info rows.
        if row.get_attribute("class") == "info":
            continue
        posted = row.find_element_by_class_name("date").text
        title_cell = row.find_element_by_class_name("job-title")
        title = title_cell.text
        url = title_cell.find_element_by_tag_name("a").get_attribute("href")
        category = row.find_element_by_class_name("job_type").text
        company = row.find_element_by_class_name("name").text
        cells = row.find_elements_by_tag_name("td")
        region = cells[4].find_element_by_tag_name("a").text
        deadline = cells[5].text
        # Keep only ads whose type or location contains the keyword.
        if user_input in category or user_input in region:
            matched.append([posted, title, url, category, company, region, deadline])
    return matched
get_job_ads("무역")
def get_all_job_ads():
    """Prompt for a keyword and scrape matching ads from result pages 1-3."""
    all_result = []
    user_input = input("검색어를 입력하세요: ")
    #from the first page
    all_result.append(get_job_ads(user_input))
    #from page 2 to page 3
    page_path = '//*[@id="content-main"]/div/div[3]/ul/li['
    # li[3] and li[4] are the page-2 and page-3 links in the pager.
    for i in range(3,5):
        page = ch_driver.find_element_by_xpath(page_path + str(i) + ']/a')
        page.click()
        sleep(1)
        all_result.append(get_job_ads(user_input))
    return all_result
get_all_job_ads()
```
| github_jupyter |
#### Copyright 2017 Google LLC.
```
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Sparsity and L1 Regularization
**Learning Objectives:**
* Calculate the size of a model
* Apply L1 regularization to reduce the size of a model by increasing sparsity
One way to reduce complexity is to use a regularization function that encourages weights to be exactly zero. For linear models such as regression, a zero weight is equivalent to not using the corresponding feature at all. In addition to avoiding overfitting, the resulting model will be more efficient.
L1 regularization is a good way to increase sparsity.
## Setup
Run the cells below to load the data and create feature definitions.
```
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv("https://storage.googleapis.com/mledu-datasets/california_housing_train.csv", sep=",")
california_housing_dataframe = california_housing_dataframe.reindex(
np.random.permutation(california_housing_dataframe.index))
def preprocess_features(california_housing_dataframe):
  """Prepares input features from California housing data set.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the features to be used for the model, including
    synthetic features.
  """
  feature_names = [
      "latitude",
      "longitude",
      "housing_median_age",
      "total_rooms",
      "total_bedrooms",
      "population",
      "households",
      "median_income",
  ]
  processed_features = california_housing_dataframe[feature_names].copy()
  # Synthetic feature: average number of rooms per inhabitant.
  processed_features["rooms_per_person"] = (
      california_housing_dataframe["total_rooms"]
      / california_housing_dataframe["population"])
  return processed_features
def preprocess_targets(california_housing_dataframe):
  """Prepares target features (i.e., labels) from California housing data set.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the target feature.
  """
  targets = pd.DataFrame()
  # Binary label: 1.0 when the block's median house value exceeds $265k.
  is_high = california_housing_dataframe["median_house_value"] > 265000
  targets["median_house_value_is_high"] = is_high.astype(float)
  return targets
# Choose the first 12000 (out of 17000) examples for training.
# The dataframe was shuffled with np.random.permutation above, so
# head/tail give a random split.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
# Choose the last 5000 (out of 17000) examples for validation.
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))
# Double-check that we've done the right thing.
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())
print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
  """Trains a linear regression model.

  Args:
    features: pandas DataFrame of features
    targets: pandas DataFrame of targets
    batch_size: Size of batches to be passed to the model
    shuffle: True or False. Whether to shuffle the data.
    num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
  Returns:
    Tuple of (features, labels) for next data batch
  """
  # Convert pandas data into a dict of np arrays.
  features = {key:np.array(value) for key,value in dict(features).items()}
  # Construct a dataset, and configure batching/repeating.
  ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
  ds = ds.batch(batch_size).repeat(num_epochs)
  # Shuffle the data, if specified (buffer of 10000 examples).
  if shuffle:
    ds = ds.shuffle(10000)
  # Return the next batch of data (TF1-style one-shot iterator).
  features, labels = ds.make_one_shot_iterator().get_next()
  return features, labels
def get_quantile_based_buckets(feature_values, num_buckets):
  """Return `num_buckets` evenly spaced interior quantile values of a Series."""
  fractions = [float(i + 1) / (num_buckets + 1) for i in range(num_buckets)]
  return list(feature_values.quantile(fractions))
def construct_feature_columns():
  """Construct the TensorFlow Feature Columns.

  Returns:
    A set of feature columns
  """
  # Each numeric feature is bucketized at quantile boundaries so the linear
  # model can learn one weight per bucket (uses module-level training_examples).
  bucketized_households = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("households"),
    boundaries=get_quantile_based_buckets(training_examples["households"], 10))
  bucketized_longitude = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("longitude"),
    boundaries=get_quantile_based_buckets(training_examples["longitude"], 50))
  bucketized_latitude = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("latitude"),
    boundaries=get_quantile_based_buckets(training_examples["latitude"], 50))
  bucketized_housing_median_age = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("housing_median_age"),
    boundaries=get_quantile_based_buckets(
      training_examples["housing_median_age"], 10))
  bucketized_total_rooms = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("total_rooms"),
    boundaries=get_quantile_based_buckets(training_examples["total_rooms"], 10))
  bucketized_total_bedrooms = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("total_bedrooms"),
    boundaries=get_quantile_based_buckets(training_examples["total_bedrooms"], 10))
  bucketized_population = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("population"),
    boundaries=get_quantile_based_buckets(training_examples["population"], 10))
  bucketized_median_income = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("median_income"),
    boundaries=get_quantile_based_buckets(training_examples["median_income"], 10))
  bucketized_rooms_per_person = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column("rooms_per_person"),
    boundaries=get_quantile_based_buckets(
      training_examples["rooms_per_person"], 10))
  # Latitude x longitude cross lets the model weight specific locations.
  long_x_lat = tf.feature_column.crossed_column(
    set([bucketized_longitude, bucketized_latitude]), hash_bucket_size=1000)
  feature_columns = set([
    long_x_lat,
    bucketized_longitude,
    bucketized_latitude,
    bucketized_housing_median_age,
    bucketized_total_rooms,
    bucketized_total_bedrooms,
    bucketized_population,
    bucketized_households,
    bucketized_median_income,
    bucketized_rooms_per_person])
  return feature_columns
```
## Calculate the Model Size
To calculate the model size, we simply count the number of parameters that are non-zero. We provide a helper function below to do that. The function uses intimate knowledge of the Estimators API - don't worry about understanding how it works.
```
def model_size(estimator):
  """Count the non-zero parameters of an Estimator.

  Bookkeeping variables (global step, bias terms, Ftrl optimizer slots)
  are excluded from the count.
  """
  excluded = ('global_step', 'centered_bias_weight', 'bias_weight', 'Ftrl')
  total = 0
  for name in estimator.get_variable_names():
    if any(tag in name for tag in excluded):
      continue
    total += np.count_nonzero(estimator.get_variable_value(name))
  return total
```
## Reduce the Model Size
Your team needs to build a highly accurate Logistic Regression model on the *SmartRing*, a ring that is so smart it can sense the demographics of a city block ('median_income', 'avg_rooms', 'households', ..., etc.) and tell you whether the given city block is high cost city block or not.
Since the SmartRing is small, the engineering team has determined that it can only handle a model that has **no more than 600 parameters**. On the other hand, the product management team has determined that the model is not launchable unless the **LogLoss is less than 0.35** on the holdout test set.
Can you use your secret weapon—L1 regularization—to tune the model to satisfy both the size and accuracy constraints?
### Task 1: Find a good regularization coefficient.
**Find an L1 regularization strength parameter which satisfies both constraints — model size is less than 600 and log-loss is less than 0.35 on validation set.**
The following code will help you get started. There are many ways to apply regularization to your model. Here, we chose to do it using `FtrlOptimizer`, which is designed to give better results with L1 regularization than standard gradient descent.
Again, the model will train on the entire data set, so expect it to run slower than normal.
```
def train_linear_classifier_model(
    learning_rate,
    regularization_strength,
    steps,
    batch_size,
    feature_columns,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
  """Trains a linear classification model.

  In addition to training, this function also prints training progress information,
  as well as a plot of the training and validation loss over time.

  Args:
    learning_rate: A `float`, the learning rate.
    regularization_strength: A `float` that indicates the strength of the L1
       regularization. A value of `0.0` means no regularization.
    steps: A non-zero `int`, the total number of training steps. A training step
      consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the batch size.
    feature_columns: A `set` specifying the input feature columns to use.
    training_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for training.
    training_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for training.
    validation_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for validation.
    validation_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for validation.

  Returns:
    A `LinearClassifier` object trained on the training data.
  """
  periods = 7
  steps_per_period = steps / periods
  # Create a linear classifier object. FtrlOptimizer applies the L1 penalty.
  my_optimizer = tf.train.FtrlOptimizer(learning_rate=learning_rate, l1_regularization_strength=regularization_strength)
  # Clip gradients to a max norm of 5.0 for stability.
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  linear_classifier = tf.estimator.LinearClassifier(
      feature_columns=feature_columns,
      optimizer=my_optimizer
  )
  # Create input functions.
  training_input_fn = lambda: my_input_fn(training_examples,
                                          training_targets["median_house_value_is_high"],
                                          batch_size=batch_size)
  predict_training_input_fn = lambda: my_input_fn(training_examples,
                                                  training_targets["median_house_value_is_high"],
                                                  num_epochs=1,
                                                  shuffle=False)
  predict_validation_input_fn = lambda: my_input_fn(validation_examples,
                                                    validation_targets["median_house_value_is_high"],
                                                    num_epochs=1,
                                                    shuffle=False)
  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("LogLoss (on validation data):")
  training_log_losses = []
  validation_log_losses = []
  for period in range (0, periods):
    # Train the model, starting from the prior state.
    linear_classifier.train(
        input_fn=training_input_fn,
        steps=steps_per_period
    )
    # Take a break and compute predictions.
    training_probabilities = linear_classifier.predict(input_fn=predict_training_input_fn)
    training_probabilities = np.array([item['probabilities'] for item in training_probabilities])
    validation_probabilities = linear_classifier.predict(input_fn=predict_validation_input_fn)
    validation_probabilities = np.array([item['probabilities'] for item in validation_probabilities])
    # Compute training and validation loss.
    training_log_loss = metrics.log_loss(training_targets, training_probabilities)
    validation_log_loss = metrics.log_loss(validation_targets, validation_probabilities)
    # Occasionally print the current loss.
    print("  period %02d : %0.2f" % (period, validation_log_loss))
    # Add the loss metrics from this period to our list.
    training_log_losses.append(training_log_loss)
    validation_log_losses.append(validation_log_loss)
  print("Model training finished.")
  # Output a graph of loss metrics over periods.
  plt.ylabel("LogLoss")
  plt.xlabel("Periods")
  plt.title("LogLoss vs. Periods")
  plt.tight_layout()
  plt.plot(training_log_losses, label="training")
  plt.plot(validation_log_losses, label="validation")
  plt.legend()
  return linear_classifier
# NOTE: these two explanatory sentences were raw text inside the code cell,
# which is a SyntaxError when the cell runs; they are kept as comments.
# A regularization strength of 0.1 should be sufficient. Note that there is a
# compromise to be struck: stronger regularization gives us smaller models,
# but can affect the classification loss.
linear_classifier = train_linear_classifier_model(
    learning_rate=0.1,
    # TWEAK THE REGULARIZATION VALUE BELOW
    regularization_strength=0.1,
    steps=300,
    batch_size=100,
    feature_columns=construct_feature_columns(),
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
print("Model size:", model_size(linear_classifier))
```
| github_jupyter |
```
# Import Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random
from random import gauss
import math
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
import warnings
warnings.filterwarnings('ignore')
# CONSTANT Variables
NUM_SOURCE = 6  # number of temporal/spatial sources
X1 = 21         # spatial map rows (pixels)
X2 = 21         # spatial map columns (pixels)
V = 441         # pixels per map (X1 * X2)
N = 240         # length of each temporal source
```
### Question 1.1
```
def over_N(tc, N):
    """ Check whether numpy array tc has reached the length of the temporal source
    tc : numpy array of temporal source, TC
    N : integer of the length of each temporal source
    Return : True if len(tc) >= N, else False
    """
    # The comparison already yields the boolean; no if/else branch needed.
    return len(tc) >= N
def standardise(tc):
    """ Standardise *tc* to zero mean and unit standard deviation.
    tc : numpy array
    Return : numpy array of standardised values
    """
    # Centring does not change the standard deviation, so dividing the
    # centred array by np.std(tc) matches the original two-step version.
    centred = tc - np.mean(tc)
    return centred / np.std(tc)
def construct(AV, IV, duration):
    """ Construct matrix TC of size 240 x 6 consisting of six temporal sources using three vectors
    AV : onset arrival vector (initial run of zeros; also re-inserted each cycle)
    IV : increment vector (spacing between event onsets)
    duration : duration of ones
    Return : numpy array of matrix TC.
    """
    # Initialise value
    iv_count = IV
    tc = np.array([])
    # onset arrival vector. Fills zeroes to tc
    tc = np.zeros(AV)
    # NOTE(review): indentation was lost in the notebook export; the nesting
    # below is the reconstruction consistent with the expected onset pattern
    # (e.g. AV=20, IV=45 gives ones starting at 20, 65, ...) — confirm.
    while len(tc) < N:
        # build up duration of ones
        for i in range(duration):
            if over_N(tc, N) == True:
                break
            # Add ones into TC.
            tc = np.append(tc, 1)
        # incremeting the vector: pad zeros up to the next onset position
        while (len(tc) < iv_count) & (len(tc) < N):
            tc = np.append(tc, 0)
        iv_count += IV
        # build up onsets arrival vector
        for i in range(AV):
            if over_N(tc, N) == True:
                break
            tc = np.append(tc, 0)
    # Standardise TC
    tc = standardise(tc)
    return tc
# Construct matrix TC: construct(AV, IV, duration) per source.
tc1 = construct(0, 30, 15)
tc2 = construct(20, 45, 20)
tc3 = construct(0, 60, 25)
tc4 = construct(0, 40, 15)
tc5 = construct(0, 40, 20)
tc6 = construct(0, 40, 25)
TC = [tc1, tc2, tc3, tc4, tc5, tc6]
# Plot each source TCs
count = 0
for tc in TC:
    count += 1
    plt.plot(tc)
    plt.title("TC " + str(count))
    plt.xlabel("N")
    plt.xticks([0, 20, 40, 60, 120, 240])
    plt.savefig('plots/TC_'+str(count)) #save plots
    plt.show()
```
### Question 1.2
```
tc_df = pd.DataFrame(TC)
# Transpose so each column is one temporal source.
tc_df = tc_df.T
# Build up a correlation matrix between 6 variables
ax = sns.heatmap(tc_df.corr())
plt.title("Correlation Matrix between 6 variables");
plt.savefig('plots/CM_TC')
```
### Question 1.3
```
def slice_one(hori_start, hori_finish, verti_start, verti_finish):
    """ Construct an array tmpSM of size (21 x 21) consisting of ones and zeros,
    by placing ones in the rectangle spanned by the given 1-based, inclusive
    row and column bounds.

    hori_start : 1-based first row of the rectangle of ones
    hori_finish : 1-based last row of the rectangle (inclusive)
    verti_start : 1-based first column of the rectangle
    verti_finish : 1-based last column of the rectangle (inclusive)
    Return : an array tmpSM of size 21x21
    """
    # NOTE(review): despite the names, the "hori" bounds index ROWS and the
    # "verti" bounds index COLUMNS, matching the original nested loops.
    tmp_sm = np.zeros(V).reshape((X1, X2))
    # Vectorised slice assignment replaces the original nested for-loops.
    tmp_sm[hori_start - 1:hori_finish, verti_start - 1:verti_finish] = 1.0
    return tmp_sm
# Construct array tmpSM of 6 different sources (non-overlapping rectangles).
tmp1 = slice_one(2, 6, 2, 6)
tmp2 = slice_one(2, 6, 15, 19)
tmp3 = slice_one(8, 13, 2, 6)
tmp4 = slice_one(8, 13, 15, 19)
tmp5 = slice_one(15, 19, 2, 6)
tmp6 = slice_one(15, 19, 15, 19)
# Construct an array tmpSM of size 6 x (21 x 21)
tmpSM = np.array([tmp1, tmp2, tmp3, tmp4, tmp5, tmp6])
count = 0
for tmp in tmpSM:
    tmp_df = pd.DataFrame(tmp)
    count += 1
    ax = sns.heatmap(tmp_df)
    plt.title("SM " + str(count))
    plt.savefig('plots/SM_'+str(count))
    plt.show()
# Reshape SM to size 6 X 441 (one flattened map per row)
SM = tmpSM.reshape((NUM_SOURCE, V))
sm_df = pd.DataFrame(SM)
sm_df = sm_df.T
# Build up a correlation matrix between 6 vectored SMs
sns.heatmap(sm_df.corr())
plt.title("Correlation Matrix between 6 vectored SMs")
plt.savefig('plots/CM_SM');
```
### Question 1.4
```
def contruct_gaussian_noise(mean, variance, length):
    """ Construct white Gaussian noise.
    mean : mean of the Gaussian noise
    variance : variance of the Gaussian noise
    length : samples per source; total samples are length * NUM_SOURCE
    Return : a 1-D numpy array of Gaussian noise
    """
    # Hoist the standard deviation; the sequence of gauss() draws is
    # unchanged, so the output matches the original exactly.
    sigma = math.sqrt(variance)
    samples = [gauss(mean, sigma) for _ in range(length * NUM_SOURCE)]
    return np.array(samples)
# Temporal noise: variance 0.25, reshaped to (N, NUM_SOURCE).
temp_noise = contruct_gaussian_noise(0.0, 0.25, N)
temp_noise = temp_noise.reshape((N,NUM_SOURCE))
# Spatial noise: variance 0.015, reshaped to (NUM_SOURCE, V).
spatial_noise = contruct_gaussian_noise(0.0, 0.015, V)
spatial_noise = spatial_noise.reshape((NUM_SOURCE,V))
# Correlation matrix between spatial noise
snoise_df = pd.DataFrame(spatial_noise)
snoise_df = snoise_df.T
sns.heatmap(snoise_df.corr())
plt.title("Correlation matrix between spatial noise");
plt.savefig('plots/CM_SpatialNoise');
# Correlation matrix between temporal noise
tnoise_df = pd.DataFrame(temp_noise)
sns.heatmap(tnoise_df.corr())
plt.title("Correlation matrix between temporal noise");
plt.savefig('plots/CM_TemporalNoise');
# Histogram of spatial noise
sns.histplot(data=snoise_df)
plt.title("Histogram of spatial noise");
plt.savefig('plots/Histogram_SpatialNoise');
# Histogram of temporal noise
sns.histplot(data=tnoise_df)
plt.title("Histogram of temporal noise");
plt.savefig('plots/Histogram_TemporalNoise');
# Build up product TtTs (temporal noise x spatial noise, shape N x V)
TtTs = np.dot(temp_noise, spatial_noise)
ttts_df = pd.DataFrame(TtTs)
# Correlation of product TtTs of a subset of TtTs (first 9 columns)
mini_ttts = ttts_df[[0, 1, 2, 3, 4, 5, 6, 7, 8]]
sns.heatmap(mini_ttts.corr())
plt.title("Correlation of product TtTs");
plt.savefig('plots/CM_TtTs');
```
### Question 1.5
```
# Transpose TC so its shape is (N, NUM_SOURCE) = (240, 6).
TC = np.transpose(TC)
# Build up X = (TC + temporal noise) x (SM + spatial noise), shape (240, 441).
X = np.dot((TC + temp_noise), (SM + spatial_noise))
X_df = pd.DataFrame(X)
# Randomly select 100 time-series from X
randomly_selected = random.sample(list(range(0,V)), 100)
sample = X_df[randomly_selected]
# Plot 100 randomly selected time series from X
sns.lineplot(data = sample)
plt.title("Line plot of 100 randomly selected time series from X");
plt.xlabel("N")
plt.savefig('plots/Lineplot_randomX');
# Get variance of X across 441 variables
var = np.var(X_df)
# Plot variance of 441 variables
sns.scatterplot(data = var)
plt.title("Variance of 441 variables");
plt.savefig('plots/Variance_X');
# Standardise X
# NOTE(review): standardise() uses one global mean/std over the whole matrix,
# not per-column standardisation — confirm this is intended.
X = standardise(X)
```
### Question 2.1
```
def solve_lsr(TC, X):
    """
    Solve a Least Square Regression (LSR) model given :
    TC : a numpy matrix of 240 x 6
    X : a numpy matrix of 240 x 441
    Returns: 5 numpy arrays: DTD (D^T D), its inverse, DTX (D^T X),
    A_lsr (retrieved spatial maps), and D_lsr (retrieved time courses)
    """
    # Normal equations: A = (D^T D)^-1 D^T X with D = TC.
    DTD = np.dot(np.transpose(TC), TC)
    DTD_inv = np.linalg.inv(DTD)
    DTX = np.dot(np.transpose(TC), X)
    A_lsr = np.dot(DTD_inv, DTX)
    # Retrieved time courses: project X onto the estimated maps.
    D_lsr = np.dot(X, np.transpose(A_lsr))
    return DTD, DTD_inv, DTX, A_lsr, D_lsr
# Solve LSR
DTD, DTD_inv, DTX, A_lsr, D_lsr = solve_lsr(TC, X)
# Reshape Retrieval of SM, A to size 21 x 21
Alsr = []
for row in A_lsr:
    Alsr.append(row.reshape((X1, X2)))
# Plot the retrieval SM and TC which are A and D of LSR
dlsr_df = pd.DataFrame(D_lsr)
for col in range(0, NUM_SOURCE):
    fig, axes = plt.subplots(1, 2, figsize=(10,3))
    sns.heatmap(data = Alsr[col], ax = axes[0])
    sns.lineplot(data=dlsr_df[col], ax = axes[1])
    plt.title("Source " + str(col+1))
    plt.tight_layout()
    plt.savefig("plots/LSR_source"+str(col+1))
    plt.show()
# Plot scatter plots required
# NOTE(review): 9*X1 + 2 = 191, but the labels call it the "30th column"
# of X — confirm which pixel/column was intended.
sns.scatterplot(dlsr_df[2], X_df[9*X1 + 2])
plt.xlabel("3rd column of Dlsr")
plt.ylabel("30th column of standardized X")
plt.title("Scatter plot of 3rd column of Dlsr vs 30th column of standarized X")
plt.savefig("plots/scatterplot_3rdDlsr_vs_X")
plt.show()
sns.scatterplot(dlsr_df[3], X_df[9*X1 + 2])
plt.xlabel("4th column of Dlsr")
plt.ylabel("30th column of standardized X")
plt.title("Scatter plot of 4th column of Dlsr vs 30th column of standarized X")
plt.savefig("plots/scatterplot_4thDlsr_vs_X")
plt.show()
```
### Question 2.2
```
def solve_RR(lambda_value, DTD, DTX):
    """
    Solve Ridge Regression (RR) Model given :
    lambda_value : the regularization term in RR, scalar
    DTD : Product of Transpose of D and D, numpy array
    DTX : Product of Transpose of D and standardised X, numpy array
    Return :
    A_rr : Retrieval of SM, numpy array
    D_rr : Retrieval of TC, numpy array
    """
    # Scale the penalty by the number of variables V (module constant).
    lamda_hat = lambda_value * V
    # Generalised: use the actual dimension of DTD instead of hard-coding 6,
    # so the solver also works for a different number of sources.
    I = np.identity(DTD.shape[0])
    # A = (D^T D + lambda_hat I)^-1 D^T X
    Z = DTD + lamda_hat * I
    Z_inv = np.linalg.inv(Z)
    A_rr = np.dot(Z_inv, DTX)
    # NOTE(review): D_rr uses the module-level standardised X.
    D_rr = np.dot(X, np.transpose(A_rr))
    return A_rr, D_rr
# Solve RR with lambda value = 0.5
A_rr, D_rr = solve_RR(0.5, DTD, DTX)
# Construct a Pearson correlation of TC and D of LSR and RR
from scipy.stats import pearsonr
ctlsr = []
ctrr = []
# NOTE(review): TC and D_lsr/D_rr have shape (240, 6); indexing [i] takes
# ROWS (length-6 time points), not the six source columns — confirm whether
# columns were intended here.
for i in range(NUM_SOURCE):
    corr, _ = pearsonr(TC[i], D_lsr[i])
    ctlsr.append(corr)
    corr2, _ = pearsonr(TC[i], D_rr[i])
    ctrr.append(corr2)
print("Sum of CtRR greater than Sum of CtLSR: ", sum(ctrr) > sum(ctlsr))
print("Sum of CtRR: " + str(sum(ctrr)))
print("Sum of CtLSR: " + str(sum(ctlsr)))
# Solve RR with lambda value = 1000
Arr_alt, Drr_alt = solve_RR(1000, DTD, DTX)
Arr_alt_df = pd.DataFrame(Arr_alt)
Arr_alt_df = Arr_alt_df.T
alsr_df = pd.DataFrame(A_lsr)
alsr_df = alsr_df.T
# Plot First vector of Alsr vs First vector of Arr
sns.scatterplot(Arr_alt_df[0], alsr_df[0])
plt.xlabel("First vector of Arr")
plt.ylabel("First vector of Alsr")
plt.title("First vector of Alsr vs First vector of Arr")
plt.savefig("plots/arr_vs_alsr")
Arr_df = pd.DataFrame(np.transpose(A_rr))
# Plot Arr when lambda is 0.5 vs 1000
sns.lineplot(data=Arr_df[0], label='Arr when lamda=0.5')
sns.lineplot(data=Arr_alt_df[0], label='Arr when lamda=1000')
plt.title("Arr when lambda is 0.5 vs 1000")
plt.savefig("plots/arr_lambda")
Alsr_df = pd.DataFrame(A_lsr)
Drr_df = pd.DataFrame(D_rr)
tc_df = pd.DataFrame(TC)
# Persist intermediate matrices for the follow-up R notebook.
X_df.to_csv("datafile/X.csv")
sm_df.to_csv("datafile/SM.csv")
tc_df.to_csv("datafile/TC.csv")
Arr_df.to_csv("datafile/Arr.csv")
Drr_df.to_csv("datafile/Drr.csv")
def contruct_X(i):
    """
    Construct X and output the data into a csv file with corresponding i
    in the filename.

    Draws fresh temporal and spatial noise on every call, so each of the
    generated X realisations differs only in its noise.
    """
    temp_noise = contruct_gaussian_noise(0.0, 0.25, N)
    temp_noise = temp_noise.reshape((N,NUM_SOURCE))
    spatial_noise = contruct_gaussian_noise(0.0, 0.015, V)
    spatial_noise = spatial_noise.reshape((NUM_SOURCE,V))
    X = np.dot((TC + temp_noise), (SM + spatial_noise))
    X_df = pd.DataFrame(X)
    X_df.to_csv("datafile/X" + str(i) + ".csv")
    return
# Generate 10 noisy realisations X1.csv ... X10.csv.
for i in range(10):
    contruct_X(i+1)
```
### Next: Q2.3-R.ipynb
| github_jupyter |
# Out-of-core Learning - Large Scale Text Classification for Sentiment Analysis
## Scalability Issues
The `sklearn.feature_extraction.text.CountVectorizer` and `sklearn.feature_extraction.text.TfidfVectorizer` classes suffer from a number of scalability issues that all stem from the internal usage of the `vocabulary_` attribute (a Python dictionary) used to map the unicode string feature names to the integer feature indices.
The main scalability issues are:
- **Memory usage of the text vectorizer**: all the string representations of the features are loaded in memory
- **Parallelization problems for text feature extraction**: the `vocabulary_` would be a shared state: complex synchronization and overhead
- **Impossibility to do online or out-of-core / streaming learning**: the `vocabulary_` needs to be learned from the data: its size cannot be known before making one pass over the full dataset
To better understand the issue let's have a look at how the `vocabulary_` attribute works. At `fit` time the tokens of the corpus are uniquely identified by an integer index, and this mapping is stored in the vocabulary:
```
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df=1)
# Fitting builds the token -> integer index vocabulary.
vectorizer.fit([
    "The cat sat on the mat.",
])
vectorizer.vocabulary_
```
The vocabulary is used at `transform` time to build the occurrence matrix:
```
# Transform uses the fitted vocabulary; out-of-vocabulary tokens are dropped.
X = vectorizer.transform([
    "The cat sat on the mat.",
    "This cat is a nice cat.",
]).toarray()
print(len(vectorizer.vocabulary_))
print(vectorizer.get_feature_names())
print(X)
```
Let's refit with a slightly larger corpus:
```
# Refit on a larger corpus — the vocabulary grows with the training data.
vectorizer = CountVectorizer(min_df=1)
vectorizer.fit([
    "The cat sat on the mat.",
    "The quick brown fox jumps over the lazy dog.",
])
vectorizer.vocabulary_
```
The `vocabulary_` grows (logarithmically) with the size of the training corpus. Note that we could not have built the vocabularies in parallel on the 2 text documents as they share some words, hence this would require some kind of shared data structure or synchronization barrier, which is complicated to set up, especially if we want to distribute the processing on a cluster.
With this new vocabulary, the dimensionality of the output space is now larger:
```
# Same documents, but the larger vocabulary gives a wider output matrix.
X = vectorizer.transform([
    "The cat sat on the mat.",
    "This cat is a nice cat.",
]).toarray()
print(len(vectorizer.vocabulary_))
print(vectorizer.get_feature_names())
print(X)
```
## The IMDb movie dataset
To illustrate the scalability issues of the vocabulary-based vectorizers, let's load a more realistic dataset for a classical text classification task: sentiment analysis on text documents. The goal is to tell apart negative from positive movie reviews from the [Internet Movie Database](http://www.imdb.com) (IMDb).
In the following sections, we will work with a [large subset](http://ai.stanford.edu/~amaas/data/sentiment/) of movie reviews from the IMDb that has been collected by Maas et al.
- A. L. Maas, R. E. Daly, P. T. Pham, D. Huang, A. Y. Ng, and C. Potts. Learning Word Vectors for Sentiment Analysis. In the proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 142–150, Portland, Oregon, USA, June 2011. Association for Computational Linguistics.
This dataset contains 50,000 movie reviews, which were split into 25,000 training samples and 25,000 test samples. The reviews are labeled as either negative (neg) or positive (pos). Moreover, *positive* means that a movie received >6 stars on IMDb; negative means that a movie received <5 stars, respectively.
Assuming that the `../fetch_data.py` script was run successfully the following files should be available:
```
import os
# Paths produced by ../fetch_data.py (see text above).
train_path = os.path.join('datasets', 'IMDb', 'aclImdb', 'train')
test_path = os.path.join('datasets', 'IMDb', 'aclImdb', 'test')
```
Now, let's load them into our active session via scikit-learn's `load_files` function
```
from sklearn.datasets import load_files
# Loads one text file per review; folder names 'pos'/'neg' become the labels.
train = load_files(container_path=(train_path),
                   categories=['pos', 'neg'])
test = load_files(container_path=(test_path),
                  categories=['pos', 'neg'])
```
<div class="alert alert-warning">
<b>NOTE</b>:
<ul>
<li>
Since the movie datasets consists of 50,000 individual text files, executing the code snippet above may take ~20 sec or longer.
</li>
</ul>
</div>
The `load_files` function loaded the datasets into `sklearn.datasets.base.Bunch` objects, which are Python dictionaries:
```
train.keys()
```
In particular, we are only interested in the `data` and `target` arrays.
```
import numpy as np
# Sanity check on both splits: document counts, one sample document, and the
# class balance (np.bincount counts occurrences of each integer label).
for label, data in zip(('TRAINING', 'TEST'), (train, test)):
    print('\n\n%s' % label)
    print('Number of documents:', len(data['data']))
    print('\n1st document:\n', data['data'][0])
    print('\n1st label:', data['target'][0])
    print('\nClass names:', data['target_names'])
    print('Class count:',
          np.unique(data['target']), ' -> ',
          np.bincount(data['target']))
```
As we can see above the `'target'` array consists of integers `0` and `1`, where `0` stands for negative and `1` stands for positive.
## The Hashing Trick
Remember the bag of word representation using a vocabulary based vectorizer:
<img src="figures/bag_of_words.svg" width="100%">
To workaround the limitations of the vocabulary-based vectorizers, one can use the hashing trick. Instead of building and storing an explicit mapping from the feature names to the feature indices in a Python dict, we can just use a hash function and a modulus operation:
<img src="figures/hashing_vectorizer.svg" width="100%">
More info and reference for the original papers on the Hashing Trick in the [following site](http://www.hunch.net/~jl/projects/hash_reps/index.html) as well as a description specific to language [here](http://blog.someben.com/2013/01/hashing-lang/).
```
from sklearn.utils.murmurhash import murmurhash3_bytes_u32

# encode for python 3 compatibility: murmurhash3_bytes_u32 requires bytes input.
# The modulus folds the 32-bit hash into a fixed 2**20-dimensional index space;
# note the two occurrences of b'the' map to the same index (stateless hashing).
for word in "the cat sat on the mat".encode("utf-8").split():
    print("{0} => {1}".format(
        word, murmurhash3_bytes_u32(word, 0) % 2 ** 20))
```
This mapping is completely stateless and the dimensionality of the output space is explicitly fixed in advance (here we use a modulo `2 ** 20` which means roughly 1M dimensions). This makes it possible to work around the limitations of the vocabulary-based vectorizer both for parallelizability and online / out-of-core learning.
The `HashingVectorizer` class is an alternative to the `CountVectorizer` (or `TfidfVectorizer` class with `use_idf=False`) that internally uses the murmurhash hash function:
```
from sklearn.feature_extraction.text import HashingVectorizer
# latin-1 can decode any byte sequence, so no UnicodeDecodeError on raw reviews.
h_vectorizer = HashingVectorizer(encoding='latin-1')
h_vectorizer  # notebook display: shows the vectorizer's parameters
```
It shares the same "preprocessor", "tokenizer" and "analyzer" infrastructure:
```
# The hashing vectorizer reuses the same preprocessing/tokenization pipeline
# as the vocabulary-based vectorizers; the analyzer returns the token list.
analyzer = h_vectorizer.build_analyzer()
analyzer('This is a test sentence.')
```
We can vectorize our datasets into a scipy sparse matrix exactly as we would have done with the `CountVectorizer` or `TfidfVectorizer`, except that we can directly call the `transform` method: there is no need to `fit` as `HashingVectorizer` is a stateless transformer:
```
docs_train, y_train = train['data'], train['target']
# Split the 25,000 test documents in half: validation set and held-out test set.
# NOTE(review): load_files shuffles by default, so each half should contain a
# mix of both classes -- confirm against the load_files call above.
docs_valid, y_valid = test['data'][:12500], test['target'][:12500]
docs_test, y_test = test['data'][12500:], test['target'][12500:]
```
The dimension of the output is fixed ahead of time to `n_features=2 ** 20` by default (nearly 1M features) to minimize the rate of collision on most classification problem while having reasonably sized linear models (1M weights in the `coef_` attribute):
```
# Stateless transformer: transform() can be called directly, no fit needed.
h_vectorizer.transform(docs_train)
```
Now, let's compare the computational efficiency of the `HashingVectorizer` to the `CountVectorizer`:
```
h_vec = HashingVectorizer(encoding='latin-1')
%timeit -n 1 -r 3 h_vec.fit(docs_train, y_train)
count_vec = CountVectorizer(encoding='latin-1')
%timeit -n 1 -r 3 count_vec.fit(docs_train, y_train)
```
As we can see, the `HashingVectorizer` is much faster than the `CountVectorizer` in this case.
Finally, let us train a LogisticRegression classifier on the IMDb training subset:
```
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
# Hashing vectorizer + linear classifier: the model stores 2**20 coefficients.
h_pipeline = Pipeline([
    ('vec', HashingVectorizer(encoding='latin-1')),
    ('clf', LogisticRegression(random_state=1)),
])
h_pipeline.fit(docs_train, y_train)
print('Train accuracy', h_pipeline.score(docs_train, y_train))
print('Validation accuracy', h_pipeline.score(docs_valid, y_valid))
import gc
# Free the fitted models' memory before the out-of-core section below.
del count_vec
del h_pipeline
gc.collect()
```
# Out-of-Core learning
Out-of-Core learning is the task of training a machine learning model on a dataset that does not fit into memory or RAM. This requires the following conditions:
- a **feature extraction** layer with **fixed output dimensionality**
- knowing the list of all classes in advance (in this case we only have positive and negative reviews)
- a machine learning **algorithm that supports incremental learning** (the `partial_fit` method in scikit-learn).
In the following sections, we will set up a simple batch-training function to train an `SGDClassifier` iteratively.
But first, let us load the file names into a Python list:
```
train_path = os.path.join('datasets', 'IMDb', 'aclImdb', 'train')
train_pos = os.path.join(train_path, 'pos')
train_neg = os.path.join(train_path, 'neg')
# Positive files first, then negative -- the label array in the next cell
# relies on this grouping (first 12,500 entries are positive reviews).
fnames = [os.path.join(train_pos, f) for f in os.listdir(train_pos)] +\
    [os.path.join(train_neg, f) for f in os.listdir(train_neg)]
fnames[:3]
```
Next, let us create the target label array:
```
# Label 1 (positive) for the first 12,500 entries of fnames, 0 for the rest;
# this matches the pos-then-neg ordering built in the previous cell.
y_train = np.zeros((len(fnames), ), dtype=int)
y_train[:12500] = 1
np.bincount(y_train)
```
Now, we implement the `batch_train function` as follows:
```
from sklearn.base import clone

def batch_train(clf, fnames, labels, iterations=25, batchsize=1000, random_seed=1):
    """Train a clone of `clf` incrementally on random mini-batches of documents.

    Parameters
    ----------
    clf : classifier supporting partial_fit (e.g. SGDClassifier)
    fnames : list of paths to the raw text documents
    labels : 1d array of integer class labels aligned with `fnames`
    iterations : number of mini-batches to draw
    batchsize : documents per mini-batch
    random_seed : seed for reproducible batch sampling

    Returns
    -------
    The trained clone. Note: sampling is *with* replacement, so some
    documents may be seen several times and others never.
    """
    vec = HashingVectorizer(encoding='latin-1')
    idx = np.arange(labels.shape[0])
    c_clf = clone(clf)
    rng = np.random.RandomState(seed=random_seed)

    # BUG FIX: the original reused loop variable `i` for both the iteration
    # counter and the document index; distinct names avoid the shadowing.
    for _ in range(iterations):
        rnd_idx = rng.choice(idx, size=batchsize)
        documents = []
        for doc_idx in rnd_idx:
            with open(fnames[doc_idx], 'r', encoding='latin-1') as f:
                documents.append(f.read())
        X_batch = vec.transform(documents)
        batch_labels = labels[rnd_idx]
        # classes must be listed on every call so unseen labels are handled
        c_clf.partial_fit(X=X_batch,
                          y=batch_labels,
                          classes=[0, 1])
    return c_clf
```
Note that we are not using `LogisticRegression` as in the previous section, but we will use a `SGDClassifier` with a logistic cost function instead. SGD stands for `stochastic gradient descent`, an optimization algorithm that optimizes the weight coefficients iteratively sample by sample, which allows us to feed the data to the classifier chunk by chunk.
And we train the `SGDClassifier`; using the default settings of the `batch_train` function, it will train the classifier on 25*1000=25000 documents. (Depending on your machine, this may take >2 min)
```
from sklearn.linear_model import SGDClassifier
# loss='log' trains a logistic-regression model via stochastic gradient descent.
# NOTE(review): scikit-learn >= 1.1 renamed this spelling to loss='log_loss' --
# confirm against the installed version.
sgd = SGDClassifier(loss='log', random_state=1, max_iter=1000)

sgd = batch_train(clf=sgd,
                  fnames=fnames,
                  labels=y_train)
```
Eventually, let us evaluate its performance:
```
# The hashing vectorizer is stateless, so a fresh instance maps the held-out
# test documents into exactly the same feature space used during training.
vec = HashingVectorizer(encoding='latin-1')
sgd.score(vec.transform(docs_test), y_test)
```
### Limitations of the Hashing Vectorizer
Using the Hashing Vectorizer makes it possible to implement streaming and parallel text classification but can also introduce some issues:
- The collisions can introduce too much noise in the data and degrade prediction quality,
- The `HashingVectorizer` does not provide "Inverse Document Frequency" reweighting (lack of a `use_idf=True` option).
- There is no easy way to inverse the mapping and find the feature names from the feature index.
The collision issues can be controlled by increasing the `n_features` parameters.
The IDF weighting might be reintroduced by appending a `TfidfTransformer` instance on the output of the vectorizer. However computing the `idf_` statistic used for the feature reweighting will require to do at least one additional pass over the training set before being able to start training the classifier: this breaks the online learning scheme.
The lack of inverse mapping (the `get_feature_names()` method of `TfidfVectorizer`) is even harder to workaround. That would require extending the `HashingVectorizer` class to add a "trace" mode to record the mapping of the most important features to provide statistical debugging information.
In the mean time to debug feature extraction issues, it is recommended to use `TfidfVectorizer(use_idf=False)` on a small-ish subset of the dataset to simulate a `HashingVectorizer()` instance that have the `get_feature_names()` method and no collision issues.
<div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>
In our implementation of the batch_train function above, we randomly draw *k* training samples as a batch in each iteration, which can be considered as a random subsampling ***with*** replacement. Can you modify the `batch_train` function so that it iterates over the documents ***without*** replacement, i.e., that it uses each document ***exactly once*** per iteration?
</li>
</ul>
</div>
```
# %load solutions/23_batchtrain.py
```
| github_jupyter |
# Numpy Basics
NumPy provides an N-dimensional array type, the ndarray, which describes a collection of “items” of the *same* type.
The items can be indexed using for example N integers.
All ndarrays are homogeneous: every item takes up the same size block of memory, and all blocks are interpreted in exactly the same way.
An item extracted from an array, e.g., by indexing, is represented by a Python object whose type is one of the array scalar types built in NumPy.
<p align="center">
<img src="https://numpy.org/doc/stable/_images/threefundamental.png">
</p>
## NumPy Array Attributes
```
import numpy as np

np.random.seed(0)

def array_info(array: np.ndarray) -> None:
    """Print ndim, shape, size and dtype of *array*, then its values."""
    for attr_name in ("ndim", "shape", "size", "dtype"):
        print(f"{attr_name}: {getattr(array, attr_name)}")
    print(f"values:\n{array}\n")
```
## Array Indexing and Slicing
Array indexing refers to any use of the square brackets ([]) to index array values. There are many options to indexing, which give numpy indexing great power.
Most of the following examples show the use of indexing when referencing data in an array. The examples work just as well when assigning to an array.
Note that slices of arrays do not copy the internal array data but only produce new views of the original data.

```
x = np.array([[1, 2], [3, 4], [5, 6]])
array_info(x)
# Row slicing: first three rows, rows from index 1, a one-row slice, reversed.
print(x[:3])
print(x[1:])
print(x[1:2])
print(x[::-1])
# x[0, :] and x[0] are equivalent ways to take the first row; x[:, 0] a column.
print(x[0, :])
print(x[0])
print(x[:, 0])
# Draw 10 samples from a correlated 2-D Gaussian.
mean = [0, 0]
cov = [[1, 2],
       [2, 5]]
x = np.random.multivariate_normal(mean=mean, cov=cov, size=10)
print(x)
print(x.shape)
# Fancy indexing with an integer array selects whole rows (3 random ones here).
rand_idxs = np.random.randint(low=0, high=x.shape[0], size=3)
print(rand_idxs)
x_subsample = x[rand_idxs, :]
print(x_subsample)
# The trailing ':' may be omitted -- same subsample.
x_subsample = x[rand_idxs]
print(x_subsample)
```
## Subarrays are views
```
print(x)
# Slices are views: mutating the sub-array writes through to x.
x_sub_array = x[:2, :2]
array_info(x_sub_array)
x_sub_array[0, 0] = -1
array_info(x_sub_array)
array_info(x)  # note x[0, 0] is now -1 as well
```
## Creating copies of arrays
```
# .copy() detaches the data: mutating the copy leaves x untouched.
x_copy = x[:2, :2].copy()
array_info(x_copy)
x_copy[0, 0] = 42
array_info(x_copy)
array_info(x)
```
## Reshaping of Arrays
```
a = np.arange(start=1, stop=10)
array_info(a)
# Pass the target shape positionally: the `newshape` keyword of np.reshape
# was deprecated in NumPy 2.1 (renamed to `shape`); the positional form works
# across all NumPy versions.
grid = np.reshape(a, (3, 3))
array_info(grid)
x = np.array([1, 2, 3])
array_info(x)
# Add a leading axis via reshape ...
x = np.reshape(x, (1, 3))
array_info(x)
array_info(x)
# ... or via np.newaxis (shape becomes (1, 1, 3) here).
x = x[np.newaxis, :]
array_info(x)
array_info(x)
x = x.reshape((3, 1))
array_info(x)
array_info(x)
# ravel() returns a flattened view when possible; flatten() always copies.
x = x.ravel()
array_info(x)
x = x.reshape((3, 1))
array_info(x)
x = x.flatten()
array_info(x)
```
### “Automatic” Reshaping
```
a = np.arange(30)
array_info(a)
# -1 lets NumPy infer the middle dimension: 30 = 2 * 5 * 3.
b = a.reshape((2, -1, 3))
array_info(b)
```
## Changing the Dtype
| Numpy type | C type | Description |
|-|-|-|
| numpy.int8 | int8_t | Byte (-128 to 127) |
| numpy.int16 | int16_t | Integer (-32768 to 32767) |
| numpy.int32 | int32_t | Integer (-2147483648 to 2147483647) |
| numpy.int64 | int64_t | Integer (-9223372036854775808 to 9223372036854775807) |
| numpy.uint8 | uint8_t | Unsigned integer (0 to 255) |
| numpy.uint16 | uint16_t | Unsigned integer (0 to 65535) |
| numpy.uint32 | uint32_t | Unsigned integer (0 to 4294967295) |
| numpy.uint64 | uint64_t | Unsigned integer (0 to 18446744073709551615) |
| numpy.intp | intptr_t | Integer used for indexing, typically the same as ssize_t |
| numpy.uintp | uintptr_t | Integer large enough to hold a pointer |
| numpy.float32 | float | |
| numpy.float64 | double | Note that this matches the precision of the builtin python float. |
| numpy.complex64 | float complex | Complex number, represented by two 32-bit floats. |
| numpy.complex128 | double complex | Note that this matches the precision of the builtin python complex. |
```
# np.float32([...]) is shorthand for np.array([...], dtype=np.float32).
x = np.float32([-1.0, 2.0, 3.0])
array_info(x)
x = np.array([-1.0, 2.0, 3.0], dtype=np.float32)
# astype truncates the fractional part when converting float -> int.
y = x.astype(np.int8)
array_info(y)
# NOTE(review): casting the negative value -1.0 to unsigned uint16 wraps
# around -- confirm this demonstration of overflow is intentional.
z = np.uint16(x)
array_info(z)
```
## Concatenation of arrays
```
x = np.array([1, 2, 3])
y = np.array([3, 2, 1])
result = np.concatenate([x, y])
array_info(result)
grid = np.array([[1, 2, 3],
                 [4, 5, 6]])
array_info(grid)
# axis=0 (the default) stacks rows vertically; axis=1 joins side by side.
result = np.concatenate([grid, grid])
array_info(result)
result = np.concatenate([grid, grid], axis=0)
array_info(result)
result = np.concatenate([grid, grid], axis=1)
array_info(result)
x = np.array([1, 2, 3])
grid = np.array([[4, 5, 6],
                 [7, 8, 9]])
# vstack treats the 1-D x as a row; hstack appends y as an extra column.
result = np.vstack([x, grid])
array_info(result)
y = np.array([[-1], [-1]])
result = np.hstack([grid, y])
array_info(result)
```
| github_jupyter |
# Project 3: Implement SLAM
---
## Project Overview
In this project, you'll implement SLAM for a robot that moves and senses in a 2 dimensional, grid world!
SLAM gives us a way to both localize a robot and build up a map of its environment as a robot moves and senses in real-time. This is an active area of research in the fields of robotics and autonomous systems. Since this localization and map-building relies on the visual sensing of landmarks, this is a computer vision problem.
Using what you've learned about robot motion, representations of uncertainty in motion and sensing, and localization techniques, you will be tasked with defining a function, `slam`, which takes in six parameters as input and returns the vector `mu`.
> `mu` contains the (x,y) coordinate locations of the robot as it moves, and the positions of landmarks that it senses in the world
You can implement helper functions as you see fit, but your function must return `mu`. The vector, `mu`, should have (x, y) coordinates interlaced, for example, if there were 2 poses and 2 landmarks, `mu` will look like the following, where `P` is the robot position and `L` the landmark position:
```
# Illustrative layout only (`matrix` here refers to the project's matrix
# class): robot poses (Px, Py) are interlaced first, landmark positions
# (Lx, Ly) come last.
mu = matrix([[Px0],
             [Py0],
             [Px1],
             [Py1],
             [Lx0],
             [Ly0],
             [Lx1],
             [Ly1]])
```
You can see that `mu` holds the poses first `(x0, y0), (x1, y1), ...,` then the landmark locations at the end of the matrix; we consider a `nx1` matrix to be a vector.
## Generating an environment
In a real SLAM problem, you may be given a map that contains information about landmark locations, and in this example, we will make our own data using the `make_data` function, which generates a world grid with landmarks in it and then generates data by placing a robot in that world and moving and sensing over some number of time steps. The `make_data` function relies on a correct implementation of robot move/sense functions, which, at this point, should be complete and in the `robot_class.py` file. The data is collected as an instantiated robot moves and senses in a world. Your SLAM function will take in this data as input. So, let's first create this data and explore how it represents the movement and sensor measurements that our robot takes.
---
## Create the world
Use the code below to generate a world of a specified size with randomly generated landmark locations. You can change these parameters and see how your implementation of SLAM responds!
`data` holds the sensors measurements and motion of your robot over time. It stores the measurements as `data[i][0]` and the motion as `data[i][1]`.
#### Helper functions
You will be working with the `robot` class that may look familiar from the first notebook,
In fact, in the `helpers.py` file, you can read the details of how data is made with the `make_data` function. It should look very similar to the robot move/sense cycle you've seen in the first notebook.
```
import numpy as np
from helpers import make_data
# your implementation of slam should work with the following inputs
# feel free to change these input values and see how it responds!
# world parameters
num_landmarks = 5  # number of landmarks
N = 20  # time steps
world_size = 100.0  # size of world (square)
# robot parameters
measurement_range = 50.0  # range at which we can sense landmarks
motion_noise = 2.0  # noise in robot motion
measurement_noise = 2.0  # noise in the measurements
distance = 20.0  # distance by which robot (intends to) move each iteration
# make_data instantiates a robot, AND generates random landmarks for a given world size and number of landmarks
data = make_data(N, num_landmarks, world_size, measurement_range, motion_noise, measurement_noise, distance)
```
### A note on `make_data`
The function above, `make_data`, takes in so many world and robot motion/sensor parameters because it is responsible for:
1. Instantiating a robot (using the robot class)
2. Creating a grid world with landmarks in it
**This function also prints out the true location of landmarks and the *final* robot location, which you should refer back to when you test your implementation of SLAM.**
The `data` this returns is an array that holds information about **robot sensor measurements** and **robot motion** `(dx, dy)` that is collected over a number of time steps, `N`. You will have to use *only* these readings about motion and measurements to track a robot over time and determine the locations of the landmarks using SLAM. We only print out the true landmark locations for comparison, later.
In `data` the measurement and motion data can be accessed from the first and second index in the columns of the data array. See the following code for an example, where `i` is the time step:
```
# Access pattern at time step i (i is supplied by the surrounding loop):
# index 0 holds the sensor measurements, index 1 the motion (dx, dy).
measurement = data[i][0]
motion = data[i][1]
```
```
# print out some stats about the data
time_step = 0
# each measurement entry appears to be [landmark_index, dx, dy] -- compare
# with the test_data examples further below to confirm.
print('Example measurements: \n', data[time_step][0])
print('\n')
print('Example motion: \n', data[time_step][1])
```
Try changing the value of `time_step`, you should see that the list of measurements varies based on what in the world the robot sees after it moves. As you know from the first notebook, the robot can only sense so far and with a certain amount of accuracy in the measure of distance between its location and the location of landmarks. The motion of the robot always is a vector with two values: one for x and one for y displacement. This structure will be useful to keep in mind as you traverse this data in your implementation of slam.
## Initialize Constraints
One of the most challenging tasks here will be to create and modify the constraint matrix and vector: omega and xi. In the second notebook, you saw an example of how omega and xi could hold all the values that define the relationships between robot poses `xi` and landmark positions `Li` in a 1D world, as seen below, where omega is the blue matrix and xi is the pink vector.
<img src='images/motion_constraint.png' width=50% height=50% />
In *this* project, you are tasked with implementing constraints for a 2D world. We are referring to robot poses as `Px, Py` and landmark positions as `Lx, Ly`, and one way to approach this challenge is to add *both* x and y locations in the constraint matrices.
<img src='images/constraints2D.png' width=50% height=50% />
You may also choose to create two of each omega and xi (one for x and one for y positions).
### TODO: Write a function that initializes omega and xi
Complete the function `initialize_constraints` so that it returns `omega` and `xi` constraints for the starting position of the robot. Any values that we do not yet know should be initialized with the value `0`. You may assume that our robot starts out in exactly the middle of the world with 100% confidence (no motion or measurement noise at this point). The inputs `N` time steps, `num_landmarks`, and `world_size` should give you all the information you need to construct intial constraints of the correct size and starting values.
*Depending on your approach you may choose to return one omega and one xi that hold all (x,y) positions *or* two of each (one for x values and one for y); choose whichever makes most sense to you!*
```
def initialize_constraints(N, num_landmarks, world_size):
    """Return freshly initialized constraint matrices (omega, xi) for SLAM.

    N -- number of time steps; num_landmarks -- landmark count;
    world_size -- width/height of the square world.
    """
    ## Recommended: derive the matrix dimensions (rows/cols) from N and
    ## num_landmarks and store them in a variable
    ## TODO: build the constraint matrix, Omega, with two initial "strength"
    ## values for the robot's starting x, y location
    ## TODO: build the constraint *vector*, xi; you can assume the robot
    ## starts out in the middle of the world with 100% confidence
    omega, xi = [0], [0]  # placeholders until the TODOs are implemented
    return omega, xi
```
### Test as you go
It's good practice to test out your code, as you go. Since `slam` relies on creating and updating constraint matrices, `omega` and `xi` to account for robot sensor measurements and motion, let's check that they initialize as expected for any given parameters.
Below, you'll find some test code that allows you to visualize the results of your function `initialize_constraints`. We are using the [seaborn](https://seaborn.pydata.org/) library for visualization.
**Please change the test values of N, landmarks, and world_size and see the results**. Be careful not to use these values as input into your final `slam` function.
This code assumes that you have created one of each constraint: `omega` and `xi`, but you can change and add to this code, accordingly. The constraints should vary in size with the number of time steps and landmarks as these values affect the number of poses a robot will take `(Px0,Py0,...Pxn,Pyn)` and landmark locations `(Lx0,Ly0,...Lxn,Lyn)` whose relationships should be tracked in the constraint matrices. Recall that `omega` holds the weights of each variable and `xi` holds the value of the sum of these variables, as seen in Notebook 2. You'll need the `world_size` to determine the starting pose of the robot in the world and fill in the initial values for `xi`.
```
# import data viz resources
import matplotlib.pyplot as plt
from pandas import DataFrame
import seaborn as sns
%matplotlib inline
# define a small N and world_size (small for ease of visualization)
N_test = 5
num_landmarks_test = 2
small_world = 10
# initialize the constraints
initial_omega, initial_xi = initialize_constraints(N_test, num_landmarks_test, small_world)
# define figure size (wide for the square omega matrix)
plt.rcParams["figure.figsize"] = (10,7)
# display omega
sns.heatmap(DataFrame(initial_omega), cmap='Blues', annot=True, linewidths=.5)
# define figure size (narrow for the single-column xi vector)
plt.rcParams["figure.figsize"] = (1,7)
# display xi
sns.heatmap(DataFrame(initial_xi), cmap='Oranges', annot=True, linewidths=.5)
```
---
## SLAM inputs
In addition to `data`, your slam function will also take in:
* N - The number of time steps that a robot will be moving and sensing
* num_landmarks - The number of landmarks in the world
* world_size - The size (w/h) of your world
* motion_noise - The noise associated with motion; the update confidence for motion should be `1.0/motion_noise`
* measurement_noise - The noise associated with measurement/sensing; the update weight for measurement should be `1.0/measurement_noise`
#### A note on noise
Recall that `omega` holds the relative "strengths" or weights for each position variable, and you can update these weights by accessing the correct index in omega `omega[row][col]` and *adding/subtracting* `1.0/noise` where `noise` is measurement or motion noise. `Xi` holds actual position values, and so to update `xi` you'll do a similar addition process only using the actual value of a motion or measurement. So for a vector index `xi[row][0]` you will end up adding/subtracting one measurement or motion divided by their respective `noise`.
### TODO: Implement Graph SLAM
Follow the TODO's below to help you complete this slam implementation (these TODO's are in the recommended order), then test out your implementation!
#### Updating with motion and measurements
With a 2D omega and xi structure as shown above (in earlier cells), you'll have to be mindful about how you update the values in these constraint matrices to account for motion and measurement constraints in the x and y directions. Recall that the solution to these matrices (which holds all values for robot poses `P` and landmark locations `L`) is the vector, `mu`, which can be computed at the end of the construction of omega and xi as the inverse of omega times xi: $\mu = \Omega^{-1}\xi$
**You may also choose to return the values of `omega` and `xi` if you want to visualize their final state!**
```
## TODO: Complete the code to implement SLAM

## slam takes in 6 arguments and returns mu,
## mu is the entire path traversed by a robot (all x,y poses) *and* all landmarks locations
def slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise):
    """Run Graph SLAM over `data`; return mu = omega^{-1} * xi
    (interlaced x,y robot poses followed by landmark positions)."""

    ## TODO: Use your initialization to create constraint matrices, omega and xi

    ## TODO: Iterate through each time step in the data
    ## get all the motion and measurement data as you iterate

    ## TODO: update the constraint matrix/vector to account for all *measurements*
    ## this should be a series of additions that take into account the measurement noise

    ## TODO: update the constraint matrix/vector to account for all *motion* and motion noise

    ## TODO: After iterating through all the data
    ## Compute the best estimate of poses and landmark positions
    ## using the formula, omega_inverse * Xi
    mu = None  # placeholder: replaced by the computed estimate once implemented

    return mu  # return `mu`
```
## Helper functions
To check that your implementation of SLAM works for various inputs, we have provided two helper functions that will help display the estimated pose and landmark locations that your function has produced. First, given a result `mu` and number of time steps, `N`, we define a function that extracts the poses and landmarks locations and returns those as their own, separate lists.
Then, we define a function that nicely print out these lists; both of these we will call, in the next step.
```
# a helper function that creates a list of poses and of landmarks for ease of printing
# this only works for the suggested constraint architecture of interlaced x,y poses
def get_poses_landmarks(mu, N):
    """Split the SLAM estimate `mu` into pose and landmark (x, y) tuples.

    mu -- column vector of interlaced x, y values: N robot poses followed by
          the landmark positions (total length 2*N + 2*num_landmarks).
    N  -- number of robot poses at the front of `mu`.
    Returns (poses, landmarks), each a list of (x, y) tuples.
    """
    # create a list of poses
    poses = []
    for i in range(N):
        poses.append((mu[2*i].item(), mu[2*i+1].item()))

    # BUG FIX: derive the landmark count from mu's length instead of relying
    # on a global `num_landmarks` variable that may not exist or be stale.
    num_landmarks = (len(mu) - 2 * N) // 2

    # create a list of landmarks
    landmarks = []
    for i in range(num_landmarks):
        landmarks.append((mu[2*(N+i)].item(), mu[2*(N+i)+1].item()))

    # return completed lists
    return poses, landmarks
def print_all(poses, landmarks):
    """Pretty-print estimated poses and landmark positions, 3 decimals each."""
    for title, entries in (('Estimated Poses:', poses),
                           ('Estimated Landmarks:', landmarks)):
        print('\n')
        print(title)
        for entry in entries:
            print('[' + ', '.join('%.3f' % value for value in entry) + ']')
```
## Run SLAM
Once you've completed your implementation of `slam`, see what `mu` it returns for different world sizes and different landmarks!
### What to Expect
The `data` that is generated is random, but you did specify the number, `N`, of time steps that the robot was expected to move and the `num_landmarks` in the world (which your implementation of `slam` should see and estimate a position for). Your robot should also start with an estimated pose in the very center of your square world, whose size is defined by `world_size`.
With these values in mind, you should expect to see a result that displays two lists:
1. **Estimated poses**, a list of (x, y) pairs that is exactly `N` in length since this is how many motions your robot has taken. The very first pose should be the center of your world, i.e. `[50.000, 50.000]` for a world that is 100.0 in square size.
2. **Estimated landmarks**, a list of landmark positions (x, y) that is exactly `num_landmarks` in length.
#### Landmark Locations
If you refer back to the printout of *exact* landmark locations when this data was created, you should see values that are very similar to those coordinates, but not quite (since `slam` must account for noise in motion and measurement).
```
# call your implementation of slam, passing in the necessary parameters
mu = slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise)

# print out the resulting landmarks and poses
# (mu is None until the slam TODOs above are implemented)
if(mu is not None):

    # get the lists of poses and landmarks
    # and print them out
    poses, landmarks = get_poses_landmarks(mu, N)
    print_all(poses, landmarks)
```
## Visualize the constructed world
Finally, using the `display_world` code from the `helpers.py` file (which was also used in the first notebook), we can actually visualize what you have coded with `slam`: the final position of the robot and the positon of landmarks, created from only motion and measurement data!
**Note that these should be very similar to the printed *true* landmark locations and final pose from our call to `make_data` early in this notebook.**
```
# import the helper function
from helpers import display_world

# Display the final world!

# define figure size
plt.rcParams["figure.figsize"] = (20,20)

# check if poses has been created (i.e. the SLAM cell above ran successfully;
# at notebook top level, locals() is the global namespace)
if 'poses' in locals():
    # print out the last pose
    print('Last pose: ', poses[-1])
    # display the last position of the robot *and* the landmark positions
    display_world(int(world_size), poses[-1], landmarks)
```
### Question: How far away is your final pose (as estimated by `slam`) compared to the *true* final pose? Why do you think these poses are different?
You can find the true value of the final pose in one of the first cells where `make_data` was called. You may also want to look at the true landmark locations and compare them to those that were estimated by `slam`. Ask yourself: what do you think would happen if we moved and sensed more (increased N)? Or if we had lower/higher noise parameters.
**Answer**: (Write your answer here.)
## Testing
To confirm that your slam code works before submitting your project, it is suggested that you run it on some test data and cases. A few such cases have been provided for you, in the cells below. When you are ready, uncomment the test cases in the next cells (there are two test cases, total); your output should be **close-to or exactly** identical to the given results. If there are minor discrepancies it could be a matter of floating point accuracy or in the calculation of the inverse matrix.
### Submit your project
If you pass these tests, it is a good indication that your project will pass all the specifications in the project rubric. Follow the submission instructions to officially submit!
```
# Here is the data and estimated outputs for test case 1
test_data1 = [[[[1, 19.457599255548065, 23.8387362100849], [2, -13.195807561967236, 11.708840328458608], [3, -30.0954905279171, 15.387879242505843]], [-12.2607279422326, -15.801093326936487]], [[[2, -0.4659930049620491, 28.088559771215664], [4, -17.866382374890936, -16.384904503932]], [-12.2607279422326, -15.801093326936487]], [[[4, -6.202512900833806, -1.823403210274639]], [-12.2607279422326, -15.801093326936487]], [[[4, 7.412136480918645, 15.388585962142429]], [14.008259661173426, 14.274756084260822]], [[[4, -7.526138813444998, -0.4563942429717849]], [14.008259661173426, 14.274756084260822]], [[[2, -6.299793150150058, 29.047830407717623], [4, -21.93551130411791, -13.21956810989039]], [14.008259661173426, 14.274756084260822]], [[[1, 15.796300959032276, 30.65769689694247], [2, -18.64370821983482, 17.380022987031367]], [14.008259661173426, 14.274756084260822]], [[[1, 0.40311325410337906, 14.169429532679855], [2, -35.069349468466235, 2.4945558982439957]], [14.008259661173426, 14.274756084260822]], [[[1, -16.71340983241936, -2.777000269543834]], [-11.006096015782283, 16.699276945166858]], [[[1, -3.611096830835776, -17.954019226763958]], [-19.693482634035977, 3.488085684573048]], [[[1, 18.398273354362416, -22.705102332550947]], [-19.693482634035977, 3.488085684573048]], [[[2, 2.789312482883833, -39.73720193121324]], [12.849049222879723, -15.326510824972983]], [[[1, 21.26897046581808, -10.121029799040915], [2, -11.917698965880655, -23.17711662602097], [3, -31.81167947898398, -16.7985673023331]], [12.849049222879723, -15.326510824972983]], [[[1, 10.48157743234859, 5.692957082575485], [2, -22.31488473554935, -5.389184118551409], [3, -40.81803984305378, -2.4703329790238118]], [12.849049222879723, -15.326510824972983]], [[[0, 10.591050242096598, -39.2051798967113], [1, -3.5675572049297553, 22.849456408289125], [2, -38.39251065320351, 7.288990306029511]], [12.849049222879723, -15.326510824972983]], [[[0, -3.6225556479370766, -25.58006865235512]], [-7.8874682868419965, 
-18.379005523261092]], [[[0, 1.9784503557879374, -6.5025974151499]], [-7.8874682868419965, -18.379005523261092]], [[[0, 10.050665232782423, 11.026385307998742]], [-17.82919359778298, 9.062000642947142]], [[[0, 26.526838150174818, -0.22563393232425621], [4, -33.70303936886652, 2.880339841013677]], [-17.82919359778298, 9.062000642947142]]]
## Test Case 1
##
# Estimated Pose(s):
# [50.000, 50.000]
# [37.858, 33.921]
# [25.905, 18.268]
# [13.524, 2.224]
# [27.912, 16.886]
# [42.250, 30.994]
# [55.992, 44.886]
# [70.749, 59.867]
# [85.371, 75.230]
# [73.831, 92.354]
# [53.406, 96.465]
# [34.370, 100.134]
# [48.346, 83.952]
# [60.494, 68.338]
# [73.648, 53.082]
# [86.733, 38.197]
# [79.983, 20.324]
# [72.515, 2.837]
# [54.993, 13.221]
# [37.164, 22.283]
# Estimated Landmarks:
# [82.679, 13.435]
# [70.417, 74.203]
# [36.688, 61.431]
# [18.705, 66.136]
# [20.437, 16.983]
### Uncomment the following three lines for test case 1 and compare the output to the values above ###
# mu_1 = slam(test_data1, 20, 5, 100.0, 2.0, 2.0)
# poses, landmarks = get_poses_landmarks(mu_1, 20)
# print_all(poses, landmarks)
# Here is the data and estimated outputs for test case 2
test_data2 = [[[[0, 26.543274387283322, -6.262538160312672], [3, 9.937396825799755, -9.128540360867689]], [18.92765331253674, -6.460955043986683]], [[[0, 7.706544739722961, -3.758467215445748], [1, 17.03954411948937, 31.705489938553438], [3, -11.61731288777497, -6.64964096716416]], [18.92765331253674, -6.460955043986683]], [[[0, -12.35130507136378, 2.585119104239249], [1, -2.563534536165313, 38.22159657838369], [3, -26.961236804740935, -0.4802312626141525]], [-11.167066095509824, 16.592065417497455]], [[[0, 1.4138633151721272, -13.912454837810632], [1, 8.087721200818589, 20.51845934354381], [3, -17.091723454402302, -16.521500551709707], [4, -7.414211721400232, 38.09191602674439]], [-11.167066095509824, 16.592065417497455]], [[[0, 12.886743222179561, -28.703968411636318], [1, 21.660953298391387, 3.4912891084614914], [3, -6.401401414569506, -32.321583037341625], [4, 5.034079343639034, 23.102207946092893]], [-11.167066095509824, 16.592065417497455]], [[[1, 31.126317672358578, -10.036784369535214], [2, -38.70878528420893, 7.4987265861424595], [4, 17.977218575473767, 6.150889254289742]], [-6.595520680493778, -18.88118393939265]], [[[1, 41.82460922922086, 7.847527392202475], [3, 15.711709540417502, -30.34633659912818]], [-6.595520680493778, -18.88118393939265]], [[[0, 40.18454208294434, -6.710999804403755], [3, 23.019508919299156, -10.12110867290604]], [-6.595520680493778, -18.88118393939265]], [[[3, 27.18579315312821, 8.067219022708391]], [-6.595520680493778, -18.88118393939265]], [[], [11.492663265706092, 16.36822198838621]], [[[3, 24.57154567653098, 13.461499960708197]], [11.492663265706092, 16.36822198838621]], [[[0, 31.61945290413707, 0.4272295085799329], [3, 16.97392299158991, -5.274596836133088]], [11.492663265706092, 16.36822198838621]], [[[0, 22.407381798735177, -18.03500068379259], [1, 29.642444125196995, 17.3794951934614], [3, 4.7969752441371645, -21.07505361639969], [4, 14.726069092569372, 32.75999422300078]], [11.492663265706092, 16.36822198838621]], [[[0, 
10.705527984670137, -34.589764174299596], [1, 18.58772336795603, -0.20109708164787765], [3, -4.839806195049413, -39.92208742305105], [4, 4.18824810165454, 14.146847823548889]], [11.492663265706092, 16.36822198838621]], [[[1, 5.878492140223764, -19.955352450942357], [4, -7.059505455306587, -0.9740849280550585]], [19.628527845173146, 3.83678180657467]], [[[1, -11.150789592446378, -22.736641053247872], [4, -28.832815721158255, -3.9462962046291388]], [-19.841703647091965, 2.5113335861604362]], [[[1, 8.64427397916182, -20.286336970889053], [4, -5.036917727942285, -6.311739993868336]], [-5.946642674882207, -19.09548221169787]], [[[0, 7.151866679283043, -39.56103232616369], [1, 16.01535401373368, -3.780995345194027], [4, -3.04801331832137, 13.697362774960865]], [-5.946642674882207, -19.09548221169787]], [[[0, 12.872879480504395, -19.707592098123207], [1, 22.236710716903136, 16.331770792606406], [3, -4.841206109583004, -21.24604435851242], [4, 4.27111163223552, 32.25309748614184]], [-5.946642674882207, -19.09548221169787]]]
## Test Case 2
##
# Estimated Pose(s):
# [50.000, 50.000]
# [69.035, 45.061]
# [87.655, 38.971]
# [76.084, 55.541]
# [64.283, 71.684]
# [52.396, 87.887]
# [44.674, 68.948]
# [37.532, 49.680]
# [31.392, 30.893]
# [24.796, 12.012]
# [33.641, 26.440]
# [43.858, 43.560]
# [54.735, 60.659]
# [65.884, 77.791]
# [77.413, 94.554]
# [96.740, 98.020]
# [76.149, 99.586]
# [70.211, 80.580]
# [64.130, 61.270]
# [58.183, 42.175]
# Estimated Landmarks:
# [76.777, 42.415]
# [85.109, 76.850]
# [13.687, 95.386]
# [59.488, 39.149]
# [69.283, 93.654]
### Uncomment the following three lines for test case 2 and compare to the values above ###
# mu_2 = slam(test_data2, 20, 5, 100.0, 2.0, 2.0)
# poses, landmarks = get_poses_landmarks(mu_2, 20)
# print_all(poses, landmarks)
```
| github_jupyter |
# Quality Metrics and Reconstruction Demo
Demonstrate the use of full reference metrics by comparing the reconstruction of a simulated phantom using SIRT, ART, and MLEM.
```
import numpy as np
import matplotlib.pyplot as plt
from xdesign import *
NPIXEL = 128
```
## Generate a phantom
Use one of XDesign's various pre-made and procedurally generated phantoms.
```
np.random.seed(0)
soil_like_phantom = Softwood()
```
Generate a figure showing the phantom and save the discretized ground truth map for later.
```
discrete = sidebyside(soil_like_phantom, NPIXEL)
if False:
plt.savefig('Soil_sidebyside.png', dpi='figure',
orientation='landscape', papertype=None, format=None,
transparent=True, bbox_inches='tight', pad_inches=0.0,
frameon=False)
plt.show()
```
## Simulate data acquisition
Generate a list of probe coordinates to simulate data acquisition for parallel beam around 180 degrees.
```
# Probe geometry for a parallel-beam scan over 180 degrees.
# FIX: the original `angles = np.linspace(...),` ended with a stray
# trailing comma, which wrapped the array in a 1-tuple; np.meshgrid
# happened to flatten it, but the comma was almost certainly unintended.
angles = np.linspace(0, np.pi, NPIXEL, endpoint=False)
# detector positions: NPIXEL bins centered on [-0.5, 0.5)
positions = np.linspace(0, 1, NPIXEL, endpoint=False) - 0.5 + 1/NPIXEL/2
# all (angle, position) combinations for the acquisition
theta, h = np.meshgrid(angles, positions)
```
Make a probe.
```
probe = Probe(size=1/NPIXEL)
```
Use the probe to measure the phantom.
```
sino = probe.measure(soil_like_phantom, theta, h)
# Transform data from attenuated intensity to attenuation coefficient
sino = -np.log(sino)
plt.imshow(sino.reshape(NPIXEL, NPIXEL), cmap='viridis', origin='lower')
plt.show()
```
## Reconstruct
Reconstruct the phantom using 3 different techniques: ART, SIRT, and MLEM.
```
niter = 32 # number of iterations
gmin = [-0.5, -0.5]
gsize = [1, 1]
data = sino
init = np.full((NPIXEL, NPIXEL), 1e-12)
rec_art = art(gmin, gsize, data, theta, h, init, niter)
init = np.full((NPIXEL, NPIXEL), 1e-12)
rec_sirt = sirt(gmin, gsize, data, theta, h, init, niter)
init = np.full((NPIXEL, NPIXEL), 1e-12)
rec_mlem = mlem(gmin, gsize, data, theta, h, init, niter)
plt.figure(figsize=(12,4), dpi=100)
plt.subplot(141)
plt.imshow(rec_art, cmap='gray', origin='lower', vmin=0, vmax=1)
plt.title('ART')
plt.subplot(142)
plt.imshow(rec_sirt, cmap='gray', interpolation='none', origin='lower', vmin=0, vmax=1)
plt.title('SIRT')
plt.subplot(143)
plt.imshow(rec_mlem, cmap='gray', interpolation='none', origin='lower', vmin=0, vmax=1)
plt.title('MLEM')
plt.subplot(144)
plt.imshow(discrete, cmap='gray', interpolation='none', origin='lower', vmin=0, vmax=1)
plt.title('truth')
plt.show()
```
## Quality Metrics
Compute local quality for each reconstruction using MS-SSIM, a convolution based quality metric.
```
# Mean MS-SSIM score for each reconstruction, in ART/SIRT/MLEM order.
# msssim returns (scales, mean_score, quality_map); keep the mean score.
quality = [msssim(discrete, rec)[1] for rec in (rec_art, rec_sirt, rec_mlem)]
```
Plot the average quality for each reconstruction. Then display the local quality map for each reconstruction to see why certain reconstructions are ranked higher than others.
```
plt.figure()
plt.bar(["ART", "SIRT", "MLEM"], quality)
plt.show()
```
| github_jupyter |
```
from __future__ import division, print_function
import numpy as np
import cPickle as pickle
import os, glob
from utils import models
from utils.sample_helpers import JumpProposal, get_parameter_groups
from enterprise.pulsar import Pulsar
from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc
from astropy.time import Time
# post-proc stuff
import matplotlib.pyplot as plt
import pandas as pd
from utils.UL_uncert import UL_uncert
from acor import acor
from corner import corner
%matplotlib inline
```
# custom BWM model and block
```
from enterprise import constants as const
from enterprise.signals import parameter
from enterprise.signals import prior
from enterprise.signals import utils
from enterprise.signals import deterministic_signals
from enterprise.signals import gp_signals
from enterprise.signals import signal_base
def red_noise_block(prior='log-uniform', Tspan=None):
    """
    Build a red noise model: a power law with 30 sampling frequencies.

    :param prior:
        Prior on log10_A. Default is "log-uniform". Use "uniform" for
        upper limits, or "log-normal" for a Normal(-15, 4) prior.
    :param Tspan:
        Sets frequency sampling f_i = i / Tspan. Default (None) will use
        the overall time span for the individual pulsar.
    :returns: a FourierBasisGP signal modeling the red noise
    """
    # amplitude prior: map the prior name to a parameter factory
    amplitude_priors = {
        'uniform': lambda: parameter.LinearExp(-20, -11),
        'log-uniform': lambda: parameter.Uniform(-20, -11),
        'log-normal': lambda: parameter.Normal(-15, 4),
    }
    if prior not in amplitude_priors:
        raise NotImplementedError('Unknown prior for red noise amplitude!')
    log10_amplitude = amplitude_priors[prior]()

    # spectral index of the power law
    spectral_index = parameter.Uniform(0, 7)

    # assemble the power-law PSD and wrap it in a Fourier-basis GP
    psd = utils.powerlaw(log10_A=log10_amplitude, gamma=spectral_index)
    return gp_signals.FourierBasisGP(psd, components=30, Tspan=Tspan)
def bwm_block(Tmin, Tmax, amp_prior='log-uniform',
              skyloc=None, logmin=-18, logmax=-11,
              name='bwm'):
    """
    Returns deterministic GW burst with memory model:

        1. Burst event parameterized by time, sky location,
           polarization angle, and amplitude

    :param Tmin:
        Min time to search, probably first TOA (MJD).
    :param Tmax:
        Max time to search, probably last TOA (MJD).
    :param amp_prior:
        Prior on log10_A. Default is "log-uniform". Use "uniform" for
        upper limits.
    :param skyloc:
        Fixed sky location of BWM signal search as [cos(theta), phi].
        Search over sky location if ``None`` given.
    :param logmin:
        log of minimum BWM amplitude for prior (log10)
    :param logmax:
        log of maximum BWM amplitude for prior (log10)
    :param name:
        Name of BWM signal.
    """
    # BWM parameters -- all named '<name>_*' so they can be identified
    # in the flattened PTA parameter list during post-processing
    amp_name = '{}_log10_A'.format(name)
    if amp_prior == 'uniform':
        # LinearExp: uniform in A rather than log A (upper-limit runs)
        log10_A_bwm = parameter.LinearExp(logmin, logmax)(amp_name)
    elif amp_prior == 'log-uniform':
        log10_A_bwm = parameter.Uniform(logmin, logmax)(amp_name)
    elif amp_prior == 'log-normal':
        # NOTE(review): in this branch (logmin, logmax) are reused as the
        # (mean, stdev) of the Normal prior, not as bounds -- this agrees
        # with the "mean for log-normal"/"stdev for log-normal" comments
        # in the UL analysis cell, but confirm it is intentional.
        log10_A_bwm = parameter.Normal(logmin, logmax)(amp_name)
    else:
        raise NotImplementedError('Unknown prior for BWM amplitude!')

    # polarization angle, uniform over [0, pi)
    pol_name = '{}_pol'.format(name)
    pol = parameter.Uniform(0, np.pi)(pol_name)

    # burst epoch, uniform over the search window (MJD)
    t0_name = '{}_t0'.format(name)
    t0 = parameter.Uniform(Tmin, Tmax)(t0_name)

    # sky location: searched over, or held fixed when skyloc was given
    costh_name = '{}_costheta'.format(name)
    phi_name = '{}_phi'.format(name)
    if skyloc is None:
        costh = parameter.Uniform(-1, 1)(costh_name)
        phi = parameter.Uniform(0, 2*np.pi)(phi_name)
    else:
        costh = parameter.Constant(skyloc[0])(costh_name)
        phi = parameter.Constant(skyloc[1])(phi_name)

    # deterministic BWM waveform wrapped as an enterprise signal
    bwm_wf = utils.bwm_delay(log10_h=log10_A_bwm, t0=t0,
                             cos_gwtheta=costh, gwphi=phi, gwpol=pol)
    bwm = deterministic_signals.Deterministic(bwm_wf, name=name)

    return bwm
def model_bwm(psrs,
              Tmin_bwm=None, Tmax_bwm=None,
              skyloc=None, logmin=-18, logmax=-11,
              upper_limit=False, bayesephem=False, dmgp=False, free_rn=False):
    """
    Reads in a list of enterprise Pulsar instances and returns a PTA
    instantiated with a BWM model:

    per pulsar:
        1. fixed EFAC per backend/receiver system
        2. fixed EQUAD per backend/receiver system
        3. fixed ECORR per backend/receiver system
        4. Red noise modeled as a power-law with 30 sampling frequencies
        5. Linear timing model.

    global:
        1. Deterministic GW burst with memory signal.
        2. Optional physical ephemeris modeling.

    :param psrs:
        List of enterprise Pulsar instances.
    :param Tmin_bwm:
        Min time to search for BWM (MJD). If omitted, uses first TOA.
    :param Tmax_bwm:
        Max time to search for BWM (MJD). If omitted, uses last TOA.
    :param skyloc:
        Fixed sky location of BWM signal search as [cos(theta), phi].
        Search over sky location if ``None`` given.
    :param logmin:
        log of minimum BWM amplitude for prior (log10)
    :param logmax:
        log of maximum BWM amplitude for prior (log10)
    :param upper_limit:
        Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
        is recommended that the spectral index also be fixed to a
        specific value.
    :param bayesephem:
        Include BayesEphem model. Set to False by default
    :param dmgp:
        Include a DM-variation GP, an annual DM signal, and (for
        J1713+0747) an exponential DM dip. Set to False by default
    :param free_rn:
        Use free red noise spectrum model. Set to False by default
    :returns: an enterprise ``signal_base.PTA`` instance
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'
    #amp_prior = 'log-normal'

    # find the maximum time span to set GW frequency sampling
    tmin = np.min([p.toas.min() for p in psrs])
    tmax = np.max([p.toas.max() for p in psrs])
    Tspan = tmax - tmin

    # default BWM search window: full data span, converted s -> MJD
    # FIX: compare against None with `is`, not `==`
    if Tmin_bwm is None:
        Tmin_bwm = tmin/const.day
    if Tmax_bwm is None:
        Tmax_bwm = tmax/const.day

    # white noise (EFAC/EQUAD/ECORR held fixed at supplied values)
    s = models.white_noise_block(vary=False)

    # red noise: free spectrum or power law
    if free_rn:
        s += models.free_noise_block(prior=amp_prior, Tspan=Tspan)
    else:
        s += red_noise_block(prior=amp_prior, Tspan=Tspan)

    # GW BWM signal block
    s += bwm_block(Tmin_bwm, Tmax_bwm, amp_prior=amp_prior,
                   skyloc=skyloc, logmin=logmin, logmax=logmax,
                   name='bwm')

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)

    # timing model
    s += gp_signals.TimingModel(use_svd=True)

    # DM variations model
    if dmgp:
        s += models.dm_noise_block(gp_kernel='diag', psd='powerlaw',
                                   prior=amp_prior, Tspan=Tspan)
        s += models.dm_annual_signal()

        # DM exponential dip for J1713's DM event
        dmexp = models.dm_exponential_dip(tmin=54500, tmax=54900)
        s2 = s + dmexp

    # set up PTA: J1713+0747 gets the extra DM-dip model when dmgp is on
    mods = []
    for p in psrs:
        if dmgp and 'J1713+0747' == p.name:
            mods.append(s2(p))
        else:
            mods.append(s(p))
    pta = signal_base.PTA(mods)

    return pta
```
# UL analysis
```
# Full analysis window (MJD) over the 'nano11' data set
TMIN = 53217.0
TMAX = 57387.0

# 41 edges -> 40 intervals of 2.5% of the span; each entry of tlim below
# is a triple of consecutive edges, i.e. a 5% window with 50% overlap
tchunk = np.linspace(TMIN, TMAX, 41) # break in 2.5% chunks

tlim = []
for ii in range(len(tchunk)-2):
    tlim.append(tchunk[ii:ii+3])

# input data and fixed white-noise-parameter locations
datadir = '/home/pbaker/nanograv/data/nano11'
noisefile = '/home/pbaker/nanograv/data/nano11_setpars.pkl'

# which pulsar and solar-system ephemeris to analyze
psr_name = 'J1744-1134' # 'J1909-3744'
ephem = 'DE436'
ii_t = None # 0-40 or None
if ii_t:
TMIN, CENTER, TMAX = tlim[ii_t]
chunk = '{:.2f}'.format(CENTER)
else:
chunk = 'all'
rundir = '/home/pbaker/nanograv/bwm/sngl/det_{0:s}/{1:s}/{2:s}/'.format(ephem, psr_name, chunk)
!mkdir -p $rundir
if ii_t is not None:
print(CENTER, Time(CENTER, format='mjd').decimalyear)
# read in data from .par / .tim (first matching pair for this pulsar)
par = glob.glob(datadir +'/'+ psr_name +'*.par')[0]
tim = glob.glob(datadir +'/'+ psr_name +'*.tim')[0]
psr = Pulsar(par, tim, ephem=ephem, timing_package='tempo2')

# previously fit white-noise parameters for this pulsar
with open(noisefile, "rb") as f:
    setpars = pickle.load(f)

# clip the search window away from the ends of the data set using the
# eps-th and eps-th-from-last observing epochs
# NOTE(review): assumes the quantization-matrix columns are time-ordered
# -- confirm against enterprise's create_quantization_matrix
U,_ = utils.create_quantization_matrix(psr.toas)
eps = 9
TMIN = np.floor(max(U[:,eps] * psr.toas/const.day))
TMAX = np.ceil(max(U[:,-eps] * psr.toas/const.day))

# plot TOA resid for this PSR
toas = psr.toas/const.day
resid = psr.residuals / 1.0e-6  # presumably seconds -> microseconds; confirm
dt = psr.toaerrs / 1.0e-6       # presumably seconds -> microseconds; confirm

fig = plt.figure(figsize=(16,4))
ax = fig.add_subplot(111)
ax.errorbar(toas, resid, yerr=dt, marker='.', linestyle='none')
# dotted lines mark the clipped search window
ax.axvline(x=TMIN, color='C1', ls=':')
ax.axvline(x=TMAX, color='C1', ls=':')
ax.set_xlabel('TOA (MJD)')
ax.set_ylabel('residual ($u$s)');
#################
##  pta model  ##
#################

# NOTE(review): the two comments below describe the 'log-normal' branch
# of bwm_block; for the (log-)uniform priors these are simply the
# lower/upper bounds of log10_A.
logminA = -18 # mean for log-normal
logmaxA = -9 # stdev for log-normal

# data span in MJD (toas are in seconds)
# NOTE(review): 86400 is hard-coded here while const.day is used
# elsewhere in this notebook -- presumably the same value; confirm.
tmin = psr.toas.min() / 86400
tmax = psr.toas.max() / 86400

if TMIN is not None and TMAX is not None:
    # validate the requested search window against the data span
    if TMIN<tmin:
        err = "tmin ({:.1f}) BEFORE first TOA ({:.1f})".format(TMIN, tmin)
        raise RuntimeError(err)
    elif TMAX>tmax:
        err = "tmax ({:.1f}) AFTER last TOA ({:.1f})".format(TMAX, tmax)
        raise RuntimeError(err)
    elif TMIN>TMAX:
        err = "tmin ({:.1f}) BEFORE last tmax ({:.1f})".format(TMIN, TMAX)
        raise RuntimeError(err)
    else:
        t0min = TMIN
        t0max = TMAX
else:
    # no explicit window: clip the sparse ends of the data span
    tclip = (tmax - tmin) * 0.05
    t0min = tmin + tclip*2 # clip first 10%
    t0max = tmax - tclip # clip last 5%

# single-pulsar PTA with the BWM model defined above
pta = model_bwm([psr],
                upper_limit=False, bayesephem=False,
                logmin=logminA, logmax=logmaxA,
                Tmin_bwm=t0min, Tmax_bwm=t0max)
pta.set_default_params(setpars)

# record the parameter ordering for post-processing
outfile = os.path.join(rundir, 'params.txt')
with open(outfile, 'w') as f:
    for pname in pta.param_names:
        f.write(pname+'\n')
# total number of MCMC samples to draw
N = 500000

###############
##  sampler  ##
###############

# dimension of parameter space: one prior draw per parameter
# FIX: np.hstack requires a sequence of arrays; passing a bare generator
# is deprecated and raises a TypeError on modern NumPy versions.
x0 = np.hstack([p.sample() for p in pta.params])
ndim = len(x0)

# initial jump covariance matrix
cov = np.diag(np.ones(ndim) * 0.1**2)

# parameter groupings
groups = get_parameter_groups(pta)

sampler = ptmcmc(ndim, pta.get_lnlikelihood, pta.get_lnprior,
                 cov, groups=groups, outDir=rundir, resume=True)

# add prior draws to proposal cycle
jp = JumpProposal(pta)
sampler.addProposalToCycle(jp.draw_from_prior, 5)
sampler.addProposalToCycle(jp.draw_from_bwm_prior, 10)

# extra log-uniform draws on the BWM amplitude
draw_bwm_loguni = jp.build_log_uni_draw('bwm_log10_A', logminA, logmaxA)
sampler.addProposalToCycle(draw_bwm_loguni, 10)

# SAMPLE!!
sampler.sample(x0, N, SCAMweight=35, AMweight=10, DEweight=50)
```
# Post Proc
```
def trace_plot(chain, pars,
               cols=3, wid_per_col=4, aspect=4/3,
               kwargs=None):
    """
    Plot the MCMC trace (sample value vs. step) for each parameter.

    :param chain: 2D array of samples; column ``pp`` is plotted for
        parameter ``pars[pp]``
    :param pars: parameter names, used as x-axis labels
    :param cols: number of subplot columns
    :param wid_per_col: width in inches of each column
    :param aspect: width/height aspect ratio of each subplot
    :param kwargs: optional dict of keyword arguments forwarded to
        ``plt.plot``
    :returns: the created matplotlib Figure
    """
    # FIX: use None instead of a mutable default argument ({}), which is
    # shared across calls and could accumulate state.
    if kwargs is None:
        kwargs = {}

    # enough rows to fit all parameters (ceiling division)
    rows = -(-len(pars) // cols)

    ax = []
    width = wid_per_col * cols
    height = wid_per_col * rows / aspect
    fig = plt.figure(figsize=(width, height))
    for pp, par in enumerate(pars):
        ax.append(fig.add_subplot(rows, cols, pp+1))
        ax[pp].plot(chain[:,pp], **kwargs)
        ax[pp].set_xlabel(par)
    plt.tight_layout()

    return fig
def hist_plot(chain, pars, bins=30,
              cols=3, wid_per_col=4, aspect=4/3,
              kwargs=None):
    """
    Plot a 1D marginal histogram for each parameter.

    :param chain: 2D array of samples; column ``pp`` is histogrammed for
        parameter ``pars[pp]``
    :param pars: parameter names, used as x-axis labels
    :param bins: number of histogram bins
    :param cols: number of subplot columns
    :param wid_per_col: width in inches of each column
    :param aspect: width/height aspect ratio of each subplot
    :param kwargs: optional dict of keyword arguments forwarded to
        ``plt.hist``; entries override the defaults below
    :returns: the created matplotlib Figure
    """
    # FIX: use None instead of a mutable default argument ({}).
    if kwargs is None:
        kwargs = {}
    # defaults, overridable by the caller's kwargs
    hist_kwargs = {'density': True, 'histtype': 'step', **kwargs}

    # enough rows to fit all parameters (ceiling division)
    rows = -(-len(pars) // cols)

    ax = []
    width = wid_per_col * cols
    height = wid_per_col * rows / aspect
    fig = plt.figure(figsize=(width, height))
    for pp, par in enumerate(pars):
        ax.append(fig.add_subplot(rows, cols, pp+1))
        ax[pp].hist(chain[:,pp], bins=bins, **hist_kwargs)
        ax[pp].set_xlabel(par)
    plt.tight_layout()

    return fig
def bayes_fac(samples, Nmin=200, logAmin=-18, logAmax=-9,
              dAmin=0.01, dAmax=0.5):
    """
    Computes the Savage-Dickey Bayes Factor and uncertainty.

    The posterior density at the lower prior boundary is estimated by
    counting samples within a small bin of width ``delta`` above
    ``logAmin``; the Bayes factor (prior density / posterior density) is
    averaged over a range of bin widths, keeping only widths whose bin
    contains more than ``Nmin`` samples.

    :param samples: MCMC samples of GWB (or common red noise) amplitude
    :param Nmin: minimum number of samples required in a bin for it to
        contribute to the estimate (the original docstring documented a
        nonexistent ``ntol`` parameter)
    :param logAmin: lower bound of the (log-)uniform amplitude prior
    :param logAmax: upper bound of the (log-)uniform amplitude prior
    :param dAmin: smallest bin width to try
    :param dAmax: largest bin width to try
    :returns: (bayes factor, 1-sigma bayes factor uncertainty);
        (nan, nan) if no bin width contains more than Nmin samples
    """
    samples = np.asarray(samples)
    N = len(samples)                    # total sample count (loop-invariant)
    prior = 1 / (logAmax - logAmin)     # uniform prior density

    bf = []
    for delta in np.linspace(dAmin, dAmax, 100):
        n = np.sum(samples <= (logAmin + delta))
        # only well-populated bins contribute; this also avoids the
        # divide-by-zero warnings the original hit for empty bins
        if n > Nmin:
            post = n / N / delta        # posterior density estimate at boundary
            bf.append(prior / post)

    if not bf:
        # no bin had enough samples -- the estimate is undefined
        # (the original returned nan here too, via mean of an empty array,
        # but with runtime warnings)
        return np.nan, np.nan

    bf = np.array(bf)
    return bf.mean(), bf.std()
# load the parameter-name list written out before sampling
with open(rundir + '/params.txt', 'r') as f:
    params = [line.rstrip('\n') for line in f]

# get just bwm params
par_bwm = []
for par in params:
    if par.startswith('bwm_'):
        par_bwm.append(par)
idx_bwm = [params.index(p) for p in par_bwm]

# NOTE(review): at this point `prior` is the enterprise.signals.prior
# MODULE imported at the top of the notebook, not a string, so this
# comparison is always False and the else-branch always runs.
# Presumably a leftover from a notebook variant that set a `prior`
# string -- confirm intent.
if prior == 'true-uniform':
    idx_A = par_bwm.index('bwm_A')
else:
    idx_A = par_bwm.index('bwm_log10_A')
idx_t0 = par_bwm.index('bwm_t0')

# raw PTMCMC chain: one row per sample; parameter columns first,
# bookkeeping columns at the end
chain_raw = pd.read_csv(rundir + '/chain_1.txt',
                        sep='\t', dtype=float, header=None).values

# discard 25% burn-in, then keep every 5th sample
burnfrac = 0.25
thin = 5
burn = int(burnfrac * len(chain_raw))
chain = chain_raw[burn::thin]

chain_bwm = chain[:,idx_bwm]
# assumes the log-likelihood is 5 columns from the end of each row --
# TODO confirm against the PTMCMCSampler chain-file format
chain_L = chain[:,-5]
corL = acor(chain_L)[0]  # autocorrelation length of the likelihood trace
N = len(chain_bwm)
print("N = {}, corL = {}".format(N, corL))

# trace plots: BWM parameters plus the log-likelihood
ch_plt = np.hstack([chain_bwm, chain_L.reshape(N,1)])
par_plt = par_bwm + ['logL']
trace_plot(ch_plt, par_plt, cols=3, wid_per_col=4);

# normal pdf with mean logminA and width logmaxA, matching the
# 'log-normal' amplitude prior option (plot line below is commented out)
xs = np.linspace(-23, -9.5, 100)
prior = 1/np.sqrt(2*np.pi)/logmaxA * np.exp(-0.5*(xs-logminA)**2/logmaxA**2)
fig = hist_plot(ch_plt, par_plt, cols=3, wid_per_col=4)
for ax in fig.axes:
    ax.set_yscale('log')
#fig.axes[1].plot(xs, prior)

# Savage-Dickey Bayes factor from the amplitude marginal
BF, dBF = bayes_fac(chain_bwm[:, idx_A], Nmin=50)

corner_kwargs = {'bins':30,
                 'show_titles':True,
                 'labels':par_bwm,
                 'smooth':0.5,
                 'plot_datapoints':False,
                 'plot_density':True,
                 'plot_contours':True,
                 'fill_contours':False,}
fig = corner(chain_bwm, color='C0', **corner_kwargs);
fig.suptitle("{:s} -- BF = {:.3f}".format(psr_name, BF), fontsize=20);
```
| github_jupyter |
# Initial Setups
## (Google Colab use only)
```
# Use Google Colab
use_colab = True
# Is this notebook running on Colab?
# If so, then google.colab package (github.com/googlecolab/colabtools)
# should be available in this environment
# Previous version used importlib, but we could do the same thing with
# just attempting to import google.colab
try:
from google.colab import drive
colab_available = True
except:
colab_available = False
if use_colab and colab_available:
drive.mount('/content/drive')
# cd to the appropriate working directory under my Google Drive
%cd 'drive/My Drive/cs696ds_lexalytics/Language Model Finetuning'
# Install packages specified in requirements
!pip install -r requirements.txt
# List the directory contents
!ls
```
## Experiment Parameters
**NOTE**: The following `experiment_id` MUST BE CHANGED in order to avoid overwriting the files from other experiments!!!!!!
**NOTE 2**: The values for the variables in the cell below can be overridden by `papermill` at runtime. Variables in other cells cannot be changed in this manner.
```
# We will use the following string ID to identify this particular (training) experiments
# in directory paths and other settings
experiment_id = 'lm_further_pretraining_gpt-2_yelp_restaurants'
# Random seed
random_seed = 696
# Dataset size related
total_subset_proportion = 1.0 # Do we want to use the entirety of the training set, or some parts of it?
validation_dataset_proportion = 0.1 # Proportion to be reserved for validation (after selecting random subset with total_subset_proportion)
# Training hyperparameters
num_train_epochs = 20 # Number of epochs
per_device_train_batch_size = 16 # training batch size PER COMPUTE DEVICE
per_device_eval_batch_size = 16 # evaluation batch size PER COMPUTE DEVICE
learning_rate = 1e-5
weight_decay = 0.01
# Settings for checkpoint resumption
# Provide a string of relative path to transformers.Trainer compatible checkpoint.
# If None, then the training will start from scratch.
checkpoint_path = None
```
## Package Imports
```
import sys
import os
import random
import numpy as np
import torch
import transformers
import datasets
import utils
# Random seed settings
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
# Print version information
print("Python version: " + sys.version)
print("NumPy version: " + np.__version__)
print("PyTorch version: " + torch.__version__)
print("Transformers version: " + transformers.__version__)
```
## PyTorch GPU settings
```
if torch.cuda.is_available():
torch_device = torch.device('cuda')
# Set this to True to make your output immediately reproducible
# Note: https://pytorch.org/docs/stable/notes/randomness.html
torch.backends.cudnn.deterministic = False
# Disable 'benchmark' mode: Set this False if you want to measure running times more fairly
# Note: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936
torch.backends.cudnn.benchmark = True
# Faster Host to GPU copies with page-locked memory
use_pin_memory = True
# Number of compute devices to be used for training
training_device_count = torch.cuda.device_count()
# CUDA libraries version information
print("CUDA Version: " + str(torch.version.cuda))
print("cuDNN Version: " + str(torch.backends.cudnn.version()))
print("CUDA Device Name: " + str(torch.cuda.get_device_name()))
print("CUDA Capabilities: "+ str(torch.cuda.get_device_capability()))
print("Number of CUDA devices: "+ str(training_device_count))
else:
torch_device = torch.device('cpu')
use_pin_memory = False
# Number of compute devices to be used for training
training_device_count = 1
print()
print("PyTorch device selected:", torch_device)
```
# Further pre-training
## Load the GPT-2 model
```
tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2", cache_dir='./gpt2_cache')
# https://github.com/huggingface/transformers/issues/8452
tokenizer.pad_token = tokenizer.eos_token
model = transformers.AutoModelForCausalLM.from_pretrained("gpt2", cache_dir='./gpt2_cache')
```
## Load the Yelp restaurants dataset
```
yelp = datasets.load_dataset(
'./dataset_scripts/yelp_restaurants',
data_files={
'train': 'dataset_files/yelp_restaurants/yelp_academic_dataset_review.json',
'restaurant_ids': 'dataset_files/yelp_restaurants/restaurantIDs.txt',
},
cache_dir='./dataset_cache')
data_train = yelp['train']
print("Number of training data (original):", len(data_train))
data_train_selected = data_train.shuffle(seed=random_seed).select(np.arange(0, int(len(data_train) * total_subset_proportion)))
print("Number of training data (subset):", len(data_train_selected))
# Check out how individual data points look like
print(data_train_selected[0])
```
### Preprocessing: Encode the text with Tokenizer
```
train_dataset = data_train_selected.map(
lambda e: tokenizer(e['text'], truncation=True, padding='max_length', max_length=256),
remove_columns=data_train_selected.column_names,
batched=True, num_proc=8)
```
### Train-validation split
```
# Training set size after validation split
new_train_dataset_size = int(len(train_dataset) * (1 - validation_dataset_proportion))
new_valid_dataset_size = len(train_dataset) - new_train_dataset_size
new_train_dataset = train_dataset.select(indices=np.arange(new_train_dataset_size))
new_valid_dataset = train_dataset.select(indices=np.arange(new_train_dataset_size, new_train_dataset_size + new_valid_dataset_size))
print("Training dataset after split:", len(new_train_dataset))
print("Validation dataset after split:", len(new_valid_dataset))
```
## Pre-train further
### Training settings
```
# CLM
collator = transformers.DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
# How many training steps would we have?
approx_total_training_steps = len(new_train_dataset) // (per_device_train_batch_size * training_device_count) * num_train_epochs
print("There will be approximately %d training steps." % approx_total_training_steps)
training_args = transformers.TrainingArguments(
output_dir=os.path.join('.', 'progress', experiment_id, 'results'), # output directory
overwrite_output_dir=True,
num_train_epochs=num_train_epochs, # total number of training epochs
per_device_train_batch_size=per_device_train_batch_size,
per_device_eval_batch_size=per_device_eval_batch_size,
evaluation_strategy='epoch',
logging_dir=os.path.join('.', 'progress', experiment_id, 'logs'), # directory for storing logs
logging_first_step=True,
weight_decay=weight_decay, # strength of weight decay
seed=random_seed,
learning_rate=learning_rate,
fp16=True,
fp16_backend='amp',
prediction_loss_only=True,
load_best_model_at_end=True,
dataloader_num_workers=training_device_count * 2,
dataloader_pin_memory=use_pin_memory
)
print(training_args.n_gpu)
trainer = transformers.Trainer(
model=model,
args=training_args,
data_collator=collator, # do the masking on the go
train_dataset=new_train_dataset,
eval_dataset=new_valid_dataset,
)
```
### Training loop
```
# If checkpoint_path was given, print it out
if checkpoint_path is not None:
print("Resuming from", str(checkpoint_path))
trainer.train(resume_from_checkpoint=checkpoint_path)
```
### Save the model to the local directory
```
trainer.save_model(os.path.join('.', 'trained_models', experiment_id))
tokenizer.save_pretrained(os.path.join('.', 'trained_models', experiment_id))
```
## LM Evaluation
```
eval_results = trainer.evaluate()
print(eval_results)
perplexity = np.exp(eval_results["eval_loss"])
print(perplexity)
```
## Playing with my own input sentences
```
# NOTE(review): this cell appears to be carried over from a masked-LM
# (BERT-style) notebook. The GPT-2 tokenizer defines no mask token (only
# pad_token was set, to eos_token, earlier in this notebook), so
# `tokenizer.mask_token` is presumably None here and the f-string below
# would literally insert the text "None" -- confirm whether this cell
# should be rewritten for causal-LM generation instead.
example = f"""The {tokenizer.mask_token} of {tokenizer.mask_token} is awful, but its {tokenizer.mask_token} is fantastic."""
example_encoded = tokenizer.encode(example, add_special_tokens=True, return_tensors="pt").to(torch_device)

# Let's decode this back just to see how they were actually encoded
example_tokens = []
for id in example_encoded[0]:
    example_tokens.append(tokenizer.convert_ids_to_tokens(id.item()))
print(example_tokens)

# forward pass; argmax over logits gives the most likely token at each
# position (for a causal LM this is the next-token prediction, not a
# fill-in of the position itself)
example_prediction = model(example_encoded)
example_prediction_argmax = torch.argmax(example_prediction[0], dim=-1)[0]
print(example_prediction_argmax)
print(tokenizer.decode(example_prediction_argmax))
```
| github_jupyter |
## Face and Facial Keypoint detection
After you've trained a neural network to detect facial keypoints, you can then apply this network to *any* image that includes faces. The neural network expects a Tensor of a certain size as input and, so, to detect any face, you'll first have to do some pre-processing.
1. Detect all the faces in an image using a face detector (we'll be using a Haar Cascade detector in this notebook).
2. Pre-process those face images so that they are grayscale, and transformed to a Tensor of the input size that your net expects. This step will be similar to the `data_transform` you created and applied in Notebook 2, whose job was to rescale, normalize, and turn any image into a Tensor to be accepted as input to your CNN.
3. Use your trained model to detect facial keypoints on the image.
---
In the next python cell we load in required libraries for this section of the project.
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
```
#### Select an image
Select an image to perform facial keypoint detection on; you can select any image of faces in the `images/` directory.
```
import cv2

# load in color image for face detection
image = cv2.imread('images/obamas.jpg')
# NOTE(review): cv2.imread returns None (no exception) if the path is wrong — verify the file exists

# switch red and blue color channels
# --> by default OpenCV assumes BLUE comes first, not RED as in many images
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# plot the image
fig = plt.figure(figsize=(9,9))
plt.imshow(image)
```
## Detect all faces in an image
Next, you'll use one of OpenCV's pre-trained Haar Cascade classifiers, all of which can be found in the `detector_architectures/` directory, to find any faces in your selected image.
In the code below, we loop over each face in the original image and draw a red square on each face (in a copy of the original image, so as not to modify the original). You can even [add eye detections](https://docs.opencv.org/3.4.1/d7/d8b/tutorial_py_face_detection.html) as an *optional* exercise in using Haar detectors.
An example of face detection on a variety of images is shown below.
<img src='images/haar_cascade_ex.png' width=80% height=80%/>
```
# load in a haar cascade classifier for detecting frontal faces
face_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')

# run the detector
# the output here is an array of detections; the corners of each detection box
# if necessary, modify these parameters until you successfully identify every face in a given image
# (positional args: 1.2 = scaleFactor, 2 = minNeighbors)
faces = face_cascade.detectMultiScale(image, 1.2, 2)

# make a copy of the original image to plot detections on
image_with_detections = image.copy()

# loop over the detected faces, mark the image where each face is found
for (x,y,w,h) in faces:
    # draw a rectangle around each detected face
    # you may also need to change the width of the rectangle drawn depending on image resolution
    cv2.rectangle(image_with_detections,(x,y),(x+w,y+h),(255,0,0),3)

fig = plt.figure(figsize=(9,9))
plt.imshow(image_with_detections)
```
## Loading in a trained model
Once you have an image to work with (and, again, you can select any image of faces in the `images/` directory), the next step is to pre-process that image and feed it into your CNN facial keypoint detector.
First, load your best model by its filename.
```
import torch
from models import Net

# Instantiate the keypoint CNN and load the trained weights from disk.
net = Net()
## TODO: load the best saved model parameters (by your path name)
## You'll need to un-comment the line below and add the correct name for *your* saved model
net.load_state_dict(torch.load('saved_models/keypoints_model_1.pt'))
## print out your net and prepare it for testing (uncomment the line below)
# eval() switches layers like dropout/batch-norm to inference behavior
net.eval()
```
## Keypoint detection
Now, we'll loop over each detected face in an image (again!) only this time, you'll transform those faces in Tensors that your CNN can accept as input images.
### TODO: Transform each detected face into an input Tensor
You'll need to perform the following steps for each detected face:
1. Convert the face from RGB to grayscale
2. Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
3. Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
4. Reshape the numpy image into a torch image.
**Hint**: The sizes of faces detected by a Haar detector and the faces your network has been trained on are of different sizes. If you find that your model is generating keypoints that are too small for a given face, try adding some padding to the detected `roi` before giving it as input to your model.
You may find it useful to consult to transformation code in `data_load.py` to help you perform these processing steps.
### TODO: Detect and display the predicted keypoints
After each face has been appropriately converted into an input Tensor for your network to see as input, you can apply your `net` to each face. The output should be the predicted facial keypoints. These keypoints will need to be "un-normalized" for display, and you may find it helpful to write a helper function like `show_keypoints`. You should end up with an image like the following with facial keypoints that closely match the facial features on each individual face:
<img src='images/michelle_detected.png' width=30% height=30%/>
```
def net_output(image):
    """Run the keypoint network on a batched image tensor.

    Returns the predictions reshaped to (batch_size, 68, 2):
    68 (x, y) keypoints per face.
    """
    batch = image.type(torch.FloatTensor)  # the network expects float input
    raw = net(batch)
    # each sample yields 68 keypoints with 2 coordinates each
    return raw.view(raw.size()[0], 68, -1)
def show_all_keypoints(image, predicted_key_pts, gt_pts=None):
    """Display a grayscale image with predicted keypoints in magenta and,
    when provided, ground-truth keypoints in green."""
    plt.imshow(image, cmap='gray')
    xs, ys = predicted_key_pts[:, 0], predicted_key_pts[:, 1]
    plt.scatter(xs, ys, s=20, marker='.', c='m')
    if gt_pts is None:
        return
    # overlay ground truth as green points
    plt.scatter(gt_pts[:, 0], gt_pts[:, 1], s=20, marker='.', c='g')
def visualize_output(torch_image, torch_key_pts):
    """Un-transform one (image, keypoints) tensor pair and plot it."""
    # tensor -> numpy, then CHW -> HWC so matplotlib can display it
    img = torch_image[0].data.numpy()
    img = np.transpose(img, (1, 2, 0))
    # undo the keypoint normalization applied during training
    pts = torch_key_pts[0].data.numpy() * 50.0 + 100
    show_all_keypoints(np.squeeze(img), pts)
image_copy = np.copy(image)

# loop over the detected faces from your haar cascade
for (x,y,w,h) in faces:
    # Select the region of interest that is the face in the image
    # (30 px of padding on each side so the crop framing resembles the training data)
    # NOTE(review): y-30 / x-30 can go negative for faces near the image border — verify
    roi = image_copy[y-30:y+h+30, x-30:x+w+30]
    ## TODO: Convert the face region from RGB to grayscale
    roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
    ## TODO: Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
    roi = roi/255.
    ## TODO: Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
    roi = cv2.resize(roi, (224, 224))
    ## TODO: Reshape the numpy image shape (H x W x C) into a torch image shape (C x H x W)
    roi = roi.reshape((224, 224, 1))
    roi = roi.transpose((2, 0, 1))
    # Make it [B, C, H, W] : B = batch size
    roi = roi[None, :, :, :]
    ## TODO: Make facial keypoint predictions using your loaded, trained network
    # Convert to a torch tensor
    roi = torch.from_numpy(roi)
    # Predict output keypoints for the roi
    output_pts = net_output(roi)
    # Plot this face with its (un-normalized) predicted keypoints
    visualize_output(roi, output_pts)
## TODO: Display each detected face and the corresponding keypoints
## TODO: Display each detected face and the corresponding keypoints
```
| github_jupyter |
# <center>Using Ordinary Differential Equations (ODEs) in Simulating 2-D Wildland Fire Behavior</center>
<center>by Diane Wang</center>
---
# ODEs used in fire behavior simulation
Indoor fire models are subdivided into two categories, zone models and field models (Rehm et al. 2011). The formulation of both zone and field models start from the basic conservation laws of mass, momentum, energy and species, together with radiative transport (Rehm et al. 2011). They take advantage of approximate mathematical submodels of the physical processes that occur in enclosure fires to simplify the conservation laws (Rehm et al. 2011). As a result, they end up with nonlinear ordinary differential equations together with complex, nonlinear algebraic relations connecting dependent variables. These simplifications reduce both the data and the computational resources required to predict the progression of a fire in a structure (Rehm et al. 2011). The model CFAST, Jones et al (2004), is a recent example from NIST of this class of models. The ordinary differential equations (ODEs) are often used to describe the fire front propagation. Zone fire models usually consist of ordinary differential equations coupled with algebraic equations. These equations govern all of the dependent variables (room pressure, layer height, layer temperatures and densities, etc.) for a simple two-layer zone-fire model (Rehm et al. 1992).
Rehm et al. also considered the Lagrangian approach to describe fire front propagation. The governing equations in their model are two ordinary differential equations (ODEs) describing the propagation of each Lagrangian element in the x,y-plane.
$$
\frac{dx}{dt} = U_nn_x
$$
$$
\frac{dy}{dt} = U_nn_y
$$
where $x$, $y$ is the location of the Lagrangian element on the fire front, $U_n$ is the normal component of the spread-rate vector of the fire front at the location $(x,y)$, and $n_x$, $n_y$ are the components of the unit normal to the fire front directed toward the unburnt fuel. The fire front curve at any specified time t is described by the vector function $(x(s,t), y(s,t))$, where s is a parameter determining the distance along the curve (Rehm et al. 2008).
However, the fire behavior is usually too complex. Therefore, Partial Differential Equations (PDEs) are more useful and practical in fire simulation. Computational fluid dynamics (CFD) models are very useful to predict fire behaviors by including fluid mechanics, heat transfer, combustion, and interaction with surroundings. These models usually need to solve partial differential equations. (I will not use the complicated CFD models and thus I will not dig into the details of the equations included in these models.) Mandel et al. combined a data assimilation method with a partial differential equation (PDE) based model for wildland fire that achieves good physical behavior and can run faster than real time. The model is based on just two PDEs in two horizontal spatial dimensions: prognostic (predictive) equations for heat and fuel (Mandel et al. 2008). They use a single semi-empirical reaction rate to achieve the desired combustion model results. In other words, they solved the set of equations known as the reaction-convection-diffusion problem using reaction rates based on the Arrhenius equation, which relates the rate at which a chemical reaction proceeds to the temperature (Mandel et al. 2008).
Masarie worked with both ordinary differential equation (ODE) and partial differential equation (PDE) systems models to solve inverse problems for an ODE system and a parabolic heat-type PDE. ODEs are used to model the complexity of
interacting fire demand and risk using linear interactions among their levels and rates of change (Masarie et al. 2018). The ODE model is for a collection of fire suppression resources being shared amongst a collection of fire management zones (Masarie et al. 2018).
---
# References
- Jones, Walter W., Richard D. Peacock, Glenn P. Forney, Paul A. Reneke (2004) CFAST Consolidated Model of Fire Growth and Smoke Transport (Version 6) Technical Reference Guide, NIST Special Publication 1030. http://fire.nist.gov/bfrlpubs/.
- Mandel, Jan, Lynn S. Bennethum, Jonathan D. Beezley, Janice L. Coen, Craig C. Douglas, Minjeong Kim, and Anthony Vodacek. “A Wildland Fire Model with Data Assimilation.” Mathematics and Computers in Simulation 79, no. 3 (December 2008): 584–606. https://doi.org/10.1016/j.matcom.2008.03.015.
- Masarie, Alex Taylor. “DIFFERENTIAL EQUATION MODELS OF WILDFIRE SUPPRESSION ALLOCATION,” n.d., 133.
- Rehm, Ronald G. “The Effects of Winds from Burning Structures on Ground-Fire Propagation at the Wildland–Urban Interface.” Combustion Theory and Modelling 12, no. 3 (May 20, 2008): 477–96. https://doi.org/10.1080/13647830701843288.
- Rehm, Ronald G, and Glenn P Forney. “A Note on the Pressure Equations Used in Zone Fire Modeling.” Gaithersburg, MD: National Institute of Standards and Technology, 1992. https://doi.org/10.6028/NIST.IR.4906.
- Rehm, Ronald G, and Randall J McDermott. “Mathematical Modeling of Wildland-Urban Interface Fires.” 0 ed. Gaithersburg, MD: National Institute of Standards and Technology, 2011. https://doi.org/10.6028/NIST.IR.7803.
| github_jupyter |
# A Neural Network for Regression (Estimate blood pressure from PPG signal)
*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [HW page](http://kovan.ceng.metu.edu.tr/~sinan/DL/index.html) on the course website.*
Having gained some experience with neural networks, let us train a network that estimates the blood pressure from a PPG signal window.
All of your work for this exercise will be done in this notebook.
# A Photoplethysmograph (PPG) signal
A PPG (photoplethysmograph) signal is a signal obtained with a pulse oximeter, which illuminates the skin and measures changes in light absorption. A PPG signal carries rich information about the status of the cardiovascular health of a person, such as breathing rate, heart rate and blood pressure. An example is shown below, where you also see the blood pressure signal that we will estimate (the data also has the ECG signal, which you should ignore).
<img width="80%" src="PPG_ABG_ECG_example.png">
# Constructing the Dataset
In this task, you are expected to perform the full pipeline for creating a learning system from scratch. Here is how you should construct the dataset:
* Download the dataset from the following website, and only take "Part 1" from it (it is too big): https://archive.ics.uci.edu/ml/datasets/Cuff-Less+Blood+Pressure+Estimation
* Take a window of size $W$ from the PPG channel between time $t$ and $t+W$. Let us call this $\textbf{x}_t$.
* Take the corresponding window of size $W$ from the ABP (arterial blood pressure) channel between time $t$ and $t+W$. Find the maxima and minima of this signal within the window (you can use "findpeaks" from Matlab or "find_peaks_cwt" from scipy). Here is an example window from the ABP signal, and its peaks:
<img width="60%" src="ABP_peaks.png">
* Calculate the average of the maxima, call it $y^1_t$, and the average of the minima, call it $y^2_t$.
* Slide the window over the PPG signals and collect many $(\textbf{x}_t, <y^1_t, y^2_t>)$ instances. In other words, your network outputs two values.
* This will be your input-output for training the network.
```
import random
import numpy as np
from metu.data_utils import load_dataset
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading extenrnal modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """Maximum elementwise relative error between arrays x and y."""
    # clamp the denominator away from zero to avoid division blow-ups
    denom = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(np.abs(x - y) / denom)
# Create a small net and some toy data to check your implementations.
# Note that we set the random seed for repeatable experiments.
from cs231n.classifiers.neural_net_for_regression import TwoLayerNet
input_size = 4
hidden_size = 10
num_classes = 3
num_inputs = 5
def init_toy_model():
    """Build a small TwoLayerNet with a fixed seed for repeatable checks."""
    np.random.seed(0)  # deterministic weight initialization
    model = TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)
    return model
def init_toy_data():
    """Return a small fixed (X, y) toy regression set."""
    np.random.seed(1)  # deterministic inputs
    inputs = 10 * np.random.randn(num_inputs, input_size)
    targets = np.array([[0, 1, 2], [1, 2, 3], [2, 3, 4], [2, 1, 4], [2, 1, 4]])
    return inputs, targets
net = init_toy_model()
X, y = init_toy_data()
```
# Forward pass: compute scores
Open the file `cs231n/classifiers/neural_net_for_regression.py` and look at the method `TwoLayerNet.loss`. This function is very similar to the loss functions you have written for the previous exercises: It takes the data and weights and computes the *regression* scores, the squared error loss, and the gradients on the parameters.
To be more specific, you will implement the following loss function:
$$\frac{1}{2}\sum_i\sum_{j} (o_{ij} - y_{ij})^2 + \frac{1}{2}\lambda\sum_j w_j^2,$$
where $i$ runs through the samples in the batch; $o_{ij}$ is the prediction of the network for the $i^{th}$ sample for output $j$, and $y_{ij}$ is the correct value; $\lambda$ is the weight of the regularization term.
The first layer uses ReLU as the activation function. The output layer does not use any activation functions.
Implement the first part of the forward pass which uses the weights and biases to compute the scores for all inputs.
```
scores = net.loss(X)
print ('Your scores:')
print (scores)
print('')
print ('correct scores:')
correct_scores = np.asarray([
[-0.81233741, -1.27654624, -0.70335995],
[-0.17129677, -1.18803311, -0.47310444],
[-0.51590475, -1.01354314, -0.8504215 ],
[-0.15419291, -0.48629638, -0.52901952],
[-0.00618733, -0.12435261, -0.15226949]])
print (correct_scores)
print('')
# The difference should be very small. We get < 1e-7
print ('Difference between your scores and correct scores:')
print (np.sum(np.abs(scores - correct_scores)))
```
# Forward pass: compute loss
In the same function, implement the second part that computes the data and regularizaion loss.
```
# With targets and regularization, loss() returns (loss, gradients).
loss, _ = net.loss(X, y, reg=0.1)
correct_loss = 66.3406756909  # reference value for this fixed-seed toy setup

print('loss:', loss)
# should be very small, we get < 1e-10
print ('Difference between your loss and correct loss:')
print (np.sum(np.abs(loss - correct_loss)))
```
# Backward pass
Implement the rest of the function. This will compute the gradient of the loss with respect to the variables `W1`, `b1`, `W2`, and `b2`. Now that you (hopefully!) have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check:
```
from cs231n.gradient_check import eval_numerical_gradient

# Use numeric gradient checking to check your implementation of the backward pass.
# If your implementation is correct, the difference between the numeric and
# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.
loss, grads = net.loss(X, y, reg=0.1)

# these should all be less than 1e-8 or so
for param_name in grads:
    # The lambda ignores its argument W: eval_numerical_gradient perturbs
    # net.params[param_name] in place, then re-evaluates the full loss.
    f = lambda W: net.loss(X, y, reg=0.1)[0]
    param_grad_num = eval_numerical_gradient(f, net.params[param_name])
    print ('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])))
```
# Load the PPG dataset for training your regression network
```
# Load the PPG dataset
# If your memory turns out to be sufficient, try loading a subset
def get_data(datafile,
             training_ratio=0.9,
             test_ratio=0.06,
             val_ratio=0.01,
             window=input_size,
             width_limit=50,
             stride=750):
    """Load the windowed PPG dataset and split it into train/test/val sets.

    Args:
        datafile: path to the .mat file with the raw PPG/ABP recordings.
        training_ratio, test_ratio, val_ratio: split fractions of the data.
        window: input window size W (note: the default binds the module-level
            ``input_size`` at definition time, so pass it explicitly).
        width_limit, stride: windowing parameters forwarded to load_dataset.

    Returns:
        (X_train, y_train, X_val, y_val, X_test, y_test)
    """
    X, y = load_dataset(datafile, window=window, stride=stride, width_limit=width_limit)

    # Split the data into training, validation and test sets
    length = len(y)
    num_training = int(length*training_ratio)
    num_val = int(length*val_ratio)
    num_test = min((length-num_training-num_val), int(length*test_ratio))

    # BUG FIX: was range(num_training-1), which silently dropped the last
    # training sample — index num_training-1 ended up in no split at all.
    mask = range(num_training)
    X_train = X[mask]
    y_train = y[mask]
    mask = range(num_training, num_training+num_test)
    X_test = X[mask]
    y_test = y[mask]
    mask = range(num_training+num_test, num_training+num_test+num_val)
    X_val = X[mask]
    y_val = y[mask]
    return X_train, y_train, X_val, y_val, X_test, y_test
datafile = './metu/dataset/Part_1.mat' #TODO: PATH to your data file
input_size = 1000 # TODO: Size of the input of the network (window width W in samples)

X_train, y_train, X_val, y_val, X_test, y_test = get_data(datafile, window=input_size, width_limit=50, stride=750)

print ("Number of instances in the training set: ", len(X_train))
print ("Number of instances in the validation set: ", len(X_val))
print ("Number of instances in the testing set: ", len(X_test))
```
# Now train our network on the PPG dataset
```
# Now, let's train a neural network
input_size = input_size
hidden_size = 500 # TODO: Choose a suitable hidden layer size
num_classes = 2 # We have two outputs (avg. of ABP maxima and avg. of minima per window)
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
                  num_iters=50000, batch_size=64,
                  learning_rate=1e-5, learning_rate_decay=0.95,
                  reg=0.5, verbose=True)

# Predict on the validation set
#val_err = ... # TODO: Perform prediction on the validation set
# squared error summed over the two outputs, averaged over samples
val_err = np.sum(np.square(net.predict(X_val) - y_val), axis=1).mean()
print ('Validation error: ', val_err)
```
# Debug the training and improve learning
You should be able to get a validation error of 5.
So far so good. But, is it really good? Let us plot the validation and training errors to see how good the network did. Did it memorize or generalize? Discuss your observations and conclusions. If its performance is not looking good, propose and test measures. This is the part that will show me how well you have digested everything covered in the lectures.
```
# Plot the loss function and train / validation errors
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')

plt.subplot(2, 1, 2)
train = plt.plot(stats['train_err_history'], label='train')
val = plt.plot(stats['val_err_history'], label='val')
plt.legend(loc='upper right', shadow=True)
plt.title('Classification error history')
plt.xlabel('Epoch')
# BUG FIX: label typo 'Clasification' -> 'Classification'
plt.ylabel('Classification error')
plt.show()

print(stats['train_err_history'])
# NOTE(review): this assumes a batch size of 32, but training above used
# batch_size=64 — confirm which value the error history was recorded with.
iterations_per_epoch = int(max(X_train.shape[0] / 32, 1))
print(iterations_per_epoch, X_train.shape[0])
```
# Finetuning and Improving Your Network (Bonus)
There are many aspects and hyper-parameters you can play with. Do play with them and find the best setting here.
| github_jupyter |
## GPS Spoofing Detection
### 1. load data and preprocess
```
# Load Data
import utils
import os
import numpy as np
import config

# A and B are two aligned image sequences; pairs share the same index.
A, B = utils.load_image_pairs(path=config.SWISS_1280x720)
assert A.shape[0]==B.shape[0]
n = A.shape[0]
print(A.shape, B.shape)

# Some configuration
#feature_map_file_name = './mid_product/features_suzhou_res34_eval.h5'#'features_suzhou_res50.h5'
feature_map_file_name = config.FULL_RESIZED_FEATURE
#dst_file_name = './mid_product/dst_suzhou_res34_eval.npy'# 'dst_suzhou_res50.npy'
# feature_shape = (512, 18, 26) # SWISS, resnet-18/34
feature_shape = (512, 17, 34) # SUZHOU, 1280x720, resnet-18/34
# feature_shape = (2048, 17, 34) # SUZHOU, resnet-50

# Preprocess data by doing transformation
import torch
#A = A.astype(np.float)/255.0
#B = B.astype(np.float)/255.0 #.transpose(0,3,1,2)
# ImageNet channel means/stds, as expected by torchvision's pretrained ResNets
# NOTE(review): the /255 scaling above is commented out — (A-mean)/std assumes
# A is already in [0,1]; verify what load_image_pairs returns.
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
# NHWC -> NCHW float tensors
x_a = torch.from_numpy((A-mean)/std).permute(0,3,1,2).float()
x_b = torch.from_numpy((B-mean)/std).permute(0,3,1,2).float()
print(x_a.size(), x_b.size())
```
### 2. get feature maps
```
import torchvision.models as models
from torch import nn

# Use an ImageNet-pretrained ResNet-34 as a frozen feature extractor.
pretrained_model = models.resnet34(pretrained=True)
# NOTE(review): [:-1] drops only the final fc layer and keeps avgpool, whose
# output is (1, 512, 1, 1); the declared feature_shape expects spatial maps
# like (512, 17, 34), which would require [:-2]. Verify the intended cut —
# assigning a (1, 512, 1, 1) array into a (512, 17, 34) slot broadcasts silently.
feature_extractor = nn.Sequential(*list(pretrained_model.children())[:-1])
feature_extractor.eval()
for param in feature_extractor.parameters():
    param.requires_grad = False
# Generate feature map and save
import h5py
def h5_save(fname, f_a, f_b):
    """Write the two feature-map arrays to *fname* as datasets 'f_a' and 'f_b'."""
    with h5py.File(fname, 'w') as h5file:
        h5file.create_dataset('f_a', data=f_a)
        h5file.create_dataset('f_b', data=f_b)
def h5_read(fname):
    """Read *fname* and return the stored ('f_a', 'f_b') arrays."""
    with h5py.File(fname, 'r') as h5file:
        feats_a = h5file['f_a'][:]
        feats_b = h5file['f_b'][:]
    return feats_a, feats_b
if not os.path.exists(feature_map_file_name):
    # One forward pass per image pair; feature maps are cached to disk so
    # re-runs can skip the slow extraction step.
    f_a = np.zeros((n,)+feature_shape)
    f_b = np.zeros((n,)+feature_shape)
    for i in range(n):
        print( "Generating feature maps of %d th pair."%(i) )
        a = feature_extractor(x_a[i:i+1,:,:,:])
        b = feature_extractor(x_b[i:i+1,:,:,:])
        f_a[i] = a.detach().numpy()
        f_b[i] = b.detach().numpy()
    h5_save(feature_map_file_name, f_a, f_b)
else:
    print("Feature maps file already exists, we just read it.")
    f_a, f_b = h5_read(feature_map_file_name)

# compute distance between unpaired and paired images:
# dst[idx, shift] = ||f_a[idx] - f_b[(idx+shift) % n]||, so column 0 (shift 0)
# holds the distances of the true pairs.
print("A domain feature maps size:", f_a.shape)
dst = np.zeros((n,n))
for shift in range(n):
    for idx in range(n):
        a = f_a[idx:idx+1]
        b = f_b[(idx+shift)%n:(idx+shift)%n+1]
        dst[idx,shift] = np.linalg.norm(a - b)
        print('dst(idx,shift)(%d,%d)=%f' % (idx,shift,dst[idx,shift]))

# BUG FIX: dst_file_name only existed in a commented-out line above, so
# np.save raised a NameError. Derive the cache path from the feature-map
# file so the two caches always travel together.
dst_file_name = os.path.splitext(feature_map_file_name)[0] + '_dst.npy'
np.save(dst_file_name, dst)
# visualize
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
fig_size = [10,10]
plt.rcParams["figure.figsize"] = fig_size
plt.axis('equal')
ax = sns.heatmap(dst,
xticklabels=2,
yticklabels=2)
ax.set_xlabel('Shift')
ax.set_ylabel('Image Index')
ax.set_title('Distance Matrix')
plt.show()
```
### 3. analyse the feature maps
```
print('Min, Max and Mean of Distances:')
print(np.min(dst), np.max(dst), np.average(dst))
# lower/higher are reused later as the threshold sweep range for the ROC curve
lower, higher = np.min(dst), np.max(dst)
# column 0 holds paired (shift == 0) distances; the remaining columns are unpaired
dst_t, dst_f = dst[:,0], dst[:,1:]
#print(dst_t.shape, dst_f.shape)
print('Min, Max and Mean of paired images:', np.min(dst_t), np.max(dst_t), np.mean(dst_t))
print('Min, Max and Mean of unpaired images:', np.min(dst_f), np.max(dst_f), np.mean(dst_f))
print('Sorted distance of paired images:')
print(np.sort(dst_t))
print('First n sorted distance of unpaired images:')
print(np.sort(dst_f.flatten())[:n])
def predict(dst, threshold):
    """Binary decision matrix: 1 where distance <= threshold (predicted pair), else 0.

    BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    use the builtin ``int`` instead.
    """
    return (dst <= threshold).astype(int)
def ground_truth(dst):
    """Label matrix for ``dst``: only shift 0 (column 0) is a true pair.

    BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    use the builtin ``int`` instead.
    """
    n = dst.shape[0]
    gt = np.zeros((n,n)).astype(int)
    gt[:,0] = 1
    return gt
def confusion_matrix(pred, gt):
    """Compute detection metrics, treating column 0 as the positive class.

    Column 0 of ``gt``/``pred`` holds the paired (positive) decisions; all
    other columns are unpaired (negatives). Removed the unused local ``n``.

    Returns:
        (TP, FP, TN, FN, TPR, FPR, accuracy, precision, recall, F1)

    Note: the ratio values may be NaN/inf when a denominator is zero
    (e.g. no positive predictions); callers silence this via np.seterr.
    """
    TP = np.sum(gt[:,0] == pred[:,0])
    FN = np.sum(gt[:,0] != pred[:,0])
    TN = np.sum(gt[:,1:] == pred[:,1:])
    FP = np.sum(gt[:,1:] != pred[:,1:])
    TPR=TP/(TP+FN)
    FPR=FP/(FP+TN)
    ACC = (TP+TN)/(TP+TN+FP+FN)
    precision = TP/(TP+FP)
    recall = TP/(TP+FN)
    F1 = 2*precision*recall/(precision+recall)
    return TP,FP,TN,FN, TPR,FPR, ACC,precision,recall,F1
# Classify every (idx, shift) pair with a fixed distance threshold.
threshold = 465  # NOTE(review): hand-tuned — presumably picked from the sorted distances above; verify
pred = predict(dst, threshold)
gt = ground_truth(dst)
print("TP, FP, TN, FN, TPR, FPR, Accuracy, Precision, Recall, F1:")
print(confusion_matrix(pred, gt))

#%matplotlib inline
plt.axis('equal')
ax = sns.heatmap(pred,
                 xticklabels=2,
                 yticklabels=2)
ax.set_xlabel('Shift')
ax.set_ylabel('Image Index')
ax.set_title('Prediction Matrix')
plt.show()
# Draw ROC curve by sweeping the decision threshold over the full distance range.
np.seterr(divide='ignore',invalid='ignore')  # confusion_matrix can divide by zero at extreme thresholds
ROC_x, ROC_y = [0], [0]
for threshold in range(int(lower),int(higher)+1):
    pred = predict(dst, threshold)
    gt = ground_truth(dst)
    conf_mat = confusion_matrix(pred, gt)
    x, y = conf_mat[5], conf_mat[4] # x: FPR, y: TPR
    ROC_x.append(x)
    ROC_y.append(y)
# anchor the curve at (1, 1)
ROC_x.append(1)
ROC_y.append(1)

#%matplotlib inline
plt.plot(ROC_x, ROC_y)
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.axis('equal')
plt.show()
```
| github_jupyter |
```
!pip install qucumber
import numpy as np
import torch
import matplotlib.pyplot as plt
from qucumber.nn_states import ComplexWaveFunction
from qucumber.callbacks import MetricEvaluator
import qucumber.utils.unitaries as unitaries
import qucumber.utils.cplx as cplx
import qucumber.utils.training_statistics as ts
import qucumber.utils.data as data
import qucumber
# set random seed on cpu but not gpu, since we won't use gpu for this tutorial
qucumber.set_random_seed(1234, cpu=True, gpu=False)
```
The main difference between the previous tasks and the additional data provided is that in the first cases we tried to reconstruct the energy of the system from which we measured, while in the latter the reconstruction is of the original state from which the measurements were made.
In the following code we made a 4 qubit LiH state reconstruction by another method using the qucumber library and following the example with 2 qubits at the following link:
https://github.com/PIQuIL/QuCumber/blob/master/examples/Tutorial2_TrainComplexWaveFunction/tutorial_qubits.ipynb
# Reconstruction of a LiH
## The wavefunction to be reconstructed
The four qubits wavefunction below (coefficients stored in `LiH - psi.txt`) will be reconstructed.
$$\vert\psi \rangle = \alpha1 \vert0000\rangle + \beta1 \vert 0001\rangle + \gamma1 \vert0010\rangle + \delta1 \vert0011\rangle $$
+
$$\alpha2 \vert0100\rangle + \beta2 \vert 0101\rangle + \gamma2 \vert0110\rangle + \delta2 \vert0111\rangle$$
+
$$\alpha3 \vert1000\rangle + \beta3 \vert 1001\rangle + \gamma3 \vert1010\rangle + \delta3 \vert1011\rangle$$
+
$$\alpha4 \vert1100\rangle + \beta4 \vert 1101\rangle + \gamma4 \vert1110\rangle + \delta4 \vert1111\rangle$$
where
$$
\alpha 1 = 4.9639 e-03
\beta 1 = -1.8227 e-16
\gamma 1 = 5.7627 e-02
\delta 1 = -1.1165 e-01
$$
;
$$
\alpha 2 = 3.2638 e-02
\beta 2 = 2.4447 e-16
\gamma 2 = -3.5453 e-02
\delta 2 = 5.7627 e-02
$$
;
$$
\alpha 3 = -8.1068 e-17
\beta 3 = 2.1391 e-16
\gamma 3 = -4.5975 e-16
\delta 3 = 7.6073 e-16
$$
;
$$
\alpha 4 = 9.8866 e-01
\beta 4 = 4.2995 e-16
\gamma 4 = 3.2638 e-02
\delta 4 = 4.9639 e-03
$$
The example dataset, `LiH - train_samples.txt`, comprises of 500 $\sigma$ measurements made in various bases (X, Y and Z). A corresponding file containing the bases for each data point in `LiH - train_samples.txt`, `LiH - train_bases.txt`, is also required. As per convention, spins are represented in binary notation with zero and one denoting spin-down and spin-up, respectively.
## Using qucumber to reconstruct the wavefunction
The Python class `ComplexWaveFunction` contains generic properties of a RBM meant to reconstruct a complex wavefunction, the most notable one being the gradient function required for stochastic gradient descent.
To instantiate a `ComplexWaveFunction` object, one needs to specify the number of visible and hidden units in the RBM. The number of visible units, `num_visible`, is given by the size of the physical system, i.e. the number of spins or qubits (4 in this case), while the number of hidden units, `num_hidden`, can be varied to change the expressiveness of the neural network.
**Note:** The optimal `num_hidden` : `num_visible` ratio will depend on the system. For the four-qubit wavefunction described above, good results can be achieved when this ratio is 1.
On top of needing the number of visible and hidden units, a `ComplexWaveFunction` object requires the user to input a dictionary containing the unitary operators (2x2) that will be used to rotate the qubits in and out of the computational basis, Z, during the training process. The `unitaries` utility will take care of creating this dictionary.
The `MetricEvaluator` class and `training_statistics` utility are built-in amenities that will allow the user to evaluate the training in real time.
Lastly, the `cplx` utility allows QuCumber to be able to handle complex numbers as they are not currently supported by PyTorch.
### Training
To evaluate the training in real time, the fidelity between the true wavefunction of the system and the wavefunction that QuCumber reconstructs, $\vert\langle\psi\vert\psi_{RBM}\rangle\vert^2$, will be calculated along with the Kullback-Leibler (KL) divergence (the RBM's cost function). First, the training data and the true wavefunction of this system need to be loaded using the `data` utility.
```
train_path = "LiH - train_samples.txt"
train_bases_path = "LiH - train_bases.txt"
psi_path = "LiH - psi.txt"
bases_path = "LiH - qubit_bases.txt"
train_samples, true_psi, train_bases, bases = data.load_data(
train_path, psi_path, train_bases_path, bases_path
)
train_samples = train_samples[0:10000,:]
train_bases = train_bases[0:10000,:]
unitary_dict = unitaries.create_dict()
nv = train_samples.shape[-1]
nh = nv
nn_state = ComplexWaveFunction(
num_visible=nv, num_hidden=nh, unitary_dict=unitary_dict, gpu=True
)
epochs = 500
pbs = 100 # pos_batch_size
nbs = pbs # neg_batch_size
lr = 0.01
k = 10
def _coeff_norm(nn_state, space, index):
    """Magnitude of the *index*-th computational-basis coefficient of the
    normalized RBM wavefunction.

    Shared implementation for the MetricEvaluator callbacks below, which
    previously repeated this body twelve times.
    """
    rbm_psi = nn_state.psi(space)
    normalization = nn_state.normalization(space).sqrt_()
    return cplx.norm(
        torch.tensor([rbm_psi[0][index], rbm_psi[1][index]], device=nn_state.device)
        / normalization
    )


# MetricEvaluator callbacks: one per basis-state coefficient. The index is
# the integer value of the 4-bit basis string (|0000> = 0 ... |1011> = 11).

def alpha1(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 0)   # |0000>


def beta1(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 1)   # |0001>


def gamma1(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 2)   # |0010>


def delta1(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 3)   # |0011>


def alpha2(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 4)   # |0100>


def beta2(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 5)   # |0101>


def gamma2(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 6)   # |0110>


def delta2(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 7)   # |0111>


def alpha3(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 8)   # |1000>


def beta3(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 9)   # |1001>


def gamma3(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 10)  # |1010>


def delta3(nn_state, space, **kwargs):
    return _coeff_norm(nn_state, space, 11)  # |1011>
def alpha4(nn_state, space, **kwargs):
rbm_psi = nn_state.psi(space)
normalization = nn_state.normalization(space).sqrt_()
alpha_ = cplx.norm(
torch.tensor([rbm_psi[0][12], rbm_psi[1][12]], device=nn_state.device)
/ normalization
)
return alpha_
def beta4(nn_state, space, **kwargs):
rbm_psi = nn_state.psi(space)
normalization = nn_state.normalization(space).sqrt_()
beta_ = cplx.norm(
torch.tensor([rbm_psi[0][13], rbm_psi[1][13]], device=nn_state.device)
/ normalization
)
return beta_
def gamma4(nn_state, space, **kwargs):
rbm_psi = nn_state.psi(space)
normalization = nn_state.normalization(space).sqrt_()
gamma_ = cplx.norm(
torch.tensor([rbm_psi[0][14], rbm_psi[1][14]], device=nn_state.device)
/ normalization
)
return gamma_
def delta4(nn_state, space, **kwargs):
rbm_psi = nn_state.psi(space)
normalization = nn_state.normalization(space).sqrt_()
delta_ = cplx.norm(
torch.tensor([rbm_psi[0][15], rbm_psi[1][15]], device=nn_state.device)
/ normalization
)
return delta_
period = 25  # evaluate the metrics every `period` epochs
space = nn_state.generate_hilbert_space()  # full Hilbert-space basis, for exact quantities
callbacks = [
    MetricEvaluator(
        period,
        {
            "Fidelity": ts.fidelity,  # |<psi|psi_RBM>|^2 against true_psi
            "KL": ts.KL,              # Kullback-Leibler divergence (the cost function)
            # Optionally track the norms of individual wavefunction
            # coefficients via the helper functions defined above
            # (uncomment the ones of interest).
            #"normα1": alpha1,
            # "normβ1": beta1,
            # "normγ1": gamma1,
            "normδ1": delta1,
            #"normα2": alpha2,
            # "normβ2": beta2,
            # "normγ2": gamma2,
            # "normδ2": delta2,
            #"normα3": alpha3,
            # "normβ3": beta3,
            # "normγ3": gamma3,
            # "normδ3": delta3,
            #"normα4": alpha4,
            # "normβ4": beta4,
            # "normγ4": gamma4,
            # "normδ4": delta4,
        },
        target=true_psi,
        bases=bases,
        verbose=True,
        space=space,
    )
]
# Train the RBM with the hyperparameters chosen above; the callbacks are
# invoked every `period` epochs during training.
nn_state.fit(
    train_samples,
    epochs=epochs,
    pos_batch_size=pbs,
    neg_batch_size=nbs,
    lr=lr,
    k=k,
    input_bases=train_bases,
    callbacks=callbacks,
    time=True,
)
# Retrieve the recorded metric histories from the MetricEvaluator.
# Use consistent dictionary-style access throughout (the original mixed
# attribute access `.Fidelity` with item access `["KL"]`).
fidelities = callbacks[0]["Fidelity"]
KLs = callbacks[0]["KL"]
coeffs = callbacks[0]["normδ1"]
# One x-value per evaluation: period, 2*period, ..., epochs.
epoch = np.arange(period, epochs + 1, period)

fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(14, 3))

# Panel 1: fidelity vs. epoch.
ax = axs[0]
ax.plot(epoch, fidelities, color="C0", markeredgecolor="black")
ax.set_ylabel(r"Fidelity")
ax.set_xlabel(r"Epoch")
ax.set_ylim([0.8, 1])

# Panel 2: KL divergence vs. epoch.
ax = axs[1]
ax.plot(epoch, KLs, color="C1", markeredgecolor="black")
ax.set_ylabel(r"KL Divergence")
ax.set_xlabel(r"Epoch")
ax.set_ylim([0, 0.2])

# Panel 3: modulus of the delta1 coefficient vs. epoch.
ax = axs[2]
ax.plot(epoch, coeffs, color="C2", markeredgecolor="black")
ax.set_ylabel(r"$\vert\delta 1\vert$")
ax.set_xlabel(r"Epoch")
#ax.set_ylim([0.01,0.03])

plt.tight_layout()
plt.show()
```
| github_jupyter |
# Solutions: Corollary 0.0.4 in $\mathbb R^2$
*These are **solutions** to the worksheet on corollary 0.0.4. Please **DO NOT LOOK AT IT** if you haven't given the worksheet a fair amount of thought.*
In this worksheet we will run through the proof of Corollary 0.0.4 from Vershynin. We will "pythonize" the proof step-by-step in the case of a polytope in $\mathbb R^2$ and visualize it. Please fill in the code wherever indicated. Here is the corollary (slightly generalized) for reference:
**Corollary 0.0.4 (Generalized)**: Let $P$ be a polytope in $\mathbb R^n$ with $N$ vertices. Then $P$ can be covered by at most $N^{\lceil (\text{diam}(T)/\epsilon)^2 \rceil}$ Euclidean balls of radii $\epsilon > 0$.
```
# Some useful imports:
import numpy as np
import matplotlib.pyplot as plt
import itertools as it
import math
```
## The Proof of Corollary 0.0.4 in $\mathbb R^2$
Fix $\epsilon > 0$ and a polytope $P$ in $\mathbb R^n$ (in our case $n = 2$). Denote by $T$ the set of vertices of $P$.
```
# Radius of the covering balls (any positive number works).
epsilon = 1.0

# The polytope P in R^2, given by its vertices in clockwise order,
# each vertex stored as a length-2 numpy array.
_vertices = [(0, 0), (0, 1), (1, 1), (1.5, .5), (1, 0)]
P = [np.array(v) for v in _vertices]

# Number of vertices, derived from P so the polytope is easy to change.
N = len(P)
```
Let us define the centers of the balls as follows. \
Let $k := \lceil (\text{diam}(T)/\epsilon)^2 \rceil$. Recall that $\text{diam}(T) = \sup_{x, y \in T} \lvert x - y \rvert$.
```
# Compute diam(T): the largest Euclidean distance between any two vertices
# of P. Iterating unordered pairs with it.combinations replaces the original
# hand-written double index loop; `default=0` preserves the original
# behavior (diamT == 0) when P has fewer than two vertices.
diamT = max(
    (np.linalg.norm(x - y) for x, y in it.combinations(P, 2)), default=0
)
# Compute k = ceil((diam(T)/epsilon)^2), the number of vertices averaged
# per point of the net \mathcal N.
k = math.ceil((diamT / epsilon) ** 2)
```
Consider the set
\begin{equation}
\mathcal N := \left\{ \frac{1}{k} \sum_{j=1}^k x_j : x_j \text{ are vertices of } P \right\}
\end{equation}
```
# Construct \mathcal N: every average (1/k) * sum_j x_j over k vertices of P
# chosen with replacement. np.mean(..., axis=0) is the vector average,
# replacing the original manual accumulation that started from an integer
# numpy array and relied on implicit upcasting.
calN = [
    np.mean(comb, axis=0)
    for comb in it.combinations_with_replacement(P, k)
]
```
We claim that the family of $\epsilon$-balls centered at $\mathcal N$ satisfy the conclusion of the corollary. To check this, note that the polytope $P$ is the convex hull of the set of its vertices. Thus we apply Theorem 0.0.2 to any point $x \in P = \text{conv}(T)$ and deduce that $x$ is within distance $\text{diam(T)} / \sqrt k \leq \epsilon$ from some point in $\mathcal N$. This shows that the $\epsilon$-balls centered at $\mathcal N$ indeed cover $P$.
```
# We visualize the covering here
# Feel free to play around with the visualization!
scale = 10
figure, axes = plt.subplots(figsize=(scale, scale))
axes.axis('equal')
axes.set_xlim([-1, 1.5])
axes.set_ylim([-1,1.5])
# Draw the polytope P as a filled yellow polygon.
plt.fill([p[0] for p in P], [p[1] for p in P], 'y', fill = True)
# Mark each point of the net \mathcal N with a red dot.
plt.plot([p[0] for p in calN], [p[1] for p in calN], 'or')
# Draw the epsilon-ball (unfilled circle) around every net point;
# together these balls cover P.
for p in calN:
    axes.add_artist(plt.Circle((p[0], p[1]), epsilon, fill = False))
```
We can bound the cardinality of $\mathcal N$ by noting that there are $N^k$ ways to choose $k$ out of $N$ vertices with repetition. Thus $|\mathcal N| \leq N^k = N^{\lceil (\text{diam}(T)/\epsilon)^2 \rceil}$. In fact we can be more clever by noticing that the order in which we choose the elements does not matter (this is addressed in exercise 0.0.6).
## Further Questions
At least in $\mathbb R^2$, Corollary 0.0.4 is rather wasteful. How can we come up with a more efficient covering of a polytope?
Is there a way to cleverly construct a subset of $\mathcal N$ that gets the job done?
Copyright (c) 2020 TRIPODS/GradStemForAll 2020 Team
| github_jupyter |
```
import math, random

# Read the upper bound, lower bound, and the number of random draws.
m = int(input("请输入一个整数作为上界\n"))
k = int(input("请输入一个整数作为下界\n"))
n = int(input("请输入你要随机生成的整数的个数\n"))

def fun():
    """Draw n random integers in [k, m], print each one, then print the
    square root of their mean."""
    total = 0
    for i in range(n):
        num = random.randint(k, m)
        print("第", i + 1, "次随机生成的数为:", num)
        total += num
    aver = total / n
    print(n, "个平均数的平方根为", math.sqrt(aver))

fun()
import math, random

# Read how many numbers to draw, then the bounds of the range.
n = int(input("请输入你要随机生成的整数的个数\n"))
# NOTE: the original bound these inputs to the names `max`/`min`, shadowing
# the builtins for the rest of the notebook session; renamed to avoid that.
upper = int(input("请输入一个整数作为上界\n"))
lower = int(input("请输入一个整数作为下界\n"))

sum1 = 0  # running sum of log10(num)
sum2 = 0  # running sum of 1/log10(num)
for i in range(n):
    num = random.randint(lower, upper)
    print("第", i + 1, "次随机生成的数为", num)
    a = math.log10(num)
    sum1 += a
    sum2 += 1 / a
print("∑log(random,2)=", sum1, "∑1/log(random,2)=", sum2)
import math, random

# Compute a + aa + aaa + ... (n terms) for a random digit a.
n = int(input("请输入你要相加的数字的个数\n"))
a = random.randint(1, 9)
print("本次随机生成的数为:", a)

# BUGFIX: the original built each term with math.pow(10, i)*a, i.e. float
# arithmetic — terms printed as "4.0" and lost exactness once they exceeded
# 2**53. Pure integer arithmetic (term = term*10 + a) is exact for any n.
# Also renamed the accumulator so it no longer shadows the builtin `sum`.
term = a      # current term in the series: a, aa, aaa, ...
total = a
print(term)
for _ in range(n - 1):
    term = term * 10 + a
    total += term
    print(term)
print(n, "个数相加的和为", total)
import math,random
def welcome():
    """Print the opening banner for the game."""
    banner = '''
欢迎来到python游戏世界 :)\n'''
    print(banner)
def menu():
    """Show the main menu and run the handler matching the player's choice.

    1 -> intro, 2 -> game, 3 -> team; anything else quits.
    """
    print('''======☚游戏菜单☛======
1.游戏介绍
2.进入游戏
3.创作团队
4.退出游戏
【如果您为第一次进入游戏,建议您输入1来查看规则。】
''')
    choice = int(input("请根据提示输入相应的数字\n"))
    # Dispatch table: menu number -> handler function.
    actions = {1: intro, 2: game, 3: team}
    if choice in actions:
        actions[choice]()
    else:
        print("Goodbye.Thanks.")
def intro():
    """Print the rules of the game, then return to the main menu."""
    print("======☜游戏介绍☞======")
    rules = '''*该游戏是由 用户 在 一任意区间内 猜想数字,由 电脑 进行随机猜测。
*当 电脑 猜出数字后, 用户 需要根据<提示>进行输入以确定电脑猜测是否正确。
*电脑猜测的次数根据用户输入的 区间大小 决定。
【区间上下界由用户随意决定】
~~~~~~~~~~~~~~~~~~~~~~~~~~'''
    print(rules)
    menu()
def game():
    # Main game loop: the player thinks of a number inside [min, max] and the
    # computer guesses randomly; the player narrows the interval after each
    # guess. The computer gets log2(interval size) attempts (binary-search
    # bound). NOTE(review): `max`/`min` shadow the builtins here.
    max = int(input("请输入您猜想的数所在区间的上界\n"))
    min = int(input("请输入您猜想的数所在区间的下界\n"))
    n = int(input("请输入您猜想的数字\n"))
    time = math.log2(max - min + 1)  # number of guesses allowed
    print("电脑能猜的次数为", int(time))
    i = 1
    while i <= time:
        m = random.randint(min, max)  # the computer's guess
        print("第", i, "次电脑猜的数字为", m)
        print('''====================
&若电脑猜对了,请输入1
&若电脑猜大了,请输入2
&若电脑猜小了,请输入3
====================''')
        b = int(input("请根据提示输入相应的数字\n"))
        if b == 1:
            # Computer guessed right: print the victory banner and stop.
            print('''人工智能成功猜到了你的想法!可怕!(゚Д゚≡゚Д゚)
 .-" "-.
 / \
 | |
 |, .-. .-. ,|
 | )(__/ \__)( |
 |/ /\ \|
 (@_ (_ ^^ _)
_ ) \_______\__|IIIIII|__/__________________________
 (_)@8@8{}<________|-\IIIIII/-|___________________________>
 )_/ \ /
 (@ `--------`
~`~`~`~`~`~`~`~`~`~`游戏结束~`~`~`~`~`~`~`~`~`''')
            break
        elif b == 2:
            # Guess was too high: shrink the interval from above.
            print("!!!!看来人工智能还是猜不出人类的想法的!☺")
            max = m
        elif b == 3:
            # Guess was too low: shrink the interval from below.
            print("!!!!看来人工智能还是猜不出人类的想法的!☺")
            min = m
        i += 1
    else:
        # while/else: this branch runs only when the loop exhausted all
        # attempts without `break`, i.e. the computer never guessed right.
        print('''♚♚人工智能没有猜出你的想法!人类获胜!♚♚
 ."". ."",
 | | / /
 | | / /
 | | / /
 | |/ ;-._
 } ` _/ / ;
 | /` ) / /
 | / /_/\_/\
 |/ / |
 ( ' \ '- |
 \ `. /
 | |
 | |
~`~`~`~`~`~`~`~`~`~`游戏结束~`~`~`~`~`~`~`~`~`''')
    # Return to the main menu regardless of outcome.
    menu()
def team():
    """Print the credits, then return to the main menu."""
    credits = '''@@该游戏由201611580487号选手亲自开发创作。若有雷同,处属巧合。如有合作项目请联系邮箱915874439@qq.com。谢谢!@@
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'''
    print(credits)
    menu()
def main():
    """Entry point: show the welcome banner, then the main menu."""
    welcome()
    menu()

if __name__ == '__main__':
    main()
```
| github_jupyter |
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# $\texttt{GiRaFFE}$: Solving GRFFE equations at a higher Finite Differencing order
## Authors: Zach Etienne & Patrick Nelson
### Formatting improvements courtesy Brandon Clark
[comment]: <> (Abstract: TODO)
[comment]: <> (Notebook Status and Validation Notes: TODO)
### NRPy+ Source Code for this module: [GiRaFFE_HO/GiRaFFE_Higher_Order.py](../edit/GiRaFFE_HO/GiRaFFE_Higher_Order.py) [\[tutorial\]](Tutorial-GiRaFFE_Higher_Order.ipynb) Constructs GRFFE evolution equations and initial data as SymPy expressions.
## Introduction:
This module focuses on using the equations developed in the [Tutorial-GiRaFFE_Higher_Order](Tutorial-GiRaFFE_Higher_Order.ipynb) module to build an Einstein Toolkit (ETK) thorn to solve the GRFFE equations in Cartesian coordinates. This tutorial will focus on implementing the time evolution aspects; others can be contructed to set up specific initial data.
When interfaced properly with the ETK, this module will propagate the initial data for $\tilde{S}_i$, $A_i$, and $\sqrt{\gamma} \Phi$, defined in the last tutorial, forward in time by integrating the equations for $\partial_t \tilde{S}_i$, $\partial_t A_i$ and $\partial_t [\sqrt{\gamma} \Phi]$ subject to spatial boundary conditions. The time evolution itself is handled by the $\text{MoL}$ (Method of Lines) thorn in the $\text{CactusNumerical}$ arrangement, and the boundary conditions by the $\text{Boundary}$ thorn in the $\text{CactusBase}$ arrangement.
Similar to the other ETK modules we have built, we will construct the GiRaFFE_HO module in two steps.
1. Call on NRPy+ to convert the SymPy expressions for the evolution equations into one C-code kernel.
1. Write the C code and linkages to the Einstein Toolkit infrastructure (i.e., the .ccl files) to complete this Einstein Toolkit module.
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#initializenrpy): Call on NRPy+ to convert the SymPy expression for the evolution equations and A-to-B into one C-code kernel for each
1. [Step 1.a](#import): Import NRPy+ core modules and the `GiRaFFE_Higher_Order` NRPy+ module
1. [Step 1.b](#ccode_output): Create the C code output kernel
1. [Step 1.c](#ccode_write): Write C code to files
1. [Step 1.d](#a2bdriver): The A-to-B driver
1. [Step 2](#etk): Interfacing with the Einstein Toolkit
1. [Step 2.a](#einstein_c): Constructing the Einstein Toolkit C-code calling functions that include the C code kernels
1. [Step 2.b](#conservative2primitive): The Conservative to Primitive Solver
1. [Step 2.b.i](#old_giraffe): Including `GiRaFFE_headers.h` from old version of GiRaFFE
1. [Step 2.b.ii](#compute_conservatives): Writing `compute_conservatives_FFE.C`
1. [Step 2.c](#grmhd): GRMHD variables provided by HydroBase
1. [Step 2.d](#timelevel): Copying initial data to additional timelevels
1. [Step 2.e](#cclfiles): CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure
    1. [Step 2.e.i](#interface): `interface.ccl`
    1. [Step 2.e.ii](#param): `param.ccl`
    1. [Step 2.e.iii](#schedule): `schedule.ccl`
1. [Step 2.f](#einstein_list): Add the C code to the Einstein Toolkit compilation list
1. [Step 3](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='initializenrpy'></a>
# Step 1: Call on NRPy+ to convert the SymPy expressions for the evolution equations and A-to-B into one C-code kernel for each \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
<a id='import'></a>
## Step 1.a: Import NRPy+ core modules and the `GiRaFFE_Higher_Order` NRPy+ module \[Back to [top](#toc)\]
$$\label{import}$$
We start by importing the core NRPy+ modules we need and setting commonly used parameters. Since we are writing an ETK thorn, we'll need to set `grid::GridFuncMemAccess` to `"ETK"`. We will then import the [GiRaFFE_Higher_Order.py](../edit/GiRaFFE_HO/GiRaFFE_Higher_Order.py) module we previously created and run its main function to generate the SymPy for the expressions we want.
```
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import os,sys
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
    sys.path.append(nrpy_dir_path)

# Core NRPy+ modules: parameter handling, indexed expressions, grid
# functions, finite differencing, C-code output, and loop generation.
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
from outputC import *
import loop

#Step 0: Set the spatial dimension parameter to 3.
par.set_parval_from_str("grid::DIM", 3)
DIM = par.parval_from_str("grid::DIM")
# Use Einstein-Toolkit-style gridfunction memory access (CCTK_GFINDEX3D).
par.set_parval_from_str("grid::GridFuncMemAccess","ETK")

# Step 1c: Call the GiRaFFE_Higher_Order() function from within the
# GiRaFFE_HO/GiRaFFE_Higher_Order.py module; this registers the gridfunctions
# and builds the SymPy expressions used below (accessed as gho.*).
import GiRaFFE_HO.GiRaFFE_Higher_Order as gho
gho.GiRaFFE_Higher_Order()

# Step 1: Set the default centered finite differencing order to 2.
# NOTE(review): the original comment said "4" but the value set is 2 —
# confirm which order is intended.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", 2)
```
<a id='ccode_output'></a>
## Step 1.b: Create the C code output kernel \[Back to [top](#toc)\]
$$\label{ccode_output}$$
To do this, for each header file we need, we will set up an array of lhrh objects representing the gridfunctions to print. We will then pass that array to `FD_outputC()` to get a string of optimized C code. Next, we use the loop function to add code to the kernel so that is applied to the whole ETK grid.
```
# Step 2: Create the C code output kernel.
# Each list below pairs an output gridfunction (lhs) with the SymPy
# expression (rhs) that NRPy+ will convert into optimized C code.
# Prerequisite quantities: 4-velocity components (up/down), u^0*alpha,
# alpha*sqrt(gamma), the parenthesized terms of the A-field and Phi
# evolution equations, and the inverse + determinant of the 3-metric.
Prereqs_to_print = [\
    lhrh(lhs=gri.gfaccess("out_gfs","uU0"),rhs=gho.uU[0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","uU1"),rhs=gho.uU[1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","uU2"),rhs=gho.uU[2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","uD0"),rhs=gho.uD[0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","uD1"),rhs=gho.uD[1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","uD2"),rhs=gho.uD[2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","u0alpha"),rhs=gho.u0alpha),\
    lhrh(lhs=gri.gfaccess("out_gfs","alpsqrtgam"),rhs=gho.alpsqrtgam),\
    lhrh(lhs=gri.gfaccess("out_gfs","AevolParen"),rhs=gho.AevolParen),\
    lhrh(lhs=gri.gfaccess("out_gfs","PevolParenU0"),rhs=gho.PevolParenU[0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","PevolParenU1"),rhs=gho.PevolParenU[1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","PevolParenU2"),rhs=gho.PevolParenU[2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU00"),rhs=gho.gammaUU[0][0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU01"),rhs=gho.gammaUU[0][1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU02"),rhs=gho.gammaUU[0][2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU11"),rhs=gho.gammaUU[1][1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU12"),rhs=gho.gammaUU[1][2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU22"),rhs=gho.gammaUU[2][2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammadet"),rhs=gho.gammadet),\
    ]
# Metric-only subset (inverse 3-metric, symmetric upper triangle, plus
# the determinant) for kernels that need just the geometry.
metric_quantities_to_print = [\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU00"),rhs=gho.gammaUU[0][0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU01"),rhs=gho.gammaUU[0][1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU02"),rhs=gho.gammaUU[0][2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU11"),rhs=gho.gammaUU[1][1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU12"),rhs=gho.gammaUU[1][2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammaUU22"),rhs=gho.gammaUU[2][2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","gammadet"),rhs=gho.gammadet),\
    ]
# To best format this for the ETK, we'll need to register these gridfunctions.
Stilde_rhsD = ixp.register_gridfunctions_for_single_rank1("AUX","Stilde_rhsD")
A_rhsD = ixp.register_gridfunctions_for_single_rank1("AUX","A_rhsD")
psi6Phi_rhs = gri.register_gridfunctions("AUX","psi6Phi_rhs")
# Right-hand sides of the evolution equations for Stilde_i, A_i, and
# sqrt(gamma)*Phi.
Conservs_to_print = [\
    lhrh(lhs=gri.gfaccess("out_gfs","Stilde_rhsD0"),rhs=gho.Stilde_rhsD[0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","Stilde_rhsD1"),rhs=gho.Stilde_rhsD[1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","Stilde_rhsD2"),rhs=gho.Stilde_rhsD[2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","A_rhsD0"),rhs=gho.A_rhsD[0]),\
    lhrh(lhs=gri.gfaccess("out_gfs","A_rhsD1"),rhs=gho.A_rhsD[1]),\
    lhrh(lhs=gri.gfaccess("out_gfs","A_rhsD2"),rhs=gho.A_rhsD[2]),\
    lhrh(lhs=gri.gfaccess("out_gfs","psi6Phi_rhs"),rhs=gho.psi6Phi_rhs),\
    ]
# Turn each expression list into a C-code kernel string.
Prereqs_CKernel = fin.FD_outputC("returnstring",Prereqs_to_print,params="outCverbose=False")
#Prereqs_CKernel = "const double u0 = u0GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)];\n" + Prereqs_CKernel
metric_quantities_CKernel = fin.FD_outputC("returnstring",metric_quantities_to_print,params="outCverbose=False")
Conservs_CKernel = fin.FD_outputC("returnstring",Conservs_to_print,params="outCverbose=False")
#Conservs_CKernel = "const double u0 = u0GF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)];\n" + Conservs_CKernel
# Wrap each kernel in an OpenMP-parallel loop over the local ETK grid.
# Prereqs/metric kernels run over the full grid (including ghost zones);
# the Conservs (RHS) kernel runs over the interior only, since its
# finite-difference stencils reach into the ghost zones.
Prereqs_looped = loop.loop(["i2","i1","i0"],["0","0","0"],\
                           ["cctk_lsh[2]","cctk_lsh[1]","cctk_lsh[0]"],\
                           ["1","1","1"],["#pragma omp parallel for","",""],"",\
                           Prereqs_CKernel.replace("time","cctk_time"))
metric_quantities_looped = loop.loop(["i2","i1","i0"],["0","0","0"],\
                           ["cctk_lsh[2]","cctk_lsh[1]","cctk_lsh[0]"],\
                           ["1","1","1"],["#pragma omp parallel for","",""],"",\
                           metric_quantities_CKernel.replace("time","cctk_time"))
Conservs_looped = loop.loop(["i2","i1","i0"],["cctk_nghostzones[2]","cctk_nghostzones[1]","cctk_nghostzones[0]"],\
                           ["cctk_lsh[2]-cctk_nghostzones[2]","cctk_lsh[1]-cctk_nghostzones[1]",\
                            "cctk_lsh[0]-cctk_nghostzones[0]"],\
                           ["1","1","1"],["#pragma omp parallel for","",""],"",\
                           Conservs_CKernel.replace("time","cctk_time"))
```
<a id='ccode_write'></a>
## Step 1.c: Write C code to files \[Back to [top](#toc)\]
$$\label{ccode_write}$$
Here, we write our C code to files after first creating appropriate directories. Note that we also import [u0_smallb_Poynting__Cartesian.py](../edit/u0_smallb_Poynting__Cartesian/u0_smallb_Poynting__Cartesian.py) for its `computeu0_Cfunction`.
```
# Step 3: Create directories for the thorn if they don't exist.
!mkdir GiRaFFE_HO 2>/dev/null # 2>/dev/null: Don't throw an error if the directory already exists.
!mkdir GiRaFFE_HO/src 2>/dev/null # 2>/dev/null: Don't throw an error if the directory already exists.

# Step 4: Write each looped C code kernel (generated above) to a header
# file inside the thorn's src/ directory.
with open("GiRaFFE_HO/src/Prereqs.h", "w") as file:
    file.write(str(Prereqs_looped))
with open("GiRaFFE_HO/src/metric_quantities.h", "w") as file:
    file.write(str(metric_quantities_looped))
with open("GiRaFFE_HO/src/Conservs.h", "w") as file:
    file.write(str(Conservs_looped))

# Step 5: Import the function to calculate u0 and write it to a file.
import u0_smallb_Poynting__Cartesian.u0_smallb_Poynting__Cartesian as u0etc
#u0etc.compute_u0_smallb_Poynting__Cartesian(gammaDD,betaU,alpha,ValenciavU,BU)
with open("GiRaFFE_HO/src/computeu0_Cfunction.h", "w") as file:
    file.write(u0etc.computeu0_Cfunction)
```
<a id='a2bdriver'></a>
## Steps 1.d: The A-to-B driver \[Back to [top](#toc)\]
$$\label{a2bdriver}$$
We will also need a routine to compute new Valencia 3-velocities at each timestep using a conservative-to-primitive solver. Since we need $v^i_{(n)}$ everywhere, this will require us to compute $B^i$ everywhere. However, $B^i = \epsilon^{ijk} \partial_j A_k$ requires derivatives of $A_i$, so getting $B^i$ will require some finesse. A chief aspect of this will require using lower-order finite differencing in the ghost zones. To that end, we will create header files for each finite differencing order $\leq 10$, as well as upwinded- and downwinded-derivatives at 2nd order. These will let us compute the derivative at the outermost gridpoints.
```
# Step 6: The A-to-B driver: generate the C kernels computing
# B^i = epsilon^{ijk} partial_j A_k at several FD orders and, for the
# outermost face points, with one-sided (upwinded/downwinded) derivatives.
# The original cell repeated the same triple loop six times (one per
# face/direction) and the same output call five times (one per order);
# both repetitions are folded into loops below — the generated files are
# identical.

# Step 6a: Import the Levi-Civita symbol and build the corresponding tensor.
# We already have a handy function to define the Levi-Civita symbol in WeylScalars
import WeylScal4NRPy.WeylScalars_Cartesian as weyl

LeviCivitaDDD = weyl.define_LeviCivitaSymbol_rank3()
LeviCivitaUUU = ixp.zerorank3()
for i in range(DIM):
    for j in range(DIM):
        for k in range(DIM):
            # Raise indices: epsilon^{ijk} = epsilon_{ijk} / sqrt(det(gamma)).
            LeviCivitaUUU[i][j][k] = LeviCivitaDDD[i][j][k] / sp.sqrt(gho.gammadet)

AD_dD = ixp.declarerank2("AD_dD","nosym")
BU = ixp.zerorank1() # BU is already registered as a gridfunction, but we need to zero its values and declare it in this scope.

# Step 6b: Compactly reset the expressions to print at each FD order.
def set_BU_to_print():
    """Return the lhrh list mapping the BU gridfunctions to the current BU expressions."""
    return [lhrh(lhs=gri.gfaccess("out_gfs","BU0"),rhs=BU[0]),\
            lhrh(lhs=gri.gfaccess("out_gfs","BU1"),rhs=BU[1]),\
            lhrh(lhs=gri.gfaccess("out_gfs","BU2"),rhs=BU[2])]

def compute_BU(special_dirn=None, special_deriv=None):
    """Set BU[i] = epsilon^{ijk} partial_j A_k.

    Derivatives in direction special_dirn use special_deriv (a declared
    rank-2 derivative such as the upwinded AD_dupD or downwinded AD_ddnD);
    all other directions use the centered AD_dD. With no arguments, all
    derivatives are centered.
    """
    for i in range(DIM):
        BU[i] = 0
        for j in range(DIM):
            for k in range(DIM):
                deriv = special_deriv if j == special_dirn else AD_dD
                BU[i] += LeviCivitaUUU[i][j][k] * deriv[k][j]

# Step 6c: Write the centered-difference kernel at each FD order 10, 8, ..., 2.
compute_BU()
for order in (10, 8, 6, 4, 2):
    par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", order)
    fin.FD_outputC("GiRaFFE_HO/src/B_from_A_%d.h" % order,
                   set_BU_to_print(), params="outCverbose=False")

# Step 6d: For the outermost points, a separate file for each face:
# one downwinded ("D", AD_ddnD) and one upwinded ("U", AD_dupD) kernel per
# direction, generated at the last FD order set above (2).
AD_ddnD = ixp.declarerank2("AD_ddnD","nosym")
AD_dupD = ixp.declarerank2("AD_dupD","nosym")
for dirn in range(DIM):
    for suffix, one_sided in (("D", AD_ddnD), ("U", AD_dupD)):
        compute_BU(special_dirn=dirn, special_deriv=one_sided)
        fin.FD_outputC("GiRaFFE_HO/src/B_from_A_2x%d%s.h" % (dirn, suffix),
                       set_BU_to_print(), params="outCverbose=False")
```
<a id='etk'></a>
# Step 2: Interfacing with the Einstein Toolkit \[Back to [top](#toc)\]
$$\label{etk}$$
<a id='einstein_c'></a>
## Step 2.a: Constructing the Einstein Toolkit C-code calling functions that include the C code kernels. \[Back to [top](#toc)\]
$$\label{einstein_c}$$
Now that we have generated the C code kernel `GiRaFFE_RHSs.h` and the parameters file `NRPy_params.h`, we will need to write C code to make use of these files. To do this, we can simply follow the example within the [IDScalarWaveNRPy tutorial notebook](Tutorial-ETK_thorn-IDScalarWaveNRPy.ipynb). Functions defined by these files will be called by the Einstein Toolkit scheduler (specified in `schedule.ccl` below).
Also, we will write the logic that determines which files are called where in order to calculate $B^i$ here.
1. Take the primary finite differencing order $N$ from the `param.ccl` file. Fill in the interior points with the corresponding FD order.
1. Then, for each face, at $0+{\rm cctk\_nghostzones[face]}-1$ and ${\rm cctk\_lsh[face]}-{\rm cctk\_nghostzones[face]}+1$, calculate $B^i$ at order $N-2$
1. Continue moving outwards: at the points $0+{\rm cctk\_nghostzones[face]}-p$ and ${\rm cctk\_lsh[face]}-{\rm cctk\_nghostzones[face]}+p$, calculate B at order $N-2p$.
1. When ${\rm cctk\_nghostzones[face]}-p = 0$, use the upwinding and downwinding derivatives for the appropriate face.
```
%%writefile GiRaFFE_HO/src/GiRaFFE.c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
#include "Symmetry.h"
const int MAXFACE = -1;
const int NUL = +0;
const int MINFACE = +1;
/* TODO: Start using this to directly interface with HydroBase!
*CCTK_REAL *Bvec0,*Bvec1,*Bvec2;
*Bvec0 = &Bvec[0*cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2]];
*Bvec1 = &Bvec[1*cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2]];
*Bvec2 = &Bvec[2*cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2]];
*/
// Declare boundary condition FACE_UPDATE function,
// which fills in the ghost zones with successively
// lower order finite differencing
/*
 * AtoB: compute B^i = epsilon^{ijk} partial_j A_k on the index range
 * [i0min,i0max) x [i1min,i1max) x [i2min,i2max).
 * ORDER in {10,8,6,4,2} selects the centered finite-difference stencil;
 * ORDER==0 selects the 2nd-order one-sided stencil for the outermost face,
 * chosen by FACEX0/1/2 (MINFACE -> upwinded, MAXFACE -> downwinded,
 * NUL -> direction unused). The stencil code itself lives in the
 * NRPy+-generated B_from_A_*.h headers, which read AD*GF (and the metric
 * aliases below) and write BU*GF. The isnan/isinf checks report any point
 * still holding the driver's poison value.
 */
void AtoB(const cGH* restrict const cctkGH,const int ORDER,
          const CCTK_REAL *AD0GF,const CCTK_REAL *AD1GF,const CCTK_REAL *AD2GF,
          CCTK_REAL *BU0GF,CCTK_REAL *BU1GF,CCTK_REAL *BU2GF,
          const int i0min, const int i0max,
          const int i1min, const int i1max,
          const int i2min, const int i2max,
          const int FACEX0, const int FACEX1, const int FACEX2) {
  DECLARE_CCTK_ARGUMENTS;
  DECLARE_CCTK_PARAMETERS;
  /* Inverse grid spacings used by the finite-difference headers. */
  const CCTK_REAL invdx0 = 1.0 / (CCTK_DELTA_SPACE(0));
  const CCTK_REAL invdx1 = 1.0 / (CCTK_DELTA_SPACE(1));
  const CCTK_REAL invdx2 = 1.0 / (CCTK_DELTA_SPACE(2));
  /* Aliases mapping the ADMBase 3-metric to the names the headers expect. */
  const CCTK_REAL *gammaDD00GF = gxx;
  const CCTK_REAL *gammaDD01GF = gxy;
  const CCTK_REAL *gammaDD02GF = gxz;
  const CCTK_REAL *gammaDD11GF = gyy;
  const CCTK_REAL *gammaDD12GF = gyz;
  const CCTK_REAL *gammaDD22GF = gzz;
  if(ORDER==10) {
    /* BUGFIX: this branch was missing even though B_from_A_10.h is
     * generated and the driver sets ORDER = 2*cctk_nghostzones[0], which
     * is 10 when five ghost zones are used — the old code would have
     * aborted with "ORDER = 10 not supported". */
    printf("Computing A to B with Order = 10...\n");
    for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "B_from_A_10.h"
      if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {
        printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
      }
    }
  } else if(ORDER==8) {
    printf("Computing A to B with Order = 8...\n");
    for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "B_from_A_8.h"
      if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {
        printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
      }
    }
  } else if(ORDER==6) {
    printf("Computing A to B with Order = 6...\n");
    for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "B_from_A_6.h"
      if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {
        printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
      }
    }
  } else if(ORDER==4) {
    printf("Computing A to B with Order = 4...\n");
    for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "B_from_A_4.h"
      if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {
        printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
      }
    }
  } else if(ORDER==2) {
    printf("Computing A to B with Order = 2...\n");
    for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "B_from_A_2.h"
      if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {
        printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
      }
    }
  } else if(ORDER==0) {
    /* Outermost layer: pick the one-sided stencil matching the face. */
    if(FACEX0==MAXFACE) {
      printf("Computing A to B at x = max...\n");
      for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "B_from_A_2x0D.h"
        if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {
          printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
        }
      }
    } else if(FACEX0==MINFACE) {
      printf("Computing A to B at x = min...\n");
      for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "B_from_A_2x0U.h"
        if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {
          printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
        }
      }
    } else if(FACEX1==MAXFACE) {
      printf("Computing A to B at y = max...\n");
      for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "B_from_A_2x1D.h"
        if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {
          printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
        }
      }
    } else if(FACEX1==MINFACE) {
      printf("Computing A to B at y = min...\n");
      for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "B_from_A_2x1U.h"
        if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {
          printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
        }
      }
    } else if(FACEX2==MAXFACE) {
      printf("Computing A to B at z = max...\n");
      for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "B_from_A_2x2D.h"
        if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {
          printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
        }
      }
    } else if(FACEX2==MINFACE) {
      printf("Computing A to B at z = min...\n");
      for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "B_from_A_2x2U.h"
        if(isnan(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)]) || isinf(BU2GF[CCTK_GFINDEX3D(cctkGH, i0, i1, i2)])) {
          printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
        }
      }
    } else {
      printf("ERROR. FACEX parameters not set properly.\n");
      exit(1);
    }
  } else {
    printf("ERROR. ORDER = %d not supported!\n",ORDER);
    exit(1);
  }
}
/* Scheduled driver: compute B^i = curl(A_i) over the whole local grid.
 * Strategy: fill the interior at full finite-difference order, then "peel"
 * outward toward the ghost zones face by face, dropping the FD order by 2
 * at each layer (down to the one-sided ORDER==0 stencils at the outermost
 * layer). AtoB() dispatches on ORDER and on which face is being filled. */
void driver_A_to_B(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
const int *NG = cctk_nghostzones;
const int *Nx = cctk_lsh;
// Full FD order is twice the ghostzone width.
CCTK_INT ORDER = NG[0]*2;
// Poison all of B^i with +inf so any point AtoB() fails to set is
// caught by the isnan/isinf checks inside AtoB().
for(int ii=0;ii<cctk_lsh[2]*cctk_lsh[1]*cctk_lsh[0];ii++) {
BU0[ii] = 1.0 / 0.0;
BU1[ii] = 1.0 / 0.0;
BU2[ii] = 1.0 / 0.0;
}
printf("Starting A to B driver with Order = %d...\n",ORDER);
// Interior pass at full order (NUL,NUL,NUL => no face selected).
AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, NG[0],Nx[0]-NG[0],NG[1],Nx[1]-NG[1],NG[2],Nx[2]-NG[2], NUL,NUL,NUL);
// Current extents of the already-filled region; grown by one layer per pass.
int imin[3] = { NG[0], NG[1], NG[2] };
int imax[3] = { Nx[0]-NG[0], Nx[1]-NG[1], Nx[2]-NG[2] };
while(ORDER>0) {
// After updating each face, adjust imin[] and imax[]
// to reflect the newly-updated face extents.
ORDER -= 2;
AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL);
if(ORDER!=0) imin[0]--;
AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL);
if(ORDER!=0) imax[0]++;
AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL);
if(ORDER!=0) imin[1]--;
AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL);
if(ORDER!=0) imax[1]++;
AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE);
if(ORDER!=0) imin[2]--;
AtoB(cctkGH,ORDER, AD0,AD1,AD2,BU0,BU1,BU2, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE);
if(ORDER!=0) imax[2]++;
}
}
/* Thin wrapper around the NRPy+-generated kernel "Prereqs.h", which fills the
 * auxiliary gridfunctions needed by the RHS evaluation: the inverse 3-metric
 * and its determinant, u^i / u_i, u^0*alpha, alpha*sqrt(gamma), and the
 * parenthetical terms appearing in the A_i and psi6Phi evolution equations.
 * Inputs are the ADM metric/gauge, Valencia 3-velocity, u^0, A_i and psi6Phi;
 * all remaining (non-const) pointers are outputs. The parameter names below
 * must match exactly what the generated Prereqs.h expects in scope. */
void GiRaFFE_HO_calc_prereqs(const cGH* restrict const cctkGH,const int *cctk_lsh,const int *cctk_nghostzones,
const CCTK_REAL invdx0,const CCTK_REAL invdx1,const CCTK_REAL invdx2,
const CCTK_REAL *alphaGF, const CCTK_REAL *betaU0GF, const CCTK_REAL *betaU1GF, const CCTK_REAL *betaU2GF,
const CCTK_REAL *gammaDD00GF,const CCTK_REAL *gammaDD01GF,const CCTK_REAL *gammaDD02GF,const CCTK_REAL *gammaDD11GF,const CCTK_REAL *gammaDD12GF,const CCTK_REAL *gammaDD22GF,
const CCTK_REAL *ValenciavU0GF, const CCTK_REAL *ValenciavU1GF, const CCTK_REAL *ValenciavU2GF, const CCTK_REAL *u4upperZeroGF,
const CCTK_REAL *AD0GF,const CCTK_REAL *AD1GF,const CCTK_REAL *AD2GF,const CCTK_REAL *psi6PhiGF,
CCTK_REAL *gammaUU00GF,CCTK_REAL *gammaUU01GF,CCTK_REAL *gammaUU02GF,CCTK_REAL *gammaUU11GF,CCTK_REAL *gammaUU12GF,CCTK_REAL *gammaUU22GF,CCTK_REAL *gammadetGF,
CCTK_REAL *uU0GF, CCTK_REAL *uU1GF, CCTK_REAL *uU2GF,CCTK_REAL *uD0GF, CCTK_REAL *uD1GF, CCTK_REAL *uD2GF,
CCTK_REAL *u0alphaGF, CCTK_REAL *alpsqrtgamGF, CCTK_REAL *AevolParenGF,
CCTK_REAL *PevolParenU0GF,CCTK_REAL *PevolParenU1GF,CCTK_REAL *PevolParenU2GF) {
DECLARE_CCTK_PARAMETERS;
#include "Prereqs.h"
}
/* Thin wrapper around the NRPy+-generated kernel "Conservs.h", which evaluates
 * the right-hand sides of the evolution equations for the GRFFE conservative
 * variables: Stilde_i, A_i, and psi6Phi. All inputs (metric, gauge,
 * velocities, B^i, and the precomputed "paren" terms from
 * GiRaFFE_HO_calc_prereqs) are const; the seven *_rhs pointers are outputs.
 * As with calc_prereqs, the parameter names must match the generated header. */
void GiRaFFE_HO_calc_rhs(const cGH* restrict const cctkGH,const int *cctk_lsh,const int *cctk_nghostzones,
const CCTK_REAL invdx0,const CCTK_REAL invdx1,const CCTK_REAL invdx2,
const CCTK_REAL *alphaGF, const CCTK_REAL *betaU0GF, const CCTK_REAL *betaU1GF, const CCTK_REAL *betaU2GF,
const CCTK_REAL *gammaDD00GF,const CCTK_REAL *gammaDD01GF,const CCTK_REAL *gammaDD02GF,const CCTK_REAL *gammaDD11GF,const CCTK_REAL *gammaDD12GF,const CCTK_REAL *gammaDD22GF,
const CCTK_REAL *gammaUU00GF,const CCTK_REAL *gammaUU01GF,const CCTK_REAL *gammaUU02GF,const CCTK_REAL *gammaUU11GF,const CCTK_REAL *gammaUU12GF,const CCTK_REAL *gammaUU22GF,const CCTK_REAL *gammadetGF,
const CCTK_REAL *ValenciavU0GF, const CCTK_REAL *ValenciavU1GF, const CCTK_REAL *ValenciavU2GF, const CCTK_REAL *u4upperZeroGF,
const CCTK_REAL *uU0GF, const CCTK_REAL *uU1GF, const CCTK_REAL *uU2GF, const CCTK_REAL *uD0GF, const CCTK_REAL *uD1GF, const CCTK_REAL *uD2GF,
const CCTK_REAL *AD0GF,const CCTK_REAL *AD1GF,const CCTK_REAL *AD2GF,const CCTK_REAL *psi6PhiGF,
const CCTK_REAL *BU0GF,const CCTK_REAL *BU1GF,const CCTK_REAL *BU2GF,
const CCTK_REAL *u0alphaGF, const CCTK_REAL *alpsqrtgamGF, const CCTK_REAL *AevolParenGF,
const CCTK_REAL *PevolParenU0GF,const CCTK_REAL *PevolParenU1GF,const CCTK_REAL *PevolParenU2GF,
CCTK_REAL *Stilde_rhsD0GF, CCTK_REAL *Stilde_rhsD1GF, CCTK_REAL *Stilde_rhsD2GF,
CCTK_REAL *A_rhsD0GF, CCTK_REAL *A_rhsD1GF, CCTK_REAL *A_rhsD2GF, CCTK_REAL *psi6Phi_rhsGF) {
DECLARE_CCTK_PARAMETERS;
#include "Conservs.h"
}
/* Pointwise computation of u^0 at gridpoint idx, via the generated
 * "computeu0_Cfunction.h" kernel. The kernel reads the local names
 * alpha, gammaDDij, and ValenciavUi, and defines u0; it may also modify
 * the ValenciavUi locals (e.g. a speed-limit rescale -- behavior is in the
 * included file, not visible here). The possibly-updated velocities and
 * u0 are written back to the gridfunctions. Do NOT rename the locals:
 * the included kernel depends on these exact identifiers being in scope. */
void calc_u0(const CCTK_REAL alpha,const CCTK_INT idx,
const CCTK_REAL gammaDD00,const CCTK_REAL gammaDD01,const CCTK_REAL gammaDD02,const CCTK_REAL gammaDD11,const CCTK_REAL gammaDD12,const CCTK_REAL gammaDD22,
CCTK_REAL *ValenciavU0GF,CCTK_REAL *ValenciavU1GF,CCTK_REAL *ValenciavU2GF,CCTK_REAL *u0GF)
{
DECLARE_CCTK_PARAMETERS;
CCTK_REAL u0;
// Pull the point values into the locals the kernel expects.
CCTK_REAL ValenciavU0 = ValenciavU0GF[idx];
CCTK_REAL ValenciavU1 = ValenciavU1GF[idx];
CCTK_REAL ValenciavU2 = ValenciavU2GF[idx];
#include "computeu0_Cfunction.h"
// Write back u0 and the (possibly adjusted) velocities.
u0GF[idx] = u0;
ValenciavU0GF[idx] = ValenciavU0;
ValenciavU1GF[idx] = ValenciavU1;
ValenciavU2GF[idx] = ValenciavU2;
}
/* Scheduled routine: prepare all gridfunctions needed before the RHS
 * evaluation. Pass 1 computes u^0 (and lets calc_u0 adjust the Valencia
 * velocity) at every point; pass 2 fills the auxiliary "prereq"
 * gridfunctions via the generated kernel wrapper. */
void GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS(CCTK_ARGUMENTS) {
  DECLARE_CCTK_ARGUMENTS;
  DECLARE_CCTK_PARAMETERS;
  // Inverse grid spacings, consumed by the finite-difference kernel.
  const CCTK_REAL invdx0 = 1.0 / (CCTK_DELTA_SPACE(0));
  const CCTK_REAL invdx1 = 1.0 / (CCTK_DELTA_SPACE(1));
  const CCTK_REAL invdx2 = 1.0 / (CCTK_DELTA_SPACE(2));
  // Pass 1: pointwise u^0 everywhere (including ghost zones).
#pragma omp parallel for
  for(int kk=0; kk<cctk_lsh[2]; kk++) {
    for(int jj=0; jj<cctk_lsh[1]; jj++) {
      for(int ii=0; ii<cctk_lsh[0]; ii++) {
        const CCTK_INT pt = CCTK_GFINDEX3D(cctkGH, ii,jj,kk);
        calc_u0(alp[pt],pt,
                gxx[pt],gxy[pt],gxz[pt],gyy[pt],gyz[pt],gzz[pt],
                ValenciavU0,ValenciavU1,ValenciavU2,u4upperZero);
      }
    }
  }
  // Pass 2: fill inverse metric, determinant, u^i/u_i, and the
  // parenthetical RHS terms.
  GiRaFFE_HO_calc_prereqs(cctkGH,cctk_lsh,cctk_nghostzones,
                          invdx0, invdx1, invdx2,
                          alp, betax, betay, betaz,
                          gxx, gxy, gxz, gyy, gyz, gzz,
                          ValenciavU0, ValenciavU1, ValenciavU2, u4upperZero,
                          AD0, AD1, AD2, psi6Phi,
                          gammaUU00,gammaUU01,gammaUU02,gammaUU11,gammaUU12,gammaUU22,gammadet,
                          uU0, uU1, uU2, uD0, uD1, uD2,
                          u0alpha, alpsqrtgam, AevolParen,
                          PevolParenU0, PevolParenU1, PevolParenU2);
}
/* Scheduled routine: evaluate the RHSs of Stilde_i, A_i, and psi6Phi by
 * handing every needed gridfunction to the generated kernel wrapper. */
void GiRaFFE_HO_set_rhs(CCTK_ARGUMENTS) {
  DECLARE_CCTK_ARGUMENTS;
  DECLARE_CCTK_PARAMETERS;
  // Inverse grid spacings for the finite-difference stencils.
  const CCTK_REAL inverse_dx = 1.0 / (CCTK_DELTA_SPACE(0));
  const CCTK_REAL inverse_dy = 1.0 / (CCTK_DELTA_SPACE(1));
  const CCTK_REAL inverse_dz = 1.0 / (CCTK_DELTA_SPACE(2));
  GiRaFFE_HO_calc_rhs(cctkGH,cctk_lsh,cctk_nghostzones,
                      inverse_dx, inverse_dy, inverse_dz,
                      alp, betax, betay, betaz,
                      gxx, gxy, gxz, gyy, gyz, gzz,
                      gammaUU00,gammaUU01,gammaUU02,gammaUU11,gammaUU12,gammaUU22,gammadet,
                      ValenciavU0, ValenciavU1, ValenciavU2, u4upperZero,
                      uU0, uU1, uU2, uD0, uD1, uD2,
                      AD0, AD1, AD2, psi6Phi,
                      BU0, BU1, BU2,
                      u0alpha, alpsqrtgam, AevolParen,
                      PevolParenU0, PevolParenU1, PevolParenU2,
                      Stilde_rhsD0, Stilde_rhsD1, Stilde_rhsD2,
                      A_rhsD0, A_rhsD1, A_rhsD2, psi6Phi_rhs);
}
/* Boundary Condition code adapted from WaveToyC thorn in ETK, implementing built-in
* ETK BC functionality
*/
/* Select the outer boundary condition (from the `bound` parameter) for every
 * evolved GiRaFFE_HO gridfunction, using the ETK Boundary thorn.
 * Adapted from the WaveToyC thorn.
 *
 * Improvement over the original: the seven identical copy-pasted
 * Boundary_SelectVarForBC blocks are collapsed into one table-driven loop.
 * Call order, arguments, and the warning message are unchanged. */
void GiRaFFE_HO_SelectBCs(CCTK_ARGUMENTS)
{
  DECLARE_CCTK_ARGUMENTS;
  DECLARE_CCTK_PARAMETERS;
  const char *bctype;
  bctype = NULL;
  if (CCTK_EQUALS(bound,"flat") || CCTK_EQUALS(bound,"static") ||
      CCTK_EQUALS(bound,"radiation") || CCTK_EQUALS(bound,"robin") ||
      CCTK_EQUALS(bound,"none"))
  {
    bctype = bound;
  }
  else if (CCTK_EQUALS(bound,"zero"))
  {
    // "zero" maps onto the Boundary thorn's "scalar" BC (with default
    // scalar value 0).
    bctype = "scalar";
  }
  // All evolved gridfunctions get the same boundary condition.
  const char *evolved_gfs[] = {
    "GiRaFFE_HO::StildeD0", "GiRaFFE_HO::StildeD1", "GiRaFFE_HO::StildeD2",
    "GiRaFFE_HO::AD0",      "GiRaFFE_HO::AD1",      "GiRaFFE_HO::AD2",
    "GiRaFFE_HO::psi6Phi" };
  const int num_gfs = (int)(sizeof(evolved_gfs)/sizeof(evolved_gfs[0]));
  for(int which=0; which<num_gfs; which++) {
    /* Uses all default arguments, so invalid table handle -1 can be passed */
    if (bctype && Boundary_SelectVarForBC (cctkGH, CCTK_ALL_FACES, 1, -1,
                                           evolved_gfs[which], bctype) < 0)
    {
      CCTK_WARN (0, "GiRaFFE_HO_Boundaries: Error selecting boundary condition");
    }
  }
}
/* Register Cartesian reflection symmetries for all GiRaFFE_HO gridfunctions:
 * every one is declared even (parity +1) across all three coordinate planes.
 *
 * Improvement over the original: the sixteen copy-pasted SetCartSymVN calls
 * are collapsed into one table-driven loop; registration order is unchanged. */
void GiRaFFE_HO_InitSymBound(CCTK_ARGUMENTS)
{
  DECLARE_CCTK_ARGUMENTS;
  // Parity +1 about each of the x=0, y=0, z=0 planes.
  int sym[3];
  sym[0] = 1;
  sym[1] = 1;
  sym[2] = 1;
  const char *gfnames[] = {
    "GiRaFFE_HO::uU0",          "GiRaFFE_HO::uU1",          "GiRaFFE_HO::uU2",
    "GiRaFFE_HO::u0alpha",      "GiRaFFE_HO::alpsqrtgam",   "GiRaFFE_HO::AevolParen",
    "GiRaFFE_HO::PevolParenU0", "GiRaFFE_HO::PevolParenU1", "GiRaFFE_HO::PevolParenU2",
    "GiRaFFE_HO::StildeD0",     "GiRaFFE_HO::StildeD1",     "GiRaFFE_HO::StildeD2",
    "GiRaFFE_HO::AD0",          "GiRaFFE_HO::AD1",          "GiRaFFE_HO::AD2",
    "GiRaFFE_HO::psi6Phi" };
  const int num_gfs = (int)(sizeof(gfnames)/sizeof(gfnames[0]));
  for(int which=0; which<num_gfs; which++) {
    SetCartSymVN(cctkGH, sym, gfnames[which]);
  }
  return;
}
/* Register each evolved gridfunction with its RHS counterpart in the MoL
 * (Method of Lines) time integrator. ierr accumulates the return codes but
 * is deliberately unused, matching MoL registration boilerplate elsewhere. */
void GiRaFFE_HO_RegisterVars(CCTK_ARGUMENTS)
{
  DECLARE_CCTK_ARGUMENTS;
  DECLARE_CCTK_PARAMETERS;
  CCTK_INT ierr CCTK_ATTRIBUTE_UNUSED = 0;
  /* Register all the evolved grid functions with MoL */
  const char *evolved_gfs[7] = {
    "GiRaFFE_HO::StildeD0", "GiRaFFE_HO::StildeD1", "GiRaFFE_HO::StildeD2",
    "GiRaFFE_HO::AD0",      "GiRaFFE_HO::AD1",      "GiRaFFE_HO::AD2",
    "GiRaFFE_HO::psi6Phi" };
  const char *rhs_gfs[7] = {
    "GiRaFFE_HO::Stilde_rhsD0", "GiRaFFE_HO::Stilde_rhsD1", "GiRaFFE_HO::Stilde_rhsD2",
    "GiRaFFE_HO::A_rhsD0",      "GiRaFFE_HO::A_rhsD1",      "GiRaFFE_HO::A_rhsD2",
    "GiRaFFE_HO::psi6Phi_rhs" };
  for(int v=0; v<7; v++) {
    ierr += MoLRegisterEvolved(CCTK_VarIndex(evolved_gfs[v]), CCTK_VarIndex(rhs_gfs[v]));
  }
  /* Register all the evolved Array functions with MoL */
  return;
}
```
<a id='conservative2primitive'></a>
## Step 2.b: The Conservative to Primitive Solver \[Back to [top](#toc)\]
$$\label{conservative2primitive}$$
We will also need to use the conservative to primitive solver from the old version of $\texttt{GiRaFFE}$, included here for convenience. It is slightly modified to use our new variable names and to otherwise work with the slightly different scheduler. It also computes the inverse and determinant of the three metric.
```
%%writefile GiRaFFE_HO/src/driver_conserv_to_prims_FFE.C
/* We evolve forward in time a set of functions called the
* "conservative variables" (magnetic field and Poynting vector),
* and any time the conserv's are updated, we must recover the
* primitive variables (velocities), before reconstructing & evaluating
* the RHSs of the MHD equations again.
*
* This file contains the routine for this algebraic calculation.
* The velocity is calculated with formula (85), arXiv:1310.3274v2
 * $v^i = 4 \pi \alpha \gamma^{ij} {\tilde S}_j \gamma^{-1/2} B^{-2} - \beta^i$
* The force-free condition: $B^2>E^2$ is checked before computing the velocity.
* and after imposing the constraint ${\tilde B}^i {\tilde S}_i = 0$
* The procedure is as described in arXiv:1310.3274v2:
* 1. ${\tilde S}_i ->{\tilde S}_i - ({\tilde S}_j {\tilde B}^j) {\tilde B}^i/{\tilde B}^2$
* 2. $f = \sqrt{(1-\gamma_{max}^{-2}){\tilde B}^4/(16 \pi^2 \gamma {\tilde S}^2)}$
 * 3. ${\tilde S}_i \to {\tilde S}_i \min(1,f)$
 * 4. $v^i = 4 \pi \alpha \gamma^{ij} {\tilde S}_j \gamma^{-1/2} B^{-2} - \beta^i$
* 5. ${\tilde n}_i v^i = 0$
*
* All equations are from: http://arxiv.org/pdf/1310.3274.pdf (v2)
* */
#include "cctk.h"
#include <iostream>
#include <iomanip>
#include <fstream>
#include <sys/time.h>
#include <cmath>
#include <ctime>
#include <cstdlib>
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
#include "Symmetry.h"
#ifndef M_PI
#define M_PI 3.141592653589793238463
#endif
#include "GiRaFFE_headers.h"
//#include "inlined_functions.C"
/* Thin wrapper around the generated kernel "metric_quantities.h": from the
 * physical 3-metric gamma_{ij} (inputs), fill the inverse metric gamma^{ij}
 * and the determinant gamma (outputs) at every gridpoint. Parameter names
 * must match what the generated header expects in scope. */
void GiRaFFE_HO_update_metric_det_inverse(const cGH* restrict const cctkGH,const int *cctk_lsh,const int *cctk_nghostzones,
const CCTK_REAL *gammaDD00GF,const CCTK_REAL *gammaDD01GF,const CCTK_REAL *gammaDD02GF,const CCTK_REAL *gammaDD11GF,const CCTK_REAL *gammaDD12GF,const CCTK_REAL *gammaDD22GF,
CCTK_REAL *gammaUU00GF,CCTK_REAL *gammaUU01GF,CCTK_REAL *gammaUU02GF,CCTK_REAL *gammaUU11GF,CCTK_REAL *gammaUU12GF,CCTK_REAL *gammaUU22GF,CCTK_REAL *gammadetGF) {
#include "metric_quantities.h"
}
/* Conservative-to-primitive solver for GRFFE: recover the Valencia
 * 3-velocity from the evolved momentum Stilde_i and the magnetic field B^i,
 * following arXiv:1310.3274v2. When APPLY_GRFFE_FIXES is defined, the
 * inequality fixes of that paper (B.S orthogonality, Lorentz-factor cap,
 * current-sheet velocity null) are applied; note that the #define is
 * currently commented out below, so those branches are compiled OUT. */
extern "C" void GiRaFFE_HO_conserv_to_prims_FFE(CCTK_ARGUMENTS) {
printf("Starting conservative-to-primitive solver...\n");
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
// We use proper C++ here, for file I/O later.
using namespace std;
// Loop over the entire local grid, ghost zones included.
const int imin=0,jmin=0,kmin=0;
const int imax=cctk_lsh[0],jmax=cctk_lsh[1],kmax=cctk_lsh[2];
const CCTK_REAL dz = CCTK_DELTA_SPACE(2);
// Diagnostics accumulated across the grid (OpenMP reductions below):
// relative change in the velocity, and counts of points where the
// speed-limit / current-sheet fixes fired.
CCTK_REAL error_int_numer=0,error_int_denom=0;
CCTK_INT num_vel_limits=0,num_vel_nulls_current_sheet=0;
// Refresh gamma^{ij} and det(gamma) before using them pointwise.
GiRaFFE_HO_update_metric_det_inverse(cctkGH,cctk_lsh,cctk_nghostzones,
gxx, gxy, gxz, gyy, gyz, gzz,
gammaUU00,gammaUU01,gammaUU02,gammaUU11,gammaUU12,gammaUU22,gammadet);
#pragma omp parallel for reduction(+:error_int_numer,error_int_denom,num_vel_limits,num_vel_nulls_current_sheet) schedule(static)
for(int k=kmin;k<kmax;k++)
for(int j=jmin;j<jmax;j++)
for(int i=imin;i<imax;i++) {
const int index = CCTK_GFINDEX3D(cctkGH,i,j,k);
const CCTK_REAL rL = r[index];
// Skip points inside the excision radius (e.g. inside a horizon).
if(rL>min_radius_inside_of_which_conserv_to_prims_FFE_and_FFE_evolution_is_DISABLED) {
const CCTK_REAL sqrtg = sqrt(gammadet[index]); // Determinant of 3-metric
// \gamma_{ij}, computed from \tilde{\gamma}_{ij}
const CCTK_REAL gxxL = gxx[index];
const CCTK_REAL gxyL = gxy[index];
const CCTK_REAL gxzL = gxz[index];
const CCTK_REAL gyyL = gyy[index];
const CCTK_REAL gyzL = gyz[index];
const CCTK_REAL gzzL = gzz[index];
// \gamma^{ij} = psim4 * \tilde{\gamma}^{ij}
const CCTK_REAL gupxxL = gammaUU00[index];
const CCTK_REAL gupxyL = gammaUU01[index];
const CCTK_REAL gupxzL = gammaUU02[index];
const CCTK_REAL gupyyL = gammaUU11[index];
const CCTK_REAL gupyzL = gammaUU12[index];
const CCTK_REAL gupzzL = gammaUU22[index];
// Read in magnetic field and momentum variables once from memory, since memory access is expensive:
const CCTK_REAL BU0L = BU0[index];
const CCTK_REAL BU1L = BU1[index];
const CCTK_REAL BU2L = BU2[index];
// End of page 7 on http://arxiv.org/pdf/1310.3274.pdf
// Densitized field Btilde^i = sqrt(gamma) B^i, and its lowered form.
const CCTK_REAL BtildexL = BU0L*sqrtg;
const CCTK_REAL BtildeyL = BU1L*sqrtg;
const CCTK_REAL BtildezL = BU2L*sqrtg;
const CCTK_REAL Btilde_xL = gxxL*BtildexL + gxyL*BtildeyL + gxzL*BtildezL;
const CCTK_REAL Btilde_yL = gxyL*BtildexL + gyyL*BtildeyL + gyzL*BtildezL;
const CCTK_REAL Btilde_zL = gxzL*BtildexL + gyzL*BtildeyL + gzzL*BtildezL;
CCTK_REAL StildeD0L = StildeD0[index];
CCTK_REAL StildeD1L = StildeD1[index];
CCTK_REAL StildeD2L = StildeD2[index];
// NOTE(review): hard-coded single-point debug output left over from
// debugging; harmless unless the grid contains (114,114,114).
if(i==114 && j==114 && k==114) {
printf("gupxxL = %.15e\nStildeD0L = %.15e\ngupxyL = %.15e\nStildeD1L = %.15e\ngupxzL = %.15e\nStildeD2L = %.15e\n",gupxxL,StildeD0L,gupxyL,StildeD1L,gupxzL,StildeD2L);
}
// Keep the incoming values so we can measure how much C2P changed them.
const CCTK_REAL StildeD0_orig = StildeD0L;
const CCTK_REAL StildeD1_orig = StildeD1L;
const CCTK_REAL StildeD2_orig = StildeD2L;
const CCTK_REAL ValenciavU0_orig = ValenciavU0[index];
const CCTK_REAL ValenciavU1_orig = ValenciavU1[index];
const CCTK_REAL ValenciavU2_orig = ValenciavU2[index];
//const CCTK_REAL alpL = alp[index];
//const CCTK_REAL fourpialpha = 4.0*M_PI*alpL;
const CCTK_REAL fourpi = 4.0*M_PI;
//const CCTK_REAL betaxL = betax[index];
//const CCTK_REAL betayL = betay[index];
//const CCTK_REAL betazL = betaz[index];
//* 1. Just below Eq 90: Enforce orthogonality of B^i & S^i, so that B^i S_i = 0
//* Correction ${\tilde S}_i ->{\tilde S}_i - ({\tilde S}_j {\tilde B}^j) {\tilde B}_i/{\tilde B}^2$
//* NOTICE THAT THE {\tilde B}_i IS LOWERED, AS IT SHOULD BE. THIS IS A TYPO IN PASCHALIDIS ET AL.
// First compute Btilde^i Stilde_i:
const CCTK_REAL BtildeiSt_i = StildeD0L*BtildexL + StildeD1L*BtildeyL + StildeD2L*BtildezL;
//printf("xterm = %f ; yterm = %f ; zterm = %f\n",StildeD0L*BtildexL,StildeD1L*BtildeyL,StildeD2L*BtildezL);
// Then compute (Btilde)^2
const CCTK_REAL Btilde2 = gxxL*BtildexL*BtildexL + gyyL*BtildeyL*BtildeyL + gzzL*BtildezL*BtildezL
+ 2.0*(gxyL*BtildexL*BtildeyL + gxzL*BtildexL*BtildezL + gyzL*BtildeyL*BtildezL);
//#define APPLY_GRFFE_FIXES
// Now apply constraint: Stilde_i = Stilde_i - (Btilde^i Stilde_i) / (Btilde)^2
#ifdef APPLY_GRFFE_FIXES
StildeD0L -= BtildeiSt_i*Btilde_xL/Btilde2;
StildeD1L -= BtildeiSt_i*Btilde_yL/Btilde2;
StildeD2L -= BtildeiSt_i*Btilde_zL/Btilde2;
//printf("BtildeiSt_i = %f ; Btilde2 = %f\n",BtildeiSt_i,Btilde2);
#endif
// Now that tildeS_i has been fixed, let's compute tildeS^i:
CCTK_REAL mhd_st_upx = gupxxL*StildeD0L + gupxyL*StildeD1L + gupxzL*StildeD2L;
CCTK_REAL mhd_st_upy = gupxyL*StildeD0L + gupyyL*StildeD1L + gupyzL*StildeD2L;
CCTK_REAL mhd_st_upz = gupxzL*StildeD0L + gupyzL*StildeD1L + gupzzL*StildeD2L;
// Just below Eq. 86 in http://arxiv.org/pdf/1310.3274.pdf:
CCTK_REAL St2 = StildeD0L*mhd_st_upx + StildeD1L*mhd_st_upy + StildeD2L*mhd_st_upz;
//* 2. Eq. 92: Factor $f = \sqrt{(1-\gamma_{max}^{-2}){\tilde B}^4/(16 \pi^2 \gamma {\tilde S}^2)}$
#ifdef APPLY_GRFFE_FIXES
// Cap the Lorentz factor at GAMMA_SPEED_LIMIT by rescaling Stilde_i.
const CCTK_REAL gmax = GAMMA_SPEED_LIMIT;
if(St2 > (1.0 - 1.0/(gmax*gmax))*Btilde2*Btilde2/ (16.0*M_PI*M_PI*sqrtg*sqrtg)) {
const CCTK_REAL fact = sqrt((1.0 - 1.0/(gmax*gmax))/St2)*Btilde2/(4.0*M_PI*sqrtg);
//* 3. ${\tilde S}_i -> {\tilde S}_i min(1,f)
StildeD0L *= MIN(1.0,fact);
StildeD1L *= MIN(1.0,fact);
StildeD2L *= MIN(1.0,fact);
// Recompute S^i
mhd_st_upx = gupxxL*StildeD0L + gupxyL*StildeD1L + gupxzL*StildeD2L;
mhd_st_upy = gupxyL*StildeD0L + gupyyL*StildeD1L + gupyzL*StildeD2L;
mhd_st_upz = gupxzL*StildeD0L + gupyzL*StildeD1L + gupzzL*StildeD2L;
/*
printf("%e %e %e | %e %e %e | %e %e %e | oldgamma: %e %e should be > %e vfix\n",x[index],y[index],z[index],
BU0L,BU1L,BU2L,
St2,(1.0 - 1.0/(gmax*gmax))*Btilde2*Btilde2/ (16.0*M_PI*M_PI*sqrtg*sqrtg),gmax,
sqrt(Btilde2 / (Btilde2 - 16*M_PI*M_PI*sqrtg*sqrtg * St2 / Btilde2) ) , Btilde2,16*M_PI*M_PI*sqrtg*sqrtg * St2 / Btilde2 );
//exit(1);
*/
// Recompute Stilde^2:
St2 = StildeD0L*mhd_st_upx + StildeD1L*mhd_st_upy + StildeD2L*mhd_st_upz;
if( St2 >= Btilde2*Btilde2/ (16.0*M_PI*M_PI*sqrtg*sqrtg) ) {
printf("ERROR: Velocity cap fix wasn't effective; still have B^2 > E^2\n"); exit(1);
}
num_vel_limits++;
}
#endif
//* 4. Eq. 85: $v^i = 4 pi \alpha \gamma^{ij} {\tilde S}_j \gamma{-1/2} B^{-2} - \beta^i$:
// See, e.g., Eq 71 in http://arxiv.org/pdf/1310.3274.pdf
// ... or end of page 7 on http://arxiv.org/pdf/1310.3274.pdf:
const CCTK_REAL B2 = Btilde2/(sqrtg*sqrtg);
/*
Eq. 75:
v^i = \alpha \gamma^{ij} S_j / \mathcal{B}^2 - \beta^i
Eq. 7: \mathcal{B}^{\mu} = B^{\mu}/\sqrt{4 \pi}
-> v^i = 4 \pi \alpha \gamma^{ij} S_j / B^2 - \beta^i
Eq. 79: \tilde{S_i} = \sqrt{\gamma} S_i
-> v^i = 4 \pi \alpha \gamma^{ij} \tilde{S}_j / (\sqrt{\gamma} B^2) - \beta^i
*/
// Modified from the original GiRaFFE to use Valencia, not drift velocity
const CCTK_REAL ValenciavU0L = fourpi*mhd_st_upx/(sqrtg*B2);
const CCTK_REAL ValenciavU1L = fourpi*mhd_st_upy/(sqrtg*B2);
/* ValenciavU2L not necessarily const! See below. */
CCTK_REAL ValenciavU2L = fourpi*mhd_st_upz/(sqrtg*B2);
//* 5. Eq. 94: ${\tilde n}_i v^i = 0$ in the current sheet region
// n^i is defined as the normal from the current sheet, which lies in the
// xy-plane (z=0). So n = (0,0,1)
#ifdef APPLY_GRFFE_FIXES
if(current_sheet_null_v) {
CCTK_REAL zL = z[index];
// Within ~4 gridpoints of the equatorial plane, null the normal velocity.
if (fabs(zL) <= (4.0 + 1.0e-2)*dz ) {
//ValenciavU2L = 0.0;
ValenciavU2L = - (ValenciavU0L*gxzL + ValenciavU1L*gyzL) / gzzL;
// FIXME: This is probably not right, but also definitely not the problem.
// ValenciavU2L reset: TYPICALLY WOULD RESET CONSERVATIVES TO BE CONSISTENT. LET'S NOT DO THAT, TO AVOID MESSING UP B-FIELDS
if(1==1) {
// Re-derive Stilde_i from the adjusted velocity so conservatives
// stay consistent with the primitives at this point.
CCTK_REAL PRIMS[MAXNUMVARS];
int ww=0;
PRIMS[ww] = ValenciavU0L; ww++;
PRIMS[ww] = ValenciavU1L; ww++;
PRIMS[ww] = ValenciavU2L; ww++;
PRIMS[ww] = BU0L; ww++;
PRIMS[ww] = BU1L; ww++;
PRIMS[ww] = BU2L; ww++;
CCTK_REAL METRIC[NUMVARS_FOR_METRIC],dummy=0;
ww=0;
// FIXME: NECESSARY?
//psi_bssn[index] = exp(phi[index]);
METRIC[ww] = (1.0/12.0) * log(gammadet[index]);ww++;
METRIC[ww] = dummy; ww++; // Don't need to set psi.
METRIC[ww] = gxx[index]; ww++;
METRIC[ww] = gxy[index]; ww++;
METRIC[ww] = gxz[index]; ww++;
METRIC[ww] = gyy[index]; ww++;
METRIC[ww] = gyz[index]; ww++;
METRIC[ww] = gzz[index]; ww++;
METRIC[ww] = alp[index]-1; ww++;
METRIC[ww] = betax[index]; ww++;
METRIC[ww] = betay[index]; ww++;
METRIC[ww] = betaz[index]; ww++;
METRIC[ww] = gammaUU00[index]; ww++;
METRIC[ww] = gammaUU01[index]; ww++;
METRIC[ww] = gammaUU02[index]; ww++;
METRIC[ww] = gammaUU11[index]; ww++;
METRIC[ww] = gammaUU12[index]; ww++;
METRIC[ww] = gammaUU22[index]; ww++;
CCTK_REAL CONSERVS[NUM_CONSERVS] = {0.0, 0.0, 0.0}; // 3 conservative variables: Stilde_x, Stilde_y, Stilde_z
GiRaFFE_HO_compute_conservatives(PRIMS,METRIC, CONSERVS);
StildeD0L = CONSERVS[STILDEX];
StildeD1L = CONSERVS[STILDEY];
StildeD2L = CONSERVS[STILDEZ];
}
num_vel_nulls_current_sheet++;
}
}
#endif
// Write the recovered primitives (and possibly-fixed conservatives) back.
ValenciavU0[index] = ValenciavU0L;
ValenciavU1[index] = ValenciavU1L;
ValenciavU2[index] = ValenciavU2L;
//Now we compute the difference between original & new conservatives, for diagnostic purposes:
//error_int_numer += fabs(StildeD0L - StildeD0_orig) + fabs(StildeD1L - StildeD1_orig) + fabs(StildeD2L - StildeD2_orig);
//error_int_denom += fabs(StildeD0_orig) + fabs(StildeD1_orig) + fabs(StildeD2_orig);
/*
if(fabs(ValenciavU0_orig) > 1e-13 && fabs(ValenciavU0L-ValenciavU0_orig)/ValenciavU0_orig > 1e-2) printf("BAD ValenciavU0: %e %e | %e %e %e\n",ValenciavU0L,ValenciavU0_orig,x[index],y[index],z[index]);
if(fabs(ValenciavU1_orig) > 1e-13 && fabs(ValenciavU1L-ValenciavU1_orig)/ValenciavU1_orig > 1e-2) printf("BAD ValenciavU1: %e %e | %e %e %e\n",ValenciavU1L,ValenciavU1_orig,x[index],y[index],z[index]);
if(fabs(ValenciavU2_orig) > 1e-13 && fabs(ValenciavU2L-ValenciavU2_orig)/ValenciavU2_orig > 1e-2) printf("BAD ValenciavU2: %e %e | %e %e %e\n",ValenciavU2L,ValenciavU2_orig,x[index],y[index],z[index]);
*/
error_int_numer += fabs(ValenciavU0L - ValenciavU0_orig) + fabs(ValenciavU1L - ValenciavU1_orig) + fabs(ValenciavU2L - ValenciavU2_orig);
error_int_denom += fabs(ValenciavU0_orig) + fabs(ValenciavU1_orig) + fabs(ValenciavU2_orig);
StildeD0[index] = StildeD0L;
StildeD1[index] = StildeD1L;
StildeD2[index] = StildeD2L;
}
}
if (cctk_time==0) {
/* Write the initial data to separate, dedicated gridfunctions:
These will be accessed later by VolumeIntegrals_GRMHD to assess convergence. */
#pragma omp parallel for
for(int i2=0; i2<cctk_lsh[2]; i2++) {
for(int i1=0; i1<cctk_lsh[1]; i1++) {
for(int i0=0; i0<cctk_lsh[0]; i0++) {
CCTK_INT idx3 = CCTK_GFINDEX3D(cctkGH, i0,i1,i2);
AD0_init[idx3] = AD0[idx3];
AD1_init[idx3] = AD1[idx3];
AD2_init[idx3] = AD2[idx3];
psi6Phi_init[idx3] = psi6Phi[idx3];
CCTK_REAL lapse = alp[idx3];
CCTK_REAL betaxL = betax[idx3];
CCTK_REAL betayL = betay[idx3];
CCTK_REAL betazL = betaz[idx3];
// ValenciavUi_init stores the DRIFT velocity alpha*v^i - beta^i.
ValenciavU0_init[idx3] = lapse*ValenciavU0[idx3]-betaxL;
ValenciavU1_init[idx3] = lapse*ValenciavU1[idx3]-betayL;
ValenciavU2_init[idx3] = lapse*ValenciavU2[idx3]-betazL;
// NOTE(review): BU*_init are filled from StildeD*, not BU* -- confirm
// this is intentional (the names suggest the B-field was intended).
BU0_init[idx3] = StildeD0[idx3];
BU1_init[idx3] = StildeD1[idx3];
BU2_init[idx3] = StildeD2[idx3];
}
}
}
}
CCTK_VInfo(CCTK_THORNSTRING,"FFEC2P: Lev: %d NumPts= %d | Error: %.3e, ErrDenom: %.3e, v_limits: %d / %d = %.3e, v_nulls: %d / %d = %.3e",
(int)GetRefinementLevel(cctkGH),
cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2],
error_int_numer/(error_int_denom+1e-300),error_int_denom,
/**/ num_vel_limits, cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2],
(CCTK_REAL)num_vel_limits/((CCTK_REAL)cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2]),
/**/ num_vel_nulls_current_sheet, cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2],
(CCTK_REAL)num_vel_nulls_current_sheet/((CCTK_REAL)cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2]));
}
```
<a id='old_giraffe'></a>
### Step 2.b.i: Including `GiRaFFE_headers.h` from old version of GiRaFFE \[Back to [top](#toc)\]
$$\label{old_giraffe}$$
We will also include `GiRaFFE_headers.h` from the old version of GiRaFFE, which defines constants on which our conservative-to-primitive solver depends.
```
%%writefile GiRaFFE_HO/src/GiRaFFE_headers.h
// To safeguard against double-including this header file:
#ifndef GIRAFFE_HEADERS_H_
#define GIRAFFE_HEADERS_H_
// Small utility macros and constants used throughout the thorn.
#define MIN(a,b) ( ((a) < (b)) ? (a) : (b) )
#define MAX(a,b) ( ((a) > (b)) ? (a) : (b) )
#define SQR(x) ((x) * (x))
#define ONE_OVER_SQRT_4PI 0.282094791773878143474039725780
#define VERR_DEF_PARAMS __LINE__, __FILE__, CCTK_THORNSTRING
// Indices into the METRIC[] array used by the C2P/conservatives routines.
// The order here MATTERS, as we assume that GUPXX+1=GUPYY, etc.
static const int PHI=0,PSI=1,GXX=2,GXY=3,GXZ=4,GYY=5,GYZ=6,GZZ=7,
LAPM1=8,SHIFTX=9,SHIFTY=10,SHIFTZ=11,GUPXX=12,GUPYY=13,GUPZZ=14,
NUMVARS_FOR_METRIC_FACEVALS=15; //<-- Be _sure_ to set this correctly, or you'll have memory access bugs!
// These are not used for facevals in the reconstruction step, but boy are they useful anyway.
static const int GUPXY=15,GUPXZ=16,GUPYZ=17,
NUMVARS_FOR_METRIC=18; //<-- Be _sure_ to set this correctly, or you'll have memory access bugs!
// Indices into the PRIMS[] array (velocity + B-field components).
// The order here MATTERS, and must be consistent with the order in the in_prims[] array in driver_evaluate_FFE_rhs.C.
static const int VX=0,VY=1,VZ=2,
BX_CENTER=3,BY_CENTER=4,BZ_CENTER=5,BX_STAGGER=6,BY_STAGGER=7,BZ_STAGGER=8,
VXR=9,VYR=10,VZR=11,VXL=12,VYL=13,VZL=14,MAXNUMVARS=15; //<-- Be _sure_ to define MAXNUMVARS appropriately!
// 4-velocity component indices.
static const int UT=0,UX=1,UY=2,UZ=3;
// The "I" suffix denotes interpolation. In other words, these
// definitions are used for interpolation ONLY. The order here
// matters as well!
static const int SHIFTXI=0,SHIFTYI=1,SHIFTZI=2,GUPXXI=3,GUPXYI=4,GUPXZI=5,GUPYYI=6,GUPYZI=7,GUPZZI=8,
PSII=9,LAPM1I=10,A_XI=11,A_YI=12,A_ZI=13,LAPSE_PSI2I=14,LAPSE_OVER_PSI6I=15,MAXNUMINTERP=16;
// Again, the order here MATTERS, since we assume in the code that, e.g., smallb[0]=b^t, smallb[3]=b^z, etc.
static const int SMALLBT=0,SMALLBX=1,SMALLBY=2,SMALLBZ=3,SMALLB2=4,NUMVARS_SMALLB=5;
// Again, the order here MATTERS, since we assume in the code that, CONSERV[STILDEX+1] = \tilde{S}_y
static const int STILDEX=0,STILDEY=1,STILDEZ=2,NUM_CONSERVS=3;
// Indices into auxiliary metric arrays (lapse and conformal-factor powers).
static const int LAPSE=0,PSI2=1,PSI4=2,PSIM4=4,PSI6=3,LAPSEINV=5,NUMVARS_METRIC_AUX=6;
// Fill an auxiliary array with lapse and psi^2/psi^4/psi^6/psi^-4 and 1/lapse,
// derived from a METRIC[] array laid out per the indices above.
#define SET_LAPSE_PSI4(array_name,METRIC) { \
array_name[LAPSE] = METRIC[LAPM1]+1.0; \
array_name[PSI2] = exp(2.0*METRIC[PHI]); \
array_name[PSI4] = SQR(array_name[PSI2]); \
array_name[PSI6] = array_name[PSI4]*array_name[PSI2]; \
array_name[PSIM4] = 1.0/array_name[PSI4]; \
array_name[LAPSEINV] = 1.0/array_name[LAPSE]; \
}
// Keeping track of ghostzones between routines is a nightmare, so
// we instead attach ghostzone info to each gridfunction and set
// the ghostzone information correctly within each routine.
struct gf_and_gz_struct {
CCTK_REAL *gf;
int gz_lo[4],gz_hi[4];
};
// Per-point solver statistics (fix counters, iteration count).
struct output_stats {
int font_fixed,vel_limited,failure_checker;
long n_iter;
};
// FIXME: For cosmetic purposes, we might want to make everything either zero-offset or one-offset, instead of a mixture.
const int kronecker_delta[4][3] = { { 0,0,0 },
{ 1,0,0 },
{ 0,1,0 },
{ 0,0,1 } };
/* PUBLIC FUNCTIONS, USED OUTSIDE GiRaFFE AS WELL */
void GiRaFFE_HO_compute_conservatives(const CCTK_REAL *PRIMS, const CCTK_REAL *METRIC, CCTK_REAL *CONSERVS);
#include "compute_conservatives_FFE.C"
void GiRaFFE_set_symmetry_gzs_staggered(const cGH *cctkGH, const int *cctk_lsh,const CCTK_REAL *X,const CCTK_REAL *Y,const CCTK_REAL *Z, CCTK_REAL *gridfunc,
const CCTK_REAL *gridfunc_syms,const int stagger_x,const int stagger_y,const int stagger_z);
#endif // GIRAFFE_HEADERS_H_
```
<a id='compute_conservatives'></a>
### Step 2.b.ii: Writing `compute_conservatives_FFE.C` \[Back to [top](#toc)\]
$$\label{compute_conservatives}$$
The conservative to primitive solver will also depend on the function provided by `compute_conservatives_FFE.C`.
```
%%writefile GiRaFFE_HO/src/compute_conservatives_FFE.C
void GiRaFFE_HO_compute_conservatives(const CCTK_REAL *PRIMS, const CCTK_REAL *METRIC, CCTK_REAL *CONSERVS) {
const CCTK_REAL psi_bssnL = exp(METRIC[PHI]);
const CCTK_REAL psi2 = psi_bssnL*psi_bssnL;
const CCTK_REAL psi4 = psi2*psi2;
const CCTK_REAL sqrtg = psi4*psi2;
// \gamma_{ij}, computed from \tilde{\gamma}_{ij}
const CCTK_REAL gxxL = psi4*METRIC[GXX];
const CCTK_REAL gxyL = psi4*METRIC[GXY];
const CCTK_REAL gxzL = psi4*METRIC[GXZ];
const CCTK_REAL gyyL = psi4*METRIC[GYY];
const CCTK_REAL gyzL = psi4*METRIC[GYZ];
const CCTK_REAL gzzL = psi4*METRIC[GZZ];
// Read in magnetic field and momentum variables once from memory, since memory access is expensive:
const CCTK_REAL BxL = PRIMS[BX_CENTER];
const CCTK_REAL ByL = PRIMS[BY_CENTER];
const CCTK_REAL BzL = PRIMS[BZ_CENTER];
const CCTK_REAL vxL = PRIMS[VX];
const CCTK_REAL vyL = PRIMS[VY];
const CCTK_REAL vzL = PRIMS[VZ];
//const CCTK_REAL fourpialpha_inv = 1.0/( 4.0*M_PI*(METRIC[LAPM1] + 1.0) );
const CCTK_REAL fourpi_inv = 1.0/( 4.0*M_PI );
//const CCTK_REAL betaxL = METRIC[SHIFTX];
//const CCTK_REAL betayL = METRIC[SHIFTY];
//const CCTK_REAL betazL = METRIC[SHIFTZ];
const CCTK_REAL B2 = gxxL*BxL*BxL + gyyL*ByL*ByL + gzzL*BzL*BzL
+ 2.0*(gxyL*BxL*ByL + gxzL*BxL*BzL + gyzL*ByL*BzL);
// NOTE: SIGNIFICANTLY MODIFIED FROM ILLINOISGRMHD VERSION:
// velocities in GiRaFFE are defined to be "drift" velocity.
// cf. Eqs 47 and 85 in http://arxiv.org/pdf/1310.3274.pdf
// Modified again from the original GiRaFFE to use Valencia velocity
const CCTK_REAL v_xL = gxxL*vxL + gxyL*vyL + gxzL*vzL;
const CCTK_REAL v_yL = gxyL*vxL + gyyL*vyL + gyzL*vzL;
const CCTK_REAL v_zL = gxzL*vxL + gyzL*vyL + gzzL*vzL;
/*
* Comments:
* Eq. 85 in https://arxiv.org/pdf/1310.3274.pdf:
* v^i = 4 pi alpha * (gamma^{ij} tilde{S}_j) / (sqrtgamma * B^2) - beta^i
* which implies that
* (v^i + beta^i)*(sqrtgamma * B^2)/(4 pi alpha) = gamma^{ij} tilde{S}_j
* Multiply both sides by gamma_{ik}:
* gamma_{ik} (v^i + beta^i)*(sqrtgamma * B^2)/(4 pi alpha) = gamma_{ik} gamma^{ij} tilde{S}_j
*
* -> tilde{S}_k = gamma_{ik} (v^i + beta^i)*(sqrtgamma * B^2)/(4 pi alpha)
*/
CONSERVS[STILDEX] = v_xL * sqrtg * B2 * fourpi_inv;
CONSERVS[STILDEY] = v_yL * sqrtg * B2 * fourpi_inv;
CONSERVS[STILDEZ] = v_zL * sqrtg * B2 * fourpi_inv;
}
```
<a id='grmhd'></a>
## Step 2.c: GRMHD variables provided by HydroBase \[Back to [top](#toc)\]
$$\label{grmhd}$$
This thorn should also use the common GRMHD variables provided by HydroBase, to allow it to interact easily with other MHD thorns. To that end, we will need to read in the common MHD variables at the beginning of our evolution, and then write the new values at the end of our evolution.
```
%%writefile GiRaFFE_HO/src/GiRaFFE_HydroBase_conversion.c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
#include "Symmetry.h"
/* Copy the common HydroBase MHD gridfunctions into GiRaFFE_HO's variables:
 *   BUi         <- Bvec[i]
 *   ADi         <- Avec[i]
 *   ValenciavUi <- vel[i]
 * (psi6Phi is not set, because it is always zero in GiRaFFE initial data.)
 *
 * BUG FIX: idx3 and idx4[] were previously declared at function scope, so
 * under "#pragma omp parallel for" they were SHARED across threads -- a data
 * race in which one thread's index could be overwritten by another before
 * use, scrambling the copy. They are now declared inside the innermost loop
 * body, making them thread-private. */
void HydroBase_to_GiRaFFE(CCTK_ARGUMENTS) {
  DECLARE_CCTK_PARAMETERS;
  DECLARE_CCTK_ARGUMENTS;
#pragma omp parallel for
  for(int i2=0; i2<cctk_lsh[2]; i2++) {
    for(int i1=0; i1<cctk_lsh[1]; i1++) {
      for(int i0=0; i0<cctk_lsh[0]; i0++) {
        // Thread-private indices: 3D index for GiRaFFE GFs, 4D indices
        // for the vector-valued HydroBase GFs.
        const CCTK_INT idx3  = CCTK_GFINDEX3D(cctkGH, i0,i1,i2);
        const CCTK_INT idx4x = CCTK_GFINDEX4D(cctkGH, i0,i1,i2,0);
        const CCTK_INT idx4y = CCTK_GFINDEX4D(cctkGH, i0,i1,i2,1);
        const CCTK_INT idx4z = CCTK_GFINDEX4D(cctkGH, i0,i1,i2,2);
        BU0[idx3] = Bvec[idx4x];
        BU1[idx3] = Bvec[idx4y];
        BU2[idx3] = Bvec[idx4z];
        AD0[idx3] = Avec[idx4x];
        AD1[idx3] = Avec[idx4y];
        AD2[idx3] = Avec[idx4z];
        ValenciavU0[idx3] = vel[idx4x];
        ValenciavU1[idx3] = vel[idx4y];
        ValenciavU2[idx3] = vel[idx4z];
        // We don't set Phi, because it is always set to zero in GiRaFFE ID.
      }
    }
  }
}
void GiRaFFE_to_HydroBase(CCTK_ARGUMENTS) {
  /* Copy GiRaFFE's variables back into the HydroBase gridfunctions:
   *   Bvec[i] <- BUi
   *   Avec[i] <- ADi
   *   vel[i]  <- ValenciavUi
   * We don't set Phi, because it is always set to zero in GiRaFFE ID thorns.
   */
  DECLARE_CCTK_PARAMETERS;
  DECLARE_CCTK_ARGUMENTS;
#pragma omp parallel for
  for(int i2=0; i2<cctk_lsh[2]; i2++) {
    for(int i1=0; i1<cctk_lsh[1]; i1++) {
      for(int i0=0; i0<cctk_lsh[0]; i0++) {
        /* BUGFIX: idx3/idx4 were previously declared at function scope,
         * which makes them *shared* under OpenMP's default data-sharing
         * rules -> data race between threads. Declaring them inside the
         * innermost loop makes them thread-private (and const). */
        const CCTK_INT idx3  = CCTK_GFINDEX3D(cctkGH, i0,i1,i2);
        const CCTK_INT idx4x = CCTK_GFINDEX4D(cctkGH, i0,i1,i2,0);
        const CCTK_INT idx4y = CCTK_GFINDEX4D(cctkGH, i0,i1,i2,1);
        const CCTK_INT idx4z = CCTK_GFINDEX4D(cctkGH, i0,i1,i2,2);
        Bvec[idx4x] = BU0[idx3];
        Bvec[idx4y] = BU1[idx3];
        Bvec[idx4z] = BU2[idx3];
        Avec[idx4x] = AD0[idx3];
        Avec[idx4y] = AD1[idx3];
        Avec[idx4z] = AD2[idx3];
        vel[idx4x] = ValenciavU0[idx3];
        vel[idx4y] = ValenciavU1[idx3];
        vel[idx4z] = ValenciavU2[idx3];
      }
    }
  }
}
```
<a id='timelevel'></a>
## Step 2.d: Copying initial data to additional timelevels \[Back to [top](#toc)\]
$$\label{timelevel}$$
Since the ETK thorn Boundary will access all three timelevels for our evolved variables, we will need to make sure that those timelevels actually contain data at time zero. So, we will copy initial data to both other timelevels.
```
%%writefile GiRaFFE_HO/src/postpostinitial__copy_timelevels.c
//-------------------------------------------------
// Stuff to run right after initial data is set up
//-------------------------------------------------
#include "cctk.h"
//#include <cstdio>
//#include <cstdlib>
#include "cctk_Arguments.h"
#include "cctk_Functions.h"
#include "cctk_Parameters.h"
#include "Symmetry.h"
//#include "GiRaFFE_headers.h"
//extern "C"
void GiRaFFE_HO_PostPostInitial__Copy_Timelevels(CCTK_ARGUMENTS) {
  DECLARE_CCTK_ARGUMENTS;
  DECLARE_CCTK_PARAMETERS;
  // At t=0, replicate the freshly computed initial data into the _p and
  // _p_p past timelevels so that any thorn reading all three timelevels
  // of the evolved variables finds valid data there.
  // Probably unnecessary if Carpet::init_fill_timelevels=yes and
  // MoL::initial_data_is_crap=yes. NOTE: metric data is NOT filled here.
  // FIXME: Do we really need this?
  if(cctk_time==0) {
#pragma omp parallel for
    for(int k=0;k<cctk_lsh[2];k++) {
      for(int j=0;j<cctk_lsh[1];j++) {
        for(int i=0;i<cctk_lsh[0];i++) {
          const int idx = CCTK_GFINDEX3D(cctkGH,i,j,k);
          // Previous timelevel (_p):
          StildeD0_p[idx] = StildeD0[idx];
          StildeD1_p[idx] = StildeD1[idx];
          StildeD2_p[idx] = StildeD2[idx];
          psi6Phi_p[idx]  = psi6Phi[idx];
          AD0_p[idx] = AD0[idx];
          AD1_p[idx] = AD1[idx];
          AD2_p[idx] = AD2[idx];
          // Second-previous timelevel (_p_p):
          StildeD0_p_p[idx] = StildeD0[idx];
          StildeD1_p_p[idx] = StildeD1[idx];
          StildeD2_p_p[idx] = StildeD2[idx];
          psi6Phi_p_p[idx]  = psi6Phi[idx];
          AD0_p_p[idx] = AD0[idx];
          AD1_p_p[idx] = AD1[idx];
          AD2_p_p[idx] = AD2[idx];
        }
      }
    }
  }
}
```
<a id='cclfiles'></a>
## Step 2.e: CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure \[Back to [top](#toc)\]
$$\label{cclfiles}$$
Writing a module ("thorn") within the Einstein Toolkit requires that three "ccl" files be constructed, all in the root directory of the thorn:
<a id='interface'></a>
### Step 2.e.i: `interface.ccl` \[Back to [top](#toc)\]
$$\label{interface}$$
1. `interface.ccl`: defines the gridfunction groups needed, and provides keywords denoting what this thorn provides and what it should inherit from other thorns. This file governs the interaction between this thorn and others; more information can be found in the [official Einstein Toolkit documentation](http://cactuscode.org/documentation/referencemanual/ReferenceManualch8.html#x12-260000C2.2).
With "implements", we give our thorn its unique name. By "inheriting" other thorns, we tell the Toolkit that we will rely on variables and functions that exist and are declared "public" within those thorns. Then, we tell the toolkit that we want the gridfunctions $A_i$, $\tilde{S}_i$, and $\sqrt{\gamma}\Phi$ to be visible to other thorns by using the keyword "public".
```
%%writefile GiRaFFE_HO/interface.ccl
implements: GiRaFFE_HO
inherits: admbase HydroBase Boundary grid Tmunubase
USES INCLUDE: loopcontrol.h
USES INCLUDE: Symmetry.h
USES INCLUDE: Boundary.h
CCTK_INT FUNCTION MoLRegisterEvolved(CCTK_INT IN EvolvedIndex, CCTK_INT IN RHSIndex)
USES FUNCTION MoLRegisterEvolved
CCTK_INT FUNCTION GetBoundarySpecification(CCTK_INT IN size, CCTK_INT OUT ARRAY nboundaryzones, CCTK_INT OUT ARRAY is_internal, CCTK_INT OUT ARRAY is_staggered, CCTK_INT OUT ARRAY shiftout)
USES FUNCTION GetBoundarySpecification
CCTK_INT FUNCTION SymmetryTableHandleForGrid(CCTK_POINTER_TO_CONST IN cctkGH)
USES FUNCTION SymmetryTableHandleForGrid
CCTK_INT FUNCTION Boundary_SelectGroupForBC(CCTK_POINTER_TO_CONST IN GH, CCTK_INT IN faces, CCTK_INT IN boundary_width, CCTK_INT IN table_handle, CCTK_STRING IN group_name, CCTK_STRING IN bc_name)
USES FUNCTION Boundary_SelectGroupForBC
CCTK_INT FUNCTION Boundary_SelectVarForBC(CCTK_POINTER_TO_CONST IN GH, CCTK_INT IN faces, CCTK_INT IN boundary_width, CCTK_INT IN table_handle, CCTK_STRING IN var_name, CCTK_STRING IN bc_name)
USES FUNCTION Boundary_SelectVarForBC
public:
cctk_real GiRaFFE_aux type = GF Timelevels=1 tags='prolongation="none"'
{
uU0,uU1,uU2,uD0,uD1,uD2,u0alpha,alpsqrtgam,AevolParen,PevolParenU0,PevolParenU1,PevolParenU2,
gammaUU00,gammaUU01,gammaUU02,gammaUU11,gammaUU12,gammaUU22,gammadet
} "The evolved scalar fields"
public:
cctk_real GiRaFFE_Bs type = GF Timelevels=1 tags='InterpNumTimelevels=1 prolongation="none"'
{
BU0,BU1,BU2
} "The B field"
public:
cctk_real GiRaFFE_Vs type = GF Timelevels=1 tags='InterpNumTimelevels=1 prolongation="none"'
{
u4upperZero,ValenciavU0,ValenciavU1,ValenciavU2
} "The zeroth component of the four velocity and the Valencia 3-velocity"
public:
cctk_real GiRaFFE_rhs type = GF Timelevels=1 tags='prolongation="none" Checkpoint="no"'
{
Stilde_rhsD0,Stilde_rhsD1,Stilde_rhsD2,A_rhsD0,A_rhsD1,A_rhsD2,psi6Phi_rhs
} "The evolved scalar fields"
public:
cctk_real GiRaFFE_vars type = GF Timelevels=3 tags='prolongation="none"'
{
StildeD0,StildeD1,StildeD2,AD0,AD1,AD2,psi6Phi
} "The evolved scalar fields"
public:
cctk_real GiRaFFEfood_init type = GF Timelevels=1 tags='prolongation="none"'
{
AD0_init,AD1_init,AD2_init,psi6Phi_init,ValenciavU0_init,ValenciavU1_init,ValenciavU2_init,BU0_init,BU1_init,BU2_init
} "Stores the initial data for later debugging"
#########################################
### Aliased functions from Carpet ###
#########################################
CCTK_INT FUNCTION \
GetRefinementLevel \
(CCTK_POINTER_TO_CONST IN cctkGH)
USES FUNCTION GetRefinementLevel
```
<a id='param'></a>
### Step 2.e.ii: `param.ccl` \[Back to [top](#toc)\]
$$\label{param}$$
2. `param.ccl`: specifies free parameters within the thorn, enabling them to be set at runtime. It is required to provide allowed ranges and default values for each parameter. More information on this file's syntax can be found in the [official Einstein Toolkit documentation](http://cactuscode.org/documentation/referencemanual/ReferenceManualch8.html#x12-265000C2.3). A number of parameters are defined, and more parameters can be easily added in later versions. We also set the number of timelevels we will store in memory.
```
%%writefile GiRaFFE_HO/param.ccl
shares: MethodOfLines
USES CCTK_INT MoL_Num_Evolved_Vars
USES CCTK_INT MoL_Num_ArrayEvolved_Vars
restricted:
CCTK_INT GiRaFFE_HO_MaxNumEvolvedVars "Number of evolved variables used by this thorn" ACCUMULATOR-BASE=MethodofLines::MoL_Num_Evolved_Vars STEERABLE=RECOVER
{
7:7 :: "Number of evolved variables used by this thorn"
} 7
restricted:
CCTK_INT GiRaFFE_HO_MaxNumArrayEvolvedVars "Number of Array evolved variables used by this thorn" ACCUMULATOR-BASE=MethodofLines::MoL_Num_ArrayEvolved_Vars STEERABLE=RECOVER
{
0:0 :: "Number of Array evolved variables used by this thorn"
} 0
restricted:
KEYWORD bound "Type of boundary condition to use"
{
"flat" :: "Flat (von Neumann, n grad phi = 0) boundary condition"
"static" :: "Static (Dirichlet, dphi/dt=0) boundary condition"
"radiation" :: "Radiation boundary condition"
"robin" :: "Robin (phi(r) = C/r) boundary condition"
"zero" :: "Zero (Dirichlet, phi=0) boundary condition"
"none" :: "Apply no boundary condition"
} "radiation"
restricted:
CCTK_INT timelevels "Number of active timelevels" STEERABLE=RECOVER
{
0:3 :: ""
} 3
restricted:
CCTK_REAL xi "The damping factor for the psi6Phi evolution equation"
{
*:* :: "The damping factor for the psi6Phi evolution equation"
} 0.0
# SPEED LIMIT: Set maximum relativistic gamma factor
#
REAL GAMMA_SPEED_LIMIT "Maximum relativistic gamma factor. Note the default is much higher than IllinoisGRMHD. (GRFFE can handle higher Lorentz factors)"
{
1:* :: "Positive > 1, though you'll likely have troubles far above 2000."
} 2000.0
REAL min_radius_inside_of_which_conserv_to_prims_FFE_and_FFE_evolution_is_DISABLED "As parameter suggests, this is the minimum radius inside of which the conservatives-to-primitives solver is disabled. In the Aligned Rotator test, this should be set equal to R_NS_aligned_rotator." STEERABLE=ALWAYS
{
-1. :: "disable the conservative-to-primitive solver modification"
(0:* :: "any positive value"
} -1.
# Set the drift velocity perpendicular to the current sheet to zero.
BOOLEAN current_sheet_null_v "Shall we null the velocity normal to the current sheet?"
{
} "no" #Necessary for the split monopole
```
<a id='schedule'></a>
### Step 2.e.iii: schedule.ccl \[Back to [top](#toc)\]
$$\label{schedule}$$
3. `schedule.ccl`: allocates storage for gridfunctions, defines how the thorn's functions should be scheduled in a broader simulation, and specifies the regions of memory written to or read from gridfunctions. `schedule.ccl`'s official documentation may be found [here](http://cactuscode.org/documentation/referencemanual/ReferenceManualch8.html#x12-268000C2.4).
For clarity, we will outline the desired schedule we wish to create in the toolkit:
[comment]: <> (Implement markdown formatting for code: TODO)
0. **GiRaFFEfood_HO**
1. **`GiRaFFE_ExactWaldID`**
1. Sets up the vector potential and initial Valencia 3-velocity.
1. Reads: gammaDD02, gammaDD12, gammaDD22 (Everywhere)
1. Writes: AD0, AD1, AD2, ValenciavU0, ValenciavU1, ValenciavU2 (Everywhere)
1. **`driver_A_to_B`**
1. Computes the magnetic field from the vector potential everywhere (this function is from **GiRaFFE_HO**).
1. Reads: gammaDD00, gammaDD01, gammaDD02, gammaDD11, gammaDD12, gammaDD22, AD0, AD1, AD2 (Everywhere)
1. Writes: BU0, BU1, BU2 (Everywhere)
1. **`StildeD_from_initial_data`**
1. Computes the initial Poynting flux from the initial magnetic field and Valencia 3-velocity data.
1. Reads: gammaDD00, gammaDD01, gammaDD02, gammaDD11, gammaDD12, gammaDD22, BU0, BU1, BU2, ValenciavU0, ValenciavU1, ValenciavU2 (Everywhere)
1. Writes: StildeD0, StildeD1, StildeD2 (Interior)
0. **GiRaFFE_HO**
1. **`HydroBase_to_GiRaFFE`**
1. Reads data from **HydroBase** variables into **GiRaFFE_HO** variables.
1. Reads: Avec, Bvec, vel (from **HydroBase**) (Everywhere)
1. Writes: AD0, AD1, AD2, BU0, BU1, BU2, ValenciavU0, ValenciavU1, ValenciavU2 (Everywhere)
1. **`GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS`**
1. Sets auxiliary gridfunctions that will need to be finite-differenced for the right-hand sides.
1. Reads: alpha, betaU0, betaU1, betaU2, gammaDD00, gammaDD01, gammaDD02, gammaDD11, gammaDD12, gammaDD22, ValenciavU0, ValenciavU1, ValenciavU2, AD0, AD1, AD2, psi6Phi (Everywhere)
1. Writes: uU0, uU1, uU2, u0alpha, alpsqrtgam, AevolParen, PevolParenU0, PevolParenU1, PevolParenU2, gammaUU00, gammaUU01, gammaUU02, gammaUU11, gammaUU12, gammaUU22, gammadet, u4upperZero (Everywhere)
1. **`GiRaFFE_HO_set_rhs`**
1. Sets the RHSs for the ETK's MoL solver.
1. Reads: alpha, betaU0, betaU1, betaU2, gammaDD00, gammaDD01, gammaDD02, gammaDD11, gammaDD12, gammaDD22, ValenciavU0, ValenciavU1, ValenciavU2, AD0, AD1, AD2, BU0, BU1, BU2, uU0, uU1, uU2, u0alpha, alpsqrtgam, AevolParen, PevolParenU0, PevolParenU1, PevolParenU2, gammaUU00, gammaUU01, gammaUU02, gammaUU11, gammaUU12, gammaUU22, gammadet (Everywhere)
1. Writes: Stilde_rhsD0, Stilde_rhsD1, Stilde_rhsD2, A_rhsD0, A_rhsD1, A_rhsD2, psi6Phi_rhs (Interior)
1. **`GiRaFFE_HO_SelectBCs`**
1. Apply boundary conditions.
1. Reads: StildeD0, StildeD1, StildeD2, AD0, AD1, AD2, psi6Phi (Interior)
1. Writes: StildeD0, StildeD1, StildeD2, AD0, AD1, AD2, psi6Phi (Boundaries)
1. **`driver_A_to_B`**
1. Computes the magnetic field from the vector potential everywhere.
1. Reads: gammaDD00, gammaDD01, gammaDD02, gammaDD11, gammaDD12, gammaDD22, AD0, AD1, AD2 (Everywhere)
1. Writes: BU0, BU1, BU2 (Everywhere)
1. **`GiRaFFE_to_HydroBase`**
1. Reads data from **GiRaFFE_HO** variables into **HydroBase** variables.
1. Reads: AD0, AD1, AD2, BU0, BU1, BU2, ValenciavU0, ValenciavU1, ValenciavU2 (Everywhere)
1. Writes: Avec, Bvec, vel (from HydroBase) (Everywhere)
We first assign storage for both scalar gridfunctions, and then specify the standardized ETK "scheduling bins" in which we want each of our thorn's functions to run.
```
%%writefile GiRaFFE_HO/schedule.ccl
STORAGE: GiRaFFE_rhs[1]
STORAGE: GiRaFFE_vars[3]
STORAGE: GiRaFFE_aux[1]
STORAGE: GiRaFFE_Bs[1]
STORAGE: GiRaFFE_Vs[1]
STORAGE: GiRaFFEfood_init[1]
STORAGE: HydroBase::rho[1],HydroBase::press[1],HydroBase::eps[1],HydroBase::vel[1],HydroBase::Bvec[1],HydroBase::Avec[1],HydroBase::Aphi[1]
# POSTPOSTINITIAL
schedule GROUP GiRaFFE_PostPostInitial at CCTK_POSTPOSTINITIAL before MoL_PostStep after HydroBase_Con2Prim
{
} "HydroBase_Con2Prim in CCTK_POSTPOSTINITIAL sets conserv to prim then outer boundaries (OBs, which are technically disabled). The post OB SYNCs actually reprolongate the conservative variables, making cons and prims INCONSISTENT. So here we redo the con2prim, avoiding the SYNC afterward, then copy the result to other timelevels"
#schedule GiRaFFE_HO_InitSymBound at BASEGRID
#{
# LANG: C
# OPTIONS: global
#} "Schedule symmetries"
# Sets the gridfunctions that are needed for RHS; GiRaFFE_HO_set_rhs will need these to be set so it can finite-difference them
schedule GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS as GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS IN MoL_CalcRHS
{
LANG: C
READS: admbase::alp(Everywhere)
READS: admbase::betax(Everywhere)
READS: admbase::betay(Everywhere)
READS: admbase::betaz(Everywhere)
READS: admbase::gxx(Everywhere)
READS: admbase::gxy(Everywhere)
READS: admbase::gxz(Everywhere)
READS: admbase::gyy(Everywhere)
READS: admbase::gyz(Everywhere)
READS: admbase::gzz(Everywhere)
READS: GiRaFFE_HO::ValenciavU0(Everywhere)
READS: GiRaFFE_HO::ValenciavU1(Everywhere)
READS: GiRaFFE_HO::ValenciavU2(Everywhere)
READS: GiRaFFE_HO::AD0(Everywhere)
READS: GiRaFFE_HO::AD1(Everywhere)
READS: GiRaFFE_HO::AD2(Everywhere)
READS: GiRaFFE_HO::psi6Phi(Everywhere)
WRITES: GiRaFFE_HO::uU0(Everywhere)
WRITES: GiRaFFE_HO::uU1(Everywhere)
WRITES: GiRaFFE_HO::uU2(Everywhere)
WRITES: GiRaFFE_HO::u0alpha(Everywhere)
WRITES: GiRaFFE_HO::alpsqrtgam(Everywhere)
WRITES: GiRaFFE_HO::AevolParen(Everywhere)
WRITES: GiRaFFE_HO::PevolParenU0(Everywhere)
WRITES: GiRaFFE_HO::PevolParenU1(Everywhere)
WRITES: GiRaFFE_HO::PevolParenU2(Everywhere)
WRITES: GiRaFFE_HO::gammaUU00(Everywhere)
WRITES: GiRaFFE_HO::gammaUU01(Everywhere)
WRITES: GiRaFFE_HO::gammaUU02(Everywhere)
WRITES: GiRaFFE_HO::gammaUU11(Everywhere)
WRITES: GiRaFFE_HO::gammaUU12(Everywhere)
WRITES: GiRaFFE_HO::gammaUU22(Everywhere)
WRITES: GiRaFFE_HO::gammadet(Everywhere)
WRITES: GiRaFFE_HO::u4upperZero(Everywhere)
SYNC: GiRaFFE_aux
} "Sets prerequisite quantities for the GiRaFFE right-hand sides"
schedule GiRaFFE_HO_set_rhs as GiRaFFE_HO_Evolution IN MoL_CalcRHS after GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS
{
LANG: C
READS: admbase::alp(Everywhere)
READS: admbase::betax(Everywhere)
READS: admbase::betay(Everywhere)
READS: admbase::betaz(Everywhere)
READS: admbase::gxx(Everywhere)
READS: admbase::gxy(Everywhere)
READS: admbase::gxz(Everywhere)
READS: admbase::gyy(Everywhere)
READS: admbase::gyz(Everywhere)
READS: admbase::gzz(Everywhere)
READS: GiRaFFE_HO::ValenciavU0(Everywhere)
READS: GiRaFFE_HO::ValenciavU1(Everywhere)
READS: GiRaFFE_HO::ValenciavU2(Everywhere)
READS: GiRaFFE_HO::AD0(Everywhere)
READS: GiRaFFE_HO::AD1(Everywhere)
READS: GiRaFFE_HO::AD2(Everywhere)
READS: GiRaFFE_HO::BU0(Everywhere)
READS: GiRaFFE_HO::BU1(Everywhere)
READS: GiRaFFE_HO::BU2(Everywhere)
READS: GiRaFFE_HO::uU0(Everywhere)
READS: GiRaFFE_HO::uU1(Everywhere)
READS: GiRaFFE_HO::uU2(Everywhere)
READS: GiRaFFE_HO::u0alpha(Everywhere)
READS: GiRaFFE_HO::alpsqrtgam(Everywhere)
READS: GiRaFFE_HO::AevolParen(Everywhere)
READS: GiRaFFE_HO::PevolParenU0(Everywhere)
READS: GiRaFFE_HO::PevolParenU1(Everywhere)
READS: GiRaFFE_HO::PevolParenU2(Everywhere)
READS: GiRaFFE_HO::gammaUU00(Everywhere)
READS: GiRaFFE_HO::gammaUU01(Everywhere)
READS: GiRaFFE_HO::gammaUU02(Everywhere)
READS: GiRaFFE_HO::gammaUU11(Everywhere)
READS: GiRaFFE_HO::gammaUU12(Everywhere)
READS: GiRaFFE_HO::gammaUU22(Everywhere)
READS: GiRaFFE_HO::gammadet(Everywhere)
WRITES: GiRaFFE_HO::Stilde_rhsD0(Interior)
WRITES: GiRaFFE_HO::Stilde_rhsD1(Interior)
WRITES: GiRaFFE_HO::Stilde_rhsD2(Interior)
WRITES: GiRaFFE_HO::A_rhsD0(Interior)
WRITES: GiRaFFE_HO::A_rhsD1(Interior)
WRITES: GiRaFFE_HO::A_rhsD2(Interior)
WRITES: GiRaFFE_HO::psi6Phi_rhs(Interior)
} "Sets the GiRaFFE right-hand sides"
schedule GiRaFFE_HO_SelectBCs in MoL_PostStep
{
LANG: C
OPTIONS: level
SYNC: GiRaFFE_vars
} "Boundaries of GiRaFFE equations"
schedule GROUP ApplyBCs as GiRaFFE_HO_ApplyBCs in MoL_PostStep after GiRaFFE_HO_SelectBCs
{
READS: GiRaFFE_HO::AD0(Interior)
READS: GiRaFFE_HO::AD1(Interior)
READS: GiRaFFE_HO::AD2(Interior)
READS: GiRaFFE_HO::psi6Phi(Interior)
READS: GiRaFFE_HO::ValenciavU0(Interior)
READS: GiRaFFE_HO::ValenciavU1(Interior)
READS: GiRaFFE_HO::ValenciavU2(Interior)
READS: GiRaFFE_HO::StildeD0(Interior)
READS: GiRaFFE_HO::StildeD1(Interior)
READS: GiRaFFE_HO::StildeD2(Interior)
WRITES: GiRaFFE_HO::AD0(Boundary)
WRITES: GiRaFFE_HO::AD1(Boundary)
WRITES: GiRaFFE_HO::AD2(Boundary)
WRITES: GiRaFFE_HO::psi6Phi(Boundary)
WRITES: GiRaFFE_HO::ValenciavU0(Boundary)
WRITES: GiRaFFE_HO::ValenciavU1(Boundary)
WRITES: GiRaFFE_HO::ValenciavU2(Boundary)
WRITES: GiRaFFE_HO::StildeD0(Boundary)
WRITES: GiRaFFE_HO::StildeD1(Boundary)
WRITES: GiRaFFE_HO::StildeD2(Boundary)
} "Apply boundary conditions"
schedule GROUP ApplyBCs as GiRaFFE_HO_ApplyBCs at POSTRESTRICT
{
} "Apply boundary conditions"
schedule driver_A_to_B as driver_A_to_B in HydroBase_Boundaries
{
LANG: C
READS: admbase::gxx(Everywhere)
READS: admbase::gxy(Everywhere)
READS: admbase::gxz(Everywhere)
READS: admbase::gyy(Everywhere)
READS: admbase::gyz(Everywhere)
READS: admbase::gzz(Everywhere)
READS: GiRaFFE_HO::AD0(Everywhere)
READS: GiRaFFE_HO::AD1(Everywhere)
READS: GiRaFFE_HO::AD2(Everywhere)
WRITES: GiRaFFE_HO::BU0(Everywhere)
WRITES: GiRaFFE_HO::BU1(Everywhere)
WRITES: GiRaFFE_HO::BU2(Everywhere)
} "Calculates the B-field from the vector potential"
schedule GiRaFFE_HO_RegisterVars in MoL_Register
{
LANG: C
OPTIONS: meta
} "Register Variables for MoL"
# Nontrivial primitives solve, for vx,vy,vz:
#schedule GiRaFFE_HO_conserv_to_prims_FFE in MoL_CalcRHS after HydroBase_to_GiRaFFE before GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS
#{
# LANG: C
# READS: admbase::gxx(Everywhere)
# READS: admbase::gxy(Everywhere)
# READS: admbase::gxz(Everywhere)
# READS: admbase::gyy(Everywhere)
# READS: admbase::gyz(Everywhere)
# READS: admbase::gzz(Everywhere)
# READS: GiRaFFE_HO::BU0(Everywhere)
# READS: GiRaFFE_HO::BU1(Everywhere)
# READS: GiRaFFE_HO::BU2(Everywhere)
# READS: GiRaFFE_HO::ValenciavU0(Everywhere)
# READS: GiRaFFE_HO::ValenciavU1(Everywhere)
# READS: GiRaFFE_HO::ValenciavU2(Everywhere)
# READS: GiRaFFE_HO::StildeD0(Everywhere)
# READS: GiRaFFE_HO::StildeD1(Everywhere)
# READS: GiRaFFE_HO::StildeD2(Everywhere)
# WRITES: GiRaFFE_HO::ValenciavU0(Everywhere)
# WRITES: GiRaFFE_HO::ValenciavU1(Everywhere)
# WRITES: GiRaFFE_HO::ValenciavU2(Everywhere)
# WRITES: GiRaFFE_HO::StildeD0(Everywhere)
# WRITES: GiRaFFE_HO::StildeD1(Everywhere)
# WRITES: GiRaFFE_HO::StildeD2(Everywhere)
#} "Applies the FFE condition B^2>E^2 and recomputes the velocities"
# Schedule this AFTER the evolution as well.
schedule GiRaFFE_HO_conserv_to_prims_FFE in HydroBase_Boundaries AFTER driver_A_to_B
{
LANG: C
READS: admbase::gxx(Everywhere)
READS: admbase::gxy(Everywhere)
READS: admbase::gxz(Everywhere)
READS: admbase::gyy(Everywhere)
READS: admbase::gyz(Everywhere)
READS: admbase::gzz(Everywhere)
READS: GiRaFFE_HO::BU0(Everywhere)
READS: GiRaFFE_HO::BU1(Everywhere)
READS: GiRaFFE_HO::BU2(Everywhere)
READS: GiRaFFE_HO::ValenciavU0(Everywhere)
READS: GiRaFFE_HO::ValenciavU1(Everywhere)
READS: GiRaFFE_HO::ValenciavU2(Everywhere)
READS: GiRaFFE_HO::StildeD0(Everywhere)
READS: GiRaFFE_HO::StildeD1(Everywhere)
READS: GiRaFFE_HO::StildeD2(Everywhere)
WRITES: GiRaFFE_HO::ValenciavU0(Everywhere)
WRITES: GiRaFFE_HO::ValenciavU1(Everywhere)
WRITES: GiRaFFE_HO::ValenciavU2(Everywhere)
WRITES: GiRaFFE_HO::StildeD0(Everywhere)
WRITES: GiRaFFE_HO::StildeD1(Everywhere)
WRITES: GiRaFFE_HO::StildeD2(Everywhere)
} "Applies the FFE condition B^2>E^2 and recomputes the velocities"
schedule HydroBase_to_GiRaFFE IN MoL_CalcRHS before GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS
{
LANG: C
READS: HydroBase::Avec(Everywhere)
READS: HydroBase::Bvec(Everywhere)
READS: HydroBase::vel(Everywhere)
WRITES: GiRaFFE_HO::BU0(Everywhere)
WRITES: GiRaFFE_HO::BU1(Everywhere)
WRITES: GiRaFFE_HO::BU2(Everywhere)
WRITES: GiRaFFE_HO::AD0(Everywhere)
WRITES: GiRaFFE_HO::AD1(Everywhere)
WRITES: GiRaFFE_HO::AD2(Everywhere)
WRITES: GiRaFFE_HO::ValenciavU0(Everywhere)
WRITES: GiRaFFE_HO::ValenciavU1(Everywhere)
WRITES: GiRaFFE_HO::ValenciavU2(Everywhere)
} "Converts the HydroBase variables to GiRaFFE variables"
schedule GiRaFFE_to_HydroBase AT CCTK_ANALYSIS AFTER ML_BSSN_evolCalcGroup
{
LANG: C
READS: GiRaFFE_HO::BU0(Everywhere)
READS: GiRaFFE_HO::BU1(Everywhere)
READS: GiRaFFE_HO::BU2(Everywhere)
READS: GiRaFFE_HO::AD0(Everywhere)
READS: GiRaFFE_HO::AD1(Everywhere)
READS: GiRaFFE_HO::AD2(Everywhere)
READS: GiRaFFE_HO::ValenciavU0(Everywhere)
READS: GiRaFFE_HO::ValenciavU1(Everywhere)
READS: GiRaFFE_HO::ValenciavU2(Everywhere)
WRITES: HydroBase::Avec(Everywhere)
WRITES: HydroBase::Bvec(Everywhere)
WRITES: HydroBase::vel(Everywhere)
} "Converts the GiRaFFE variables to HydroBase variables"
############################################################
# Schedule Blocks that are run BEFORE the evolution to finish setting up initial data:
schedule driver_A_to_B IN GiRaFFE_ID_Converter as initial_driver_A_to_B before first_initialdata
{
LANG: C
READS: admbase::gxx(Everywhere)
READS: admbase::gxy(Everywhere)
READS: admbase::gxz(Everywhere)
READS: admbase::gyy(Everywhere)
READS: admbase::gyz(Everywhere)
READS: admbase::gzz(Everywhere)
READS: GiRaFFE_HO::AD0(Everywhere)
READS: GiRaFFE_HO::AD1(Everywhere)
READS: GiRaFFE_HO::AD2(Everywhere)
WRITES: GiRaFFE_HO::BU0(Everywhere)
WRITES: GiRaFFE_HO::BU1(Everywhere)
WRITES: GiRaFFE_HO::BU2(Everywhere)
} "Calculates the B-field from the vector potential"
schedule GiRaFFE_HO_conserv_to_prims_FFE in GiRaFFE_ID_Converter after first_initialdata
{
LANG: C
READS: admbase::gxx(Everywhere)
READS: admbase::gxy(Everywhere)
READS: admbase::gxz(Everywhere)
READS: admbase::gyy(Everywhere)
READS: admbase::gyz(Everywhere)
READS: admbase::gzz(Everywhere)
READS: GiRaFFE_HO::BU0(Everywhere)
READS: GiRaFFE_HO::BU1(Everywhere)
READS: GiRaFFE_HO::BU2(Everywhere)
READS: GiRaFFE_HO::ValenciavU0(Everywhere)
READS: GiRaFFE_HO::ValenciavU1(Everywhere)
READS: GiRaFFE_HO::ValenciavU2(Everywhere)
READS: GiRaFFE_HO::StildeD0(Everywhere)
READS: GiRaFFE_HO::StildeD1(Everywhere)
READS: GiRaFFE_HO::StildeD2(Everywhere)
WRITES: GiRaFFE_HO::ValenciavU0(Everywhere)
WRITES: GiRaFFE_HO::ValenciavU1(Everywhere)
WRITES: GiRaFFE_HO::ValenciavU2(Everywhere)
WRITES: GiRaFFE_HO::StildeD0(Everywhere)
WRITES: GiRaFFE_HO::StildeD1(Everywhere)
WRITES: GiRaFFE_HO::StildeD2(Everywhere)
} "Applies the FFE condition B^2>E^2 and recomputes the velocities"
# Copy data to other timelevels.
#schedule GiRaFFE_HO_PostPostInitial__Copy_Timelevels in GiRaFFE_PostPostInitial as mhdpostid after initial_driver_A_to_B# after p2c
schedule GiRaFFE_HO_PostPostInitial__Copy_Timelevels in SetTmunu as mhdpostid# after initial_driver_A_to_B
{
READS: GiRaFFE_HO::AD0(Everywhere)
READS: GiRaFFE_HO::AD1(Everywhere)
READS: GiRaFFE_HO::AD2(Everywhere)
READS: GiRaFFE_HO::psi6Phi(Everywhere)
READS: GiRaFFE_HO::ValenciavU0(Everywhere)
READS: GiRaFFE_HO::ValenciavU1(Everywhere)
READS: GiRaFFE_HO::ValenciavU2(Everywhere)
READS: GiRaFFE_HO::StildeD0(Everywhere)
READS: GiRaFFE_HO::StildeD1(Everywhere)
READS: GiRaFFE_HO::StildeD2(Everywhere)
LANG: C
} "Compute post-initialdata quantities"
# FIXME: This is getting run too many times, even during the evolution!
```
This yields the following, as output by the Toolkit itself, with some extra formatting:
[comment]: <> (Implement markdown formatting for code: TODO)
1. GiRaFFE_HO::GiRaFFE_HO_RegisterVars: [meta] Register Variables for MoL
1. GROUP GiRaFFE_Initial: Schedule GiRaFFE functions in HydroBase_Initial
1. GiRaFFEfood_HO::GiRaFFE_Food: Initial data for GiRaFFE
1. GiRaFFE_HO::initial_driver_A_to_B: Calculates the B-field from the vector potential
1. GiRaFFE_HO::mhdpostid: Compute post-initialdata quantities
1. GROUP GiRaFFE_ID_Converter: Translate ET-generated, HydroBase-compatible initial data and convert into variables used by GiRaFFE
1. GiRaFFEfood_HO::first_initialdata: [local] Convert HydroBase initial data (ID) to ID that GiRaFFE can read.
1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations
1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions
1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential
1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations
1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions
1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential
1. GROUP GiRaFFE_PostPostInitial: HydroBase_Con2Prim in CCTK_POSTPOSTINITIAL sets conserv to prim then outer boundaries (OBs, which are technically disabled). The post OB SYNCs actually reprolongate the conservative variables, making cons and prims INCONSISTENT. So here we redo the con2prim, avoiding the SYNC afterward, then copy the result to other timelevels
1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations
1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions
1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential
1. GiRaFFE_HO::GiRaFFE_to_HydroBase: Converts the GiRaFFE variables to HydroBase variables
1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations
1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions
1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential
1. GiRaFFE_HO::HydroBase_to_GiRaFFE: Converts the HydroBase variables to GiRaFFE variables
1. GiRaFFE_HO::GiRaFFE_HO_set_GFs_to_finite_difference_for_RHS: Sets prerequisite quantities for the GiRaFFE right-hand sides
1. GiRaFFE_HO::GiRaFFE_HO_Evolution: Sets the GiRaFFE right-hand sides
1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations
1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions
1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential
1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions
1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations
1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions
1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential
1. GiRaFFE_HO::GiRaFFE_to_HydroBase: Converts the GiRaFFE variables to HydroBase variables
1. GiRaFFE_HO::GiRaFFE_HO_SelectBCs: [level] Boundaries of GiRaFFE equations
1. GROUP GiRaFFE_HO_ApplyBCs: Apply boundary conditions
1. GiRaFFE_HO::driver_A_to_B: Calculates the B-field from the vector potential
<a id='einstein_list'></a>
## Step 2.f: Add the C file to Einstein Toolkit compilation list \[Back to [top](#toc)\]
$$\label{einstein_list}$$
We will also need `make.code.defn`, which indicates the list of files that need to be compiled.
```
%%writefile GiRaFFE_HO/src/make.code.defn
SRCS = GiRaFFE.c driver_conserv_to_prims_FFE.C \
GiRaFFE_HydroBase_conversion.c \
postpostinitial__copy_timelevels.c
```
<a id='latex_pdf_output'></a>
# Step 3: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-ETK_thorn-GiRaFFE_Higher_Order.pdf](Tutorial-ETK_thorn-GiRaFFE_Higher_Order.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-ETK_thorn-GiRaFFE_Higher_Order.ipynb
!pdflatex -interaction=batchmode Tutorial-ETK_thorn-GiRaFFE_Higher_Order.tex
!pdflatex -interaction=batchmode Tutorial-ETK_thorn-GiRaFFE_Higher_Order.tex
!pdflatex -interaction=batchmode Tutorial-ETK_thorn-GiRaFFE_Higher_Order.tex
!rm -f Tut*.out Tut*.aux Tut*.log
```
| github_jupyter |
```
import pandas as pd
# Load the pre-processed startups dataset produced earlier; column 0
# (presumably the company permalink — TODO confirm) is used as the index.
startups = pd.read_csv('data/startups_1.csv', index_col=0)
startups[:3]
```
### With the variables we have extracted so far, we achieved a maximum performance of 75% (ROC AUC), so let's try to extract some more features in order to increase the model's performance
### Let's find the # of acquisitions made by each company
```
# Consider only acquisitions made by USA-based acquirers (amounts in USD).
acquisitions = pd.read_csv('data/acquisitions.csv')
acquisitions = acquisitions[acquisitions['acquirer_country_code'] == 'USA']
acquisitions[:3]
# Number of acquisitions made by each acquiring company.
# NOTE: the previous .agg({'amount': [pd.Series.count]}) "dict-of-renamers"
# syntax is deprecated and raises SpecificationError in pandas >= 1.0;
# groupby().size() is equivalent here (one row per acquisition event).
number_of_acquisitions = (
    acquisitions.groupby('acquirer_permalink')
    .size()
    .rename('number_of_acquisitions')
    .to_frame()
)
number_of_acquisitions.index.name = 'permalink'
number_of_acquisitions[:3]
```
### Let's find the # of investments made by each company
```
# Consider only investments made by USA-based investors (amounts in USD).
investments = pd.read_csv('data/investments.csv')
investments = investments[investments['investor_country_code'] == 'USA']
investments[:3]
# Total number of investments (funding-round participations) per investor.
# NOTE: the previous .agg({'amount': [pd.Series.count]}) "dict-of-renamers"
# syntax is deprecated and raises SpecificationError in pandas >= 1.0;
# groupby().size() is equivalent here (one row per investment record).
number_of_investments = (
    investments.groupby('investor_permalink')
    .size()
    .rename('number_of_investments')
    .to_frame()
)
number_of_investments.index.name = 'permalink'
number_of_investments[:3]
#Number of different companies in which each company have invested in
number_of_unique_investments = investments.groupby(['investor_permalink'])['company_permalink'].agg({'amount': [ pd.Series.nunique]}).reset_index()
number_of_unique_investments.columns = number_of_unique_investments.columns.droplevel()
number_of_unique_investments.columns = ['permalink', 'number_of_unique_investments']
number_of_unique_investments = number_of_unique_investments.set_index('permalink')
number_of_unique_investments[:3]
number_of_investors_per_round = investments.groupby(['company_permalink', 'funding_round_permalink'])['investor_permalink'].agg({'investor_permalink': [ pd.Series.count]}).reset_index()
number_of_investors_per_round.columns = number_of_investors_per_round.columns.droplevel(0)
number_of_investors_per_round.columns = ['company_permalink', 'funding_round_permalink', 'count']
number_of_investors_per_round = number_of_investors_per_round.groupby(['company_permalink']).agg({'count': [ pd.Series.mean]}).reset_index()
number_of_investors_per_round.columns = number_of_investors_per_round.columns.droplevel(0)
number_of_investors_per_round.columns = ['company_permalink', 'number_of_investors_per_round']
number_of_investors_per_round = number_of_investors_per_round.set_index('company_permalink')
number_of_investors_per_round[:3]
from numpy import nanmean
#investments['raised_amount_usd'].dtype()
investments['raised_amount_usd'] = investments['raised_amount_usd'].astype(float)
avg_amount_invested_per_round = investments.groupby(['company_permalink', 'funding_round_permalink'])['raised_amount_usd'].agg({'raised_amount_usd': [ pd.Series.mean]}).reset_index()
avg_amount_invested_per_round.columns = avg_amount_invested_per_round.columns.droplevel(0)
avg_amount_invested_per_round.columns = ['company_permalink', 'funding_round_permalink', 'mean']
avg_amount_invested_per_round = avg_amount_invested_per_round.groupby(['company_permalink']).agg({'mean': [ pd.Series.mean]}).reset_index()
avg_amount_invested_per_round.columns = avg_amount_invested_per_round.columns.droplevel(0)
avg_amount_invested_per_round.columns = ['company_permalink', 'avg_amount_invested_per_round']
avg_amount_invested_per_round = avg_amount_invested_per_round.set_index('company_permalink')
avg_amount_invested_per_round = avg_amount_invested_per_round.fillna(0)
avg_amount_invested_per_round[:3]
startups = startups.join(number_of_acquisitions).join(number_of_investments).join(number_of_unique_investments).join(number_of_investors_per_round).join(avg_amount_invested_per_round)
startups[['number_of_acquisitions', 'number_of_investments', 'number_of_unique_investments','number_of_investors_per_round', 'avg_amount_invested_per_round']] = startups[['number_of_acquisitions', 'number_of_investments', 'number_of_unique_investments','number_of_investors_per_round', 'avg_amount_invested_per_round']].fillna(value=0)
startups[:3]
startups.to_csv('data/startups_1_1.csv')
```
| github_jupyter |
# Lab: Titanic Survival Exploration with Decision Trees
## Getting Started
In this lab, you will see how decision trees work by implementing a decision tree in sklearn.
We'll start by loading the dataset and displaying some of its rows.
```
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames

# Pretty display for notebooks
%matplotlib inline

# Set a random seed
# NOTE(review): this seeds Python's `random` module only; sklearn draws
# from numpy's RNG, so estimators would also need random_state for full
# reproducibility.
import random
random.seed(42)

# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)

# Print the first few entries of the RMS Titanic data
display(full_data.head())
```
Recall that these are the various features present for each passenger on the ship:
- **Survived**: Outcome of survival (0 = No; 1 = Yes)
- **Pclass**: Socio-economic class (1 = Upper class; 2 = Middle class; 3 = Lower class)
- **Name**: Name of passenger
- **Sex**: Sex of the passenger
- **Age**: Age of the passenger (Some entries contain `NaN`)
- **SibSp**: Number of siblings and spouses of the passenger aboard
- **Parch**: Number of parents and children of the passenger aboard
- **Ticket**: Ticket number of the passenger
- **Fare**: Fare paid by the passenger
- **Cabin** Cabin number of the passenger (Some entries contain `NaN`)
- **Embarked**: Port of embarkation of the passenger (C = Cherbourg; Q = Queenstown; S = Southampton)
Since we're interested in the outcome of survival for each passenger or crew member, we can remove the **Survived** feature from this dataset and store it as its own separate variable `outcomes`. We will use these outcomes as our prediction targets.
Run the code cell below to remove **Survived** as a feature of the dataset and store it in `outcomes`.
```
# Split the prediction target off from the feature matrix: 'Survived'
# becomes the label series, everything else stays as raw features.
outcomes = full_data['Survived']
features_raw = full_data.drop(columns=['Survived'])

# Preview the dataset now that 'Survived' has been removed
display(features_raw.head())
```
The very same sample of the RMS Titanic data now shows the **Survived** feature removed from the DataFrame. Note that `data` (the passenger data) and `outcomes` (the outcomes of survival) are now *paired*. That means for any passenger `data.loc[i]`, they have the survival outcome `outcomes[i]`.
## Preprocessing the data
Now, let's do some data preprocessing. First, we'll remove the names of the passengers, and then one-hot encode the features.
**Question:** Why would it be a terrible idea to one-hot encode the data without removing the names?
(Answer: passenger names are almost all unique, so one-hot encoding them would create one column per passenger — the model could simply memorize each passenger's outcome and would not generalize.)
```
# Drop the (nearly unique) passenger names before encoding — one-hot
# encoding them would create one column per passenger.
features_no_names = features_raw.drop(columns=['Name'])

# One-hot encode the remaining categorical columns
features = pd.get_dummies(features_no_names)
```
And now we'll fill in any blanks with zeroes.
```
# Replace any remaining NaNs (e.g. missing Age values) with 0.0 so the
# feature matrix is fully numeric for sklearn.
features = features.fillna(0.0)
display(features.head())
```
## (TODO) Training the model
Now we're ready to train a model in sklearn. First, let's split the data into training and testing sets. Then we'll train the model on the training set.
```
from sklearn.model_selection import train_test_split

# Hold out 20% of the passengers as the test set.
X_train, X_test, y_train, y_test = train_test_split(features, outcomes, test_size=0.2, random_state=42)

# Import the classifier from sklearn
from sklearn.tree import DecisionTreeClassifier

# TODO: Define the classifier, and fit it to the data
# NOTE(review): default parameters grow an unconstrained tree, which the
# next section shows leads to overfitting; no random_state is passed.
model = DecisionTreeClassifier()
model.fit(X_train,y_train)
```
## Testing the model
Now, let's see how our model does, let's calculate the accuracy over both the training and the testing set.
```
# Making predictions on both splits
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)

# Calculate the accuracy on each split; a large train/test gap
# indicates overfitting.
from sklearn.metrics import accuracy_score
train_accuracy = accuracy_score(y_train, y_train_pred)
test_accuracy = accuracy_score(y_test, y_test_pred)
print('The training accuracy is', train_accuracy)
print('The test accuracy is', test_accuracy)
```
# Exercise: Improving the model
Ok, high training accuracy and a lower testing accuracy. We may be overfitting a bit.
So now it's your turn to shine! Train a new model, and try to specify some parameters in order to improve the testing accuracy, such as:
- `max_depth`
- `min_samples_leaf`
- `min_samples_split`
You can use your intuition, trial and error, or even better, feel free to use Grid Search!
**Challenge:** Try to get to 85% accuracy on the testing set. If you'd like a hint, take a look at the solutions notebook next.
```
# TODO: Train the model
# Import the classifier from sklearn
from sklearn.tree import DecisionTreeClassifier

# TODO: Define the classifier, and fit it to the data
# The depth/leaf/split limits constrain tree complexity to reduce the
# overfitting observed with the default parameters above.
model = DecisionTreeClassifier(max_depth = 9, min_samples_leaf = 5, min_samples_split = 12)
model.fit(X_train,y_train)

# TODO: Make predictions
# Making predictions
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)

# TODO: Calculate the accuracy
# Calculate the accuracy
from sklearn.metrics import accuracy_score
train_accuracy = accuracy_score(y_train, y_train_pred)
test_accuracy = accuracy_score(y_test, y_test_pred)
print('The training accuracy is', train_accuracy)
print('The test accuracy is', test_accuracy)
```
| github_jupyter |
# Pypi & Pip
PyPi is short form for Python Package Index (PyPI). PyPI helps you find and install open source software developed and shared by the Python community. All the python packages are distributed to the python community through pypi.org. These packages are called distributed or installable packages. To install any distributed or installable package we use the command called pip.
```
pip install <package-name>
pip install requests
```
you can also specify which version of python package to install in the command.
```
pip install <package-name>==<version>
pip install requests==2.1.0
```
# How does pip install work?
Every package/distribution that is being installed will have a setup.py. When you call pip install <package-name> that is nothing but python setup.py build and python setup.py install
What happens in this flow, with python setup.py build, it will download all the code of package to build folder installing any dependant packages, after that it will build a binary wheel specifically for your machine out of the source. Then it needs to determine which library directory to install the package in—the system's, the user's, or a virtualenv's? This is controlled by sys.prefix, which in turn is controlled by pip's executable path and the PYTHONPATH and PYTHONHOME environment variables. Finally, it moves the wheel files into the appropriate library directory, and compiles the python source files into bytecode for faster execution.
```
# sample setup.py
# NOTE: this is a template — every <angle-bracket> placeholder (including
# the install_requires entry) must be replaced with real values; as
# written the file is illustrative, not runnable Python.
import os
from setuptools import setup, find_packages

setup(
    name='<package_name>',
    version='0.3.1',
    # ship every package found in the tree except the test suite
    packages=find_packages(exclude=['tests', 'tests.*']),
    include_package_data=True,
    description='A brief description of your package',
    long_description=README,
    url='<your package github repo url>',
    author='<author name>',
    author_email='<Author email>',
    # Trove classifiers shown on the PyPI project page
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
    ],
    # runtime dependencies pip will install alongside this package
    install_requires=[
        <list of any other packages that needs to be installed.>
    ],
)
```
# Virtualenv
A Virtual Environment is an isolated working copy of Python which allows you to work on a specific project without worry of affecting other projects. It enables multiple side-by-side installations of Python, one for each project. Following are the commands to install virtualenv.
On macOS and Linux:
```
python3 -m pip install --user virtualenv
```
On Windows:
```
py -m pip install --user virtualenv
```
We create a virtualenv using the following commands
On macOS and Linux:
```
python3 -m venv env
```
On Windows:
```
py -m venv env
```
Before you can start installing or using packages in your virtual environment you’ll need to activate it. To activate the environment use the following commands
On macOS and Linux:
```
source env/bin/activate
```
On Windows:
```
.\env\Scripts\activate
```
Following are some important commands when we use virtualenv
> pip freeze
shows packages YOU installed via pip in that environment
> pip freeze > requirements.txt
used to write the installed packages into the file.
> pip install -r requirements.txt
Used to install all the packages inside requirements
# Create your own package
We will see how to publish a simple helloworld as a pypi package. I'm creating a simple package with hellow as a folder and to make it a package I'm adding __init__.py to it.
Inside that folder I'm creating a simple file called greeting and inside my greeting file, I'm adding a simple function called hello_world that prints hello_world
```
helloworld/
├── hellow
├── __init__.py
└── greeting.py
1 directory, 2 files
# in greeting.py
def hello_world():
    """Print a hello-world greeting (the package's only function)."""
    print ("hello world")
```
As discussed earlier we need a setup.py file to make a python package into a distributed package. So I'm creating a setup.py file parallel to the hellow folder. In my setup.py I'll add the corresponding information required for that package.
```
├── hellow
│ ├── __init__.py
│ └── greeting.py
└── setup.py
1 directory, 3 files
# in setup.py
import os
from setuptools import setup, find_packages

setup(
    # distribution name as it will appear on PyPI
    name='chaitu_210_hw_greeting',
    version='1.0',
    # the single package directory to ship
    packages=['hellow'],
    include_package_data=True,
    description='A brief description of your package',
    long_description='',
    url='https://www.test.com/',
    author='chaitanya',
    author_email='chaitu210@gmail.com',
    # Trove classifiers shown on the PyPI project page
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
    ],
    # no runtime dependencies for this toy package
    install_requires=[
    ],
)
```
Before make our python package as a distributed package, we will create an account in pypi.org, you can create an using the following link
https://pypi.org/account/register/
Now to upload our package we run the following commands
```
> python setup.py bdist_wheel sdist
> pip install twine
> twine upload dist/*
```
On running the last command it will ask for your pypi username and password. On successful upload we are now ready to use the package in any other project or any python developer can install the package using
pip install chaitu_210_hw_greeting
There are more options that we can research while creating a package, e.g. MANIFEST.in, docs, README.md etc.
Manifest.in : Used for adding the non python files like htmls
Docs: If your package has more documentation you will use this.
README.md: This is used to give detailed description/usage about your package.
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/AssetManagement/export_FeatureCollection.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/AssetManagement/export_FeatureCollection.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/AssetManagement/export_FeatureCollection.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
```
# Installs geemap package
import subprocess

# Install geemap on first run only; subsequent runs import it directly.
try:
    import geemap
except ImportError:
    print('Installing geemap ...')
    # pip-install into the current interpreter's environment
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])

import ee
import geemap
```
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
```
# Create an interactive map centered on the continental US (zoom level 4).
Map = geemap.Map(center=[40,-100], zoom=4)
Map
```
## Add Earth Engine Python script
```
# Add Earth Engine dataset
fromFT = ee.FeatureCollection('ft:1CLldB-ULPyULBT2mxoRNv7enckVF0gCQoD2oH7XP')
polys = fromFT.geometry()
centroid = polys.centroid()

# Center the map on the collection's centroid.
lng, lat = centroid.getInfo()['coordinates']
print("lng = {}, lat = {}".format(lng, lat))
Map.setCenter(lng, lat, 10)
Map.addLayer(fromFT)

# Export destination and format for the batch task.
taskParams = {
    'driveFolder': 'image',
    'fileFormat': 'KML' # CSV, KMZ, GeoJSON
}

# export all features in a FeatureCollection as one file
task = ee.batch.Export.table(fromFT, 'export_fc', taskParams)
task.start()

# # export each feature in a FeatureCollection as an individual file
# count = fromFT.size().getInfo()
# for i in range(2, 2 + count):
#     fc = fromFT.filter(ee.Filter.eq('system:index', str(i)))
#     task = ee.batch.Export.table(fc, 'watershed-' + str(i), taskParams)
#     task.start()
```
## Display Earth Engine data layers
```
# Add a layer-control widget and re-display the map.
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
```
| github_jupyter |
<h1> Structured data prediction using Cloud ML Engine </h1>
This notebook illustrates:
<ol>
<li> Exploring a BigQuery dataset using Datalab
<li> Creating datasets for Machine Learning using Dataflow
<li> Creating a model using the high-level Estimator API
<li> Training on Cloud ML Engine
<li> Deploying model
<li> Predicting with model
</ol>
Before starting the lab, upgrade packages that are required for this notebook.
```
%%bash
pip install --upgrade tensorflow==1.4
pip install --ignore-installed --upgrade pytz==2018.4
pip uninstall -y google-cloud-dataflow
pip install --upgrade apache-beam[gcp]==2.6
```
**Now you have to restart the kernel by clicking the "Reset Session" in the menu bar** to reflect the newly installed modules.
After restarting the kernel, you can resume the code execution from the next cell.
```
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'

import os
# Export the settings so the %bash cells below can read them as $BUCKET etc.
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION

%bash
# Point gcloud at the project/region used throughout this lab.
gcloud config set project $PROJECT
gcloud config set compute/region $REGION

%%bash
# Create the staging bucket if it does not already exist.
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
fi
```
<h1>Part 1: Data Analysis and Preparation</h1>
<h2> Exploring data </h2>
The data is natality data (record of births in the US). My goal is to predict the baby's weight given a number of factors about the pregnancy and the baby's mother. Later, we will want to split the data into training and eval datasets. The hash of the year-month will be used for that.
```
# Natality query: FARM_FINGERPRINT of the year-month string yields a
# stable hash used later to split rows into train/eval sets.
query="""
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE year > 2000
"""
import google.datalab.bigquery as bq
# Preview a small sample as a pandas DataFrame.
df = bq.Query(query + " LIMIT 100").execute().result().to_dataframe()
df.head()
```
Let's write a query to find the unique values for each of the columns and the count of those values.
```
def get_distinct_values(column_name):
    """Return a DataFrame with each distinct value of *column_name*,
    the number of babies for that value, and their average weight."""
    sql = """
SELECT
{0},
COUNT(1) AS num_babies,
AVG(weight_pounds) AS avg_wt
FROM
publicdata.samples.natality
WHERE
year > 2000
GROUP BY
{0}
""".format(column_name)
    return bq.Query(sql).execute().result().to_dataframe()

# Plot baby counts and average weight against each candidate feature.
df = get_distinct_values('is_male')
df.plot(x='is_male', y='num_babies', kind='bar');
df.plot(x='is_male', y='avg_wt', kind='bar');

df = get_distinct_values('mother_age')
df = df.sort_values('mother_age')
df.plot(x='mother_age', y='num_babies');
df.plot(x='mother_age', y='avg_wt');

df = get_distinct_values('plurality')
df = df.sort_values('plurality')
# log scale: single births dwarf multiples in count
df.plot(x='plurality', y='num_babies', logy=True, kind='bar');
df.plot(x='plurality', y='avg_wt', kind='bar');

df = get_distinct_values('gestation_weeks')
df = df.sort_values('gestation_weeks')
df.plot(x='gestation_weeks', y='num_babies', logy=True, kind='bar', color='royalblue');
df.plot(x='gestation_weeks', y='avg_wt', kind='bar', color='royalblue');
```
All these factors seem to play a part in the baby's weight. Male babies are heavier on average than female babies. Teenaged and older moms tend to have lower-weight babies. Twins, triplets, etc. are lower weight than single births. Preemies weigh in lower as do babies born to single moms. In addition, it is important to check whether you have enough data (number of babies) for each input value. Otherwise, the model prediction against input values that doesn't have enough data may not be reliable.
<p>
In the rest of this notebook, we will use machine learning to combine all of these factors to come up with a prediction of a baby's weight.
<h2> Creating a ML dataset using Dataflow </h2>
<p>
I'm going to use Cloud Dataflow to read in the BigQuery data, do some preprocessing, and write it out as CSV files.
Instead of using Beam/Dataflow, I had three other options:
<ol>
<li> Use Cloud Dataprep to visually author a Dataflow pipeline. Cloud Dataprep also allows me to explore the data, so we could have avoided much of the handcoding of Python/Seaborn calls above as well!
<li> Read from BigQuery directly using TensorFlow.
<li> Use the BigQuery console (http://bigquery.cloud.google.com) to run a Query and save the result as a CSV file. For larger datasets, you may have to select the option to "allow large results" and save the result into a CSV file on Google Cloud Storage.
</ol>
<p>
However, in this case, I want to do some preprocessing. I want to modify the data such that we can simulate what is known if no ultrasound has been performed. If I didn't need preprocessing, I could have used the web console. Also, I prefer to script it out rather than run queries on the user interface. Therefore, I am using Cloud Dataflow for the preprocessing.
```
import apache_beam as beam
import datetime
def to_csv(rowdict):
    """Map one BigQuery natality row to CSV training line(s).

    Yields two lines per input row: one simulating "no ultrasound"
    (sex 'Unknown', plurality collapsed to Single(1)/Multiple(2+)) and
    one with full ultrasound information (plurality spelled out). A
    sha224 hash of the feature columns is appended as a row key.
    """
    # pull columns from BQ and create a line
    import hashlib
    import copy
    CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks'.split(',')

    # create synthetic data where we assume that no ultrasound has been performed
    # and so we don't know sex of the baby. Let's assume that we can tell the difference
    # between single and multiple, but that the errors rates in determining exact number
    # is difficult in the absence of an ultrasound.
    no_ultrasound = copy.deepcopy(rowdict)
    w_ultrasound = copy.deepcopy(rowdict)

    no_ultrasound['is_male'] = 'Unknown'
    if rowdict['plurality'] > 1:
        no_ultrasound['plurality'] = 'Multiple(2+)'
    else:
        no_ultrasound['plurality'] = 'Single(1)'

    # Change the plurality column to strings
    w_ultrasound['plurality'] = ['Single(1)', 'Twins(2)', 'Triplets(3)', 'Quadruplets(4)', 'Quintuplets(5)'][rowdict['plurality'] - 1]

    # Write out two rows for each input row, one with ultrasound and one without
    for result in [no_ultrasound, w_ultrasound]:
        data = ','.join([str(result[k]) if k in result else 'None' for k in CSV_COLUMNS])
        # BUG FIX: hashlib requires bytes on Python 3; the original passed a
        # str, which raises TypeError. Encoding as UTF-8 matches the Python 2
        # byte content for this ASCII data, so hashes are unchanged there.
        key = hashlib.sha224(data.encode('utf-8')).hexdigest()  # hash the columns to form a key
        yield str('{},{}'.format(data, key))
def preprocess(in_test_mode):
    """Run the Beam pipeline that turns BigQuery natality rows into CSV shards.

    in_test_mode: when True, runs locally (DirectRunner) on 100 rows and
    writes to ./preproc; otherwise runs on Dataflow and writes to GCS.
    Writes separate train/eval outputs split 3:1 by ABS(MOD(hashmonth, 4)).
    Relies on module-level BUCKET/PROJECT and the to_csv function above.
    """
    job_name = 'preprocess-babyweight-features' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')
    if in_test_mode:
        OUTPUT_DIR = './preproc'
    else:
        OUTPUT_DIR = 'gs://{0}/babyweight/preproc/'.format(BUCKET)
    # Pipeline options for the Dataflow service.
    options = {
        'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),
        'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),
        'job_name': job_name,
        'project': PROJECT,
        'teardown_policy': 'TEARDOWN_ALWAYS',
        'max_num_workers': 3, # CHANGE THIS IF YOU HAVE MORE QUOTA
        'no_save_main_session': True
    }
    opts = beam.pipeline.PipelineOptions(flags=[], **options)
    if in_test_mode:
        RUNNER = 'DirectRunner'
    else:
        RUNNER = 'DataflowRunner'
    p = beam.Pipeline(RUNNER, options=opts)
    # Same natality query as the exploration section, with basic validity filters.
    query = """
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE year > 2000
AND weight_pounds > 0
AND mother_age > 0
AND plurality > 0
AND gestation_weeks > 0
AND month > 0
"""
    if in_test_mode:
        query = query + ' LIMIT 100'
    # Build a read -> to_csv -> write branch for each split; the hashmonth
    # modulus keeps the train/eval split deterministic.
    for step in ['train', 'eval']:
        if step == 'train':
            selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) < 3'.format(query)
        else:
            selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) = 3'.format(query)
        (p
         | '{}_read'.format(step) >> beam.io.Read(beam.io.BigQuerySource(query=selquery, use_standard_sql=True))
         | '{}_csv'.format(step) >> beam.FlatMap(to_csv)
         | '{}_out'.format(step) >> beam.io.Write(beam.io.WriteToText(os.path.join(OUTPUT_DIR, '{}.csv'.format(step))))
        )
    # Launch asynchronously; monitor progress in the Dataflow console.
    job = p.run()

preprocess(in_test_mode=False)
```
You may get a warning about access scopes. It's safe to ignore this.
Note that after you launch this, the actual processing is happening on the Cloud. Go to the GCP web console to the Dataflow section and monitor the running job. You'll see a job that's running. If you click it, you should get a screen like this. It took about <b>55 minutes</b> for me.
<img src="dataflow.png" width="500"/>
Once the job has completed, run the cell below to check the location of the processed files.
```
%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
```
<h1>Part 2: Developing a Machine Learning Model using TensorFlow and Cloud ML Engine</h1>
<h2> Creating a TensorFlow model using the Estimator API </h2>
<p>
First, write an input_fn to read the data.
```
import shutil
import numpy as np
import tensorflow as tf
```
We may get a few warnings when we run this. Don't worry about them.
```
# Column order matches the CSV written by the Dataflow preprocessing job.
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Per-column defaults double as the type schema for tf.decode_csv.
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
TRAIN_STEPS = 1000

def read_dataset(prefix, pattern, batch_size=512):
    """Return a TF1 queue-based input_fn over the preprocessed CSV shards.

    prefix selects 'train' (shuffled, repeats forever) or 'eval'
    (single pass); pattern narrows which shard files are read.
    """
    # use prefix to create filename
    filename = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, pattern)
    if prefix == 'train':
        mode = tf.estimator.ModeKeys.TRAIN
        num_epochs = None # indefinitely
    else:
        mode = tf.estimator.ModeKeys.EVAL
        num_epochs = 1 # end-of-input after this

    # the actual input function passed to TensorFlow
    def _input_fn():
        # could be a path to one file or a file pattern.
        input_file_names = tf.train.match_filenames_once(filename)
        filename_queue = tf.train.string_input_producer(
            input_file_names, shuffle=True, num_epochs=num_epochs)
        # read CSV lines in batches of up to batch_size records
        reader = tf.TextLineReader()
        _, value = reader.read_up_to(filename_queue, num_records=batch_size)
        # extra shuffling across batches during training only
        if mode == tf.estimator.ModeKeys.TRAIN:
            value = tf.train.shuffle_batch([value], batch_size, capacity=10*batch_size,
                                           min_after_dequeue=batch_size, enqueue_many=True,
                                           allow_smaller_final_batch=False)
        # parse each line into typed columns and split off key/label
        value_column = tf.expand_dims(value, -1)
        columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
        features = dict(zip(CSV_COLUMNS, columns))
        features.pop(KEY_COLUMN)
        label = features.pop(LABEL_COLUMN)
        return features, label
    return _input_fn
```
Next, define the feature columns.
```
def get_wide_deep():
    """Build the wide (sparse/bucketized/crossed) and deep (dense/embedded)
    feature-column lists for the DNNLinearCombinedRegressor."""
    # define column types
    is_male,mother_age,plurality,gestation_weeks = \
        [\
         tf.feature_column.categorical_column_with_vocabulary_list('is_male',
                      ['True', 'False', 'Unknown']),
         tf.feature_column.numeric_column('mother_age'),
         tf.feature_column.categorical_column_with_vocabulary_list('plurality',
                      ['Single(1)', 'Twins(2)', 'Triplets(3)',
                       'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
         tf.feature_column.numeric_column('gestation_weeks')
        ]

    # discretize the continuous columns into 1-unit buckets
    age_buckets = tf.feature_column.bucketized_column(mother_age,
                        boundaries=np.arange(15,45,1).tolist())
    gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
                        boundaries=np.arange(17,47,1).tolist())

    # sparse columns are wide
    wide = [is_male,
            plurality,
            age_buckets,
            gestation_buckets]

    # feature cross all the wide columns and embed into a lower dimension
    crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
    embed = tf.feature_column.embedding_column(crossed, 3)

    # continuous columns are deep
    deep = [mother_age,
            gestation_weeks,
            embed]
    return wide, deep
```
To predict with the TensorFlow model, we also need a serving input function. We will want all the inputs from our user.
```
def serving_input_fn():
    """Serving-time input_fn: accept the four raw features as flat vectors."""
    feature_placeholders = {
        'is_male': tf.placeholder(tf.string, [None]),
        'mother_age': tf.placeholder(tf.float32, [None]),
        'plurality': tf.placeholder(tf.string, [None]),
        'gestation_weeks': tf.placeholder(tf.float32, [None])
    }
    # expand_dims turns each [None] vector into a [None, 1] column,
    # the shape the feature columns expect.
    features = {
        key: tf.expand_dims(tensor, -1)
        for key, tensor in feature_placeholders.items()
    }
    return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
```
Finally, train!
```
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.learn.python.learn import learn_runner

PATTERN = "00000-of-" # process only one of the shards, for testing purposes

def train_and_evaluate(output_dir):
    """Train the wide-and-deep regressor on the selected shard, evaluate,
    and export a serving-ready SavedModel under output_dir."""
    wide, deep = get_wide_deep()
    estimator = tf.estimator.DNNLinearCombinedRegressor(
        model_dir=output_dir,
        linear_feature_columns=wide,
        dnn_feature_columns=deep,
        dnn_hidden_units=[64, 32])
    train_spec=tf.estimator.TrainSpec(
        input_fn=read_dataset('train', PATTERN),
        max_steps=TRAIN_STEPS)
    # export the final checkpoint with the serving signature defined above
    exporter = tf.estimator.FinalExporter('exporter',serving_input_fn)
    eval_spec=tf.estimator.EvalSpec(
        input_fn=read_dataset('eval', PATTERN),
        steps=None,
        exporters=exporter)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)

shutil.rmtree('babyweight_trained', ignore_errors=True) # start fresh each time
train_and_evaluate('babyweight_trained')
```
Now that we have the TensorFlow code working on a subset of the data (in the code above, I was reading only the 00000-of-x file), we can package the TensorFlow code up as a Python module and train it on Cloud ML Engine.
<p>
<h2> Training on Cloud ML Engine </h2>
<p>
Training on Cloud ML Engine requires:
<ol>
<li> Making the code a Python package
<li> Using gcloud to submit the training code to Cloud ML Engine
</ol>
<p>
The code in model.py is the same as in the above cells. I just moved it to a file so that I could package it up as a module.
(explore the <a href="babyweight/trainer">directory structure</a>).
```
%bash
grep "^def" babyweight/trainer/model.py
```
After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_steps lines so that I am not trying to boil the ocean on my laptop). Even then, this takes about <b>a minute</b> in which you won't see any output ...
```
%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_steps=1000
```
Once the code works in standalone mode, you can run it on Cloud ML Engine. Because this is on the entire dataset, it will take a while. The training run took about <b> 30 min </b> for me. You can monitor the job from the GCP console in the Cloud Machine Learning Engine section.
```
%bash
# Submit the full training run to Cloud ML Engine (~30 min on STANDARD_1).
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
#gsutil -m rm -rf $OUTDIR
# Arguments after the bare `--` are forwarded to trainer.task itself.
gcloud ml-engine jobs submit training $JOBNAME \
  --region=$REGION \
  --module-name=trainer.task \
  --package-path=$(pwd)/babyweight/trainer \
  --job-dir=$OUTDIR \
  --staging-bucket=gs://$BUCKET \
  --scale-tier=STANDARD_1 \
  --runtime-version 1.4 \
  -- \
  --bucket=${BUCKET} \
  --output_dir=${OUTDIR} \
  --train_steps=100000
```
Training finished with a RMSE of about 1 lb. Obviously, this is our first model. We could probably add in some features and do some hyper-parameter tuning to get to a lower RMSE. I'll leave that to you. If you create a better model, I'd love to hear about it -- please do write a short blog post about what you did, and tweet it at me -- @lak_gcp.
```
from google.datalab.ml import TensorBoard
# Launch TensorBoard against the GCS training output directory.
TensorBoard().start('gs://{}/babyweight/trained_model'.format(BUCKET))
# Stop every running TensorBoard instance (frees the Datalab ports).
for pid in TensorBoard.list()['pid']:
    TensorBoard().stop(pid)
    print('Stopped TensorBoard with pid {}'.format(pid))
```
<table width="70%">
<tr><td><img src="weights.png"/></td><td><img src="rmse.png" /></tr>
</table>
<h2> Deploying the trained model </h2>
<p>
Deploying the trained model to act as a REST web service is a simple gcloud call.
```
%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter
%bash
MODEL_NAME="babyweight"
MODEL_VERSION="soln"
# Deploy the most recently exported SavedModel (tail -1 = latest timestamped dir).
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
# Uncomment to delete an existing version/model before re-deploying.
#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version 1.4
```
Once this has been created, it will display 'done'.
<h2> Using the model to predict </h2>
<p>
Send a JSON request to the endpoint of the service to make it predict a baby's weight ... I am going to try out how well the model would have predicted the weights of our two kids and a couple of variations while we are at it ...
```
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import json

# Authenticate with application-default credentials and build the CMLE v1 client.
credentials = GoogleCredentials.get_application_default()
api = discovery.build('ml', 'v1', credentials=credentials)

# Four example instances; feature keys must match the model's serving input fn.
# Note is_male/plurality are passed as strings, matching the training schema.
request_data = {'instances':
  [
      {
        'is_male': 'True',
        'mother_age': 26.0,
        'plurality': 'Single(1)',
        'gestation_weeks': 39
      },
      {
        'is_male': 'False',
        'mother_age': 29.0,
        'plurality': 'Single(1)',
        'gestation_weeks': 38
      },
      {
        'is_male': 'True',
        'mother_age': 26.0,
        'plurality': 'Triplets(3)',
        'gestation_weeks': 39
      },
      {
        'is_male': 'Unknown',
        'mother_age': 29.0,
        'plurality': 'Multiple(2+)',
        'gestation_weeks': 38
      },
  ]
}
# NOTE(review): PROJECT is assumed to be defined earlier in the notebook — confirm.
parent = 'projects/%s/models/%s/versions/%s' % (PROJECT, 'babyweight', 'soln')
response = api.projects().predict(body=request_data, name=parent).execute()
print(json.dumps(response, sort_keys = True, indent = 4))
```
When I ran this, the four predictions for each of the requests in `request_data` above are 7.6, 7.2, 6.5, and 6.2 pounds. Yours may be different.
Copyright 2018 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
```
# default_exp key_driver_analysis
#hide
%reload_ext autoreload
%autoreload 2
%matplotlib inline
```
# Key Driver Analysis
> Key driver analysis to yield clues into **potential** causal relationships in your data by determining variables with high predictive power, high correlation with outcome, etc.
```
#hide
# Detect whether this notebook is running on Google Colab.
try:
    import google.colab
    IN_COLAB = True
except:
    IN_COLAB = False
if IN_COLAB:
    # On Colab, fetch the sample datasets and install the package.
    !wget -P /tmp https://raw.githubusercontent.com/amaiya/causalnlp/main/nbs/sample_data/houses.csv
    !wget -P /tmp https://raw.githubusercontent.com/amaiya/causalnlp/main/nbs/sample_data/adult-census.csv
    !pip install -q causalnlp
#hide
from nbdev.showdoc import *
#export
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 500)
import time
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
import matplotlib.pyplot as plt
import shap
from causalnlp.preprocessing import DataframePreprocessor
class KeyDriverAnalysis:
    """
    Performs key driver analysis: surfaces variables with high predictive
    power for (and high correlation with) a chosen outcome column.
    """

    def __init__(self, df, outcome_col='outcome', text_col=None, include_cols=None, ignore_cols=None,
                 verbose=1):
        """
        Instantiates the KeyDriverAnalysis instance.

        Args:
          df: input pandas DataFrame.
          outcome_col: name of the outcome (dependent) variable column.
          text_col: optional name of a column containing free text.
          include_cols: restrict predictors to these columns (None/[] = use all).
          ignore_cols: columns to exclude from the analysis.
          verbose: verbosity level (0 = silent).
        """
        # None (not []) defaults avoid the shared-mutable-default-argument pitfall.
        include_cols = [] if include_cols is None else include_cols
        ignore_cols = [] if ignore_cols is None else ignore_cols
        self.v = verbose
        self.pp = None  # set with call to _preprocess
        self.df, self.x, self.y = self._preprocess(df, outcome_col=outcome_col, text_col=text_col,
                                                   include_cols=include_cols, ignore_cols=ignore_cols)

    def _preprocess(self, df, outcome_col='outcome', text_col=None, include_cols=None, ignore_cols=None):
        """
        Preprocesses the DataFrame; returns (processed_df, X DataFrame, y Series).
        """
        include_cols = [] if include_cols is None else include_cols
        ignore_cols = [] if ignore_cols is None else ignore_cols
        # DataframePreprocessor requires a treatment column, which is irrelevant
        # for key driver analysis, so insert an all-zero dummy column.
        temp_treatment = 'CausalNLP_temp_treatment'
        df = df.copy()
        df[temp_treatment] = [0] * df.shape[0]
        # preprocess
        self.pp = DataframePreprocessor(treatment_col=temp_treatment,
                                        outcome_col=outcome_col,
                                        text_col=text_col,
                                        include_cols=include_cols,
                                        ignore_cols=ignore_cols,
                                        verbose=self.v)
        df, x, y, _ = self.pp.preprocess(df,
                                         training=True,
                                         min_df=0.05,
                                         max_df=0.5,
                                         ngram_range=(1, 1),
                                         stop_words='english')
        return df, x, y

    def correlations(self, outcome_only=True):
        """
        Computes correlations between independent variables and outcome.

        If outcome_only is True, returns a single-column DataFrame sorted by
        absolute correlation with the outcome; otherwise returns the full
        correlation matrix.
        """
        df = self.x.copy()
        df[self.pp.outcome_col] = self.y
        corrALL = df.apply(pd.to_numeric, errors='coerce').corr()
        if outcome_only:
            df_results = corrALL[[self.pp.outcome_col]]
            df_results = df_results.sort_values(by=self.pp.outcome_col, key=abs, ascending=False)
            # First row is the outcome's correlation with itself (1.0) — drop it.
            return df_results.iloc[1:, :]
        else:
            return corrALL

    def importances(self, plot=True, split_pct=0.2,
                    use_shap=False, shap_background_size=50,
                    rf_model=None, n_estimators=100, n_jobs=-1, random_state=42):
        """
        Identifies important predictors using a RandomForest model.

        Returns a DataFrame with 'Driver' and 'Importance' columns sorted by
        descending importance. If use_shap is True, importances are mean
        absolute SHAP values computed on a sample of the validation split.

        NOTE: rf_model is currently unused; retained for interface compatibility.
        """
        X_train, X_test, y_train, y_test = train_test_split(self.x.values, self.y.values,
                                                            test_size=split_pct,
                                                            random_state=random_state)
        rf_type = RandomForestClassifier if self.pp.is_classification else RandomForestRegressor
        rf = rf_type(n_estimators=n_estimators,
                     n_jobs=n_jobs,
                     oob_score=True,
                     bootstrap=True,
                     random_state=random_state)
        rf.fit(X_train, y_train)
        if self.v:
            print('R^2 Training Score: {:.2f} \nOOB Score: {:.2f} \nR^2 Validation Score: {:.2f}'.format(
                rf.score(X_train, y_train),
                rf.oob_score_,
                rf.score(X_test, y_test)))
        driverNames = self.x.columns.values
        if use_shap:
            # KernelExplainer is model-agnostic but slow, hence the small sample.
            explainer = shap.KernelExplainer(rf.predict, X_test[:shap_background_size, :])
            shap_values = explainer.shap_values(X_test[:shap_background_size, :])
            if plot:
                shap.summary_plot(shap_values, X_test[:shap_background_size, :], feature_names=driverNames)
            vals = np.abs(shap_values).mean(0)
            df_results = pd.DataFrame(list(zip(driverNames, vals)),
                                      columns=['Driver', 'Importance'])
            df_results.sort_values(by=['Importance'],
                                   ascending=False, inplace=True)
            return df_results
        else:
            df_results = pd.DataFrame(data={'Driver': driverNames,
                                            'Importance': rf.feature_importances_})
            df_results = df_results.sort_values('Importance', ascending=False)
            if plot:
                feat_importances = pd.Series(rf.feature_importances_, index=driverNames)
                feat_importances.nlargest(20).plot(kind='barh')
            return df_results
show_doc(KeyDriverAnalysis.correlations)
import pandas as pd
# Demo: key drivers of house sale price.
df = pd.read_csv('sample_data/houses.csv')
from causalnlp.key_driver_analysis import KeyDriverAnalysis
kda = KeyDriverAnalysis(df, outcome_col='SalePrice', ignore_cols=['Id', 'YearSold'])
df_results = kda.correlations()
df_results.head()
# Overall quality should be the strongest correlate of sale price.
assert df_results.iloc[[0]].index.values[0] == 'OverallQual'
show_doc(KeyDriverAnalysis.importances)
```
#### Example: Variable Importances for Housing Prices
```
df_results = kda.importances()  # RandomForest feature importances
df_results.head()
```
#### Example: Variable Importances for Probability of Making Over $50K
```
#notest
import pandas as pd
df = pd.read_csv('sample_data/adult-census.csv')
kda = KeyDriverAnalysis(df, outcome_col='class', ignore_cols=['fnlwgt'])
# SHAP-based importances (slower but model-agnostic).
df_results = kda.importances(use_shap=True, plot=True)
df_results.head()
```
| github_jupyter |
# Missing values in scikit-learn
```
#code adapted from https://github.com/thomasjpfan/ml-workshop-intermediate-1-of-2
```
## SimpleImputer
```
from sklearn.impute import SimpleImputer
import numpy as np
import sklearn
# Render estimators as interactive HTML diagrams in notebook output.
sklearn.set_config(display='diagram')
import pandas as pd
url = 'https://raw.githubusercontent.com/davidrkearney/Kearney_Data_Science/master/_notebooks/df_panel_fix.csv'
# NOTE(review): error_bad_lines is deprecated (removed in pandas 2.0); newer
# pandas uses on_bad_lines='skip' — confirm the pinned pandas version.
df = pd.read_csv(url, error_bad_lines=False)
df
import pandas as pd
import sklearn
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
df.columns
sklearn.set_config(display='diagram')
# Predict 'it' from the remaining numeric columns.
X, y = df.drop(['it', 'Unnamed: 0'], axis = 1), df['it']
X = X.select_dtypes(include='number')
X
_ = X.hist(figsize=(30, 15), layout=(5, 8))
df.isnull().sum()
```
### Default uses mean
```
imputer = SimpleImputer()  # default strategy: replace NaNs with the column mean
imputer.fit_transform(X)
df.isnull().sum()
```
### Add indicator!
```
# add_indicator appends a binary missing-value indicator column per imputed feature.
imputer = SimpleImputer(add_indicator=True)
imputer.fit_transform(X)
df.isnull().sum()
```
### Other strategies
```
imputer = SimpleImputer(strategy='median')
imputer.fit_transform(X)
imputer = SimpleImputer(strategy='most_frequent')
imputer.fit_transform(X)
```
## Categorical data
```
import pandas as pd
# For string/object columns, impute with a constant sentinel value.
imputer = SimpleImputer(strategy='constant', fill_value='sk_missing')
imputer.fit_transform(df)
```
## pandas categorical
```
# NOTE(review): df appears to have no 'a' column in the data loaded above —
# likely leftover from another dataset; confirm before running.
df['a'] = df['a'].astype('category')
df
df.dtypes
imputer.fit_transform(df)
# %load solutions/03-ex01-solutions.py
from sklearn.datasets import fetch_openml
# Breast cancer dataset (OpenML id 15) containing native missing values.
cancer = fetch_openml(data_id=15, as_frame=True)
X, y = cancer.data, cancer.target
X.shape
X.isna().sum()
imputer = SimpleImputer(add_indicator=True)
X_imputed = imputer.fit_transform(X)
X_imputed.shape
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
# Stratify to keep the class balance identical across train and test splits.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=42, stratify=y
)
# Impute -> scale -> classify, as a single pipeline.
log_reg = make_pipeline(
    SimpleImputer(add_indicator=True),
    StandardScaler(),
    LogisticRegression(random_state=0)
)
log_reg.fit(X_train, y_train)
log_reg.score(X_test, y_test)
```
## HistGradientBoosting Native support for missing values
```
# NOTE(review): in scikit-learn >= 1.0 HistGradientBoosting* is stable and this
# experimental-enable import is no longer required — confirm pinned version.
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
# Handles NaNs natively — no imputation step required.
hist = HistGradientBoostingClassifier(random_state=42)
hist.fit(X_train, y_train)
hist.score(X_test, y_test)
```
## Grid searching the imputer
```
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
iris = pd.read_csv('data/iris_w_missing.csv')
iris.head()
X = iris.drop('target', axis='columns')
y = iris['target']
X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=0, stratify=y
)
# Named steps enable grid-search parameters like 'imputer__add_indicator'.
pipe = Pipeline([
    ('imputer', SimpleImputer(add_indicator=True)),
    ('rf', RandomForestClassifier(random_state=42))
])
```
## scikit-learn uses `get_params` to find names
```
pipe.get_params()  # parameter names usable in GridSearchCV param grids
```
## Is it better to add the indicator?
```
from sklearn.model_selection import GridSearchCV
params = {
    'imputer__add_indicator': [True, False]
}
grid_search = GridSearchCV(pipe, param_grid=params, verbose=1)
grid_search.fit(X_train, y_train)
grid_search.best_params_
grid_search.best_score_
grid_search.score(X_test, y_test)
```
## Compare to `make_pipeline`
```
from sklearn.pipeline import make_pipeline
# make_pipeline auto-generates step names (e.g. 'simpleimputer') from the class.
pipe2 = make_pipeline(SimpleImputer(add_indicator=True),
                      RandomForestClassifier(random_state=42))
pipe2.get_params()
```
## Which imputer to use?
```
from sklearn.impute import KNNImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
# Swap whole imputer objects in and out of the pipeline's 'imputer' step.
params = {
    'imputer': [
        SimpleImputer(strategy='median', add_indicator=True),
        SimpleImputer(strategy='mean', add_indicator=True),
        KNNImputer(add_indicator=True),
        IterativeImputer(estimator=RandomForestRegressor(random_state=42),
                         random_state=42, add_indicator=True)]
}
search_cv = GridSearchCV(pipe, param_grid=params, verbose=1, n_jobs=-1)
search_cv.fit(X_train, y_train)
search_cv.best_params_
search_cv.best_score_
search_cv.score(X_test, y_test)
```
| github_jupyter |
# Logistic Regression
Implementation of logistic regression for binary class.
### Imports
```
import torch
import numpy as np
import matplotlib.pyplot as plt
from io import BytesIO
%matplotlib inline
```
### Dataset
```
# NOTE(review): np.lib.DataSource is deprecated/removed in newer NumPy — confirm version.
data_source = np.lib.DataSource()
data = data_source.open('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')
# First two features of the first 100 Iris rows (two species only).
x = np.genfromtxt(BytesIO(data.read().encode()), delimiter=',', usecols=range(2), max_rows=100)
y = np.zeros(100)
y[50:] = 1  # second block of 50 rows is the positive class
# Shuffle, then split: 25 test / 75 train.
np.random.seed(1)
idx = np.arange(y.shape[0])
np.random.shuffle(idx)
X_test, y_test = x[idx[:25]], y[idx[:25]]
X_train, y_train = x[idx[25:]], y[idx[25:]]
# Standardize using training-set statistics only (no test leakage).
mu, std = np.mean(X_train, axis=0), np.std(X_train, axis=0)
X_train, X_test = (X_train - mu) / std, (X_test - mu) / std
plt.scatter(X_train[y_train==0, 0], X_train[y_train==0, 1], label='class 0', marker='o')
plt.scatter(X_train[y_train==1, 0], X_train[y_train==1, 1], label='class 1', marker='s')
plt.xlabel('feature 1')
plt.ylabel('feature 2')
plt.legend()
plt.show()
```
### Model
```
# Use the first CUDA device when available, else fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class LogisticRegression(torch.nn.Module):
    """Single-layer logistic regression: a linear map followed by a sigmoid."""

    def __init__(self, num_features):
        super(LogisticRegression, self).__init__()
        self.linear = torch.nn.Linear(num_features, 1)
        # Replace the default random initialization with an all-zero start;
        # detach() lets us write the parameters without autograd tracking.
        self.linear.weight.detach().zero_()
        self.linear.bias.detach().zero_()

    def forward(self, x):
        """Return the probability of the positive class for each row of x."""
        logits = self.linear(x)
        return torch.sigmoid(logits)
```
### Train
```
model = LogisticRegression(num_features=2).to(device)
# NOTE(review): squared error over sigmoid outputs, summed over the batch —
# binary cross-entropy is the standard logistic-regression loss; confirm intent.
cost_fn = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
X_train_tensor = torch.tensor(X_train, dtype=torch.float32, device=device)
# Column-vector targets to match the model's (N, 1) output shape.
y_train_tensor = torch.tensor(y_train, dtype=torch.float32, device=device).view(-1, 1)
def custom_where(cond, x_1, x_2):
    """Elementwise select: x_1 where cond == 1, x_2 where cond == 0."""
    keep = cond * x_1
    other = (1 - cond) * x_2
    return keep + other


def comp_accuracy(label_var, pred_probas):
    """Accuracy of probabilities thresholded at 0.5 against the labels."""
    hard_preds = custom_where((pred_probas > 0.5).float(), 1, 0).view(-1)
    num_correct = torch.sum(hard_preds == label_var.view(-1)).float()
    return num_correct / label_var.size(0)
# Full-batch gradient descent for 10 epochs.
for epoch in range(10):
    # Compute outputs
    out = model(X_train_tensor)
    # Compute gradients
    loss = cost_fn(out, y_train_tensor)
    optimizer.zero_grad()
    loss.backward()
    # Update weights
    optimizer.step()
    # Report accuracy and cost after this epoch's update.
    pred_probas = model(X_train_tensor)
    acc = comp_accuracy(y_train_tensor, pred_probas)
    print('Epoch: %03d' % (epoch + 1), end="")
    print('Train Accuracy: %.3f' % acc, end="")
    print('Cost: %.3f' % cost_fn(pred_probas, y_train_tensor))
print('\nModel parameters:')
print('  Weights: %s' % model.linear.weight)
print('  Bias: %s' % model.linear.bias)
```
### Test
```
# Evaluate on the held-out split.
X_test_tensor = torch.tensor(X_test, dtype=torch.float32, device=device)
y_test_tensor = torch.tensor(y_test, dtype=torch.float32, device=device)
pred_probas = model(X_test_tensor)
test_acc = comp_accuracy(y_test_tensor, pred_probas)
print('Test set accuracy: %.2f%%' % (test_acc*100))
```
| github_jupyter |
```
# Jovian Commit Essentials
# Please retain and execute this cell without modifying the contents for `jovian.commit` to work
!pip install jovian --upgrade -q
import jovian
# Associate this notebook with its Jovian project and Colab file id.
jovian.set_project('05b-cifar10-resnet')
jovian.set_colab_id('1JkC4y1mnrW0E0JPrhY-6aWug3uGExuRf')
```
# Classifying CIFAR10 images using ResNets, Regularization and Data Augmentation in PyTorch
_A.K.A. Training an image classifier from scratch to over 90% accuracy in less than 5 minutes on a single GPU_
### Part 6 of "Deep Learning with Pytorch: Zero to GANs"
This tutorial series is a hands-on beginner-friendly introduction to deep learning using [PyTorch](https://pytorch.org), an open-source neural networks library. These tutorials take a practical and coding-focused approach. The best way to learn the material is to execute the code and experiment with it yourself. Check out the full series here:
1. [PyTorch Basics: Tensors & Gradients](https://jovian.ai/aakashns/01-pytorch-basics)
2. [Gradient Descent & Linear Regression](https://jovian.ai/aakashns/02-linear-regression)
3. [Working with Images & Logistic Regression](https://jovian.ai/aakashns/03-logistic-regression)
4. [Training Deep Neural Networks on a GPU](https://jovian.ai/aakashns/04-feedforward-nn)
5. [Image Classification using Convolutional Neural Networks](https://jovian.ai/aakashns/05-cifar10-cnn)
6. [Data Augmentation, Regularization and ResNets](https://jovian.ai/aakashns/05b-cifar10-resnet)
7. [Generating Images using Generative Adversarial Networks](https://jovian.ai/aakashns/06b-anime-dcgan/)
In this tutorial, we'll use the following techniques to train a state-of-the-art model in less than 5 minutes to achieve over 90% accuracy in classifying images from the CIFAR10 dataset:
- Data normalization
- Data augmentation
- Residual connections
- Batch normalization
- Learning rate scheduling
- Weight Decay
- Gradient clipping
- Adam optimizer
### How to run the code
This tutorial is an executable [Jupyter notebook](https://jupyter.org) hosted on [Jovian](https://www.jovian.ai). You can _run_ this tutorial and experiment with the code examples in a couple of ways: *using free online resources* (recommended) or *on your computer*.
#### Option 1: Running using free online resources (1-click, recommended)
The easiest way to start executing the code is to click the **Run** button at the top of this page and select **Run on Colab**. [Google Colab](https://colab.research.google.com) is a free online platform for running Jupyter notebooks using Google's cloud infrastructure. You can also select "Run on Binder" or "Run on Kaggle" if you face issues running the notebook on Google Colab.
#### Option 2: Running on your computer locally
To run the code on your computer locally, you'll need to set up [Python](https://www.python.org), download the notebook and install the required libraries. We recommend using the [Conda](https://docs.conda.io/projects/conda/en/latest/user-guide/install/) distribution of Python. Click the **Run** button at the top of this page, select the **Run Locally** option, and follow the instructions.
### Using a GPU for faster training
You can use a [Graphics Processing Unit](https://en.wikipedia.org/wiki/Graphics_processing_unit) (GPU) to train your models faster if your execution platform is connected to a GPU manufactured by NVIDIA. Follow these instructions to use a GPU on the platform of your choice:
* _Google Colab_: Use the menu option "Runtime > Change Runtime Type" and select "GPU" from the "Hardware Accelerator" dropdown.
* _Kaggle_: In the "Settings" section of the sidebar, select "GPU" from the "Accelerator" dropdown. Use the button on the top-right to open the sidebar.
* _Binder_: Notebooks running on Binder cannot use a GPU, as the machines powering Binder aren't connected to any GPUs.
* _Linux_: If your laptop/desktop has an NVIDIA GPU (graphics card), make sure you have installed the [NVIDIA CUDA drivers](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html).
* _Windows_: If your laptop/desktop has an NVIDIA GPU (graphics card), make sure you have installed the [NVIDIA CUDA drivers](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html).
* _macOS_: macOS is not compatible with NVIDIA GPUs
If you do not have access to a GPU or aren't sure what it is, don't worry, you can execute all the code in this tutorial just fine without a GPU.
Let's begin by installing and importing the required libraries.
```
# Uncomment and run the appropriate command for your operating system, if required
# No installation is required on Google Colab / Kaggle notebooks
# Linux / Binder / Windows (No GPU)
# !pip install numpy matplotlib torch==1.7.0+cpu torchvision==0.8.1+cpu torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html
# Linux / Windows (GPU)
# pip install torch==1.7.1+cu110 torchvision==0.8.2+cu110 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
# MacOS (NO GPU)
# !pip install numpy matplotlib torch torchvision torchaudio
import os
import torch
import torchvision
import tarfile
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torchvision.datasets.utils import download_url
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import torchvision.transforms as tt
from torch.utils.data import random_split
from torchvision.utils import make_grid
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
matplotlib.rcParams['figure.facecolor'] = '#ffffff'
project_name='05b-cifar10-resnet'
```
## Preparing the CIFAR10 Dataset
This notebook is an extension to the tutorial [Image Classification using CNNs in PyTorch](https://jovian.ai/aakashns/05-cifar10-cnn), where we trained a deep convolutional neural network to classify images from the CIFAR10 dataset with around 75% accuracy. Here are some images from the dataset:

Let's begin by downloading the dataset and creating PyTorch datasets to load the data, just as we did in the previous tutorial.
```
from torchvision.datasets.utils import download_url
# Download the dataset
dataset_url = "https://s3.amazonaws.com/fast-ai-imageclas/cifar10.tgz"
download_url(dataset_url, '.')
# Extract from archive
with tarfile.open('./cifar10.tgz', 'r:gz') as tar:
    tar.extractall(path='./data')
# Look into the data directory
data_dir = './data/cifar10'
print(os.listdir(data_dir))
# Class names are the sub-directory names under train/.
classes = os.listdir(data_dir + "/train")
print(classes)
```
We can create training and validation datasets using the `ImageFolder` class from `torchvision`. In addition to the `ToTensor` transform, we'll also apply some other transforms to the images. There are a few important changes we'll make while creating PyTorch datasets for training and validation:
1. **Use test set for validation**: Instead of setting aside a fraction (e.g. 10%) of the data from the training set for validation, we'll simply use the test set as our validation set. This just gives a little more data to train with. In general, once you have picked the best model architecture & hyperparameters using a fixed validation set, it is a good idea to retrain the same model on the entire dataset just to give it a small final boost in performance.
2. **Channel-wise data normalization**: We will normalize the image tensors by subtracting the mean and dividing by the standard deviation across each channel. As a result, the mean of the data across each channel is 0, and standard deviation is 1. Normalizing the data prevents the values from any one channel from disproportionately affecting the losses and gradients while training, simply by having a higher or wider range of values than others.
<img src="https://i.imgur.com/LYxXBVg.png" width="360">
3. **Randomized data augmentations**: We will apply randomly chosen transformations while loading images from the training dataset. Specifically, we will pad each image by 4 pixels, and then take a random crop of size 32 x 32 pixels, and then flip the image horizontally with a 50% probability. Since the transformation will be applied randomly and dynamically each time a particular image is loaded, the model sees slightly different images in each epoch of training, which allows it generalize better.

```
# Data transforms (normalization & data augmentation)
# Channel-wise CIFAR10 means and standard deviations used for normalization.
stats = ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
# Training: reflect-pad + random 32x32 crop and random horizontal flip.
train_tfms = tt.Compose([tt.RandomCrop(32, padding=4, padding_mode='reflect'),
                         tt.RandomHorizontalFlip(),
                         # tt.RandomRotate
                         # tt.RandomResizedCrop(256, scale=(0.5,0.9), ratio=(1, 1)),
                         # tt.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
                         tt.ToTensor(),
                         tt.Normalize(*stats,inplace=True)])
# Validation: only normalize, never augment.
valid_tfms = tt.Compose([tt.ToTensor(), tt.Normalize(*stats)])
# PyTorch datasets
train_ds = ImageFolder(data_dir+'/train', train_tfms)
valid_ds = ImageFolder(data_dir+'/test', valid_tfms)
```
Next, we can create data loaders for retrieving images in batches. We'll use a relatively large batch size of 400 to utilize a larger portion of the GPU RAM. You can try reducing the batch size & restarting the kernel if you face an "out of memory" error.
```
batch_size = 400
# PyTorch data loaders
# pin_memory speeds host-to-GPU transfers; validation uses a larger batch
# since no gradients need to be stored.
train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=3, pin_memory=True)
valid_dl = DataLoader(valid_ds, batch_size*2, num_workers=3, pin_memory=True)
```
Let's take a look at some sample images from the training dataloader. To display the images, we'll need to _denormalize_ the pixels values to bring them back into the range `(0,1)`.
```
def denormalize(images, means, stds):
    """Invert channel-wise normalization: images * stds + means (NCHW broadcast)."""
    mean_t = torch.tensor(means).reshape(1, 3, 1, 1)
    std_t = torch.tensor(stds).reshape(1, 3, 1, 1)
    rescaled = images * std_t
    return rescaled + mean_t
def show_batch(dl):
    """Display a grid of up to 64 denormalized images from the first batch of dl."""
    for images, labels in dl:
        fig, ax = plt.subplots(figsize=(12, 12))
        ax.set_xticks([]); ax.set_yticks([])
        # Undo normalization so colors render correctly; clamp into [0, 1].
        denorm_images = denormalize(images, *stats)
        ax.imshow(make_grid(denorm_images[:64], nrow=8).permute(1, 2, 0).clamp(0,1))
        break
show_batch(train_dl)
```
The colors seem out of place because of the normalization. Note that normalization is also applied during inference. If you look closely, you can see the cropping and reflection padding in some of the images. Horizontal flip is a bit difficult to detect from visual inspection.
## Using a GPU
To seamlessly use a GPU, if one is available, we define a couple of helper functions (`get_default_device` & `to_device`) and a helper class `DeviceDataLoader` to move our model & data to the GPU as required. These are described in more detail in a [previous tutorial](https://jovian.ml/aakashns/04-feedforward-nn#C21).
```
def get_default_device():
    """Pick GPU if available, else CPU"""
    has_cuda = torch.cuda.is_available()
    return torch.device('cuda' if has_cuda else 'cpu')
def to_device(data, device):
    """Move tensor(s) to chosen device, recursing into lists and tuples."""
    if not isinstance(data, (list, tuple)):
        # non_blocking overlaps the copy with compute when pinned memory is used
        return data.to(device, non_blocking=True)
    return [to_device(item, device) for item in data]
class DeviceDataLoader():
    """Wrap a dataloader to move data to a device"""

    def __init__(self, dl, device):
        self.dl = dl
        self.device = device

    def __iter__(self):
        """Yield each batch after transferring it to the target device."""
        return (to_device(batch, self.device) for batch in self.dl)

    def __len__(self):
        """Number of batches"""
        return len(self.dl)
```
Based on where you're running this notebook, your default device could be a CPU (`torch.device('cpu')`) or a GPU (`torch.device('cuda')`)
```
device = get_default_device()
device
```
We can now wrap our training and validation data loaders using `DeviceDataLoader` for automatically transferring batches of data to the GPU (if available).
```
# Batches now land on `device` automatically during iteration.
train_dl = DeviceDataLoader(train_dl, device)
valid_dl = DeviceDataLoader(valid_dl, device)
```
## Model with Residual Blocks and Batch Normalization
One of the key changes to our CNN model this time is the addition of the residual block, which adds the original input back to the output feature map obtained by passing the input through one or more convolutional layers.

Here is a very simple Residual block:
```
class SimpleResidualBlock(nn.Module):
    """Minimal residual block: two 3x3 convs with a skip connection back to the input."""

    def __init__(self):
        super().__init__()
        # Both convolutions preserve spatial size (padding=1) and channel count (3 -> 3).
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1)
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1)
        self.relu2 = nn.ReLU()

    def forward(self, x):
        residual = x
        hidden = self.relu1(self.conv1(x))
        hidden = self.conv2(hidden)
        # Add the input back after the final ReLU of the conv path.
        return self.relu2(hidden) + residual
# Sanity check: the residual block preserves the input shape.
simple_resnet = to_device(SimpleResidualBlock(), device)
for images, labels in train_dl:
    out = simple_resnet(images)
    print(out.shape)
    break
# Free the demo model and batch, then release cached GPU memory.
del simple_resnet, images, labels
torch.cuda.empty_cache()
```
This seemingly small change produces a drastic improvement in the performance of the model. Also, after each convolutional layer, we'll add a batch normalization layer, which normalizes the outputs of the previous layer.
Go through the following blog posts to learn more:
* Why and how residual blocks work: https://towardsdatascience.com/residual-blocks-building-blocks-of-resnet-fd90ca15d6ec
* Batch normalization and dropout explained: https://towardsdatascience.com/batch-normalization-and-dropout-in-neural-networks-explained-with-pytorch-47d7a8459bcd
We will use the ResNet9 architecture, as described in [this blog series](https://www.myrtle.ai/2018/09/24/how_to_train_your_resnet/) :

```
def accuracy(outputs, labels):
    """Fraction of rows whose highest-scoring class equals the label (scalar tensor)."""
    predicted = torch.argmax(outputs, dim=1)
    correct = torch.sum(predicted == labels).item()
    return torch.tensor(correct / len(predicted))
class ImageClassificationBase(nn.Module):
    """Mixin providing the training/validation step logic shared by image classifiers."""

    def training_step(self, batch):
        """Forward one training batch and return its cross-entropy loss."""
        images, labels = batch
        predictions = self(images)
        return F.cross_entropy(predictions, labels)

    def validation_step(self, batch):
        """Forward one validation batch; return detached loss and accuracy."""
        images, labels = batch
        predictions = self(images)
        batch_loss = F.cross_entropy(predictions, labels)
        batch_acc = accuracy(predictions, labels)
        return {'val_loss': batch_loss.detach(), 'val_acc': batch_acc}

    def validation_epoch_end(self, outputs):
        """Average the per-batch validation metrics into epoch-level numbers."""
        mean_loss = torch.stack([o['val_loss'] for o in outputs]).mean()
        mean_acc = torch.stack([o['val_acc'] for o in outputs]).mean()
        return {'val_loss': mean_loss.item(), 'val_acc': mean_acc.item()}

    def epoch_end(self, epoch, result):
        """Print a one-line summary of this epoch's metrics."""
        print("Epoch [{}], last_lr: {:.5f}, train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
            epoch, result['lrs'][-1], result['train_loss'], result['val_loss'], result['val_acc']))
def conv_block(in_channels, out_channels, pool=False):
    """Conv3x3 -> BatchNorm -> ReLU, optionally followed by 2x2 max-pooling."""
    stages = [
        nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    if pool:
        stages.append(nn.MaxPool2d(2))
    return nn.Sequential(*stages)
class ResNet9(ImageClassificationBase):
    """ResNet-9: four conv blocks with two residual stages, then a linear classifier."""

    def __init__(self, in_channels, num_classes):
        super().__init__()
        # Input: in_channels x 32 x 32 (batch leading)
        self.conv1 = conv_block(in_channels, 64)            # 64 x 32 x 32
        self.conv2 = conv_block(64, 128, pool=True)         # 128 x 16 x 16 (pooled)
        self.res1 = nn.Sequential(conv_block(128, 128),
                                  conv_block(128, 128))     # 128 x 16 x 16
        self.conv3 = conv_block(128, 256, pool=True)        # 256 x 8 x 8
        self.conv4 = conv_block(256, 512, pool=True)        # 512 x 4 x 4
        self.res2 = nn.Sequential(conv_block(512, 512),
                                  conv_block(512, 512))     # 512 x 4 x 4
        self.classifier = nn.Sequential(nn.MaxPool2d(4),    # 512 x 1 x 1
                                        nn.Flatten(),       # 512
                                        nn.Dropout(0.2),    # regularization before the head
                                        nn.Linear(512, num_classes))  # num_classes logits

    def forward(self, xb):
        """Run the backbone with two additive skip connections, then classify."""
        features = self.conv2(self.conv1(xb))
        features = self.res1(features) + features
        features = self.conv4(self.conv3(features))
        features = self.res2(features) + features
        return self.classifier(features)
# Instantiate ResNet9 for 3-channel inputs and the 10 CIFAR10 classes.
model = to_device(ResNet9(3, 10), device)
model
```
## Training the model
Before we train the model, we're going to make a bunch of small but important improvements to our `fit` function:
* **Learning rate scheduling**: Instead of using a fixed learning rate, we will use a learning rate scheduler, which will change the learning rate after every batch of training. There are many strategies for varying the learning rate during training, and the one we'll use is called the **"One Cycle Learning Rate Policy"**, which involves starting with a low learning rate, gradually increasing it batch-by-batch to a high learning rate for about 30% of epochs, then gradually decreasing it to a very low value for the remaining epochs. Learn more: https://sgugger.github.io/the-1cycle-policy.html
* **Weight decay**: We also use weight decay, which is yet another regularization technique which prevents the weights from becoming too large by adding an additional term to the loss function. Learn more: https://towardsdatascience.com/this-thing-called-weight-decay-a7cd4bcfccab
* **Gradient clipping**: Apart from the layer weights and outputs, it is also helpful to limit the values of gradients to a small range to prevent undesirable changes in parameters due to large gradient values. This simple yet effective technique is called gradient clipping. Learn more: https://towardsdatascience.com/what-is-gradient-clipping-b8e815cdfb48
Let's define a `fit_one_cycle` function to incorporate these changes. We'll also record the learning rate used for each batch.
```
@torch.no_grad()
def evaluate(model, val_loader):
    """Run one full validation pass and return the aggregated metrics.

    Gradient tracking is disabled for the whole pass via @torch.no_grad().
    """
    # Put layers such as dropout/batch-norm into inference mode.
    model.eval()
    batch_results = []
    for batch in val_loader:
        batch_results.append(model.validation_step(batch))
    # Let the model fold the per-batch outputs into epoch-level metrics.
    return model.validation_epoch_end(batch_results)
def get_lr(optimizer):
    """Return the current learning rate of the optimizer's first param group.

    All groups are scheduled together here, so the first group's value is
    representative of the whole optimizer.
    """
    groups = optimizer.param_groups
    if groups:
        return groups[0]['lr']
    return None
def fit_one_cycle(epochs, max_lr, model, train_loader, val_loader,
                  weight_decay=0, grad_clip=None, opt_func=torch.optim.SGD):
    """Train `model` with the one-cycle LR policy and return per-epoch history.

    Args:
        epochs: number of passes over `train_loader`.
        max_lr: peak learning rate of the one-cycle schedule.
        model: module exposing training_step / validation_step /
            validation_epoch_end / epoch_end hooks.
        train_loader, val_loader: training and validation data loaders.
        weight_decay: L2-style penalty passed to the optimizer.
        grad_clip: if truthy, clip each gradient element to [-grad_clip, grad_clip].
        opt_func: optimizer constructor (defaults to SGD).

    Returns:
        List of per-epoch result dicts (validation metrics, mean train loss,
        and the learning rates used for every batch of that epoch).
    """
    torch.cuda.empty_cache()
    history = []
    # Set up custom optimizer with weight decay
    optimizer = opt_func(model.parameters(), max_lr, weight_decay=weight_decay)
    # Set up one-cycle learning rate scheduler (stepped once per *batch*,
    # hence steps_per_epoch=len(train_loader))
    sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs,
                                                steps_per_epoch=len(train_loader))
    for epoch in range(epochs):
        # Training Phase
        model.train()
        train_losses = []
        lrs = []
        for batch in train_loader:
            loss = model.training_step(batch)
            train_losses.append(loss)
            loss.backward()
            # Gradient clipping: bound gradient values before the update.
            if grad_clip:
                nn.utils.clip_grad_value_(model.parameters(), grad_clip)
            optimizer.step()
            optimizer.zero_grad()
            # Record the LR used for this batch, then advance the schedule.
            lrs.append(get_lr(optimizer))
            sched.step()
        # Validation phase
        result = evaluate(model, val_loader)
        result['train_loss'] = torch.stack(train_losses).mean().item()
        result['lrs'] = lrs
        model.epoch_end(epoch, result)
        history.append(result)
    return history
# Baseline entry: validation metrics of the untrained model, so the plots
# below start from the pre-training reference point.
history = [evaluate(model, valid_dl)]
history
```
We're now ready to train our model. Instead of SGD (stochastic gradient descent), we'll use the Adam optimizer which uses techniques like momentum and adaptive learning rates for faster training. You can learn more about optimizers here: https://ruder.io/optimizing-gradient-descent/index.html
```
epochs = 8                    # number of one-cycle epochs
max_lr = 0.01                 # peak learning rate of the schedule
grad_clip = 0.1               # clip gradient elements to [-0.1, 0.1]
weight_decay = 1e-4           # L2-style penalty applied by the optimizer
opt_func = torch.optim.Adam   # Adam instead of plain SGD for faster training
%%time
history += fit_one_cycle(epochs, max_lr, model, train_dl, valid_dl,
                         grad_clip=grad_clip,
                         weight_decay=weight_decay,
                         opt_func=opt_func)
# Hard-coded wall-clock time from the recorded run; update after re-training.
train_time='4:24'
```
Our model trained to over **90% accuracy in under 5 minutes**! Try playing around with the data augmentations, network architecture & hyperparameters to achieve the following results:
1. 94% accuracy in under 10 minutes (easy)
2. 90% accuracy in under 2.5 minutes (intermediate)
3. 94% accuracy in under 5 minutes (hard)
Let's plot the validation set accuracies to study how the model improves over time.
```
def plot_accuracies(history):
    """Plot the per-epoch validation accuracy recorded in `history`."""
    accs = []
    for entry in history:
        accs.append(entry['val_acc'])
    plt.plot(accs, '-x')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.title('Accuracy vs. No. of epochs');
plot_accuracies(history)
```
We can also plot the training and validation losses to study the trend.
```
def plot_losses(history):
    """Plot training and validation loss curves for each epoch in `history`.

    The baseline entry has no 'train_loss' key, so .get() is used to yield
    None (an empty point) for it.
    """
    train_curve = []
    val_curve = []
    for entry in history:
        train_curve.append(entry.get('train_loss'))
        val_curve.append(entry['val_loss'])
    plt.plot(train_curve, '-bx')
    plt.plot(val_curve, '-rx')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(['Training', 'Validation'])
    plt.title('Loss vs. No. of epochs');
plot_losses(history)
```
It's clear from the trend that our model isn't overfitting to the training data just yet. Try removing batch normalization, data augmentation and residual layers one by one to study their effect on overfitting.
Finally, let's visualize how the learning rate changed over time, batch-by-batch over all the epochs.
```
def plot_lrs(history):
    """Plot the learning rate used for every training batch across all epochs."""
    # Epochs without recorded LRs (e.g. the baseline entry) contribute an
    # empty list, so the concatenation skips them cleanly.
    per_epoch = [entry.get('lrs', []) for entry in history]
    all_lrs = np.concatenate(per_epoch)
    plt.plot(all_lrs)
    plt.xlabel('Batch no.')
    plt.ylabel('Learning rate')
    plt.title('Learning Rate vs. Batch no.');
plot_lrs(history)
```
As expected, the learning rate starts at a low value, and gradually increases for 30% of the iterations to a maximum value of `0.01`, and then gradually decreases to a very small value.
## Testing with individual images
While we have been tracking the overall accuracy of a model so far, it's also a good idea to look at model's results on some sample images. Let's test out our model with some images from the predefined test dataset of 10000 images.
```
def predict_image(img, model):
    """Return the predicted class label (string) for a single image tensor."""
    # The model expects batches, so wrap the image as a batch of one and
    # move it to the same device as the model.
    batch = to_device(img.unsqueeze(0), device)
    logits = model(batch)
    # Argmax over the class dimension picks the most likely class index.
    _, top_class = torch.max(logits, dim=1)
    return train_ds.classes[top_class[0].item()]
# Show a few validation images with the true label and the model's prediction.
# Normalized tensors can fall outside [0, 1]; clamp before display so
# matplotlib does not clip/warn. Previously only the first example clamped,
# and the second looked its label up via valid_ds instead of train_ds —
# both are made consistent here.
img, label = valid_ds[0]
plt.imshow(img.permute(1, 2, 0).clamp(0, 1))
print('Label:', train_ds.classes[label], ', Predicted:', predict_image(img, model))
img, label = valid_ds[1002]
plt.imshow(img.permute(1, 2, 0).clamp(0, 1))
print('Label:', train_ds.classes[label], ', Predicted:', predict_image(img, model))
img, label = valid_ds[6153]
plt.imshow(img.permute(1, 2, 0).clamp(0, 1))
print('Label:', train_ds.classes[label], ', Predicted:', predict_image(img, model))
```
Identifying where our model performs poorly can help us improve the model, by collecting more training data, increasing/decreasing the complexity of the model, and changing the hyperparameters.
## Save and Commit
Let's save the weights of the model, record the hyperparameters, and commit our experiment to Jovian. As you try different ideas, make sure to record every experiment so you can look back and analyze the results.
```
# Persist the trained weights so the experiment can be reproduced later.
torch.save(model.state_dict(), 'cifar10-resnet9.pth')
!pip install jovian --upgrade --quiet
import jovian
jovian.reset()
# Record the hyperparameters of this run for later comparison.
jovian.log_hyperparams(arch='resnet9',
                       epochs=epochs,
                       lr=max_lr,
                       scheduler='one-cycle',
                       weight_decay=weight_decay,
                       grad_clip=grad_clip,
                       opt=opt_func.__name__)
# Record the final-epoch metrics alongside the (hard-coded) training time.
jovian.log_metrics(val_loss=history[-1]['val_loss'],
                   val_acc=history[-1]['val_acc'],
                   train_loss=history[-1]['train_loss'],
                   time=train_time)
jovian.commit(project=project_name, environment=None, outputs=['cifar10-resnet9.pth'])
```
## Summary and Further Reading
You are now ready to train state-of-the-art deep learning models from scratch. Try working on a project on your own by following these guidelines: https://jovian.ai/learn/deep-learning-with-pytorch-zero-to-gans/assignment/course-project
Here's a summary of the different techniques used in this tutorial to improve our model performance and reduce the training time:
* **Data normalization**: We normalized the image tensors by subtracting the mean and dividing by the standard deviation of pixels across each channel. Normalizing the data prevents the pixel values from any one channel from disproportionately affecting the losses and gradients. [Learn more](https://medium.com/@ml_kid/what-is-transform-and-transform-normalize-lesson-4-neural-networks-in-pytorch-ca97842336bd)
* **Data augmentation**: We applied random transformations while loading images from the training dataset. Specifically, we will pad each image by 4 pixels, and then take a random crop of size 32 x 32 pixels, and then flip the image horizontally with a 50% probability. [Learn more](https://www.analyticsvidhya.com/blog/2019/12/image-augmentation-deep-learning-pytorch/)
* **Residual connections**: One of the key changes to our CNN model was the addition of the residual block, which adds the original input back to the output feature map obtained by passing the input through one or more convolutional layers. We used the ResNet9 architecture. [Learn more](https://towardsdatascience.com/residual-blocks-building-blocks-of-resnet-fd90ca15d6ec)
* **Batch normalization**: After each convolutional layer, we added a batch normalization layer, which normalizes the outputs of the previous layer. This is somewhat similar to data normalization, except it's applied to the outputs of a layer, and the mean and standard deviation are learned parameters. [Learn more](https://towardsdatascience.com/batch-normalization-and-dropout-in-neural-networks-explained-with-pytorch-47d7a8459bcd)
* **Learning rate scheduling**: Instead of using a fixed learning rate, we will use a learning rate scheduler, which will change the learning rate after every batch of training. There are [many strategies](https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate) for varying the learning rate during training, and we used the "One Cycle Learning Rate Policy". [Learn more](https://sgugger.github.io/the-1cycle-policy.html)
* **Weight Decay**: We added weight decay to the optimizer, yet another regularization technique which prevents the weights from becoming too large by adding an additional term to the loss function. [Learn more](https://towardsdatascience.com/this-thing-called-weight-decay-a7cd4bcfccab)
* **Gradient clipping**: We also added gradient clipping, which helps limit the values of gradients to a small range to prevent undesirable changes in model parameters due to large gradient values during training. [Learn more.](https://towardsdatascience.com/what-is-gradient-clipping-b8e815cdfb48#63e0)
* **Adam optimizer**: Instead of SGD (stochastic gradient descent), we used the Adam optimizer which uses techniques like momentum and adaptive learning rates for faster training. There are many other optimizers to choose from and experiment with. [Learn more.](https://ruder.io/optimizing-gradient-descent/index.html)
As an exercise, you should try applying each technique independently and see how much each one affects the performance and training time. As you try different experiments, you will start to cultivate the intuition for picking the right architectures, data augmentation & regularization techniques.
You are now ready to move on to the next tutorial in this series: [Generating Images using Generative Adversarial Networks](https://jovian.ai/aakashns/06b-anime-dcgan/)
| github_jupyter |
```
#python deep_dream.py path_to_your_base_image.jpg prefix_for_results
#python deep_dream.py img/mypic.jpg results/dream
from __future__ import print_function
from keras.preprocessing.image import load_img, img_to_array
import numpy as np
import scipy
import argparse
from keras.applications import inception_v3
from keras import backend as K
from keras.preprocessing import image
import keras
import tensorflow as tf
## load striped CAV, layer 9
import os
import cav
working_dir = '/home/tyler/Desktop/tcav_on_azure'
subpath = 'striped_sub_1-random500_0-mixed9'
cav_path = 'cavs/' + subpath + '-linear-0.1.pkl'
path = os.path.join(working_dir, cav_path)
this_cav = cav.CAV.load_cav(path)
layer_9_cav = this_cav.cavs[0]
K.set_learning_phase(0)
# Build the InceptionV3 network with our placeholder.
# The model will be loaded with pre-trained ImageNet weights.
model = inception_v3.InceptionV3(weights='imagenet',include_top=False)
dream = model.input
print('Model loaded.')
# Playing with these hyperparameters will also allow you to achieve new effects
step = 0.05 # Gradient ascent step size
num_octave = 1 # Number of scales at which to run gradient ascent
octave_scale = 1.4 # Size ratio between scales
iterations = 50 # Number of ascent steps per scale
max_loss = 100000000000
base_image_path = '/home/tyler/Desktop/tcav_on_azure/concepts/horse_sub_1/img100.jpg'
#result_prefix = '/home/tyler/Desktop/tcav_on_azure/results/test'
settings = {
'features': {
'mixed9': 10
},}
img = preprocess_image(base_image_path)
layer_9_cav = layer_9_cav.reshape(-1,1)
layer_9_cav_K = K.constant(layer_9_cav)
layer_dict = dict([(layer.name, layer) for layer in model.layers])
sess = K.get_session()
# Define the loss.
#loss = K.variable(0.)
#for layer_name in settings['features']:
# Add the L2 norm of the features of a layer to the loss.
# assert layer_name in layer_dict.keys(), 'Layer ' + layer_name + ' not found in model.'
# coeff = settings['features'][layer_name]
# x = layer_dict[layer_name].output
# acts = x
# We avoid border artifacts by only involving non-border pixels in the loss.
# scaling = K.prod(K.cast(K.shape(x), 'float32'))
#loss += 3
# if K.image_data_format() == 'channels_first':
# loss += coeff * K.sum(K.square(x[:, :, 2: -2, 2: -2])) / scaling
# else:
# loss += coeff * K.sum(K.square(x[:, 2: -2, 2: -2, :])) / scaling
# Compute the gradients of the dream wrt the loss.
#grads = K.gradients(loss, model.input)[0]
# Normalize gradients.
#grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())
# Set up function to retrieve the value of the loss and gradients given an input image.
#outputs = [loss, grads]
#fetch_loss_and_grads = K.function([model.input], outputs)
loss_2 = K.variable(0.)
for layer_name in settings['features']:
assert layer_name in layer_dict.keys(), 'Layer ' + layer_name + ' not found in model.'
coeff = settings['features'][layer_name]
acts = layer_dict[layer_name].output
#flat_act = np.reshape(np.asarray(acts).squeeze(), -1)
#flat_act_norm = keras.utils.normalize(flat_act)
#loss2 = euclidean_distance(vec_norm(layer_9_cav),flat_act_norm)
#loss_2 += K.sum(K.square(K.reshape(acts,(131072,)) - layer_9_cav_K))
#loss_2 += K.dot(K.reshape(acts,(1,131072)),K.transpose(layer_9_cav_K))
loss_2 -= K.dot(K.reshape(acts,(1,131072)),layer_9_cav_K)
#loss_2 = layer_9_cav_K
#loss_2 = loss
grads_2 = K.gradients(loss_2, model.input)[0]
grads_2 /= K.maximum(K.mean(K.abs(grads_2)), K.epsilon())
outputs_2 = [loss_2, grads_2, acts]
fetch_loss_and_grads_2 = K.function([model.input], outputs_2)
def eval_loss_and_grads(x):
    """Evaluate the compiled Keras function on image batch `x`.

    Returns the scalar loss value and the gradient array (the third output,
    the raw activations, is discarded).
    """
    loss_value, grad_values = fetch_loss_and_grads_2([x])[:2]
    return loss_value, grad_values
#def eval_loss_and_grads(x):
# outs = fetch_loss_and_grads(x)
# loss_value = get_loss(x)
# grads = K.gradients(loss, model.input)[0]
# grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())
# return loss_value, grads
def gradient_ascent(x, iterations, step, max_loss=None):
    """Iteratively update image batch `x` against the gradient of the loss.

    Stops early once the loss exceeds `max_loss`, if one is given.
    Note: despite the name, the update subtracts the gradient (descent on
    this loss formulation). `x` is modified in place and also returned.
    """
    for step_idx in range(iterations):
        loss_value, grad_values = eval_loss_and_grads(x)
        if max_loss is not None and loss_value > max_loss:
            break
        print('..Loss value at', step_idx, ':', loss_value)
        # In-place update so the caller's array is advanced as well.
        x -= step * grad_values
    return x
def save_img(img, fname):
    # Convert the network-space tensor back to a displayable uint8 image
    # (copy first so deprocessing does not mutate the caller's array).
    pil_img = deprocess_image(np.copy(img))
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; this call
    # only works on older SciPy versions — confirm the pinned version.
    scipy.misc.imsave(fname, pil_img)
tf.logging.set_verbosity(0)
img_pic = image.load_img(base_image_path, target_size=(299, 299))
img = image.img_to_array(img_pic)
img = np.expand_dims(img, axis=0)
img = inception_v3.preprocess_input(img)
#original_img = np.copy(img)
img = gradient_ascent(img,iterations=iterations,step=step,max_loss=max_loss)
save_img(img, fname='results/test_1.png')
img_path = 'results/test_1.png'
test_img = image.load_img(img_path, target_size=(299, 299))
test_img
tf.logging.set_verbosity(0)
img_pic = image.load_img(base_image_path, target_size=(299, 299))
img = image.img_to_array(img_pic)
img = preprocess_image(base_image_path)
if K.image_data_format() == 'channels_first':
original_shape = img.shape[2:]
else:
original_shape = img.shape[1:3]
successive_shapes = [original_shape]
for i in range(1, num_octave):
shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
successive_shapes.append(shape)
successive_shapes = successive_shapes[::-1]
original_img = np.copy(img)
shrunk_original_img = resize_img(img, successive_shapes[0])
for shape in successive_shapes:
#print('Processing image shape', shape)
#img = resize_img(img, shape)
img = gradient_ascent(img,
iterations=iterations,
step=step,
max_loss=max_loss)
#upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
#same_size_original = resize_img(original_img, shape)
#lost_detail = same_size_original - upscaled_shrunk_original_img
#img += lost_detail
#shrunk_original_img = resize_img(original_img, shape)
save_img(img, fname='results/test_1.png')
#img
```
## Working
```
layer_name = 'mixed9'
layer_out = layer_dict[layer_name].output
layer_out
img_in = shrunk_original_img
img_in.shape
new_acts = fetch_loss_and_grads_2([img_in])[0]
new_acts
img_in
layer_9_acts = sess.run(bottlenecks_tensors[bottleneck_name],{endpoints_v3['input']: img_in})
layer_9_acts[0][5][0]
new_acts[0][5][0]
```
## New Loss
```
def get_loss(this_img):
    """Distance between the striped CAV and the activations of `this_img`.

    Runs `this_img` through the session to fetch the 'mixed9' bottleneck
    activations, flattens them, and returns the Euclidean distance between
    the L2-normalized activation vector and the L2-normalized CAV.
    """
    layer_9_acts = sess.run(bottlenecks_tensors[bottleneck_name],
                            {endpoints_v3['input']: this_img})
    # Collapse the (1, h, w, c) activation volume into a flat vector.
    flat_act = np.reshape(np.asarray(layer_9_acts).squeeze(), -1)
    # Bug fix: `loss += ...` referenced an uninitialized local and raised
    # UnboundLocalError on every call; start the accumulator at 0.0 instead.
    loss = 0.0
    loss += euclidean_distance(vec_norm(layer_9_cav), vec_norm(flat_act))
    return loss
get_loss(original_img)
original_img.shape
sess = K.get_session()
#my_graph = tf.get_default_graph()
#my_graph.get_collection()
sess
model.input
this_img = original_img
loss = K.variable(0.)
layer_9_acts = sess.run(bottlenecks_tensors[bottleneck_name],{model.input: this_img})
flat_act = np.reshape(np.asarray(layer_9_acts).squeeze(), -1)
loss += euclidean_distance(vec_norm(layer_9_cav),vec_norm(flat_act))
#K.clear_session()
#loss
#loss.eval(sess)
#K.clear_session()
#endpoints_v3
model.input
#img.shape
layer_9_acts = layer_dict[layer_name].output
layer_9_acts
x.shape
sess.run(bottlenecks_tensors[bottleneck_name],
{self.ends['input']: examples})
#bottlenecks_tensors
layer_9_cav
img.shape
model.input
#sess.run(bottlenecks_tensors[bottleneck_name],{model.input: img})
#layer_9_acts = sess.run(bottlenecks_tensors[bottleneck_name],{endpoints_v3['input']: img})
#flat_act = np.reshape(np.asarray(layer_9_acts).squeeze(), -1)
#layer_9_acts = sess.run(bottlenecks_tensors[bottleneck_name],{endpoints_v3['input']: x})
#flat_act = np.reshape(np.asarray(layer_9_acts).squeeze(), -1)
#euclidean_distance(vec_norm(layer_9_cav),vec_norm(flat_act))
```
## Static functions
```
def preprocess_image(image_path):
    """Open an image file and turn it into an InceptionV3-ready batch tensor."""
    raw = load_img(image_path)
    arr = img_to_array(raw)
    # Add the batch axis, then scale pixels into Inception's expected range.
    batched = np.expand_dims(arr, axis=0)
    return inception_v3.preprocess_input(batched)
def deprocess_image(x):
    """Invert Inception preprocessing and return a uint8 RGB image array."""
    if K.image_data_format() == 'channels_first':
        # (1, 3, H, W) -> (3, H, W) -> (H, W, 3)
        x = x.reshape((3, x.shape[2], x.shape[3])).transpose((1, 2, 0))
    else:
        # (1, H, W, 3) -> (H, W, 3)
        x = x.reshape((x.shape[1], x.shape[2], 3))
    # Undo the [-1, 1] scaling: back to [0, 1], then to [0, 255].
    x = (x / 2. + 0.5) * 255.
    return np.clip(x, 0, 255).astype('uint8')
def resize_img(img, size):
    """Resize a batched image array to `size` (height, width) via bilinear zoom.

    Batch and channel axes keep a zoom factor of 1, so only the spatial
    dimensions are rescaled.
    """
    img = np.copy(img)
    if K.image_data_format() == 'channels_first':
        h_axis, w_axis = 2, 3
    else:
        h_axis, w_axis = 1, 2
    factors = [1.0] * img.ndim
    factors[h_axis] = float(size[0]) / img.shape[h_axis]
    factors[w_axis] = float(size[1]) / img.shape[w_axis]
    return scipy.ndimage.zoom(img, factors, order=1)
def euclidean_distance(a, b):
    """Return the Euclidean (L2) distance between vectors `a` and `b`."""
    diff = a - b
    return np.linalg.norm(diff)
def vec_norm(vec):
    """Scale `vec` to unit L2 length."""
    length = np.linalg.norm(vec)
    return vec / length
def get_bottleneck_tensors():
    """Add Inception bottlenecks and their pre-Relu versions to endpoints dict."""
    graph = tf.get_default_graph()
    bn_endpoints = {}
    # change the op-type string below to change which layers are considered
    # bottlenecks: 'ConcatV2' for InceptionV3, 'MaxPool' for VGG16 (for example)
    for op in graph.get_operations():
        if 'ConcatV2' in op.type:
            scope_name = op.name.split('/')[0]
            bn_endpoints[scope_name] = op.outputs[0]
    return bn_endpoints
endpoints_v3 = dict(
input=model.inputs[0].name,
input_tensor=model.inputs[0],
logit=model.outputs[0].name,
prediction=model.outputs[0].name,
prediction_tensor=model.outputs[0],
)
bottlenecks_tensors = get_bottleneck_tensors()
bottleneck_name = 'mixed9'
#Process:
# Load the original image.
# Define a number of processing scales (i.e. image shapes), from smallest to largest.
# Resize the original image to the smallest scale.
# For every scale, starting with the smallest (i.e. current one):
# Run gradient ascent
# Upscale image to the next scale
# Reinject the detail that was lost at upscaling time
# Stop when we are back to the original size.
#To obtain the detail lost during upscaling, we simply take the original image, shrink it down, upscale it,
# and compare the result to the (resized) original image.
```
| github_jupyter |
# Getting Started with the AppEEARS API: Submitting and Downloading a Point Request
### This tutorial demonstrates how to use Python to connect to the AppEEARS API
The Application for Extracting and Exploring Analysis Ready Samples ([AppEEARS](https://lpdaacsvc.cr.usgs.gov/appeears/)) offers a simple and efficient way to access and transform geospatial data from a variety of federal data archives in an easy-to-use web application interface. AppEEARS enables users to subset [geospatial data](https://lpdaacsvc.cr.usgs.gov/appeears/products) spatially, temporally, and by band/layer for point and area samples. AppEEARS returns not only the requested data, but also the associated quality values, and offers interactive visualizations with summary statistics in the web interface. The [AppEEARS API](https://lpdaacsvc.cr.usgs.gov/appeears/api/) offers users **programmatic access** to all features available in AppEEARS, with the exception of visualizations. The API features are demonstrated in this notebook.
***
### Example: Submit a point request with multiple points in U.S. National Parks for extracting vegetation and land surface temperature data
Connect to the AppEEARS API, query the list of available products, submit a point sample request, download the request, become familiar with AppEEARS Quality API, and import the results into Python for visualization. AppEEARS point requests allow users to subset their desired data using latitude/longitude geographic coordinate pairs (points) for a time period of interest, and for specific data layers within data products. AppEEARS returns the valid data from the parameters defined within the sample request.
#### Data Used in the Example:
- Data layers:
- Combined MODIS Leaf Area Index (LAI)
- [MCD15A3H.006](https://doi.org/10.5067/MODIS/MCD15A3H.006), 500m, 4 day: 'Lai_500m'
- Terra MODIS Land Surface Temperature
- [MOD11A2.006](https://doi.org/10.5067/MODIS/MOD11A2.006), 1000m, 8 day: 'LST_Day_1km', 'LST_Night_1km'
***
# Topics Covered:
1. **Getting Started**
1a. Enable Access to the API
1b. Set Up the Working Environment
1c. Login
2. **Query Available Products [Products API]**
2a. Search and Explore Available Products [List Products]
2b. Search and Explore Available Layers [List Layers]
3. **Submit a Point Request [Tasks]**
3a. Compile a JSON [Task Object]
3b. Submit a Task Request [Submit Task]
3c. Retrieve Task Status [Retrieve Task]
4. **Download a Request [Bundle API]**
4a. Explore Files in Request Output [List Files]
4b. Download all Files in a Request [Download File]
5. **Explore AppEEARS Quality Service [Quality API]**
5a. Explore Quality Layers [List Quality Layers]
5b. Show Quality Values [List Quality Values]
5c. Decode Quality Values [Decode Quality Values]
6. **BONUS: Import Request Output and Visualize**
6a. Import CSV
6b. Plot Results (Line/Scatter Plots)
***
### Dependencies:
- This tutorial was tested using Python 3.6.1.
- A [NASA Earthdata Login](https://urs.earthdata.nasa.gov/) account is required to complete this tutorial. You can create an account at the link provided.
***
### AppEEARS Information:
To access AppEEARS, visit: https://lpdaacsvc.cr.usgs.gov/appeears/
> For comprehensive documentation of the full functionality of the AppEEARS API, please see the AppEEARS [API Documentation](https://lpdaacsvc.cr.usgs.gov/appeears/api/)
***
### Source Code used to Generate this Tutorial: UPDATE LATER
- [Jupyter Notebook](https://git.earthdata.nasa.gov/projects/LPDUR/repos/cole-sandbox/browse/.ipynb_checkpoints/AppEEARS_GeoTIFF_TS-checkpoint.ipynb)
***
# 1. Getting Started
<div class="alert alert-block alert-warning" >
<b>NOTE:</b> If this is your first time using the AppEEARS API, you must first enable API access by following the instructions provided below.
</div>
***
## 1a. Enable Access to the API
#### AppEEARS API access requires the same [NASA Earthdata Login](https://urs.earthdata.nasa.gov/) as the AρρEEARS user interface. In addition to having a valid NASA Earthdata Login account, the API feature must be enabled for the user within AρρEEARS.
#### To enable access to the AppEEARS API, navigate to the [AppEEARS website](https://lpdaacsvc.cr.usgs.gov/appeears/). Click the *Sign In* button in the top right portion of the AppEEARS landing page screen.

#### Once you are signed in, click the *Manage User* icon in the top right portion of the AppEEARS landing page screen and select *Settings*.

#### Select the *Enable API* box to gain access to the AppEEARS API.

***
## 1b. Set Up the Working Environment
#### Next, import the required packages, set the input/working directory, and create an output directory for the results.
```
# Import packages
import requests as r
import getpass, pprint, time, os, cgi
```
#### If you are missing any of the packages above, download them in order to use the full functionality of this tutorial.
```
# Set input directory, change working directory
inDir = 'C:/Users/ckrehbiel/Documents/cole-sandbox/Basic_API/' # IMPORTANT: Update to reflect directory on your OS
os.chdir(inDir) # Change to working directory
# below, set the AppEEARS API to a variable (this URL also will take you to the AppEEARS API documentation)
api = 'https://lpdaacsvc.cr.usgs.gov/appeears/api/' # https://lpdaacsvc.cr.usgs.gov/appeears/api/
```
<div class="alert alert-block alert-warning" >
<b>If you plan to execute this tutorial on your own OS, `inDir` above needs to be changed.</b>
</div>
***
## 1c. Login
#### To submit a request, you must first login to the AppEEARS API. Use the `getpass` package to enter your NASA Earthdata login **Username** and **Password**. When prompted after executing the code block below, enter your username followed by your password.
```
user = getpass.getpass() # Input NASA Earthdata Login Username
password = getpass.getpass() # Input NASA Earthdata Login Password
```
#### Use the `requests` package to post your username and password. A successful login will provide you with a token to be used later in this tutorial to submit a request. For more information or if you are experiencing difficulties, please see the [API Documentation](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#login)
```
token_response = r.post('{}login'.format(api), auth=(user, password)).json() # Insert base API URL, call the login service, provide credentials & return json
del user, password # Remove user and password information upon a successful login
token_response # Print response
```
#### Above, you should see a Bearer token. Notice that this token will expire approximately 48 hours after being acquired.
***
# 2. Query Available Products [[Product API](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#product)]
## 2a. Search and Explore Available Products [[List Products](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#list-products)]
#### The product API provides details about all of the products and layers available in AppEEARS. Below, call the product API to list all of the products available in AppEEARS.
```
product_response = r.get('{}product'.format(api)).json() # request all products in the product service
print('AppEEARS currently supports {} products.'.format(len(product_response))) # Print the number of products available in AppEEARS
```
#### Next, create a dictionary indexed by product name, making it easier to query a specific product.
```
products = {p['ProductAndVersion']: p for p in product_response} # Create a dictionary indexed by the product name and version
products['MCD15A3H.006'] # Print information for MCD15A3H.006 LAI/FPAR Product
```
#### The product service provides many useful details, including if a product is currently available in AppEEARS, a description, and information on the spatial and temporal resolution.
#### Below, make a list of all product+version names, and search for products containing *LAI* in their description.
```
prodNames = {p['ProductAndVersion'] for p in product_response} # Make list of all products (including version)
# Make a for loop to search list of products 'Description' for the acronym 'LAI'
for p in prodNames:
if 'Leaf Area Index' in products[p]['Description']:
pprint.pprint(products[p]) # Print the information for each product containing LAI in its description
```
#### Using the information above, start a list of desired products by using the highest temporal resolution LAI product available, `MCD15A3H.006`.
```
prods = ['MCD15A3H.006'] # Start a list for products to be requested, beginning with MCD15A3H.006
prods.append('MOD11A2.006') # Append the MOD11A2.006 8 day LST product to the list of products desired
```
***
# 2b. Search and Explore Available Layers [[List Layers](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#list-layers)]
#### The product API allows you to call all of the layers available for a given product. Each product is referenced by its `ProductAndVersion` property. For a list of the layer names only, print the keys from the dictionary above.
```
lst_response = r.get('{}product/{}'.format(api, prods[1])).json() # request all layers for the second product (index 1) in the product list: MOD11A2.006
list(lst_response.keys())
```
#### Use the dictionary key `'LST_Day_1km'` to see the information for that layer in the response.
```
lst_response['LST_Day_1km'] # Print layer response
```
#### AppEEARS also allows subseting data spectrally (by band). Create a tupled list with the product name and the specific layers desired.
```
layers = [(prods[1],'LST_Day_1km'),(prods[1],'LST_Night_1km')] # Create tupled list linking desired product with desired layers
```
#### Next, request the layers for the `MCD15A3H.006` product.
```
lai_response = r.get('{}product/{}'.format(api, prods[0])).json() # request all layers for the first product (index 0) in the product list: MCD15A3H.006
list(lai_response.keys()) # Print the LAI layer names
lai_response['Lai_500m']['Description'] # Make sure the correct layer is requested
```
#### Above, `Lai_500m` is the desired layer within the `MCD15A3h.006` product.
#### Next, append `Lai_500m` to the tupled list of desired product/layers.
```
layers.append((prods[0],'Lai_500m'))
```
#### Below, take the tupled list (layers) and create a dictionary to store each layer+product combination. This will make it easier to insert into the json file used to submit a request in Section 3.
```
# Build one {"layer": ..., "product": ...} dict per (product, layer) tuple —
# the shape the AppEEARS task JSON expects.
prodLayer = [{"layer": layer_name, "product": product_name}
             for (product_name, layer_name) in layers]
prodLayer
```
***
# 3. Submit a Point Request [[List Layers](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#list-layers)]
#### The **Submit task** API call provides a way to submit a new request to be processed. It can accept data via JSON, query string, or a combination of both. In the example below, compile a json and submit a request. Tasks in AppEEARS correspond to each request associated with your user account. Therefore, each of the calls to this service requires an authentication token (see Section 1C.), which is stored in a header below.
```
token = token_response['token'] # Save login token to a variable
head = {'Authorization': 'Bearer {}'.format(token)} # Create a header to store token information, needed to submit a request below
```
---
## 3a. Compile a JSON [[Task Object](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#task-object)]
#### In this section, begin by setting up the information needed to compile an acceptable json for submitting an AppEEARS point request. For detailed information on required json parameters, see the [API Documentation](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#tasks).
```
task_type = ['point','area'] # Type of task, area or point
task_name = input() # user-defined name of the task 'NPS Vegetation'
startDate = '01-01-2017' # Start of the date range for which to extract data: MM-DD-YYYY
endDate = '12-31-2017' # End of the date range for which to extract data: MM-DD-YYYY
recurring = False # Specify True for a recurring date range
# yearRange = [2000, 2016] if recurring = True, set yearRange and change start and end date to MM-DD
```
#### For point requests, the coordinates property must also be inside the task object. Below, define a GeoJSON object containing the latitude and longitude (geographic projection) points desired. Optionally, set id and category properties to further identify your selected coordinates.
```
coordinates = [{
"id": "0",
"longitude": "-112.127134",
"latitude": "36.206228",
"category": "Grand Canyon"
}, {
"id": "1",
"longitude": "-112.973760",
"latitude": "37.289327",
"category": "Zion"
}]
```
#### Finally, compile the JSON to be submitted as a point request.
```
task = {
'task_type': task_type[0],
'task_name': task_name,
'params': {
'dates': [
{
'startDate': startDate,
'endDate': endDate
}],
'layers': prodLayer,
'coordinates': coordinates
}
}
pprint.pprint(task)
```
***
## 3b. Submit a Task Request [[Submit Task](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#submit-task)]
#### Below, post a call to the API task service, using the `task` json created above.
```
task_response = r.post('{}task'.format(api), json=task, headers=head).json() # Post the json to the API task service, and return the response as a json
task_response # Print task response
```
***
## 3c. Retrieve Task Status [[Retrieve Task](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#retrieve-task)]
#### This API call will list all of the requests associated with your user account, automatically sorted by date descending with the most recent requests listed first.
#### The AppEEARS API contains some helpful formatting resources. Below, limit the API response to 4 and set `pretty` to True to format the response as an organized json, making it easier to read. Additional information on AppEEARS API [Pagination](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#pagination) and [formatting](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#formatting) can be found in the API documentation.
```
params = {'limit': 4, 'pretty': True} # Limit API response to 4 most recent entries, and return as pretty json
tasks_response = r.get('{}task'.format(api),params = params, headers=head).json() # Query the task service setting the params and header containing user token
tasks_response # Print tasks response
```
#### Next, take the task id returned from the `task_response` that was generated when submitting your request, and use the AppEEARS API status service to check the status of your request.
```
task_id = task_response['task_id'] # Set task id from request submission
status_response = r.get('{}status/{}'.format(api, task_id), headers=head).json() # Call status service with specific task ID and user credentials, return json
status_response # Print response
```
#### Above, if your request is still processing, you can find information on the status of how close it is to completing.
#### Below, call the task service for your request every 20 seconds to check the status of your request.
```
# Ping the API until the request is complete, then continue to Section 4.
# Fetch the status once per iteration: the original issued two to three GET
# requests per cycle (one in the while condition, another inside print) and
# could therefore print a different status than the one it had just tested.
starttime = time.time()
while True:
    status = r.get('{}task/{}'.format(api, task_id), headers=head).json()['status']  # Current task status
    print(status)
    if status == 'done':
        break
    time.sleep(20.0 - ((time.time() - starttime) % 20.0))  # Sleep until the next 20-second boundary
```
***
# 4. Download a Request [[Bundle API](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#bundle)]
#### Before downloading the request output, set up an output directory to store the output files, and examine the files contained in the request output.
```
destDir = os.path.join(inDir, task_name)  # Output directory: input directory + task name
# exist_ok=True makes the call idempotent and removes the check-then-create
# race present in the original `if not os.path.exists(...): os.makedirs(...)`.
os.makedirs(destDir, exist_ok=True)
```
---
## 4a. Explore Files in Request Output [[List Files](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#list-files)]
#### The bundle API provides information about completed tasks. For any completed task, a bundle can be queried to return the files contained as a part of the task request. Below, call the bundle API and return all of the output files.
```
bundle = r.get('{}bundle/{}'.format(api,task_id)).json() # Call API and return bundle contents for the task_id as json
bundle # Print bundle contents
```
***
## 4b. Download Files in a Request (Automation) [[Download File](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#download-file)]
#### Now, use the contents of the bundle to select the file name and id and store as a dictionary.
```
# Map each output file's id (key) to its file name (value) in one pass.
files = {entry['file_id']: entry['file_name'] for entry in bundle['files']}
files
```
#### Use the `files` dictionary and a for loop to automate downloading all of the output files into the output directory.
```
# Download every file in the bundle into the output directory.
# The original reused the name `f` for both the loop variable (a file id)
# and the open file handle; distinct names avoid that shadowing.
for file_id in files:
    dl = r.get('{}bundle/{}/{}'.format(api, task_id, file_id), stream=True)  # Get a stream to the bundle file
    # Parse the file name from the Content-Disposition header.
    # NOTE(review): cgi.parse_header is deprecated (PEP 594); plan a migration
    # to email.message for long-term support.
    filename = os.path.basename(cgi.parse_header(dl.headers['Content-Disposition'])[1]['filename'])
    filepath = os.path.join(destDir, filename)  # Full output file path
    with open(filepath, 'wb') as out:  # Write the streamed content to disk in chunks
        for data in dl.iter_content(chunk_size=8192):
            out.write(data)
print('Downloaded files can be found at: {}'.format(destDir))
```
***
# 5. AppEEARS Quality API [[Quality API](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#quality)]
#### The quality API provides quality details about all of the data products available in AppEEARS. Below are examples of how to query the quality API for listing quality products, layers, and values. The final example (Section 5c.) demonstrates how AppEEARS quality services can be leveraged to decode pertinent quality values for your data.
#### First, reset pagination to include `offset` which allows you to set the number of results to skip before starting to return information. Next, make a call to list all of the data product layers and where the associated quality product and layer information is located.
```
params = {'limit': 6, 'pretty': True, 'offset': 20} # Limit response to 6 most recent entries, start w/ 20th entry, and return pretty json
quality_response = r.get('{}quality'.format(api), params=params).json() # Call quality API using pagination and return json
quality_response
```
---
## 5a. Explore Quality Layers [[List Quality Layers](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#list-quality-layers)]
#### This API call will list all of the quality layer information for a product.
```
product = 'MCD15A3H.006' # Product used in the example
ql_response = r.get('{}quality/{}'.format(api,product)).json() # Call API to retrieve quality layers for selected product
ql_response # Print response
```
---
## 5b. Show Quality Values [[List Quality Values](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#list-quality-values)]
#### This API call will list all of the values for a given quality layer.
```
qlayer = ql_response[1]['QualityLayers'][0] # Set quality layer from ql_response for 'Lai_500m' above
qv_response = r.get('{}quality/{}/{}'.format(api, product, qlayer)).json() # Call API for the list of bit-word quality values
qv_response # Print response
```
---
## 5c. Decode Quality Values [[Decode Quality Values](https://lpdaacsvc.cr.usgs.gov/appeears/api/?language=Python%203#decode-quality-values)]
#### This API call will decode the bits for a given quality value.
```
val = 1 # Set a specific value
q_response = r.get('{}quality/{}/{}/{}'.format(api, product, qlayer, val)).json() # Call quality API for specific value
q_response # Print Response
```
***
# 6. BONUS: Import Request Output and Visualize
#### Here, import the CSV file containing the results from your request, and create some basic visualizations using `matplotlib`.
```
# Import Packages
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
list(files.values()) # List files downloaded
```
---
## 6a. Import CSV
#### Use the `Pandas` package to import the CSV of results from the AppEEARS request.
```
df = pd.read_csv(os.path.join(destDir,'NPS-Vegetation-MOD11A2-006-results.csv')) # Import CSV to a Pandas dataframe
df.columns # Print dataframe column names
```
#### Select the MOD11A2.006 LST Day column for the data from Grand Canyon National Park.
```
lstDay_GC = df['MOD11A2_006_LST_Day_1km'][df.Category == 'Grand Canyon'] # Filter dataframe for the pixel from GC and only keep LST Day 1 km column
dates = df['Date'] # Create list of dates
lst_response.keys() # Print the response from the product service call from earlier in the tutorial
lst_response['LST_Day_1km'] # Print product information service for the desired layer
```
#### We will need to use the product service information to find the fill value and units for visualization purposes.
```
lstFill = lst_response['LST_Day_1km']['FillValue'] # Set fill value
units = lst_response['LST_Day_1km']['Units'] # Set units
```
---
## 6b. Plot Results (Line/Scatter Plots)
#### Next, plot a time series of daytime LST for the selected point in Grand Canyon National Park for 2017.
```
# Set matplotlib plots inline
%matplotlib inline
```
#### Below, filter the LST data to exclude fill values, and plot as a time series with some additional formatting.
```
fig = plt.figure(1, figsize=(25, 7.5)) # Set the figure size
plt.style.use("dark_background") # Set default background to black instead of white
ax = fig.add_subplot(111) # Create a subplot
ax.plot(lstDay_GC[lstDay_GC!=lstFill], 'k', lw=2.5, color='#1f77b4') # Filter out fill values and plot as a blue line
ax.plot(lstDay_GC[lstDay_GC!=lstFill], 'bo', ms=10, color='#1f77b4', alpha = 0.5) # Filter out fill values and plot as a blue circle
ax.set_xticks((np.arange(0,len(lstDay_GC)))) # Set the x ticks
ax.set_xticklabels(dates, rotation=45,fontsize=10) # Set the x tick labels
ax.set_yticks((np.arange(250,325, 10))) # Arrange the y ticks
ax.set_yticklabels(np.arange(250,325, 10),fontsize=12,fontweight='bold') # Set the Y tick labels
ax.set_xlabel('Date',fontsize=16,fontweight='bold') # Set x-axis label
ax.set_ylabel("{}({})".format('LST_Day_1km', units),fontsize=16,fontweight='bold') # Set y-axis label
ax.set_title('Time Series',fontsize=20,fontweight='bold'); # Set title
```
#### Next, add the LST Night values for Grand Canyon NP, and create a time series plot with both `LST_Day_1km` and `LST_Night_1km`.
```
lstNight_GC = df['MOD11A2_006_LST_Night_1km'][df.Category == 'Grand Canyon'] # Filter df for the pixel from GC, only keep LST Night column
fig = plt.figure(1, figsize=(25, 7.5)) # Set the figure size
plt.style.use("dark_background") # Set default background to black instead of white
ax = fig.add_subplot(111) # Create a subplot
ax.plot(lstDay_GC[lstDay_GC!=lstFill], 'k', lw=2.5, color='#1f77b4') # Filter out fill values and plot as a blue line
ax.plot(lstNight_GC[lstNight_GC!=lstFill], 'k', lw=2.5, color='#e60000') # Filter out fill values and plot as a red line
ax.legend(('LST_Day_1km', 'LST_Night_1km'),loc='upper left') # Add a legend
ax.plot(lstDay_GC[lstDay_GC!=lstFill], 'bo', ms=10, color='#1f77b4', alpha = 0.5) # Filter out fill values and plot as a blue circle
ax.plot(lstNight_GC[lstNight_GC!=lstFill], 'bo', ms=10, color='#e60000', alpha = 0.5) # Filter out fill values and plot as a red circle
ax.set_xticks((np.arange(0,len(lstDay_GC)))) # Set the x ticks
ax.set_xticklabels(dates, rotation=45,fontsize=10) # Set the x tick labels
ax.set_yticks((np.arange(250,325, 10))) # Arrange the y ticks
ax.set_yticklabels(np.arange(250,325, 10),fontsize=12,fontweight='bold') # Set the Y tick labels
ax.set_xlabel('Date',fontsize=16,fontweight='bold') # Set x-axis label
ax.set_ylabel("{}({})".format('LST_Day_1km', units),fontsize=16,fontweight='bold') # Set y-axis label
ax.set_title('Daytime and Nighttime LST at Grand Canyon NP',fontsize=20,fontweight='bold'); # Set title
```
#### Finally, bring in the daytime LST data from Zion National park, and compare with daytime LST at Grand Canyon National Park, shown below in a scatterplot using `matplotlib`.
```
# Use Series.replace to map fill values to NaN: assigning into a boolean-
# filtered slice (series[series == fill] = nan) is chained assignment and
# triggers pandas' SettingWithCopyWarning.
lstDay_Z = df['MOD11A2_006_LST_Day_1km'][df.Category == 'Zion'].replace(lstFill, np.nan)  # Zion daytime LST, fills -> NaN
lstDay_GC = lstDay_GC.replace(lstFill, np.nan)  # Grand Canyon daytime LST, fills -> NaN
fig = plt.figure(1, figsize=(10, 7.5)) # Set the figure size
plt.style.use("dark_background") # Set default background to black
fig.suptitle('MODIS LST: Grand Canyon vs. Zion National Park, 2017',fontsize=20,fontweight='bold') # Add a title for the plot
ax = fig.add_subplot(111) # Add subplot
ax.set_ylabel("{} {} ({})".format('Grand Canyon:', 'LST_Day_1km', units),fontsize=16,fontweight='bold') # Set y-axis label
ax.set_xlabel("{} {} ({})".format('Zion:','LST_Day_1km', units),fontsize=16,fontweight='bold') # Set x-axis label
ax.scatter(lstDay_GC, lstDay_Z , alpha=1, c='#1f77b4', edgecolors='none', s=50, label="LST_Day"); # Plot values for both points
```
### This example can provide a template to use for your own research workflows. Leveraging the AppEEARS API for searching, extracting, and formatting analysis ready data, and importing it directly into Python means that you can keep your entire research workflow in a single software program, from start to finish.
***
<div class="alert alert-block alert-info">
<h1> Contact Information </h1>
<h3> Material written by Cole Krehbiel$^{1}$ </h3>
<b>Contact:</b> LPDAAC@usgs.gov
<b>Voice:</b> +1-605-594-6116
<b>Organization:</b> Land Processes Distributed Active Archive Center (LP DAAC)
<b>Website:</b> https://lpdaac.usgs.gov/
<b>Date last modified:</b> 03-30-2018
$^{1}$Innovate! Inc., contractor to the U.S. Geological Survey, Earth Resources Observation and Science (EROS) Center, Sioux Falls, South Dakota, 57198-001, USA. Work performed under USGS contract G15PD00467 for LP DAAC$^{2}$.
$^{2}$LP DAAC Work performed under NASA contract NNG14HH33I.
| github_jupyter |
# Make a plot with both redshift and universe age axes using astropy.cosmology
## Authors
Neil Crighton, Stephanie T. Douglas
## Learning Goals
* Plot relationships using `matplotlib`
* Add a second axis to a `matplotlib` plot
* Relate distance, redshift, and age for two different types of cosmology using `astropy.cosmology`
## Keywords
units, physics, cosmology, matplotlib
## Summary
Each redshift corresponds to an age of the universe, so if you're plotting some quantity against redshift, it's often useful to show the universe age too. The relationship between the two changes depending on the type of cosmology you assume, which is where `astropy.cosmology` comes in. In this tutorial we'll show how to use the tools in `astropy.cosmology` to make a plot like this:
```
# Set up matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import Image
Image(filename="ang_dist.png", width=500)
```
We start with a cosmology object. We will make a flat cosmology (which means that the curvature density $\Omega_k=0$) with a hubble parameter of $70$ km/s/Mpc and matter density $\Omega_M=0.3$ at redshift 0. The `FlatLambdaCDM` cosmology then automatically infers that the dark energy density $\Omega_\Lambda$ must $=0.7$, because $\Omega_M + \Omega_\Lambda + \Omega_k = 1$.
```
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
# In this case we just need to define the matter density
# and hubble parameter at z=0.
# Note the default units for the hubble parameter H0 are km/s/Mpc.
# We will pass in a `Quantity` object with the units specified.
cosmo = FlatLambdaCDM(H0=70*u.km/u.s/u.Mpc, Om0=0.3)
```
Note that we could instead use one of the built-in cosmologies, like `WMAP9` or `Planck13`, in which case we would just redefine the `cosmo` variable.
Now we need an example quantity to plot versus redshift. Let's use the angular diameter distance, which is the physical transverse distance (the size of a galaxy, say) corresponding to a fixed angular separation on the sky. To calculate the angular diameter distance for a range of redshifts:
```
import numpy as np
zvals = np.arange(0, 6, 0.1)
dist = cosmo.angular_diameter_distance(zvals)
```
Note that we passed an array of redshifts to `cosmo.angular_diameter_distance` and it produced a corresponding array of distance values, one for each redshift. Let's plot them:
```
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist)
```
To check the units of the angular diameter distance, take a look at the unit attribute:
```
dist.unit
```
Now let's put some age labels on the top axis. We're going to pick a series of round age values where we want to place axis ticks. You may need to tweak these depending on your redshift range to get nice, evenly spaced ticks.
```
ages = np.array([13, 10, 8, 6, 5, 4, 3, 2, 1.5, 1.2, 1])*u.Gyr
```
To link the redshift and age axes, we have to find the redshift corresponding to each age. The function `z_at_value` does this for us.
```
from astropy.cosmology import z_at_value
ageticks = [z_at_value(cosmo.age, age) for age in ages]
```
Now we make the second axes, and set the tick positions using these values.
```
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist)
ax2 = ax.twiny()
ax2.set_xticks(ageticks)
```
We have ticks on the top axis at the correct ages, but they're labelled with the redshift, not the age. We can fix this by setting the tick labels by hand.
```
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist)
ax2 = ax.twiny()
ax2.set_xticks(ageticks)
ax2.set_xticklabels(['{:g}'.format(age) for age in ages.value])
```
We need to make sure the top and bottom axes have the same redshift limits. They may not line up properly in the above plot, for example, depending on your setup (the age of the universe should be ~13 Gyr at z=0).
```
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist)
ax2 = ax.twiny()
ax2.set_xticks(ageticks)
ax2.set_xticklabels(['{:g}'.format(age) for age in ages.value])
zmin, zmax = 0.0, 5.9
ax.set_xlim(zmin, zmax)
ax2.set_xlim(zmin, zmax)
```
We're almost done. We just need to label all the axes, and add some minor ticks. Let's also tweak the y axis limits to avoid putting labels right near the top of the plot.
```
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist)
ax2 = ax.twiny()
ax2.set_xticks(ageticks)
ax2.set_xticklabels(['{:g}'.format(age) for age in ages.value])
zmin, zmax = 0, 5.9
ax.set_xlim(zmin, zmax)
ax2.set_xlim(zmin, zmax)
ax2.set_xlabel('Time since Big Bang (Gyr)')
ax.set_xlabel('Redshift')
ax.set_ylabel('Angular diameter distance (Mpc)')
ax.set_ylim(0, 1890)
ax.minorticks_on()
```
Now for comparison, let's add the angular diameter distance for a different cosmology, from the Planck 2013 results. And then finally, we save the figure to a png file.
```
from astropy.cosmology import Planck13
dist2 = Planck13.angular_diameter_distance(zvals)  # Angular diameter distance under the Planck 2013 cosmology
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist2, label='Planck 2013')
# Raw string for the LaTeX label: '\ ', '\O' and '\L' are invalid Python
# escape sequences and raise DeprecationWarning/SyntaxWarning (and will
# eventually be errors) in a normal string literal.
ax.plot(zvals, dist, label=
        r'$h=0.7,\ \Omega_M=0.3,\ \Omega_\Lambda=0.7$')
ax.legend(frameon=False, loc='lower right')  # frameon takes a bool
ax2 = ax.twiny()
ax2.set_xticks(ageticks)
ax2.set_xticklabels(['{:g}'.format(age) for age in ages.value])
zmin, zmax = 0.0, 5.9
ax.set_xlim(zmin, zmax)
ax2.set_xlim(zmin, zmax)
ax2.set_xlabel('Time since Big Bang (Gyr)')
ax.set_xlabel('Redshift')
ax.set_ylabel('Angular diameter distance (Mpc)')
ax.minorticks_on()
ax.set_ylim(0, 1890)
fig.savefig('ang_dist.png', dpi=200, bbox_inches='tight')  # 'tight' trims surrounding whitespace
```
`bbox_inches='tight'` automatically trims any whitespace from around the plot edges.
And we're done!
## Exercise
Well, almost done. Notice that we calculated the times on the upper axis using the original cosmology, not the new cosmology based on the Planck 2013 results. So strictly speaking, this axis applies only to the original cosmology, although the difference between the two is small. As an exercise, you can try plot two different upper axes, slightly offset from each other, to show the times corresponding to each cosmology. Take a look at the first answer to [this question on Stack Overflow](http://stackoverflow.com/questions/7733693/matplotlib-overlay-plots-with-different-scales) for some hints on how to go about this.
| github_jupyter |
ERROR: type should be string, got "https://www.kaggle.com/CVxTz/keras-bidirectional-lstm-baseline-lb-0-051\n\n```\nimport gc\nimport numpy as np\nimport pandas as pd\n\nfrom nltk.corpus import stopwords\nfrom gensim.models import KeyedVectors\nfrom tqdm import tqdm\n\nfrom keras.models import Model\nfrom keras.layers import Dense, Embedding, Input\nfrom keras.layers import LSTM, Bidirectional, GlobalAveragePooling1D, GlobalMaxPooling3D, Dropout\nfrom keras.preprocessing import text, sequence\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nmax_features = 200000\nsequence_length = 196\nembedding_dim = 300\ncreate_embedding = False\n\n\ntrain = pd.read_pickle(\"../data/train_spacy_clean.pkl\")\ntest = pd.read_pickle(\"../data/test_spacy_clean.pkl\")\n\ntrain['comment_reversed'] = train.comment_text.apply(lambda x: ' '.join(x.split(' ')[::-1]))\ntest['comment_reversed'] = test.comment_text.apply(lambda x: ' '.join(x.split(' ')[::-1]))\nlist_classes = [\"toxic\", \"severe_toxic\", \"obscene\", \"threat\", \"insult\", \"identity_hate\"]\n\ntokenizer = text.Tokenizer(num_words=max_features)\ntokenizer.fit_on_texts(train.comment_text.values.tolist() + train.comment_reversed.values.tolist() +\n test.comment_text.values.tolist() + test.comment_reversed.values.tolist())\n\nlist_tokenized_train = tokenizer.texts_to_sequences(train.comment_text.values)\nlist_tokenized_train2 = tokenizer.texts_to_sequences(train.comment_reversed.values)\nlist_tokenized_test = tokenizer.texts_to_sequences(test.comment_text.values)\nlist_tokenized_test2 = tokenizer.texts_to_sequences(test.comment_reversed.values)\n\n\nword_index = tokenizer.word_index\nnb_words = min(max_features, len(word_index)) + 1\n\nX_train = sequence.pad_sequences(list_tokenized_train, maxlen=sequence_length)\nX_train2 = sequence.pad_sequences(list_tokenized_train2, maxlen=sequence_length)\ny_train = train[list_classes].values\n\nX_test = sequence.pad_sequences(list_tokenized_test, 
maxlen=sequence_length)\nX_test2 = sequence.pad_sequences(list_tokenized_test2, maxlen=sequence_length)\n\ndel train, test, list_tokenized_train, list_tokenized_train2, list_tokenized_test, list_tokenized_test2\ngc.collect()\nif create_embedding:\n embedding_file = '/home/w/Projects/Toxic/data/embeddings/GoogleNews-vectors-negative300.bin.gz'\n word2vec = KeyedVectors.load_word2vec_format(embedding_file, binary=True)\n print('Found %s word vectors of word2vec' % len(word2vec.vocab))\n\n embedding_matrix = np.zeros((nb_words, embedding_dim))\n for word, i in tqdm(word_index.items()):\n if word in word2vec.vocab:\n embedding_matrix[i] = word2vec.word_vec(word)\n print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\nelse:\n embedding_matrix = pd.read_pickle('../data/embeddings/GoogleNews_300dim_embedding.pkl')\nimport keras_models_quora\n\n\nepochs = 100\nbatch_size = 128\n\n\nmodel_callbacks = [EarlyStopping(monitor='val_loss', patience=6, verbose=1, mode='min'),\n ReduceLROnPlateau(monitor='val_loss', factor=0.7, verbose=1,\n patience=4, min_lr=1e-6)]\n\n\nmodel = keras_models_quora.decomposable_attention('../data/embeddings/GoogleNews_300dim_embedding.pkl', maxlen=196)\nmodel.fit([X_train, X_train2], y_train, batch_size=batch_size, epochs=epochs, \n validation_split=0.1, callbacks=model_callbacks)\n\ny_test = model.predict(X_test)\n```\n\n" | github_jupyter |

#### <a href="https://github.com/rdipietro"><i class="fab fa-github"></i> GitHub</a> <a href="https://twitter.com/rsdipietro"><i class="fab fa-twitter"></i> Twitter</a>
I'm Rob DiPietro, a PhD student in the [Department of Computer Science at Johns Hopkins](https://www.cs.jhu.edu/), where I'm advised by [Gregory D. Hager](http://www.cs.jhu.edu/~hager/). My research focuses on machine learning for complex time-series data, applied primarily to health care. For example, is it possible to learn meaningful representations of surgical motion without supervision? And can we use these representations to improve automated skill assessment and automated coaching during surgical training?
## Contents
- [Recent News](#recent-news)
- [Recent Projects](#recent-projects)
- [Open-Source Contributions](#open-source-contributions)
- [Teaching](#teaching)
- [Tutorials](#tutorials)
- [Curriculum Vitae / Publications](#curriculum-vitae-publications)
- [Some Fun](#some-fun)
<a id="recent-news"></a>
## Recent News
Our paper on surgical activity recognition has been accepted as an oral presentation at MICCAI 2016:
R. DiPietro, C. Lea, A. Malpani, N. Ahmidi, S. Vedula, G.I. Lee, M.R. Lee, G.D. Hager: Recognizing Surgical Activities with Recurrent Neural Networks. Medical Image Computing and Computer Assisted Intervention (2016).
http://arxiv.org/abs/1606.06329
https://github.com/rdipietro/miccai-2016-surgical-activity-rec
<a id="recent-projects"></a>
## Recent Projects
### Unsupervised Learning for Surgical Motion

Here we learn meaningful representations of surgical motion, without supervision, by learning to predict the future. This is accomplished by combining an RNN encoder-decoder with mixture density networks to model the distribution over future motion given past motion.
The visualization shows 2-D dimensionality reductions (using t-SNE) of our obtained encodings, colored according to high-level activity, which we emphasize were not used during training. The 3 primary activities are suture throw (green), knot tying (orange), and grasp pull run suture (red), while the final activity, intermaneuver segment (blue), encompasses everything that occurs in between the primary activities.
(Under submission; link to paper and PyTorch code coming soon.)
### RNNs for Extremely Long-Term Dependencies

Here we develop mixed history recurrent neural networks (MIST RNNs), which use an attention mechanism over exponentially-spaced delays to the past in order to capture extremely long-term dependencies.
The visualization shows gradient norms as a function of delay, $\tau$. These norms can be interpreted as how much learning signal is available for a loss at time $t$ from events at time $t - \tau$ of the past.
For more information, please see [our paper](https://openreview.net/forum?id=HkElQvkvz), which was accepted as a workshop paper at ICLR 2018. Code for an older version of our paper can also be found [here](https://github.com/rdipietro/mist-rnns); we hope to release an updated version soon (now in PyTorch).
### RNNs for Surgical Activity Recognition

Here we apply long short-term memory to the task of surgical activity recognition from motion (e.g., x, y, z over time), and in doing so improve state-of-the-art performance in terms of both accuracy and edit distance.
The visualization shows results for three test sequences: best performance (top), median performance (middle), and worst performance (bottom), as measured by accuracy. In all cases, we are recognizing 4 activities over time – suture throw (black), knot tying (blue), grasp pull run suture (green), and intermaneuver segment (yellow) – showing ground truth labels above and predictions below.
For more information, please see [our paper](https://arxiv.org/abs/1606.06329), which was accepted as an oral presentation at MICCAI 2016. Code is also available [here](https://github.com/rdipietro/miccai-2016-surgical-activity-rec).
<a id="open-source-contributions"></a>
## Open-Source Contributions
Nowadays nearly all of my code is written using Python, NumPy, and PyTorch. I moved to PyTorch from TensorFlow in 2017, and my experience has resembled [Andrej Karpathy's](https://twitter.com/karpathy/status/868178954032513024) :).
I've made small open-source contributions (code, tests, and/or docs) to [TensorFlow](https://github.com/tensorflow/tensorflow), [PyTorch](https://github.com/pytorch/pytorch), [Edward](https://github.com/blei-lab/edward), [Pyro](https://github.com/uber/pyro), and other projects.
Some of my projects can be found here: <a href="https://github.com/rdipietro"><i class="fab fa-github"></i> GitHub</a>
<a id="teaching"></a>
## Teaching
I've fortunately had the chance to teach quite a bit at Hopkins. I love teaching, despite its tendency to devour time.
### Johns Hopkins University
- **Co-Instructor for EN.601.382, Machine Learning: Deep Learning Lab.** Spring, 2018.
- **Co-Instructor for EN.601.482/682, Machine Learning: Deep Learning.** Spring, 2018.
- **Teaching Assistant for EN.601.475/675, Introduction to Machine Learning.** Fall, 2017.
- **Instructor for EN.500.111, HEART: Machine Learning for Surgical Workflow Analysis.** Fall, 2015.
- **Teaching Assistant for EN.600.476/676, Machine Learning: Data to Models.** Spring, 2015.
- **Co-Instructor for EN.600.120, Intermediate Programming.** Spring, 2014.
- **Instructor for EN.600.101, MATLAB for Data Analytics.** Intersession, 2014.
<a id="tutorials"></a>
## Tutorials
- [A Friendly Introduction to Cross-Entropy Loss](http://rdipietro.github.io/friendly-intro-to-cross-entropy-loss). Introduces entropy, cross entropy, KL divergence, and discusses connections to likelihood.
- [TensorFlow Scan Examples](http://rdipietro.github.io/tensorflow-scan-examples). This is an old tutorial in which we build, train, and evaluate a simple recurrent neural network from scratch. I do not recommend this tutorial. Instead, I recommend switching to PyTorch if at all possible :).
<a id="curriculum-vitae-publications"></a>
## Curriculum Vitae / Publications
My CV is [here](dipietro-cv.pdf) and my publications are [here](https://scholar.google.com/citations?user=hgSJHaUAAAAJ).
<a id="some-fun"></a>
## Some Fun
#### Paragliding in Switzerland (2014)

#### Skydiving in Switzerland (2010)

| github_jupyter |
# Table of Contents
<p><div class="lev1"><a href="#Learning-Objectives"><span class="toc-item-num">1 </span>Learning Objectives</a></div><div class="lev2"><a href="#Disclaimer"><span class="toc-item-num">1.1 </span>Disclaimer</a></div><div class="lev1"><a href="#Plotting-with-ggplot"><span class="toc-item-num">2 </span>Plotting with ggplot</a></div><div class="lev1"><a href="#Building-your-plots-iteratively"><span class="toc-item-num">3 </span>Building your plots iteratively</a></div><div class="lev1"><a href="#Boxplot"><span class="toc-item-num">4 </span>Boxplot</a></div><div class="lev2"><a href="#Challenges"><span class="toc-item-num">4.1 </span>Challenges</a></div><div class="lev1"><a href="#Plotting-time-series-data"><span class="toc-item-num">5 </span>Plotting time series data</a></div><div class="lev1"><a href="#Faceting"><span class="toc-item-num">6 </span>Faceting</a></div><div class="lev1"><a href="#Challenge"><span class="toc-item-num">7 </span>Challenge</a></div><div class="lev1"><a href="#Customization"><span class="toc-item-num">8 </span>Customization</a></div>
# Learning Objectives #
By the end of this lesson the learner will:
* Create a ggplot object
* Set universal plot settings
* Modify an existing ggplot object
* Change the aesthetics of a plot such as colour
* Edit the axis labels
* Build complex plots using a step-by-step approach
* Create scatter plots, box plots, and time series plots
* Use the facet_wrap and facet_grid commands to create a collection of plots splitting the data by a factor variable
* Create customized plot styles to meet their needs
## Disclaimer ##
Python has powerful built-in plotting capabilities such as `matplotlib`, but for this exercise, we will be using the [`ggplot`](http://ggplot.yhathq.com/) package, which facilitates the creation of highly-informative plots of structured data based on the R implementation of [`ggplot2`](http://ggplot2.org/) and [The Grammar of Graphics](http://link.springer.com/book/10.1007%2F0-387-28695-0) by Leland Wilkinson.
```
import pandas as pd
surveys_complete = pd.read_csv('data_output/surveys_complete.csv')
surveys_complete
%matplotlib inline
from ggplot import *
```
# Plotting with ggplot
We will make the same plot using the `ggplot` package.
`ggplot` is a plotting package that makes it simple to create complex plots
from data in a dataframe. It uses default settings, which help creating
publication quality plots with a minimal amount of settings and tweaking.
ggplot graphics are built step by step by adding new elements.
To build a ggplot we need to:
- bind the plot to a specific data frame using the `data` argument
- define aesthetics (`aes`), by selecting the variables to be plotted and the variables to define the presentation
such as plotting size, shape color, etc.,
```
ggplot( aesthetics= aes(x = 'weight', y = 'hindfoot_length'), data = surveys_complete)
```
- add `geoms` -- graphical representation of the data in the plot (points,
lines, bars). To add a geom to the plot use `+` operator:
```
ggplot( aes(x = 'weight', y = 'hindfoot_length'), data = surveys_complete) + geom_point()
```
The `+` in the `ggplot2` package is particularly useful because it allows you
to modify existing `ggplot` objects. This means you can easily set up plot
"templates" and conveniently explore different types of plots, so the above
plot can also be generated with code like this:
```
# Create
surveys_plot = ggplot( aes(x = 'weight', y = 'hindfoot_length'), data = surveys_complete)
# Draw the plot
surveys_plot + geom_point()
```
Notes:
- Anything you put in the `ggplot()` function can be seen by any geom layers
that you add (i.e., these are universal plot settings). This includes the x and
y axis you set up in `aes()`.
- You can also specify aesthetics for a given geom independently of the
aesthetics defined globally in the `ggplot()` function.
# Building your plots iteratively
Building plots with ggplot is typically an iterative process. We start by
defining the dataset we'll use, lay the axes, and choose a geom.
```
ggplot(aes(x = 'weight', y = 'hindfoot_length'), data = surveys_complete, ) + geom_point()
```
Then, we start modifying this plot to extract more information from it. For
instance, we can add transparency (alpha) to avoid overplotting.
```
ggplot(aes(x = 'weight', y = 'hindfoot_length'), data = surveys_complete) + \
geom_point(alpha = 0.1)
```
We can also add colors for all the points
```
ggplot(aes(x = 'weight', y = 'hindfoot_length'),data = surveys_complete) + \
geom_point(alpha = 0.1, color = "blue")
```
Or to color each species in the plot differently:
```
# ggplot(data = surveys_complete, aes(x = weight, y = hindfoot_length)) +
# geom_point(alpha = 0.1, aes(color=species_id))
ggplot(aes(x = 'weight', y = 'hindfoot_length', color='species_id'),data = surveys_complete) + \
geom_point( alpha = 0.1)
```
# Boxplot
Visualising the distribution of weight within each species.
R code:
```R
ggplot(data = surveys_complete, aes(x = species_id, y = hindfoot_length)) +
geom_boxplot()
```
```
ggplot( aes(x = 'species_id', y = 'hindfoot_length'), data = surveys_complete) + geom_boxplot()
```
By adding points to boxplot, we can have a better idea of the number of
measurements and of their distribution:
R code:
```R
ggplot(data = surveys_complete, aes(x = species_id, y = hindfoot_length)) +
geom_jitter(alpha = 0.3, color = "tomato") +
geom_boxplot(alpha = 0)
```
```
surveys_complete['species_factor'] = surveys_complete['species_id'].astype('category').cat.codes
xlabels = sorted(set(surveys_complete['species_id'].values) )
xcodes = sorted(set(surveys_complete['species_factor'].values))
ggplot(aes(x = 'species_factor', y = 'hindfoot_length'),data = surveys_complete) + \
geom_point(position='jitter',alpha=0.7,jittersize=0.4) + \
scale_x_continuous(breaks=xcodes, labels=xlabels) + \
xlab('species_id') + geom_boxplot(alpha=0)
```
Notice how the boxplot layer is behind the jitter layer? What do you need to
change in the code to put the boxplot in front of the points such that it's not
hidden.
## Challenges
> Boxplots are useful summaries, but hide the *shape* of the distribution. For
> example, if there is a bimodal distribution, this would not be observed with a
> boxplot. An alternative to the boxplot is the violin plot (sometimes known as a
> beanplot), where the shape (of the density of points) is drawn.
>
> - Replace the box plot with a violin plot; see `geom_violin()`
>
> In many types of data, it is important to consider the *scale* of the
> observations. For example, it may be worth changing the scale of the axis to
> better distribute the observations in the space of the plot. Changing the scale
> of the axes is done similarly to adding/modifying other components (i.e., by
> incrementally adding commands).
>
> - Represent weight on the log10 scale; see `scale_y_log10()`
>
> - Create boxplot for `hindfoot_length`.
>
> - Add color to the datapoints on your boxplot according to the plot from which
> the sample was taken (`site_id`)
Hint: Check the class for `site_id`. Consider changing the class of `site_id`
from integer to factor. Why does this change how R makes the graph?
```
## Challenges:
## Start with the boxplot we created:
ggplot(aes(x = 'species_factor', y = 'hindfoot_length'),data = surveys_complete) + \
geom_jitter(alpha=0.3) + \
scale_x_discrete(breaks=xcodes, labels=xlabels) + \
xlab('species_id') + geom_boxplot(alpha=0)
## 1. Replace the box plot with a violin plot; see `geom_violin()`.
ggplot(aes(x = 'species_factor', y = 'hindfoot_length'),data = surveys_complete) + \
geom_jitter(alpha=0.3) + \
scale_x_discrete(breaks=xcodes, labels=xlabels) + \
xlab('species_id') + geom_violin(alpha=0)
## 2. Represent weight on the log10 scale; see `scale_y_log10()`.
ggplot(aes(x = 'species_factor', y = 'hindfoot_length'),data = surveys_complete) + \
geom_jitter(alpha=0.3) + \
scale_x_discrete(breaks=xcodes, labels=xlabels) + \
xlab('species_id') + geom_violin(alpha=0) + \
scale_y_log(base=10)
## 3. Create boxplot for `hindfoot_length`.
ggplot(aes(x = 'species_factor', y = 'hindfoot_length'),data = surveys_complete) + \
geom_jitter(alpha=0.01) + \
scale_x_discrete(breaks=xcodes, labels=xlabels) + \
xlab('species_id') + geom_boxplot(alpha=0) + \
scale_y_log(base=10)
## 4. Add color to the datapoints on your boxplot according to the
## plot from which the sample was taken (`site_id`).
## Hint: Check the class for `site_id`. Consider changing the class
## of `site_id` from integer to factor. Why does this change how R
## makes the graph?
ggplot(aes(x = 'species_factor', y = 'hindfoot_length', color='site_id'),data = surveys_complete) + \
geom_jitter(alpha=0.01) + \
scale_x_discrete(breaks=xcodes, labels=xlabels) + \
xlab('species_id') + geom_boxplot(alpha=0) + \
scale_y_log(base=10)
```
# Plotting time series data
Let's calculate number of counts per year for each species. To do that we need
to group data first and count records within each group.
```
yearly_counts = surveys_complete[['year','species_id']].groupby(['year', 'species_id']).size().reset_index()
yearly_counts.columns = ['year','species_id', 'n']
yearly_counts
```
Timelapse data can be visualised as a line plot with years on x axis and counts
on y axis.
```
ggplot(aes(x = 'year', y = 'n'),data = yearly_counts) + \
geom_line()
```
Unfortunately this does not work, because we plot data for all the species
together. We need to tell ggplot to draw a line for each species by modifying
the aesthetic function to include `group = species_id`.
```
ggplot(aes(x = 'year', y = 'n', group='species_id'),data = yearly_counts) + geom_line()
```
We will be able to distinguish species in the plot if we add colors.
```
ggplot(aes(x = 'year', y = 'n', color='species_id'),data = yearly_counts) + geom_line()
```
# Faceting
ggplot has a special technique called *faceting* that allows to split one plot
into multiple plots based on a factor included in the dataset. We will use it to
make one plot for a time series for each species.
Now we would like to split line in each plot by sex of each individual
measured. To do that we need to make counts in data frame grouped by year,
species_id, and sex:
```
yearly_sex_counts = surveys_complete.groupby( ['year','species_id', 'sex']).count()
yearly_sex_counts['n'] = yearly_sex_counts['record_id']
yearly_sex_counts = yearly_sex_counts['n'].reset_index()
yearly_sex_counts
```
We can now make the faceted plot splitting further by sex (within a single plot):
```
ggplot(aes(x = "year", y = "n", color = "species_id", group = "sex"), data = yearly_sex_counts, ) + \
geom_line() + \
facet_wrap( "species_id")
```
Usually plots with white background look more readable when printed. We can set
the background to white using the function `theme_bw()`. Additionally you can also remove
the grid.
```
ggplot(aes(x = "year", y = "n", color = "species_id", group = "sex"),data = yearly_sex_counts ) + \
geom_line() + \
facet_wrap( "species_id") + \
theme_bw() + \
theme()
```
To make the plot easier to read, we can color by sex instead of species (species
are already in separate plots, so we don't need to distinguish them further).
```
ggplot(aes(x = "year", y = "n", color = "sex", group = "sex"), data = yearly_sex_counts) + \
geom_line() + \
facet_wrap("species_id") + \
theme_bw()
```
# Challenge
> Use what you just learned to create a plot that depicts how the average weight
> of each species changes through the years.
<!-- Answer
```
yearly_weight = surveys_complete[["year", "species_id","weight"]].groupby(["year", "species_id"]).mean().reset_index()
yearly_weight.columns = ["year", "species_id","avg_weight"]
yearly_weight
ggplot( aes(x="year", y="avg_weight", color = "species_id", group = "species_id"),data = yearly_weight) + \
geom_line() + \
facet_wrap("species_id") + \
theme_bw()
## Plotting time series challenge:
## Use what you just learned to create a plot that depicts how the
## average weight of each species changes through the years.
```
The `facet_wrap` geometry extracts plots into an arbitrary number of dimensions
to allow them to cleanly fit on one page. On the other hand, the `facet_grid`
geometry allows you to explicitly specify how you want your plots to be
arranged via formula notation (`rows ~ columns`; a `.` can be used as
a placeholder that indicates only one row or column).
Let's modify the previous plot to compare how the weights of male and females
has changed through time.
```
## One column, facet by rows
yearly_sex_weight = surveys_complete[
['year','sex','species_id','weight']].groupby(
["year", "sex", "species_id"]).mean().reset_index()
yearly_sex_weight.columns = ['year','sex','species_id','avg_weight']
yearly_sex_weight
ggplot( aes(x="year", y="avg_weight", color = "species_id", group = "species_id"),data = yearly_sex_weight) + \
geom_line() + \
facet_grid("sex")
# One row, facet by column
ggplot( aes(x="year", y="avg_weight", color = "species_id", group = "species_id"),data = yearly_sex_weight) + \
geom_line() + \
facet_grid(None, "sex")
```
# Customization
Take a look at the ggplot2 cheat sheet
(https://www.rstudio.com/wp-content/uploads/2015/08/ggplot2-cheatsheet.pdf), and
think of ways to improve the plot. You can write down some of your ideas as
comments in the Etherpad.
Now, let's change names of axes to something more informative than 'year'
and 'n' and add a title to this figure:
```
ggplot( aes(x = "year", y = "n", color = "sex", group = "sex"),data = yearly_sex_counts) + \
geom_line() + \
facet_wrap( "species_id" ) + \
labs(title = 'Observed species in time',
x = 'Year of observation',
y = 'Number of species') + \
theme_bw()
```
The axes have more informative names, but their readability can be improved by
increasing the font size. While we are at it, we'll also change the font family:
```
ggplot( aes(x = "year", y = "n", color = "sex", group = "sex"),data = yearly_sex_counts) + \
geom_line() + \
facet_wrap( "species_id" ) + \
theme_bw() + \
theme(axis_title_x = element_text(size=16, family="Arial"),
axis_title_y = element_text(size=16, family="Arial")) + \
labs(title = 'Observed species in time',
x = 'Year of observation',
y = 'Number of species')
```
After our manipulations we notice that the values on the x-axis are still not
properly readable. Let's change the orientation of the labels and adjust them
vertically and horizontally so they don't overlap. You can use a 90 degree
angle, or experiment to find the appropriate angle for diagonally oriented
labels.
```
ggplot( aes(x = "year", y = "n", color = "sex", group = "sex"),data = yearly_sex_counts) + \
geom_line() + \
facet_wrap( "species_id" ) + \
labs(title = 'Observed species in time',
x = 'Year of observation',
y = 'Number of species') + \
theme_bw() + \
theme(axis_text_x = element_text(color="grey", size=10, angle=90, hjust=.5, vjust=.5),
axis_text_y = element_text(color="grey", size=10, hjust=0),
)
```
If you like the changes you created to the default theme, you can save them as
an object to easily apply them to other plots you may create:
```
arial_grey_theme = theme(axis_text_x = element_text(color="grey", size=10, angle=90, hjust=.5, vjust=.5),
axis_text_y = element_text(color="grey", size=10))
ggplot(surveys_complete, aes(x = 'species_id', y = 'hindfoot_length')) + \
geom_boxplot() + \
arial_grey_theme
```
With all of this information in hand, please take another five minutes to either
improve one of the plots generated in this exercise or create a beautiful graph
of your own. Use the RStudio ggplot2 cheat sheet, which we linked earlier for
inspiration.
Here are some ideas:
* See if you can change thickness of the lines.
* Can you find a way to change the name of the legend? What about its labels?
* Use a different color palette (see http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/)
After creating your plot, you can save it to a file in your favourite format.
You can easily change the dimension (and its resolution) of your plot by
adjusting the appropriate arguments (`width`, `height` and `dpi`):
```
my_plot = ggplot(yearly_sex_counts, aes(x = "year", y = "n", color = "sex", group = "sex"))
my_plot += geom_line()
my_plot += facet_wrap("species_id")
my_plot += labs(title = 'Observed species in time',
x = 'Year of observation',
y = 'Number of species')
my_plot += theme_bw()
my_plot += theme(axis_text_x = element_text(color="grey", size=10, angle=90, hjust=.5, vjust=.5),
axis_text_y = element_text(color="grey", size=10))
my_plot.save("name_of_file.png", width=15, height=10)
## Final plotting challenge:
## With all of this information in hand, please take another five
## minutes to either improve one of the plots generated in this
## exercise or create a beautiful graph of your own. Use the RStudio
## ggplot2 cheat sheet for inspiration:
## https://www.rstudio.com/wp-content/uploads/2015/08/ggplot2-cheatsheet.pdf
```
| github_jupyter |
# Travelling Salesman Problem with subtour elimination
This example shows how to solve a TSP by eliminating subtours using:
1. amplpy (defining the subtour elimination constraint in AMPL and instantiating it appropriately)
2. ampls (adding cuts directly from the solver callback)
### Options
```
SOLVER = "xpress"
SOLVER_OPTIONS = ['outlev=1']
USE_CALLBAKCS = True
PLOTSUBTOURS = True
TSP_FILE = "../tsp/a280.tsp"
import sys
sys.path.append('D:/Development/ampl/solvers-private/build/vs64/bin')
```
### Imports
```
# Import utilities
from amplpy import AMPL, DataFrame  # pip install amplpy
# Load the ampls solver-specific extension matching SOLVER
if SOLVER == "gurobi":
    import amplpy_gurobi as ampls   # pip install ampls-gurobi
elif SOLVER == "cplex":
    import amplpy_cplex as ampls    # pip install ampls-cplex
elif SOLVER == "xpress":
    import amplpy_xpress as ampls   # pip install ampls-xpress
import tsplib95 as tsp              # pip install tsplib95
import matplotlib.pyplot as plt     # pip install matplotlib
import matplotlib.colors as colors
from time import time
plt.rcParams['figure.dpi'] = 200
```
### Register jupyter magics for AMPL
```
from amplpy import register_magics
register_magics('_ampl_cells') # Store %%ampl cells in the list _ampl_cells
```
### Define TSP model in AMPL
```
%%ampl
set NODES ordered;
param hpos {NODES};
param vpos {NODES};
set PAIRS := {i in NODES, j in NODES: ord(i) < ord(j)};
param distance {(i,j) in PAIRS}
:= sqrt((hpos[j]-hpos[i])**2 + (vpos[j]-vpos[i])**2);
var X {PAIRS} binary;
minimize Tour_Length: sum {(i,j) in PAIRS} distance[i,j] * X[i,j];
subject to Visit_All {i in NODES}:
sum {(i, j) in PAIRS} X[i,j] + sum {(j, i) in PAIRS} X[j,i] = 2;
```
Function to load TSP data files and return a dictionary of (nodeid : coordinate)
```
def getDictFromTspFile(tspFile):
    """Load a TSPLIB file and return {zero-padded node label: coordinates}.

    Labels are zero-padded so that lexicographic order matches numeric
    order (the AMPL NODES set is declared `ordered`).
    """
    p = tsp.load(tspFile)
    if not p.is_depictable:
        # No plottable coordinates; we still proceed with the model data
        print("Problem is not depictable!")
    # Amendments as we need the nodes lexicographically ordered:
    # count the number of decimal digits needed for the node count...
    nnodes = len(list(p.get_nodes()))
    i = 0
    while nnodes>1:
        nnodes = nnodes/10
        i+=1
    # ...and build a fixed-width zero-padded format string such as "{:03d}"
    formatString = f"{{:0{i}d}}"
    # NOTE(review): node_coords appears to be 1-based (hence index+1) — confirm with tsplib95
    nodes = {formatString.format(value) : p.node_coords[index+1] for index, value in enumerate(p.get_nodes())}
    return nodes
```
Create AMPL object with amplpy and load model and data
```
# Get the model from the cell above
tsp_model = _ampl_cells[0]
# Load model in AMPL
ampl = AMPL()
ampl.eval(tsp_model)
ampl.option["solver"] = SOLVER
ampl.option[SOLVER + "_options"] = ' '.join(SOLVER_OPTIONS)
# Set problem data from tsp file
nodes = getDictFromTspFile(TSP_FILE)
# Pass them to AMPL using a dataframe
df = DataFrame(index=[('NODES')], columns=['hpos', 'vpos'])
df.setValues(nodes)
ampl.setData(df, "NODES")
# Set some globals that never change during the execution of the problem
NODES = set(nodes.keys())
CPOINTS = {node : complex(coordinate[0], coordinate[1]) for (node, coordinate) in nodes.items()}
```
Define some helpers functions to plot the tours
```
def plotTours(tours: list, points_coordinate: dict):
    """Draw every tour in *tours*, cycling through a fixed colour palette."""
    palette = ['b', 'g', 'c', 'm', 'y', 'k']
    for idx, current_tour in enumerate(tours):
        # Map node labels (possibly quoted) to their complex-plane coordinates
        coords = [points_coordinate[p.strip("'")] for p in current_tour]
        plot_all(coords, color=palette[idx % len(palette)])
    plt.show()
def plot_all(tour, alpha=1, color=None):
    """Plot a closed tour: a line through all points plus a square marker at the start."""
    closed = list(tour)
    closed.append(tour[0])  # close the cycle back to the starting point
    plotline(closed, alpha=alpha, color=color)
    plotline([tour[0]], 's', alpha=alpha, color=color)
def plotline(points, style='o-', alpha=1, color=None):
    """Plot a list of points (complex numbers) in the 2-D plane."""
    xs, ys = XY(points)
    kwargs = {'alpha': alpha}
    if color:
        kwargs['color'] = color
    plt.plot(xs, ys, style, **kwargs)
def XY(points):
    """Split a list of complex points into two lists: X coordinates and Y coordinates."""
    xs = []
    ys = []
    for point in points:
        xs.append(point.real)
        ys.append(point.imag)
    return xs, ys
```
Define some helper functions to help with the graphs (e.g. get the subtour given a set of arcs)
```
# Graphs helper routines
def trasverse(node, arcs: set, allnodes: set, subtour = None) -> list:
    """Traverse the arcs in *arcs* starting from *node* and return the tour.

    Consumed arcs are removed from *arcs* and visited nodes from *allnodes*.
    Returns None when the starting node has no incident arc left.
    """
    if not subtour:
        subtour = list()
    # Arcs touching the current node (at either endpoint)
    incident = [arc for arc in arcs if node in arc]
    if not incident:
        return
    # Record the current node and follow the first incident arc
    subtour.append(node)
    chosen = incident[0]
    nxt = chosen[0] if chosen[0] != node else chosen[1]
    # Consume the arc and the node so they are not visited again
    arcs.remove(chosen)
    allnodes.discard(node)
    trasverse(nxt, arcs, allnodes, subtour)
    return subtour
def findSubTours(arcs: set, allnodes: set):
    """Partition the graph given by *arcs* into its subtours.

    Returns a list of node lists, one per tour. *arcs* is consumed in
    place; *allnodes* is left untouched (a copy is traversed).
    """
    remaining = allnodes.copy()
    tours = []
    while remaining:
        start = next(iter(remaining))
        tours.append(trasverse(start, arcs, remaining))
    return tours
```
AMPLPY implementation of sub-tours elimination
```
def amplSubTourElimination(ampl: AMPL):
    """Iteratively solve the TSP, adding subtour-elimination constraints in
    AMPL after each solve, until the solution is a single tour.

    Relies on module-level globals: NODES, PLOTSUBTOURS, CPOINTS,
    findSubTours, plotTours.
    """
    # Add the constraint and the needed parameters
    subToursAMPL = """param nSubtours >= 0 integer, default 0;
    set SUB {1..nSubtours} within NODES;
    subject to Subtour_Elimination {k in 1..nSubtours}:
    sum {i in SUB[k], j in NODES diff SUB[k]}
    if (i, j) in PAIRS then X[i, j] else X[j, i] >= 2;"""
    ampl.eval(subToursAMPL)
    nSubtoursParam = ampl.getParameter("nSubtours")
    SubtoursSet = ampl.getSet("SUB")
    allsubtours = list()
    while True:  # Repeat until the solution contains only one tour
        ampl.solve()
        # Get solution: the arcs selected (X > 0) in the current optimum
        ARCS = ampl.getData("{(i,j) in PAIRS : X[i,j] > 0} X[i,j];")
        ARCS = set([(i, j) for (i, j, k)in ARCS.toList()])
        subtours = findSubTours(ARCS, NODES)
        # If we have only one tour, the solution is valid
        if len(subtours) <= 1:
            break
        print(f"Found {len(subtours)} subtours, plotting them and adding cuts")
        if PLOTSUBTOURS:
            plotTours(subtours, CPOINTS)
        # Else add the current tours to the list of subtours
        allsubtours.extend(subtours)
        # And add those to the constraints by assigning the values to
        # the parameter and the set (AMPL sets are 1-based, hence i+1)
        nSubtoursParam.set(len(allsubtours))
        for (i, tour) in enumerate(allsubtours):
            SubtoursSet[i+1].setValues(tour)
```
ampls callbacks implementation of subtours elimination
```
# Callback class that actually adds the cuts if subtours are found in a solution
class MyCallback(ampls.GenericCallback):
    """Solver callback: on every candidate MIP solution, detect subtours and
    add the corresponding subtour-elimination cuts as lazy constraints.

    Relies on module-level globals xvars, xinverse, vertices (set by
    solverSubTourElimination) and PLOTSUBTOURS / CPOINTS.
    """

    def __init__(self):
        # Constructor, simply sets the iteration number to 0
        super().__init__()
        self.iteration = 0

    def run(self):
        """Called by the solver; return 0 to continue, 1 to signal an error."""
        try:
            # Only act on candidate integer solutions
            if self.getAMPLWhere() == ampls.Where.MIPSOL:
                self.iteration += 1
                # BUG FIX: the original printed "/n" instead of a newline
                print(f"\nIteration {self.iteration}: Finding subtours")
                sol = self.getSolutionVector()
                # (i, j) pairs of the arcs selected in the candidate solution
                arcs = [xvars[i] for i, value in enumerate(sol) if value > 0]
                subTours = findSubTours(set(arcs), set(vertices))
                if len(subTours) == 1:
                    print("No subtours detected. Not adding any cut")
                    return 0
                print(f"Adding {len(subTours)} cuts")
                if PLOTSUBTOURS:
                    plotTours(subTours, CPOINTS)
                # One lazy cut per subtour: at least 2 arcs must cross the cut
                for subTour in subTours:
                    st1 = set(subTour)
                    nst1 = set(vertices) - st1
                    # Arcs leaving the subtour, normalized to (low, high) order
                    externalArcs = [(i, j) if i < j else (j, i) for i in st1 for j in nst1]
                    varsExternalArcs = [xinverse[i, j] for (i, j) in externalArcs]
                    coeffs = [1 for i in range(len(varsExternalArcs))]
                    if PLOTSUBTOURS:
                        print("Adding cut for subtour:", st1)
                    self.addLazyIndices(varsExternalArcs, coeffs, ampls.CutDirection.GE, 2)
                if len(subTours) == 2:
                    return 0
                print("Continue solving")
                return 0
        except Exception as e:
            print('Error:', e)
            return 1
# Global variables to store entities needed by the callbacks
# that never change
xvars = None
xinverse = None
vertices = None
def solverSubTourElimination(ampl: AMPL, solver, solver_options):
    """Solve the TSP adding subtour-elimination cuts lazily from a solver
    callback (ampls), instead of repeatedly re-solving as in
    amplSubTourElimination.

    Side effects: fills the module-level globals xvars, xinverse, vertices
    used by MyCallback, and imports the final solution back into *ampl*.
    """
    global xvars, xinverse, vertices
    # Export the model using ampls
    model = ampl.exportModel(solver, solver_options)
    model.enableLazyConstraints()
    # Get the global maps between solver vars and AMPL entities
    varMap = model.getVarMapFiltered("X")
    #print("varMap:", varMap)
    inverse = model.getVarMapInverse()
    # solver column index -> (i, j) node pair, and the inverse map
    xvars = {index: ampls.var2tuple(var)[1:] for var, index in varMap.items()}
    xinverse = {ampls.var2tuple(var)[1:]: index for index, var in inverse.items()}
    # All node labels appearing at either end of an arc, sorted
    vertices = list(sorted(set([x[0] for x in xvars.values()] + [x[1] for x in xvars.values()])))
    # Assign the callback
    callback = MyCallback()
    model.setCallback(callback)
    print("Start optimization")
    # Start the optimization
    model.optimize()
    # Import the solution back to AMPL
    ampl.importSolution(model)
```
Script running the optimization
```
t0 = time()
if not USE_CALLBAKCS:
amplSubTourElimination(ampl)
else:
solverSubTourElimination(ampl, SOLVER, SOLVER_OPTIONS)
```
Get the solution, print it and display it
```
# Get the solution into ARCS
ARCS = ampl.getData("{(i,j) in PAIRS : X[i,j] > 0} X[i,j];")
ARCS = set([(i,j) for (i,j,k) in ARCS.toList()])
# Display it
tours = findSubTours(ARCS, NODES)
for st in tours:
print(st)
plotTours(tours, CPOINTS)
ampl.getValue('Tour_Length')
time()-t0
```
| github_jupyter |
# 动手实现胶囊网络
## 前言
2017年,Hinton团队提出胶囊网络,首次将标量型网络扩展到矢量。本着learning by doing的态度,我尝试对原论文进行复现,因此这里不会对其原论文原理和思想有太多解释。尽可能保证工程性和完整性,并在实现过程中不断总结和反思。实现过程中也许会有一些bug,欢迎交流和提交issue~
**Author**: QiangZiBro
**GitHub**: https://github.com/QiangZiBro
## 1.1 引入必备的包
本文依赖第三方框架pytorch,实验使用1.2,基本来说各个版本都可以用。
```
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
import numpy as np
from torchvision import transforms
from torchvision.utils import save_image
```
## 1.2 超参数定义
```
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 5
batch_size = 64
learning_rate = 1e-3
```
## 1.3 数据加载
```
# MNIST dataset
root="/home/qiangzibro/2TB1/"
train_dataset = torchvision.datasets.MNIST(root=root,
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = torchvision.datasets.MNIST(root=root,
train=False,
transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
def show_img(data_tuple):
    """Display a single (image, label) dataset sample and print its label."""
    image, label = data_tuple
    plt.imshow(image.squeeze())
    print(f"label is {label}")
show_img(test_dataset[1])
```
## 1.4 胶囊网络的压缩函数与动态路由算法
胶囊网络由三层组成:卷积层,初级胶囊层,卷积胶囊层,其中卷积胶囊层使用了动态路由算法。我们来一一实现这些功能。
首先实现两个函数,压缩函数与动态路由函数
```
def squash(x):
    """Capsule squashing non-linearity.

    Rescales each capsule vector to length ||v||^2 / (1 + ||v||^2) while
    keeping its direction; zero vectors are mapped to zero instead of NaN.

    Args:
        x: capsule vectors, shape (B, N, D); squashed along dim 2.
    Return:
        squashed tensor with the same shape as x.
    """
    L = torch.norm(x, dim=2, keepdim=True)  # capsule lengths, (B, N, 1)
    L_square = L**2                         # (B, N, 1)
    c = (L_square)/(1+L_square)/L           # scale factor; NaN where L == 0
    s = c*x                                 # (B, N, D)
    # BUG FIX: `s[s == np.nan] = 0` never matched because NaN != NaN;
    # use an explicit NaN mask so zero-length capsules squash to zero.
    s[torch.isnan(s)] = 0
    return s
x = torch.rand(1,10,16)
squash(x).shape
```
动态路由算法相当于一个聚类过程,将底层的若干个向量以迭代的路由方法,选取若干具有代表性的胶囊。
- 输入 (B, 10, 32x6x6, 16)
- 输出 (B, 10, 16)
要注意的细节
- b的维度是多少? b是有batchsize的,我最开是设置的是没有batchsize,效果不太好。
```
def dynamic_routing(x, iterations=3):
    """Dynamic routing-by-agreement between two capsule layers.

    Args:
        x: prediction vectors u_hat, shape (B, N_out, N_in, D, 1) —
           e.g. (B, 10, 32*6*6, 16, 1) for the MNIST CapsNet.
        iterations: number of routing iterations (3 in the paper).
    Return:
        v: output capsules, shape (B, N_out, D)
    """
    # Generalized: layer sizes are read from the input instead of being
    # hard-coded to the MNIST architecture (10 and 32*6*6).
    B, N1, N = x.shape[0], x.shape[1], x.shape[2]
    # Routing logits, one per (output capsule, input capsule) pair
    b = torch.zeros(B, N1, N, 1, 1, device=x.device)
    for _ in range(iterations):
        # Coupling coefficients: each input capsule distributes itself
        # over the output capsules with total probability 1
        c = F.softmax(b, dim=1)                        # (B, N1, N, 1, 1)
        # Weighted sum of predictions, squashed to a unit-bounded length
        s = torch.sum(x.matmul(c), dim=2).squeeze(-1)  # (B, N1, D)
        v = squash(s)                                  # (B, N1, D)
        # Agreement update: reinforce predictions aligned with the output
        b = b + v[:, :, None, None, :].matmul(x)       # (B, N1, N, 1, 1)
    return v
x = torch.rand(1,10,32*6*6,16, 1)
dynamic_routing(x).shape
```
## 1.5 初级胶囊层
在实现初级胶囊层时,我们要了解一些细节,比如
- 怎样表示一个胶囊? 每个像素点上一个1x8的向量
- 怎样计算出初级胶囊?使用32组卷积核,每组输出8个通道
- 怎样在程序里存这些胶囊?我的做法是将所有的胶囊放在一列,换句话说,放在一个`(B, 32*6*6, 8)`的矩阵里面
```
class PrimaryCapsuleLayer(nn.Module):
    """Primary capsule layer: 32 parallel 9x9/stride-2 convolutions, each
    producing an 8-channel map; every spatial position of every group
    contributes one 8-dim capsule (32*6*6 capsules for a 20x20 input)."""
    def __init__(self):
        super().__init__()
        self.primary_capsule_layer = \
            nn.ModuleList([nn.Conv2d(256,8,9, stride=2) for _ in range(32)])
    def forward(self, x):
        """ Produce primary capsules
        Args:
            x: features with (B, 256, 20, 20)
        Return:
            vectors (B, 32*6*6, 8)
        """
        capsules = [conv(x) for conv in self.primary_capsule_layer] # [[B, 8, 6, 6] * 32]
        capsules_reshaped = [c.reshape(-1,8,6*6) for c in capsules] # [[B, 8, 36] * 32]
        # Concatenate the 32 groups, then move the capsule dim last
        s = torch.cat(capsules_reshaped, dim=-1).permute(0, 2, 1) # (B, 32*6*6, 8)
        # Squash so that capsule lengths lie in [0, 1)
        return squash(s)
# Unit test
def test_for_primary_capsule_layer():
    """Smoke-test: a (1, 256, 20, 20) feature map yields 32*6*6 capsules of dim 8."""
    features = torch.rand(1, 256, 20, 20)
    caps_layer = PrimaryCapsuleLayer()
    assert caps_layer(features).shape == (1, 32 * 6 * 6, 8)
test_for_primary_capsule_layer()
```
## 1.6 卷积胶囊层
在实现卷积胶囊层时,我们要了解一些细节,比如
- 高维矩阵相乘怎么进行计算?
比如对(B, 32x6x6, 8)大小的向量矩阵,通过权重矩阵,得到输出(B,10, 32x6x6,16)的矩阵,通过下面高维矩阵相乘方式推出$o=Wx$
- W `(1, 10, 32x6x6, 16, 8)`
- x `(B, 1, 32x6x6, 8, 1) `
- o `(B, 10, 32x6x6, 16, 1)`
上面两个矩阵相乘看起来有点复杂,怎么思考呢?多维矩阵的相乘可以看作最后两个维度作矩阵乘法,两个维度我们肯定很清楚,维度(a,b)和(b,c)两个矩阵相乘就是(a,c)。其他维度要么进行广播机制,要么不变。所以,从上面的维度,可以知道,前两个维度实行广播机制,第三个不变,最后两个维度的乘法也就是(16,8)和(8,1)的向量相乘,完成了变换。因此,就有了下面的例子
```python
B = 1
x = torch.rand(B,32*6*6,8)
x = x[:,None,...,None]
w = torch.rand(1,10,32*6*6,16,8)
w.matmul(x).shape
# torch.Size([1, 10, 1152, 16, 1])
```
```
class CapsLayer(nn.Module):
    """Class-capsule ("digit caps") layer: maps primary capsules to one
    out_channels_dim-dimensional capsule per class via a learned linear
    transform followed by dynamic routing."""
    def __init__(self,nclasses=10, out_channels_dim=16):
        super().__init__()
        # W: (1, nclasses, 32*6*6, out_dim, 8) — one 16x8 transform per
        # (class, primary capsule) pair; leading 1 broadcasts over the batch.
        self.W = nn.Parameter(1e-3 * torch.randn(1,nclasses,32*6*6,out_channels_dim,8))
    def forward(self, x):
        """Predict and routing
        Args:
            x: Input vectors, (B, 32*6*6, 8)
        Return:
            class capsules, (B, 10, 16)
        """
        # Reshape for broadcast matmul: (B, 1, 32*6*6, 8, 1)
        x = x[:,None,...,None]
        u_hat = self.W.matmul(x) # prediction vectors, (B, 10, 32x6x6, 16, 1)
        assert u_hat.shape[1:] == (10, 32*6*6, 16, 1)
        class_capsules = dynamic_routing(u_hat)
        return class_capsules
def test_for_caps_layer():
    """Smoke-test: 32*6*6 primary capsules route into 10 class capsules of dim 16."""
    primary = torch.rand(1, 32 * 6 * 6, 8)
    routing_layer = CapsLayer()
    assert routing_layer(primary).shape == (1, 10, 16)
test_for_caps_layer()
```
## 1.7 胶囊网络
实现了前面必须的几层,相信胶囊网络也是非常好搭了。我们定义的胶囊网络最后输出为10个分类向量
```
class CapsNet(nn.Module):
    """Capsule network for MNIST classification:
    conv features -> primary capsules -> class capsules (dynamic routing)."""
    def __init__(self):
        super().__init__()
        # 9x9 conv: (B, 1, 28, 28) -> (B, 256, 20, 20)
        self.conv_layer = nn.Conv2d(1,256,9)
        self.primary_layer = PrimaryCapsuleLayer()
        self.caps_layer = CapsLayer(nclasses=10, out_channels_dim=16)
    def forward(self, x):
        """
        Args:
            x : Input img, (B, 1, 28, 28)
        Return:
            the class capsules, each capsule is a 16 dimension vector
        """
        x = self.conv_layer(x) # (B, 256, 20, 20)
        x = self.primary_layer(x) # (B, 32*6*6, 8)
        x = self.caps_layer(x) # (B, 10, 16)
        return x
def test_for_caps_net():
    """Smoke-test: one 28x28 grayscale image yields 10 class capsules of dim 16."""
    image = torch.rand(1, 1, 28, 28)
    network = CapsNet()
    assert network(image).shape == (1, 10, 16)
test_for_caps_net()
```
# 实验1 训练一个分类的胶囊网络
## 实现损失
现在我们来到第一个实验,训练一个分类网络。首先了解原文提到的损失
$$
L_{k}=T_{k} \max \left(0, m^{+}-\left\|\mathbf{v}_{k}\right\|\right)^{2}+\lambda\left(1-T_{k}\right) \max \left(0,\left\|\mathbf{v}_{k}\right\|-m^{-}\right)^{2}
$$
这其中$\left\|\mathbf{v}_{k}\right\|$就是胶囊网络最后输出来的分类胶囊,k表示第k个。这个式子可以理解为一个分段函数
\begin{equation}
L_{k}=\left\{
\begin{aligned}
\max \left(0, m^{+}-\left\|\mathbf{v}_{k}\right\|\right)^{2}& & {第k个胶囊正确分类} \\
\lambda \max \left(0,\left\|\mathbf{v}_{k}\right\|-m^{-}\right)^{2} & & {第k个胶囊错误分类}
\end{aligned}
\right.
\end{equation}
其中$m^{+}=0.9,m^{-}=0.1, \lambda=0.5$,也就是说,分类胶囊是概率为0.9以上且分类正确的,以及概率为0.1且错误的,我们都是为是”好“的,因此训练时我们采样梯度下降到方式往这个方向靠拢。
因此,我们首先需要自定义一个损失。在pytorch里面,损失定义很简单
一些细节
- 使用onehot向量T和预测胶囊模长相乘来选取正确预测的值,(1-T)和预测胶囊模长相乘来选取错误预测的值
```
def margin_loss(y, y_hat):
    """Capsule-network margin loss (Sabour et al., 2017, Eq. 4).

    L_k = T_k * max(0, m+ - ||v_k||)^2
          + lambda * (1 - T_k) * max(0, ||v_k|| - m-)^2

    Args:
        y: ground truth labels (B)
        y_hat: class capsules with (B, 10, 16)
    Return
        the margin loss, summed over batch and classes
    """
    _lambda = 0.5
    m_plus = 0.9
    m_minus = 0.1
    nclasses = 10
    y_norm = y_hat.norm(dim=-1)  # capsule lengths, (B, 10)
    T = F.one_hot(y, nclasses)   # use it as index for right class (B, 10)
    T = T.float()
    zeros = torch.zeros_like(y_norm)
    # BUG FIX: the present-class term must be masked by T *outside* the max;
    # max(0, m_plus - y_norm*T) added a constant 0.81 for every absent class.
    right = T * torch.max(zeros, m_plus - y_norm) ** 2
    wrong = _lambda * ((1 - T) * torch.max(zeros, y_norm - m_minus) ** 2)
    return torch.sum(right + wrong)
def test_margin_loss():
    """Smoke-test margin_loss on a random batch (prints the scalar value)."""
    labels = torch.randint(0, 10, (20,))
    capsules = torch.rand(20, 10, 16)
    print(margin_loss(labels, capsules).item())
import time
def train(net, epochs, dataloader, reconstruction=False, report=30):
    """Train *net* with Adam, printing the summed loss per epoch.

    Args:
        net: model; called as net(X) (classification, margin_loss) or as
             net(X, y) when reconstruction=True (autoencoder, total_loss).
        epochs: number of passes over the dataloader.
        dataloader: yields (X_batch, y_batch) pairs.
        reconstruction: train the reconstruction network instead of the
             plain classifier.
        report: unused; kept for interface compatibility.
    Return:
        list of per-epoch summed loss values.

    Relies on module-level globals: device, margin_loss, total_loss.
    """
    net.train()
    optimizer = torch.optim.Adam(net.parameters())
    train_history = []
    for epoch in range(epochs):
        # BUG FIX: accumulate a Python float, not a tensor —
        # `epoch_loss += loss` retained every batch's autograd graph
        # (memory leak) and mixed a CPU tensor with CUDA losses on GPU.
        epoch_loss = 0.0
        for X_batch, y_batch in dataloader:
            X_batch, y_batch = X_batch.to(device), y_batch.to(device)
            if reconstruction:
                y_hat_param = net(X_batch, y_batch)
                loss = total_loss(X_batch, y_batch, y_hat_param)
            else:
                y_hat = net(X_batch)
                loss = margin_loss(y_batch, y_hat)
            epoch_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        train_history.append(epoch_loss)
        print(f"Epoch {epoch+1} loss is {epoch_loss}")
    return train_history
# Start training
torch.autograd.set_detect_anomaly(True)
encoder = CapsNet().to(device)
# BUG FIX: `train` has no loss-function parameter; passing margin_loss
# positionally bound it to `reconstruction` (truthy), which took the
# autoencoder branch and called net(X, y) — a TypeError for CapsNet.
train(encoder, 5, train_loader, report=460)
# Test the model
def evaluate(model, test_loader):
    """Classification accuracy: the predicted class is the capsule with the
    largest norm. Uses the module-level `device`."""
    model.eval()  # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
    n_correct, n_seen = 0, 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            capsule_norms = model(images).norm(dim=-1)
            predictions = capsule_norms.data.max(1)[1]
            n_seen += labels.size(0)
            n_correct += (predictions == labels).sum().item()
    return n_correct / n_seen
acc = evaluate(encoder, test_loader)
print('Test Accuracy of the model on the 10000 test images: {:.2f}%'.format(100 * acc))
```
有点惊喜,训练了五个epoch,效果还不错。
# 实验2 构建重建网络
## 解码器实现
除了做简单的分类外,原作者对预测正确的向量进行解码,解码到图片空间。这个过程现有的实现都是用MLP来实现的。也就是说,对一个预测正确的向量(1x16),使用`(16 --> 28*28)`的解码网络即可,只是中间网络层数可能需要多一些。原文用了三个全连接层,[1][2]给隐层size设定都是512,1024。
这里编程的细节有
- 运用zip来迭代两个序列
- 根据list来选元素: `class_capsules[torch.arange(B), y]`,参考https://discuss.pytorch.org/t/selecting-element-on-dimension-from-list-of-indexes/36319/3
- 在训练的时候,因为前面encoder已经训练过了,我们可以冻结掉前面编码网络参数,只训练解码网络,可以进行下列操作
```python
def set_parameter_requires_grad(model, feature_extracting):
    """Freeze all parameters of `model` when it is used as a fixed feature extractor."""
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False
set_parameter_requires_grad(encoder, True)
# For the optimizer: pass only the parameters that still require gradients.
optimizer = optim.Adam(
    filter(lambda p: p.requires_grad, net.parameters()),
    lr=0.1
)
```
```
class MLPDecoder(nn.Module):
    """Decode predicted capsule vectors back to images.

    Mirrors the reconstruction decoder of the original CapsNet paper
    (Sabour et al., 2017): fully connected layers with ReLU between them
    and a final Sigmoid so pixel values land in [0, 1].

    Usage:
        decoder = MLPDecoder([512, 1024], 16, (28,28))
        reconstructed_x = decoder(selected_capsules)
    """
    def __init__(self, hidden, in_channels, out_shape):
        """
        Args:
            hidden: hidden-layer sizes, e.g. [512, 1024].
            in_channels: capsule dimension (16 for CapsNet).
            out_shape: (H, W) of the reconstructed image.
        """
        super().__init__()
        self.out_shape = out_shape
        h, w = out_shape
        out_channels = w * h
        sizes = [in_channels] + hidden + [out_channels]
        layers = []
        for i, (_in, _out) in enumerate(zip(sizes[:-1], sizes[1:])):
            layers.append(nn.Linear(_in, _out))
            # Fix: without nonlinearities a stack of Linear layers collapses
            # to a single linear map (not an MLP). Add ReLU between hidden
            # layers, matching the cited decoder design.
            if i < len(sizes) - 2:
                layers.append(nn.ReLU(inplace=True))
        # Sigmoid output: MNIST pixels are normalized to [0, 1].
        layers.append(nn.Sigmoid())
        self.mlp = nn.Sequential(*layers)
    def forward(self, x):
        """
        Args:
            x: (B, 16) selected capsule vectors.
        Return:
            reconstructed images with shape (B, 1, H, W), values in [0, 1].
        """
        B = x.shape[0]
        x = self.mlp(x)
        return x.reshape(B, 1, *self.out_shape)
def test_decoder():
    """Smoke-test MLPDecoder: (5, 16) capsules -> (5, 1, 28, 28) images."""
    dec = MLPDecoder([512, 1024], 16, (28, 28))
    sample = torch.rand(5, 16)
    assert dec(sample).shape == (5, 1, 28, 28)
test_decoder()
```
对于自编码器的设计,我们将编解码设得灵活一点,通过构造函数传入
```
class CapsAE(nn.Module):
    """Capsule autoencoder: a capsule classifier followed by a decoder that
    reconstructs the input from the capsule of the true class.
    """
    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
    def forward(self, x, y):
        """
        Args:
            x: input images, (B, C, H, W) — (B, 1, 28, 28) for MNIST.
            y: ground-truth labels, (B,).
        Return:
            (class_capsules, reconstructed_x): capsules are (B, 10, 16),
            reconstructions are (B, 1, 28, 28).
        """
        batch = x.shape[0]
        class_capsules = self.encoder(x)                   # (B, 10, 16)
        # Pick, per sample, the capsule belonging to the true class:
        # https://discuss.pytorch.org/t/selecting-element-on-dimension-from-list-of-indexes/36319/3
        selected = class_capsules[torch.arange(batch), y]  # (B, 16)
        assert selected.shape == (batch, 16)
        return class_capsules, self.decoder(selected)
```
- 加上重建损失的总损失
$$
L_{total} = L_{margin} + 0.0005 \times L_{reconstruction}
$$
其中
$$
L_{reconstruction} = ||x - \hat{x} ||^2
$$
```
def total_loss(x, y, y_hat_params, c=0.0005):
    """Margin loss + c * reconstruction (MSE) loss; c defaults to 0.0005.
    Args:
        x: original images (B,C,H,W)
        y: ground-truth labels (B,)
        y_hat_params: a tuple of (class_capsules, reconstructed_x)
        c: weight of the reconstruction term
    """
    class_capsules, reconstructed_x = y_hat_params
    return margin_loss(y, class_capsules)+c*F.mse_loss(x,reconstructed_x)
# Build the autoencoder — a fresh CapsNet encoder plus an MLP decoder —
# and train it end-to-end with the combined margin + reconstruction loss.
encoder = CapsNet()
decoder = MLPDecoder([512, 1024], 16, (28,28))
autoencoder = CapsAE(
    encoder = encoder,
    decoder = decoder
).to(device)
train_loss = train(autoencoder, 5, train_loader, reconstruction=True, report=460)
```
## 模型评估
对自编码器,我们从两个方面来评估:
- 编码器的分类能力
- 解码器解码效果
```
# Classification accuracy of the encoder trained jointly inside the AE.
acc = evaluate(encoder, test_loader)
print('Test accuracy of the model on the 10000 test images of encoder in AE: {:.2f}%'.format(100 * acc))
def evaluate_ae(model, test_loader, once=False):
    """Collect reconstruction results from an autoencoder.

    Args:
        model: autoencoder called as model(images, labels).
        test_loader: yields (images, labels) batches.
        once: if True, stop after the first batch (quick visual check).
    Return:
        (originals, reconstructions), each stacked to (N, 1, 28, 28).
    """
    model.eval()  # eval mode (batchnorm uses moving statistics, not batch stats)
    with torch.no_grad():
        originals = []        # ground-truth images
        reconstructions = []  # decoder outputs
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            _, rebuilt = model(images, labels)
            reconstructions.append(rebuilt)
            originals.append(images)
            if once:
                break
    return torch.cat(originals, dim=0), torch.cat(reconstructions, dim=0)
# Visual check: first 10 test digits (top row) vs their reconstructions (bottom row).
X,X_ = evaluate_ae(autoencoder, test_loader, once=True)
X,X_ = X.cpu(),X_.cpu()
fg, axs = plt.subplots(nrows=2, ncols=10, gridspec_kw={'hspace': 0, 'wspace': 0.1}, figsize=(13,5))
fg.suptitle('Groundtruths - Reconstructions')
for i in range(10):
    axs[0, i].imshow(X[i].squeeze(), cmap='binary')
    axs[1, i].imshow(X_[i].squeeze(), cmap='binary')
    axs[0, i].axis('off')
    axs[1, i].axis('off')
plt.show()
```
## 总结
- 重建效果 不是特别好,和[1]相比有些糊,但是基本轮廓是重建出来了
# 实验3 分类胶囊的每一位代表什么?
```
def evaluate_class_capsule(model, x, y, delta=1, dim=0, l=5):
    """Perturb one dimension of the selected class capsule and decode.

    The chosen dimension `dim` is shifted by k*delta for k in [-l, l] and
    each shifted capsule is decoded, to visualize what that dimension
    encodes. Uses the global `device`.

    Args:
        model: autoencoder with .encoder / .decoder submodules.
        x: input images (B, 1, 28, 28).
        y: ground-truth labels (B,).
        delta: step size of the perturbation.
        dim: capsule dimension (0..15) to perturb.
        l: number of steps on each side of zero.
    Return:
        [x, decode(shift=-l*delta), ..., decode(shift=+l*delta)],
        each entry of shape (B, 1, 28, 28).
    """
    model.eval()
    batch = x.shape[0]
    enc, dec = model.encoder, model.decoder
    with torch.no_grad():
        capsules = enc(x)                          # (B, 10, 16)
        chosen = capsules[torch.arange(batch), y]  # (B, 16)
        assert chosen.shape == (batch, 16)
        # One-hot mask selecting the single dimension to perturb.
        mask = F.one_hot(torch.ones(1, dtype=torch.long)*dim, num_classes=16)
        mask = mask.float().to(device)
        decoded = [dec(chosen + k*delta*mask) for k in range(-l, l+1)]
        decoded.insert(0, x)
    return decoded
def research_for_class_capsule(N=2, dim=0, delta=0.5):
    """Visualize perturbations of capsule dimension `dim` for the first N
    test images: one row per image, one column per shift (column 0 is the
    original image). Uses globals: test_loader, device, autoencoder.
    """
    # Use only the first batch; `break` exits after one iteration.
    for X,y in test_loader:
        X,y = X[0:N].to(device),y[0:N].to(device)
        result = evaluate_class_capsule(autoencoder, X, y, delta=delta, dim=dim)
        break
    fg, axs = plt.subplots(nrows=N, ncols=len(result), gridspec_kw={'hspace': 0, 'wspace': 0.1}, figsize=(13,5))
    fg.suptitle(f'research for capsule dim={dim}, delta={delta}')
    for i in range(N):
        for j in range(len(result)):
            axs[i, j].imshow(result[j][i].squeeze().cpu(), cmap='binary')
            axs[i, j].axis('off')
    plt.show()
def research_for_class_capsule_for(i=0, delta=0.5):
    """Visualize, for the i-th image of the first test batch, the effect of
    perturbing each of the 16 capsule dimensions (one row per dimension,
    one column per shift). Uses globals: test_loader, device, autoencoder.
    """
    result = []
    # Only the first batch is used; `i` indexes into that batch, so it must
    # be smaller than the test batch size.
    for X,y in test_loader:
        X,y = X[i][None,...].to(device),y[i][None,...].to(device)
        for dim in range(16):
            result.append(
                evaluate_class_capsule(autoencoder, X, y, delta=delta, dim=dim)
            )
        break
    # NOTE(review): the plotting loop below reuses `i`, shadowing the
    # parameter; harmless (the parameter is no longer needed) but confusing.
    fg, axs = plt.subplots(nrows=16, ncols=len(result[0]), gridspec_kw={'hspace': 0, 'wspace': 0.1}, figsize=(13,13))
    fg.suptitle(f'research for each dim in capsule, delta={delta}')
    for i in range(16):
        for j in range(len(result[0])):
            axs[i, j].imshow(result[i][j].squeeze().cpu(), cmap='binary')
            axs[i, j].axis('off')
    plt.show()
```
## 一大波结果
```
# Sweep all 16 capsule dimensions for the first 10 test images, at two step sizes.
for i in range(10):
    research_for_class_capsule_for(i, 0.05)
for i in range(10):
    research_for_class_capsule_for(i, 0.1)
```
# BUG 记录
- `RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same`
原因:(1)初级胶囊层的32个卷积单元要放在一个nn.ModuleList里(2)动态路由中的b和c需要与输入在同一个cpu或gpu中。
- `one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [1, 10, 1152, 1, 1]], which is output 0 of AddBackward0, is at version 6; expected version 5 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True)` 这个报错说明的问题是一些需要进行梯度计算的参数在计算后发生了更改,比如类似`b[0]=1`这样的inplace操作。发生了这个错误,首先使用`torch.autograd.set_detect_anomaly(True)`看看能不能定位到错误位置。不过我记得修改了什么地方,查明BUG原因:在我实现的动态路由里,不能用`+=`来更新b,具体如下
```python
# raises the in-place-modification error
b += torch.mean(torch.matmul(v[:,:,None,None,:], x), dim=0,keepdim=True)
# works
b = b + torch.mean(torch.matmul(v[:,:,None,None,:], x), dim=0,keepdim=True)
# works
b = b + v[:,:,None,None,:].matmul(x).mean(dim=0,keepdim=True)
```
可能的一点解释就是这样的方法是这种操作让pytorch以为是原位操作,才会报的错
- `RuntimeError: Function 'PowBackward0' returned nan values in its 0th output.`
# 思考
- 训练速度,我实现版本的速度比较慢,比[1]几乎慢6倍,这个问题需要检查下模型,还有较大改进空间
# 参考资料
[1] https://github.com/gchochla/capsules-utils
[2] https://github.com/gram-ai/capsule-networks
| github_jupyter |
```
import networkx as nx
import matplotlib.pyplot as plt
from collections import Counter
from custom import custom_funcs as cf
import warnings
warnings.filterwarnings('ignore')
from circos import CircosPlot
%load_ext autoreload
%autoreload 2
%matplotlib inline
```
## Load Data
We will load the [sociopatterns network](http://konect.uni-koblenz.de/networks/sociopatterns-infectious) data for this notebook. From the Konect website:
> This network describes the face-to-face behavior of people during the exhibition INFECTIOUS: STAY AWAY in 2009 at the Science Gallery in Dublin. Nodes represent exhibition visitors; edges represent face-to-face contacts that were active for at least 20 seconds. Multiple edges between two nodes are possible and denote multiple contacts. The network contains the data from the day with the most interactions.
```
# Load the sociopatterns network data.
# (Face-to-face contact graph from the Konect "sociopatterns-infectious" dataset.)
G = cf.load_sociopatterns_network()
```
# Hubs: How do we evaluate the importance of some individuals in a network?
Within a social network, there will be certain individuals which perform certain important functions. For example, there may be hyper-connected individuals who are connected to many, many more people. They would be of use in the spreading of information. Alternatively, if this were a disease contact network, identifying them would be useful in stopping the spread of diseases. How would one identify these people?
## Approach 1: Neighbors
One way we could compute this is to find out the number of people an individual is connected to. NetworkX lets us do this by giving us a `G.neighbors(node)` function.
```
# Let's find out the number of neighbors that individual #7 has.
# (NetworkX 1.x: G.neighbors returns a list, so len() works directly.)
len(G.neighbors(7))
```
### Exercise
Can you create a ranked list of the importance of each individual, based on the number of neighbors they have?
Hint: One suggested output would be a list of tuples, where the first element in each tuple is the node ID (an integer number), and the second element is the number of neighbors that it has.
Hint: Python's `sorted(iterable, key=lambda x:...., reverse=True)` function may be of help here.
```
# Possible Answers:
# sorted(G.nodes(), key=lambda x:len(G.neighbors(x)), reverse=True)
# Rank nodes by neighbor count (descending), keeping the neighbor lists alongside.
sorted([(n, G.neighbors(n)) for n in G.nodes()], key=lambda x: len(x[1]), reverse=True)
```
## Approach 2: Degree Centrality
The number of other nodes that one node is connected to is a measure of its centrality. NetworkX implements a **degree centrality**, which is defined as the number of neighbors that a node has normalized to the number of individuals it could be connected to in the entire graph. This is accessed by using `nx.degree_centrality(G)`
```
# Degree centrality: neighbor count normalized by the n-1 possible neighbors.
nx.degree_centrality(G)
```
If you inspect the dictionary closely, you will find that node 51 is the one that has the highest degree centrality, just as we had measured by counting the number of neighbors.
There are other measures of centrality, namely **betweenness centrality**, **flow centrality** and **load centrality**. You can take a look at their definitions on the NetworkX API docs and their cited references. You can also define your own measures if those don't fit your needs, but that is an advanced topic that won't be dealt with here.
The NetworkX API docs that document the centrality measures are here: http://networkx.readthedocs.io/en/networkx-1.11/reference/algorithms.centrality.html?highlight=centrality#module-networkx.algorithms.centrality
### Exercises
The following exercises are designed to get you familiar with the concept of "distribution of metrics" on a graph.
1. Can you create a histogram of the distribution of degree centralities?
2. Can you create a histogram of the distribution of number of neighbors?
3. Can you create a scatterplot of the degree centralities against number of neighbors?
4. If I have `n` nodes, then how many possible edges are there in total, assuming self-edges are allowed? What if self-edges are not allowed?
Hint: You may want to use:
plt.hist(list_of_values)
and
plt.scatter(x_values, y_values)
Hint: You can access the dictionary `.keys()` and `.values()` and cast them as a list.
If you know the Matplotlib API, feel free to get fancy :).
```
# Possible Answers:
fig = plt.figure(0)
# Get a list of degree centrality scores for all of the nodes.
degree_centralities = list(nx.degree_centrality(G).values())
# Plot the histogram of degree centralities.
plt.hist(degree_centralities)
# Set the plot title.
plt.title('Degree Centralities')
# Histogram of raw neighbor counts.
fig = plt.figure(1)
neighbors = [len(G.neighbors(node)) for node in G.nodes()]
plt.hist(neighbors)
# plt.yscale('log')
plt.title('Number of Neighbors')
# Degree centrality is neighbor count / (n-1), so this scatter is a straight line.
fig = plt.figure(2)
plt.scatter(degree_centralities, neighbors, alpha=0.1)
plt.xlabel('Degree Centralities')
plt.ylabel('Number of Neighbors')
```
### Exercise
Before we move on to paths in a network, see if you can use the Circos plot to visualize the network.
```
from circos import CircosPlot
import numpy as np
# Circos layout: nodes placed on a circle (sorted by ID), edges drawn as chords.
nodes = sorted(G.nodes())
edges = G.edges()
edgeprops = dict(alpha=0.1)
# Color nodes along the viridis colormap in node order.
nodecolor = plt.cm.viridis(np.arange(len(nodes)) / len(nodes)) # be sure to use viridis!
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
c = CircosPlot(nodes, edges, radius=10, ax=ax, fig=fig, edgeprops=edgeprops, nodecolor=nodecolor)
c.draw()
plt.savefig('images/sociopatterns.png', dpi=300)
```
What can you deduce about the structure of the network, based on this visualization?
Nodes are sorted by ID. Nodes are more connected to proximal rather than distal nodes. The data are based on people streaming through an enclosed space, so it makes sense that people are mostly connected to others proximal in order, but occasionally some oddballs stick around.
# Paths in a Network
Graph traversal is akin to walking along the graph, node by node, restricted by the edges that connect the nodes. Graph traversal is particularly useful for understanding the local structure (e.g. connectivity, retrieving the exact relationships) of certain portions of the graph and for finding paths that connect two nodes in the network.
Using the synthetic social network, we will figure out how to answer the following questions:
1. How long will it take for a message to spread through this group of friends? (making some assumptions, of course)
2. How do we find the shortest path to get from individual A to individual B?
## Shortest Path
Let's say we wanted to find the shortest path between two nodes. How would we approach this? One approach is what one would call a **breadth-first search** (http://en.wikipedia.org/wiki/Breadth-first_search). While not necessarily the fastest, it is the easiest to conceptualize.
The approach is essentially as such:
1. Begin with a queue of the starting node.
2. Add the neighbors of that node to the queue.
1. If destination node is present in the queue, end.
2. If destination node is not present, proceed.
3. For each node in the queue:
1. Remove node from the queue.
2. Add neighbors of the node to the queue. Check if destination node is present or not.
3. If destination node is present, end. <!--Credit: @cavaunpeu for finding bug in pseudocode.-->
4. If destination node is not present, continue.
### Exercise
Try implementing this algorithm in a function called `path_exists(node1, node2, G)`.
The function should take in two nodes, `node1` and `node2`, and the graph `G` that they belong to, and return a Boolean that indicates whether a path exists between those two nodes or not. For convenience, also print out whether a path exists or not between the two nodes.
```
def path_exists(node1, node2, G):
    """
    This function checks whether a path exists between two nodes (node1, node2) in graph G.

    Breadth-first search over G.neighbors(...), tracking visited nodes to
    prevent infinite loops on cyclic graphs.

    Special thanks to @ghirlekar for suggesting that we keep track of the "visited nodes" to
    prevent infinite loops from happening.
    Reference: https://github.com/ericmjl/Network-Analysis-Made-Simple/issues/3
    """
    # Fix: the original iterated over `queue` while calling queue.remove()
    # inside the loop body, which shifts later elements and can skip nodes
    # entirely; it also had an unreachable `break` after `return True`.
    visited_nodes = {node1}
    queue = [node1]
    while queue:
        node = queue.pop(0)  # FIFO -> breadth-first order
        neighbors = list(G.neighbors(node))
        if node2 in neighbors:
            print('Path exists between nodes {0} and {1}'.format(node1, node2))
            return True
        for n in neighbors:
            if n not in visited_nodes:
                visited_nodes.add(n)
                queue.append(n)
    print('Path does not exist between nodes {0} and {1}'.format(node1, node2))
    return False
# Test your answer below
def test_path_exists():
    """Smoke-test path_exists on the loaded sociopatterns graph G."""
    assert path_exists(18, 5, G)
    assert path_exists(22, 318, G)
test_path_exists()
```
If you write an algorithm that runs breadth-first, the recursion pattern is likely to follow what we have done above. If you do a depth-first search (i.e. DFS), the recursion pattern is likely to look a bit different. Take it as a challenge exercise to figure out how a DFS looks like.
Meanwhile... thankfully, NetworkX has a function for us to use, titled `has_path`, so we don't have to implement this on our own. :-)
http://networkx.readthedocs.io/en/networkx-1.11/reference/generated/networkx.algorithms.shortest_paths.generic.has_path.html
```
# NetworkX's built-in connectivity check between two nodes.
nx.has_path(G, 400, 1)
```
NetworkX also has other shortest path algorithms implemented.
http://networkx.readthedocs.io/en/networkx-1.11/reference/algorithms.shortest_paths.html
We can build upon these to build our own graph query functions. Let's see if we can trace the shortest path from one node to another.
`nx.shortest_path(G, source, target)` gives us a list of nodes that exist within one of the shortest paths between the two nodes. (Not all paths are guaranteed to be found.)
```
# One shortest path (as an ordered node list) from node 4 to node 400.
nx.shortest_path(G, 4, 400)
```
Incidentally, the node list is in order as well.
### Exercise
Write a function that extracts the edges in the shortest path between two nodes and puts them into a new graph, and draws it to the screen. It should also return an error if there is no path between the two nodes.
Hint: You may want to use `G.subgraph(iterable_of_nodes)` to extract just the nodes and edges of interest from the graph `G`. You might want to use the following lines of code somewhere:
newG = G.subgraph(nodes_of_interest)
nx.draw(newG)
newG will be comprised of the nodes of interest and the edges that connect them.
```
# Possible Answer:
def extract_path_edges(G, source, target):
    """Return the subgraph of G induced by one shortest source->target path.

    Raises:
        Exception: if no path connects source and target.
    """
    # Guard clause: bail out early when the nodes are disconnected.
    if not nx.has_path(G, source, target):
        raise Exception('Path does not exist between nodes {0} and {1}.'.format(source, target))
    path_nodes = nx.shortest_path(G, source, target)
    return G.subgraph(path_nodes)
# Draw the shortest-path subgraph between nodes 4 and 400.
newG = extract_path_edges(G, 4, 400)
nx.draw(newG, with_labels=True)
```
### Challenge Exercise (at home)
These exercises below are designed to let you become more familiar with manipulating and visualizing subsets of a graph's nodes.
Write a function that extracts only node, its neighbors, and the edges between that node and its neighbors as a new graph. Then, draw the new graph to screen.
```
# Possible Answer
def extract_neighbor_edges(G, node):
    """Build a new graph containing only `node`, its neighbors, and the
    edges between `node` and those neighbors (edge-scan implementation)."""
    nbrs = G.neighbors(node)
    star = nx.Graph()
    for u, v in G.edges():
        touches_as_u = u == node and v in nbrs
        touches_as_v = u in nbrs and v == node
        if touches_as_u or touches_as_v:
            star.add_edge(u, v)
    return star
fig = plt.figure(0)
# Star subgraph around node 19 (edge-scan implementation).
newG = extract_neighbor_edges(G, 19)
nx.draw(newG, with_labels=True)
def extract_neighbor_edges2(G, node):
    """Same result as extract_neighbor_edges, but iterates the neighbor
    list and membership-tests the edge list instead of scanning every edge."""
    star = nx.Graph()
    for nbr in G.neighbors(node):
        if (node, nbr) in G.edges() or (nbr, node) in G.edges():
            star.add_edge(node, nbr)
    return star
fig = plt.figure(1)
# Same star subgraph via neighbor iteration.
newG = extract_neighbor_edges2(G, 19)
nx.draw(newG, with_labels=True)
```
### Challenge Exercises (at home)
Let's try some other problems that build on the NetworkX API. Refer to the following for the relevant functions:
http://networkx.readthedocs.io/en/networkx-1.11/reference/algorithms.shortest_paths.html
1. If we want a message to go from one person to another person, and we assume that the message takes 1 day for the initial step and 1 additional day per step in the transmission chain (i.e. the first step takes 1 day, the second step takes 2 days etc.), how long will the message take to spread from any two given individuals? Write a function to compute this.
2. What is the distribution of message spread times from person to person? What about chain lengths?
```
# Possible answer to Question 1:
# All we need here is the length of the path.
def compute_transmission_time(G, source, target):
    """Total transmission time from source to target when step k of the
    chain takes k days: 1 + 2 + ... + L for a shortest path of length L.
    (Replaces the leftover "Fill in code below." placeholder docstring.)
    """
    length = nx.shortest_path_length(G, source, target)
    # Arithmetic series: sum(1..L) == L*(L+1)//2.
    return length * (length + 1) // 2
compute_transmission_time(G, 14, 4)
# Possible answer to Question 2:
# We need to know the length of every single shortest path between every pair of nodes.
# If we don't put a source and target into the nx.shortest_path_length(G) function call, then
# we get a dictionary of dictionaries, where all source-->target-->lengths are shown.
lengths = []
times = []
for source, sink_length in nx.shortest_path_length(G).items():
    for sink, length in sink_length.items():
        times.append(sum(range(1, length+1)))
        lengths.append(length)
# Bar charts of the chain-length and transmission-time distributions.
plt.figure(0)
plt.bar(Counter(lengths).keys(), Counter(lengths).values())
plt.figure(1)
plt.bar(Counter(times).keys(), Counter(times).values())
```
# Hubs Revisited
It looks like individual 19 is an important person of some sorts - if a message has to be passed through the network in the shortest time possible, then usually it'll go through person 19. Such a person has a high **betweenness centrality**. This is implemented as one of NetworkX's centrality algorithms. Check out the Wikipedia page for a further description.
http://en.wikipedia.org/wiki/Betweenness_centrality
```
# Unnormalized betweenness centrality for every node, shown as a bar chart.
btws = nx.betweenness_centrality(G, normalized=False)
plt.bar(btws.keys(), btws.values())
```
### Exercise
Plot betweenness centrality against degree centrality for the network data.
```
# Possible answer:
deg_centrality = nx.degree_centrality(G)
btw_centrality = nx.betweenness_centrality(G)
# Sort both dicts by node key so the two value lists line up pairwise.
deg_cent_sorted = [i[1] for i in sorted(zip(deg_centrality.keys(), deg_centrality.values()))]
btw_cent_sorted = [i[1] for i in sorted(zip(btw_centrality.keys(), btw_centrality.values()))]
plt.scatter(deg_cent_sorted, btw_cent_sorted)
plt.xlabel('degree')
plt.ylabel('betweenness')  # fixed typo: was 'betweeness'
plt.title('centrality scatterplot')
```
**Think about it...**
From the scatter plot, we can see that the dots don't all fall on the same line. Degree centrality and betweenness centrality don't necessarily correlate. Can you think of scenarios where this is true?
What would be the degree centrality and betweenness centrality of the middle connecting node in the **barbell graph** below?
```
# A barbell graph: two 5-cliques joined through a single bridge node.
nx.draw(nx.barbell_graph(5, 1))
```
| github_jupyter |
```
print("Bismillahir Rahmanir Rahim")
```
## Imports and Paths
```
from IPython.display import display, HTML
from lime.lime_tabular import LimeTabularExplainer
from pprint import pprint
from scipy.spatial.distance import pdist, squareform
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics.pairwise import cosine_similarity
from scipy import spatial
%matplotlib inline
import glob
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import pathlib
import sklearn
import seaborn as sns
import statsmodels
import eli5
import lime
import shap
shap.initjs()
```
# 1. Predictive Models
## Load and preprocess data
Train/test split = 0.80/0.20
```
# Set the seed for experiments and interpretations (reproducibility).
np.random.seed(111)
project_path = pathlib.Path.cwd().parent.parent.parent
modelling_result_path = str(project_path) + '/datasets/modelling-results/'
plots_path = str(project_path) + '/plots/'
# print(project_path)
from sklearn.datasets import load_iris
iris = load_iris()
# 80/20 train/test split of the iris features and targets.
train, test, labels_train, labels_test = train_test_split(iris.data, iris.target, train_size=0.80)
x_testset = test
feature_names = iris.feature_names
target_names = iris.target_names
total_targets = len(target_names) # total number of unique target names
unique_targets = np.unique(iris.target) # LIME only takes integer
# Map integer target -> human-readable class name.
targets_labels = dict(zip(unique_targets, target_names))
print("Feature names", feature_names)
print("Target names", target_names)
print("Number of uniques label or target names", unique_targets)
print("Target labels as unique target (key) with target names (value)", targets_labels)
print("Training record", train[0:1])
print("Label for training record", labels_train[0:1])
```
## Train and evaluate models.
Train Random Forest model so these can be used as black box models when evaluating explanations methods.
### Fit Random Forest
```
# 500-tree random forest; balanced_subsample reweights classes per bootstrap sample.
rf = RandomForestClassifier(n_estimators=500, class_weight='balanced_subsample')
rf.fit(train, labels_train)
```
### Predict using random forest model
```
labels_pred_rf = rf.predict(test)
score_rf = metrics.accuracy_score(labels_test, labels_pred_rf)
print("\nRandom Forest accuracy score.", score_rf)
# Class probabilities and hard predictions for the first five test rows.
predict_proba_rf = rf.predict_proba(test[:5])
print("\nRandom Forest predict probabilities\n\n", predict_proba_rf)
predict_rf = rf.predict(test[:5])
print("\nRandom Forest predictions", predict_rf)
```
### Classification report of random forest
```
# Per-class precision/recall/F1 for the random forest predictions.
report_rf = classification_report(labels_test, labels_pred_rf, target_names=target_names)
# Fix: missing space in the printed heading (was "Forestclassification").
print("Random Forest classification report.")
print(report_rf)
```
### Classification report of random forest displayed as dataframe
```
report_rf = classification_report(labels_test, labels_pred_rf, target_names=target_names, output_dict=True)
report_rf = pd.DataFrame(report_rf).transpose().round(2)
# Keep only the per-class rows and drop the 'support' column.
report_rf = report_rf.iloc[:total_targets,:-1]
display(report_rf)
```
### Average F1-score of random forest model
```
# Macro-average F1 over the per-class rows kept above.
avg_f1_rf = report_rf['f1-score'].mean()
print("Random Forest average f1-score", avg_f1_rf)
```
### Confusion matrix of random forest model
```
matrix_rf = confusion_matrix(labels_test, labels_pred_rf)
# NOTE(review): sklearn's confusion_matrix has true labels as rows and
# predictions as columns; the transpose below swaps that orientation before
# relabeling — confirm the intended row/column meaning when reading the table.
matrix_rf = pd.DataFrame(matrix_rf, columns=target_names).transpose()
matrix_rf.columns = target_names
display(matrix_rf)
```
### Combine confusion matrix and classification report of random forest model
```
# Side-by-side table: confusion matrix plus per-class metrics.
matrix_report_rf = pd.concat([matrix_rf, report_rf], axis=1)
display(matrix_report_rf)
```
### Saving confusion matrix and classification report of random forest model into csv
It is because CSV can be used to draw table in LaTex easily.
```
# Persist the combined table (CSV is easy to turn into a LaTeX table).
filename = 'iris_matrix_report_rf.csv'
matrix_report_rf.to_csv(modelling_result_path + filename, index=True)
```
### Extract target names for prediction of random forest model
```
# Map integer predictions to class names with a comprehension
# (replaces the manual append loop).
labels_names_pred_rf = [targets_labels[label] for label in labels_pred_rf]
print("Random Forest predicted targets and their names.\n")
print(labels_pred_rf)
print(labels_names_pred_rf)
```
# 2. Explanation Models
## a. Interpreting models using LIME
### LIME util functions
```
def lime_explanations(index, x_testset, explainer, model, unique_targets, class_predictions):
    """Run LIME on a single test instance and extract the feature-weight
    vector for the class the model predicted.

    Args:
        index: row index into x_testset.
        x_testset: 2-D collection of test instances.
        explainer: a LimeTabularExplainer (or compatible object).
        model: fitted classifier exposing predict_proba.
        unique_targets: all integer class labels.
        class_predictions: predicted label per test instance.
    Returns:
        ((weights for the predicted class, LIME local fit score), explanation)
    """
    instance = x_testset[index]
    explanation = explainer.explain_instance(
        instance,
        model.predict_proba,
        labels=unique_targets,
        top_labels=None,
        num_features=len(instance),
        num_samples=6000,
    )
    # class_predictions holds the predicted class label for every instance.
    predicted_label = class_predictions[index]
    weights = explanation.as_map()[predicted_label]
    return (weights, explanation.score), explanation
def explanation_to_dataframe(index, x_testset, explainer, model, unique_targets, class_predictions, dataframe):
    """Store one instance's LIME weights (ordered by feature index) plus the
    LIME fit score as row `index` of `dataframe`.

    Returns:
        (dataframe, explanation object)
    """
    (weights, score), explanation = lime_explanations(
        index, x_testset, explainer, model, unique_targets, class_predictions
    )
    # Sort (feature, weight) pairs by feature index so columns line up.
    ordered_pairs = sorted(weights)
    row = [weight for _, weight in ordered_pairs]
    row.append(score)
    dataframe.loc[index] = row
    return dataframe, explanation
""" Define LIME Explainer
"""
explainer_lime = LimeTabularExplainer(train,
mode = 'classification',
training_labels = labels_train,
feature_names=feature_names,
verbose=False,
class_names=target_names,
feature_selection='auto',
discretize_continuous=True)
from tqdm import tqdm
col_names = list(feature_names)
col_names.append('lime_score')
```
### Interpret random forest model for all test instances using LIME
```
# Explain every test instance with LIME against the random forest.
explanations_lime_rf = pd.DataFrame(columns=col_names)
for idx in tqdm(range(len(test))):
    explanations_lime_rf, exp = explanation_to_dataframe(
        idx,
        test,
        explainer_lime,
        rf,              # random forest model
        unique_targets,
        labels_pred_rf,  # random forest predictions
        explanations_lime_rf,
    )
print("LIME explanations on random forest.")
display(explanations_lime_rf.head())
display(explanations_lime_rf.iloc[:,:-1].head(1))
```
## b. Interpreting models using SHAP
### SHAP util functions
```
def shapvalue_to_dataframe(test, labels_pred, shap_values, feature_names):
    """Collect, for each test instance, the SHAP values of its predicted class.

    Args:
        test: test instances (only its length is used here).
        labels_pred: predicted class label per instance.
        shap_values: per-class list of per-instance SHAP vectors.
        feature_names: column names for the resulting frame.
    Returns:
        DataFrame of shape (len(test), len(feature_names)).
    """
    rows = [shap_values[labels_pred[i]][i] for i in range(len(test))]
    df_exp_shap = pd.DataFrame(rows)
    df_exp_shap.columns = feature_names
    return df_exp_shap
```
### Interpret random forest model for all test instances using SHAP
```
# TreeExplainer gives exact SHAP values for tree ensembles; summary plot of feature impacts.
shap_values_rf = shap.TreeExplainer(rf).shap_values(test)
shap.summary_plot(shap_values_rf, test, feature_names=feature_names)
```
### Extracting SHAP values as explanations
**_shap_values_** returns 3D array in a form of (num_classes, num_test_instance, num_features) e.g. for iris dataset the 3D array shape would be (3, 30, 4)
### Extract explanations (SHAP values) of random forest predictions.
```
# SHAP values of each instance's predicted class, as a tidy DataFrame.
explanations_shap_rf = shapvalue_to_dataframe(
    test, labels_pred_rf, shap_values_rf, feature_names
)
display(explanations_shap_rf.head())
display(explanations_shap_rf.iloc[:,:].head(1))
```
# 3. Local lipschitz estimation as a stability measure
### Local lipschitz estimation util functions
```
def norm(Xs, x0, norm=2):
    """Distance ||x0 - Xs|| under the given vector-norm order (default L2).

    https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html
    """
    return np.linalg.norm(x0 - Xs, norm)
def neighborhood_with_euclidean(x_points, anchor_index, radius):
    """Indices of all points strictly within `radius` (Euclidean) of the
    anchor point; the anchor itself is included (distance 0).

    http://mathonline.wikidot.com/open-and-closed-balls-in-euclidean-space
    """
    anchor = x_points[anchor_index]
    squared_diffs = (anchor - x_points.tolist()) ** 2
    distances = np.sqrt(np.sum(squared_diffs, axis=1))
    # Strict inequality: this is an open ball.
    return [int(i) for i in np.flatnonzero(distances < radius)]
def neighborhood_with_KDTree(x_points, anchor_index, radius):
    """Indices of points within radius*sqrt(d) of the anchor via a KD-tree.

    The radius is scaled by sqrt(dimension), as in the Euclidean variant's
    caller convention.
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_ball_point.html
    """
    kd_tree = spatial.KDTree(x_points)
    scaled_radius = radius * np.sqrt(len(x_points[anchor_index]))
    return kd_tree.query_ball_point(x_points[anchor_index], scaled_radius)
```
### Local Lipschitz of explanation methods
```
def lipschitz_formula(nearby_points, nearby_points_exp, anchorX, anchorX_exp):
    """Per-neighbor Lipschitz ratios ||exp(x)-exp(x')|| / ||x-x'|| and the
    index of the neighbor attaining the maximum ratio.

    Relies on the module-level `norm` helper.
    """
    input_dists = np.apply_along_axis(norm, 1, nearby_points, anchorX)
    exp_dists = np.apply_along_axis(norm, 1, nearby_points_exp, anchorX_exp)
    ratios = exp_dists / input_dists
    return ratios, np.argmax(ratios)
def lipschitz_estimate(anchorX, x_points, explanations_x_points, anchor_index, neighborhood_indices):
    """Local Lipschitz ratios of the explanation map around one anchor point.

    Returns:
        (ratios over the neighborhood, argmax index into the neighborhood).
    """
    # Explanation of the anchor point itself.
    anchor_exp = explanations_x_points[anchor_index]
    # Neighborhood points and their explanations.
    neighbor_pts = x_points[neighborhood_indices]
    neighbor_exps = explanations_x_points[neighborhood_indices]
    # Delegate the ratio computation to the shared formula helper.
    return lipschitz_formula(neighbor_pts, neighbor_exps, anchorX, anchor_exp)
def find_lipschitz_estimates(x_points, x_points_lime_exp, x_points_shap_exp, radii):
    """Compute local Lipschitz estimates of LIME and SHAP for every anchor point.

    For each radius and anchor point: build the neighbourhood, find the
    neighbour with the largest explanation-change / input-change ratio for
    each explanation method, and record it. Anchors with an empty
    neighbourhood get sentinel values (-1 / 'NaN'). Returns the rows plus
    the matching column names, ready for a pandas DataFrame.
    """
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.apply_along_axis.html
    # https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.argmax.html
    # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_ball_point.html
    instances = []
    anchor_x_index = []
    lc_coefficient_lime = []
    x_deviation_index_lime = []
    x_deviation_index_shap = []
    lc_coefficient_shap = []
    radiuses = []
    neighborhood_size = []
    for radius in radii:
        for anchor_index in range(0, len(x_points)):
            # define neighorbood of around anchor point using radius and KDTree
            # neighborhood_indices = neighborhood_with_KDTree(x_points, anchor_index, radius)
            # define neighorbood of around anchor point using radius and Euclidean Distance
            neighborhood_indices = neighborhood_with_euclidean(x_points, anchor_index, radius)
            # remove anchor index to remove anchor point and append neighborhood_size
            neighborhood_indices.remove(anchor_index)
            neighborhood_size.append(len(neighborhood_indices))
            # append radius (it is useful column when apply filtering based on radius)
            radiuses.append(radius)
            # extract anchor point and its original index
            anchorX = x_points[anchor_index]
            instances.append(anchorX)
            anchor_x_index.append(anchor_index)
            if len(neighborhood_indices) != 0:
                # find local lipschitz estimate (lc) LIME
                anchorX_avg_norm2, anchorX_LC_argmax = lipschitz_estimate(anchorX,
                                                                          x_points,
                                                                          x_points_lime_exp,
                                                                          anchor_index,
                                                                          neighborhood_indices)
                lc_coefficient_lime.append(anchorX_avg_norm2[anchorX_LC_argmax])
                # find deviation point from anchor point LIME explanations
                deviation_point_index = neighborhood_indices[anchorX_LC_argmax]
                x_deviation_index_lime.append(deviation_point_index)
                # find local lipschitz estimate (lc) SHAP
                anchorX_avg_norm2, anchorX_LC_argmax = lipschitz_estimate(anchorX,
                                                                          x_points,
                                                                          x_points_shap_exp,
                                                                          anchor_index,
                                                                          neighborhood_indices)
                lc_coefficient_shap.append(anchorX_avg_norm2[anchorX_LC_argmax])
                # find deviation point from anchor point SHAP explanations
                deviation_point_index = neighborhood_indices[anchorX_LC_argmax]
                x_deviation_index_shap.append(deviation_point_index)
            else:
                # no neighbours inside the radius: mark row with sentinels
                lc_coefficient_lime.append(-1)
                x_deviation_index_lime.append('NaN')
                lc_coefficient_shap.append(-1)
                x_deviation_index_shap.append('NaN')
    # columns_lipschitz will be reused so to avoid confusion naming convention should remain similar
    columns_lipschitz = ['instance', 'anchor_x_index', 'lc_coefficient_lime', 'x_deviation_index_lime',
                         'lc_coefficient_shap', 'x_deviation_index_shap', 'radiuses', 'neighborhood_size']
    zippedList = list(zip(instances, anchor_x_index, lc_coefficient_lime, x_deviation_index_lime,
                          lc_coefficient_shap, x_deviation_index_shap, radiuses, neighborhood_size))
    return zippedList, columns_lipschitz
```
### Set instances, explanations and epsilon choices
```
# Instances used for the stability analysis. `test`, `pd` and `display`
# come from earlier notebook cells.
X = pd.DataFrame(test)
display(X.head().values)
x_points = X.copy().values
# Neighbourhood radii (epsilon choices); only 1.00 is evaluated here.
radii = [1.00]
# radii = [0.75, 1.00, 1.25]
```
### Lipschitz estimations
Predictive model: random forest
Explanation methods: LIME, SHAP
```
# Explanation matrices for the random-forest model, row-aligned with x_points.
# NOTE(review): the last LIME column is dropped — presumably a non-feature
# column (e.g. intercept); confirm against the cell that builds it.
print("LIME generated explanations")
X_lime_exp = explanations_lime_rf.iloc[:,:-1].copy()
display(X_lime_exp.head())
print("SHAP generated explanations")
X_shap_exp = explanations_shap_rf.iloc[:,:].copy()
display(X_shap_exp.head())
x_points_lime_exp = X_lime_exp.copy().values
x_points_shap_exp = X_shap_exp.copy().values
# One Lipschitz-estimate row per (radius, anchor point) pair.
zippedList, columns_lipschitz = find_lipschitz_estimates(x_points,
                                                         x_points_lime_exp,
                                                         x_points_shap_exp,
                                                         radii)
rf_lipschitz = pd.DataFrame(zippedList, columns=columns_lipschitz)
display(rf_lipschitz)
```
# 4. Results
## a. Selecting anchor point or point of interest to demonstrate results
Here the selection is made based on max 'lc_coefficient_lime' just to take an example point.
### Anchor point
```
# Pick the instance with the largest LIME Lipschitz estimate as the example
# anchor point used throughout the result figures below.
highest_deviation_example = rf_lipschitz.loc[rf_lipschitz['lc_coefficient_lime'].idxmax()]
display(highest_deviation_example)
print("Anchor Point")
anchor_point_index = highest_deviation_example["anchor_x_index"]
anchor_point = highest_deviation_example['instance']
print(anchor_point)
```
### Deviation point with respect to LIME explanation
```
# Neighbour whose LIME explanation deviates most from the anchor's.
print("\nDeviation Point with respect to LIME explanation")
deviation_point_lime_index = highest_deviation_example["x_deviation_index_lime"]
deviation_point_lime = rf_lipschitz['instance'][deviation_point_lime_index]
print(deviation_point_lime)
```
### Deviation point with respect to SHAP explanation
```
# Neighbour whose SHAP explanation deviates most from the anchor's.
print("\nDeviation Point with respect to SHAP explanation")
deviation_point_shap_index = highest_deviation_example["x_deviation_index_shap"]
deviation_point_shap = rf_lipschitz['instance'][deviation_point_shap_index]
print(deviation_point_shap)
```
### Anchor point and deviation point LIME explanation
```
print("Anchor Point LIME explanation")
anchor_point_lime_exp = x_points_lime_exp[anchor_point_index]
# Round to 3 decimals for readability; the rounded lists are reused by the
# plotting cells below.
anchor_point_lime_exp = [ round(elem, 3) for elem in anchor_point_lime_exp ]
print(anchor_point_lime_exp)
print("\nDeviation Point LIME explanation")
deviation_point_lime_exp = x_points_lime_exp[deviation_point_lime_index]
deviation_point_lime_exp = [ round(elem, 3) for elem in deviation_point_lime_exp ]
print(deviation_point_lime_exp)
```
### Anchor point and deviation point SHAP explanation
```
print("Anchor Point SHAP explanation")
anchor_point_shap_exp = x_points_shap_exp[anchor_point_index]
# Round to 3 decimals for readability; reused by the plotting cells below.
anchor_point_shap_exp = [ round(elem, 3) for elem in anchor_point_shap_exp ]
print(anchor_point_shap_exp)
print("\nDeviation Point SHAP explanation")
deviation_point_shap_exp = x_points_shap_exp[deviation_point_shap_index]
deviation_point_shap_exp = [ round(elem, 3) for elem in deviation_point_shap_exp ]
print(deviation_point_shap_exp)
```
## b. Preparing results for box plots
Predictive model: random forest
Epsilon: 1.00
Explanation methods: LIME, SHAP
Evaluation: Lipschitz estimations as stability
```
# Keep only anchors that actually had neighbours, then restrict to radius 1.00.
epsilon1 = rf_lipschitz.loc[rf_lipschitz['neighborhood_size'] > 0]
epsilon1 = epsilon1[epsilon1['radiuses'] == 1.00]
display(epsilon1.head())
# Aggregated (mean) Lipschitz estimate per explanation method.
epsilon1_lc_lime_aggre = np.mean(epsilon1['lc_coefficient_lime'])
epsilon1_lc_shap_aggre = np.mean(epsilon1['lc_coefficient_shap'])
print("\nLIME, epsilon 1.00, Aggregated L(x) = ", epsilon1_lc_lime_aggre)
print("SHAP, epsilon 1.00, Aggregated L(x) = ", epsilon1_lc_shap_aggre)
# Long-format frames for the seaborn box plots below (one per method).
lc_lime_df = epsilon1.loc[:, ['lc_coefficient_lime']]
lc_lime_df.rename(columns={'lc_coefficient_lime': 'Lipschitz Estimates'}, inplace=True)
lc_lime_df['method'] = 'LIME'
lc_lime_df['Dataset'] = 'Iris'
lc_shap_df = epsilon1.loc[:, ['lc_coefficient_shap']]
lc_shap_df.rename(columns={'lc_coefficient_shap': 'Lipschitz Estimates'}, inplace=True)
lc_shap_df['method'] = 'SHAP'
lc_shap_df['Dataset'] = 'Iris'
```
# 5. Visualize Results
### Highest deviation example and corresponding LIME and SHAP examples
```
# Text summary of the worst-deviation example (uses variables from the cells
# above).
print(feature_names)
print('\nAnchor Point in worst deviation case')
print(anchor_point)
print(anchor_point_lime_exp)
print(anchor_point_shap_exp)
print('\nDeviation Point in worst deviation case')
# NOTE(review): `deviation_point` is not defined anywhere in the visible
# notebook — presumably deviation_point_lime or deviation_point_shap was
# intended; confirm before running.
print(deviation_point)
print(deviation_point_lime_exp)
print(deviation_point_shap_exp)
```
## Final plot to explain deviation as instability in explanations
```
# Final 2x4 figure: each pair of panels shows a feature/value table (left)
# and the matching explanation bar chart (right). Top row: anchor point;
# bottom row: deviation points. Left half: SHAP; right half: LIME.
# Relies on notebook globals defined in earlier cells: feature_names,
# threshold, anchor_point, anchor_point_shap_exp, anchor_point_lime_exp,
# deviation_point_shap, deviation_point_lime, deviation_point_shap_exp,
# deviation_point_lime_exp and plots_path.

BLUE_ROW = ["#3DE8F7", "w"]  # feature value equal/close to the anchor's
RED_ROW = ["#FF4D4D", "w"]   # feature value that deviates from the anchor


def _feature_table(ax, values, cell_colours):
    """Render a feature/value table for one point on the given axes."""
    ax.set_title('Feature Value')
    point_df = pd.DataFrame.from_dict(dict(zip(feature_names, values)),
                                      orient='index').reset_index()
    table = ax.table(cellText=point_df.values,
                     loc='center',
                     cellColours=cell_colours,
                     colWidths=[0.3] * 2)
    table.set_fontsize(12)
    table.scale(1.5, 6)
    # Narrow the value column (column 1) for every feature row.
    cells = table.get_celld()
    for row in range(len(feature_names)):
        cells[(row, 1)].set_width(0.15)
    ax.axis('off')
    ax.axis('tight')


def _explanation_bars(ax, exp_values):
    """Render an explanation as horizontal bars split at `threshold`."""
    ax.set_title('Explanation')
    labels = feature_names[::-1]
    vals = np.array(exp_values[::-1])
    bar_width = 0.75            # the width of the bars
    positions = np.arange(len(vals))
    above = np.maximum(vals - threshold, 0)  # part above the threshold
    below = np.minimum(vals, threshold)      # part below the threshold
    ax.barh(labels, below, bar_width, color="#FF4D4D")
    ax.barh(labels, above, bar_width, color="#3DE8F7", left=below)
    ax.set_yticks(positions + bar_width / 2)


fig, axs = plt.subplots(2, 4)
fig.set_size_inches(28.5, 14.5)
# Top row: anchor point (SHAP pair, then LIME pair).
_feature_table(axs[0, 0], anchor_point, [BLUE_ROW] * 4)
_explanation_bars(axs[0, 1], anchor_point_shap_exp)
_feature_table(axs[0, 2], anchor_point, [BLUE_ROW] * 4)
_explanation_bars(axs[0, 3], anchor_point_lime_exp)
# Bottom row: deviation points; red rows mark the features that differ.
_feature_table(axs[1, 0], deviation_point_shap, [RED_ROW, BLUE_ROW, BLUE_ROW, BLUE_ROW])
_explanation_bars(axs[1, 1], deviation_point_shap_exp)
_feature_table(axs[1, 2], deviation_point_lime, [BLUE_ROW, BLUE_ROW, RED_ROW, RED_ROW])
_explanation_bars(axs[1, 3], deviation_point_lime_exp)
# Caption labels; the LIME half was mislabeled '(a)' in the original.
fig.text(0.3, 0.04, '(a) SHAP (L=0.20)', ha='center', fontsize=20, fontstyle='italic')
fig.text(0.7, 0.04, '(b) LIME (L=2.80)', ha='center', fontsize=20, fontstyle='italic')
fig.savefig(plots_path + 'experiments_figure1.png')
```
### 1. Visualize anchor point and corresponding LIME explanation
```
''' anchor point
'''
# Feature/value table for the anchor point (all rows blue = baseline).
anchor_point_dict = dict(zip(feature_names, anchor_point))
# print(anchor_point_dict)
anchor_point_columns = ['Feature', 'Value']  # NOTE: defined but never used
colors = [["#3DE8F7","w"],[ "#3DE8F7","w"], [ "#3DE8F7","w"], [ "#3DE8F7","w"]]
anchor_point_df = pd.DataFrame.from_dict(anchor_point_dict, orient='index').reset_index()
fig, ax = plt.subplots()
table = ax.table(cellText = anchor_point_df.values,
                 # colLabels = anchor_point_df.columns,
                 loc = 'center',
                 cellColours = colors,
                 colWidths=[0.3] * 2)
table.set_fontsize(10)
table.scale(1,4)
# Narrow the value column (column 1) for the four feature rows.
cellDict = table.get_celld()
cellDict[(0,1)].set_width(0.15)
cellDict[(1,1)].set_width(0.15)
cellDict[(2,1)].set_width(0.15)
cellDict[(3,1)].set_width(0.15)
ax.axis('off')
ax.axis('tight')
fig.patch.set_visible(False)
fig.tight_layout()
plt.title('Feature Value')
''' corresponding LIME explanation
'''
# Horizontal bars, features reversed so the first feature appears on top.
x = feature_names[::-1]
print(x)
y = np.array(anchor_point_lime_exp[::-1]) # anchor_x_maximise_lc_exp_lime
print(y)
fig, ax = plt.subplots()
width = 0.75 # the width of the bars
ind = np.arange(len(y)) # the x locations for the groups
# split it up
above_threshold = np.maximum(y - threshold, 0)
below_threshold = np.minimum(y, threshold)
# ax.barh(ind, y, width, color="#3DE8F7")
ax.barh(x, below_threshold, width, color="#FF4D4D") # below threshold value
ax.barh(x, above_threshold, width, color="#3DE8F7", left=below_threshold) # above threshold value
ax.set_yticks(ind+width/2)
```
### 2. Visualize anchor point and corresponding SHAP explanation
```
''' anchor point
'''
# Feature/value table for the anchor point (all rows blue = baseline).
anchor_point_dict = dict(zip(feature_names, anchor_point))
colors = [["#3DE8F7","w"],[ "#3DE8F7","w"], [ "#3DE8F7","w"], [ "#3DE8F7","w"]]
anchor_point_df = pd.DataFrame.from_dict(anchor_point_dict, orient='index').reset_index()
fig, ax = plt.subplots()
table = ax.table(cellText = anchor_point_df.values,
                 # colLabels = anchor_point_df.columns,
                 loc = 'center',
                 cellColours = colors,
                 colWidths=[0.3] * 2)
table.set_fontsize(10)
table.scale(1,4)
# Narrow the value column (column 1) for the four feature rows.
cellDict = table.get_celld()
cellDict[(0,1)].set_width(0.15)
cellDict[(1,1)].set_width(0.15)
cellDict[(2,1)].set_width(0.15)
cellDict[(3,1)].set_width(0.15)
ax.axis('off')
ax.axis('tight')
fig.patch.set_visible(False)
fig.tight_layout()
plt.title('Feature Value')
# NOTE(review): the marker string below says "LIME" but this cell plots the
# SHAP explanation (anchor_point_shap_exp) — copy-paste artefact.
''' corresponding LIME explanation
'''
x = feature_names[::-1]
print(x)
y = np.array(anchor_point_shap_exp[::-1]) # anchor_x_maximise_lc_exp_lime
print(y)
fig, ax = plt.subplots()
width = 0.75 # the width of the bars
ind = np.arange(len(y)) # the x locations for the groups
# split it up
above_threshold = np.maximum(y - threshold, 0)
below_threshold = np.minimum(y, threshold)
# ax.barh(ind, y, width, color="#3DE8F7")
ax.barh(x, below_threshold, width, color="#FF4D4D") # below threshold value
ax.barh(x, above_threshold, width, color="#3DE8F7", left=below_threshold) # above threshold value
ax.set_yticks(ind+width/2)
plt.title('Explanation')
```
### 3. Visualize deviation point and corresponding LIME explanation
```
''' anchor point
'''
# NOTE(review): `deviation_point` is not defined in the visible notebook —
# deviation_point_lime was presumably intended for this LIME section; confirm.
deviation_point_dict = dict(zip(feature_names, deviation_point))
# print(anchor_point_dict)
deviation_point_columns = ['Feature', 'Value']  # NOTE: defined but never used
# Red rows flag the features that differ from the anchor point.
colors = [["#3DE8F7","w"],[ "#3DE8F7","w"], [ "#FF4D4D","w"], [ "#FF4D4D","w"]]
deviation_point_df = pd.DataFrame.from_dict(deviation_point_dict, orient='index').reset_index()
# deviation_point_df.rename(columns={'index': 'Feature', 0: 'Value' }, inplace=True)
fig, ax = plt.subplots()
table = ax.table(cellText = deviation_point_df.values,
                 # colLabels = deviation_point_df.columns,
                 loc = 'center',
                 cellColours = colors,
                 colWidths=[0.3] * 2)
table.set_fontsize(10)
table.scale(1,4)
# Narrow the value column (column 1) for the four feature rows.
cellDict = table.get_celld()
cellDict[(0,1)].set_width(0.15)
cellDict[(1,1)].set_width(0.15)
cellDict[(2,1)].set_width(0.15)
cellDict[(3,1)].set_width(0.15)
ax.axis('off')
ax.axis('tight')
fig.patch.set_visible(False)
fig.tight_layout()
plt.title('Feature Value')
''' corresponding LIME explanation
'''
x = feature_names[::-1]
print(x)
y = np.array(deviation_point_lime_exp[::-1]) # anchor_x_maximise_lc_exp_lime
print(y)
fig, ax = plt.subplots()
width = 0.75 # the width of the bars
ind = np.arange(len(y)) # the x locations for the groups
# split it up
above_threshold = np.maximum(y - threshold, 0)
below_threshold = np.minimum(y, threshold)
# ax.barh(ind, y, width, color="#3DE8F7")
ax.barh(x, below_threshold, width, color="#FF4D4D") # below threshold value
ax.barh(x, above_threshold, width, color="#3DE8F7", left=below_threshold) # above threshold value
ax.set_yticks(ind+width/2)
plt.title('Explanation')
# for key, cell in cellDict.items():
#     print (str(key[0])+", "+ str(key[1])+"\t"+str(cell.get_text()))
```
### 4. Visualize deviation point and corresponding SHAP explanation
```
''' anchor point
'''
# NOTE(review): `deviation_point` is not defined in the visible notebook —
# deviation_point_shap was presumably intended for this SHAP section; confirm.
deviation_point_dict = dict(zip(feature_names, deviation_point))
# print(anchor_point_dict)
deviation_point_columns = ['Feature', 'Value']  # NOTE: defined but never used
colors = [["#3DE8F7","w"],[ "#3DE8F7","w"], [ "#3DE8F7","w"], [ "#3DE8F7","w"]]
deviation_point_df = pd.DataFrame.from_dict(deviation_point_dict, orient='index').reset_index()
# deviation_point_df.rename(columns={'index': 'Feature', 0: 'Value' }, inplace=True)
fig, ax = plt.subplots()
table = ax.table(cellText = deviation_point_df.values,
                 # colLabels = deviation_point_df.columns,
                 loc = 'center',
                 cellColours = colors,
                 colWidths=[0.3] * 2)
table.set_fontsize(10)
table.scale(1,4)
# Narrow the value column (column 1) for the four feature rows.
cellDict = table.get_celld()
cellDict[(0,1)].set_width(0.15)
cellDict[(1,1)].set_width(0.15)
cellDict[(2,1)].set_width(0.15)
cellDict[(3,1)].set_width(0.15)
ax.axis('off')
ax.axis('tight')
fig.patch.set_visible(False)
fig.tight_layout()
plt.title('Feature Value')
# NOTE(review): the marker string below says "LIME" but this cell plots the
# SHAP explanation (deviation_point_shap_exp) — copy-paste artefact.
''' corresponding LIME explanation
'''
x = feature_names[::-1]
print(x)
y = np.array(deviation_point_shap_exp[::-1]) # anchor_x_maximise_lc_exp_lime
print(y)
fig, ax = plt.subplots()
width = 0.75 # the width of the bars
ind = np.arange(len(y)) # the x locations for the groups
# split it up
above_threshold = np.maximum(y - threshold, 0)
below_threshold = np.minimum(y, threshold)
# ax.barh(ind, y, width, color="#3DE8F7")
ax.barh(x, below_threshold, width, color="#FF4D4D") # below threshold value
ax.barh(x, above_threshold, width, color="#3DE8F7", left=below_threshold) # above threshold value
ax.set_yticks(ind+width/2)
plt.title('Explanation')
```
### Visualize lipschitz estimations for all test instances
```
# Combine LIME and SHAP estimates and draw grouped box plots.
# NOTE(review): DataFrame.append was removed in pandas 2.0 — switch to
# pd.concat([lc_lime_df, lc_shap_df]) when upgrading pandas.
df = lc_lime_df.append(lc_shap_df)
# NOTE(review): both calls draw on the same current axes, so the second
# (grouped) boxplot is drawn over the first — confirm only one was intended.
ax = sns.boxplot(x='method', y="Lipschitz Estimates", data=df)
ax = sns.boxplot(x="Dataset", y="Lipschitz Estimates",
                 hue="method",
                 data=df)
sns.despine(offset=10, trim=True)
```
### LIME visualizations by single points
```
# LIME visualizations for single points. `train`, `labels_train`,
# `target_names`, `LR_iris`, `anchor_index`, `similar_point_index` and
# `labels_pred_lr` come from cells not shown here — confirm execution order.
explainer_lime = LimeTabularExplainer(train,
                                      mode = 'classification',
                                      training_labels = labels_train,
                                      feature_names=feature_names,
                                      verbose=False,
                                      class_names=target_names,
                                      feature_selection='auto',
                                      discretize_continuous=True)
# Explain the anchor point with the logistic-regression model.
x_instance = test[anchor_index]
LR_exp_lime = explainer_lime.explain_instance(x_instance,
                                              LR_iris.predict_proba,
                                              labels=np.unique(iris.target),
                                              top_labels=None,
                                              num_features=len(x_instance),
                                              num_samples=6000)
LR_exp_lime.show_in_notebook()
# Explain a similar point for visual comparison (overwrites LR_exp_lime).
x_instance = test[similar_point_index]
LR_exp_lime = explainer_lime.explain_instance(x_instance,
                                              LR_iris.predict_proba,
                                              labels=np.unique(iris.target),
                                              top_labels=None,
                                              num_features=len(x_instance),
                                              num_samples=6000)
LR_exp_lime.show_in_notebook()
# NOTE(review): the random index is immediately overwritten with 0.
i = np.random.randint(0, test.shape[0])
i = 0
LR_exp_lime_map = LR_exp_lime.as_map()
# pprint(LR_exp_lime_map)
print('Predicted class for i:', labels_pred_lr[i])
LR_exp_lime_list = LR_exp_lime.as_list(label=labels_pred_lr[i])
# pprint(LR_exp_lime_list)
```
## Conclusions
```
# Hard-coded aggregated Lipschitz estimates collected from earlier runs
# (rows: model/method combinations; columns: presumably the three radii —
# confirm against the experiment logs). np.amin gives the per-column minimum,
# i.e. the most stable model/method for each setting.
lr_lime_iris = [2.657, 3.393, 1.495]
rf_lime_iris = [3.010, 3.783, 1.767]
lr_shap_iris = [2.716, 3.512, 1.463]
rf_shap_iris = [1.969, 3.546, 2.136]
find_min_vector = np.array([lr_lime_iris, rf_lime_iris, lr_shap_iris, rf_shap_iris])
np.amin(find_min_vector, axis=0)
# Unrelated scikit-learn Ridge demo snippet.
# NOTE(review): this clobbers the notebook-level X and y with random data,
# which affects any later cell that reads them.
from sklearn.linear_model import Ridge
import numpy as np
n_samples, n_features = 10, 5
rng = np.random.RandomState(0)
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
clf = Ridge(alpha=1.0)
clf.fit(X, y)
```
### Debugging Space
```
""" Use euclidean distance to define neighborhood points
"""
# Scratch re-derivation of neighborhood_with_euclidean for point 0.
# NOTE(review): the Ridge demo above reassigns X to a random matrix, so this
# cell's result depends on execution order — confirm X is the intended frame.
display(X.head())
points = X.values
# Radius scaled by sqrt(dimension), as in neighborhood_with_KDTree.
epsilon = 0.75 * np.sqrt(len(points[0]))
dist = (points[0] - points[1:])**2
dist = np.sum(dist, axis=1)
dist = np.sqrt(dist)
print(dist)
neighborhood_indices = []
for index in range(0, len(dist)):
    if dist[index] < epsilon:
        neighborhood_indices.append(index)
print(neighborhood_indices)
```
| github_jupyter |
# 1. Multi-layer Perceptron
### Train and evaluate a simple MLP on the Reuters newswire topic classification task.
This is a collection of documents that appeared on Reuters newswire in 1987. The documents were assembled and indexed with categories.
Dataset of 11,228 newswires from Reuters, labeled over 46 topics. As with the IMDB dataset, each wire is encoded as a sequence of word indexes (same conventions).
Each wire is encoded as a sequence of word indexes (integers). For convenience, words are indexed by overall frequency in the dataset, so that for instance the integer "3" encodes the 3rd most frequent word in the data. This allows for quick filtering operations such as: "only consider the top 10,000 most common words, but eliminate the top 20 most common words".
As a convention, "0" does not stand for a specific word, but instead is used to encode any unknown word.
Source: https://archive.ics.uci.edu/ml/datasets/Reuters-21578+Text+Categorization+Collection
```
# Reuters data
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
#Import keras
# NOTE: this cell uses the legacy Keras 1.x API (nb_words / nb_epoch /
# np_utils); newer Keras renamed these (num_words / epochs).
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.utils import np_utils
from keras.preprocessing.text import Tokenizer
max_words = 1000
batch_size = 32
nb_epoch = 5
import os
path_to_data = os.path.abspath(os.path.join('..', 'data', 'reuters.pkl'))
print('Loading data...')
(X_train, y_train), (X_test, y_test) = reuters.load_data(path_to_data, nb_words=max_words, test_split=0.2)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
nb_classes = np.max(y_train)+1  # labels are 0..max, so count = max + 1
print(nb_classes, 'classes')
print('Vectorizing sequence data...')
tokenizer = Tokenizer(nb_words=max_words)
# Binary bag-of-words: one column per word index, 1 if present in the wire.
X_train = tokenizer.sequences_to_matrix(X_train, mode='binary')
X_test = tokenizer.sequences_to_matrix(X_test, mode='binary')
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Convert class vector to binary class matrix (for use with categorical_crossentropy)')
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('Y_train shape:', Y_train.shape)
print('Y_test shape:', Y_test.shape)
print('Building model...')
# Single-hidden-layer MLP: max_words -> 512 (ReLU) -> nb_classes (softmax).
model = Sequential()
model.add(Dense(512, input_shape=(max_words,)))
model.add(Activation('relu'))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
history = model.fit(X_train, Y_train,
                    nb_epoch=nb_epoch, batch_size=batch_size,
                    verbose=1, validation_split=0.1)
score = model.evaluate(X_test, Y_test,
                       batch_size=batch_size, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
```
### Exercise
1. Add more dense layers: Try with one more dense layer. Then with two dense layers. Evaluate accuracy
2. Add dropout using the following code and evaluate accuracy:
`model.add(Dropout(0.5))`
| github_jupyter |
## Approach 1: Dynamic Programming
Throughout this document, the following packages are required:
```
import numpy as np
import scipy, math
from scipy.stats import poisson
from scipy.optimize import minimize
```
### Heterogeneous Exponential Case
The following functions implement the heterogeneous exponential case (Theorem 2.28).
Let $B_j \sim \text{Exp}(\mu_j)$ denote the service time of the $j$-th client. We then have:
\begin{align*}
p_{k\ell,i}(t)
= \mathbb{P}_{i}(N_t = \ell\mid N_0 = k)
&= \mathbb{P}\left(\sum_{j=i-k+1}^{i-\ell+2}B_j \leq t\right) - \mathbb{P}\left(\sum_{j=i-k+1}^{i-\ell+3}B_j \leq t\right) \\
&= \sum_{j=i-k+1}^{i-\ell+2}\frac{c_{i-k+1,k-\ell+1,j}}{\mu_j}(1 - e^{-\mu_j t}) - \sum_{j=i-k+1}^{i-\ell+3}\frac{c_{i-k+1,k-\ell+2,j}}{\mu_j}(1 - e^{-\mu_j t}),
\end{align*}
and the expression for $p_{k1,i}(t)$ is indeed correct.
```
# helper functions
def phi(k,l,s,mu):
    # Weighted sum of exponentials phi_{k,l}(s) (Lemma 2.23); the weights c
    # are memoized by c() below (1-based stage indices).
    return np.sum([c(k,l,j,mu) * np.exp(-mu[j-1] * s) for j in range(k,k+l+1)])
def psi(j, t, mu):
    """Return (1 - e^{-mu_j t}) / mu_j for the 1-based stage index j."""
    rate = mu[j - 1]
    return (1.0 - np.exp(-rate * t)) / rate
def c(k,l,j,mu):
    """Computes the weights c of phi recursively (Lemma 2.23).

    Results are memoized in the module-level table `c_stored`
    (0-based as c_stored[k-1][l][j-1]); callers must reset that table
    whenever `mu` changes. Assumes all mu_j are distinct (denominators
    mu_m - mu_j) — TODO confirm for the chosen mu.
    """
    # storage indices (0-based into c_stored)
    k_, l_, j_ = k - 1, l, j - 1
    if c_stored[k_][l_][j_] is not None:  # `is not None` instead of `!= None`
        pass  # already memoized
    elif k == j and not l:
        # base case: single stage, weight is its own rate
        c_stored[k_][l_][j_] = mu[k_]
    elif l:
        if j >= k and j < k + l:
            c_stored[k_][l_][j_] = c(k,l-1,j,mu) * mu[k_+l_] / (mu[k_+l_] - mu[j-1])
        elif k + l == j:
            c_stored[k_][l_][j_] = np.sum([c(k,l-1,m,mu) * mu[j-1] / (mu[m-1] - mu[j-1]) for m in range(k,k+l)])
    return c_stored[k_][l_][j_]
def trans_prob_het(t,i,k,mu):
    """Computes the transition probabilities (Prop. 2.25)."""
    # NOTE(review): an identical definition appears again further down this
    # cell and silently overrides this one — remove one of the two.
    # p[l-1] = P(l clients present just before the next arrival), l = 2..k+1;
    # the prepended term is the remaining probability (system emptied to 1).
    p = [phi(i-k+1,k-l+1,t,mu) / mu[i-l+1] for l in range(2,k+2)]
    return [1 - np.sum(p)] + p
def cost_het(t,i,k,mu,omega,n):
    """Computes the cost when t is the next interarrival time."""
    # f: expected idle time up to the next arrival.
    # NOTE(review): f[0] below implies t arrives as a length-1 array (as
    # passed by scipy.optimize.minimize) — confirm before calling with a
    # plain scalar.
    f = t - np.sum([c(i-k+1,k-1,j,mu) * psi(j,t,mu) / mu[j-1] for j in range(i-k+1,i+1)])
    #g = 0 ## alternative
    #for l in range(k-1):
    #    g += (k - l - 1) * np.sum([c(i-k+1,l,j,mu) * psi(j,t,mu) / mu[i-k+l] for j in range(i-k+1,i-k+l+2)])
    # h: expected waiting time of the clients currently in the system.
    h = np.sum(1 / mu[i-k:i-1])
    p = trans_prob_het(t,i,k,mu)
    # Bellman step: weighted immediate cost plus expected cost-to-go.
    cost = omega * f[0] + (1 - omega) * h + np.sum([Cstar_het(i+1,l,mu,omega,n) * p[l-1] for l in range(1,k+2)])
    return cost
def Cstar_het(i,k,mu,omega,n):
    """Implements the Heterogeneous Exponential Case (Theorem 2.28).

    Optimal cost-to-go C_i(k) with memoization in the module-level tables
    C_matrix / minima; those tables (and c_stored) must be reset before a
    run with new parameters.
    """
    mu = np.array(mu)
    if C_matrix[i-1][k-1] != None: # retrieve stored value
        pass
    elif i == n: # initial condition
        C_matrix[i-1][k-1] = (1 - omega) * np.sum(1 / mu[i-k:i-1])
        # C_matrix[i-1][k-1] = (1 - omega) * np.sum([(k - l - 1) / mu[n-k+l] for l in range(k)]) ## alternative
    else:
        # One-dimensional minimization over the next interarrival time.
        optimization = minimize(cost_het,0,args=(i,k,mu,omega,n),bounds=((0,500),))
        C_matrix[i-1][k-1] = optimization.fun
        minima[i-1][k-1] = optimization.x[0]
        print(i,k,minima[i-1][k-1],C_matrix[i-1][k-1]) # displays C_i(k) and interarrival time
    return C_matrix[i-1][k-1]
def trans_prob_het(t,i,k,mu):
    """Computes the transition probabilities (Prop. 2.25)."""
    # NOTE(review): byte-identical duplicate of the definition earlier in
    # this cell; this later definition is the one that takes effect.
    p = [phi(i-k+1,k-l+1,t,mu) / mu[i-l+1] for l in range(2,k+2)]
    return [1 - np.sum(p)] + p
def trans_prob_het2(t,i,k,mu):
    # Alternative derivation of the same transition probabilities, used as a
    # sanity check against trans_prob_het in the script below.
    p = [0] * (k+1)
    # probability of emptying down to 1 client
    p[0] = np.sum([c(i-k+1,k-1,j,mu) * psi(j,t,mu) for j in range(i-k+1,i+1)])
    # intermediate states: difference of two partial-sum probabilities
    for l in range(2,k+1):
        p[l-1] = np.sum([c(i-k+1,k-l,j,mu) * psi(j,t,mu) for j in range(i-k+1,i-l+2)]) \
            - np.sum([c(i-k+1,k-l+1,j,mu) * psi(j,t,mu) for j in range(i-k+1,i-l+3)])
    # no service completion at all before t
    p[k] = np.exp(-mu[i-k] * t)
    return p
# Consistency check of the two transition-probability implementations and of
# the two expressions g / g2 for the alternative waiting-time term.
# NOTE(review): `n` and the memo table `c_stored` are not defined in this
# cell — it relies on a previously executed cell; confirm execution order.
t = 3.82
i = 4
k = 4
mu = np.linspace(0.5,1.5,n)
print(trans_prob_het(t,i,k,mu))
print(trans_prob_het2(t,i,k,mu))
g = 0
for l in range(k-1):
    g += (k - l - 1) * np.sum([c(i-k+1,l,j,mu) * psi(j,t,mu) / mu[i-k+l] for j in range(i-k+1,i-k+l+2)])
print(g)
g2 = (k-1) * psi(i-k+1,t,mu)
for l in range(1,k-1):
    g2 += (k - l - 1) * (np.sum([c(i-k+1,l-1,j,mu) * (t - psi(j,t,mu)) / mu[j-1] for j in range(i-k+1,i-k+l+1)]) \
        - np.sum([c(i-k+1,l,j,mu) * (t - psi(j,t,mu)) / mu[j-1] for j in range(i-k+1,i-k+l+2)]))
print(g2)
```
With this code, we can compute the optimal cost $C_{1}(1)$ for the heterogeneous case dynamically. An example:
```
# Example run of the heterogeneous dynamic program for n = 5 clients.
omega = 0.7
n = 5
mu = np.linspace(0.5,1.5,n)
# mu = np.array([1e-1 * i for i in range(n)])
# mu = mu - np.mean(mu) + 1
print("omega =", omega, "and mu =", mu, "\n")
print("(i,k,t*,C)")
# Reset the memo tables used by Cstar_het / c before the run.
C_matrix = [[None for k in range(n)] for i in range(n)]
minima = [[None for k in range(n)] for i in range(n)]
c_stored = [[[None for j in range(n)] for l in range(n)] for k in range(n)]
# compute values
for i in range(1,n+1):
    for k in range(1,i+1):
        Cstar_het(i,k,mu,omega=omega,n=n)
# cost
print("\nCost:", C_matrix[0][0])
```
We can also compute the minimal cost when scheduling all clients instantaneously:
```
def compute_probN_het(t,mu):
    """Computes P(N_ti = j) for i=1,...,n and j=1,...,i.

    t is the vector of (cumulative) arrival times; p[i-1][j-1] is the
    probability that j clients are present just before arrival i.
    Relies on the memoized helpers c() and phi() defined above.
    """
    n = len(mu)
    p = np.zeros((n,n))
    p[0][0] = 1  # the first client always finds an empty system
    for i in range(2,n+1):
        x = t[i-1] - t[i-2]  # interarrival time before client i
        # j = 1
        for k in range(1,i):
            p[i-1][0] += np.sum([c(i-k,k-1,m,mu) * psi(m,x,mu) for m in range(i-k,i)]) * p[i-2][k-1]
        # j = 2,...,i
        for j in range(2,i+1):
            p[i-1][j-1] = np.sum([(phi(i-k,k-j+1,x,mu) / mu[i-j]) * p[i-2][k-1] for k in range(j-1,i)])
    return p
def static_cost_het(t,mu,omega):
    """Computes the cost of the optimal static schedule.

    Weighted sum of expected idle time (EI, weight omega) and expected
    waiting time (EW, weight 1 - omega) over all clients, given the full
    schedule t computed in advance (no dynamic re-optimization).
    """
    mu, n = np.array(mu), len(mu)
    EW, EI = np.zeros(n), np.zeros(n)
    p = compute_probN_het(t,mu)
    for i in range(2,n+1):
        x = t[i-1] - t[i-2]  # interarrival time before client i
        # expected waiting time of client i over the system states j
        EW[i-2] = np.sum([np.sum(1 / mu[i-j:i-1]) * p[i-1][j-1] for j in range(2,i+1)])
        for j in range(1,i):
            # expected idle time accrued during this interarrival interval
            f = np.sum([c(i-j,j-1,m,mu) * (x - psi(m,x,mu)) / mu[m-1] for m in range(i-j,i)])
            EI[i-2] += f * p[i-2][j-1]
    return omega * np.sum(EI) + (1 - omega) * np.sum(EW)
```
Again we give an example, in which we compare the dynamic program with the static program:
```
# Compare the dynamic program against the optimal static schedule (n = 10).
omega = 0.1
n = 10
mu = np.linspace(0.5,1.5,n)
# Delta = 1.5
# mu = np.linspace(1 - Delta/2,1 + Delta/2,n)
# mu = mu[::-1]
# mu = np.random.permutation(mu)
# Reset the memo tables before the dynamic run.
C_matrix = [[None for k in range(n)] for i in range(n)]
minima = [[None for k in range(n)] for i in range(n)]
c_stored = [[[None for j in range(n)] for l in range(n)] for k in range(n)]
# compute values
for i in range(1,n+1):
    for k in range(1,i+1):
        Cstar_het(i,k,mu,omega=omega,n=n)
# cost
dynamic_cost = C_matrix[0][0]
# Reset the c() memo table again before the static optimization run.
c_stored = [[[None for j in range(n)] for l in range(n)] for k in range(n)]
# First arrival pinned to 0 via its (0,0) bound; the rest are free >= 0.
optimization = minimize(static_cost_het,range(n),args=(mu,omega), bounds=(((0,0),) + (((0,None)),) * (n-1)))
print(optimization)
static_cost = optimization.fun
print("\nmu:",mu)
print("omega:",omega)
print("\nDynamic Cost:", round(dynamic_cost,2))
print("Static Cost:", round(static_cost,2))
ratio = dynamic_cost / static_cost
print("ratio:", round(ratio,2))
# TODO: Old code
# def C_static_het(times,i,k,mu,omega=0.5,n=15):
# """
# Implements the Heterogeneous Exponential Case.
# """
# mu = np.array(mu)
# # print("i",i)
# if C_matrix[i-1][k-1] != None: # retrieve stored value
# pass
# elif i == n: # initial condition
# C_matrix[i-1][k-1] = (1 - omega) * np.sum([1 / mu[j-1] for j in range(i-k+1,i)])
# else:
# # print("n",n)
# # print("i:",i)
# t = times[i]
# # print(t)
# # helper function
# psi = lambda j,t: (1 - np.exp(-mu[j-1] * t)) / mu[j-1]
# # compute f and g
# f = np.sum([c(i-k+1,k-1,j,mu) * (t - psi(j,t)) / mu[j-1] for j in range(i-k+1,i+1)])
# g = np.sum([1 / mu[j-1] for j in range(i-k+1,i)])
# p = trans_prob_het(t,i,k,mu)
# cost = omega * f + (1 - omega) * g + C_static_het(times,i+1,1,mu,omega,n) * p[0]
# for l in range(2,k+2):
# # print(i)
# cost += C_static_het(times,i+1,l,mu,omega,n) * p[l-1]
# C_matrix[i-1][k-1] = cost
# # print(i,k,minima[i-1][k-1],C_matrix[i-1][k-1]) # displays C_i(k) and interarrival time
# # print(i,k,C_matrix[i-1][k-1])
# return C_matrix[i-1][k-1]
# t = [ 0. ,4.65288472, 8.33895249, 11.07420249, 13.03410038]
# n = len(t)
# mu = np.linspace(0.5,1.5,n)
# omega = 0.1
# C_matrix = [[None for k in range(n)] for i in range(n)]
# c_stored = [[[None for j in range(n)] for l in range(n)] for k in range(n)]
# print("Cost:",C_static_het(t,1,1,mu,omega,n))
# # for i in range(1,n+1):
# # for k in range(1,i+1):
# # print(t[i-1])
# # C_static_het(t[i-1],i,k,mu,omega=omega,n=n)
# C_matrix
```
| github_jupyter |
<a href="https://github.com/PaddlePaddle/PaddleSpeech"><img style="position: absolute; z-index: 999; top: 0; right: 0; border: 0; width: 128px; height: 128px;" src="https://nosir.github.io/cleave.js/images/right-graphite@2x.png" alt="Fork me on GitHub"></a>
# 使用 Transformer 进行语音识别
# 0. 视频理解与字幕
```
# 下载demo视频
!test -f work/source/subtitle_demo1.mp4 || wget -c https://paddlespeech.bj.bcebos.com/demos/asr_demos/subtitle_demo1.mp4 -P work/source/
import IPython.display as dp
from IPython.display import HTML
html_str = '''
<video controls width="600" height="360" src="{}">animation</video>
'''.format("work/source/subtitle_demo1.mp4 ")
dp.display(HTML(html_str))
print ("ASR结果为:当我说我可以把三十年的经验变成一个准确的算法他们说不可能当我说我们十个人就能实现对十九个城市变电站七乘二十四小时的实时监管他们说不可能")
```
> Demo实现:[https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/demos/automatic_video_subtitiles/](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/demos/automatic_video_subtitiles/)
# 1. 前言
## 1.1 背景知识
语音识别(Automatic Speech Recognition, ASR) 是一项从一段音频中提取出语言文字内容的任务。
目前该技术已经广泛应用于我们的工作和生活当中,包括生活中使用手机的语音转写,工作上使用的会议记录等等。
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/0231a71b0617485d85586d232f65db6379115befdf014068bd90fb15c5786c94"/>
<br>
(出处:DLHLP 李宏毅 语音识别课程PPT)
</div>
<br></br>
## 1.2 发展历史
* 早期,生成模型流行阶段:GMM-HMM (上世纪90年代)
* 深度学习爆发初期: DNN,CTC[1] (2006)
* RNN 流行,Attention 提出初期: RNN-T[2](2013), DeepSpeech[3] (2014), DeepSpeech2 [4] (2016), LAS[5](2016)
* Attetion is all you need 提出开始[6]: Transformer[6](2017),Transformer-transducer[7](2020) Conformer[8] (2020)
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/d6060426bba341a187422803c0f8ac2e2162c5c5422e4070a3425c09f7801379" height=1300, width=1000 />
</div>
目前 Transformer 和 Conformer 是语音识别领域的主流模型,因此本教程采用了 Transformer 作为讲解的主要内容,并在课后作业中安排了 Conformer 的相关练习。
# 2. 实战:使用Transformer进行语音识别的流程
CTC 的输出相互独立,使得每一帧利用上下文的信息的能力不足。
而 seq2seq(Transformer,Conformer) 的模型采用自回归的解码方式,所以其建模能力更强,但不便于支持流式。
对于Transformer模型,它的Encoder可以有效对语音特征的上下文进行建模。而它的Decoder具有语言模型的能力,能够将语言模型融合进整个模型中,是真正意义上的端到端模型。
下面简单介绍下 Transformer 语音识别模型,其主要分为 2 个部分:
- Encoder:声学特征会首先进入 Encoder,产生高层特征编码。
- Decoder:Decoder 利用 Encoder 产生的特征编码解码得到预测结果。
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/13bec64ab9544a3a91205a9633d9f015f2ddb0c3586d49ffb39307daed0229a0" height=40%, width=50%/>
</div>
## 2.1 准备工作
### 2.1.1 安装 paddlespeech
```
!pip install --upgrade pip && pip install paddlespeech==0.1.0
```
### 2.1.2 准备工作目录
```
!mkdir -p ./work/workspace_asr
%cd ./work/workspace_asr
```
### 2.1.3 获取预训练模型和音频文件
```
# 获取模型
!test -f transformer.model.tar.gz || wget -nc https://paddlespeech.bj.bcebos.com/s2t/aishell/asr1/transformer.model.tar.gz
!tar xzvf transformer.model.tar.gz
# 获取用于预测的音频文件
!test -f ./data/demo_01_03.wav || wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/zh/demo_01_03.wav -P ./data/
import IPython
IPython.display.Audio('./data/demo_01_03.wav')
# 快速体验识别结果
!paddlespeech asr --input ./data/demo_01_03.wav
```
### 2.1.4 导入python包
```
import paddle
import soundfile
import warnings
warnings.filterwarnings('ignore')
from yacs.config import CfgNode
from paddlespeech.s2t.transform.spectrogram import LogMelSpectrogramKaldi
from paddlespeech.s2t.transform.cmvn import GlobalCMVN
from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
from paddlespeech.s2t.models.u2 import U2Model
from matplotlib import pyplot as plt
%matplotlib inline
```
### 2.1.5 设置预训练模型的路径
```
# Paths to the pretrained Transformer config and averaged checkpoint,
# the decoding strategy, and the audio clip to recognize.
config_path = "conf/transformer.yaml"
checkpoint_path = "./exp/transformer/checkpoints/avg_20.pdparams"
decoding_method = "attention"
audio_file = "data/demo_01_03.wav"
# Read the yaml config into a structured CfgNode and select the decoding method.
transformer_config = CfgNode(new_allowed=True)
transformer_config.merge_from_file(config_path)
transformer_config.decoding.decoding_method = decoding_method
print(transformer_config)
```
## 2.2 获取特征
### 2.2.1 音频特征 logfbank
#### 2.2.1.1 语音特征提取整体流程图
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/54aefbc16dbf4487a7abe38b0210e5dbf1bb0c74fbe4459f94880a06950269f9" height=1200, width=800 />
<br>
由"莊永松、柯上優 DLHLP - HW1 End-to-end Speech Recognition PPT" 修改得
</div>
#### 2.2.1.2 logfbank 提取过程简化图
logfbank 特征提取大致可以分为 3 个步骤:
1. 语音时域信号经过预加重(信号高频分量补偿),然后进行分帧。
2. 每一帧数据加窗后经过离散傅立叶变换(DFT)得到频谱图。
3. 将频谱图的特征经过 Mel 滤波器得到 logmel fbank 特征。
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/08f7ccecc848495599c350aa2c440071b818ba0465734dd29701a2ff149f0a8c"/>
<br>
由"DLHLP 李宏毅 语音识别课程 PPT" 修改得
</div>
#### 2.2.1.3 CMVN 计算过程
对于所有获取的特征,模型在使用前会使用 CMVN 的方式进行归一化
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/46df63199d88481d9a2713a45ce63d00220e8ac42f9940e886282017758b54bf"/>
</div>
### 2.2.2 构建音频特征提取对象
```
# 构建 logmel 特征
logmel_kaldi= LogMelSpectrogramKaldi(
fs= 16000,
n_mels= 80,
n_shift= 160,
win_length= 400,
dither= True)
# 特征减均值除以方差
cmvn = GlobalCMVN(
cmvn_path="data/mean_std.json"
)
```
### 2.2.3 提取音频的特征
```
# Read raw int16 samples and turn them into normalized log-mel features.
array, _ = soundfile.read(audio_file, dtype="int16")
array = logmel_kaldi(array, train=False)
audio_feature_i = cmvn(array)
# Number of feature frames, as a tensor, for the model's length input.
audio_len = audio_feature_i.shape[0]
audio_len = paddle.to_tensor(audio_len)
# Add a batch dimension: (frames, mels) -> (1, frames, mels).
audio_feature = paddle.to_tensor(audio_feature_i, dtype='float32')
audio_feature = paddle.unsqueeze(audio_feature, axis=0)
print (audio_feature.shape)
# Visualize the spectrogram (mel bins on the vertical axis).
plt.figure()
plt.imshow(audio_feature_i.T, origin='lower')
plt.show()
```
## 2.3 使用模型获得结果
### 2.3.1 Transformer 语音识别模型的结构
Transformer 模型主要由 2 个部分组成,包括 Transformer Encoder 和 Transformer Decoder。
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/1edcd4ef683c4ef981b375ab8df388b40e3afc5f439f47f1a6f2f230908b63b1" height=50%, width=50% />
</div>
### 2.3.2 Transformer Encoder
Transformer encoder 主要是对音频的原始特征(这里原始特征使用的是 80 维 logfbank)进行特征编码,其输入是 logfbank,输出是特征编码。包含:
* 位置编码(position encoding)
* 降采样模块(subsampling embedding): 由2层降采样的 CNN 构成。
* Transformer Encoder Layer :
* self-attention: 主要特点是Q(query), K(key)和V(value)都是用了相同的值
* Feed forward Layer: 由两层全连接层构建,其特点是保持了输入和输出的特征维度是一致的。
#### 2.3.2.1 Self-Attention
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/72ffd9016d3841149723be2dde2a48c495ce8a95358946bca3736053812c788c" height=50%, width=50% />
</div>
其主要步骤可以分为三步:
1. `Q` 和 `K` 的向量通过求内积的方式计算相似度,经过 scale 和 softmax 后,获得每个 `Q` 和所有`K` 之间的 score。
2. 将每个 `Q` 和所有 `K` 之间的 score 和 `V` 进行相乘,再将相乘后的结果求和,得到 self-attetion 的输出向量。
3. 使用多个 Attetion 模块均进行第一步和第二步,并将最后的输出向量进行合并,得到最终 Multi-Head Self-Attention 的输出。
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/fcdef1992e6d4c909403d603062d09e4d5adaff0226e4367b35d27aea2da1303" height=30%, width=30% />
</div>
### 2.3.3 Transformer Decoder
Transformer 的 Decoder 用于获取最后的输出结果。其结构和 Encoder 有一定的相似性,也具有 Attention 模块和 Feed forward layer。
主要的不同点有 2 个:
1. Decoder 采用的是一种自回归的方式进行解码。
2. Decoder 在 Multi-head self-attention 和 Feed forward layer 模块之间增加了一层 Multi-head cross-attention 层用于获取 Encoder 得到的特征编码。
#### 2.3.3.1 Masked Multi-head Self-Attention
细心的同学可能发现了,Decoder 的一个 Multi-head self-attention 前面有一个 mask 。增加了这个 mask 的原因在于进行 Decoder 训练的时候,Decoder 的输入是一句完整的句子,而不是像预测这样一步步输入句子的前缀。
为了模拟预测的过程,Decoder 训练的时候需要用 mask 遮住句子。 例如 `T=1` 时,就要 mask 输入中除第一个字符以外其他的字符,`T=2` 的时候则需要 mask 除前两个字符以外的其余字符。
#### 2.3.3.2 Cross Attention
Decoder 在每一步的解码过程中,都会利用 Encoder 的输出的特征编码进行 cross-attention。
其中Decoder会将自回结果的编码作为 Attention 中的 `Q` ,而 Encoder 输出的特征编码作为 `K` 和 `V` 来完成 attetion 计算,从而利用 Encoder 提取的音频信息。
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/8e93122eb65344ea885a8af9014de4569b7c9c9f55aa45f7ac17ba2d0b0af260" height=30%, width=30% />
</div>
#### 2.3.3.3 Decoder的自回归解码
其采用了一种自回归的结构,即 Decoder 的上一个时间点的输出会作为下一个时间点的输入。
另外,计算的过程中,Decoder 会利用 Encoder 的输出信息。
如果使用贪心(greedy)的方式,Decoder 的解码过程如下:
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/0acaf9f243304120832018b83a4b7c67b8d578f710ce4eeba6062ab9661ef9e7" height=50%, width=50% />
</div>
使用 greedy 模式解码比较简单,但是很有可能会在解码过程中丢失整体上效果更好的解码结果。
因此我们实际使用的是 beam search 方式的解码,beam search 模式下的 decoder 的解码过程如下:
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/367f8f7cd4b4451ab45dd883045c500d941f0d235fca4ad2a3ccb925ec59aea2" height=50%, width=50%/>
</div>
### 2.3.4 模型训练
模型训练同时使用了 CTC 损失和 cross entropy 交叉熵损失进行损失函数的计算。
其中 Encoder 输出的特征直接进入 CTC Decoder 得到 CTC 损失。
而 Decoder 的输出使用 cross entropy 损失。
<div align=center>
<img src="https://ai-studio-static-online.cdn.bcebos.com/fe1d3864f18f4df0a9ab3df8dc4e361a693250b387344273952315ca14d30732"/>
<br>
(由"莊永松、柯上優 DLHLP - HW1 End-to-end Speech Recognition PPT" 修改得)
</div>
### 2.3.5 构建Transformer模型
```
model_conf = transformer_config.model
# input_dim is the feature dimension (80 log-mel bins).
model_conf.input_dim = 80
# output_dim is the size of the character vocabulary.
model_conf.output_dim = 4233
print ("model_conf", model_conf)
model = U2Model.from_config(model_conf)
```
### 2.3.6 加载预训练的模型
```
model_dict = paddle.load(checkpoint_path)
model.set_state_dict(model_dict)
```
### 2.3.7 进行预测
```
# Build the text featurizer (character units) and run decoding on the
# prepared feature batch with the configured strategy.
decoding_config = transformer_config.decoding
text_feature = TextFeaturizer(unit_type='char',
                              vocab=transformer_config.collator.vocab_filepath)
result_transcripts = model.decode(
    audio_feature,
    audio_len,
    text_feature=text_feature,
    decoding_method=decoding_config.decoding_method,
    beam_size=decoding_config.beam_size,
    ctc_weight=decoding_config.ctc_weight,
    decoding_chunk_size=decoding_config.decoding_chunk_size,
    num_decoding_left_chunks=decoding_config.num_decoding_left_chunks,
    simulate_streaming=decoding_config.simulate_streaming)
# result_transcripts[1] holds token ids, result_transcripts[0] the text.
print ("预测结果对应的token id为:")
print (result_transcripts[1][0])
print ("预测结果为:")
print (result_transcripts[0][0])
```
# 3. 作业
1. 使用开发模式安装 [PaddleSpeech](https://github.com/PaddlePaddle/PaddleSpeech)
环境要求:docker, Ubuntu 16.04,root user。
参考安装方法:[使用Docker安装paddlespeech](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md#hard-get-the-full-funciton-on-your-mechine)
2. 跑通 example/aishell/asr1 中的 conformer 模型,完成训练和预测。
3. 按照 example 的格式使用自己的数据集训练 ASR 模型。
# 4. 关注 PaddleSpeech
请关注我们的 [Github Repo](https://github.com/PaddlePaddle/PaddleSpeech/),非常欢迎加入以下微信群参与讨论:
- 扫描二维码
- 添加运营小姐姐微信
- 通过后回复【语音】
- 系统自动邀请加入技术群
<center><img src="https://ai-studio-static-online.cdn.bcebos.com/87bc7da42bcc401bae41d697f13d8b362bfdfd7198f14096b6d46b4004f09613" width="300" height="300" ></center>
# 5. 参考文献
[1] Graves A, Fernández S, Gomez F, et al. Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks[C]//Proceedings of the 23rd international conference on Machine learning. 2006: 369-376.
[2] Graves A, Mohamed A, Hinton G. Speech recognition with deep recurrent neural networks[C]//2013 IEEE international conference on acoustics, speech and signal processing. Ieee, 2013: 6645-6649.
[3] Hannun A, Case C, Casper J, et al. Deep speech: Scaling up end-to-end speech recognition[J]. arXiv preprint arXiv:1412.5567, 2014.
[4] Amodei D, Ananthanarayanan S, Anubhai R, et al. Deep speech 2: End-to-end speech recognition in english and mandarin[C]//International conference on machine learning. PMLR, 2016: 173-182.
[5] Chan W, Jaitly N, Le Q, et al. Listen, attend and spell: A neural network for large vocabulary conversational speech recognition[C]//2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2016: 4960-4964.
[6] Vaswani A, Shazeer N, Parmar N, et al. Attention is all you need[C]//Advances in neural information processing systems. 2017: 5998-6008.
[7] Zhang Q, Lu H, Sak H, et al. Transformer transducer: A streamable speech recognition model with transformer encoders and rnn-t loss[C]//ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2020: 7829-7833.
[8] Gulati A, Qin J, Chiu C C, et al. Conformer: Convolution-augmented transformer for speech recognition[J]. arXiv preprint arXiv:2005.08100, 2020.
| github_jupyter |
# Assignment 1 - Creating and Manipulating Graphs
Eight employees at a small company were asked to choose 3 movies that they would most enjoy watching for the upcoming company movie night. These choices are stored in the file `Employee_Movie_Choices.txt`.
A second file, `Employee_Relationships.txt`, has data on the relationships between different coworkers.
The relationship score has value of `-100` (Enemies) to `+100` (Best Friends). A value of zero means the two employees haven't interacted or are indifferent.
Both files are tab delimited.
```
import networkx as nx
import pandas as pd
import numpy as np
from networkx.algorithms import bipartite
# This is the set of employees
employees = set(['Pablo',
'Lee',
'Georgia',
'Vincent',
'Andy',
'Frida',
'Joan',
'Claude'])
# This is the set of movies
movies = set(['The Shawshank Redemption',
'Forrest Gump',
'The Matrix',
'Anaconda',
'The Social Network',
'The Godfather',
'Monty Python and the Holy Grail',
'Snakes on a Plane',
'Kung Fu Panda',
'The Dark Knight',
'Mean Girls'])
# Function to plot graphs
def plot_graph(G, weight_name=None):
'''
G: a networkx G
weight_name: name of the attribute for plotting edge weights (if G is weighted)
'''
%matplotlib notebook
import matplotlib.pyplot as plt
plt.figure()
pos = nx.spring_layout(G)
edges = G.edges()
weights = None
if weight_name:
weights = [int(G[u][v][weight_name]) for u,v in edges]
labels = nx.get_edge_attributes(G,weight_name)
nx.draw_networkx_edge_labels(G,pos,edge_labels=labels)
nx.draw_networkx(G, pos, edges=edges, width=weights);
else:
nx.draw_networkx(G, pos, edges=edges);
```
### Question 1
Using NetworkX, load in the bipartite graph from `Employee_Movie_Choices.txt` and return that graph.
*This function should return a networkx graph with 19 nodes and 24 edges*
```
def answer_one():
    """Load the employee-movie bipartite graph from the tab-delimited file."""
    return nx.read_edgelist('Employee_Movie_Choices.txt', delimiter="\t", nodetype=str)
# Inspect the raw edge-list file, then load and draw the bipartite graph.
!cat Employee_Movie_Choices.txt
G = answer_one()
plot_graph(G)
```
### Question 2
Using the graph from the previous question, add nodes attributes named `'type'` where movies have the value `'movie'` and employees have the value `'employee'` and return that graph.
*This function should return a networkx graph with node attributes `{'type': 'movie'}` or `{'type': 'employee'}`*
```
def answer_two():
    """Tag every node with a 'type' attribute: 'employee' or 'movie'."""
    graph = answer_one()
    for node in graph.nodes():
        if node in employees:
            graph.add_node(node, type="employee")
        elif node in movies:
            graph.add_node(node, type="movie")
    return graph
# Draw the graph with the type attributes applied.
G = answer_two()
plot_graph(G)
```
### Question 3
Find a weighted projection of the graph from `answer_two` which tells us how many movies different pairs of employees have in common.
*This function should return a weighted projected graph.*
```
def answer_three():
    """Project the bipartite graph onto employees; edge weight = shared movies."""
    annotated = answer_two()
    return bipartite.weighted_projected_graph(annotated, employees)
# Draw the weighted projection, labelling edges with their weights.
G = answer_three()
plot_graph(G, "weight")
```
### Question 4
Suppose you'd like to find out if people that have a high relationship score also like the same types of movies.
Find the Pearson correlation ( using `DataFrame.corr()` ) between employee relationship scores and the number of movies they have in common. If two employees have no movies in common it should be treated as a 0, not a missing value, and should be included in the correlation calculation.
*This function should return a float.*
```
def answer_four():
    """Correlate employee relationship scores with shared-movie counts.

    Returns the Pearson correlation (float) between each pair's
    relationship score and the number of movies they have in common,
    with pairs sharing no movies counted as 0.
    """
    # Weighted projection: edge weight = number of movies in common.
    G = answer_three()
    Rel = nx.read_edgelist('Employee_Relationships.txt' ,data=[('relationship_score', int)])
    G_df = pd.DataFrame(G.edges(data=True), columns=['From', 'To', 'movies_score'])
    Rel_df = pd.DataFrame(Rel.edges(data=True), columns=['From', 'To', 'relationship_score'])
    # The projection's edges are undirected; duplicate them with From/To
    # swapped so the merge below matches pairs listed in either order.
    G_copy_df = G_df.copy()
    G_copy_df.rename(columns={"From":"From1", "To":"From"}, inplace=True)
    G_copy_df.rename(columns={"From1":"To"}, inplace=True)
    G_final_df = pd.concat([G_df, G_copy_df])
    # Right merge keeps every relationship pair; pairs with no common
    # movies come through as NaN and are mapped to weight 0 by set_value.
    final_df = pd.merge(G_final_df, Rel_df, on = ['From', 'To'], how='right')
    final_df['movies_score'] = final_df['movies_score'].map(set_value)
    # Unwrap the edge-attribute dicts into plain numeric columns.
    final_df['movies_score'] = final_df['movies_score'].map(lambda x: x['weight'])
    final_df['relationship_score'] = final_df['relationship_score'].map(lambda x: x['relationship_score'])
    value = final_df['movies_score'].corr(final_df['relationship_score'])
    return value
def set_value(val):
    """Normalize a merged movies_score cell: missing (NaN) entries become
    a zero-weight edge dict so pairs with no shared movies count as 0.
    """
    # The original used `val is np.nan`, an identity check that only works
    # when the missing value happens to be the np.nan singleton; it fails
    # for any other NaN float (e.g. float('nan')). Test by value instead.
    if isinstance(val, float) and np.isnan(val):
        return {'weight': 0}
    return val
# Compute and display the correlation.
answer_four()
```
| github_jupyter |
# Distributed
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Distributed" data-toc-modified-id="Distributed-1"><span class="toc-item-num">1 </span>Distributed</a></span><ul class="toc-item"><li><span><a href="#Distributed-Cluster" data-toc-modified-id="Distributed-Cluster-1.1"><span class="toc-item-num">1.1 </span>Distributed Cluster</a></span></li><li><span><a href="#Create-and-Connect-to-Dask-Distributed-Cluster" data-toc-modified-id="Create-and-Connect-to-Dask-Distributed-Cluster-1.2"><span class="toc-item-num">1.2 </span>Create and Connect to Dask Distributed Cluster</a></span></li><li><span><a href="#Perfom-computation-on-a-dask-array" data-toc-modified-id="Perfom-computation-on-a-dask-array-1.3"><span class="toc-item-num">1.3 </span>Perfom computation on a dask array</a></span></li><li><span><a href="#Going-Further" data-toc-modified-id="Going-Further-1.4"><span class="toc-item-num">1.4 </span>Going Further</a></span></li></ul></li></ul></div>
## Distributed Cluster
As we have seen so far, Dask allows you to simply construct graphs of tasks with dependencies, as well as have graphs created automatically for you using functional, Numpy syntax on data collections. None of this would be very useful, if there weren't also a way to execute these graphs, in a parallel and memory-aware way. So far we have been calling `thing.compute()` or `dask.compute(thing)` without worrying what this entails. Now we will discuss the options available for that execution, and in particular, the distributed scheduler, which comes with additional functionality.
## Create and Connect to Dask Distributed Cluster
Let's begin by importing `Client` and `LocalCluster` objects/classes
```
from dask.distributed import Client, LocalCluster
# Setup a local cluster.
# By default this sets up 1 worker per core
cluster = LocalCluster()
cluster
```
☝️ Don't forget to click the link above to view the scheduler dashboard! (you may wish to have both the notebook and dashboard side-by-side)
```
client = Client(cluster) # Connect to a Dask cluster in order to submit computation
client
```
## Perfom computation on a dask array
```
import dask.array as da
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
bigshape = (500, 2400, 3600)
chunk_shape = (10, 1200, 1800)
big_ones = da.ones(bigshape, chunks=chunk_shape)
big_ones
big_calc = (big_ones * big_ones[::-1, ::-1]).mean()
big_calc
%time big_calc.compute()
```
**Create a histogram**
```
random_values = da.random.normal(size=(1e8,), chunks=(20e6,))
hist, bins = da.histogram(random_values, bins=10, range=[-5, 5])
random_values
hist
hist.visualize()
%%time
x = 0.5 * (bins[1:] + bins[:-1])
width = np.diff(bins)
plt.bar(x, hist, width);
```
## Going Further
- [Dask Tutorial on Distributed](https://github.com/dask/dask-tutorial/blob/master/05_distributed.ipynb)
- [Dask Tutorial on Advanced Distributed](https://github.com/dask/dask-tutorial/blob/master/06_distributed_advanced.ipynb)
<div class="alert alert-block alert-success">
<p>Previous: <a href="02_dask_arrays.ipynb">Dask Arrays</a></p>
<p>Next: <a href="04_dask_and_xarray.ipynb">Dask + Xarray</a></p>
</div>
| github_jupyter |
# DAG Creation and Submission
Launch this tutorial in a Jupyter Notebook on Binder:
[](https://mybinder.org/v2/gh/htcondor/htcondor-python-bindings-tutorials/master?urlpath=lab/tree/DAG-Creation-And-Submission.ipynb)
In this tutorial, we will learn how to use `htcondor.dags` to create and submit an HTCondor DAGMan workflow.
Our goal will be to create an image of the Mandelbrot set.
This is a perfect problem for high-throughput computing because each point in the image can be calculated completely independently of any other point, so we are free to divide the image creation up into patches, each created by a single HTCondor job.
DAGMan will enter the picture to coordinate stitching the image patches we create back into a single image.
## Making a Mandelbrot set image locally
We'll use `goatbrot` (https://github.com/beejjorgensen/goatbrot) to make the image.
`goatbrot` can be run from the command line, and takes a series of options to specify which part of the Mandelbrot set to draw, as well as the properties of the image itself.
`goatbrot` options:
- `-i 1000` The number of iterations.
- `-c 0,0` The center point of the image region.
- `-w 3` The width of the image region.
- `-s 1000,1000` The pixel dimensions of the image.
- `-o test.ppm` The name of the output file to generate.
We can run a shell command from Jupyter by prefixing it with a `!`:
```
! ./goatbrot -i 10 -c 0,0 -w 3 -s 500,500 -o test.ppm
! convert test.ppm test.png
```
Let's take a look at the test image. It won't be very good, because we didn't run for very many iterations.
We'll use HTCondor to produce a better image!
```
from IPython.display import Image
Image('test.png')
```
## What is the workflow?
We can parallelize this calculation by drawing rectangular sub-regions of the full region ("tiles") we want and stitching them together into a single image using `montage`.
Let's draw this out as a graph, showing how data (image patches) will flow through the system.
(Don't worry about this code, unless you want to know how to make dot diagrams in Python!)
```
from graphviz import Digraph
import itertools
num_tiles_per_side = 2
dot = Digraph()
dot.node('montage')
for x, y in itertools.product(range(num_tiles_per_side), repeat = 2):
n = f'tile_{x}-{y}'
dot.node(n)
dot.edge(n, 'montage')
dot
```
Since we can chop the image up however we'd like, we have as many tiles per side as we'd like (try changing `num_tiles_per_side` above).
The "shape" of the DAG is the same: there is a "layer" of `goatbrot` jobs that calculate tiles, which all feed into `montage`.
Now that we know the structure of the problem, we can start describing it to HTCondor.
## Describing `goatbrot` as an HTCondor job
We describe a job using a `Submit` object.
It corresponds to the submit *file* used by the command line tools.
It mostly behaves like a standard Python dictionary, where the keys and values correspond to submit descriptors.
```
import htcondor
tile_description = htcondor.Submit(
executable = 'goatbrot', # the program we want to run
arguments = '-i 10000 -c $(x),$(y) -w $(w) -s 500,500 -o tile_$(tile_x)-$(tile_y).ppm', # the arguments to pass to the executable
log = 'mandelbrot.log', # the HTCondor job event log
output = 'goatbrot.out.$(tile_x)_$(tile_y)', # stdout from the job goes here
error = 'goatbrot.err.$(tile_x)_$(tile_y)', # stderr from the job goes here
request_cpus = '1', # resource requests; we don't need much per job for this problem
request_memory = '128MB',
request_disk = '1GB',
)
print(tile_description)
```
Notice the heavy use of macros like `$(x)` to specify the tile.
Those aren't built-in submit macros; instead, we will plan on passing their values in through **vars**.
Vars will let us customize each individual job in the tile layer by filling in those macros individually.
Each job will receive a dictionary of macro values; our next goal is to make a list of those dictionaries.
We will do this using a function that takes the number of tiles per side as an argument.
As mentioned above, the **structure** of the DAG is the same no matter how "wide" the tile layer is.
This is why we define a function to produce the tile vars instead of just calculating them once: we can vary the width of the DAG by passing different arguments to `make_tile_vars`.
More customizations could be applied to make different images (for example, you could make it possible to set the center point of the image).
```
def make_tile_vars(num_tiles_per_side, width = 3):
    """Build the per-job macro dictionaries for the goatbrot tile layer.

    Parameters
    ----------
    num_tiles_per_side : the image is cut into this many tiles along each axis.
    width : total width of the complex-plane region to draw (default 3).

    Returns a list of dicts, one per tile, with keys w (tile width),
    x / y (tile center in Cartesian coordinates) and zero-padded
    tile_x / tile_y index strings.
    """
    width_per_tile = width / num_tiles_per_side
    # Center coordinate of each tile along one axis, symmetric about 0.
    centers = [
        width_per_tile * (n + 0.5 - (num_tiles_per_side / 2))
        for n in range(num_tiles_per_side)
    ]
    # Renamed from `vars`, which shadowed the builtin of the same name.
    tile_vars = []
    for (tile_y, y), (tile_x, x) in itertools.product(enumerate(centers), repeat = 2):
        tile_vars.append(dict(
            w = width_per_tile,
            x = x,
            y = -y,  # image rows grow downward, Cartesian y grows upward
            tile_x = str(tile_x).rjust(5, '0'),
            tile_y = str(tile_y).rjust(5, '0'),
        ))
    return tile_vars
# Preview the macro dictionaries for a 2x2 tiling.
tile_vars = make_tile_vars(2)
for var in tile_vars:
    print(var)
```
If we want to increase the number of tiles per side, we just pass in a larger number.
Because the `tile_description` is **parameterized** in terms of these variables, it will work the same way no matter what we pass in as `vars`.
```
# The same parameterized description scales to a 4x4 tiling
# just by passing a different argument.
tile_vars = make_tile_vars(4)
for var in tile_vars:
    print(var)
```
## Describing montage as an HTCondor job
Now we can write the `montage` job description.
The problem is that the arguments and input files depend on how many tiles we have, which we don't know ahead-of-time.
We'll take the brute-force approach of just writing a function that takes the tile `vars` we made in the previous section and using them to build the `montage` job description.
Note that some of the work of building up the submit description is done in Python.
This is a major advantage of communicating with HTCondor via Python: you can do the hard work in Python instead of in submit language!
One area for possible improvement here is to remove the duplication of the format of the input file names, which is repeated here from when it was first used in the `goatbrot` submit object. When building a larger, more complicated workflow, it is important to reduce duplication of information to make it easier to modify the workflow in the future.
```
def make_montage_description(tile_vars):
    """Build the HTCondor submit description for the montage stitching job,
    deriving its inputs and arguments from the tile-layer vars."""
    side = int(len(tile_vars) ** .5)
    tile_files = ['tile_{}-{}.ppm'.format(v["tile_x"], v["tile_y"]) for v in tile_vars]
    montage_args = '{} -mode Concatenate -tile {}x{} mandelbrot.png'.format(
        ' '.join(tile_files), side, side)
    return htcondor.Submit(
        executable = '/usr/bin/montage',
        arguments = montage_args,
        transfer_input_files = ', '.join(tile_files),
        log = 'mandelbrot.log',
        output = 'montage.out',
        error = 'montage.err',
        request_cpus = '1',
        request_memory = '128MB',
        request_disk = '1GB',
    )
# Build and inspect the montage description for a 2x2 tiling.
montage_description = make_montage_description(make_tile_vars(2))
print(montage_description)
```
## Describing the DAG using `htcondor.dags`
Now that we have the job descriptions, all we have to do is use `htcondor.dags` to tell DAGMan about the dependencies between them.
`htcondor.dags` is a subpackage of the HTCondor Python bindings that lets you write DAG descriptions using a higher-level language than raw DAG description file syntax.
Incidentally, it also lets you use Python to drive the creation process, increasing your flexibility.
**Important Concept:** the code from `dag = dags.DAG()` onwards only defines the **topology** (or **structure**) of the DAG.
The `tile` layer can be flexibly grown or shrunk by adjusting the `tile_vars` without changing the topology, and this can be clearly expressed in the code.
The `tile_vars` are driving the creation of the DAG. Try changing `num_tiles_per_side` to some other value!
```
from htcondor import dags
num_tiles_per_side = 2
# create the tile vars early, since we need to pass them to multiple places later
tile_vars = make_tile_vars(num_tiles_per_side)
dag = dags.DAG()
# create the tile layer, passing in the submit description for a tile job and the tile vars
tile_layer = dag.layer(
name = 'tile',
submit_description = tile_description,
vars = tile_vars,
)
# create the montage "layer" (it only has one job in it, so no need for vars)
# note that the submit description is created "on the fly"!
montage_layer = tile_layer.child_layer(
name = 'montage',
submit_description = make_montage_description(tile_vars),
)
```
We can get a textual description of the DAG structure by calling the `describe` method:
```
print(dag.describe())
```
## Write the DAG to disk
We still need to write the DAG to disk to get DAGMan to work with it.
We also need to move some files around so that the jobs know where to find them.
```
from pathlib import Path
import shutil
dag_dir = (Path.cwd() / 'mandelbrot-dag').absolute()
# blow away any old files
shutil.rmtree(dag_dir, ignore_errors = True)
# make the magic happen!
dag_file = dags.write_dag(dag, dag_dir)
# the submit files are expecting goatbrot to be next to them, so copy it into the dag directory
shutil.copy2('goatbrot', dag_dir)
print(f'DAG directory: {dag_dir}')
print(f'DAG description file: {dag_file}')
```
## Submit the DAG via the Python bindings
Now that we have written out the DAG description file, we can submit it for execution using the standard Python bindings submit mechanism.
The `Submit` class has a static method which can read a DAG description and generate a corresponding `Submit` object:
```
dag_submit = htcondor.Submit.from_dag(str(dag_file), {'force': 1})
print(dag_submit)
```
Now we can enter the DAG directory and submit the DAGMan job, which will execute the graph:
```
import os
os.chdir(dag_dir)
schedd = htcondor.Schedd()
with schedd.transaction() as txn:
cluster_id = dag_submit.queue(txn)
print(f"DAGMan job cluster is {cluster_id}")
os.chdir('..')
```
Let's wait for the DAGMan job to complete by reading it's event log:
```
dag_job_log = f"{dag_file}.dagman.log"
print(f"DAG job log file is {dag_job_log}")
# read events from the log, waiting forever for the next event
dagman_job_events = htcondor.JobEventLog(str(dag_job_log)).events(None)
# this event stream only contains the events for the DAGMan job itself, not the jobs it submits
for event in dagman_job_events:
print(event)
# stop waiting when we see the terminate event
if event.type is htcondor.JobEventType.JOB_TERMINATED and event.cluster == cluster_id:
break
```
Let's look at the final image!
```
Image(dag_dir / "mandelbrot.png")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/shivammehta007/NLPinEnglishLearning/blob/master/Sequence_2_sequence_Generation/Sequence2SequenceQuestionGenerator.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Question Generation
Additional Dependencies
```
%%capture
!pip install fairseq
!rm -rf NLPinEnglishLearning
import os
from getpass import getpass
import urllib
import subprocess
def run_command(command):
    """Run *command* through the shell, streaming its combined
    stdout/stderr to this process's stdout line by line."""
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=True,
        encoding="utf-8",
        errors="replace",
    )
    # Pump output until the pipe reaches EOF and the process has exited.
    while True:
        line = process.stdout.readline()
        if line:
            print(line.strip(), flush=True)
        elif process.poll() is not None:
            break
# Clone the project repository, streaming git's output live.
cmd_string = 'git clone https://github.com/shivammehta007/NLPinEnglishLearning.git'
run_command(cmd_string)
from google.colab import drive
drive.mount('/content/drive')
```
## Download Glove from Kaggle
```
import os
import json
kaggle_info = json.load(open("/content/drive/My Drive/kaggle.json"))
os.environ['KAGGLE_USERNAME'] = kaggle_info["username"]
os.environ['KAGGLE_KEY'] = kaggle_info["key"]
!kaggle datasets list --user thanakomsn
!kaggle datasets download thanakomsn/glove6b300dtxt
%mkdir .vector_cache
%mv glove6b300dtxt.zip .vector_cache/
!unzip .vector_cache/glove6b300dtxt.zip
%ls -a .vector_cache/
```
## Training Baseline Model
```
%cd QuestionGenerator/Sequence_2_sequence_Generation/Baseline
```
### Download Dataset
```
!python datadownloader.py --help
```
### PreProcess Dataset
```
!python preprocessing.py --help
```
### Train Model
```
!python train.py --help
```
### Inference Model
```
!python inference.py --help
```
# Other Models
```
%cd ..
%cd ../FairSeq_models/
!python preprocess.py
!python train.py --help
!python generate.py --help
```
# Evaluation
```
%%capture
!pip install git+https://github.com/Maluuba/nlg-eval.git@master
!nlg-eval --setup
%cd NLPinEnglishLearning/Sequence_2_sequence_Generation/FairSeq_models/data/raw/
```
## LSTM
```
!grep ^H lstm.out | cut -f3- > lstm.out.sys
!grep ^T lstm.out | cut -f2- > lstm.out.ref
!nlg-eval --hypothesis=lstm.out.sys --references=lstm.out.ref
```
## CNN
```
!grep ^H cnn.out | cut -f3- > cnn.out.sys
!grep ^T cnn.out | cut -f2- > cnn.out.ref
!nlg-eval --hypothesis=cnn.out.sys --references=cnn.out.ref
```
## Tranformer
```
!grep ^H transformer.out | cut -f3- > transformer.out.sys
!grep ^T transformer.out | cut -f2- > transformer.out.ref
!nlg-eval --hypothesis=transformer.out.sys --references=transformer.out.ref
```
| github_jupyter |
```
import figurefirst
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
from IPython.display import display,SVG
def make_plot(template_filename, output_filename):
    """Plot sin/cos/tan demo curves into a figurefirst SVG template.

    template_filename: SVG layout whose axes are tagged (group, ax) and which
        contains a 'layout_design' Inkscape layer.
    output_filename: path of the SVG written out with the plotted data.
    """
    ## Define colors, spine locations, and notes for data ######################
    colors = {'group1': 'green',
              'group2': 'blue',
              'group3': 'orange'}
    spines = {'ax1': ['left', 'top'],
              'ax2': ['left', 'bottom'],
              'ax3': ['right', 'bottom']}
    functions = { 'ax1': np.sin,
                  'ax2': np.cos,
                  'ax3': np.tan}
    notes = {'group1': 'random data about green fish',
             'group2': 'random data about blue squirrels',
             'group3': 'random data about orange bats'}
    ## Open layout and generate matplotlib axes ################################
    layout = figurefirst.svg_to_axes.FigureLayout(template_filename)
    layout.make_mplfigures()
    ## Iterate through groups and axes and plot data ###########################
    for group in ['group1', 'group2', 'group3']:
        for ax in ['ax1', 'ax2', 'ax3']:
            # grab the axis in this group
            mpl_axis = layout.axes[(group, ax)]
            # generate some data (each axis plots a different trig function)
            x_data = np.arange(0,10,0.1)
            y_data = functions[ax](x_data)
            # plot the data
            mpl_axis.plot(x_data, y_data, color=colors[group])
            # optional: make the spines look nice
            mpl_axis.set_ylim(-1.2,1.2)
            mpl_axis.set_xlim(-1,11)
            figurefirst.mpl_functions.adjust_spines( mpl_axis, spines[ax],
                                                     spine_location_offset=5,
                                                     xticks=[0,5,10],
                                                     yticks=[-1, 0, 1])
        # apply one font size to the whole group's figure before exporting it
        figurefirst.mpl_functions.set_fontsize(mpl_axis.figure,8)
        # add the figure (group) to the layout as a new layer
        layout.append_figure_to_layer(layout.figures[group], group,
                                      cleartarget=True, # clear out the layer
                                      save_traceback=True, # save the function call traceback
                                      notes=notes[group]) # save notes about the data into the svg
    ## Hide the design layer and save the new svg file ##########################
    layout.set_layer_visibility(inkscape_label = 'layout_design',vis = False)
    layout.write_svg(output_filename)
# Render the same data into two different SVG layouts; only the template
# changes between the two calls, not the plotting code.
template_filename = 'figure_groups_and_templates.svg'
output_filename = 'figure_groups_and_templates_output.svg'
make_plot(template_filename, output_filename)
# Display the layout and svg and the data svg
plt.close('all')
display(SVG('figure_groups_and_templates.svg'))
display(SVG('figure_groups_and_templates_output.svg'))
# Now generate the same plot with a different layout. Note, the code does not change.
template_filename = 'figure_groups_and_templates_vertical.svg'
output_filename = 'figure_groups_and_templates_vertical_output.svg'
make_plot(template_filename, output_filename)
# display the layout and svg and the data svg
plt.close('all')
display(SVG('figure_groups_and_templates_vertical.svg'))
display(SVG('figure_groups_and_templates_vertical_output.svg'))
```
| github_jupyter |
```
import pandas as pd
import matplotlib.pyplot as plt
import scanpy.api as sc
import scipy as sp
import itertools
import numpy as np
import scipy.stats as stats
from scipy.integrate import dblquad
import seaborn as sns
from statsmodels.stats.multitest import fdrcorrection
import imp
np.arange(1, 5, 0.001).shape
%%time
temp = stats.gamma.rvs(a=np.arange(.001, 12, 1), size=(50000, 12))
%%time
temp = stats.gamma.rvs(a=np.arange(1, 5, 0.001), size=(50000, 4000))
np.random.dirichlet([[1, 2, 3], [1, 2, 3]], 5)
def generalized_binom_coef(x, y):
    """Generalized binomial coefficient C(x, y) = Γ(x+1) / (Γ(y+1) Γ(x-y+1)).

    Accepts non-integer arguments (via the gamma function) and broadcasts
    element-wise over numpy arrays. Matches the ordinary binomial coefficient
    for integer x >= y >= 0.
    """
    # (Commented-out int→array promotion removed: numpy broadcasting already
    # handles mixed scalar/array arguments.)
    return sp.special.gamma(x + 1) / (sp.special.gamma(y + 1) * sp.special.gamma(x - y + 1))
class toy_rv(stats.rv_discrete):
    """Binomial-thinning toy distribution: pmf(k | n, p) = C(n, k) p^k (1-p)^(n-k).

    Uses the gamma-based generalized binomial coefficient so a non-integer n
    (e.g. a lognormal draw) is accepted as well.
    """
    def _pmf(self, k, n, p):
        # Binomial-style pmf with a generalized (gamma-function) coefficient.
        return generalized_binom_coef(n, k) * p ** (k) * (1-p)**(n-k)
def naive_estimation(observed, p_hat=0.1):
    """Method-of-moments estimate of the lognormal (mu, sigma) behind thinned counts.

    observed: array of thinned observations.
    p_hat: assumed thinning (keep) probability.
    Returns the pair (mu_hat, sigma_hat).
    """
    sample_mean = observed.mean()
    sample_var = observed.var()
    # Both estimators share the same log-term.
    log_term = np.log(sample_var / sample_mean ** 2 - (1 - p_hat) / sample_mean + 1)
    sigma_hat = np.sqrt(log_term)
    mu_hat = np.log(sample_mean) - np.log(p_hat) - 0.5 * log_term
    return mu_hat, sigma_hat
def generate_moment_mat(size=2):
    """Design matrix mapping (mu, sigma^2) onto lognormal log-moments.

    Row k (1-based) is [k, k**2 / 2], since log E[X^k] = k*mu + k^2*sigma^2/2.
    """
    orders = np.arange(1, size + 1, dtype=float)
    return np.column_stack((orders, orders ** 2 / 2))
def get_observed_moments(data, order=2):
    """Return the first `order` raw sample moments of `data` as a column vector.

    Entry i holds mean(data**(i+1)); result has shape (order, 1).
    """
    # NOTE: the original body continued after this return with an ad-hoc
    # correction of the second moment — that code was unreachable (dead) and
    # has been removed; behavior is unchanged.
    return np.array([(data ** (i + 1)).mean() for i in range(order)]).reshape(-1, 1)
def linsys_estimation(data, p=0.1, order=2, lam_max=3):
    """Moment-based (mu, sigma) estimation by inverting the thinning moment map.

    Inverts the binomial-thinning moment matrix A (order 2 or 3) to recover
    the log raw moments of the latent variable, then least-squares fits
    (mu, sigma^2) against the moment design matrix B.
    NOTE(review): `lam_max` is never used and the ridge weight `lam` is
    hard-coded to 0 below — confirm which was intended.
    Returns (logex, mu_hat, sigma_hat).
    """
    # Moment-conversion matrix for binomial thinning with keep-probability p.
    if order == 3:
        A = np.zeros((3,3))
        A[0, 0] = p
        A[1, 0] = -p*(p-1)
        A[1, 1] = p**2
        A[2, 0] = (2*p**3 - 3*p**2 + p)
        A[2, 1] = (3*p**2 - 3*p**3)
        A[2, 2] = p**3
    else:
        A = np.zeros((2,2))
        A[0, 0] = p
        A[1, 0] = -p*(p-1)
        A[1, 1] = p**2
    #lam = (1-(data < 1).mean())
    lam = 0  # ridge regularization currently disabled
    # Log of the recovered (un-thinned) raw moments.
    logex = np.log(np.linalg.inv(A).dot(get_observed_moments(data, order=order)))
    B = generate_moment_mat(size=order)
    #R = lam * np.array([1, 1]).reshape(1, -1)
    # (Ridge) least squares for [mu, sigma^2]; with lam = 0 this is plain OLS.
    sol = np.linalg.inv(B.T.dot(B) + lam*np.eye(2)).dot(B.T).dot(logex)
    #sol = np.linalg.inv(B.T.dot(B)).dot(B.T.dot(logex) - R)
    return logex, sol[0, 0], np.sqrt(sol[1, 0])
mu = -8
sigma = 1.25
```
### Sample mu and sigma
```
np.sqrt(2)*np.sqrt(0.1)*0.9
0.65238058/np.sqrt(3.76735706)/np.sqrt(0.14083737)
parameter_mean = np.array([-1.22026592, 1.30844892])
cov = -.4
parameter_cov = np.array([[ 3.76735706, -0.65238058],
[-0.65238058, 0.14083737]])
sample = stats.multivariate_normal.rvs(mean=parameter_mean, cov=parameter_cov, size=1000)
parameter_mean = [-1, 1.4]
cov = -.4
parameter_cov = [[2, cov], [cov, 0.1]]
sample = stats.multivariate_normal.rvs(mean=parameter_mean, cov=parameter_cov, size=1000)
plt.scatter(sample[:, 0], sample[:, 1], s=1)
plt.xlabel('Sampled mean');
plt.ylabel('Sampled sigma')
plt.title('Prior distribution of parameters')
# values_mat = np.arange(0, alpha.shape[0]).reshape(1, -1).T.dot(np.arange(0, alpha.shape[0]).reshape(1, -1))
# (np.cov(weights_1.T) * values_mat).sum()
def estimate_permuted_distribution(observed, frac):
    """Dirichlet approximation to the permutation distribution of count data.

    observed: 1-D array of non-negative integer observations.
    frac: fraction of the pooled data assigned to one permutation group.
    Returns (dir_mean, dir_cov): mean vector and covariance matrix of the
    Dirichlet over category proportions with concentration `counts * frac`.
    """
    counts = np.bincount(observed)
    alpha = counts * frac
    # Tiny offset keeps zero-count categories from degenerating the Dirichlet.
    dir_mean = stats.dirichlet.mean(alpha + 1e-100)
    dir_var = stats.dirichlet.var(alpha + 1e-100)
    # NOTE(review): the off-diagonal terms below lack the usual 1/alpha_0**2
    # normalization of the Dirichlet covariance — confirm intended scaling.
    dir_cov = -alpha.reshape(-1, 1).dot(alpha.reshape(1, -1)) / (alpha.sum() + 1)
    # BUG FIX: the original `np.diag(dir_cov) = dir_var` is a SyntaxError
    # (cannot assign to a function call); write the variances in place instead.
    np.fill_diagonal(dir_cov, dir_var)
    return dir_mean, dir_cov
stats.dirichlet.var(alpha+1e-10)
alpha
params_1 = [1, 1]
lognorm_data_1 = stats.lognorm.rvs(
s=params_1[1],
scale=np.exp(params_1[0]),
size=2000)
data_1 = \
toy_rv().rvs(
n=lognorm_data_1,
p=0.1)
params_2 = [-1, 1]
lognorm_data_2 = stats.lognorm.rvs(
s=params_2[1],
scale=np.exp(params_2[0]),
size=2000)
data_2 = \
toy_rv().rvs(
n=lognorm_data_2,
p=0.1)
num_permutation = 50000
all_data = np.concatenate([data_1, data_2])
alpha.shape
%%time
alpha = np.bincount(all_data)/2
values = np.tile(np.arange(0, alpha.shape[0]).reshape(1, -1), (num_permutation, 1))
weights = stats.gamma.rvs(alpha+1e-5, size=(num_permutation, alpha.shape[0]))
second_moments = ((weights) * values**2).sum(axis=1)
first_moments = ((weights) * values).sum(axis=1)
var = second_moments - first_moments**2
%%time
diff_means = np.zeros(num_permutation)
alpha = np.bincount(all_data)/2
values = np.tile(np.arange(0, alpha.shape[0]).reshape(1, -1), (num_permutation, 1))
weights = stats.dirichlet.rvs(alpha + 1e-11, size=num_permutation)
second_moments = ((weights) * values**2).sum(axis=1)
first_moments = ((weights) * values).sum(axis=1)
var = second_moments - first_moments**2
%%time
first_moments_naive = np.zeros(num_permutation)
second_moments_naive = np.zeros(num_permutation)
for i in range(num_permutation):
perm_idx = np.random.permutation(all_data.shape[0])
group_1 = np.random.choice(all_data, size=int(all_data.shape[0]/2), replace=True)
first_moments_naive[i] = group_1.mean()
second_moments_naive[i] = (group_1**2).mean()
s, loc, scale = stats.lognorm.fit(second_moments, floc=0)
sns.distplot(second_moments_naive, bins=30)
sns.distplot(second_moments, bins=30)
sample = np.random.choice(second_moments, 100)
plt.scatter(sample, stats.lognorm.pdf(sample, s=s, loc=loc, scale=scale), s=10)
s, loc, scale = stats.lognorm.fit(first_moments, floc=0)
sns.distplot(first_moments_naive, bins=30)
sns.distplot(first_moments, bins=30)
sample = np.random.choice(first_moments, 100)
plt.scatter(sample, stats.lognorm.pdf(sample, s=s, loc=loc, scale=scale), s=10)
diff_means
all_data.shape
pd.Series(data).value_counts()
# The old fashioned way
mean_list = []
var_list = []
logmean_list = []
logvar = []
datas = []
for trial in range(1000):
params = stats.multivariate_normal.rvs(mean=parameter_mean, cov=parameter_cov, size=1)
mu = params[0]
sigma = params[1]
lognorm_data = stats.lognorm.rvs(
s=sigma,
scale=np.exp(mu),
size=5000)
data = \
toy_rv().rvs(
n=lognorm_data,
p=0.1)
# if data.max() == 1:
# continue
datas.append(data)
#logex, m, v = linsys_estimation(data, order=2)
m_naive, v_naive = naive_estimation(data)
mean_list.append(m_naive)
var_list.append(v_naive)
logmean_list.append(np.exp(logex[0,0]))
logvar.append(np.exp(logex[1,0]) - np.exp(logex[0,0])**2)
plugin_means = [x.mean() for x in datas]
plugin_vars = [x.var() for x in datas]
len(plugin_means)
plt.scatter(plugin_means, plugin_vars, s=6)
plt.plot([0, 1], [0, 1])
plt.xlim(0, 1)
plt.ylim(0, 1.5)
plt.scatter(plugin_means, plugin_vars)
plt.plot([0, 1.2], [0, 1.2])
stats.pearsonr(
np.array(mean_list)[~np.isnan(np.array(mean_list))],
np.array(var_list)[~np.isnan(np.array(var_list))])
np.where(np.array(mean_list) <-3)[0]
mean_list[67]
var_list[67]
plt.hist(datas[67])
np.array(mean_list).mean()
np.sqrt(np.array(var_list)).mean()
np.cov(mean_list, np.sqrt(np.array(var_list)).T)
plt.scatter(mean_list, np.array(var_list), s=3)
plt.scatter(sample[:, 0], sample[:, 1], s=3)
plt.title('Simulated mean-variance relationship')
plt.xlabel('Estimated mean');
plt.ylabel('Estimated variance');
plt.xlim(-7,5, 7.5);
plt.ylim(0.4, 2.5)
```
### Keep sigma constant
```
mean_list = []
var_list = []
naive_mean_list = []
naive_var_list = []
logmean_list = []
logvar = []
datas = []
mus = np.arange(-3, 2, 0.05)
for mu in mus:
print(mu)
lognorm_data = stats.lognorm.rvs(
s=sigma,
scale=np.exp(mu),
size=1000)
data = \
toy_rv().rvs(
n=lognorm_data,
p=0.1)
#if data.sum() == 0:
#continue
datas.append(data)
logex, m, v = linsys_estimation(data, order=2)
m_naive, v_naive = naive_estimation(data)
mean_list.append(m)
var_list.append(v)
naive_mean_list.append(m_naive)
naive_var_list.append(v_naive)
logmean_list.append(np.exp(logex[0,0]))
logvar.append(np.exp(logex[1,0]) - np.exp(logex[0,0])**2)
logmeans = np.exp(mus + sigma**2/2)
logvars = (np.exp(sigma**2) - 1)*np.exp(2*mus + sigma**2)
#plt.scatter(mean_list, var_list)
plt.figure(figsize=(8, 10))
plt.subplot(2, 1, 1)
plt.scatter(naive_mean_list, naive_var_list)
plt.scatter(mus, np.ones(mus.shape[0])*sigma, s=5)
plt.xlabel('Estimated mean');
plt.ylabel('Estimated variance');
plt.subplot(2, 1, 2)
plt.scatter(naive_mean_list, 0.1*np.array([(x**2).mean() - x.mean() for x in datas]), s=20)
plt.plot([-6, 2], [0, 0])
plt.ylim(-0.01, 0.05)
plt.xlabel('Estimated mean');
plt.ylabel('Difference between \n first and second moments');
m_naive
v_naive
m
v
plt.scatter(mean_list, var_list)
plt.scatter(mean_list, var_list)
plt.scatter(mean_list, var_list)
stats.pearsonr(mean_list, var_list)
logex
np.log(get_observed_moments(lognorm_data, order=3))
plt.figure(figsize=(10, 3))
plt.subplot(1, 2, 1);
plt.hist(lognorm_data, bins=50);
plt.subplot(1, 2, 2);
plt.hist(data, bins=50);
logex = np.log(np.linalg.inv(A).dot(get_observed_moments(data, order=3)))
np.array([(x**2).mean() - x.mean() for x in datas]).shape
logmeans.shape
#plt.scatter([x.mean() for x in datas], [(x**2).mean() for x in datas])
plt.scatter(logmeans, 10*np.array([(x**2).mean() - x.mean() for x in datas]), s=50)
#plt.xlim(0, 2);
#plt.ylim(-0.001, 0.01)
#plt.plot([0, 1], [0, 1])
#plt.ylim(0, 0.05);plt.xlim(0, 0.05)
plt.scatter(mus, logmean_list)
#plt.plot([-4, 2], [0.75, 0.75])
plt.hist(datas[0], bins=50);
plt.scatter(logmean_list, logvar, s=5)
plt.scatter(logmeans, logvars, s=5)
#plt.scatter(logmeans, 20*np.array([(x**2).mean() - x.mean() for x in datas]), s=20)
# #plt.scatter(logmeans, [(x**2).mean() - x.mean() for x in datas], s=5)
plt.xlim(0, 10);
plt.ylim(0, 0.5)
plt.scatter(mus, mean_list)
stats.linregress(logmeans, logmean_list)
plt.plot(mus, logmeans)
logex
np.log(get_observed_moments(lognorm_data, order=3))
def generate_moment_mat(size=2):
    """Moment design matrix: row k (1-based) is [k, k^2 / 2]."""
    ks = 1.0 + np.arange(size)
    return np.stack([ks, 0.5 * ks ** 2], axis=1)
def get_observed_moments(data, order=2):
    """First `order` raw sample moments of `data`, shaped (order, 1)."""
    moments = [np.mean(data ** k) for k in range(1, order + 1)]
    return np.array(moments).reshape(-1, 1)
def linsys_estimation(data, p=0.1, order=2, lam=0.1):
    """Variant of linsys_estimation with an explicit penalty term R.

    Same moment-inversion scheme as the earlier version, but returns the
    recovered raw moments exp(logex) instead of their logs.
    Returns (exp(logex), mu_hat, sigma_hat).
    """
    # Moment-conversion matrix for binomial thinning with keep-probability p.
    if order == 3:
        A = np.zeros((3,3))
        A[0, 0] = p
        A[1, 0] = -p*(p-1)
        A[1, 1] = p**2
        A[2, 0] = (2*p**3 - 3*p**2 + p)
        A[2, 1] = (3*p**2 - 3*p**3)
        A[2, 2] = p**3
    else:
        A = np.zeros((2,2))
        A[0, 0] = p
        A[1, 0] = -p*(p-1)
        A[1, 1] = p**2
    # Log of the recovered (un-thinned) raw moments.
    logex = np.log(np.linalg.inv(A).dot(get_observed_moments(data, order=order)))
    B = generate_moment_mat(size=order)
    R = lam * np.array([1, -1]).reshape(1, -1)
    #sol = np.linalg.inv(B.T.dot(B) + R.T.dot(R)).dot(B.T).dot(logex)
    # NOTE(review): B.T.dot(logex) has shape (2, 1) while R has shape (1, 2),
    # so the subtraction broadcasts to a (2, 2) matrix and sol[:, 0] is only
    # one column of it — confirm R was meant to be reshaped to (-1, 1).
    sol = np.linalg.inv(B.T.dot(B)).dot(B.T.dot(logex) - R)
    return np.exp(logex), sol[0, 0], np.sqrt(sol[1, 0])
linsys_estimation(data, order=3)
n = 5
p = 0.1
stats.binom.expect(lambda x: x**3, args=(n,p))
n**2*(p**2) - n*p*(p-1)
n**3*p**3 + n**2*(3*p**2 - 3*p**3) + n*(2*p**3 - 3*p**2 + p)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_excel('Tips.xlsx')
df
df.drop(244,inplace = True)
```
**1.What is the overall average tip?**
```
df['tip'].mean()
```
**2.Get a numerical summary for 'tip' - are the median and mean very different? What does this tell you about the field?**
```
df['tip'].describe()
```
There is no large difference between the mean and the median, so the data is not strongly skewed.
**3.Prepare a boxplot for 'tip', are there any outliers?**
```
sns.boxplot(df['tip'])
plt.show()
```
Yes, there are outliers above six.
**4.Prepare a boxplot for 'total_bill', are there any outliers?**
```
sns.boxplot(df['total_bill'])
plt.show()
```
Yes,there are outliers after 40.
**5.Gender: what is the percent of females in the data?**
```
df.sex.value_counts(normalize = True)
```
The percentage of females in the data is about 35.66%.
**6.Prepare a bar plot with the bars representing the percentage of records for each gender.**
```
df.sex.value_counts(normalize = True).plot.bar()
plt.show()
```
**7.Does the average tip differ by gender? Does one gender tip more than the other?**
```
df.groupby('sex')['tip'].mean().plot.bar()
plt.plot()
```
yes,male gives slightly more tips than the female .
**8.Does the average tip differ by the time of day?**
```
df.groupby('time')['tip'].mean()
```
yes, people in dinner gives more tip than the people in lunch.
**9.Does the average tip differ by size (number of people at the table)?**
```
df.groupby('size')['tip'].mean()
```
yes,more number of people increase in the tips.
**10.Do smokers tip more than non-smokers?**
```
df.groupby('smoker')['tip'].sum()
```
No,non-smoker gives more tips than the smoker.
**11.Gender vs. smoker/non-smoker and tip size - create a 2 by 2 and get the average tip size. Which group tips the most?**
```
df.groupby(['sex','smoker'])['size'].mean().plot.bar()
plt.show()
```
male who are non-smoker give more tip than others
**12.Create a new metric called 'pct_tip' = tip/ total_bill - this would be percent tip give, and should be a better measure of the tipping behaviour.**
```
df['pct_tip'] = df['tip'] / df['total_bill'] #tips.insert(2,"pct_tip",tips.tip/tips.total_bill)
df
```
**13.Does pct_tip differ by gender? Does one gender tip more than the other?**
```
df.groupby('sex')['pct_tip'].sum()
```
Yes, it differs; males give a higher tip percentage than females.
**14.Does pct_tip differ by size (number of people at the table)?**
```
df.groupby('size')['pct_tip'].sum()
```
yes,it differs table with size 2 has given more pct_tip.
**15.Make the gender vs. smoker view using pct_tip - does your inference change?**
```
df.groupby(['sex','smoker'])['pct_tip'].sum().plot.bar()
plt.show()
```
No,there is no change in our inference
**16.Make a scatter plot of total_bill vs. tip.**
```
plt.scatter(x= df.total_bill, y = df.tip)
plt.xlabel('total_bill')
plt.ylabel('tip')
plt.show()
```
**17.Make a scatter plot of total_bill vs. pct_tip.**
```
plt.scatter(x= df.total_bill, y = df.pct_tip)
plt.xlabel('total_bill')
plt.ylabel('pct_tip')
plt.show()
```
| github_jupyter |
# [NTDS'19] assignment 2: learning with graphs — solution
[ntds'19]: https://github.com/mdeff/ntds_2019
[Clément Vignac](https://people.epfl.ch/clement.vignac), [EPFL LTS4](https://lts4.epfl.ch) and
[Guillermo Ortiz Jiménez](https://gortizji.github.io), [EPFL LTS4](https://lts4.epfl.ch).
## Students
* Team: `<your team number>`
* Students: `<your name>` (for the individual submission) or `<the name of all students in the team>` (for the team submission)
## Rules
Grading:
* The first deadline is for individual submissions. The second deadline is for the team submission.
* All team members will receive the same grade based on the team solution submitted on the second deadline.
* As a fallback, a team can ask for individual grading. In that case, solutions submitted on the first deadline are graded.
* Collaboration between team members is encouraged. No collaboration between teams is allowed.
Submission:
* Textual answers shall be short. Typically one to two sentences.
* Code has to be clean.
* You cannot import any other library than we imported.
* When submitting, the notebook is executed and the results are stored. I.e., if you open the notebook again it should show numerical results and plots. We won't be able to execute your notebooks.
* The notebook is re-executed from a blank state before submission. That is to be sure it is reproducible. You can click "Kernel" then "Restart Kernel and Run All Cells" in Jupyter.
## Objective
In this assignment you will experiment with the main concepts of spectral graph theory, as well as familiarize yourself with the main data science techniques for network data.
The assignment is made of three parts:
1. [Spectral Graph Theory](#sgt)
1. [Regularization on graphs with Graph Signal Processing](#gsp)
1. [Machine Learning on Graphs](#ml)
<a id='sgt'></a>
## Part I: Spectral Graph Theory
### Eigenvectors and eigenvalues
We will start by reviewing some of the main concepts in spectral graph theory and see some of its applications to dimensionality reduction and data clustering. To illustrate the main concepts we will use the standard two moon dataset.
```
import numpy as np
from scipy.spatial.distance import pdist, squareform
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
from pygsp.graphs import TwoMoons
G = TwoMoons(moontype='synthesized', N=2000)
X = G.coords
Y = G.labels.astype(int)
plt.scatter(X[:, 0], X[:, 1], c=Y)
plt.show()
```
#### Question 1: Graph construction
Build a similarity graph using the euclidean distance between data points.
**Note:** Use an RBF kernel to set the edge weights $w_{ij}=\exp(-||x_i- x_j||_2^2 / ~ 2 \sigma^2)$ of your adjacency and threshold the ones with the smallest magnitude.
```
def epsilon_similarity_graph(X: np.ndarray, sigma=1, epsilon=0):
    """Build a thresholded RBF similarity graph over the rows of X.

    X (n x d): coordinates of the n data points in R^d.
    sigma (float): width of the RBF kernel.
    epsilon (float): weights below this threshold are dropped.
    Return:
        adjacency (n x n ndarray): adjacency matrix of the graph (no self-loops).
    """
    pairwise = squareform(pdist(X))
    weights = np.exp(-pairwise ** 2 / (2 * sigma ** 2))
    # Sparsify weak edges, then remove self-loops.
    adjacency = np.where(weights < epsilon, 0.0, weights)
    np.fill_diagonal(adjacency, 0)
    return adjacency
adjacency = epsilon_similarity_graph(X, sigma=0.5, epsilon=0.1)
plt.spy(adjacency)
plt.show()
```
How do you choose `sigma`?
**`sigma` reflects a typical (spatial) distance between the points. We want the graph to be connected but we also want it to have two clusters that would correspond to our data. One possible good strategy would be to start with average distance and then reduce it up to the point where we start seeing two separated clusters.**
How do you choose the threshold `epsilon`?
**`epsilon` is a sparsity parameter. Epsilon should be reasonably low to keep the weights that have a meaningful distribution (it should be wide enough or simply diverse in terms of values). A good strategy for choosing `epsilon` would be to plot the distribution of the weights and tune it accordingly. In this case, the value of `epsilon` should around `0.7`.**
#### Question 2: Laplacian
Build the combinatorial and normalized graph laplacians for this dataset.
```
def compute_laplacian(adjacency: np.ndarray, normalize: bool):
    """Combinatorial or symmetric normalized graph Laplacian.

    normalize=False: L = D - A.
    normalize=True:  L_norm = D^{-1/2} (D - A) D^{-1/2}, with degrees clipped
    below at 1 so isolated nodes do not cause division by zero.
    Return:
        L (n x n ndarray): combinatorial or symmetric normalized Laplacian.
    """
    degrees = adjacency.sum(axis=1)
    combinatorial = np.diag(degrees) - adjacency
    if not normalize:
        return combinatorial
    inv_sqrt_deg = np.clip(degrees, 1, None) ** (-1 / 2)
    # Row/column scaling by D^{-1/2} is equivalent to D_norm @ L @ D_norm.
    return combinatorial * inv_sqrt_deg[:, None] * inv_sqrt_deg[None, :]
laplacian_comb = compute_laplacian(adjacency, normalize=False)
laplacian_norm = compute_laplacian(adjacency, normalize=True)
```
#### Question 3: Eigendecomposition
For both Laplacian matrices, compute the eigendecomposition $L = U^\top \Lambda U$, where the columns $u_k \in \mathbb{R}^N$ of $U = [u_1, \dots, u_N] \in \mathbb{R}^{N \times N}$ are the eigenvectors and the diagonal elements $\lambda_k = \Lambda_{kk}$ are the corresponding eigenvalues. Make sure that the eigenvalues are ordered, i.e., $\lambda_1 \leq \lambda_2 \leq \dots \leq \lambda_N$.
Justify your choice of a solver for the eigendecomposition.
**We need a solver that works with real symmetric matrices. Also, we want the values to be sorted. `np.linalg.eigh` is a good choice since it satisfies both conditions.**
```
def spectral_decomposition(laplacian: np.ndarray):
    """Eigendecomposition of a symmetric Laplacian.

    Return:
        lamb (np.array): eigenvalues in ascending order (guaranteed by eigh
        for Hermitian input).
        U (np.ndarray): matrix whose columns are the matching eigenvectors.
    """
    lamb, U = np.linalg.eigh(laplacian)
    return lamb, U
lamb_comb, U_comb = spectral_decomposition(laplacian_comb)
lamb_norm, U_norm = spectral_decomposition(laplacian_norm)
```
#### Question 4: Interpretation
We plot the sorted eigenvalues as a function of their index:
```
plt.figure(figsize=(12,5))
plt.subplot(121)
plt.plot(lamb_comb)
plt.xlabel('Index')
plt.ylabel('Eigenvalue')
plt.title('Eigenvalues $L_{comb}$')
plt.subplot(122)
plt.plot(lamb_norm)
plt.xlabel('Index')
plt.ylabel('Eigenvalue')
plt.title('Eigenvalues $L_{norm}$')
plt.show()
```
What is the lowest eigenvalue $\lambda_0$ and the corresponding eigenvector $u_0$? Answer for both Laplacian matrices.
**For both, combinatorial and normalized, Laplacian matrices, the lowest eigenvalues $\lambda_0$ are 0 (technically, they are not exactly 0 due to a numerical error).**
**Here is a good detailed answer regarding eigenvectors (team 1):**
- <b>The eigenvector $u_{0}$ for the combinatorial laplacian:</b><br>
by the eigenvalue equation we have: $$L u_{0} = \lambda_{0} u_{0}$$
since $\lambda_{0}=0$, then: $$L u_{0} = 0$$
multiply by $u_{0}^{T}$ we get: $$u_{0}^{T} L u_{0} = u_{0}^{T} * 0 = 0$$
but the quadratic form of the combinatorial laplacian is given by: $$u_{0}^{T} L u_{0} = \sum_{(i, j) \in E} w_{i,j}(u_{0}[i] - u_{0}[j])^2$$
hence: $$\sum_{(i, j) \in E} w_{i,j}(u_{0}[i] - u_{0}[j])^2 = 0$$
for this to hold, $u_{0}[i] = u_{0}[j]$ for every edge $(i, j) \in E$. Then, $$u_{0} = c \begin{bmatrix} 1 \\ 1 \\ \vdots \\ 1 \end{bmatrix}$$
Therefore, the value of $u_{0}$ is the unit vector $e$.
- <b>The eigenvector $u_{0}^{'}$ for the normalized laplacian:</b><br>
if we follow the same argument as before, we have that $$(u_{0}^{'})^{T} L_{n} u_{0}^{'} = 0$$
since $$L_n = D^{-\frac{1}{2}} L D^{-\frac{1}{2}}$$
we get: $$(u_{0}^{'})^{T} D^{-\frac{1}{2}} L D^{-\frac{1}{2}} u_{0}^{'} = 0$$
as we shown this yields that $D^{-\frac{1}{2}} u_{0}^{'}$ is a unit vector $e$.<br>
Therefore, $u_{0}^{'} = D^{\frac{1}{2}} e$
When filtering a signal or computing polynomials, which Laplacian provides the best numerical stability? Justify your answer.
**Normalized. Eigenvalues of normalized Laplacian are bounded between 0 and 2 while eigenvalues of combinatorial Laplacian are unbounded and have values proportional to the size of a graph which might make further computations numerically unstable.**
#### Question 5: Connected components
The eigendecomposition provides an easy way to compute the number of connected components in the graph. Fill the following function:
```
def compute_number_connected_components(lamb: np.array, threshold: float):
    """Number of connected components = multiplicity of (near-)zero eigenvalues.

    lamb: eigenvalues of a graph Laplacian.
    threshold: numerical tolerance below which an eigenvalue counts as zero.
    Return:
        n_components (int): number of connected components.
    """
    return int(np.sum(lamb <= threshold))
```
Tune the parameters $\epsilon$ and $\sigma$ of the similarity graph so that the graph is connected. Otherwise, clustering would be too simple!
```
print(compute_number_connected_components(lamb_norm, threshold=1e-12))
```
### Spectral clustering
Let us now see one application of spectral graph theory to clustering the two moon dataset.
#### Question 6: Baseline
As a baseline, let us first see how the simplest clustering algorithm, K-means, performs on this dataset. Use K-means to assign a cluster to each point.
```
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=2)
y_pred = kmeans.fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.show()
```
K-means cannot find a good solution to this problem. Why?
**K-means expects clusters that are convex and isotropic (i.e. roughly ball-shaped) and therefore performs poorly with the elongated shapes present in the dataset.**
#### Question 7: Spectral clustering
As opposed to naive K-means, spectral clustering doesn't operate on the input space but on the eigenspace of the graph that represents the data. Implement spectral clustering. You can use
[this tutorial](http://lasa.epfl.ch/teaching/lectures/ML_Phd/Notes/tutoSC.pdf).
```
class SpectralClustering():
    """Spectral clustering: K-means on the first Laplacian eigenvectors.

    n_classes: number of clusters.
    normalize: if True, use the symmetric normalized Laplacian and
        row-normalize the spectral embedding (Ng-Jordan-Weiss variant).
    """
    def __init__(self, n_classes: int, normalize: bool):
        self.n_classes = n_classes
        self.normalize = normalize
        self.laplacian = None  # filled in by fit_predict
        self.e = None          # eigenvalues
        self.U = None          # eigenvectors
        self.clustering_method = KMeans(n_classes)

    def fit_predict(self, adjacency):
        """Cluster the nodes of the graph described by `adjacency`.

        Works for both the combinatorial and the symmetric normalized
        spectral clustering.
        Return:
            y_pred (np.ndarray): cluster assignments.
        """
        self.laplacian = compute_laplacian(adjacency, self.normalize)
        self.e, self.U = spectral_decomposition(self.laplacian)
        # (The original also computed the number of connected components here
        # but never used it; that dead statement has been removed.)
        # Embed each node with the first n_classes eigenvectors.
        first_columns = self.U[:, :self.n_classes]
        if self.normalize:
            # Row-normalize the embedding so K-means sees directions only.
            first_columns = first_columns / np.linalg.norm(first_columns, axis=1)[:, None]
        y_pred = self.clustering_method.fit_predict(first_columns)
        return y_pred
print("Connected components:", compute_number_connected_components(lamb_norm, threshold=1e-12))
spectral_clustering = SpectralClustering(n_classes=2, normalize=True)
y_pred = spectral_clustering.fit_predict(adjacency)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.show()
```
#### Question 8: On your dataset
Can you think of another 2D dataset in which k-means would badly perform, but spectral clustering would not?
Construct it!
For this question you can import any dataset of your choice, for example from `sklearn.datasets` or `pygsp.graphs`, but you can also get creative and define something of your own. First, create and plot the dataset.
```
# borrowed from team #05 submission
def Smiley(N = 2000):
    """Return a 2D dataset of ~N points shaped like a smiley with 4 classes.

    Each part (head ring, two eyes, mouth arc) is sampled in polar
    coordinates — a uniform radius band and angle range — then converted to
    Cartesian points. Labels: 1 = head, 2/3 = eyes, 4 = mouth.
    """
    # Head: thin ring of radius ~1.75-2, full circle (2/5 of the points)
    length = np.random.uniform(1.75, 2, size=int(2*N/5))
    angle = np.pi * np.random.uniform(0, 2, size=int(2*N/5))
    X_head = np.stack([length * np.cos(angle), length * np.sin(angle)], axis=1)
    Y_head = np.ones(X_head.shape[0])
    # eye 1: small annulus centred at (-0.75, 0.75)
    length = np.random.uniform(0.1, 0.4, size=int(N/5))
    angle = np.pi * np.random.uniform(0, 2, size=int(N/5))
    X_eye1 = np.stack([-0.75 +length * np.cos(angle), 0.75 +length * np.sin(angle)], axis=1)
    Y_eye1 = 2*np.ones(X_eye1.shape[0])
    # eye 2: mirror of eye 1 at (0.75, 0.75)
    length = np.random.uniform(0.1, 0.4, size=int(N/5))
    angle = np.pi * np.random.uniform(0, 2, size=int(N/5))
    X_eye2 = np.stack([0.75 +length * np.cos(angle), 0.75 +length * np.sin(angle)], axis=1)
    Y_eye2 = 3*np.ones(X_eye2.shape[0])
    # mouth: lower arc (negative angle range), shifted down by 0.2
    length = np.random.uniform(1, 1.25, size=int(N/5))
    angle = np.pi * np.random.uniform(-0.15, -0.85, size=int(N/5))
    X_mouth = np.stack([length * np.cos(angle), -0.2 +length * np.sin(angle)], axis=1)
    Y_mouth = 4*np.ones(X_mouth.shape[0])
    # Stack the four parts into one point cloud with matching labels.
    X = np.concatenate([X_head, X_eye1, X_eye2, X_mouth], axis=0)
    Y = np.concatenate([Y_head, Y_eye1, Y_eye2, Y_mouth], axis=0)
    return X, Y
X_s, Y_s = Smiley(1000)
fig, ax = plt.subplots(1,1,figsize=(5,5))
ax.scatter(X_s[:, 0], X_s[:, 1], c=Y_s, cmap='coolwarm')
ax.set_title('Smiley dataset (4 cluster)')
plt.show()
```
Run K-means:
```
kmeans = KMeans(n_clusters=4)
y_pred_s = kmeans.fit_predict(X_s)
fig, ax = plt.subplots(1,1,figsize=(5,5))
plt.scatter(X_s[:, 0], X_s[:, 1], c=y_pred_s, cmap='coolwarm')
plt.show()
```
Create the similarity graph, and run spectral clustering with both the combinatorial and normalized Laplacian matrices:
```
adjacency_s = epsilon_similarity_graph(X_s, sigma=0.3, epsilon=0.5)
lamb_norm_s, _ = spectral_decomposition(compute_laplacian(adjacency_s, normalize=True))
print("Connected components:", compute_number_connected_components(lamb_norm_s, threshold=1e-12))
# normalized
spectral_clustering_n = SpectralClustering(n_classes=4, normalize=True)
y_pred_s_norm = spectral_clustering.fit_predict(adjacency_s)
# non normalized
spectral_clustering = SpectralClustering(n_classes=4, normalize=False)
y_pred_s = spectral_clustering.fit_predict(adjacency_s)
fig, ax = plt.subplots(1,2,figsize=(12,5))
ax[0].scatter(X_s[:, 0], X_s[:, 1], c=y_pred_s, cmap='coolwarm')
ax[0].set_title('Spectral clustering - non normalized')
ax[1].scatter(X_s[:, 0], X_s[:, 1], c=y_pred_s_norm, cmap='coolwarm')
ax[1].set_title('Spectral clustering - normalized')
plt.show()
```
**K-means also performs poorly on this dataset because of the non-convex/non-spherical clusters it contains. Spectral clustering works well regardless of normalization, provided the graph is constructed with the appropriate parameters $\sigma$ and $\epsilon$. The choice of those parameters is (again) crucial for spectral clustering to work as expected.**
### Dimensionality Reduction with Laplacian Eigenmaps
Most datasets are very high-dimensional, which means it can be very hard to understand their geometry. Fortunately, there exists multiple techniques that can help us to reduce the dimensionality of the data, and allow us to visualize it.
In this part of the assignment we will use MNIST to compare these techniques. Indeed, without dimensionality reduction it would be very difficult to answer questions like: are the different digits clustered together in different areas of space?
But first, let's load our dataset:
```
from utils import load_mnist
X_mnist, y_mnist = load_mnist()
classes = np.unique(y_mnist)
```
#### Question 9: Laplacian eigenmaps
Most dimensionality reduction algorithms are constructed such that some property of the dataset remains invariant in the lower dimensional representation. Before implementing laplacian eigenmaps, can you say what property of the data does this algorithm preserve?
Solution:
**Laplacian eigenmaps assume that the observations lie on a low-dimensional, possibly nonlinear manifold. They aim at preserving the proximity of points on the manifold.**
Implement a function that uses Laplacian eigenmaps to do dimensionality reduction.
Solution (from team 3):
```
def laplacian_eigenmaps(X: np.ndarray, dim: int, sigma: float, epsilon: float, normalize: bool):
    """Embed the rows of X in `dim` dimensions with Laplacian eigenmaps.

    Parameters
    ----------
    X : (n, d) data matrix, one observation per row.
    dim : target dimensionality of the embedding.
    sigma : kernel bandwidth used to build the similarity graph.
    epsilon : threshold of the epsilon-similarity graph.
    normalize : whether to use the normalized Laplacian.

    Returns
    -------
    coords (n x dim array): new coordinates for the data points.
    """
    adjacency = epsilon_similarity_graph(X, sigma, epsilon)
    laplacian = compute_laplacian(adjacency, normalize)
    lamb, U = spectral_decomposition(laplacian)
    # Number of connected components = number of zero eigenvalues; their
    # eigenvectors are (piecewise) constant and carry no geometric
    # information, so the embedding skips them.
    n_CC = compute_number_connected_components(lamb, threshold=1e-12)
    # Take exactly `dim` eigenvectors after the constant ones.  The previous
    # version used range(n_CC, n_CC + dim + 1), which returned dim + 1
    # columns and contradicted the documented (n, dim) output shape.
    coords = U[:, n_CC:n_CC + dim]
    return coords
```
Use this function to visualize MNIST in 2D. Feel free to play with the different parameters.
```
# 2-D Laplacian-eigenmaps embedding of MNIST, colored per digit class.
dim = 2
sigma = 2e3
epsilon = 0
normalize = True
X_2d = laplacian_eigenmaps(X_mnist, dim, sigma, epsilon, normalize)
for i in classes:
    mask = y_mnist == i
    plt.scatter(X_2d[mask, 0], X_2d[mask, 1], label=i)
plt.legend()
plt.title("Visualization of MNIST in 2d using Laplacian eigenmaps")
plt.show()
```
Visualize MNIST in 3D:
```
# Same embedding with three coordinates, rendered on a 3-D axes.
dim = 3
sigma = 2e3
epsilon = 0
normalize = True
X_3d = laplacian_eigenmaps(X_mnist, dim, sigma, epsilon, normalize)
fig = plt.figure()
# NOTE(review): Axes3D is not imported in this cell — presumably imported
# earlier (mpl_toolkits.mplot3d); confirm.
ax = Axes3D(fig)
for i in classes:
    mask = y_mnist == i
    ax.scatter(X_3d[mask, 0], X_3d[mask, 1], X_3d[mask, 2], label=i)
plt.legend()
plt.title("Visualization of MNIST in 3d using Laplacian eigenmaps")
plt.show()
```
#### Question 10: Comparison with other methods
We provide the visualization of MNIST with other methods:
```
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE, Isomap
# This cell can take a few minutes to run
run_this_cell = True
if run_this_cell:
    # In 2d: fit each embedding and scatter the coordinates per class.
    embeddings = [PCA(n_components=2, copy=True, whiten=True, tol=1e-5),
                  Isomap(n_components=2, n_neighbors=5),
                  TSNE(n_components=2)]
    for embedding in embeddings:
        X_embedded = embedding.fit_transform(X_mnist)
        fig = plt.figure()
        for i in classes:
            mask = y_mnist == i
            plt.scatter(X_embedded[mask, 0], X_embedded[mask, 1], label=i)
        plt.legend()
        plt.title('Embedding method: '+ type(embedding).__name__)
        plt.show()
    # In 3d: the same three methods with three components.
    embeddings = [PCA(n_components=3, copy=True, whiten=True, tol=1e-5),
                  Isomap(n_components=3, n_neighbors=5),
                  TSNE(n_components=3)]
    for embedding in embeddings:
        X_embedded = embedding.fit_transform(X_mnist)
        fig = plt.figure()
        ax = Axes3D(fig)
        for i in classes:
            mask = y_mnist == i
            ax.scatter(X_embedded[mask, 0], X_embedded[mask, 1], X_embedded[mask, 2], label=i)
        ax.legend()
        ax.title.set_text('Embedding method: '+ type(embedding).__name__)
        plt.show()
```
In a few words, what are the principles guiding the design of each method? Compare their results.
Solution (from team 3):
**PCA is a linear method that uses the $k$ largest singular values from the singular value decomposition (SVD) of the data matrix. These axes form the best linear subspace of dimension $k$ because the variance of the orthogonal projection of the data points is maximal on it.
It is the method of choice if the data can be summarized as _linear_ combinations of features.**
**Isomap is a non-linear method that starts with the conversion of the data matrix to a graph, then the shortest path matrix is computed, and the linear method _Multi-Dimensional Scaling_ method is applied on the shortest path matrix. The advantages of Isomap are that it is able to discover manifolds of arbitrary dimensionality and it is guaranteed to converge to the global optimal solution.
We can see that it produces a better separation of the classes than PCA on the MNIST dataset.**
**T-SNE is yet another non-linear method, which tries to circumvent the _crowding problem_, i.e. when a lot of data points are constrained in a small part of space. It does this by minimizing the divergence of distributions of the data points transformed into conditional probabilities and a measure of similarity between the new points in the sub-space $\mathbb{R}^k$. It usually produces better visualizations than the other methods thanks to its crowding-circumvention property, and we can see that it is the case here.**
<a id='gsp'></a>
## Part II: Regularization on graphs with Graph Signal Processing
In this part of the assignment we are going to familiarize ourselves with the main concepts in Graph Signal Processing and regularization on graphs in general. From now on, you can only use the following libraries as well as the functions that you implemented in the previous parts.
```
import pandas as pd
import numpy as np
from pygsp.graphs import Bunny
```
In this exercise we will use a nearest-neighbor graph constructed from the Stanford Bunny point cloud included in the PyGSP library.
```
# Load the Stanford Bunny graph and extract a dense adjacency matrix.
G = Bunny()
adjacency = np.asarray(G.W.todense())  # G.W is sparse; densify for numpy ops
n_nodes = adjacency.shape[0]
We will use the following function to plot our signals on this graph.
```
def plot_bunny(x=None, title='', vlim=(-0.03, 0.03)):
    """Scatter-plot the bunny point cloud, optionally colored by a graph signal.

    Parameters
    ----------
    x : optional signal (one value per node) used to color the points;
        when None the bare point cloud is drawn and no colorbar is added.
    title : title of the axes.
    vlim : (vmin, vmax) color limits.  A tuple default replaces the
        original mutable-list default argument; indexing is unchanged.
    """
    fig = plt.gcf()
    ax = plt.gca()
    # Reuse the current 3-D axes if one exists, otherwise create it.
    if not isinstance(ax, Axes3D):
        ax = plt.subplot(111, projection='3d')
    if x is not None:
        x = np.squeeze(x)
    p = ax.scatter(G.coords[:, 0], G.coords[:, 1], G.coords[:, 2], c=x, marker='o',
                   s=5, cmap='RdBu_r', vmin=vlim[0], vmax=vlim[1])
    ax.view_init(elev=-90, azim=90)
    ax.dist = 7
    ax.set_axis_off()
    ax.set_title(title)
    if x is not None:
        fig.colorbar(p)

plt.subplot(111, projection='3d')
plot_bunny()
```
#### Question 11: Graph frequencies
Let us start by constructing the normalized graph laplacians from the adjacency matrix and find its spectral decomposition.
```
# Normalized Laplacian of the bunny graph and its full eigendecomposition.
laplacian = compute_laplacian(adjacency, normalize=True)
lam, U = spectral_decomposition(laplacian)
```
Plot the eigenvalues.
```
# The sorted eigenvalues of the normalized Laplacian play the role of the
# graph's frequencies.
plt.figure(figsize=(6, 5))
plt.plot(lam)
plt.title('Eigenvalues $L_{norm}$')
plt.show()
```
To make things more clear we will plot some of its eigenvectors (0, 1, 3, 10, 100) as signals on the bunny graph.
```
# Plot a few eigenvectors as signals on the bunny: a larger index (larger
# eigenvalue) corresponds to faster spatial variation.
plt.figure(figsize=(18, 9))
plt.subplot(231, projection='3d')
plot_bunny(x=U[:,0], title='Eigenvector #0')
plt.subplot(232, projection='3d')
plot_bunny(x=U[:,1], title='Eigenvector #1')
plt.subplot(233, projection='3d')
plot_bunny(x=U[:,2], title='Eigenvector #2')
plt.subplot(234, projection='3d')
plot_bunny(x=U[:,3], title='Eigenvector #3')
plt.subplot(235, projection='3d')
plot_bunny(x=U[:,10], title='Eigenvector #10')
plt.subplot(236, projection='3d')
plot_bunny(x=U[:,100], title='Eigenvector #100')
```
What can you say in terms of the variation (smoothness) of these signals? How can the smoothness of a signal be measured?
Solution:
**These signals become less and less smooth as the corresponding eigenvalue increases. In general, the variation (the inverse of the smoothness) of a signal on a graph can be measured by the quadratic form of the Laplacian $x^T L x = \sum_{(i, j) \in \mathcal{E}} w_{i, j} \|x_i - x_j\|^2$. This quantity can also be seen as the squared norm of the graph gradient.**
#### Question 12: Graph Fourier Transform
Create a function to compute the Graph Fourier Transform (GFT) of a graph signal and its inverse.
**Note**: You can assume that you have internal access to the eigendecomposition (`U` and `lam`) of the laplacian.
```
def GFT(signal: np.ndarray):
    """Graph Fourier transform: coefficients of `signal` in the Laplacian
    eigenbasis U (module-level global)."""
    return np.matmul(U.T, signal)

def iGFT(fourier_coefficients: np.ndarray):
    """Inverse graph Fourier transform: synthesize the vertex-domain signal
    from its spectral coefficients."""
    return np.matmul(U, fourier_coefficients)
```
Now, let's create a graph signal:
```
# Smooth synthetic signal built from the vertex coordinates, normalized to
# unit norm, plus unit-norm Gaussian noise scaled by 0.3.
x = G.coords[:, 0] + G.coords[:, 1] + 3 * G.coords[:, 2]
x /= np.linalg.norm(x)
noise = np.random.randn(n_nodes)
noise /= np.linalg.norm(noise)
x_noisy = x + 0.3*noise
plot_bunny(x_noisy, vlim=[min(x_noisy), max(x_noisy)])
```
and plot its graph spectrum:
```
# Compare the graph spectrum of the clean and the noisy signal.
plt.figure(figsize=(10, 6))
plt.plot(lam, np.abs(GFT(x_noisy)), 'r.')
plt.plot(lam, np.abs(GFT(x)), 'g-')
plt.xlabel('$\lambda$')
plt.ylabel('GFT')
plt.legend(['$x_{noisy}$', '$x$'])
```
#### Question 13: Graph filters
We will try to extract the signal from the noise using graph filters. Let us start by creating three ideal graph filters.
```
# Ideal (brick-wall) filters: start from an all-pass response and zero out
# the rejected frequency bands.
ideal_lp = np.ones((n_nodes,))
ideal_bp = np.ones((n_nodes,))
ideal_hp = np.ones((n_nodes,))
ideal_lp[lam >= 0.1] = 0  # Low-pass filter with cut-off at lambda=0.1
ideal_bp[lam < 0.1] = 0  # Band-pass filter with cut-offs at lambda=0.1 and lambda=0.5
ideal_bp[lam > 0.5] = 0
ideal_hp[lam <= 1] = 0  # High-pass filter with cut-off at lambda=1
```
Additionally, create the ideal graph filter that implements the solution of Tikhonov regularization.
```
# Spectral response of the Tikhonov-regularized denoiser y = (I + aL)^{-1}x:
# h(lambda) = 1 / (1 + alpha*lambda), a smooth low-pass shape.  alpha is
# scaled so that alpha * lam_max < 1 (needed later for the ARMA iteration to
# converge).  The dead `ideal_tk = np.ones(...)` store that was immediately
# overwritten has been removed.
alpha = 0.99 / np.max(lam)
ideal_tk = 1 / (1 + alpha*lam)
```
Let's plot the spectral responses:
```
# Overlay the spectral responses of all four filters.
plt.plot(lam, ideal_lp, '-', label='LP')
plt.plot(lam, ideal_bp, '-', label='BP')
plt.plot(lam, ideal_hp, '-', label='HP')
plt.plot(lam, ideal_tk, '-', label='Tikhonov')
plt.xlabel('$\lambda$')
plt.ylabel('Spectral response')
plt.legend(loc='lower right')
```
Create a function to filter a signal given an ideal graph filter
```
def ideal_graph_filter(x: np.ndarray, spectral_response: np.ndarray):
    """Apply an ideal graph filter to the signal `x`.

    The signal is taken to the spectral domain, each coefficient is scaled
    by the corresponding entry of `spectral_response`, and the result is
    transformed back to the vertex domain.
    """
    spectrum = GFT(x)
    return iGFT(spectrum * spectral_response)
```
Let us visualize the results:
```
# Filter the noisy signal with each ideal response and visualize the results.
x_lp = ideal_graph_filter(x_noisy,ideal_lp)
x_bp = ideal_graph_filter(x_noisy,ideal_bp)
x_hp = ideal_graph_filter(x_noisy,ideal_hp)
x_tk = ideal_graph_filter(x_noisy,ideal_tk)
plt.figure(figsize=(18, 9))
plt.subplot(231, projection='3d')
plot_bunny(x=x, title='signal (true)', vlim=[min(x), max(x)])
plt.subplot(232, projection='3d')
plot_bunny(x=x_noisy, title='signal (noisy)', vlim=[min(x), max(x)])
plt.subplot(233, projection='3d')
plot_bunny(x=x_lp, title='Low-pass', vlim=[min(x_lp), max(x_lp)])
plt.subplot(234, projection='3d')
plot_bunny(x=x_bp, title='Band-pass', vlim=[min(x_bp), max(x_bp)])
plt.subplot(235, projection='3d')
plot_bunny(x=x_hp, title='High-pass', vlim=[min(x_hp), max(x_hp)])
plt.subplot(236, projection='3d')
plot_bunny(x=x_tk, title='Tikhonov denoised signal', vlim=[min(x_tk), max(x_tk)])
```
How would you link to the observations you made before about the spectral decomposition of the laplacian?
Also, judging from the results, what type of model prior do you think Tikhonov regularization enforces?
Solution: **Graph filtering is an operation that scales the coordinates of a graph signal in the basis given by the spectral decomposition of the laplacian. In this sense, a low pass filter only preserves the components associated with the smallest eigenvalues (and hence it smooths the signal), a high pass filter preserves the components associated with the largest eigenvalues (and hence it produces signals with rapid spatial variations), and a band pass filter preserves the components in between (and produces a mildly smooth signal).**
**Looking at the spectral response of the Tikhonov filter we see that it weights down the components associated with large eigenvalues, and preserves the low frequencies. We thus say that this is a low pass filter.**
#### Question 14: Polynomial graph filters
We have seen how we can use the GFT to define different filters that enhance or reduce certain frequency bands. However, to do so, we require an explicit eigendecomposition of the graph laplacian, which has a cost $O(n^3)$. For very large graphs this is very intense computationally. We will now see how we can obtain similar results by filtering the signals directly without resorting to an eigendecomposition.
The key idea is to use a polynomial of the graph laplacian to define a graph filter, i.e., $g(L)x=\sum_{k=1}^K \alpha_k L^k x$, and use the fact that the powers of a diagonalizable matrix can be written in terms of powers of its eigenvalues. This is
$$
L^k=(U\Lambda U^T)^k=U\Lambda^k U^T = U\begin{bmatrix}
(\lambda_0)^k &\dots & 0\\
\vdots & \ddots & \vdots\\
0 & \dots & (\lambda_N)^k
\end{bmatrix} U^T.
$$
This means that a polynomial of the graph laplacian acts independently on each eigenvalue of the graph, and has a frequency spectrum of
$$g(\lambda)=\sum_{k=1}^K \alpha_k \lambda^k.$$
Hence,
$$g(L)x=\sum_{k=1}^K \alpha_k L^k x=\sum_{k=1}^K \alpha_k U\Lambda^k U^T x=U \left(\sum_{k=1}^K \alpha_k\Lambda^k \right)U^T x=\operatorname{iGFT}\left(g(\Lambda)\operatorname{GFT}(x)\right).$$
With these ingredients, we have reduced the design of graph filters in the vertex domain to a regression task that approximates a given spectral response by a polynomial. There are multiple ways to do this, but in this assignment we will implement a very simple strategy based on [least-squares regression](https://en.wikipedia.org/wiki/Polynomial_regression#Matrix_form_and_calculation_of_estimates).
Implement a function to find the coefficients of a polynomial that approximates a given ideal filter.
**Hint:** `np.vander` and `np.linalg.lstsq`.
```
def fit_polynomial(lam: np.ndarray, order: int, spectral_response: np.ndarray):
    """Least-squares fit of a polynomial to an ideal spectral response.

    Fits a degree-(order-1) polynomial to `spectral_response` sampled at the
    graph frequencies `lam` and returns an array of `order` coefficients,
    lowest degree first.
    """
    vandermonde = np.vander(lam, order, increasing=True)
    solution, *_ = np.linalg.lstsq(vandermonde, spectral_response, rcond=None)
    return solution
```
Implement a function to compute the frequency response of that filter.
```
def polynomial_graph_filter_response(coeff: np.array, lam: np.ndarray):
    """Evaluate the polynomial with coefficients `coeff` at the frequencies `lam`.

    `coeff` is ordered lowest degree first.  Return an array of the same
    shape as lam; response[i] is the spectral response at frequency lam[i].
    """
    # Accumulate in a float array: the previous np.zeros_like(lam) inherited
    # an integer dtype from integer-valued `lam`, so the in-place addition of
    # float terms raised a casting error.  Horner's scheme also avoids
    # recomputing lam**n at every order.
    lam = np.asarray(lam, dtype=float)
    response = np.zeros_like(lam)
    for c in reversed(coeff):
        response = response * lam + c
    return response
```
Let us fit the Tikhonov ideal filter with several polynomials of different order.
```
# Compare the ideal Tikhonov response with least-squares polynomial fits of
# increasing order.
plt.plot(lam, ideal_tk)
orders = [1, 2, 3, 5, 10, 20]
for order in orders:
    coeff_tk = fit_polynomial(lam, order, ideal_tk)
    plt.plot(lam, polynomial_graph_filter_response(coeff_tk, lam))
plt.xlabel('$\lambda$')
plt.ylabel('Spectral response')
plt.legend(orders)
```
So far, we have only defined a way to compute the coefficients of our laplacian polynomial. Let us now compute our graph filter.
```
def polynomial_graph_filter(coeff: np.array, laplacian: np.ndarray):
    """Build the matrix polynomial sum_k coeff[k] * laplacian**k.

    `coeff` holds the coefficients from lowest to highest degree; the
    returned matrix is the graph filter, ready to be applied to any signal.
    """
    size = laplacian.shape[0]
    lap_power = np.eye(size)              # laplacian ** 0
    accumulator = coeff[0] * np.eye(size)
    for c in coeff[1:]:
        lap_power = laplacian @ lap_power  # next power of the laplacian
        accumulator = accumulator + c * lap_power
    return accumulator
```
Based on the previous plot, choose a filter order that achieves (in your opinion) a good tradeoff in terms of computational complexity and response accuracy.
```
# An order-3 polynomial already tracks the Tikhonov response at modest cost.
order = 3
coeff_tk = fit_polynomial(lam, order, ideal_tk)
g_tk = polynomial_graph_filter(coeff_tk, laplacian)
```
#### Question 15: ARMA filter
As you have seen in class, polynomial graph filters are only one of the ways in which you can approximate ideal graph filters.
In this sense, ARMA filters are a natural way to implement Tikhonov denoising on graphs.
Let us recall the general solution of the Tikhonov regularized denoising problem
$$y=(I+\alpha L)^{-1}x. $$
With a little bit of algebra manipulation we can rewrite this expression as
$$
y = -\alpha L y + x,
$$
from which we can derive the iterative algorithm
$$
y_k = -\alpha L y_{k-1} + x\qquad k=1,2,\dots
$$
which is guaranteed to converge as long as $\alpha \lambda_{max} < 1$.
Implement the ARMA version of Tikhonov regularization.
```
def arma_tikhonov(x: np.ndarray, laplacian: np.ndarray, alpha: float, max_iter=50):
    """Tikhonov denoising via the ARMA fixed-point iteration.

    Iterates y <- x - alpha * L @ y, which converges to (I + alpha L)^{-1} x
    provided alpha * lambda_max < 1.  Return an array of the same shape as x.
    """
    estimate = x
    for _ in range(max_iter):
        estimate = x - alpha * (laplacian @ estimate)
    return estimate
```
Filter the previous noisy graph signal with the polynomial and ARMA approximations of the ideal Tikhonov filter.
```
# Denoise with both approximations of the ideal Tikhonov filter.
x_tk_polynomial = g_tk @ x_noisy
x_tk_arma = arma_tikhonov(x_noisy, laplacian, alpha)
```
Let us compare with the previous version.
```
# Side-by-side comparison, all three sharing the ideal filter's color scale.
plt.figure(figsize=(18, 4))
plt.subplot(131, projection='3d')
plot_bunny(x_tk, title='Ideal filter', vlim=[min(x_tk), max(x_tk)])
plt.subplot(132, projection='3d')
plot_bunny(x_tk_polynomial, title='Polynomial filter', vlim=[min(x_tk), max(x_tk)])
plt.subplot(133, projection='3d')
plot_bunny(x_tk_arma, title='ARMA filter', vlim=[min(x_tk), max(x_tk)])
```
<a id='ml'></a>
## Part III: Machine Learning on Graphs
So far, we have only played with toy examples. Let us see the use of these tools in practice! In particular, let us see how we can use some graph filters to construct features to feed a classifier. For this part of the assignment we will import some extra packages.
```
import time
import networkx as nx
from sklearn.linear_model import LogisticRegression
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl import DGLGraph
from dgl.data.citation_graph import load_cora
np.random.seed(0)
torch.manual_seed(1)
```
We will use the CORA dataset and the citation graph that we created in Assignment 1. However, to simplify the next tasks we will directly use the preprocessed version of this dataset contained within the Deep Graph Library (DGL).
In this assignment, we will interpret CORA's features as multidimensional graph signals living on the citation graph.
Our task is to design a classifier that uses these features and the geometry of the graph to identify the type of paper each node represents.
The goal of this exercise is to do semi-supervised learning on graphs.
We assume that we know to which scientific field a small subset of the papers belongs (the ones contained in `train_mask`).
The goal is to predict to which field the other papers belong, using both the citation graph and the bag-of-word representation of each paper.
```
# Load the preprocessed CORA citation dataset shipped with DGL.
cora = load_cora()
features = torch.FloatTensor(cora.features)  # Feature vector for each paper
labels = torch.LongTensor(cora.labels)  # The field to which each paper belongs
train_mask = torch.BoolTensor(cora.train_mask)  # Mask of nodes selected for training
val_mask = torch.BoolTensor(cora.val_mask)  # Mask of nodes selected for validation
test_mask = torch.BoolTensor(cora.test_mask)  # Mask of nodes selected for testing
in_feats = features.shape[1]
n_classes = cora.num_labels
n_edges = cora.graph.number_of_edges()
graph = cora.graph
adjacency = np.asarray(nx.to_numpy_matrix(graph))  # dense adjacency of the citation graph
n_nodes = adjacency.shape[0]
```
For this exercise we will use the normalized laplacian.
```
# Normalized Laplacian of the citation graph and its eigendecomposition.
laplacian = compute_laplacian(adjacency, normalize=True)
lam, U = spectral_decomposition(laplacian)
lam_max = np.max(lam)
```
#### Question 16: Logistic regression
The simplest classification method consists in ignoring the citation graph and trying to classify the papers using only the features.
In this case, the problem is viewed as a standard classification task.
To train our classifier we will select a few nodes in our graph for training and fit a [logistic regression classifier](https://en.wikipedia.org/wiki/Logistic_regression) on them.
To avoid overfitting to the test set when we do hyperparameter tuning, we will also select a validation set.
And finally, we will test our classifier on the rest of the nodes.
**Hint:** use `sklearn.linear_model.LogisticRegression`.
```
# Baseline: ignore the graph and fit a logistic regression on the raw features.
train_features = features[train_mask]
train_labels = labels[train_mask]
val_features = features[val_mask]
val_labels = labels[val_mask]
test_features = features[test_mask]
test_labels = labels[test_mask]
log_reg = LogisticRegression(penalty='l2', multi_class="auto", solver="liblinear", C=1e4, fit_intercept=False, max_iter=1000)
log_reg.fit(train_features, train_labels)
train_acc = log_reg.score(train_features, train_labels)
val_acc = log_reg.score(val_features, val_labels)
test_acc = log_reg.score(test_features, test_labels)
print('Train accuracy {:.4f} | Validation accuracy {:.4f} | Test accuracy {:.4f}'.format(train_acc, val_acc, test_acc))
```
#### Question 17: Handcrafted graph filters
That's not a bad start! Now, let's try to improve a bit the results by taking into account the graph structure using tools from GSP. For this purpose, we will design a handcrafted filter that will be used to denoise the signal, before feeding it to a logistic regression.
However, before we start, what hypothesis can you make on the spectral properties of the denoised signal?
**We can make the assumption that papers that are connected are similar, therefore making the associated signal smooth. The denoised signal here should then be made mostly of lower frequencies, and we will use a low-pass filter to create new features.**
Based on this prior, design an ideal filter response that you believe could enhance important features of the graph.
**Note:** you just need to design one graph filter that we will apply to all features. Don't design a different filter for each feature.
**Note:** finding the right filter can be very challenging, don't worry if you can't find it. Just make sure you experiment with a few configurations and parameters.
```
# Low-pass prior: reuse the Tikhonov response h(lambda) = 1/(1+alpha*lambda)
# as the hand-crafted denoising filter for the CORA features.  The dead
# `ideal_filter = np.ones(...)` store that was immediately overwritten has
# been removed.
alpha = 0.99 / lam_max
ideal_filter = 1 / (1 + alpha*lam)
```
Choose a filter order to approximate your filter using laplacian polynomials.
```
# Approximate the ideal response with an order-5 Laplacian polynomial.
order = 5
coeff = fit_polynomial(lam, order, ideal_filter)
graph_filter = polynomial_graph_filter(coeff, laplacian)
```
Let's plot the frequency response of your spectral template and its polynomial approximation.
```
# Sanity check: overlay the ideal response and its polynomial approximation.
plt.plot(lam, ideal_filter)
plt.plot(lam, polynomial_graph_filter_response(coeff, lam))
plt.legend(['Ideal', 'Polynomial'])
plt.xlabel('$\lambda$')
plt.ylabel('Spectral response')
```
Now, let's create the new features.
```
# Low-pass filter every feature column, then split by the dataset masks.
filtered_features = graph_filter @ features.numpy()
train_features = filtered_features[train_mask,:]
train_labels = labels[train_mask]
val_features = filtered_features[val_mask,:]
val_labels = labels[val_mask]
test_features = filtered_features[test_mask,:]
test_labels = labels[test_mask]
```
Train another logistic regression classifier on the new features. Remember to play with the regularization parameters to achieve a well performing model.
```
# Same classifier as the baseline, now trained on the graph-filtered features.
log_reg = LogisticRegression(penalty='l2', multi_class="auto", solver="liblinear", C=1e4, fit_intercept=False, max_iter=1000)
log_reg.fit(train_features, train_labels)
```
Evaluate your model.
```
# Evaluate the model trained on the filtered features.
train_acc = log_reg.score(train_features, train_labels)
val_acc = log_reg.score(val_features, val_labels)
test_acc = log_reg.score(test_features, test_labels)
print('Train accuracy {:.4f} | Validation accuracy {:.4f} | Test accuracy {:.4f}'.format(train_acc, val_acc, test_acc))
```
#### Question 18: Graph convolutional networks
By now, you will probably have seen that it is challenging to find the right combination of spectral response, filter parameters and regularization method. And in most cases, this is a painstaking job. Wouldn't it be great to automate these tasks?
Fortunately, this is possible if we use the right tools! Specifically, we will see that Graph Convolutional Networks are a great framework to automatize the feature extraction method.
In this exercise, we will follow the same classification pipeline as above, but instead of hand-crafting our filter we will let `PyTorch` find the coefficients for us using gradient descent.
In this section, most of the code is already written. Try to understand it and to play with some parameters. It may be useful if you want to solve some learning task in your project.
We start by constructing a `LaplacianPolynomial` model in `DGL`. It computes the function: $f(X) = \sum_{i=0}^{k} \alpha_i L^i X \theta$ where the trainable parameters are the coefficients $\alpha_i$ and the matrix $\theta$. This function can be interpreted as a filtering of $X$ by $\sum_{i=0}^{k} \alpha_i L^i$ followed by a linear layer.
```
class LaplacianPolynomial(nn.Module):
    """Laplacian-polynomial graph filter followed by a linear layer.

    Computes sum_{i=0..k} pol_weights[i] * L^i @ (X @ logr_weights), where L
    is the graph Laplacian (symmetrically normalized when `norm` is True),
    applied through DGL message passing so L is never materialized.
    """

    def __init__(self,
                 in_feats: int,
                 out_feats: int,
                 k: int,
                 dropout_prob: float,
                 norm=True):
        super().__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self._k = k       # order of the Laplacian polynomial
        self._norm = norm  # apply D^(-1/2) . D^(-1/2) normalization
        # Contains the k+1 weights (orders 0..k) learned by the Laplacian polynomial
        self.pol_weights = nn.Parameter(torch.Tensor(self._k + 1))
        # Contains the weights learned by the logistic regression (without bias)
        self.logr_weights = nn.Parameter(torch.Tensor(in_feats, out_feats))
        self.dropout = nn.Dropout(p=dropout_prob)
        self.reset_parameters()

    def reset_parameters(self):
        """Reinitialize learnable parameters."""
        # Fixed seed makes the initialization reproducible across runs.
        torch.manual_seed(0)
        torch.nn.init.xavier_uniform_(self.logr_weights, gain=0.01)
        torch.nn.init.normal_(self.pol_weights, mean=0.0, std=1e-3)

    def forward(self, graph, feat):
        r"""Compute graph convolution.

        Notes
        -----
        * Input shape: :math:`(N, *, \text{in_feats})` where * means any number of additional
          dimensions, :math:`N` is the number of nodes.
        * Output shape: :math:`(N, *, \text{out_feats})` where all but the last dimension are
          the same shape as the input.

        Parameters
        ----------
        graph (DGLGraph) : The graph.
        feat (torch.Tensor): The input feature

        Returns
        -------
        (torch.Tensor) The output feature
        """
        feat = self.dropout(feat)
        graph = graph.local_var()  # scratch copy so ndata writes do not leak out
        # D^(-1/2), with degrees clamped to 1 to avoid division by zero
        norm = torch.pow(graph.in_degrees().float().clamp(min=1), -0.5)
        shp = norm.shape + (1,) * (feat.dim() - 1)
        norm = torch.reshape(norm, shp)
        # mult W first to reduce the feature size for aggregation.
        feat = torch.matmul(feat, self.logr_weights)
        # Order-0 term of the polynomial.
        result = self.pol_weights[0] * feat.clone()
        for i in range(1, self._k + 1):
            old_feat = feat.clone()
            if self._norm:
                feat = feat * norm
            graph.ndata['h'] = feat
            # Neighborhood sum == multiplication by the adjacency matrix A.
            # Feat is not modified in place
            graph.update_all(fn.copy_src(src='h', out='m'),
                             fn.sum(msg='m', out='h'))
            if self._norm:
                graph.ndata['h'] = graph.ndata['h'] * norm
            # feat <- old_feat - (normalized) aggregation, i.e. one more
            # application of L = I - D^(-1/2) A D^(-1/2) (or I - A unnormalized).
            feat = old_feat - graph.ndata['h']
            result += self.pol_weights[i] * feat
        return result

    def extra_repr(self):
        """Set the extra representation of the module,
        which will come into effect when printing the model.
        """
        summary = 'in={_in_feats}, out={_out_feats}'
        summary += ', normalization={_norm}'
        return summary.format(**self.__dict__)
```
Once we have our model ready, we just need to create a function that performs one step of our training loop, and another one that evaluates our model.
```
def train(model, g, features, labels, loss_fcn, train_mask, optimizer):
    """Perform one optimization step on the training nodes.

    Returns the scalar training-loss tensor of this step.
    """
    model.train()  # Activate dropout
    optimizer.zero_grad()
    predictions = model(g, features)
    training_loss = loss_fcn(predictions[train_mask], labels[train_mask])
    training_loss.backward()
    optimizer.step()
    return training_loss
def evaluate(model, g, features, labels, mask):
    """Return the classification accuracy of `model` on the nodes in `mask`."""
    model.eval()  # Deactivate dropout
    with torch.no_grad():
        scores = model(g, features)[mask]  # only score the evaluation set
        target = labels[mask]
        predicted = scores.argmax(dim=1)
        n_correct = (predicted == target).sum()
        return n_correct.item() * 1.0 / len(target)
```
Choose the training parameters.
```
# Training hyperparameters.
pol_order = 3  # order of the Laplacian polynomial
lr = 0.2
weight_decay = 5e-6  # l2 regularization applied by Adam
n_epochs = 1000
p_dropout = 0.8
```
And train the classifier end to end.
```
# Train the Laplacian-polynomial GCN end to end on the citation graph.
graph = DGLGraph(cora.graph)
model = LaplacianPolynomial(in_feats, n_classes, pol_order, p_dropout)
loss_fcn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=lr,
                             weight_decay=weight_decay)
dur = []  # per-epoch durations; the first 3 epochs are skipped as warm-up
for epoch in range(n_epochs):
    if epoch >= 3:
        t0 = time.time()
    loss = train(model, graph, features, labels, loss_fcn, train_mask, optimizer)
    if epoch >= 3:
        dur.append(time.time() - t0)
    acc = evaluate(model, graph, features, labels, val_mask)
    # NOTE(review): np.mean(dur) is NaN during the warm-up epochs (empty list).
    print("Epoch {:05d} | Time(s) {:.4f} | Train Loss {:.4f} | Val Accuracy {:.4f}". format(
        epoch, np.mean(dur), loss.item(), acc))
print()
acc = evaluate(model, graph, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
```
Trained this way our GCN based on polynomials of the laplacian is a black box. Fortunately, however, the only difference between this shallow model and our previous classifier is the way we chose the filter coefficients.
Let's see what the network learned.
Print the coefficients of the learned filter.
```
# The learned polynomial coefficients, lowest order first.
coeff_gcn = model.pol_weights.detach().numpy()
print(coeff_gcn)
```
To interpret the model we can plot the frequency response of the learned filter.
```
# Frequency response of the learned filter (log scale).
plt.semilogy(lam, np.abs(polynomial_graph_filter_response(coeff_gcn, lam)))
plt.xlabel('$\lambda$')
plt.ylabel('Spectral response (db)')
```
#### Question 19
As we said, the whole classification pipeline of the previous exercise is identical to the one we tried before: Graph filtering + Logistic regression. The only difference lies in the way we chose the filter coefficients. First we were choosing them manually, and now, we let `PyTorch` find them for us. However, if everything is correct we should be able to use this filter to construct new hand-crafted features and train a logistic regression model that achieves good accuracy on the training set. Let's do that!
Use the learned coefficients to train a new feature extractor:
```
# Materialize the learned polynomial as an explicit filter matrix.
graph_gcn_filter = polynomial_graph_filter(coeff_gcn, laplacian)
```
Let's extract the new features by filtering the data:
```
# Extract new features by filtering the data with the learned filter.
features_gcn = graph_gcn_filter @ features.numpy()
train_features_gcn = features_gcn[train_mask,:]
train_labels = labels[train_mask]
val_features_gcn = features_gcn[val_mask,:]
val_labels = labels[val_mask]
test_features_gcn = features_gcn[test_mask,:]
test_labels = labels[test_mask]
```
Train a logistic regression on these features:
```
# Two-step pipeline: logistic regression on the GCN-filtered features.
log_reg_gcn = LogisticRegression(penalty='l2', multi_class="auto", solver="liblinear", C=1e4, fit_intercept=False, max_iter=1000)
log_reg_gcn.fit(train_features_gcn, train_labels)
```
Finally, let's evaluate this model:
```
# Final evaluation of the two-step model.
train_acc = log_reg_gcn.score(train_features_gcn, train_labels)
val_acc = log_reg_gcn.score(val_features_gcn, val_labels)
test_acc = log_reg_gcn.score(test_features_gcn, test_labels)
print('Train accuracy {:.4f} | Validation accuracy {:.4f} | Test accuracy {:.4f}'.format(train_acc, val_acc, test_acc))
```
The performance of this model may not be exactly the same as the one obtained with Pytorch. What are the differences in the training procedure that can explain this gap?
Solution :
**The model is the same in the two cases: it is a logistic regression composed with a Laplacian polynomial. However, there are two differences:**
* **The main one is that in the Pytorch code, the filters and the logistic regression are trained simultaneously (it is called end-to-end learning). In the code that uses Scikit learn, the logistic regression is learned separately, in a two-step process.**
* **The optimizer is not the same in the two cases, and there are differences in the regularization as well. The Pytorch code uses Dropout and a l2 regularizer (the weight decay parameter in Adam) whereas Scikit learn only uses l2 regularization with another parameter value.**
| github_jupyter |
# __Fundamentos de programación__
<strong>Hecho por:</strong> Juan David Argüello Plata
## __1. Variables__
Una variable es el <u>nombre</u> con el que se identifica información de interés.
```
nom_variable = contenido
```
El contenido de una variable puede cambiar de naturaleza; por eso se dice que Python es un lenguaje dinámico.
### __1.1. Naturaleza de las variables__
| Naturaleza | Ejemplo |
|----------|---|
| Numérico | `x = 5` |
| Textual | `text = 'Esta es una frase'` |
| Lista | `lista = [0,1,2,"texto"]` |
| Tupla | `tupla = (0,1,2,"texto")` |
| Diccionario | `dic = {"num":5, "text": "hola"}` |
### __1.2. Variable numérica__
La forma en como se define una variable numérica y el tipo de operaciones básicas que se pueden emplear con ellas se muestra a continuación.
```
# Declaring a numeric variable works just like in algebra...
x = 1
print(x)
x = 5
w = 10
z = 20
print("x = ", x, ", w = ", w, ", z = ", z)  # We can be more specific when printing information
```
También se pueden hacer operaciones matemáticas, pero _cuidado_: es importante escribir bien las ecuaciones.
Si se quisiera resolver:
$$
\begin{equation}
y = \frac{x}{w \, z}
\end{equation}
$$
Se debe escribir el algoritmo así:
```
# Parentheses group the denominator, so this computes y = x / (w * z).
y = x/(w*z)
print(y)
```
Porque si se escribe y ejecuta así:
```
# Division and multiplication associate left-to-right, so this is (x / w) * z.
y = x/w*z
print(y)
```
Se estaría realmente resolviendo:
$$
\begin{equation}
y = \frac{x}{w} z
\end{equation}
$$
<h1><strong>Ejercicio:</strong></h1>
Resuelve la siguiente ecuación:
$$
\begin{equation}
y = \frac{m \, n}{m ^{2}} \frac{n +1}{ \left(n^{-2} m \right) ^{3}}
\end{equation}
$$
Dónde:
* $n = 2$
* $m = 10$
```
```
### __1.2. Variable de texto__
A continuación, se puede observar la naturaleza de las variables textuales.
```
t = "Esta es una oración"  # Same syntax as a numeric variable.
print(t)
# It is possible to concatenate text
t2 = ", ¿o no?"
frase_completa = t+t2
print(frase_completa)
# We can also access individual characters of a string
print(frase_completa[0])
# And slices of a sentence
print(frase_completa[2:])
```
### __1.3. Listas__
Variables _dinámicas_ con contenido de cualquier naturaleza.
```
#Ejemplo de lista
l = ['a','b','c', [0,1]]
print(l)
#¿Cómo accedemos a la información?
print(l[0]) #Recuerda: el contenido de la lista empieza desde 0, 1, 2, ...
#Podemos redefinir el contenido de la siguiente manera:
l[0] = 'z'
print(l) #De esta manera, la lista se cambia su valor
print(l[3][0]) #También podemos leer la información de una lista dentro de otra lista
```
### __1.4. Tuplas__
Variables _estáticas_ con contenido de cualquier naturaleza.
```
t = ('a',0,20,'2', ('Hola', 'Adiós')) #Similar a la lista
print(t)
#También podemos acceder a su contenido... y jugar con él
print('¿' + t[4][0] + '?, ' + t[4][1])
#Pero si lo intentamos cambiar...
t[0] = 1
```
### __1.5. Diccionarios__
Tipo de variable usada en programación web. Facilita la lectura de código al darle _"nombres"_ a su contenido.
```
#Si vamos al súper mercado
lista_mercado = {
'manzana':2,
'peras':3,
'uvas': 4
}
print(lista_mercado)
#Podemos ser aún más específicos...
lista_mercado = {
'Frutas': {
'Manzanas': {'Unidades': 'Un', 'Cant': 2},
'Peras': {'Unidades': 'Un', 'Cant': 1},
'Uvas': {'Unidades': 'Lb', 'Cant': 4}
}
}
print(lista_mercado)
#Se accede a la información de la siguiente manera:
print(lista_mercado['Frutas']['Manzanas'])
```
| github_jupyter |
# Multi-level Models in Keras Playground
Linear Mixed effects models, also known as hiearchical linear models, also known as multi-level models, are powerful linear ensemble modeling tools that can do both regression and classification tasks for many structured data sets. This notebook describes what a multi-level model is, and how to implement a model using the neural network library keras. Model outputs are compared to multi-level models available in the statsmodels package.
A comparison among:
[StatsModels](https://github.com/statsmodels/statsmodels)
[Keras](https://github.com/fchollet/keras) with Tensorflow backend
For brevity, this tutorial will ignore cross-validation and hold out data as tools for model assessment.
## A very brief introduction to multi-level models
Multi-level models account for different levels within a data set. Levels are groupings of data that apply across several observations. For example, a classic data set (simulated below), is the math achievement versus SES status for students who attend catholic schools versus public schools. The first level is the student's SES, whereas the second level is their attended school. Multi-level models can account for fixed effects (i.e., the variance does not change within groups), and random effects (i.e., the variance is distributed across groups). Multi-level models are linear models. In the case of the catholic school data set the equation that predicts student math achievement is:
Note: the simplified equation below is only a rough sketch; the correct general multi-level form is given afterwards.
$$ math\_achievement = \alpha_{01} + \beta_{01} * SES + \beta_{02} * catholic\_school $$
Takes the general form of:
$$ Y_{ij} = \beta_{0j} + \beta_{1j}X_{ij} + r_{ij} $$
$$ \beta_{0j} = \gamma_{00} + \gamma_{01}W_j + u_{0j} $$
$$ \beta_{1j} = \gamma_{10} + \gamma_{11}W_j + u_{1j} $$
And the more specific form:
$$ Y_{math achievement, i} = \beta{0,school} + \beta_{1,school}X_{i, SES} + \beta_{2,school}X_{i,school} + r_{i,j} $$
$$ \beta_{0, school} = \gamma_{00} + \gamma_{01}W_{school} + u_{0, school} $$
$$ \beta_{1, school} = \gamma_{10} + \gamma_{11}W_{school} + u_{1, school} $$
Where
| variable | description |
|----------------------------------------|--------------------------------------------------------------------------------------------------|
| $i=1,2,3...$ | the student indicator, i.e., the student ID. |
| $j=catholic,public$ | the school group indicator |
| $\beta_{0, school}$,$\beta_{1,school}$ | level-1 coefficients. in this case the SES status and the categorical school belonging variable. |
| $\gamma_{00}...\gamma_{11}$ | level-2 coefficients. Also known as fixed effects. |
| $X_{ij}$ | Level-1 variable. SES, etc. |
| $W_{ij}$ | Level-2 predictor. school belonging, etc. |
| $r_{ij}$ | Level-1 random effect. |
| $u_{0,j},u_{1,j}...$ | Level-2 random effects. |
Taken together, these equations yield different intercepts and coefficients depending on whether a student attended a catholic school or a public school.
```
import numpy as np
import statsmodels.formula.api as smf
from patsy import dmatrices
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import r2_score
%load_ext watermark
```
`watermark` prints the versions of libraries, pythons, and computer hardware in case this matters for your use of this notebook. It notes that `keras` is not installed. This is because `keras` comes with a `tensorflow` installation and thus does not need to be installed again.
```
%watermark -v -m -p numpy,pandas,statsmodels,tensorflow,keras,matplotlib
```
# Prepare data and quick visualization
For the first comparison, we use the california housing data set and create a categorical variable called `HouseAgeGroup`. This is due to the data analysis showing there is probably several groups of house ages based on periods of housing expansion. `sklearn` has many built in data sets that include data descriptions. The california housing data set has no strong correlations between data points and makes for a good first attempt at modeling data.
```
from sklearn.datasets import fetch_california_housing as get_data
data = get_data()
df = pd.DataFrame(data['data'], columns=data['feature_names'])
# popGroup is high=1; low=0
df['popGroup'] = df.Population.apply(lambda x: 1 if x >= 1000 else 0).astype('category')
def house_age_group(age):
    """Bucket a house age into one of three ordinal groups.

    Returns 3 for ages >= 52 (the capped maximum in this data set),
    2 for ages in [30, 52), and 1 for ages below 30. A non-comparable
    input (e.g. NaN) fails every comparison and falls through to an
    implicit None, matching the original behavior.
    """
    if age >= 52:
        return 3
    elif age >= 30:
        # upper bound (< 52) is already guaranteed by the failed branch above
        return 2
    elif age < 30:
        return 1
df['HouseAgeGroup'] = df.HouseAge.apply(lambda x: house_age_group(x))
df.head()
print(data['DESCR'])
fig, ax = plt.subplots(1, 5, figsize=(15, 5), sharey=True)
for n, c in enumerate(df.columns[1:6]):
df.plot.scatter(ax=ax[n], x=c, y='MedInc', alpha=0.05)
df.HouseAge.hist(bins=np.arange(1, 53, 1))
fig, ax = plt.subplots(figsize=(9, 7))
z = df.corr('spearman')
cbar = ax.pcolormesh(z, cmap='seismic', vmin=-0.7, vmax=0.7)
fig.colorbar(cbar, label='Spearman Correlation')
ax.set_xticks(np.arange(0, 8, 1)+0.5)
ax.set_yticks(np.arange(0, 8, 1)+0.5)
ax.set_xticklabels(z.columns)
ax.set_yticklabels(z.index)
for n,mc in enumerate(z.values):
for i,m in enumerate(mc):
ax.text(n+0.3, i+0.35, str(round(m,2)), color='black', fontsize=12)
```
# Using StatsModels to perform a linear mixed model of reaction time
`statsmodels` can use `R`-like formulas to define fixed effects equations. However it uses the groups argument instead of the `|` within the equation to declare random effects. It is common practice to have a variable that has random effects also have fixed effects. This is because [random effects without fixed effects implies that the variable has no average effect.](https://stats.stackexchange.com/questions/173159/can-a-variable-be-both-random-and-fixed-effect-at-the-same-time-in-a-mixed-effec)
```
# https://www.statsmodels.org/stable/mixed_linear.html
formula = "MedInc ~ AveRooms + AveBedrms + AveRooms*AveBedrms + C(HouseAgeGroup)"
md = smf.mixedlm(formula, df, groups=df['HouseAgeGroup'])
mdf = md.fit()
print(mdf.summary())
fe_params = pd.DataFrame(mdf.fe_params,columns=['LMM'])
random_effects = pd.DataFrame(mdf.random_effects)
random_effects = random_effects.transpose()
random_effects = random_effects.rename(index=str, columns={'groups': 'LMM'})
random_effects
```
In this case it seems like these groups are not so important (high or low population). However the other features in the model seem to be able to explain at least half the variance in the median incomes of home owners in southern california.
```
ypred = mdf.predict(df)
fig, ax = plt.subplots()
ax.scatter(df['MedInc'], ypred, alpha=0.05)
ax.set_ylim(0, 10)
ax.set_ylabel('Predicted', fontsize=15)
ax.set_xlabel('Actual', fontsize=15)
ax.plot([0, 10], [0, 10], color='red')
print('R2 score:', r2_score(df['MedInc'], ypred))
```
# creating a design matrix from a statsmodels formula
`statsmodels` can accept `pandas` dataframes directly as input with the defined groups. `keras` can not. Thus we need to create a [design matrix](https://en.wikipedia.org/wiki/Design_matrix) directly for training the `keras` model.
```
Y, X = dmatrices(formula, data=df, return_type='matrix')
Terms = X.design_info.column_names
_, Z = dmatrices('MedInc ~ -1 + C(HouseAgeGroup)', data=df, return_type='matrix')
X = np.asarray(X) # fixed effect
Z = np.asarray(Z) # mixed effect
Y = np.asarray(Y).flatten()
nfixed = np.shape(X)
nrandm = np.shape(Z)
```
# Using Keras
`keras` is a library that allows the construction of neural networks. Neural networks at the most basic level are linear combinations of variables, that is linear models. They can include a lot more sophistication but at their core, they are no different than any other model that is based on linear combinations of variables. Thus, `keras` provides a modular and verbose way to construct multi-level models.
```
import tensorflow.keras as keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Add, Dense
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import TensorBoard
K.clear_session()
nb_epoch = 500
fixedpred = np.argmax(X,axis=1)
randmpred = np.argmax(Z,axis=1)
Xinput = Input(batch_shape=(None, nfixed[1]-1), name='level_1_variables')
fixed_keras = Dense(1, input_dim=nfixed[1]-1, name = 'fixedEffect')(Xinput)
Zinput = Input(batch_shape=(None, nrandm[1]), name='level_2_variables')
randm_keras = Dense(1, input_dim=nrandm[1], use_bias=None, name = 'randomEffect')(Zinput)
merged = keras.layers.add([fixed_keras, randm_keras])
model = Model([Xinput, Zinput], merged)
model.compile(loss='mean_squared_error', optimizer='adam')
# train the model
model.fit([X[:,1:], Z], Y.flatten(),
epochs=nb_epoch,
batch_size=100,
verbose=0,
shuffle=True,
)
Ypredict = model.predict([X[:,1:], Z])
betakeras = np.hstack((model.get_weights()[1], model.get_weights()[0].flatten()))
bkeras = model.get_weights()[2].flatten()
from tensorflow.keras.utils import plot_model
pm = plot_model(model,
to_file='model.png',
show_shapes=True,
show_layer_names=True,
rankdir='TB')
from IPython.display import display, Image
display(Image(filename='model.png'))
fe_params['Keras'] = pd.Series(betakeras, index=fe_params.index)
random_effects['Keras'] = pd.Series(bkeras, index=random_effects.index)
fe_params
fig, ax = plt.subplots(figsize=(5, 10))
yticks = np.arange(fe_params.shape[0])
ax.plot(fe_params, yticks)
ax.set_yticks(yticks)
ax.set_yticklabels(labels=fe_params.index, rotation=0)
ax.legend(['LMM', 'Keras'], fontsize=15)
ax.set_xlabel('Coefficient value', fontsize=15)
random_effects
fig, ax = plt.subplots(figsize=(10, 5))
random_effects.reset_index().plot(ax=ax)
# ax.set_xticks(np.arange(0, 20, 1))
ax.set_title('random effects', fontsize=15)
fig, ax = plt.subplots(figsize=(9, 7))
ax.scatter(ypred, Ypredict, alpha=0.5, label='model comparison')
ax.plot([-100, 100], [-100, 100], label='perfect match', color='red')
ax.set_ylabel('Keras', fontsize=15)
ax.set_xlabel('statsmodels', fontsize=15)
ax.set_ylim(-20, 50)
ax.set_xlim(-25, 80)
ax.legend(fontsize=15, title='Median Income')
fig, ax = plt.subplots(figsize=(12, 5))
ax.plot(ypred - Ypredict.flatten(), marker='o', linewidth=0)
ax.set_ylabel('statsmodels(y) - keras(y)', fontsize=15)
print('R2 score of model comparison:', r2_score(ypred, Ypredict))
```
# Catholic School Simulation
The catholic school data set is a classic hierarchical data set used in education research to justify multi-level models (Bryk and Raudenbush, 2002). The data set is typically a 2D comparison of math achievement (typically described as a math test score) versus socio-economic status (SES) of students who attend catholic or public schools. The catholic school students perform better than the public school students thus justifying the need for a linear model to have two intercepts and slopes depending on the group they belong to.
In this case, I have simulated the catholic school data set.
```
num_samples = 1000
# The desired mean values of the sample.
mu = np.array([5.0, 10.0])
# The desired covariance matrix.
r = np.array([
[ 3.40, 5.75],
[ 5.75, 5.50]
])
# Generate the random samples.
y = np.random.multivariate_normal(mu, r, size=num_samples)
catholic_data = pd.DataFrame({'SES':y[:,0], 'math_score':y[:,1]})
catholic_data['catholic_student'] = [1 if n>0.5 else 0 for n in np.random.random(num_samples)]
catholic_data['math_score'] = catholic_data.apply(lambda x: x['math_score']*3 if x['catholic_student']==1 else x['math_score'], axis=1)
catholic_data['math_score'] = catholic_data['math_score']/catholic_data['math_score'].max()
catholic_data['SES'] = catholic_data['SES'].apply(lambda x: (x - catholic_data['SES'].mean())/catholic_data['SES'].std())
catholic_data['colors'] = catholic_data['catholic_student'].apply(lambda x: 'green' if x==1 else 'purple')
catholic_data.describe()
fig, ax = plt.subplots(figsize=(9, 7))
catholic_data.plot.scatter(x='SES', y='math_score', color=catholic_data['colors'], alpha=0.5, ax=ax, s=55)
ax.set_ylabel('Math Achievement', fontsize=15)
ax.set_xlabel('Socio-Economic Status', fontsize=15)
```
# statsmodels catholic data
```
# https://www.statsmodels.org/stable/mixed_linear.html
# random effects should be fixed effects unless you want to imply the average effect of the random effect is 0
# https://stats.stackexchange.com/questions/173159/can-a-variable-be-both-random-and-fixed-effect-at-the-same-time-in-a-mixed-effec
formula = 'math_score ~ SES + SES * C(catholic_student)'
md = smf.mixedlm(formula, catholic_data, groups=catholic_data['catholic_student'])
mdf = md.fit()
print(mdf.summary())
fe_params = pd.DataFrame(mdf.fe_params,columns=['LMM'])
random_effects = pd.DataFrame(mdf.random_effects)
random_effects = random_effects.transpose()
random_effects = random_effects.rename(index=str, columns={'groups': 'LMM'})
random_effects
mdf.random_effects
ypred = mdf.predict(catholic_data)
fig, ax = plt.subplots()
ax.scatter(catholic_data['math_score'], ypred, alpha=0.5)
ax.set_ylabel('Predicted', fontsize=15)
ax.set_xlabel('Actual', fontsize=15)
ax.plot([0, 1], [0, 1], color='red')
```
# keras catholic data
```
Y, X = dmatrices(formula, data=catholic_data, return_type='matrix')
Terms = X.design_info.column_names
_, Z = dmatrices('math_score ~ -1 + C(catholic_student)', data=catholic_data, return_type='matrix')
X = np.asarray(X) # fixed effect
Z = np.asarray(Z) # mixed effect
Y = np.asarray(Y).flatten()
nfixed = np.shape(X)
nrandm = np.shape(Z)
K.clear_session()
nb_epoch = 500
fixedpred = np.argmax(X,axis=1)
randmpred = np.argmax(Z,axis=1)
Xinput = Input(batch_shape=(None, nfixed[1]-1), name='individualEffects')
fixed_keras = Dense(1, input_dim=nfixed[1]-1, name='fixedEffect')(Xinput)
Zinput = Input(batch_shape=(None, nrandm[1]), name='schoolEffects')
randm_keras = Dense(1, input_dim=nrandm[1], use_bias=None, name='randomEffect')(Zinput)
merged = keras.layers.add([fixed_keras, randm_keras])
model = Model([Xinput, Zinput], merged)
model.compile(loss='mean_squared_error', optimizer='adam')
# train the model
model.fit([X[:,1:], Z], Y.flatten(),
epochs=nb_epoch,
batch_size=100,
verbose=0,
shuffle=True,
)
Ypredict = model.predict([X[:,1:], Z])
betakeras = np.hstack((model.get_weights()[1], model.get_weights()[0].flatten()))
bkeras = model.get_weights()[2].flatten()
from tensorflow.keras.utils import plot_model
pm = plot_model(model,
to_file='model.png',
show_shapes=True,
show_layer_names=True,
rankdir='TB')
from IPython.display import display, Image
display(Image(filename='model.png'))
fe_params['Keras'] = pd.Series(betakeras, index=fe_params.index)
random_effects['Keras'] = pd.Series(bkeras, index=random_effects.index)
fe_params
fig, ax = plt.subplots(figsize=(5, 10))
yticks = np.arange(fe_params.shape[0])
ax.plot(fe_params, yticks)
ax.set_yticks(yticks)
ax.set_yticklabels(labels=fe_params.index, rotation=0)
ax.legend(['LMM', 'Keras'], fontsize=15)
ax.set_xlabel('Coefficient value', fontsize=15)
random_effects
```
It is important to note that, although the fitted regression coefficients are not the same across the different approaches, the model predictions are highly similar (at least they pass the eyeball test).
# statsmodels compared to keras predicted output
As we can see below, the predicted math achievement value for each student is identical between the `statsmodels` multi-level model and the `keras` model.
```
fig, (ax, ax2) = plt.subplots(1, 2, figsize=(13, 6))
ax.scatter(ypred, Ypredict, alpha=0.5, label='model comparison')
ax.plot([0, 1], [0, 1], color='red', linewidth=2)
ax.set_ylabel('Keras', fontsize=15)
ax.set_xlabel('statsmodels', fontsize=15)
ax.legend(fontsize=15, title='catholic school\nmath achievement')
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax2.scatter(Y, ypred, label='statsmodels', alpha=0.5, s=100)
ax2.scatter(Y, Ypredict, label='keras', marker='x', color='black', alpha=0.5)
ax2.plot([0, 1], [0, 1], color='red', linewidth=2, label='perfect prediction')
ax2.legend(fontsize=15, title='catholic school\nmath achievement')
ax2.set_ylabel('Predicted', fontsize=15)
ax2.set_xlabel('Actual', fontsize=15)
fig.tight_layout()
fig, ax = plt.subplots(figsize=(12, 5))
ax.plot(ypred - Ypredict.flatten(), marker='o', linewidth=0)
ax.set_ylabel('statsmodels(y) - keras(y)', fontsize=15)
print('R2 score of model comparison:', r2_score(ypred, Ypredict))
```
| github_jupyter |
**Exercise set 1**
==================
>The goal of this exercise is to introduce some concepts from
>Chapter 1, for instance the difference between **hard** and **soft** modeling.
**Exercise 1.1** A traveling juggling group has an act out in the open where they shoot a person out from
a cannon. The problem is to predict where the person will land depending on the angle of the cannon and
the initial velocity.
**(a)** This problem can be solved by both a **hard** and a **soft** modeling approach. What are the advantages
and disadvantages of soft and hard modeling?
**(b)** What **other** variables than the cannon angle and the initial velocity might influence the results?
**(c)** The problem can be approached from a hard modeling perspective using Newton’s equations. Assume
that you do not know anything about Newtonian mechanics, describe how you would solve this
problem using a **soft** modeling approach.
**(d)** Describe how the problem can be solved by **combining** both hard and soft modeling.
**Double click here to type your answer to exercise 1.1:**
This is a markdown cell, so you can type latex math $\sum_i^3 \frac{\pi}{2}$, *italics*, **bold**, ***bold italics***, and then execute the cell (by using shift+enter, or the *Run* button on top) to generate the final text. To make a cell markdown instead of code (default): Select the cell, go to the *Cell* menu on top and change *Cell Type* to "markdown"
**Exercise 1.2** A chemist wants to inspect the flow of materials passing through a pipe in a factory. The
aim is to classify the flow into different types using acoustic sensors attached to the pipes. Before she can
use chemometric methods for this purpose she needs to convert the recordings into a data table.
**(a)** Should each sound recording be represented as a **column** or a **row** vector in the data matrix?
**(b)** What **variables** could be used here to describe the sound recordings?
**(c)** Are there any problems related to the representation of the signals we need to be aware of when
analyzing these acoustic samples?
**Answer 1.2:** (double click here)
**Exercise 1.3** A pharmaceutical company wants to create a new drug for malaria. To do this,
they use a *quantitative* structure-activity relationship (QSAR) approach. In QSAR a model is formed
(typically a regression model) which predicts the biological activity of a compound given information about
its molecular structure. To make a model, a set of compounds with known biological activity (here:
anti-malaria activity) and structure is used. The data matrix will have one row for each molecule and each
column contains a variable describing something related to the structure.
**(a)** Can you suggest **variables** that may be used to describe the structure of the molecules?
**(b)** Assume you decide to use XYZ coordinates for each atom as variables. What **problems** would you
encounter for such a representation?
**(c)** Can you suggest ways to **solve** the problems occurring above?
**Answer 1.3:** (double click here)
**Exercise 1.4** Assume you want to compare 20 spectra which all contain only three overlapping Gaussian
peaks. **Which** of the following situations can we expect to encounter problems when using the sampling
point representation (SPR)? SPR means that we sample the profile or shape at regular intervals where
each sample point is represented by a variable, see Figure 1 below:
**(a)** Peak positions and widths are constant for all 20 spectra. Only the peak heights vary.
**(b)** Peak positions are constant for all 20 spectra. Peak widths and heights vary.
**(c)** Peak positions, widths and heights vary for the three peaks in the 20 spectra.

**Figure 1:** The shape of the profile is represented by a set of sampled points, usually regularly, along
the abscissa. Each sampling point becomes a variable in the vector representing the profile in the data
analysis.
**Answer 1.4:** (double click here)
**Exercise 1.5** Consider the previous problem with three Gaussian peaks.
**(a)** Suggest a representation **other** than the SPR which is making use of the functional form for these
spectra and which ensures comparability between samples for the different variables when peaks are
shifted.
**(b)** Is there a **type** of peak shift which the functional representation that you answered for **(a)** cannot handle or are all types
of shifts accounted for?
**Answer 1.5:** (double click here)
**Exercise 1.6** When we perform a sampling point representation (SPR) of 1D data profiles, it is observed
that shifts of peaks can cause serious problems when analyzing the data. However, it is also observed that
shifting of **broader peaks** is less detrimental to the analysis than shifting of narrow peaks. Explain why
this is so.
**Answer 1.6:** (double click here)
**Exercise 1.7** The company FoodInspect Inc wants to create a product for its customers which enables
them to analyze the freshness of fruits and vegetables when they are out buying food. The constraints for
making this product are: it must be very cheap (not more than $50), easy to use (i.e. we assume every
customer has no scientific or technical skills) and reliable. Suggest how such a product can be created
using chemometric methods.
**Answer 1.7**: (double click here)
**Important Note!**: If you want to save your answers, please do not forget to download this notebook, as described in [exercise 0](./00_jupyter_introduction.ipynb).
| github_jupyter |
# Data Preparation
Clone GitHub repository to Colab storage.
```
!git clone https://github.com/megagonlabs/HappyDB.git
!ls
!ls HappyDB/happydb/data
```
# Utility functions
```
import numpy as np
from sklearn.base import clone
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, GridSearchCV, train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix, f1_score
import warnings
warnings.filterwarnings('ignore')
def run_cv(X, y, clf, num_classes):
    """Run 5-fold cross-validation and aggregate macro-F1 and confusion counts.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Feature matrix (dense or sparse).
    y : array-like of shape (n_samples,)
        Encoded class labels.
    clf : estimator
        Unfitted scikit-learn classifier; it is cloned for each fold so the
        instance passed in is never mutated.
    num_classes : int
        Number of distinct classes (sets the confusion-matrix dimensions).

    Returns
    -------
    tuple of (numpy.ndarray, numpy.ndarray)
        Per-fold macro-F1 scores, and the confusion matrix summed over
        all five test folds.
    """
    # shuffle=True is required for random_state to have any effect;
    # recent scikit-learn raises a ValueError when random_state is set
    # on an unshuffled KFold.
    kf = KFold(n_splits=5, shuffle=True, random_state=1)
    cm = np.zeros([num_classes,
                   num_classes],
                  dtype="int")  # Initialize confusion matrix with 0
    f1_list = []
    for i, (train_index, test_index) in enumerate(kf.split(X)):
        print("Fold {}".format(i + 1))
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        cur_clf = clone(clf)  # fresh unfitted copy for this fold
        cur_clf.fit(X_train, y_train)
        y_pred = cur_clf.predict(X_test)
        cm += confusion_matrix(y_test, y_pred)
        f1_list.append(f1_score(y_test, y_pred, average="macro"))
    f1_scores = np.array(f1_list)
    return (f1_scores, cm)
```
## Loading CSV file as DataFrame
Use `.read_csv()` function to load a CSV file.
```
import pandas as pd
hm_df = pd.read_csv("HappyDB/happydb/data/cleaned_hm.csv")
hm_df.head()
# Filtering out samples that do not have ground truth labels
# or # of sentences > 3
filtered_hm_df = hm_df[(hm_df["num_sentence"] <= 3) &
(~ hm_df["ground_truth_category"].isnull())]
print("Original # of HM: {}".format(len(hm_df)))
print("Filtered # of HM: {}".format(len(filtered_hm_df)))
```
# Label vector & Feature matrix creation
Let's create label vector and feature matrix from the DataFrame.
```
# Label Encoder
le = LabelEncoder()
y = le.fit_transform(filtered_hm_df["ground_truth_category"])
y
le.classes_
Xcount = CountVectorizer().fit_transform(filtered_hm_df["cleaned_hm"])
```
# Try other feature extraction methods
```
%%time
# Creates feature vectors
Xtfidf = TfidfVectorizer().fit_transform(filtered_hm_df["cleaned_hm"])
Xlda = LatentDirichletAllocation().fit_transform(
CountVectorizer().fit_transform(filtered_hm_df["cleaned_hm"]))
Xcount_lda = np.concatenate([Xcount.todense(), Xlda], axis=1)
f1_scores_count, _ = run_cv(Xcount, y, LogisticRegression(), len(le.classes_))
f1_scores_tfidf, _ = run_cv(Xtfidf, y, LogisticRegression(), len(le.classes_))
f1_scores_lda, _ = run_cv(Xlda, y, LogisticRegression(), len(le.classes_))
f1_scores_count_lda, _ = run_cv(Xcount_lda, y, LogisticRegression(), len(le.classes_))
eval_df = pd.DataFrame({"CountVec": f1_scores_count,
"TfidfVec": f1_scores_tfidf,
"LDA": f1_scores_lda,
"Count+LDA": f1_scores_count_lda})
eval_df
```
Try!
- Try different configurations of `CountVectorizer()`, `TfidfVectorizer()`, and `LatentDirichletAllocation()`.
- Replace `LogisticRegression()` with other algorithms.
- Replace `LogisticRegression()` with `GridSearchCV(LogisticRegression(), ...)`
```
import spacy
nlp = spacy.load("en_core_web_sm")
# Sample code from spaCy
doc = nlp("Apple is looking at buying U.K. startup for $1 billion")
info_list = []
for token in doc:
info_list.append([token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
token.shape_, token.is_alpha, token.is_stop])
pd.DataFrame(
info_list, columns=["TEXT", "LEMMA", "POS", "TAG", "DEP", "SHAPE", "ALPHA", "STOP"])
```
# Feature Engineering
Use the following ideas as preprocessing
- Remove stop words
- Filter adjectives, nouns, and verbs
```
pos_set = ["ADJ", "PROPN", "NOUN", "VERB"]
proc_hm_list = []
for hm in filtered_hm_df["cleaned_hm"].tolist():
filtered_tokens = []
for token in nlp(hm):
# Remove stop words
if token.is_stop:
continue
# Filter tokens that belong to predefined POS types
if token.pos_ not in pos_set:
continue
filtered_tokens.append(token.lemma_)
proc_hm = " ".join(filtered_tokens)
proc_hm_list.append(proc_hm)
filtered_hm_df["proc_hm"] = proc_hm_list
filtered_hm_df["proc_hm"]
Xcount_proc = CountVectorizer().fit_transform(filtered_hm_df["proc_hm"])
f1_scores_count_proc, _ = run_cv(Xcount_proc, y, LogisticRegression(), len(le.classes_))
eval_df = pd.DataFrame({"CountVec": f1_scores_count,
"TfidfVec": f1_scores_tfidf,
"LDA": f1_scores_lda,
"Count+LDA": f1_scores_count_lda,
"Proc+CountVec": f1_scores_count_proc})
eval_df.mean(axis=0)
```
| github_jupyter |
```
from utils import whiteboard as wb
from compas.datastructures import Mesh
from compas.datastructures import subdivision as sd
from compas_plotters import MeshPlotter
mesh = Mesh.from_polyhedron(8)
mesh.summary()
mesh2 = sd.mesh_subdivide_tri(mesh)
mesh3 = sd.trimesh_subdivide_loop(mesh2)
mesh4 = sd.mesh_subdivide_catmullclark(mesh3)
plotter = MeshPlotter(mesh4)
plotter.draw_edges()
plotter.draw_faces()
plotter.draw_vertices(radius=0.01)
plotter.show()
mesh4.summary()
```
# 3d viewer
```
import ipyvolume as ipv
vertices, faces = mesh3.to_vertices_and_faces()
vertices
faces
x = [v[0] for v in vertices]
y = [v[1] for v in vertices]
z = [v[2] for v in vertices]
ipv.figure(width=800, height=450)
viewermesh = ipv.plot_trisurf(x, y, z, faces, color='white')
ipv.show()
vertices, faces = mesh4.to_vertices_and_faces()
x = [v[0] for v in vertices]
y = [v[1] for v in vertices]
z = [v[2] for v in vertices]
ipv.figure(width=800, height=450)
viewermesh = ipv.plot_trisurf(x, y, z, faces, color='white')
ipv.show()
faces
wb()
triangles_only = []
for f in faces:
if len(f) == 3:
triangles_only.append(f)
else:
for i in range(len(f) - 2):
triangles_only.append([f[0], f[i+1], f[i+2]])
triangles_only
ipv.figure(width=800, height=450)
viewermesh = ipv.plot_trisurf(x, y, z, triangles_only, color='white')
ipv.style.use('minimal')
ipv.show()
def old_draw_compas_mesh(mesh, color='white'):
    """
    Renders a compas mesh on a 3D canvas with ipyvolume.

    Parameters
    ----------
    mesh : :class: compas.datastructures.Mesh
        the mesh to be shown in 3D
    color : str, optional
        surface color passed to ipyvolume (default 'white')

    Returns
    -------
    an instance of ipyvolume.widgets.Mesh
    """
    # extract lists of vertices and faces
    vertices, faces = mesh.to_vertices_and_faces()
    # extract x, y and z values into separate lists
    x = [v[0] for v in vertices]
    y = [v[1] for v in vertices]
    z = [v[2] for v in vertices]
    # triangulate n-gons with a simple fan around the face's first vertex
    triangles_only = []
    for f in faces:
        if len(f) == 3:
            triangles_only.append(f)
        else:
            for i in range(len(f) - 2):
                triangles_only.append([f[0], f[i+1], f[i+2]])
    # create the ipyvolume plot
    ipv.figure(width=800, height=450)
    # bug fix: forward the caller-supplied color instead of hard-coding 'white'
    viewermesh = ipv.plot_trisurf(x, y, z, triangles_only, color=color)
    ipv.style.use('minimal')
    ipv.show()
    return viewermesh
mesh5 = sd.mesh_subdivide_doosabin(mesh3)
draw_compas_mesh(mesh5)
from utilities import draw_compas_mesh
ipvmesh = draw_compas_mesh(mesh5, color='cyan')
ipvmesh.__dict__
```
| github_jupyter |
# Collaborative filtering on the MovieLense Dataset
## Learning Objectives
1. Know how to build a BigQuery ML Matrix Factorization Model
2. Know how to use the model to make recommendations for a user
3. Know how to use the model to recommend an item to a group of users
###### This notebook is based on part of Chapter 9 of [BigQuery: The Definitive Guide](https://www.oreilly.com/library/view/google-bigquery-the/9781492044451/ "http://shop.oreilly.com/product/0636920207399.do") by Lakshmanan and Tigani.
### MovieLens dataset
To illustrate recommender systems in action, let’s use the MovieLens dataset. This is a dataset of movie reviews released by GroupLens, a research lab in the Department of Computer Science and Engineering at the University of Minnesota, through funding by the US National Science Foundation.
Download the data and load it as a BigQuery table using:
```
import os
PROJECT = "your-project-here" # REPLACE WITH YOUR PROJECT ID
# Do not change these
os.environ["PROJECT"] = PROJECT
%%bash
rm -r bqml_data
mkdir bqml_data
cd bqml_data
curl -O 'http://files.grouplens.org/datasets/movielens/ml-20m.zip'
unzip ml-20m.zip
yes | bq rm -r $PROJECT:movielens
bq --location=US mk --dataset \
--description 'Movie Recommendations' \
$PROJECT:movielens
bq --location=US load --source_format=CSV \
--autodetect movielens.ratings ml-20m/ratings.csv
bq --location=US load --source_format=CSV \
--autodetect movielens.movies_raw ml-20m/movies.csv
```
## Exploring the data
Two tables should now be available in <a href="https://console.cloud.google.com/bigquery">BigQuery</a>.
Collaborative filtering provides a way to generate product recommendations for users, or user targeting for products. The starting point is a table, <b>movielens.ratings</b>, with three columns: a user id, an item id, and the rating that the user gave the product. This table can be sparse -- users don’t have to rate all products. Then, based on just the ratings, the technique finds similar users and similar products and determines the rating that a user would give an unseen product. Then, we can recommend the products with the highest predicted ratings to users, or target products at users with the highest predicted ratings.
```
%%bigquery --project $PROJECT
SELECT *
FROM movielens.ratings
LIMIT 10
```
A quick exploratory query yields that the dataset consists of over 138 thousand users, nearly 27 thousand movies, and a little more than 20 million ratings, confirming that the data has been loaded successfully.
```
%%bigquery --project $PROJECT
SELECT
COUNT(DISTINCT userId) numUsers,
COUNT(DISTINCT movieId) numMovies,
COUNT(*) totalRatings
FROM movielens.ratings
```
On examining the first few movies using the following query, we can see that the genres column is a formatted string:
```
%%bigquery --project $PROJECT
SELECT *
FROM movielens.movies_raw
WHERE movieId < 5
```
We can parse the genres into an array and rewrite the table as follows:
```
%%bigquery --project $PROJECT
CREATE OR REPLACE TABLE movielens.movies AS
SELECT * REPLACE(SPLIT(genres, "|") AS genres)
FROM movielens.movies_raw
%%bigquery --project $PROJECT
SELECT *
FROM movielens.movies
WHERE movieId < 5
```
## Matrix factorization
Matrix factorization is a collaborative filtering technique that relies on factorizing the ratings matrix into two vectors called the user factors and the item factors. The user factors is a low-dimensional representation of a user_id and the item factors similarly represents an item_id.
We can create the recommender model using (<b>Optional</b>, takes 30 minutes. Note: we have a model we already trained if you want to skip this step):
```
%%bigquery --project $PROJECT
CREATE OR REPLACE MODEL movielens.recommender
options(model_type='matrix_factorization',
user_col='userId', item_col='movieId', rating_col='rating')
AS
SELECT
userId, movieId, rating
FROM movielens.ratings
%%bigquery --project $PROJECT
SELECT *
-- Note: remove cloud-training-demos if you are using your own model:
FROM ML.TRAINING_INFO(MODEL `cloud-training-demos.movielens.recommender`)
```
Note that we create a model as usual, except that the model_type is matrix_factorization and that we have to identify which columns play what roles in the collaborative filtering setup.
What did you get? Our model took an hour to train, and the training loss starts out extremely bad and gets driven down to near-zero over the next four iterations:
<table>
<tr>
<th>Iteration</th>
<th>Training Data Loss</th>
<th>Evaluation Data Loss</th>
<th>Duration (seconds)</th>
</tr>
<tr>
<td>4</td>
<td>0.5734</td>
<td>172.4057</td>
<td>180.99</td>
</tr>
<tr>
<td>3</td>
<td>0.5826</td>
<td>187.2103</td>
<td>1,040.06</td>
</tr>
<tr>
<td>2</td>
<td>0.6531</td>
<td>4,758.2944</td>
<td>219.46</td>
</tr>
<tr>
<td>1</td>
<td>1.9776</td>
<td>6,297.2573</td>
<td>1,093.76</td>
</tr>
<tr>
<td>0</td>
<td>63,287,833,220.5795</td>
<td>168,995,333.0464</td>
<td>1,091.21</td>
</tr>
</table>
However, the evaluation data loss is quite high, and much higher than the training data loss. This indicates that overfitting is happening, and so we need to add some regularization. Let’s do that next. Note the added l2_reg=0.2 (<b>Optional</b>, takes 30 minutes):
```
%%bigquery --project $PROJECT
CREATE OR REPLACE MODEL movielens.recommender_l2
options(model_type='matrix_factorization',
user_col='userId', item_col='movieId',
rating_col='rating', l2_reg=0.2)
AS
SELECT
userId, movieId, rating
FROM movielens.ratings
%%bigquery --project $PROJECT
SELECT *
-- Note: remove cloud-training-demos if you are using your own model:
FROM ML.TRAINING_INFO(MODEL `cloud-training-demos.movielens.recommender_l2`)
```
Now, we get faster convergence (three iterations instead of five), and a lot less overfitting. Here are our results:
<table>
<tr>
<th>Iteration</th>
<th>Training Data Loss</th>
<th>Evaluation Data Loss</th>
<th>Duration (seconds)</th>
</tr>
<tr>
<td>2</td>
<td>0.6509</td>
<td>1.4596</td>
<td>198.17</td>
</tr>
<tr>
<td>1</td>
<td>1.9829</td>
<td>33,814.3017</td>
<td>1,066.06</td>
</tr>
<tr>
<td>0</td>
<td>481,434,346,060.7928</td>
<td>2,156,993,687.7928</td>
<td>1,024.59</td>
</tr>
</table>
By default, BigQuery sets the number of factors to be the log2 of the number of rows. In our case, since we have 20 million rows in the table, the number of factors would have been chosen to be 24. As with the number of clusters in K-Means clustering, this is a reasonable default but it is often worth experimenting with a number about 50% higher (36) and a number that is about a third lower (16):
**TODO 1**: Create a Matrix Factorization model with 16 factors
```
%%bigquery --project $PROJECT
CREATE OR REPLACE MODEL movielens.recommender_16
options(model_type='matrix_factorization',
user_col='userId', item_col='movieId',
rating_col='rating', l2_reg=0.2, num_factors=16)
AS
SELECT
userId, movieId, rating
FROM movielens.ratings
%%bigquery --project $PROJECT
SELECT *
-- Note: remove cloud-training-demos if you are using your own model:
FROM ML.TRAINING_INFO(MODEL `cloud-training-demos.movielens.recommender_16`)
```
When we did that, we discovered that the evaluation loss was lower (0.97) with num_factors=16 than with num_factors=36 (1.67) or num_factors=24 (1.45). We could continue experimenting, but we are likely to see diminishing returns with further experimentation. So, let’s pick this as the final matrix factorization model and move on.
## Making recommendations
With the trained model, we can now provide recommendations. For example, let’s find the best comedy movies to recommend to the user whose userId is 903. In the query below, we are calling ML.PREDICT passing in the trained recommendation model and providing a set of movieId and userId to carry out the predictions on. In this case, it’s just one userId (903), but all movies whose genre includes Comedy.
```
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender_16`, (
SELECT
movieId, title, 903 AS userId
FROM movielens.movies, UNNEST(genres) g
WHERE g = 'Comedy'
))
ORDER BY predicted_rating DESC
LIMIT 5
```
## Filtering out already rated movies
Of course, this includes movies the user has already seen and rated in the past. Let’s remove them.
**TODO 2**: Make a prediction for user 903 that does not include already seen movies.
```
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender_16`, (
WITH seen AS (
SELECT ARRAY_AGG(movieId) AS movies
FROM movielens.ratings
WHERE userId = 903
)
SELECT
movieId, title, 903 AS userId
FROM movielens.movies, UNNEST(genres) g, seen
WHERE g = 'Comedy' AND movieId NOT IN UNNEST(seen.movies)
))
ORDER BY predicted_rating DESC
LIMIT 5
```
For this user, this happens to yield the same set of movies -- the top predicted ratings didn’t include any of the movies the user has already seen.
## Customer targeting
In the previous section, we looked at how to identify the top-rated movies for a specific user. Sometimes, we have a product and have to find the customers who are likely to appreciate it. Suppose, for example, we wish to get more reviews for movieId=96481 which has only one rating and we wish to send coupons to the 5 users who are likely to rate it the highest.
**TODO 3**: Find the top five users who will likely enjoy *American Mullet (2001)*
```
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender_16`, (
WITH allUsers AS (
SELECT DISTINCT userId
FROM movielens.ratings
)
SELECT
96481 AS movieId,
(SELECT title FROM movielens.movies WHERE movieId=96481) title,
userId
FROM
allUsers
))
ORDER BY predicted_rating DESC
LIMIT 5
```
### Batch predictions for all users and movies
What if we wish to carry out predictions for every user and movie combination? Instead of having to pull distinct users and movies as in the previous query, a convenience function is provided to carry out batch predictions for all movieId and userId encountered during training. A limit is applied here, otherwise, all user-movie predictions will be returned and will crash the notebook.
```
%%bigquery --project $PROJECT
SELECT *
FROM ML.RECOMMEND(MODEL `cloud-training-demos.movielens.recommender_16`)
LIMIT 10
```
As seen in a section above, it is possible to filter out movies the user has already seen and rated in the past. The reason already seen movies aren’t filtered out by default is that there are situations (think of restaurant recommendations, for example) where it is perfectly expected that we would need to recommend restaurants the user has liked in the past.
Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| github_jupyter |
# Scikit-Learn
<!--<badge>--><a href="https://colab.research.google.com/github/TheAIDojo/Machine_Learning_Bootcamp/blob/main/Week 03 - Machine Learning Algorithms/1- Scikit_Learn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a><!--</badge>-->
[Scikit-learn](http://scikit-learn.org/stable/) is a python-based machine learning library providing implementations of a great many algorithms for supervised and unsupervised learning. In large part, it builds upon the capabilities of NumPy, SciPy, matplotlib, and Pandas.
In the context of supervised learning, the primary objects scikit-learn defines are called **estimators**. Each of these defines a `fit` method, which develops a model from provided training data, and a `predict` method, which uses the model to map a new instance to a suitable target value. Scikit-learn also defines multiple utilities for partitioning and manipulating data sets as well as evaluating models.
Below, we cover some of the basic steps needed to create a model in scikit-learn. These notes are based on material appearing in the *scikit-learn tutorials*.
* [Tutorial](http://scikit-learn.org/stable/tutorial/index.html)
* [Cheatsheet](https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Scikit_Learn_Cheat_Sheet_Python.pdf)
## Datasets
Scikit-learn comes bundled with several pre-defined (typically small) `datasets` that users can explore.
load_boston() Load and return the boston house-prices dataset (regression).
load_iris() Load and return the iris dataset (classification).
load_diabetes() Load and return the diabetes dataset (regression).
load_digits() Load and return the digits dataset (classification).
load_linnerud() Load and return the linnerud dataset (multivariate regression).
load_wine() Load and return the wine dataset (classification).
load_breast_cancer() Load and return the breast cancer wisconsin dataset (classification).
The iris dataset is loaded below, and a description of it is printed.
```
import numpy as np
import pandas as pd
# using 'from * import ...' allows as to import submodules directly
from sklearn import (
datasets,
model_selection,
linear_model,
metrics,
neighbors,
tree,
ensemble,
preprocessing,
)
# alternatively, we can import the whole package as such
import sklearn
iris_dataset = (
datasets.load_iris()
) # sklearn.datasets.load_iris() works exactly the same
print(iris_dataset.DESCR)
```
We can also use `iris_dataset.data` and `iris_dataset.targets` to create our x & y (inputs & outputs) pairs that will be used for training and testing
```
x = pd.DataFrame(iris_dataset.data, columns=iris_dataset.feature_names)
y = pd.DataFrame(iris_dataset.target, columns=["Labels"])
x
```
Alternatively, can load a dataset into x & y directly (i.e. into input/output pairs) by setting the `return_X_y` parameter to `True`
```
x, y = datasets.load_iris(return_X_y=True)
x.shape, y.shape
```
## Train/Test Split
In order to validate that our model can generalize to data that it wasn't trained on, it's necessary to create a separate **testing dataset** that will not be used in training.
Within the `model_selection` submodule of Scikit Learn, there's the `train_test_split` that we can use to automatically split the data into training and testing pairs.
Here's an explanation of the different parameters taken directly from the function's docstring
#### **Parameters**
**arrays** : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
**test_size** : float, int or None, optional (default=None)
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If train_size is also None, it will
be set to 0.25.
**train_size** : float, int, or None, (default=None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
**random_state** : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
**shuffle** : boolean, optional (default=True)
Whether or not to shuffle the data before splitting. If shuffle=False
then stratify must be None.
**stratify** : array-like or None (default=None)
If not None, data is split in a stratified fashion, using this as
the class labels.
```
x_train, x_test, y_train, y_test = model_selection.train_test_split(
x, y, test_size=0.1, random_state=42, stratify=y
)
```
Please note that the `stratify` parameter works only in the context of classification tasks where there are a fixed amount of possible outputs/targets
# Fitting and predicting: estimator basics
Scikit-learn provides dozens of built-in machine learning algorithms and models, called estimators. Each estimator can be fitted to some data using its fit method.
Here is a simple example where we fit a Linear Regression to some very basic data:
```
x = [[ 1, 2, 3], # 2 samples, 3 features
[11, 12, 13]]
y = [0, 1]# classes of each sample
model = linear_model.LogisticRegression()
model.fit(x,y)
pred= model.predict(x) # predict classes of the training data
print(pred)
pred= model.predict([[4, 5, 6], [14, 15, 16]]) # predict classes of new data
print(pred)
```
The `fit` method generally accepts 2 inputs:
1. The samples matrix (or design matrix) X. The size of X is typically (n_samples, n_features), which means that samples are represented as rows and features are represented as columns.
2. The target values y which are real numbers for regression tasks, or integers for classification (or any other discrete set of values). For unsupervized learning tasks, y does not need to be specified. y is usually 1d array where the i th entry corresponds to the target of the i th sample (row) of X.
Both X and y are usually expected to be numpy arrays or equivalent array-like data types, though some estimators work with other formats such as sparse matrices.
Once the estimator is fitted, it can be used for predicting target values of new data. You don’t need to re-train the estimator:
# Linear Regression
In statistics, linear regression is a linear approach to modelling the relationship between a set of features and a desired output. The case of one input feature is called simple linear regression; for more than one, the process is called multiple linear regression.
Scikit Learn defines this algorithm in `LinearRegression` class as a part of the `linear_models` module.
First, we load the data
```
x, y = datasets.load_diabetes(return_X_y=True)
# normalize the values of x and y
y_normalize = preprocessing.MinMaxScaler()
y_norm = y_normalize.fit_transform(y.reshape(-1, 1))  # scale y into [0, 1]
x_normalize = preprocessing.StandardScaler()
x_norm = x_normalize.fit_transform(x)  # standardize x (zero mean, unit variance)
print("Diabetes features/input shape:", x.shape)
print("Diabetes target/output shape:", y.shape)
```
Second, we split the data into 90/10 training/testing split (90% of the data will be used for training while 10% will be used for testing)
```
x_train, x_test, y_train, y_test = model_selection.train_test_split(
x_norm, y_norm.reshape(-1), test_size=0.1, random_state=42
)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
```
Third, we train (i.e. `fit`) the model using the training dataset (`x_train` as inputs, `y_train` as targets)
```
regressor = (
linear_model.LinearRegression()
) # initialize the parameter of linear regression model
regressor.fit(x_train, y_train) # training the model on the train data
# we can preview the learned coefficients (i.e. weights) and intercept (i.e. bias)
print("Weights:\n", regressor.coef_)
print("Bias:\n", regressor.intercept_)
```
Fourth, we'll feed the test set into the trained model
```
y_pred = regressor.predict(x_test)
```
Finally, we'll evaluate the predicted output against the ground-truth values in `y_test` using Scikit Learn's `metrics` module
One of the most used metrics to evaluate regression models is `mean_squared_error` which has the following formula: $$\frac{1}{n}\sum_{i=1}^{n}(\hat y_i - y_i)^2$$
Where `n` is the total number of examples evaluated (in this case 45), $\hat y$ is the predicted value (here `y_pred`) and $y$ is the ground-truth value (here `y_test`)
```
metrics.mean_squared_error(y_test, y_pred)
```
# Logistic Regression
In statistics, the logistic model (or logit model) is used to model the probability of a certain class or event existing such as pass/fail, win/lose, alive/dead or healthy/sick. This can be extended to model several classes of events such as determining whether an image contains a cat, dog, lion, etc. Each object being detected in the image would be assigned a probability between 0 and 1, with a sum of one.
Scikit Learn defines this algorithm in `LogisticRegression` class as a part of the `linear_models` module.
First, we load the data
```
x, y = datasets.load_breast_cancer(return_X_y=True)
# normalize the values of x
x_normalize = preprocessing.StandardScaler()
x_norm = x_normalize.fit_transform(x)
print("Breast Cancer features/input shape:", x_norm.shape)
print("Breast Cancer target/output shape:", y.shape)
```
Second, we split the data into 90/10 training/testing split (90% of the data will be used for training while 10% will be used for testing)
Since this is a classification problem (we only have two possible outputs, 1 or 0), we can use the `stratify` parameter to ensure that the two possible output values are distributed proportionally between the training and testing sets and preserve the data's original distribution across the two sets.
```
x_train, x_test, y_train, y_test = model_selection.train_test_split(
x_norm, y, test_size=0.1, random_state=42, stratify=y
)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
```
Third, we train (i.e. `fit`) the model using the training dataset (`x_train` as inputs, `y_train` as targets)
```
classifier = linear_model.LogisticRegression()
classifier.fit(x_train, y_train)
# we can preview the learned coefficients (i.e. weights) and intercept (i.e. bias)
# bug fix: the original printed regressor.coef_/intercept_ — the parameters of
# the earlier LinearRegression model — instead of the classifier just fitted.
print("Weights:\n", classifier.coef_)
print("Bias:\n", classifier.intercept_)
```
Fourth, we'll feed the test set into the trained model
```
y_pred = classifier.predict(x_test)
```
Finally, we'll evaluate the predicted output against the ground-truth values in `y_test` using Scikit Learn's `metrics` module
One of the most used metrics to evaluate classification models is `accuracy_score` which calculates the precentage of the examples that the trained classifier guessed correctly
```
metrics.accuracy_score(y_test, y_pred)
```
# Pipeline
Scikit-learn's pipeline class is a useful tool for encapsulating multiple different transformers alongside an estimator into one object, so that you only have to call your important methods once (`fit()`, `predict()`, etc.).
```
# Import the sklearn pipeline
from sklearn.pipeline import Pipeline
# Download the dataset
x, y = datasets.load_breast_cancer(return_X_y=True)
# Split the dataset to train and test
x_train, x_test, y_train, y_test = model_selection.train_test_split(
x, y, test_size=0.1, random_state=42, stratify=y
)
```
The first step in building the pipeline is to define each step it will contain. In the code below the pipeline chains a StandardScaler, which standardizes the features, with a LogisticRegression estimator. Wrapping them together means the same scaling is applied consistently during both fitting and prediction with a single call to `fit` or `score`.
```
# Create the sklearn pipeline
pipe = Pipeline([('scaler', preprocessing.StandardScaler()),
('Logistic_R', linear_model.LogisticRegression())])
# fit the pipeline
pipe.fit(x_train, y_train)
# Calculate the Accuracy of the model
pipe.score(x_test, y_test)
```
| github_jupyter |
# NOTES:
- Waiting vs blocking
--> blocking holds up everything (could be selective?)
--> waiting for specific resources to reach inactive state (flags?)
- Platemap vs positionmap
- Axes orientation
# TODO:
- tip touch
- get motor current position
- tip touch
- calibration
- initialization reference
- GUI
- pyVISA
```
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from IPython.display import display
import ipywidgets as widgets
from __future__ import division
%matplotlib notebook
```
# Motor
Lin Engineering
http://www.linengineering.com/wp-content/uploads/downloads/Silverpak_17C/documentation/Lin_Command_Manual.pdf
1. Determine appropriate velocity_max = microsteps/sec
2. Determine motor limits
3. Determine conv = microsteps/mm
4. Determine orientation (P+; D-)
```
import serial as s
import time
import yaml
# TODO: get current position for relative move
class Motor:
    """Driver for a Lin Engineering Silverpak 17C stepper on a serial port.

    Distances are given in mm and converted to microsteps via the 'conv'
    factor (microsteps/mm) from the YAML config; velocities are in
    microsteps/sec.
    """

    def __init__(self, config_file, init=True):
        # Placeholder port object so exit() is safe before initialize().
        self.serial = s.Serial()
        # safe_load: never execute arbitrary YAML tags from a config file.
        with open(config_file, 'r') as f:
            self.config = yaml.safe_load(f)
        if init:
            self.initialize()

    def initialize(self):
        """Open the serial connection, apply the velocity limit and home."""
        self.serial = s.Serial(**self.config['serial'])  # open serial connection
        # TODO set moving current
        # TODO set holding current
        self.set_velocity(self.config['velocity_limit'])  # set velocity
        self.home()  # move motor to home

    def cmd(self, cmd_string, block=True):
        """Send one command string and return the controller's raw response.

        When block is True, busy-wait until the motor reports idle.
        """
        full_string = self.config['prefix'] + cmd_string + self.config['terminator']
        # NOTE(review): on Python 3 pyserial requires bytes here — confirm
        # whether full_string needs .encode() for this deployment.
        self.serial.write(full_string)
        time.sleep(0.15)  # TODO: monitor for response?
        response = self.serial.read(self.serial.inWaiting()).decode('utf8', 'ignore')
        while block and self.is_busy():
            pass
        return response

    def is_busy(self):
        # 'Q' queries status; the reply contains a backtick when ready.
        cmd_string = 'Q'
        time.sleep(0.05)
        response = self.cmd(cmd_string, False)
        return response.rfind('`') == -1

    # velocity: (usteps/sec)
    def set_velocity(self, velocity):
        """Set the motor velocity, clamped to the configured limit."""
        if velocity > self.config['velocity_limit']:
            velocity = self.config['velocity_limit']
            print('ERR: Desired velocity exceeds velocity_limit; velocity now set to velocity_limit')
        cmd_string = 'V{}R'.format(velocity)
        return self.cmd(cmd_string)

    def halt(self):
        """Immediately terminate any motion in progress ('T' command)."""
        cmd_string = 'T'
        self.cmd(cmd_string)

    def home(self):
        """Home the motor ('Z' command) using the configured max microsteps."""
        cmd_string = 'Z{}R'.format(self.config['ustep_max'])
        return self.cmd(cmd_string)

    def move(self, mm, block=True):
        """Move to an absolute position in mm, clamped to configured limits."""
        ustep = int(self.config['conv'] * mm)
        if ustep > self.config['ustep_max']:
            ustep = self.config['ustep_max']
            print('ERR: Desired move to {} mm exceeds max of {} mm; moving to max instead'.format(mm, self.config['ustep_max'] / self.config['conv']))
        if ustep < self.config['ustep_min']:
            ustep = self.config['ustep_min']
            print('ERR: Desired move to {} mm exceeds min of {} mm; moving to min instead'.format(mm, self.config['ustep_min'] / self.config['conv']))
        cmd_string = 'A{}R'.format(ustep)
        return self.cmd(cmd_string, block)

    def move_relative(self, mm):
        """Move by a relative offset in mm ('P' forward, 'D' backward)."""
        ustep = int(self.config['conv'] * mm)
        # TODO: use the real current position to limit movement (+ and -);
        # mid-travel is assumed here.
        ustep_current = int(self.config['ustep_max'] / 2)
        if mm >= 0:
            if (ustep_current + ustep) > self.config['ustep_max']:
                ustep = self.config['ustep_max'] - ustep_current
                print('ERR: Desired move of +{} mm exceeds max of {} mm; moving to max instead'.format(mm, self.config['ustep_max'] / self.config['conv']))
            cmd_string = 'P{}R'.format(ustep)
        else:
            if (ustep_current + ustep) < self.config['ustep_min']:
                ustep = self.config['ustep_min'] - ustep_current
                print('ERR: Desired move of {} mm exceeds min of {} mm; moving to min instead'.format(mm, self.config['ustep_min'] / self.config['conv']))
            ustep = -1 * ustep
            cmd_string = 'D{}R'.format(ustep)
        return self.cmd(cmd_string)

    def where(self):
        """Return the current position in mm."""
        cmd_string = '?0'
        ustep = self.cmd(cmd_string)
        # bug fix: original read 'retrun' (a SyntaxError).
        # NOTE(review): cmd() returns the full response string; float() may
        # need the numeric field extracted first — confirm the reply format.
        return float(ustep) / self.config['conv']

    def exit(self):
        """Close the serial port."""
        self.serial.close()
# Smoke-test of the Motor driver (requires the hardware on the configured port).
m = Motor('config/le_motor.yaml')
# Raw status query bypassing cmd(): '/1Q' addresses controller 1.
m.serial.write('/1Q\r')
time.sleep(0.5)
m.serial.read(m.serial.inWaiting())
# 'Z...R' is the home command (see Motor.home).
m.cmd('Z1000R')
print m.move(32)
time.sleep(1)
print m.move(20)
# Stress test: four +/-100000-microstep relative move pairs in one command.
print m.cmd('P100000D100000P100000D100000P100000D100000P100000D100000R')
print m.cmd('/1?0')
m.exit()
```
# ASI Controller
Applied Scientific Instrumentation
http://www.asiimaging.com/downloads/manuals/Operations_and_Programming_Manual.pdf
1. Set hall effect sensors to appropriate limits
2. Determine orientation (X+-, Y+-)
```
import serial as s
import time
import yaml
# TODO: Fix serial.read encoding
class ASI_Controller:
    """Driver for an ASI stage controller: XY stage (axis '2h') + Z ('1h')."""

    def __init__(self, config_file, init=True):
        # Placeholder port object so exit() is safe before initialize().
        self.serial = s.Serial()
        # safe_load: never execute arbitrary YAML tags from a config file.
        with open(config_file, 'r') as f:
            self.config = yaml.safe_load(f)
        if init:
            self.initialize()

    def initialize(self):
        """Open the serial port, enable the motors and seek the XY limits."""
        self.serial = s.Serial(**self.config['serial'])  # open serial connection
        self.cmd_xy('mc x+ y+')  # enable motor control for xy
        self.cmd_z('mc z+')  # enable motor control for z
        print("Initializing stage...")
        self.move_xy(2000, -2000)  # move to switch limits (bottom right)
        self.r_xy(-0.5, 0.5)  # move from switch limits 0.5 mm

    def cmd(self, cmd_string):
        """Send one raw command string and return the controller's response."""
        full_string = self.config['prefix'] + cmd_string + self.config['terminator']
        # NOTE(review): on Python 3 pyserial requires bytes and read() returns
        # bytes — confirm encode/decode needs for this deployment.
        self.serial.write(full_string)
        time.sleep(0.05)
        response = self.serial.read(self.serial.inWaiting())
        return response

    def halt(self):
        """Stop both the XY stage and the Z axis."""
        self.halt_xy()
        self.halt_z()

    # XY ----------------------------------------------
    def cmd_xy(self, cmd_string, block=True):
        """Send a command to the XY axis; optionally wait until it is idle."""
        full_string = '2h ' + cmd_string
        response = self.cmd(full_string)
        while block and self.is_busy_xy():
            time.sleep(0.05)
        return response

    def is_busy_xy(self):
        # STATUS replies start with 'B' (busy) or 'N' (not busy).
        status = self.cmd('2h STATUS')[0]
        return status == 'B'

    def halt_xy(self):
        self.cmd_xy('HALT', False)

    def where_xy(self):
        """Return the (x, y) position, or (None, None) on an invalid reply."""
        response = self.cmd_xy('WHERE X Y')
        # bug fix: str.find() returns -1 (truthy!) when absent; the original
        # truthiness test was wrong whenever 'A' was missing or at index 0.
        if response.find('A') != -1:
            pos_xy = response.split()[1:3]
            pos_x = float(pos_xy[0])
            pos_y = float(pos_xy[1])
            return pos_x, pos_y
        else:
            return None, None

    def move_xy(self, x_mm, y_mm):
        """Move the stage to an absolute (x, y) position in mm."""
        conv = self.config['conv']
        xStr = 'x=' + str(float(x_mm) * conv)
        yStr = 'y=' + str(float(y_mm) * conv)
        return self.cmd_xy(' '.join(['m', xStr, yStr]))

    def r_xy(self, x_mm, y_mm):
        """Move the stage by a relative (x, y) offset in mm."""
        conv = self.config['conv']
        xStr = 'x=' + str(float(x_mm) * conv)
        yStr = 'y=' + str(float(y_mm) * conv)
        return self.cmd_xy(' '.join(['r', xStr, yStr]))

    # Z -----------------------------------------------
    def cmd_z(self, cmd_string, block=True):
        """Send a command to the Z axis, first waiting for it to go idle."""
        while block and self.is_busy_z():
            time.sleep(0.3)
        full_string = '1h ' + cmd_string
        return self.cmd(full_string)

    def is_busy_z(self):
        status = self.cmd('1h STATUS')
        return status[0] == 'B'

    def halt_z(self):
        self.cmd_z('HALT', False)

    def where_z(self):
        """Return the z position, or None on an invalid reply."""
        response = self.cmd_z('WHERE Z')
        # bug fixes: explicit -1 test (see where_xy), and float() was being
        # given a one-element list slice ([1:2]) — a guaranteed TypeError.
        if response.find('A') != -1:
            pos_z = float(response.split()[1])
            return pos_z
        else:
            return None

    def move_z(self, z_mm):
        """Move the Z axis to an absolute position in mm."""
        conv = self.config['conv']
        zStr = 'z=' + str(float(z_mm) * conv)
        return self.cmd_z(' '.join(['m', zStr]))

    def r_z(self, z_mm):
        """Move the Z axis by a relative offset in mm."""
        conv = self.config['conv']
        zStr = 'z=' + str(float(z_mm) * conv)
        return self.cmd_z(' '.join(['r', zStr]))

    def exit(self):
        """Close the serial port."""
        self.serial.close()
# Open the ASI stage controller (initialize() homes the stage), then release it.
a = ASI_Controller('config/asi_controller.yaml')
a.exit()
```
# Autosipper
```
from utils import lookup, read_delim_pd
import numpy as np
class Autosipper:
    """Couples a Z motor (needle) and an XY stage to wells on a platemap."""

    def __init__(self, z, xy):
        self.Z = z  # must be initialized first!
        self.XY = xy
        # Prompt until a readable platemap file is supplied.
        while True:
            fp = input('Type in plate map file:')
            try:
                self.load_platemap(fp)  # load platemap
                break
            except IOError:
                print('No file', fp)
        input('Place dropper above reference (press enter when done)')
        self.XY.cmd_xy('here x y')  # establish current position as 0,0

    def load_platemap(self, filepath):
        """Read the delimited platemap file into self.platemap."""
        self.platemap = read_delim_pd(filepath)

    def go_to(self, columns, values):
        """Move the needle to the platemap row matching values in columns."""
        x1, y1, z1 = np.array(lookup(self.platemap, columns, values)[['x', 'y', 'z']])[0]
        self.Z.home()  # move needle to travel height (blocking)
        self.XY.move_xy(x1, y1)  # move stage (blocking)
        self.Z.move(z1)  # move needle to bottom of well (blocking)

    def where(self):
        """Return the current (x, y, z) position."""
        # bug fix: original referenced bare names XY and Z (NameError at
        # runtime); they are instance attributes.
        pos_x, pos_y = self.XY.where_xy()
        pos_z = self.Z.where()
        return pos_x, pos_y, pos_z

    def exit(self):
        """Close both axes' serial connections."""
        self.XY.exit()
        self.Z.exit()
# Build the full autosipper from the two drivers (prompts for a platemap).
d = Autosipper(Motor('config/le_motor.yaml'), ASI_Controller('config/asi_controller.yaml'))
d.platemap
# 3D scatter of all platemap well positions.
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(d.platemap['x'], d.platemap['y'], d.platemap['z'], s=5)
plt.show()
# NOTE(review): missing () — this references the bound method without calling it.
d.Z.home
d.XY.r_xy(0,5)
# Move to the well named 'A12'.
d.go_to(['name'],'A12')
d.exit()
```
# Communication: PyVISA
Install NI-VISA:
https://pyvisa.readthedocs.io/en/stable/getting_nivisa.html
```
# Enumerate instruments visible to NI-VISA via PyVISA.
import visa
rm = visa.ResourceManager()
rm.list_resources()
# Detailed per-resource info (interface type, alias, etc.).
rm.list_resources_info()
```
# Documentation: Sphinx
http://thomas-cokelaer.info/tutorials/sphinx/docstring_python.html
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.