code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
# change to root directory of project
import os
os.chdir('/home/tm/sciebo/corona/twitter_analysis/')
from bld.project_paths import project_paths_join as ppj
from IPython.display import display
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
#import requests
#import json
#import argparse
#from google.cloud import language
#from google.oauth2 import service_account
#from google.cloud.language import enums
#from google.cloud.language import types
```
## Data management
```
# Labeled tweets; the first CSV column is a stale index written by
# ``to_csv`` (presumably — TODO confirm), so it is dropped via ``iloc``.
data = pd.read_csv(ppj("IN_DATA", "training_data/data_clean_translated.csv")).iloc[:, 1:]
data_processed = pd.read_csv(ppj("IN_DATA", "training_data/data_processed_translated.csv")).iloc[:, 1:]

# Work on a copy and attach the preprocessed text next to the raw text.
df = data.copy()
df["processed"] = data_processed.text
# Map string labels onto numeric scores: negative -> -1, neutral -> 0, positive -> 1.
df["sentiment_score"] = df.sentiment.replace({'neutral': 0, 'negative': -1, 'positive': 1})
df = df.dropna()
```
## Functions
```
def classify_sentiment(list_of_text, method):
    """Classify sentiment for each item in ``list_of_text``.

    Args:
        list_of_text (list): Strings whose sentiment should be scored.
        method (str): Name of the method to use; one of 'google',
            'vader', 'textblob'.

    Returns:
        sentiments (list): Sentiment score for each input text.
    """
    # Resolve the analyzer once, then apply it to the whole batch.
    return return_sentiment_analyzer(method)(list_of_text)
def return_sentiment_analyzer(method):
    """Return the sentiment-analyzer function matching ``method``.

    Args:
        method (str): Name of the method to use; one of 'google',
            'vader', 'textblob'.

    Returns:
        analyzer (function): Callable mapping a list of texts to a list
            of sentiment scores; inner workings depend on ``method``.

    Raises:
        KeyError: If ``method`` is not one of the known analyzer names.
    """
    # Direct dict indexing so an unknown method raises KeyError.
    return {
        'google': analyze_google,
        'textblob': analyze_textblob,
        'vader': analyze_vader,
    }[method]
def analyze_google(list_of_text):
    """Return sentiment for each text in ``list_of_text``.

    Sentiments are analyzed using Google's Cloud Natural Language API.

    Args:
        list_of_text (list): Strings whose sentiment should be scored.

    Returns:
        sentiments (list): Document-level sentiment score from the API
            for each input text.
    """
    # Fix: the google-cloud imports are commented out at the top of the
    # file, so ``language``/``enums``/``types`` were undefined and this
    # function raised NameError when called. Import them locally so the
    # dependency is only required when this analyzer is actually used.
    from google.cloud import language
    from google.cloud.language import enums
    from google.cloud.language import types

    client = language.LanguageServiceClient.from_service_account_json(
        'src/keys/ose-twitter-analysis-8508806b2efb.json'
    )
    sentiments = []
    for text in list_of_text:
        document = types.Document(
            content=text,
            type=enums.Document.Type.PLAIN_TEXT
        )
        # One API round-trip per text; only the document-level score is kept.
        annotations = client.analyze_sentiment(document=document)
        sentiments.append(annotations.document_sentiment.score)
    return sentiments
def analyze_textblob(list_of_text):
    """Return sentiment for each text in ``list_of_text`` using ``textblob``.

    Args:
        list_of_text (list): Strings whose sentiment should be scored.

    Returns:
        sentiments (list): TextBlob polarity score for each input text.
    """
    sentiments = []
    for text in list_of_text:
        sentiments.append(TextBlob(text).sentiment.polarity)
    return sentiments
def analyze_vader(list_of_text):
    """Return sentiment for each text in ``list_of_text`` using ``vaderSentiment``.

    Args:
        list_of_text (list): Strings whose sentiment should be scored.

    Returns:
        sentiments (list): Compound VADER polarity score for each
            input text.
    """
    # One analyzer instance is reused for every text in the batch.
    vader = SentimentIntensityAnalyzer()
    sentiments = []
    for text in list_of_text:
        sentiments.append(vader.polarity_scores(text)['compound'])
    return sentiments
```
## Analysis
```
# Score every tweet with each analyzer, on both raw and preprocessed text.
# 'google' is left commented out here.
analyzers = ['textblob', 'vader'] #, 'google']
for text_column in ['text', 'processed']:
    for analyzer_name in analyzers:
        scores = classify_sentiment(df[text_column].to_list(), method=analyzer_name)
        df[analyzer_name + "_" + text_column] = scores
def continuous_to_class(score, threshold=0.33):
    """Discretize continuous sentiment scores into class labels.

    Scores below ``-threshold`` become 'negative', scores above
    ``threshold`` become 'positive', everything in between 'neutral'.

    Args:
        score (numpy.ndarray or pandas.Series): Continuous sentiment
            scores (expected roughly in [-1, 1] — TODO confirm).
        threshold (float): Cut-off for the neutral band. Defaults to
            0.33, matching the original hard-coded value.

    Returns:
        new_score (pandas.Series): Class label for each input score.
    """
    new_score = np.zeros(score.shape)
    new_score[score < -threshold] = -1
    new_score[score > threshold] = 1
    new_score = pd.Series(new_score).replace(
        {-1: 'negative', 0: 'neutral', 1: 'positive'}
    )
    return new_score
def confusion_matrix_to_readable(cmat, labels):
    """Wrap a raw confusion matrix in a labeled DataFrame.

    Rows are prefixed 'true_' and columns 'pred_' so the matrix can be
    read without remembering the sklearn row/column convention.
    """
    readable = pd.DataFrame(
        cmat,
        columns=['pred_' + lab for lab in labels],
        index=['true_' + lab for lab in labels],
    )
    return readable
def absolute_to_freq(cmat):
    """Convert absolute confusion counts to row-wise frequencies."""
    # keepdims=True keeps the row totals as a column vector so the
    # division broadcasts across each row.
    row_totals = cmat.sum(axis=1, keepdims=True)
    return cmat / row_totals
# Encode the gold-standard string labels as integers (LabelEncoder
# assigns codes in sorted label order).
le = LabelEncoder().fit(df["sentiment"])
y_true = le.transform(df["sentiment"])

# Analyzer/text-column combinations to evaluate.
columns = [
    'textblob_text',
    'vader_text',
    'textblob_processed',
    'vader_processed'
]

# Discretize each analyzer's continuous scores and encode them the same
# way as the true labels, then build (frequency) confusion matrices.
predictions = []
for col in columns:
    predictions.append(le.transform(continuous_to_class(df[col])))

cmats = []
for pred in predictions:
    cmats.append(confusion_matrix(y_true, pred))

cmats_freq = []
for cmat in cmats:
    cmats_freq.append(absolute_to_freq(cmat))

df_cmats = []
for cmat in cmats_freq:
    df_cmats.append(confusion_matrix_to_readable(cmat, le.classes_))
```
## Benchmark
```
# Empirical class frequencies, ordered negative/neutral/positive so
# they line up with the confusion-matrix rows.
weights = pd.Series(y_true).value_counts(normalize=True)
weights = weights.reindex(le.transform(['negative', 'neutral', 'positive']))
weights
```
### Evaluation
```
# Report each analyzer's frequency confusion matrix plus its accuracy
# (diagonal of the row-normalized matrix weighted by class frequency).
for col, cmat_df in zip(columns, df_cmats):
    print(col)
    display(cmat_df)
    accuracy = cmat_df.values.diagonal().dot(weights)
    print(f"Percent correctly classified: {accuracy}")
```
| github_jupyter |
# Table of Contents
<p><div class="lev2 toc-item"><a href="#Common-Layers" data-toc-modified-id="Common-Layers-01"><span class="toc-item-num">0.1 </span>Common Layers</a></div><div class="lev3 toc-item"><a href="#Convolution-Layers" data-toc-modified-id="Convolution-Layers-011"><span class="toc-item-num">0.1.1 </span>Convolution Layers</a></div><div class="lev4 toc-item"><a href="#tf.nn.depthwise_conv2d" data-toc-modified-id="tf.nn.depthwise_conv2d-0111"><span class="toc-item-num">0.1.1.1 </span>tf.nn.depthwise_conv2d</a></div><div class="lev4 toc-item"><a href="#tf.nn.separable_conv2d" data-toc-modified-id="tf.nn.separable_conv2d-0112"><span class="toc-item-num">0.1.1.2 </span>tf.nn.separable_conv2d</a></div><div class="lev4 toc-item"><a href="#tf.nn.conv2d_transpose" data-toc-modified-id="tf.nn.conv2d_transpose-0113"><span class="toc-item-num">0.1.1.3 </span>tf.nn.conv2d_transpose</a></div><div class="lev3 toc-item"><a href="#Activation-Functions" data-toc-modified-id="Activation-Functions-012"><span class="toc-item-num">0.1.2 </span>Activation Functions</a></div><div class="lev4 toc-item"><a href="#tf.nn.relu" data-toc-modified-id="tf.nn.relu-0121"><span class="toc-item-num">0.1.2.1 </span>tf.nn.relu</a></div><div class="lev4 toc-item"><a href="#tf.sigmoid" data-toc-modified-id="tf.sigmoid-0122"><span class="toc-item-num">0.1.2.2 </span>tf.sigmoid</a></div><div class="lev4 toc-item"><a href="#tf.tanh" data-toc-modified-id="tf.tanh-0123"><span class="toc-item-num">0.1.2.3 </span>tf.tanh</a></div><div class="lev4 toc-item"><a href="#tf.nn.dropout" data-toc-modified-id="tf.nn.dropout-0124"><span class="toc-item-num">0.1.2.4 </span>tf.nn.dropout</a></div><div class="lev3 toc-item"><a href="#Pooling-Layers" data-toc-modified-id="Pooling-Layers-013"><span class="toc-item-num">0.1.3 </span>Pooling Layers</a></div><div class="lev4 toc-item"><a href="#tf.nn.max_pool" data-toc-modified-id="tf.nn.max_pool-0131"><span class="toc-item-num">0.1.3.1 </span>tf.nn.max_pool</a></div><div class="lev4 
toc-item"><a href="#tf.nn.avg_pool" data-toc-modified-id="tf.nn.avg_pool-0132"><span class="toc-item-num">0.1.3.2 </span>tf.nn.avg_pool</a></div><div class="lev3 toc-item"><a href="#Normalization" data-toc-modified-id="Normalization-014"><span class="toc-item-num">0.1.4 </span>Normalization</a></div><div class="lev4 toc-item"><a href="#tf.nn.local_response_normalization-(tf.nn.lrn)" data-toc-modified-id="tf.nn.local_response_normalization-(tf.nn.lrn)-0141"><span class="toc-item-num">0.1.4.1 </span>tf.nn.local_response_normalization (tf.nn.lrn)</a></div><div class="lev3 toc-item"><a href="#High-Level-Layers" data-toc-modified-id="High-Level-Layers-015"><span class="toc-item-num">0.1.5 </span>High Level Layers</a></div><div class="lev4 toc-item"><a href="#tf.contrib.layers.convolution2d" data-toc-modified-id="tf.contrib.layers.convolution2d-0151"><span class="toc-item-num">0.1.5.1 </span>tf.contrib.layers.convolution2d</a></div><div class="lev4 toc-item"><a href="#tf.contrib.layers.fully_connected" data-toc-modified-id="tf.contrib.layers.fully_connected-0152"><span class="toc-item-num">0.1.5.2 </span>tf.contrib.layers.fully_connected</a></div><div class="lev4 toc-item"><a href="#Layer-Input" data-toc-modified-id="Layer-Input-0153"><span class="toc-item-num">0.1.5.3 </span>Layer Input</a></div>
## Common Layers
For a neural network architecture to be considered a CNN, it requires at least one convolution layer (`tf.nn.conv2d`). There are practical uses for a single-layer CNN (edge detection), but for image recognition and categorization it is common to use different layer types to support a convolution layer. These layers help reduce over-fitting, speed up training and decrease memory usage.
The layers covered in this chapter are focused on layers commonly used in a CNN architecture. A CNN isn't limited to use only these layers, they can be mixed with layers designed for other network architectures.
```
# setup-only-ignore
import tensorflow as tf
import numpy as np
# setup-only-ignore
# TF1-style interactive session: lets the following cells call
# ``sess.run`` without wrapping everything in a ``with`` block.
sess = tf.InteractiveSession()
```
### Convolution Layers
One type of convolution layer has been covered in detail (`tf.nn.conv2d`) but there are a few notes which are useful to advanced users. The convolution layers in TensorFlow don't do a full convolution, details can be found in [the TensorFlow API documentation](https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#convolution). In practice, the difference between a convolution and the operation TensorFlow uses is performance. TensorFlow uses a technique to speed up the convolution operation in all the different types of convolution layers.
There are use cases for each type of convolution layer, but `tf.nn.conv2d` is a good place to start. The other types of convolutions are useful but not required in building a network capable of object recognition and classification. A brief summary of each is included.
#### tf.nn.depthwise_conv2d
Used when attaching the output of one convolution to the input of another convolution layer. An advanced use case is using a `tf.nn.depthwise_conv2d` to create a network following the [inception architecture](http://arxiv.org/abs/1512.00567).
#### tf.nn.separable_conv2d
Similar to `tf.nn.conv2d` but not a replacement. For large models, it speeds up training without sacrificing accuracy. For small models, it will converge quickly with worse accuracy.
#### tf.nn.conv2d_transpose
Applies a kernel to a new feature map where each section is filled with the same values as the kernel. As the kernel strides over the new image, any overlapping sections are summed together. There is a great explanation on how `tf.nn.conv2d_transpose` is used for learnable upsampling in [Stanford's CS231n Winter 2016: Lecture 13](https://www.youtube.com/watch?v=ByjaPdWXKJ4&t=20m00s).
### Activation Functions
These functions are used in combination with the output of other layers to generate a feature map. They're used to smooth (or differentiate) the results of certain operations. The goal is to introduce non-linearity into the neural network. Non-linearity means that the input is a curve instead of a straight line. Curves are capable of representing more complex changes in input. For example, non-linear input is capable of describing input which stays small for the majority of the time but periodically has a single point at an extreme. Introduction of non-linearity in a neural network allows it to train on the complex patterns found in data.
TensorFlow has [multiple activation functions](https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#activation-functions) available. With CNNs, `tf.nn.relu` is primarily used because of its performance although it sacrifices information. When starting out, using `tf.nn.relu` is recommended but advanced users may create their own. When considering if an activation function is useful there are a few primary considerations.
1. The function is [**monotonic**](https://en.wikipedia.org/wiki/Monotonic_function), so its output should always be increasing or decreasing along with the input. This allows gradient descent optimization to search for local minima.
2. The function is [**differentiable**](https://en.wikipedia.org/wiki/Differentiable_function), so there must be a derivative at any point in the function's domain. This allows gradient descent optimization to properly work using the output from this style of activation function.
Any functions which satisfy those considerations could be used as activation functions. In TensorFlow there are a few worth highlighting which are common to see in CNN architectures. A brief summary of each is included with a small sample code illustrating their usage.
#### tf.nn.relu
A rectifier (rectified linear unit) called a ramp function in some documentation and looks like a skateboard ramp when plotted. ReLU is linear and keeps the same input values for any positive numbers while setting all negative numbers to be 0. It has the benefits that it doesn't suffer from [gradient vanishing](https://en.wikipedia.org/wiki/Vanishing_gradient_problem) and has a range of <span class="math-tex" data-type="tex">\\([0,+\infty)\\)</span>. A drawback of ReLU is that it can suffer from neurons becoming saturated when too high of a learning rate is used.
```
# Integers -2 through 2; ReLU clamps the negative ones to zero.
features = tf.range(-2, 3)
rectified = tf.nn.relu(features)
sess.run([features, rectified])
```
In this example, the input is a rank one tensor (vector) of integer values between <span class="math-tex" data-type="tex">\\([-2, 3]\\)</span>. A `tf.nn.relu` is run over the values, and the output highlights that any value less than 0 is set to 0. The other input values are left untouched.
#### tf.sigmoid
A sigmoid function returns a value in the range of <span class="math-tex" data-type="tex">\\([0.0, 1.0]\\)</span>. Larger values sent into a `tf.sigmoid` will trend closer to 1.0 while smaller values will trend towards 0.0. The ability of sigmoids to keep values between <span class="math-tex" data-type="tex">\\([0.0, 1.0]\\)</span> is useful in networks which train on probabilities which are in the range of <span class="math-tex" data-type="tex">\\([0.0, 1.0]\\)</span>. The reduced range of output values can cause trouble with input becoming saturated and changes in input becoming exaggerated.
```
# tf.sigmoid (tf.nn.sigmoid) is currently limited to float values,
# so the integer range is converted first.
features = tf.to_float(tf.range(-1, 3))
squashed = tf.sigmoid(features)
sess.run([features, squashed])
```
In this example, a range of integers is converted to be float values (`1` becomes `1.0`) and a sigmoid function is run over the input features. The result highlights that when a value of 0.0 is passed through a sigmoid, the result is 0.5, which is the midpoint of the sigmoid's domain. It's useful to note that with 0.5 being the sigmoid's midpoint, negative values can be used as input to a sigmoid.
#### tf.tanh
A hyperbolic tangent function (tanh) is a close relative to `tf.sigmoid` with some of the same benefits and drawbacks. The main difference between `tf.sigmoid` and `tf.tanh` is that `tf.tanh` has a range of <span class="math-tex" data-type="tex">\\([-1.0, 1.0]\\)</span>. The ability to output negative values may be useful in certain network architectures.
```
# tf.tanh (tf.nn.tanh) is currently limited to float values,
# so the integer range is converted first.
features = tf.to_float(tf.range(-1, 3))
hyperbolic = tf.tanh(features)
sess.run([features, hyperbolic])
```
In this example, all the setup is the same as the `tf.sigmoid` example but the output shows an important difference. In the output of `tf.tanh` the midpoint is 0.0 with negative values. This can cause trouble if the next layer in the network isn't expecting negative input or input of 0.0.
#### tf.nn.dropout
Set the output to be 0.0 based on a configurable probability. This layer performs well in scenarios where a little randomness helps training. An example scenario is when there are patterns being learned which are too tied to their neighboring features. This layer will add a little noise to the output being learned.
**NOTE**: This layer should only be used during training because the random noise it adds will give misleading results while testing.
```
features = tf.constant([-0.1, 0.0, 0.1, 0.2])
# Note, the output should be different on almost ever execution. Your numbers won't match
# this output.
sess.run([features, tf.nn.dropout(features, keep_prob=0.5)])
```
In this example, the output has a 50% probability of being kept. Each execution of this layer will have different output (most likely, it's somewhat random). When an output is dropped, its value is set to 0.0.
### Pooling Layers
Pooling layers reduce over-fitting and improve performance by reducing the size of the input. They're used to scale down input while keeping important information for the next layer. It's possible to reduce the size of the input using a `tf.nn.conv2d` alone but these layers execute much faster.
#### tf.nn.max_pool
Strides over a tensor and chooses the maximum value found within a certain kernel size. Useful when the intensity of the input data is relevant to importance in the image.

The same example is modeled using example code below. The goal is to find the largest value within the tensor.
```
# Usually the input would be output from a previous layer and not an image directly.
batch_size = 1
input_height = 3
input_width = 3
input_channels = 1
layer_input = tf.constant([
    [
        [[1.0], [0.2], [1.5]],
        [[0.1], [1.2], [1.4]],
        [[1.1], [0.4], [0.4]]
    ]
])
# A kernel spanning the full input height/width pools the whole image
# into a single value.
kernel = [batch_size, input_height, input_width, input_channels]
strides = [1, 1, 1, 1]
max_pool = tf.nn.max_pool(layer_input, kernel, strides, "VALID")
sess.run(max_pool)
```
The `layer_input` is a tensor with a shape similar to the output of `tf.nn.conv2d` or an activation function. The goal is to keep only one value, the largest value in the tensor. In this case, the largest value of the tensor is `1.5` and is returned in the same format as the input. If the `kernel` were set to be smaller, it would choose the largest value in each kernel size as it strides over the image.
Max-pooling will commonly be done using `2x2` receptive field (kernel with a height of 2 and width of 2) which is often written as a "2x2 max-pooling operation". One reason to use a `2x2` receptive field is that it's the smallest amount of downsampling which can be done in a single pass. If a `1x1` receptive field were used then the output would be the same as the input.
#### tf.nn.avg_pool
Strides over a tensor and averages all the values at each depth found within a kernel size. Useful when reducing values where the entire kernel is important, for example, input tensors with a large width and height but small depth.

The same example is modeled using example code below. The goal is to find the average of all the values within the tensor.
```
batch_size = 1
input_height = 3
input_width = 3
input_channels = 1
layer_input = tf.constant([
    [
        [[1.0], [1.0], [1.0]],
        [[1.0], [0.5], [0.0]],
        [[0.0], [0.0], [0.0]]
    ]
])
# The strides will look at the entire input by using the image_height and image_width
kernel = [batch_size, input_height, input_width, input_channels]
# Fix: the result was previously named ``max_pool`` (copy-paste from the
# max-pool example) even though this computes an average pool.
avg_pool = tf.nn.avg_pool(layer_input, kernel, [1, 1, 1, 1], "VALID")
sess.run(avg_pool)
```
Doing a summation of all the values in the tensor, then dividing by the number of scalars in the tensor:
<br />
<span class="math-tex" data-type="tex">\\(\dfrac{1.0 + 1.0 + 1.0 + 1.0 + 0.5 + 0.0 + 0.0 + 0.0 + 0.0}{9.0}\\)</span>
This is exactly what the example code did above but by reducing the size of the kernel, it's possible to adjust the size of the output.
### Normalization
Normalization layers are not unique to CNNs and aren't used as often. When using `tf.nn.relu`, it is useful to consider normalization of the output. Since ReLU is unbounded, it's often useful to utilize some form of normalization to identify high-frequency features.
#### tf.nn.local_response_normalization (tf.nn.lrn)
Local response normalization is a function which shapes the output based on a summation operation best explained in [TensorFlow's documentation](https://www.tensorflow.org/versions/master/api_docs/python/nn.html#local_response_normalization).
> ... Within a given vector, each component is divided by the weighted, squared sum of inputs within depth_radius.
One goal of normalization is to keep the input in a range of acceptable numbers. For instance, normalizing input in the range of <span class="math-tex" data-type="tex">\\([0.0,1.0]\\)</span> where the full range of possible values is normalized to be represented by a number greater than or equal to `0.0` and less than or equal to `1.0`. Local response normalization normalizes values while taking into account the significance of each value.
[Cuda-Convnet](https://code.google.com/p/cuda-convnet/wiki/LayerParams) includes further details on why using local response normalization is useful in some CNN architectures. [ImageNet](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf) uses this layer to normalize the output from `tf.nn.relu`.
```
# Three float values laid out as
# TensorShape([batch, image_height, image_width, image_channels]).
layer_input = tf.constant([
    [[[ 1.]], [[ 2.]], [[ 3.]]]
])
normalized = tf.nn.local_response_normalization(layer_input)
sess.run([layer_input, normalized])
```
In this example code, the layer input is in the format `[batch, image_height, image_width, image_channels]`. The normalization reduced the output to be in the range of <span class="math-tex" data-type="tex">\\([-1.0, 1.0]\\)</span>. For `tf.nn.relu`, this layer will reduce its unbounded output to be in the same range.
### High Level Layers
TensorFlow has introduced high level layers designed to make it easier to create fairly standard layer definitions. These aren't required to use but they help avoid duplicate code while following best practices. While getting started, these layers add a number of non-essential nodes to the graph. It's worth waiting until the basics are comfortable before using these layers.
#### tf.contrib.layers.convolution2d
The `convolution2d` layer will do the same logic as `tf.nn.conv2d` while including weight initialization, bias initialization, trainable variable output, bias addition and adding an activation function. Many of these steps haven't been covered for CNNs yet but should be familiar. A kernel is a trainable variable (the CNN's goal is to train this variable), weight initialization is used to fill the kernel with values (`tf.truncated_normal`) on its first run. The rest of the parameters are similar to what have been used before except they are reduced to short-hand version. Instead of declaring the full kernel, now it's a simple tuple `(1,1)` for the kernel's height and width.
```
# A batch containing one 3x3 three-channel "image" of constants.
image_input = tf.constant([
    [
        [[0., 0., 0.], [255., 255., 255.], [254., 0., 0.]],
        [[0., 191., 0.], [3., 108., 233.], [0., 191., 0.]],
        [[254., 0., 0.], [255., 255., 255.], [0., 0., 0.]]
    ]
])
# convolution2d bundles kernel/bias creation and the activation into one
# call; only the filter height/width and per-image strides are given.
conv2d = tf.contrib.layers.convolution2d(
    image_input,
    num_outputs=4,
    kernel_size=(1,1),
    stride=(1, 1),
    activation_fn=tf.nn.relu,
    trainable=True)
# The variables convolution2d creates must be initialized before running.
sess.run(tf.global_variables_initializer())
sess.run(conv2d)
```
This example setup a full convolution against a batch of a single image. All the parameters are based off of the steps done throughout this chapter. The main difference is that `tf.contrib.layers.convolution2d` does a large amount of setup without having to write it all again. This can be a great time saving layer for advanced users.
**NOTE**: `tf.to_float` should not be used if the input is an image, instead use `tf.image.convert_image_dtype` which will properly change the range of values used to describe colors. In this example code, float values of `255.` were used which aren't what TensorFlow expects when it sees an image using float values. TensorFlow expects an image with colors described as floats to stay in the range of <span class="math-tex" data-type="tex">\\([0,1]\\)</span>.
#### tf.contrib.layers.fully_connected
A fully connected layer is one where every input is connected to every output. This is a fairly common layer in many architectures but for CNNs, the last layer is quite often fully connected. The `tf.contrib.layers.fully_connected` layer offers a great short-hand to create this last layer while following best practices.
Typical fully connected layers in TensorFlow are often in the format of `tf.matmul(features, weight) + bias` where `feature`, `weight` and `bias` are all tensors. This short-hand layer will do the same thing while taking care of the intricacies involved in managing the `weight` and `bias` tensors.
```
# One sample with two single-valued features.
features = tf.constant([
    [[1.2], [3.4]]
])
fc = tf.contrib.layers.fully_connected(features, num_outputs=2)
# The layer creates weight/bias variables, so initialize them first or
# running will fail with a precondition error.
sess.run(tf.global_variables_initializer())
sess.run(fc)
```
This example created a fully connected layer and associated the input tensor with each neuron of the output. There are plenty of other parameters to tweak for different fully connected layers.
#### Layer Input
Each layer serves a purpose in a CNN architecture. It's important to understand them at a high level (at least) but without practice they're easy to forget. A crucial layer in any neural network is the input layer, where raw input is sent to be trained and tested. For object recognition and classification, the input layer is a `tf.nn.conv2d` layer which accepts images. The next step is to use real images in training instead of example input in the form of `tf.constant` or `tf.range` variables.
| github_jupyter |
# Machine Learning Engineer Nanodegree
## Unsupervised Learning
## Project 3: Creating Customer Segments
Welcome to the third project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `'TODO'` statement. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
## Getting Started
In this project, you will analyze a dataset containing data on various customers' annual spending amounts (reported in *monetary units*) of diverse product categories for internal structure. One goal of this project is to best describe the variation in the different types of customers that a wholesale distributor interacts with. Doing so would equip the distributor with insight into how to best structure their delivery service to meet the needs of each customer.
The dataset for this project can be found on the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers). For the purposes of this project, the features `'Channel'` and `'Region'` will be excluded in the analysis — with focus instead on the six product categories recorded for customers.
Run the code block below to load the wholesale customers dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported.
```
# Import libraries necessary for this project
import numpy as np
import pandas as pd
import renders as rs
from IPython.display import display # Allows the use of display() for DataFrames
# Show matplotlib plots inline (nicely formatted in the notebook)
import matplotlib as mpl
%matplotlib inline
# Load the wholesale customers dataset
try:
data = pd.read_csv("customers.csv")
data.drop(['Region', 'Channel'], axis = 1, inplace = True)
print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape)
except:
print "Dataset could not be loaded. Is the dataset missing?"
```
## Data Exploration
In this section, you will begin exploring the data through visualizations and code to understand how each feature is related to the others. You will observe a statistical description of the dataset, consider the relevance of each feature, and select a few sample data points from the dataset which you will track through the course of this project.
Run the code block below to observe a statistical description of the dataset. Note that the dataset is composed of six important product categories: **'Fresh'**, **'Milk'**, **'Grocery'**, **'Frozen'**, **'Detergents_Paper'**, and **'Delicatessen'**. Consider what each category represents in terms of products you could purchase.
```
# Display a description of the dataset
# ``describe`` summarizes count/mean/std/min/quartiles/max per column;
# ``display`` renders the result as a notebook table.
display(data.describe())
```
### Implementation: Selecting Samples
To get a better understanding of the customers and how their data will transform through the analysis, it would be best to select a few sample data points and explore them in more detail. In the code block below, add **three** indices of your choice to the `indices` list which will represent the customers to track. It is suggested to try different sets of samples until you obtain customers that vary significantly from one another.
```
# TODO: Select three indices of your choice you wish to sample from the dataset
indices = [28,29,30]
# Create a DataFrame of the chosen samples
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print "Chosen samples of wholesale customers dataset:"
display(samples)
```
### Question 1
Consider the total purchase cost of each product category and the statistical description of the dataset above for your sample customers.
*What kind of establishment (customer) could each of the three samples you've chosen represent?*
**Hint:** Examples of establishments include places like markets, cafes, and retailers, among many others. Avoid using names for establishments, such as saying *"McDonalds"* when describing a sample customer as a restaurant.
**Answer:**
By looking at the statistical description of the dataset, the median and the mean are quite far apart from each other. This implies that the distribution of the dataset is skewed. In such a case, the median is probably a better benchmark to describe the data than the mean. Also, the median is more robust to outliers than the mean.
Sample 0 has high costs in every category except "Fresh". The cost on "Frozen" is about the median. This one may represent a retailer or a convenience store.
Sample 1 has a very high cost on "Fresh" only. The costs on "Frozen" and "Delicatessen" are just above the medians. The costs on everything else are low. This one may represent a restaurant.
Sample 2 has high costs on "Fresh", "Grocery", "Detergents_Paper", and "Delicatessen". The costs of "Milk" and "Frozen" are about the medians. This one may represent a retailer.
### Implementation: Feature Relevance
One interesting thought to consider is if one (or more) of the six product categories is actually relevant for understanding customer purchasing. That is to say, is it possible to determine whether customers purchasing some amount of one category of products will necessarily purchase some proportional amount of another category of products? We can make this determination quite easily by training a supervised regression learner on a subset of the data with one feature removed, and then score how well that model can predict the removed feature.
In the code block below, you will need to implement the following:
- Assign `new_data` a copy of the data by removing a feature of your choice using the `DataFrame.drop` function.
- Use `sklearn.cross_validation.train_test_split` to split the dataset into training and testing sets.
- Use the removed feature as your target label. Set a `test_size` of `0.25` and set a `random_state`.
- Import a decision tree regressor, set a `random_state`, and fit the learner to the training data.
- Report the prediction score of the testing set using the regressor's `score` function.
```
# sklearn.cross_validation was removed in scikit-learn 0.20; model_selection is its replacement.
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor

seed = 42
feature_names = data.axes[1]
score = np.zeros(len(feature_names), dtype=float)
for i, drop_feature in enumerate(feature_names):
    # Drop the candidate feature from the inputs and use it as the target.
    new_data = data.drop(drop_feature, axis=1)
    new_label = data[drop_feature]
    # Hold out 25% of the rows for scoring, with a fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(
        new_data, new_label, test_size=0.25, random_state=seed)
    # Fit a decision tree regressor on the remaining five features.
    regressor = DecisionTreeRegressor(random_state=seed)
    regressor.fit(X_train, y_train)
    # R^2 on the held-out set: high => the feature is predictable from the others.
    score[i] = regressor.score(X_test, y_test)
    print("{:+.3f} is the relevance score between other categories and '{}'.".format(
        score[i], drop_feature))
```
### Question 2
*Which feature did you attempt to predict? What was the reported prediction score? Is this feature necessary for identifying customers' spending habits?*
**Hint:** The coefficient of determination, `R^2`, is scored between 0 and 1, with 1 being a perfect fit. A negative `R^2` implies the model fails to fit the data.
**Answer:**
The reported prediction scores of all six features are shown on the following table:
| Feature | Score |
|------------------ |-------- |
| Fresh | -0.386 |
| Milk | 0.156 |
| Grocery | 0.682 |
| Frozen | -0.210 |
| Detergents_Paper | 0.272 |
| Delicatessen | -2.255 |
The higher a R<sup>2</sup> score, the more tendency customers would spend on this category along with other five categories. It means that this feature tends to be more dependent on other features. The lower a R<sup>2</sup> score, the more independent a category is. Therefore, among all these features, "Delicatessen" is the best fit for identifying customers' spending habits, and followed with "Fresh" and "Frozen".
### Visualize Feature Distributions
To get a better understanding of the dataset, we can construct a scatter matrix of each of the six product features present in the data. If you found that the feature you attempted to predict above is relevant for identifying a specific customer, then the scatter matrix below may not show any correlation between that feature and the others. Conversely, if you believe that feature is not relevant for identifying a specific customer, the scatter matrix might show a correlation between that feature and another feature in the data. Run the code block below to produce a scatter matrix.
```
# Produce a scatter matrix for each pair of features in the data.
# pd.scatter_matrix was removed in pandas 1.0; pd.plotting.scatter_matrix is the replacement.
pd.plotting.scatter_matrix(data, alpha=0.3, figsize=(14, 8), diagonal='kde');
# Correlation heatmap gives a compact numeric view of the same pairwise relationships.
import seaborn as sns
sns.heatmap(data.corr())
```
### Question 3
*Are there any pairs of features which exhibit some degree of correlation? Does this confirm or deny your suspicions about the relevance of the feature you attempted to predict? How is the data for those features distributed?*
**Hint:** Is the data normally distributed? Where do most of the data points lie?
**Answer:**
Among all these features, "Milk", "Grocery", and "Detergents_Paper" exhibit some degree of correlation to each others. Especially between "Grocery" and "Detergents_Paper" it shows a strong correlation. All scatter matrices of these pairs of data tends to show a roughly linear relationship. This result confirms the relevance of the features predicted above.
The data for these features distributed in a highly skewed pattern. Most of the data lie at lower values, and few of them lie at the long tail with higher values.
## Data Preprocessing
In this section, you will preprocess the data to create a better representation of customers by performing a scaling on the data and detecting (and optionally removing) outliers. Preprocessing data is often times a critical step in assuring that results you obtain from your analysis are significant and meaningful.
### Implementation: Feature Scaling
If data is not normally distributed, especially if the mean and median vary significantly (indicating a large skew), it is most [often appropriate](http://econbrowser.com/archives/2014/02/use-of-logarithms-in-economics) to apply a non-linear scaling — particularly for financial data. One way to achieve this scaling is by using a [Box-Cox test](http://scipy.github.io/devdocs/generated/scipy.stats.boxcox.html), which calculates the best power transformation of the data that reduces skewness. A simpler approach which can work in most cases would be applying the natural logarithm.
In the code block below, you will need to implement the following:
- Assign a copy of the data to `log_data` after applying a logarithm scaling. Use the `np.log` function for this.
- Assign a copy of the sample data to `log_samples` after applying a logarithm scaling. Again, use `np.log`.
```
mpl.rcParams.update(mpl.rcParamsDefault)
%matplotlib inline
# TODO: Scale the data using the natural logarithm
log_data = np.log(data)
# TODO: Scale the sample data using the natural logarithm
log_samples = np.log(samples)
# Produce a scatter matrix for each pair of newly-transformed features
pd.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
```
### Observation
After applying a natural logarithm scaling to the data, the distribution of each feature should appear much more normal. For any pairs of features you may have identified earlier as being correlated, observe here whether that correlation is still present (and whether it is now stronger or weaker than before).
Run the code below to see how the sample data has changed after having the natural logarithm applied to it.
```
# Show the chosen sample points after the natural-log transform.
display(log_samples)
```
### Implementation: Outlier Detection
Detecting outliers in the data is extremely important in the data preprocessing step of any analysis. The presence of outliers can often skew results which take into consideration these data points. There are many "rules of thumb" for what constitutes an outlier in a dataset. Here, we will use [Tukey's Method for identfying outliers](http://datapigtechnologies.com/blog/index.php/highlighting-outliers-in-your-data-with-the-tukey-method/): An *outlier step* is calculated as 1.5 times the interquartile range (IQR). A data point with a feature that is beyond an outlier step outside of the IQR for that feature is considered abnormal.
In the code block below, you will need to implement the following:
- Assign the value of the 25th percentile for the given feature to `Q1`. Use `np.percentile` for this.
- Assign the value of the 75th percentile for the given feature to `Q3`. Again, use `np.percentile`.
- Assign the calculation of an outlier step for the given feature to `step`.
- Optionally remove data points from the dataset by adding indices to the `outliers` list.
**NOTE:** If you choose to remove any outliers, ensure that the sample data does not contain any of these points!
Once you have performed this implementation, the dataset will be stored in the variable `good_data`.
```
# For each feature, flag data points with extreme values via Tukey's method.
for feature in log_data.keys():
    # Q1/Q3: 25th and 75th percentiles of the (log-scaled) feature.
    Q1 = np.percentile(log_data[feature], 25)
    Q3 = np.percentile(log_data[feature], 75)
    # Outlier step: 1.5 times the interquartile range.
    step = 1.5 * (Q3 - Q1)
    # Display rows lying beyond [Q1 - step, Q3 + step] for this feature.
    print("Data points considered outliers for the feature '{}':".format(feature))
    display(log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))])

# Indices flagged as outliers for more than one feature (chosen by inspection above).
outliers = [65, 66, 75, 128, 154]
# Remove the outliers and reindex the cleaned dataset.
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop=True)
```
### Question 4
*Are there any data points considered outliers for more than one feature? Should these data points be removed from the dataset? If any data points were added to the `outliers` list to be removed, explain why.*
##### **Answer:**
Yes, there are 5 data points that are considered outliers for more than one feature, namely [65, 66, 75, 128, 154]. These data points should be removed from the dataset, because being outliers on multiple features means they are not good representatives of the typical data. By removing them, the data will fit a cleaner distribution and will be much easier to work with in the later processing steps.
## Feature Transformation
In this section you will use principal component analysis (PCA) to draw conclusions about the underlying structure of the wholesale customer data. Since using PCA on a dataset calculates the dimensions which best maximize variance, we will find which compound combinations of features best describe customers.
### Implementation: PCA
Now that the data has been scaled to a more normal distribution and has had any necessary outliers removed, we can now apply PCA to the `good_data` to discover which dimensions about the data best maximize the variance of features involved. In addition to finding these dimensions, PCA will also report the *explained variance ratio* of each dimension — how much variance within the data is explained by that dimension alone. Note that a component (dimension) from PCA can be considered a new "feature" of the space, however it is a composition of the original features present in the data.
In the code block below, you will need to implement the following:
- Import `sklearn.decomposition.PCA` and assign the results of fitting PCA in six dimensions with `good_data` to `pca`.
- Apply a PCA transformation of the sample log-data `log_samples` using `pca.transform`, and assign the results to `pca_samples`.
```
from sklearn.decomposition import PCA
# Fit PCA with all six components so each dimension's explained-variance ratio is reported.
pca = PCA()
pca.fit(good_data)
# Project the log-scaled sample points into the PCA space.
pca_samples = pca.transform(log_samples)
# Generate PCA results plot (`rs` is a project-local visuals helper — presumably renders
# the explained variance and feature weights per dimension; verify against renders.py).
pca_results = rs.pca_results(good_data, pca)
```
### Question 5
*How much variance in the data is explained* ***in total*** *by the first and second principal component? What about the first four principal components? Using the visualization provided above, discuss what the first four dimensions best represent in terms of customer spending.*
**Hint:** A positive increase in a specific dimension corresponds with an *increase* of the *positive-weighted* features and a *decrease* of the *negative-weighted* features. The rate of increase or decrease is based on the indivdual feature weights.
**Answer:**
The first principal component explains 44.30% variance, and the second principal component explains 26.38%, and in total these two principal component explain 70.68% variance.
The third principal component explains 12.31% of the variance, and the fourth principal component explains 10.12%; in total, the first four principal components explain 93.11% of the variance.
Here 0.4 will be used as a threshold value to judge the importance of the feature weights. The first dimension represents the customers spending mostly on Milk, Grocery, and Detergents_Paper. In fact, the first dimension is most correlated to the spending of Detergents_Paper. As we discussed above, these three categories have a linear correlation to each others. The second dimension represents the customers spending mostly on Fresh, Frozen, and Delicatessen. The third dimension represents the customers mainly spending on Delicatessen, and disfavoring spending on Fresh. The forth dimension represents the customer mostly spending on Frozen and not spending on Delicatessen.
### Observation
Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it in six dimensions. Observe the numerical value for the first four dimensions of the sample points. Consider if this is consistent with your initial interpretation of the sample points.
```
# Show the sample points projected onto all six PCA dimensions (rounded for readability).
display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values))
```
### Implementation: Dimensionality Reduction
When using principal component analysis, one of the main goals is to reduce the dimensionality of the data — in effect, reducing the complexity of the problem. Dimensionality reduction comes at a cost: Fewer dimensions used implies less of the total variance in the data is being explained. Because of this, the *cumulative explained variance ratio* is extremely important for knowing how many dimensions are necessary for the problem. Additionally, if a significant amount of variance is explained by only two or three dimensions, the reduced data can be visualized afterwards.
In the code block below, you will need to implement the following:
- Assign the results of fitting PCA in two dimensions with `good_data` to `pca`.
- Apply a PCA transformation of `good_data` using `pca.transform`, and assign the results to `reduced_data`.
- Apply a PCA transformation of the sample log-data `log_samples` using `pca.transform`, and assign the results to `pca_samples`.
```
# Re-fit PCA, this time keeping only the first two principal components.
pca = PCA(n_components=2)
pca.fit(good_data)
# Project both the cleaned data and the sample points onto those two axes.
projected = pca.transform(good_data)
pca_samples = pca.transform(log_samples)
# Wrap the projection in a labeled DataFrame for the plots that follow.
reduced_data = pd.DataFrame(projected, columns=['Dimension 1', 'Dimension 2'])
```
### Observation
Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it using only two dimensions. Observe how the values for the first two dimensions remains unchanged when compared to a PCA transformation in six dimensions.
```
# Show the sample points in the 2-D reduced space; the first two dimensions
# match the six-dimensional projection shown earlier.
display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))
```
## Clustering
In this section, you will choose to use either a K-Means clustering algorithm or a Gaussian Mixture Model clustering algorithm to identify the various customer segments hidden in the data. You will then recover specific data points from the clusters to understand their significance by transforming them back into their original dimension and scale.
### Question 6
*What are the advantages to using a K-Means clustering algorithm? What are the advantages to using a Gaussian Mixture Model clustering algorithm? Given your observations about the wholesale customer data so far, which of the two algorithms will you use and why?*
**Answer:**
A K-Means clustering algorithm is fast to run, and it produces clean and tight clusters with straight boundaries, being a type of hard clustering algorithm. But it also has disadvantages, such as a tendency to get stuck in local minima, difficulty in choosing the K value, and not working well with non-globular clusters.
A Gaussian Mixture Model is an Expectation Maximization algorithm. It monotonically increases the likelihood, which means it does not diverge, and in practice it converges better than a K-Means clustering algorithm. It is also a type of soft clustering, which does not provide clean clusters, but much more structural information. Compared to a K-Means clustering algorithm, a Gaussian Mixture Model is slower since it has to incorporate information about the distributions of the data; it therefore has to deal with the covariance, mean, variance, and prior probabilities of the data, and also has to assign, for each point, the probability of belonging to each cluster.
Regarding to this wholesale customer data, since the data intrinsically are not divided by clean and tight categories, I would think a Gaussian Mixture Model fits better for this project. Furthermore, the dataset is relatively small, so it shouldn't be too costly to run.
### Implementation: Creating Clusters
Depending on the problem, the number of clusters that you expect to be in the data may already be known. When the number of clusters is not known *a priori*, there is no guarantee that a given number of clusters best segments the data, since it is unclear what structure exists in the data — if any. However, we can quantify the "goodness" of a clustering by calculating each data point's *silhouette coefficient*. The [silhouette coefficient](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html) for a data point measures how similar it is to its assigned cluster from -1 (dissimilar) to 1 (similar). Calculating the *mean* silhouette coefficient provides for a simple scoring method of a given clustering.
In the code block below, you will need to implement the following:
- Fit a clustering algorithm to the `reduced_data` and assign it to `clusterer`.
- Predict the cluster for each data point in `reduced_data` using `clusterer.predict` and assign them to `preds`.
- Find the cluster centers using the algorithm's respective attribute and assign them to `centers`.
- Predict the cluster for each sample data point in `pca_samples` and assign them `sample_preds`.
- Import sklearn.metrics.silhouette_score and calculate the silhouette score of `reduced_data` against `preds`.
- Assign the silhouette score to `score` and print the result.
```
# sklearn.mixture.GMM was removed in scikit-learn 0.20; GaussianMixture is its replacement.
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score

for i in [6, 5, 4, 3, 2]:
    # Fit a Gaussian mixture (soft clustering) with i components to the 2-D PCA data.
    gmm = GaussianMixture(n_components=i, covariance_type='diag', random_state=seed, verbose=0)
    clusterer = gmm.fit(reduced_data)
    # Hard cluster assignment for each data point.
    preds = clusterer.predict(reduced_data)
    # Cluster centers (component means).
    centers = clusterer.means_
    # Cluster assignment for the PCA-transformed sample points.
    sample_preds = clusterer.predict(pca_samples)
    # Mean silhouette coefficient: higher means tighter, better-separated clusters.
    score = silhouette_score(reduced_data, preds, metric='euclidean', random_state=seed)
    print("For {:d} clusters, the mean silhouette coefficient is {:.3f}.".format(i, score))
```
### Question 7
*Report the silhouette score for several cluster numbers you tried. Of these, which number of clusters has the best silhouette score?*
**Answer:**
The silhouette scores for various cluster numbers are reported below:
| Cluster Number | SilhouetteScore |
|----------------|-----------------|
| 2 | 0.412 |
| 3 | 0.374 |
| 4 | 0.331 |
| 5 | 0.281 |
| 6 | 0.278 |
When 2 clusters are divided, the silhouette score is the highest.
### Cluster Visualization
Once you've chosen the optimal number of clusters for your clustering algorithm using the scoring metric above, you can now visualize the results by executing the code block below. Note that, for experimentation purposes, you are welcome to adjust the number of clusters for your clustering algorithm to see various visualizations. The final visualization provided should, however, correspond with the optimal number of clusters.
```
# Plot the clustering in the reduced space (`rs` is a project-local visuals helper;
# presumably marks cluster centers and circles the sample points — verify against renders.py).
rs.cluster_results(reduced_data, preds, centers, pca_samples)
```
### Implementation: Data Recovery
Each cluster present in the visualization above has a central point. These centers (or means) are not specifically data points from the data, but rather the *averages* of all the data points predicted in the respective clusters. For the problem of creating customer segments, a cluster's center point corresponds to *the average customer of that segment*. Since the data is currently reduced in dimension and scaled by a logarithm, we can recover the representative customer spending from these data points by applying the inverse transformations.
In the code block below, you will need to implement the following:
- Apply the inverse transform to `centers` using `pca.inverse_transform` and assign the new centers to `log_centers`.
- Apply the inverse function of `np.log` to `log_centers` using `np.exp` and assign the true centers to `true_centers`.
```
# Map the cluster centers back from the 2-D PCA space to log space...
log_centers = pca.inverse_transform(centers)
# ...and undo the natural-log scaling to recover spending in the original units.
true_centers = np.exp(log_centers)
# Label each recovered center as a customer segment and display it.
segments = ['Segment {}'.format(idx) for idx, _ in enumerate(centers)]
true_centers = pd.DataFrame(np.round(true_centers), columns=data.keys(), index=segments)
display(true_centers)
# Bar chart of the representative spending per segment.
true_centers.plot(kind='bar', figsize=(10, 5))
```
### Question 8
Consider the total purchase cost of each product category for the representative data points above, and reference the statistical description of the dataset at the beginning of this project. *What set of establishments could each of the customer segments represent?*
**Hint:** A customer who is assigned to `'Cluster X'` should best identify with the establishments represented by the feature set of `'Segment X'`.
**Answer:**
Since we know the data distribution is highly skewed, the median will be considered as a major parameter to describe the data instead of the mean. For "Segment 0", the costs on "Fresh" and "Frozen" are higher than the medians. This segment may represent a set of establishments of restaurants and cafes. For "Segment 1", the costs of "Milk", "Grocery", and "Detergents_Paper" are highers than the medians, and the cost of "Delicatessen" is close to the median. This segment may represent a set of establishments of retailers.
### Question 9
*For each sample point, which customer segment from* ***Question 8*** *best represents it? Are the predictions for each sample point consistent with this?*
Run the code block below to find which cluster each sample point is predicted to be.
```
# Report which cluster each sample point was assigned to (Python 3 print).
for i, pred in enumerate(sample_preds):
    print("Sample point", i, "predicted to be in Cluster", pred)
```
**Answer:**
Sample 0 is predicted to be in Segment 1, which may represent retailers. As we discussed above, Sample 0 has higher costs than the median in every category except "Fresh". This characteristic matches Segment 1, in which the costs in most categories are high except "Fresh" and "Frozen".
Sample 1 is predicted to be in Segment 0, which may represent restaurants/cafes. Sample 1 has a very high cost on "Fresh" only. The costs on "Frozen" and "Delicatessen" are just above the medians. This characteristic is exactly identical as Segment 0.
Sample 2 is predicted to be in Segment 1, which may represent retailers. Sample 2 has higher costs on "Fresh", "Grocery", "Detergents_Paper", and "Delicatessen". Even though it has a high cost on "Fresh", by looking at all other features and overall pattern, this sample should be same as Sample 0 and its characteristic fits into Segment 1.
The predictions for each sample point are consistent with the initial guesses.
## Conclusion
### Question 10
*Companies often run [A/B tests](https://en.wikipedia.org/wiki/A/B_testing) when making small changes to their products or services. If the wholesale distributor wanted to change its delivery service from 5 days a week to 3 days a week, how would you use the structure of the data to help them decide on a group of customers to test?*
**Hint:** Would such a change in the delivery service affect all customers equally? How could the distributor identify who it affects the most?
**Answer:**
If the wholesale distributor wanted to change its delivery service from 5 days a week to 3 days a week, the customers of restaurants may be impacted much more than the customers of retailers. Because the restaurants need a lot more "Fresh" products, and would be benefit from more frequent deliveries.
To design an A/B test to see if changing delivery service will cause decline of customers' spending in Segment 0 (restaurants), we can randomly pick a set of samples from Segment 0, and randomly divide the sample set into two groups: Group A with 5 days a week delivery and Group B with 3 days a week delivery. After a specific period, we can collect the sample data of customers' spending, and perform a two-sample Student's t-test. If the null hypothesis is rejected, we can make a conclusion that changing delivery does impact the customers in Segment 0.
If we want to know whether changing delivery will also impact the customers in Segment 1 (retailers), an independent and separate A/B test can be done on Segment 1. Following the same procedure, a set of samples should be randomly picked from Segment 1, and randomly devided into two groups. A two-sample Student's t-test can be done and the null hypothesis can be examined.
### Question 11
*Assume the wholesale distributor wanted to predict a new feature for each customer based on the purchasing information available. How could the wholesale distributor use the structure of the data to assist a supervised learning analysis?*
**Hint:** What other input feature could the supervised learner use besides the six product features to help make a prediction?
**Answer:**
The segmentation information can be used as a new binary feature (or a categorical feature if more than 2 segments) in a supervised learning analysis. By adding this new segmentation feature, it can be a major factor for classification of the data and therefore improve the performance of a supervised learning analysis.
Another one may be the distance between the instance and the cluster center. It is like using the distance as a weight in a k-nearest neighbors algorithm, but here the distance is a measurement of the likelihood to the typical customer representatives.
### Visualizing Underlying Distributions
At the beginning of this project, it was discussed that the `'Channel'` and `'Region'` features would be excluded from the dataset so that the customer product categories were emphasized in the analysis. By reintroducing the `'Channel'` feature to the dataset, an interesting structure emerges when considering the same PCA dimensionality reduction applied earlier on to the original dataset.
Run the code block below to see how each data point is labeled either `'HoReCa'` (Hotel/Restaurant/Cafe) or `'Retail'` the reduced space. In addition, you will find the sample points are circled in the plot, which will identify their labeling.
```
# Plot the reduced data labeled by the original 'Channel' feature
# (HoReCa vs Retail), with the removed outliers and sample points marked.
rs.channel_results(reduced_data, outliers, pca_samples)
```
### Question 12
*How well does the clustering algorithm and number of clusters you've chosen compare to this underlying distribution of Hotel/Restaurant/Cafe customers to Retailer customers? Are there customer segments that would be classified as purely 'Retailers' or 'Hotels/Restaurants/Cafes' by this distribution? Would you consider these classifications as consistent with your previous definition of the customer segments?*
**Answer:**
By looking at this distribution graph, two clusters exist, and they are not divided cleanly and tightly. There are some data points mixed into the other cluster. I chose a soft clustering algorithm and two clusters to analyze the dataset, and these choices match the characteristics of the dataset well.
Since the data points are mixed together, therefore there is no customer segments can be classified as purely "Retailers" or "Hotels/Restaurants/Cafes". I would consider these classifications as consistent with my previous definition of the customer segments.
> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
| github_jupyter |
```
import matplotlib.pyplot as plt
import os, glob, cv2, random
import seaborn as sns
import pandas as pd
from PIL import Image
import tensorflow as tf
from tensorflow import keras
import numpy as np
```
# Preview
```
# Prepare the training data: load eye images and label them closed (0) / open (1).
path = "./dataset/"
filenames = os.listdir(path)
X = []
y = []
for filename in filenames:
    # Load each image as a numpy array.
    X.append(np.array(Image.open(path + filename)))
    # Filenames are "<state>_...": "close" -> label 0, anything else -> 1.
    category = filename.split("_")[0]
    if category == "close":
        y.append([0])
    else:
        y.append([1])
X = np.array(X)
y = np.array(y)
X.shape, y.shape
# Add a trailing channel axis, (n, 26, 34) -> (n, 26, 34, 1), as float64.
# This replaces the original element-by-element copy into np.empty, which
# produced the same array one row at a time.
n_total = len(X)
X_result = X.reshape((n_total, 26, 34, 1)).astype(np.float64)
from sklearn.model_selection import train_test_split
# Hold out 10% of the images for validation.
x_train, x_val, y_train, y_val = train_test_split(X_result, y, test_size=0.1)
print(x_train.shape, y_train.shape)
print(x_val.shape, y_val.shape)
# Cache the split arrays so the training section can reload them.
np.save('dataset/x_train.npy', x_train)
np.save('dataset/y_train.npy', y_train)
np.save('dataset/x_val.npy', x_val)
np.save('dataset/y_val.npy', y_val)
# Sanity-check: show one training and one validation sample with their labels.
plt.subplot(2, 1, 1)
plt.title(str(y_train[0]))
plt.imshow(x_train[0].reshape((26, 34)), cmap='gray')
plt.subplot(2, 1, 2)
plt.title(str(y_val[3]))
plt.imshow(x_val[3].reshape((26, 34)), cmap='gray')
# Label distributions. sns.distplot was deprecated in seaborn 0.11 and later
# removed; histplot is its replacement for plain histograms.
sns.histplot(y_train.ravel())
sns.histplot(y_val.ravel())
import datetime
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Activation, Conv2D, Flatten, Dense, MaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
plt.style.use('dark_background')
# Reload the split saved by this notebook's preview section.
x_train2 = np.load('dataset/x_train.npy').astype(np.float32)
y_train2 = np.load('dataset/y_train.npy').astype(np.float32)
x_val2 = np.load('dataset/x_val.npy').astype(np.float32)
y_val2 = np.load('dataset/y_val.npy').astype(np.float32)
print(x_train2.shape, y_train2.shape)
print(x_val2.shape, y_val2.shape)
# Load a second, previously prepared split from make_model/dataset.
x_train1 = np.load('make_model/dataset/x_train.npy').astype(np.float32)
y_train1 = np.load('make_model/dataset/y_train.npy').astype(np.float32)
x_val1 = np.load('make_model/dataset/x_val.npy').astype(np.float32)
y_val1 = np.load('make_model/dataset/y_val.npy').astype(np.float32)
print(x_train1.shape, y_train1.shape)
print(x_val1.shape, y_val1.shape)
# Merge both datasets into a single training / validation set.
x_train = np.concatenate((x_train1,x_train2),axis=0)
y_train = np.concatenate((y_train1,y_train2),axis=0)
x_val = np.concatenate((x_val1,x_val2),axis=0)
y_val = np.concatenate((y_val1,y_val2),axis=0)
print(x_train.shape, y_train.shape)
print(x_val.shape, y_val.shape)
# Sanity-check the merged arrays: one training and one validation image with labels.
plt.subplot(2, 1, 1)
plt.title(str(y_train[0]))
plt.imshow(x_train[0].reshape((26, 34)), cmap='gray')
plt.subplot(2, 1, 2)
plt.title(str(y_val[4]))
plt.imshow(x_val[4].reshape((26, 34)), cmap='gray')
# Augment only the training images; validation images are just rescaled to [0, 1].
augmentation = dict(
    rescale=1./255,
    rotation_range=10,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
)
train_datagen = ImageDataGenerator(**augmentation)
val_datagen = ImageDataGenerator(rescale=1./255)

# Batched iterators over the in-memory arrays; only the training stream is shuffled.
train_generator = train_datagen.flow(x=x_train, y=y_train, batch_size=32, shuffle=True)
val_generator = val_datagen.flow(x=x_val, y=y_val, batch_size=32, shuffle=False)
# Binary open/closed-eye classifier: three conv/pool stages, then a dense head
# ending in a single sigmoid unit.
inputs = Input(shape=(26, 34, 1))
x = inputs
for filters in (32, 64, 128):
    x = Conv2D(filters, kernel_size=3, strides=1, padding='same', activation='relu')(x)
    x = MaxPooling2D(pool_size=2)(x)
x = Flatten()(x)
x = Dense(512)(x)
x = Activation('relu')(x)
x = Dense(1)(x)
outputs = Activation('sigmoid')(x)

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.summary()
# Timestamp used to name this run's checkpoint file.
start_time = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
# Model.fit has accepted generators since TF 2.1; fit_generator is deprecated
# and was removed in later releases.
model.fit(
    train_generator, epochs=50, validation_data=val_generator,
    callbacks=[
        # Keep only the weights with the best validation accuracy.
        ModelCheckpoint('./gaze_tracking/trained_models/%s.h5' % (start_time),
                        monitor='val_acc', save_best_only=True, mode='max', verbose=1),
        # Shrink the learning rate when val_acc plateaus for 10 epochs.
        ReduceLROnPlateau(monitor='val_acc', factor=0.2, patience=10, verbose=1,
                          mode='auto', min_lr=1e-05)
    ]
)
from sklearn.metrics import accuracy_score, confusion_matrix
import seaborn as sns
# Reload the best checkpoint saved during training and evaluate it on the
# validation set.
model = load_model('./gaze_tracking/trained_models/%s.h5' % (start_time))
# The generators rescaled inputs by 1/255 during training, so apply the same
# normalization here before predicting.
y_pred = model.predict(x_val/255.)
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int is the documented replacement.
y_pred_logical = (y_pred > 0.5).astype(int)
print ('test acc: %s' % accuracy_score(y_val, y_pred_logical))
# Confusion matrix and score-distribution plots for a quick visual check.
cm = confusion_matrix(y_val, y_pred_logical)
sns.heatmap(cm, annot=True)
ax = sns.distplot(y_pred, kde=False)
```
| github_jupyter |
```
import numpy as np # library used for numbers/vectors/matrices
import matplotlib.pyplot as plt # matlab-style plotting
import pandas as pd # library used for dataframe operations
from google.colab import files # google colab helper used to upload files
uploaded=files.upload() # upload the files
import io # library used to handle input/output streams
data = pd.read_csv(io.BytesIO(uploaded['AAPL.csv'])) # load the CSV file containing the dataset
data.head() # show the first 5 rows of the dataframe
# Plot the opening price over time.
plt.plot(data["Date"],data["Open"])
plt.ylabel("Preço")
plt.xlabel("Data")
# boxplot used to check for outliers
data.boxplot(column='Open')
# check for null values and column dtypes
data.info()
# prepare the model input
entradas=data.iloc[:,1:2].values # keep only the opening-price column, as a numpy array
entradas[:5] # print the first 5 rows (note: this is an array, not a dataframe)
# normalize the data (ML algorithms generally do not cope well with features on different scales)
from sklearn.preprocessing import MinMaxScaler # preprocessing utilities
scaler = MinMaxScaler(feature_range = (0, 1)) # scaler object used to normalize the data
# feature_range = target interval for the scaled data
dados_normalizados = scaler.fit_transform(entradas) # apply the transformation
dados_normalizados[:5]
# Recurrent networks consume the value at time T together with the previous
# (T-n) values, so reshape the series into sliding windows of 60 past prices,
# each labeled with the price that follows the window.
features_set = []
labels = []
window = 60  # number of past time steps fed to the network
# GENERALIZATION: derive the upper bound from the data instead of the
# hard-coded row count 1259, so the cell works for any dataset length
# (identical behavior for the original 1259-row AAPL file).
for i in range(window, len(dados_normalizados)):
    features_set.append(dados_normalizados[i-window:i, 0])
    labels.append(dados_normalizados[i, 0])
# convert to numpy arrays so they can be used as model input
features_set, labels = np.array(features_set), np.array(labels)
# check the dimensions
print(features_set.shape)
print(labels.shape)
# Keras recurrent layers expect 3D input:
# (a,b,c) -> a = number of samples
#         -> b = number of time steps per sample
#         -> c = number of features (indicators) per step
# e.g. converts (1199, 60) into (1199, 60, 1)
features_set = np.reshape(features_set, (features_set.shape[0], features_set.shape[1], 1))
print(features_set.shape)
from keras.models import Sequential # class used to create the sequential keras model
from keras.layers import Dense # fully-connected layers
from keras.layers import LSTM # Long Short-Term Memory recurrent layer
from keras.layers import Dropout # dropout layer (used to reduce overfitting)
model = Sequential() # sequential keras model
model.add(LSTM(units=50, return_sequences=True, input_shape=(features_set.shape[1], 1))) # input layer
# layers are stacked: each new layer is appended with add()
# the input layer must declare the input shape (input_shape)
# units=50 means this layer has 50 units
# return_sequences=True is required whenever further recurrent layers follow
# dropout layer
model.add(Dropout(0.2)) # 0.2 means 20% of this layer's units are dropped each iteration
# add more layers to the model
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50)) # last LSTM layer, so return_sequences defaults to False
model.add(Dropout(0.2))
# output layer with a single unit: we predict one variable (the opening price)
model.add(Dense(units = 1))
# show the model configuration
model.summary()
# define the loss function and the optimizer
# the optimizer minimizes the loss
# the loss measures the model error (actual value - predicted value)
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
# train the model
model.fit(features_set, labels, epochs = 100, batch_size = 32)
# inputs: features_set
# targets: labels
# epochs = number of passes over the training data
# batch_size = number of samples per gradient update
uploaded2=files.upload() # upload the test-data files
data_test = pd.read_csv(io.BytesIO(uploaded2['AAPL_previsao.csv'])) # load the CSV file with the test data
data_test.head() # first 5 rows of the test dataframe
plt.plot(data_test.Date,data_test.Open) # plot the test data
# NOTE(review): this assignment is dead code — it is overwritten on the next
# line by the concatenated Series.
df_data_apple=data_test.iloc[:, 1:2].values
df_data_apple = pd.concat((data['Open'], data_test['Open']), axis=0) # concatenate training and test opening prices into one single-column series
df_data_apple.head()
# take the last 60 training values plus all test values as model input
test_inputs = df_data_apple[len(df_data_apple) - len(data_test) - 60:].values
# normalize the test data with the scaler fitted on the training data
test_inputs = test_inputs.reshape(-1,1)
test_inputs = scaler.transform(test_inputs)
# build one 60-step window per test point (20 windows for 20 test rows)
test_features = []
for i in range(60, 80):
    test_features.append(test_inputs[i-60:i, 0])
# shape the windows as (samples, steps, features) model input
test_features = np.array(test_features)
test_features = np.reshape(test_features, (test_features.shape[0], test_features.shape[1], 1))
# predict with the trained model
previsao = model.predict(test_features)
# invert the normalization of the predictions back to price scale
previsao = scaler.inverse_transform(previsao)
# plot predicted vs. actual prices
plt.figure(figsize=(10,6))
plt.plot(data_test.Open, color='blue', label='Preço Real das Ações da Apple')
plt.plot(previsao , color='red', label='Previsão do Preço das Ações da Apple')
plt.title('Previsão do Preço de Abertura das Ações da Apple')
plt.xlabel('Data')
plt.ylabel('Preço')
plt.legend()
plt.show()
```
| github_jupyter |
# Twitter Sentiment Analysis for Indian Election 2019
**Abstract**<br>
The goal of this project is to do sentiment analysis for the Indian Elections. The data used is the tweets that are extracted from Twitter. The BJP and Congress are the two major political parties that will be contesting the election. The dataset will consist of tweets for both the parties. The tweets will be labeled as positive or negative based on the sentiment score obtained using Textblob library. This data will be used to build models that can classify new tweets as positive or negative. The models built are a Bidirectional RNN and GloVe word embedding model.
**Implementation**<br>
```
import os
import pandas as pd
import tweepy
import re
import string
from textblob import TextBlob
import preprocessor as p
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk
nltk.download('punkt')
import pandas as pd
from nltk.tokenize import word_tokenize
from string import punctuation
from nltk.corpus import stopwords
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Embedding, SimpleRNN,Input
from keras.models import Sequential,Model
from keras.preprocessing import sequence
from keras.layers import Dense,Dropout
from keras.layers import Embedding, Flatten, Dense,Conv1D,MaxPooling1D
from sklearn import preprocessing
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import itertools
import seaborn as sns
from sklearn.metrics import confusion_matrix
from keras.utils import to_categorical
from collections import Counter
import tensorflow as tf
from keras.layers import LSTM, Bidirectional, Dropout
```
**Data Creation**
We use Tweepy API to access Twitter and download tweets. Tweepy supports accessing Twitter via Basic Authentication and the newer method, OAuth. Twitter has stopped accepting Basic Authentication so OAuth is now the only way to use the Twitter API.
The code below downloads tweets from Twitter based on the keyword that we pass. Each tweet's sentiment score is obtained using the TextBlob library. The tweets are then preprocessed; the preprocessing involves removing emoticons and removing stopwords.
```
# Twitter API credentials for Tweepy OAuth.
# SECURITY(review): real credentials are hard-coded and published here; they
# should be revoked and loaded from environment variables or a config file
# kept out of version control.
consumer_key= '9oO3eQOBkuvCRPqMsFvnShRrq'
consumer_secret= 'BMWGbdC05jDcsWU5oI7AouWvwWmi46b2bD8zlnWXaaRC7832ep'
access_token='313324341-yQa0jL5IWmUKT15M6qM53uGeGW7FGcy1xAgx5Usy'
access_token_secret='OyjmhcMCbxGqBQAWzq12S0zrGYUvjChsZKavMYmPCAlrE'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Output CSV paths for the two parties' tweets.
congress_tweets = "C:/Users/Abhishek/Election Twitter Sentiment analysis/congress_test.csv"
bjp_tweets = "C:/Users/Abhishek/Election Twitter Sentiment analysis/bjp_test_new.csv"
#set two date variables for date range
start_date = '2019-04-1'
end_date = '2019-04-20'
```
**Data cleaning scripts**
```
# Happy Emoticons — tokens removed from tweets during cleaning.
emoticons_happy = set([
    ':-)', ':)', ';)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}',
    ':^)', ':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D',
    '=-3', '=3', ':-))', ":'-)", ":')", ':*', ':^*', '>:P', ':-P', ':P', 'X-P',
    'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b', '>:)', '>;)', '>:-)',
    '<3'
    ])
# Sad Emoticons
emoticons_sad = set([
    ':L', ':-/', '>:/', ':S', '>:[', ':@', ':-(', ':[', ':-||', '=L', ':<',
    ':-[', ':-<', '=\\', '=/', '>:(', ':(', '>.<', ":'-(", ":'(", ':\\', ':-c',
    ':c', ':{', '>:\\', ';('
    ])
# Emoji patterns: one compiled regex matching the common emoji Unicode
# blocks, so they can be stripped in a single substitution.
emoji_pattern = re.compile("["
         u"\U0001F600-\U0001F64F"  # emoticons
         u"\U0001F300-\U0001F5FF"  # symbols & pictographs
         u"\U0001F680-\U0001F6FF"  # transport & map symbols
         u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
         u"\U00002702-\U000027B0"
         u"\U000024C2-\U0001F251"
         "]+", flags=re.UNICODE)
# Combine sad and happy emoticons into one lookup set.
emoticons = emoticons_happy.union(emoticons_sad)
#method clean_tweets()
def clean_tweets(tweet):
    """Clean a raw tweet and return the filtered text.

    Strips leftover colons/ellipses from tweepy preprocessing, non-ASCII
    characters and emojis, then tokenizes and removes stopwords, emoticons
    and punctuation.

    Args:
        tweet (str): raw tweet text.

    Returns:
        str: space-joined filtered tokens.
    """
    stop_words = set(stopwords.words('english'))
    # After tweepy preprocessing, a colon remains where mentions or the
    # leading "RT" were removed.
    tweet = re.sub(r':', '', tweet)
    tweet = re.sub(r'…', '', tweet)
    # Replace consecutive non-ASCII characters with a space.
    tweet = re.sub(r'[^\x00-\x7F]+',' ', tweet)
    # Remove emojis from the tweet.
    tweet = emoji_pattern.sub(r'', tweet)
    # BUG FIX: the original tokenized the *raw* tweet before the regex
    # cleanup above, so the cleanup never affected the tokens (and a first
    # stopword-filter comprehension was built and immediately discarded).
    # Tokenize the cleaned text instead.
    word_tokens = word_tokenize(tweet)
    # Keep tokens that are not stopwords, emoticons or punctuation.
    filtered_tweet = [
        w for w in word_tokens
        if w not in stop_words and w not in emoticons and w not in string.punctuation
    ]
    return ' '.join(filtered_tweet)
#method write_tweets()
def write_tweets(keyword, file):
    """Search Twitter for ``keyword`` and append matching tweets to CSV ``file``.

    Each English tweet is cleaned with ``clean_tweets``, scored with
    TextBlob, and stored with its metadata (favorites, retweets, hashtags,
    mentions, location, coordinates, ...).
    NOTE(review): relies on the module-level ``api``, ``COLS`` and
    ``start_date`` being defined elsewhere in the notebook.
    """
    # If the file exists, then read the existing data from the CSV file.
    if os.path.exists(file):
        df = pd.read_csv(file, header=0)
    else:
        df = pd.DataFrame(columns=COLS)
    # Page through up to 50 pages of 200 search results each.
    for page in tweepy.Cursor(api.search, q=keyword,
                              count=200, include_rts=False, since=start_date).pages(50):
        for status in page:
            new_entry = []
            status = status._json
            ## check whether the tweet is in english or skip to the next tweet
            if status['lang'] != 'en':
                continue
            # For tweets already on file (matched by created_at), refresh the
            # retweet and favorite counts instead of adding a duplicate row.
            if status['created_at'] in df['created_at'].values:
                i = df.loc[df['created_at'] == status['created_at']].index[0]
                if status['favorite_count'] != df.at[i, 'favorite_count'] or \
                   status['retweet_count'] != df.at[i, 'retweet_count']:
                    df.at[i, 'favorite_count'] = status['favorite_count']
                    df.at[i, 'retweet_count'] = status['retweet_count']
                continue
            #tweepy preprocessing called for basic preprocessing
            #clean_text = p.clean(status['text'])
            #call clean_tweet method for extra preprocessing
            filtered_tweet=clean_tweets(status['text'])
            # TextBlob sentiment: polarity in [-1, 1], subjectivity in [0, 1].
            blob = TextBlob(filtered_tweet)
            Sentiment = blob.sentiment
            #seperate polarity and subjectivity in to two variables
            polarity = Sentiment.polarity
            subjectivity = Sentiment.subjectivity
            # Assemble the row in the same order as COLS.
            new_entry += [status['id'], status['created_at'],
                          status['source'], status['text'],filtered_tweet, Sentiment,polarity,subjectivity, status['lang'],
                          status['favorite_count'], status['retweet_count']]
            #to append original author of the tweet
            new_entry.append(status['user']['screen_name'])
            try:
                is_sensitive = status['possibly_sensitive']
            except KeyError:
                is_sensitive = None
            new_entry.append(is_sensitive)
            # hashtags and mentions are saved comma-separated
            hashtags = ", ".join([hashtag_item['text'] for hashtag_item in status['entities']['hashtags']])
            new_entry.append(hashtags)
            mentions = ", ".join([mention['screen_name'] for mention in status['entities']['user_mentions']])
            new_entry.append(mentions)
            #get location of the tweet if possible
            try:
                location = status['user']['location']
            except TypeError:
                location = ''
            new_entry.append(location)
            try:
                coordinates = [coord for loc in status['place']['bounding_box']['coordinates'] for coord in loc]
            except TypeError:
                coordinates = None
            new_entry.append(coordinates)
            single_tweet_df = pd.DataFrame([new_entry], columns=COLS)
            # NOTE(review): DataFrame.append is deprecated (removed in
            # pandas 2.0) — pd.concat is the modern replacement.
            df = df.append(single_tweet_df, ignore_index=True)
    # NOTE(review): this handle is never closed, and the whole accumulated
    # df (including rows read from disk) is appended in 'a' mode, which
    # duplicates existing rows on every run — confirm whether intended.
    csvFile = open(file, 'a' ,encoding='utf-8')
    df.to_csv(csvFile, mode='a', columns=COLS, index=False, encoding="utf-8")
#declare keywords as a query for the two parties
# Each query ORs together the party's common hashtags.
Congress_keywords = '#IndianNationalCongress OR #RahulGandhi OR #SoniaGandhi OR #INC'
BJP_keywords = '#BJP OR #Modi OR #AmitShah OR #BhartiyaJantaParty'
```
Creates two CSV files. First saves tweets for BJP and second saves tweets for Congress.
```
#call main method passing keywords and file path
# Downloads and stores tweets for each party into its own CSV.
write_tweets(Congress_keywords, congress_tweets)
write_tweets(BJP_keywords,      bjp_tweets)
```
**LABELING TWEETS AS POSITIVE NEGATIVE**<br>
The TextBlob library gives a sentiment polarity in the range of -1 to +1. For our topic of election prediction the neutral tweets would be of no use, as they do not provide any valuable information. Thus, for simplicity, I have labeled tweets as only positive or negative: tweets with polarity less than or equal to 0 are labeled negative (0), and those greater than 0 are labeled positive (1).
```
# Binarize polarity: strictly positive -> 1 (positive), else 0 (negative).
bjp_df['polarity'] = bjp_df['polarity'].apply(lambda x: 1 if x > 0 else 0)
congress_df['polarity'] = congress_df['polarity'].apply(lambda x: 1 if x > 0 else 0)
bjp_df['polarity'].value_counts()
```

```
congress_df['polarity'].value_counts()
```

## **RESAMPLING THE DATA** <br>
Since the ratio of negative tweets to positive tweets is not proportional, our dataset is not balanced, which would create a bias while training the model. To avoid this, I resampled the data: new data was downloaded from Twitter using the procedure above, and for both parties only positive tweets were sampled and appended to the main files to balance the data. After balancing, the counts of positive and negative tweets for both parties are as follows. The code for the resampling procedure can be found in the notebook Data_Labeling.ipynb.

**CREATING FINAL DATASET**
```
# Concatenate the two party dataframes into the final training dataset.
frames = [bjp, congress]
election_data = pd.concat(frames)
```
The final dataset that will be used for our analysis saved in a csv file. That file can be loaded used to run our models. The final dataset looks as follows.

**TOKENIZING DATA**
We tokenize the text and keep the maximum length of the the vector 1000.

**TRAIN TEST SPLIT WITH 80:20 RATIO**
```
# Shuffle samples and labels together (same permutation), then hold out the
# last 20% as the validation set.
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(.20 * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = data[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
```
**CREATING EMBEDDING MATRIX WITH HELP OF PRETRAINED MODEL: GLOVE**
Word Embeddings are text converted into numbers. There are number of ways to represent the numeric forms.<br>
Types of embeddings: Frequency based, Prediction based.<br>Frequency Based: Tf-idf, Co-occurrence matrix<br>
Prediction-Based: BOW, Skip-gram model
Using Pre-trained word vectors: Word2vec, Glove
Word Embedding is done for the experiment with the pre trained word vector Glove.
Glove version used : 100-dimensional GloVe embeddings of 400k words computed on a 2014 dump of English Wikipedia. Training is performed on an aggregated global word-word co-occurrence matrix, giving us a vector space with meaningful substructures

```
# Build the embedding matrix: row i holds the 100-d GloVe vector of the word
# with tokenizer index i (index 0 is reserved for padding).
embedding_matrix = np.zeros((len(word_index) + 1, 100))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
```
Creating an embedding layer using GloVe
```
# Embedding layer initialized with the GloVe matrix and frozen
# (trainable=False) so the pre-trained vectors are not updated.
embedding_layer = Embedding(len(word_index) + 1,
                            100,
                            weights=[embedding_matrix],
                            input_length=1000,
                            trainable=False)
```
# Model 1
**Glove Word Embedding model**
GloVe is an unsupervised learning algorithm for obtaining vector representations for words. Training is performed on aggregated global word-word co-occurrence statistics from a corpus, and the resulting representations showcase inter-esting linear substructures of the word vector space. GloVe can be used to find relations between words like synonyms, company - product relations, zip codes, and cities etc. It is also used by the spaCy model to build semantic word em-beddings/feature vectors while computing the top list words that match with distance measures such as Cosine Similar-ity and Euclidean distance approach.
```
def model_creation():
    """Build and compile the GloVe-embedding sentiment classifier.

    Returns:
        keras.Model: compiled model taking padded token-id sequences of
        length 1000 and producing a single sigmoid probability of the
        positive class.
    """
    input_layer = Input(shape=(1000,), dtype='int32')
    # Frozen pre-trained GloVe embedding layer defined earlier.
    embed_layer = embedding_layer(input_layer)
    x = Dense(100,activation='relu')(embed_layer)
    x = Dense(50,activation='relu', kernel_regularizer=keras.regularizers.l2(0.002))(x)
    x = Flatten()(x)
    x = Dense(50,activation='relu', kernel_regularizer=keras.regularizers.l2(0.002))(x)
    x = Dropout(0.5)(x)
    x = Dense(50, activation='relu')(x)
    x = Dropout(0.5)(x)
    final_layer = Dense(1, activation='sigmoid')(x)
    opt = keras.optimizers.Adam(lr= learning_rate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model = Model(input_layer,final_layer)
    # BUG FIX: with a single sigmoid output unit the loss must be
    # binary_crossentropy; categorical_crossentropy over one unit is
    # degenerate (it treats the lone unit as a one-class softmax), so the
    # model cannot learn the negative class properly.
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['acc'])
    return model
```
**MODEL 1 Architecture**
```
# Training hyperparameters for the GloVe model.
learning_rate = 0.0001
batch_size = 1024
epochs = 10
model_glove = model_creation()
```


**SAVE BEST MODEL AND WEIGHTS for Model1**
```
# serialize model to JSON
# NOTE(review): the file content is JSON, not HDF5 — the .h5 extension is
# misleading (only the weights file below is actually HDF5).
model_json = model_glove.to_json()
with open(".\\SavedModels\\Model_glove.h5", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model_glove.save_weights(".\\SavedModels\\Weights_glove.h5")
```
**MODEL1 LOSS AND ACCURAY**

**MODEL1 PERFORMANCE**
```
def plot_modelacc(fit_model):
    """Plot training vs. validation accuracy per epoch for a fitted model."""
    with plt.style.context('ggplot'):
        for series in ('acc', 'val_acc'):
            plt.plot(fit_model.history[series])
        plt.ylim(0,1)
        plt.title("MODEL ACCURACY")
        plt.xlabel("# of EPOCHS")
        plt.ylabel("ACCURACY")
        plt.legend(['train', 'test'], loc='upper left')
        return plt.show()
def plot_model_loss(fit_model):
    """Plot training vs. validation loss per epoch for a fitted model."""
    with plt.style.context('ggplot'):
        for series in ('loss', 'val_loss'):
            plt.plot(fit_model.history[series])
        plt.title("MODEL LOSS")
        plt.xlabel("# of EPOCHS")
        plt.ylabel("LOSS")
        plt.legend(['train', 'test'], loc='upper left')
        return plt.show()
```

**CONFUSION MATRIX**<br>
A confusion matrix will show us the how the model predicted with respect to the acutal output.

True Positives: 870 (Predicted True and True in reality)<br>
True Negative: 1141(Predicted False and False in realtity)<br>
False Positive: 33 (Predicted Positve but Negative in reality)<br>
False Negative: 29 (Predicted Negative but Positive in reality)
# Model 2
**Bidirectional RNN model**
Bidirectional Recurrent Neural Networks (BRNN) connect two hidden layers of opposite directions to the same output. With this form of generative deep learning, the output layer can get information from past (backwards) and future (forward) states simultaneously.Invented in 1997 by Schuster and Paliwal,BRNNs were introduced to increase the amount of input information available to the network. For example, multilayer perceptron (MLPs) and time delay neural network (TDNNs) have limitations on the input data flexibility, as they require their input data to be fixed. Standard recurrent neural network (RNNs) also have restrictions as the future input information cannot be reached from the current state. On the contrary, BRNNs do not require their input data to be fixed. Moreover, their future input information is reachable from the current state.
BRNN are especially useful when the context of the input is needed. For example, in handwriting recognition, the performance can be enhanced by knowledge of the letters located before and after the current letter.
**MODEL 1 Architecture**


**SAVING BEST MODEL2 AND ITS WEIGHTS**
```
# serialize model to JSON
# NOTE(review): as with the GloVe model, the .h5 file here actually holds
# JSON text; only the weights file is HDF5.
model_json = model.to_json()
with open(".\\SavedModels\\Model_Bidir_LSTM.h5", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(".\\SavedModels\\Weights_bidir_LSTM.h5")
print("Saved model to disk")
```
**MODEL 2 LOSS AND ACCURACY**


**MODEL 2 CONFUSION MATRIX**

True Positives: 887(Predicted True and True in reality)
True Negative: 1140(Predicted False and False in realtity)
False Positive: 35 (Predicted Positve but Negative in reality)
False Negative: 11 (Predicted Negative but Positive in reality)
**PREDICTION USING THE BEST MODEL**
The models were compared based on the Test loss and Test Accuracy. The Bidirectional RNN performed slightly better than the GloVe model. The RNN despite its simple architec-ture performed better than the Glove model. We use the Bidirectional RNN to make the predictions for the tweets that will be used to infer election results.
Load the test data on which the predictions will be made using our best model. The data for both the parties was collected using the same procedure like above.
```
# Load the held-out tweets for each party used for the final prediction.
congress_test = pd.read_csv('congress_test.csv')
bjp_test = pd.read_csv('bjp_test.csv')
```
We took equal samples from both files: 2000 tweets for Congress and 2000 for BJP. The party that gets the most positive votes can be inferred to have the highest probability of winning the 2019 election.
```
# Take equal-sized samples (2000 tweets) for each party for a fair count.
congress_test =congress_test[:2000]
bjp_test = bjp_test[0:2000]
```
Tokenize the tweets in the same way as was done for the Bidirectional RNN model.
```
# Tokenize with the same pipeline used for training.
# NOTE(review): tokenze_data is defined elsewhere in the notebook (name
# spelled without the 'i').
congress_inputs = tokenze_data(congress_inputs)
bjp_inputs = tokenze_data(bjp_inputs)
```
**LOAD THE BEST MODEL (BIDIRECTIONAL LSTM)**
```
from keras.models import model_from_json
# load json and create model (the .h5 file contains the JSON architecture)
json_file = open(".\\SavedModels\\Model_Bidir_LSTM.h5", 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(".\\SavedModels\\Weights_bidir_LSTM.h5")
print("Loaded model from disk")
```
**SENTIMENT PREDICTION USING THE MODEL**
```
# Score the held-out tweets of each party with the loaded model.
congress_prediction = loaded_model.predict(congress_inputs)
bjp_prediction = loaded_model.predict(bjp_inputs)
```
If the probability of the outcome is greater than 0.5 for any class, then the sentiment belongs to that particular class. Since we are concerned with only the count of positive sentiments, we will check the second column for our inference.
```
# Threshold probabilities at 0.5 to obtain boolean class flags.
congress_pred = (congress_prediction>0.5)
bjp_pred = (bjp_prediction>0.5)
def get_predictions(party_pred):
    """Count rows whose second column (the positive class) is True."""
    # The strict `== True` comparison deliberately mirrors the original.
    return sum(1 for row in party_pred if row[1] == True)
```

**CONCLUSION**
Just like the training data, the majority of the tweets have a negative sentiment attached to them. After feeding 2000 tweets for each of Congress and BJP, the model predicted that BJP has 660 positive tweets while Congress has 416 positive tweets.<br><br> This indicates that the contest this year will be close, and the chances of BJP winning an outright majority as in the previous election are lower. This has been corroborated by the poor performance of the BJP in the recent state elections, where they lost power in three major Hindi-speaking states: Rajasthan, Madhya Pradesh and Chhattisgarh. <br><br>
**FUTURE SCOPE**
For this project only, a small sample of twitter data was considered for the analysis. It is difficult to give an estimate based on the limited amount of information we had access to. For future work, we can start by increasing the size of our dataset. In addition to Twitter, data can also be obtained from websites like Facebook, News websites. Apart from these we can try different models like Bidirectional RNN with attention mechanism. We can implement BERT which is currently the state of the art for solving various Natural Language Pro-cessing problems.
**LICENSE**
**REFERENCES**
[1] Sepp Hochreiter and Jurgen Schmidhuber, “Long short- ¨ term memory,” Neural computation, vol. 9, no. 8, pp. 1735–1780,1997.<br>
[2] Mike Schuster and Kuldip K Paliwal, “Bidirectional recurrentneural networks,” Signal Processing, IEEE Transactions on, vol. 45, no. 11, pp. 2673–2681, 1997.<br>
[3] Jeffrey Pennington, Richard Socher, Christopher D. Manning.GloVe: Global Vectors for Word Representation <br>
[4] Apoorv Agarwal Boyi Xie Ilia Vovsha Owen Rambow Rebecca Passonneau Sentiment Analysis of Twitter Data <br>
[5] Alex Graves and Jurgen Schmidhuber, “Framewise ¨ phoneme classification with bidirectional LSTM and other neural network
architectures,” Neural Networks, vol. 18, no. 5, pp. 602–610,2005
| github_jupyter |
# Using geoprocessing tools
In ArcGIS API for Python, geoprocessing toolboxes and the tools within them are represented as Python modules and functions within those modules. To learn more about this organization, refer to the page titled [Accessing geoprocessing tools](https://developers.arcgis.com/python/guide/accessing-geoprocessing-tools/). In this part of the guide, we will observe:
- [Invoking geoprocessing tools](#invoking-geoprocessing-tools)
- [Understanding tool input parameter and output return types](#understanding-tool-input-parameter-and-output-return-types)
- [Using helper types](#using-helper-types)
- [Using strings as input](#using-strings-as-input)
- [Tools with multiple outputs](#tools-with-multiple-outputs)
- [Invoking tools that create multiple outputs](#invoking-tools-that-create-multiple-outputs)
- [Using named tuple to access multiple outputs](#using-named-tuple-to-access-multiple-outputs)
- [Tools that export map image layer as output](#tools-that-export-map-image-layer-as-output)
<a id="invoking-geoprocessing-tools"></a>
## Invoking Geoprocessing Tools
You can execute a geoprocessing tool easily by importing its toolbox as a module and calling the function for the tool. Let us see how to execute the `extract_zion_data` tool from the Zion toolbox URL:
```
# connect to ArcGIS Online (anonymous connection)
from arcgis.gis import GIS
from arcgis.geoprocessing import import_toolbox
gis = GIS()
# import the Zion toolbox
# import_toolbox generates a Python module from the toolbox URL, exposing
# one function per geoprocessing tool.
zion_toolbox_url = 'http://gis.ices.dk/gis/rest/services/Tools/ExtractZionData/GPServer'
zion = import_toolbox(zion_toolbox_url)
result = zion.extract_zion_data()
```
Thus, executing a geoprocessing tool is that simple. Let us learn a few more concepts that will help in using these tools efficiently.
<a id="understanding-tool-input-parameter-and-output-return-types"></a>
## Understanding tool input parameter and output return types
The functions for calling geoprocessing tools can accept and return built-in Python types such as str, int, bool, float, dicts, datetime.datetime as well as some helper types defined in the ArcGIS API for Python such as the following:
* `arcgis.features.FeatureSet` - a set of features
* `arcgis.geoprocessing.LinearUnit` - linear distance with specified units
* `arcgis.geoprocessing.DataFile` - a url or item id referencing data
* `arcgis.geoprocessing.RasterData` - url or item id and format of raster data
The tools can also accept lists of the above types.
**Note**: When the helper types are used an input, the function also accepts strings in their place. For example '5 Miles' can be passed as an input instead of LinearUnit(5, 'Miles') and a URL can be passed instead of a `DataFile` or `RasterData` input.
Some geoprocessing tools are configured to return an `arcgis.mapping.MapImageLayer` for visualizing the results of the tool.
In all cases, the documentation of the tool function indicates the type of input parameters and the output values.
<a id="using-helper-types"></a>
### Using helper types
The helper types (`LinearUnit`, `DataFile` and `RasterData`) defined in the `arcgis.geoprocessing` module are simple classes that hold strings or URLs and have a dictionary representation.
The `extract_zion_data()` tool invoked above returns an output zip file as a `DataFile`:
```
type(result)
```
The output `Datafile` can be queried as shown in the snippet below.
```
result
```
The value types such as `DataFile` include helpful methods such as download:
```
result.download()
```
<a id="using-strings-as-input"></a>
### Using strings as input
Strings can also be used as inputs in place of the helper types such as `LinearUnit`, `RasterData` and `DataFile`.
The example below calls the viewshed tool to compute and display the geographical area that is visible from a clicked location on the map. The function accepts an observation point as a `FeatureSet` and a viewshed distance as a `LinearUnit`, and returns a `FeatureSet`:
```
# Import the viewshed toolbox and show the generated function's signature.
viewshed = import_toolbox('http://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Elevation/ESRI_Elevation_World/GPServer')
help(viewshed.viewshed)
import arcgis
# Request tool output in WGS84 (EPSG:4326) so it can be drawn on the map.
arcgis.env.out_spatial_reference = 4326
map = gis.map('South San Francisco', zoomlevel=12)
map
```

The code snippet below adds an event listener to the map, such that when clicked, `get_viewshed()` is called with the map widget and clicked point geometry as inputs. The event handler creates a `FeatureSet` from the clicked point geometry, and uses the string '5 Miles' as input for the viewshed_distance parameter instead of creating a `LinearUnit` object. These are passed into the viewshed function that returns the viewshed from the observation point. The map widget is able to draw the returned `FeatureSet` using its `draw()` method:
```
from arcgis.features import Feature, FeatureSet
def get_viewshed(m, g):
    """Compute the viewshed from the clicked geometry ``g`` and draw it on map ``m``."""
    observation = FeatureSet([Feature(g)])
    # A plain string is accepted here in place of LinearUnit(5, 'Miles').
    visible_area = viewshed.viewshed(observation, "5 Miles")
    m.draw(visible_area)
map.on_click(get_viewshed)
```
<a id="tools-with-multiple-outputs"></a>
## Tools with multiple outputs
Some Geoprocessing tools can return multiple results. For these tools, the corresponding function returns the multiple output values as a [named tuple](https://docs.python.org/3/library/collections.html#namedtuple-factory-function-for-tuples-with-named-fields).
The example below uses a tool that returns multiple outputs:
```
sandiego_toolbox_url = 'https://gis-public.co.san-diego.ca.us/arcgis/rest/services/InitialResearchPacketCSV_Phase2/GPServer'
multioutput_tbx = import_toolbox(sandiego_toolbox_url)
help(multioutput_tbx.initial_research_packet_csv)
```
<a id="invoking-tools-that-create-multiple-outputs"></a>
### Invoking tools that create multiple outputs
The code snippet below shows how multiple outputs returned from a tool can be automatically unpacked by Python into multiple variables. Also, since we're not interested in the job status output, we can discard it using "_" as the variable name:
```
report_output_csv_file, output_map_flags_file, soil_output_file, _ = multioutput_tbx.initial_research_packet_csv()
report_output_csv_file
output_map_flags_file
soil_output_file
```
<a id="using-named-tuple-to-access-multiple-outputs"></a>
### Using named tuple to access multiple tool outputs
The code snippet below shows using a named tuple to access the multiple outputs returned from the tool:
```
results = multioutput_tbx.initial_research_packet_csv()
results.report_output_csv_file
results.job_status
```
<a id="tools-that-export-map-image-layer-as-output"></a>
## Tools that export MapImageLayer as output
Some Geoprocessing tools are configured to return their output as MapImageLayer for easier visualization of the results. The resultant layer can be added to a map or queried.
An example of such a tool is below:
```
hotspots = import_toolbox('https://sampleserver6.arcgisonline.com/arcgis/rest/services/911CallsHotspot/GPServer')
help(hotspots.execute_911_calls_hotspot)
result_layer, output_features, hotspot_raster = hotspots.execute_911_calls_hotspot()
result_layer
hotspot_raster
```
The resultant hotspot raster can be visualized in the Jupyter Notebook using the code snippet below:
```
from IPython.display import Image
Image(hotspot_raster['mapImage']['href'])
```
| github_jupyter |
### Creating Data Frames
documentation: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
DataFrame is a 2-dimensional labeled data structure with columns of potentially different types. You can think of it
like a spreadsheet or SQL table, or a dict of Series objects.
You can create a data frame using:
- Dict of 1D ndarrays, lists, dicts, or Series
- 2-D numpy.ndarray
- Structured or record ndarray
- A Series
- Another DataFrame
### Data Frame attributes
| T | Transpose index and columns | |
|---------|-------------------------------------------------------------------------------------------------------------------|---|
| at | Fast label-based scalar accessor | |
| axes | Return a list with the row axis labels and column axis labels as the only members. | |
| blocks | Internal property, property synonym for as_blocks() | |
| dtypes | Return the dtypes in this object. | |
| empty | True if NDFrame is entirely empty [no items], meaning any of the axes are of length 0. | |
| ftypes | Return the ftypes (indication of sparse/dense and dtype) in this object. | |
| iat | Fast integer location scalar accessor. | |
| iloc | Purely integer-location based indexing for selection by position. | |
| is_copy | | |
| ix | A primarily label-location based indexer, with integer position fallback. | |
| loc | Purely label-location based indexer for selection by label. | |
| ndim | Number of axes / array dimensions | |
| shape | Return a tuple representing the dimensionality of the DataFrame. | |
| size | number of elements in the NDFrame | |
| style | Property returning a Styler object containing methods for building a styled HTML representation of the DataFrame. | |
| values | Numpy representation of NDFrame | |
```
import pandas as pd
import numpy as np
```
### Creating data frames from various data types
documentation: http://pandas.pydata.org/pandas-docs/stable/dsintro.html
cookbook: http://pandas.pydata.org/pandas-docs/stable/cookbook.html
##### create data frame from Python dictionary
```
my_dictionary = {'a' : 45., 'b' : -19.5, 'c' : 4444}
print(my_dictionary.keys())
print(my_dictionary.values())
```
##### constructor without explicit index
```
cookbook_df = pd.DataFrame({'AAA' : [4,5,6,7], 'BBB' : [10,20,30,40],'CCC' : [100,50,-30,-50]})
cookbook_df
```
##### constructor contains dictionary with Series as values
```
series_dict = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
series_df = pd.DataFrame(series_dict)
series_df
```
##### dictionary of lists
```
produce_dict = {'veggies': ['potatoes', 'onions', 'peppers', 'carrots'],
'fruits': ['apples', 'bananas', 'pineapple', 'berries']}
produce_dict
```
##### list of dictionaries
```
data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]
pd.DataFrame(data2)
```
##### dictionary of tuples, with multi index
```
pd.DataFrame({('a', 'b'): {('A', 'B'): 1, ('A', 'C'): 2},
('a', 'a'): {('A', 'C'): 3, ('A', 'B'): 4},
('a', 'c'): {('A', 'B'): 5, ('A', 'C'): 6},
('b', 'a'): {('A', 'C'): 7, ('A', 'B'): 8},
('b', 'b'): {('A', 'D'): 9, ('A', 'B'): 10}})
```
| github_jupyter |
# Chapter 10 - Predicting Continuous Target Variables with Regression Analysis
### Overview
- [Introducing a simple linear regression model](#Introducing-a-simple-linear-regression-model)
- [Exploring the Housing Dataset](#Exploring-the-Housing-Dataset)
- [Visualizing the important characteristics of a dataset](#Visualizing-the-important-characteristics-of-a-dataset)
- [Implementing an ordinary least squares linear regression model](#Implementing-an-ordinary-least-squares-linear-regression-model)
- [Solving regression for regression parameters with gradient descent](#Solving-regression-for-regression-parameters-with-gradient-descent)
- [Estimating the coefficient of a regression model via scikit-learn](#Estimating-the-coefficient-of-a-regression-model-via-scikit-learn)
- [Fitting a robust regression model using RANSAC](#Fitting-a-robust-regression-model-using-RANSAC)
- [Evaluating the performance of linear regression models](#Evaluating-the-performance-of-linear-regression-models)
- [Using regularized methods for regression](#Using-regularized-methods-for-regression)
- [Turning a linear regression model into a curve - polynomial regression](#Turning-a-linear-regression-model-into-a-curve---polynomial-regression)
- [Modeling nonlinear relationships in the Housing Dataset](#Modeling-nonlinear-relationships-in-the-Housing-Dataset)
- [Dealing with nonlinear relationships using random forests](#Dealing-with-nonlinear-relationships-using-random-forests)
- [Decision tree regression](#Decision-tree-regression)
- [Random forest regression](#Random-forest-regression)
- [Summary](#Summary)
<br>
<br>
```
from IPython.display import Image
%matplotlib inline
```
# Introducing a simple linear regression model
#### Univariate Model
$$
y = w_0 + w_1 x
$$
Relationship between
- a single feature (**explanatory variable**) $x$
- a continuous target (**response**) variable $y$
```
Image(filename='./images/10_01.png', width=500)
```
- **regression line** : the best-fit line
- **offsets** or **residuals**: the gap between the regression line and the sample points
#### Multivariate Model
$$
y = w_0 + w_1 x_1 + \dots + w_m x_m
$$
<br>
<br>
# Exploring the Housing dataset
- Information about houses in the suburbs of Boston
- Collected by D. Harrison and D.L. Rubinfeld in 1978
- 506 samples
Source: [https://archive.ics.uci.edu/ml/datasets/Housing](https://archive.ics.uci.edu/ml/datasets/Housing)
Attributes:
<pre>
1. CRIM per capita crime rate by town
2. ZN proportion of residential land zoned for lots over
25,000 sq.ft.
3. INDUS proportion of non-retail business acres per town
4. CHAS Charles River dummy variable (= 1 if tract bounds
river; 0 otherwise)
5. NOX nitric oxides concentration (parts per 10 million)
6. RM average number of rooms per dwelling
7. AGE proportion of owner-occupied units built prior to 1940
8. DIS weighted distances to five Boston employment centres
9. RAD index of accessibility to radial highways
10. TAX full-value property-tax rate per $10,000
11. PTRATIO pupil-teacher ratio by town
12. B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks
by town
13. LSTAT % lower status of the population
14. MEDV Median value of owner-occupied homes in $1000's
</pre>
We'll consider **MEDV** as our target variable.
```
import pandas as pd
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/'
'housing/housing.data',
header=None,
sep='\s+')
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS',
'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
df.head()
```
<br>
<br>
## Visualizing the important characteristics of a dataset
#### Scatter plot matrix
```
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='whitegrid', context='notebook')
cols = ['LSTAT', 'INDUS', 'NOX', 'RM', 'MEDV']
sns.pairplot(df[cols], size=2.5)
plt.tight_layout()
# plt.savefig('./figures/scatter.png', dpi=300)
plt.show()
```
#### Correlation Matrix
- a scaled version of the covariance matrix
- each entry contains the **Pearson product-moment correlation coefficients** (**Pearson's r**)
- quantifies **linear** relationship between features
- ranges in $[-1,1]$
- $r=1$ perfect positive correlation
- $r=0$ no correlation
- $r=-1$ perfect negative correlation
$$
r = \frac{
\sum_{i=1}^n [(x^{(i)}-\mu_x)(y^{(i)}-\mu_y)]
}{
\sqrt{\sum_{i=1}^n (x^{(i)}-\mu_x)^2}
\sqrt{\sum_{i=1}^n (y^{(i)}-\mu_y)^2}
} =
\frac{\sigma_{xy}}{\sigma_x\sigma_y}
$$
```
import numpy as np
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.5)
hm = sns.heatmap(cm,
cbar=True,
annot=True,
square=True,
fmt='.2f',
annot_kws={'size': 15},
yticklabels=cols,
xticklabels=cols)
# plt.tight_layout()
# plt.savefig('./figures/corr_mat.png', dpi=300)
plt.show()
```
- MEDV has large correlation with LSTAT and RM
- The relation between MEDV ~ LSTAT may not be linear
- The relation between MEDV ~ RM looks linear
```
sns.reset_orig()
%matplotlib inline
```
<br>
<br>
# Implementing an ordinary least squares (OLS) linear regression model
## Solving regression for regression parameters with gradient descent
#### OLS Cost Function (Sum of Squared Errors, SSE)
$$
J(w) = \frac12 \sum_{i=1}^n (y^{(i)} - \hat y^{(i)})^2 = \frac12 \| y - Xw - \mathbb{1}w_0\|^2
$$
- $\hat y^{(i)} = w^T x^{(i)} $ is the predicted value
- OLS linear regression can be understood as Adaline without the step function, which converts the linear response $w^T x$ into $\{-1,1\}$.
#### Gradient Descent (refresh)
$$
w_{k+1} = w_k - \eta_k \nabla J(w_k), \;\; k=1,2,\dots
$$
- $\eta_k>0$ is the learning rate
- $$
\nabla J(w_k) =
\begin{bmatrix} -X^T(y-Xw- \mathbb{1}w_0) \\
-\mathbb{1}^T(y-Xw- \mathbb{1}w_0)
\end{bmatrix}
$$
```
class LinearRegressionGD(object):
    """Ordinary least squares linear regression fitted with batch gradient descent.

    Attributes after fitting:
        w_ : 1d array, bias term at index 0 followed by the feature weights.
        cost_ : list of the SSE cost value recorded at each epoch.
    """

    def __init__(self, eta=0.001, n_iter=20):
        self.eta = eta          # learning rate (step size)
        self.n_iter = n_iter    # number of passes over the training data

    def fit(self, X, y):
        """Learn the weights by minimizing the sum of squared errors on (X, y)."""
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            residuals = y - self.net_input(X)
            # Full-batch update: gradient step for the weights, then the bias.
            self.w_[1:] += self.eta * X.T.dot(residuals)
            self.w_[0] += self.eta * residuals.sum()
            self.cost_.append(0.5 * (residuals ** 2).sum())
        return self

    def net_input(self, X):
        """Return the linear response w^T x + w_0."""
        return X.dot(self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Identity activation: the prediction is the net input itself."""
        return self.net_input(X)
X = df[['RM']].values
y = df[['MEDV']].values
y.shape
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
sc_y = StandardScaler()
X_std = sc_x.fit_transform(X)
#y_std = sc_y.fit_transform(y[:, np.newaxis]).flatten()
y_std = sc_y.fit_transform(y).flatten()
y_std.shape
lr = LinearRegressionGD()
lr.fit(X_std, y_std)
plt.plot(range(1, lr.n_iter+1), lr.cost_)
plt.ylabel('SSE')
plt.xlabel('Epoch')
plt.tight_layout()
# plt.savefig('./figures/cost.png', dpi=300)
plt.show()
def lin_regplot(X, y, model):
    """Scatter the samples and overlay the model's fitted regression line."""
    fitted = model.predict(X)
    plt.scatter(X, y, c='lightblue')
    plt.plot(X, fitted, color='red', linewidth=2)
lin_regplot(X_std, y_std, lr)
plt.xlabel('Average number of rooms [RM] (standardized)')
plt.ylabel('Price in $1000\'s [MEDV] (standardized)')
plt.tight_layout()
# plt.savefig('./figures/gradient_fit.png', dpi=300)
plt.show()
print('Slope: %.3f' % lr.w_[1])
print('Intercept: %.3f' % lr.w_[0])
num_rooms_std = sc_x.transform(np.array([[5.0]]))
price_std = lr.predict(num_rooms_std)
print("Price in $1000's: %.3f" % sc_y.inverse_transform(price_std))
```
<br>
<br>
## Estimating the coefficient of a regression model via scikit-learn
```
from sklearn.linear_model import LinearRegression
slr = LinearRegression()
slr.fit(X, y)
y_pred = slr.predict(X)
print('Slope: %.3f' % slr.coef_[0])
print('Intercept: %.3f' % slr.intercept_)
```
The solution is different from the previous result, since the data is **not** normalized here.
```
lin_regplot(X, y, slr)
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.tight_layout()
# plt.savefig('./figures/scikit_lr_fit.png', dpi=300)
plt.show()
```
<br>
<br>
# Fitting a robust regression model using RANSAC (RANdom SAmple Consensus)
- Linear regression models can be heavily affected by outliers
- A very small subset of data can have a big impact on the estimated model coefficients
- Removing outliers is not easy
RANSAC algorithm:
1. Select a random subset of samples to be *inliers* and fit the model
2. Test all other data points against the fitted model, and add those points that fall within a user-defined tolerance to inliers
3. Refit the model using all inliers.
4. Estimate the error of the fitted model vs. the inliers
5. Terminate if the performance meets a user-defined threshold, or if a fixed number of iterations has been reached.
```
from sklearn.linear_model import RANSACRegressor
ransac = RANSACRegressor(LinearRegression(),
max_trials=100,
min_samples=50,
loss='absolute_loss',
residual_threshold=5.0, # problem-specific
random_state=0)
ransac.fit(X, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
line_X = np.arange(3, 10, 1)
line_y_ransac = ransac.predict(line_X[:, np.newaxis])
plt.scatter(X[inlier_mask], y[inlier_mask],
c='blue', marker='o', label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask],
c='lightgreen', marker='s', label='Outliers')
plt.plot(line_X, line_y_ransac, color='red')
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./figures/ransac_fit.png', dpi=300)
plt.show()
print('Slope: %.3f' % ransac.estimator_.coef_[0])
print('Intercept: %.3f' % ransac.estimator_.intercept_)
```
<br>
<br>
# Evaluating the performance of linear regression models
```
from sklearn.model_selection import train_test_split
X = df.iloc[:, :-1].values
y = df['MEDV'].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=0)
slr = LinearRegression()
slr.fit(X_train, y_train)
y_train_pred = slr.predict(X_train)
y_test_pred = slr.predict(X_test)
```
#### Residual Plot
- It's not easy to plot linear regression line in general, since the model uses multiple explanatory variables
- Residual plots are used for:
- detect nonlinearity
- detect outliers
- check if errors are randomly distributed
```
plt.scatter(y_train_pred, y_train_pred - y_train,
c='blue', marker='o', label='Training data')
plt.scatter(y_test_pred, y_test_pred - y_test,
c='lightgreen', marker='s', label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='red')
plt.xlim([-10, 50])
plt.tight_layout()
# plt.savefig('./figures/slr_residuals.png', dpi=300)
plt.show()
```
If we see patterns in residual plot, it implies that our model didn't capture some explanatory information which leaked into the pattern.
#### MSE (Mean-Square Error)
$$
\text{MSE} = \frac{1}{n} \sum_{i=1}^n \left( y^{(i)} - \hat y^{(i)} \right)^2
$$
#### $R^2$ score
- The fraction of variance captured by the model
- $R^2=1$ : the model fits the data perfectly
$$
R^2 = 1 - \frac{SSE}{SST}, \;\; SST = \sum_{i=1}^n \left( y^{(i)}-\mu_y\right)^2
$$
$$
R^2 = 1 - \frac{MSE}{Var(y)}
$$
```
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
print('MSE train: %.3f, test: %.3f' % (
mean_squared_error(y_train, y_train_pred),
mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
r2_score(y_train, y_train_pred),
r2_score(y_test, y_test_pred)))
```
The gap in MSE (between train and test) indicates overfitting
<br>
<br>
# Using regularized methods for regression
#### Ridge Regression
$$
J(w) = \frac12 \sum_{i=1}^n (y^{(i)}-\hat y^{(i)})^2 + \lambda \|w\|_2^2
$$
#### LASSO (Least Absolute Shrinkage and Selection Operator)
$$
J(w) = \frac12 \sum_{i=1}^n (y^{(i)}-\hat y^{(i)})^2 + \lambda \|w\|_1
$$
#### Elastic-Net
$$
J(w) = \frac12 \sum_{i=1}^n (y^{(i)}-\hat y^{(i)})^2 + \lambda_1 \|w\|_2^2 + \lambda_2 \|w\|_1
$$
```
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet

# alpha controls the regularization strength (the lambda in the formulas above).
ridge = Ridge(alpha=1.0)
lasso = Lasso(alpha=1.0)
enet = ElasticNet(alpha=1.0, l1_ratio=0.5)

ridge.fit(X_train, y_train)
lasso.fit(X_train, y_train)
enet.fit(X_train, y_train)

y_test_pred_r = ridge.predict(X_test)
y_test_pred_l = lasso.predict(X_test)
y_test_pred_e = enet.predict(X_test)

print("Ridge = ", ridge.coef_)
print("LASSO = ", lasso.coef_)
print("ENET = ", enet.coef_)

# BUG FIX: this cell previously printed MSE/R^2 for the stale
# y_train_pred / y_test_pred left over from the earlier unregularized
# LinearRegression fit, so the regularized models were never evaluated.
for label, y_pred in (('Ridge', y_test_pred_r),
                      ('LASSO', y_test_pred_l),
                      ('ENET', y_test_pred_e)):
    print('%s MSE test: %.3f, R^2 test: %.3f' % (
        label,
        mean_squared_error(y_test, y_pred),
        r2_score(y_test, y_pred)))
```
<br>
<br>
# Turning a linear regression model into a curve - polynomial regression
$$
y = w_0 + w_1 x + w_2 x^2 + \dots + w_d x^d
$$
```
X = np.array([258.0, 270.0, 294.0,
320.0, 342.0, 368.0,
396.0, 446.0, 480.0, 586.0])[:, np.newaxis]
y = np.array([236.4, 234.4, 252.8,
298.6, 314.2, 342.2,
360.8, 368.0, 391.2,
390.8])
from sklearn.preprocessing import PolynomialFeatures
lr = LinearRegression()
pr = LinearRegression()
quadratic = PolynomialFeatures(degree=2)
X_quad = quadratic.fit_transform(X)
# fit linear features
lr.fit(X, y)
X_fit = np.arange(250, 600, 10)[:, np.newaxis]
y_lin_fit = lr.predict(X_fit)
# fit quadratic features
pr.fit(X_quad, y)
y_quad_fit = pr.predict(quadratic.fit_transform(X_fit))
# plot results
plt.scatter(X, y, label='training points')
plt.plot(X_fit, y_lin_fit, label='linear fit', linestyle='--')
plt.plot(X_fit, y_quad_fit, label='quadratic fit')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./figures/poly_example.png', dpi=300)
plt.show()
y_lin_pred = lr.predict(X)
y_quad_pred = pr.predict(X_quad)
print('Training MSE linear: %.3f, quadratic: %.3f' % (
mean_squared_error(y, y_lin_pred),
mean_squared_error(y, y_quad_pred)))
print('Training R^2 linear: %.3f, quadratic: %.3f' % (
r2_score(y, y_lin_pred),
r2_score(y, y_quad_pred)))
```
<br>
<br>
## Modeling nonlinear relationships in the Housing Dataset
```
X = df[['LSTAT']].values
y = df['MEDV'].values
regr = LinearRegression()
# create quadratic features
quadratic = PolynomialFeatures(degree=2)
cubic = PolynomialFeatures(degree=3)
X_quad = quadratic.fit_transform(X)
X_cubic = cubic.fit_transform(X)
# fit features
X_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis]
regr = regr.fit(X, y)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y, regr.predict(X))
regr = regr.fit(X_quad, y)
y_quad_fit = regr.predict(quadratic.fit_transform(X_fit))
quadratic_r2 = r2_score(y, regr.predict(X_quad))
regr = regr.fit(X_cubic, y)
y_cubic_fit = regr.predict(cubic.fit_transform(X_fit))
cubic_r2 = r2_score(y, regr.predict(X_cubic))
# plot results
plt.scatter(X, y, label='training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
label='linear (d=1), $R^2=%.2f$' % linear_r2,
color='blue',
lw=2,
linestyle=':')
plt.plot(X_fit, y_quad_fit,
label='quadratic (d=2), $R^2=%.2f$' % quadratic_r2,
color='red',
lw=2,
linestyle='-')
plt.plot(X_fit, y_cubic_fit,
label='cubic (d=3), $R^2=%.2f$' % cubic_r2,
color='green',
lw=2,
linestyle='--')
plt.xlabel('% lower status of the population [LSTAT]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.legend(loc='upper right')
plt.tight_layout()
# plt.savefig('./figures/polyhouse_example.png', dpi=300)
plt.show()
```
As the model complexity increases, the chance of overfitting increases as well
Transforming the dataset:
```
X = df[['LSTAT']].values
y = df['MEDV'].values
# transform features
X_log = np.log(X)
y_sqrt = np.sqrt(y)
# fit features
X_fit = np.arange(X_log.min()-1, X_log.max()+1, 1)[:, np.newaxis]
regr = regr.fit(X_log, y_sqrt)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y_sqrt, regr.predict(X_log))
# plot results
plt.scatter(X_log, y_sqrt, label='training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
label='linear (d=1), $R^2=%.2f$' % linear_r2,
color='blue',
lw=2)
plt.xlabel('log(% lower status of the population [LSTAT])')
plt.ylabel('$\sqrt{Price \; in \; \$1000\'s [MEDV]}$')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('./figures/transform_example.png', dpi=300)
plt.show()
```
<br>
<br>
# Dealing with nonlinear relationships using random forests
We use Information Gain (IG) to find the feature to split, which will lead to the maximal IG:
$$
IG(D_p, x_i) = I(D_p) - \frac{N_{left}}{N_p} I(D_{left}) - \frac{N_{right}}{N_p} I(D_{right})
$$
where $I$ is the impurity measure.
We've used e.g. entropy for discrete features. Here, we use MSE at node $t$ instead for continuous features:
$$
I(t) = MSE(t) = \frac{1}{N_t} \sum_{i \in D_t} (y^{(i)} - \bar y_t)^2
$$
where $\bar y_t$ is the sample mean,
$$
\bar y_t = \frac{1}{N_t} \sum_{i \in D_t} y^{(i)}
$$
## Decision tree regression
```
from sklearn.tree import DecisionTreeRegressor
X = df[['LSTAT']].values
y = df['MEDV'].values
tree = DecisionTreeRegressor(max_depth=3)
tree.fit(X, y)
sort_idx = X.flatten().argsort()
lin_regplot(X[sort_idx], y[sort_idx], tree)
plt.xlabel('% lower status of the population [LSTAT]')
plt.ylabel('Price in $1000\'s [MEDV]')
# plt.savefig('./figures/tree_regression.png', dpi=300)
plt.show()
r2 = r2_score(y, tree.predict(X))
print("R^2 = ", r2)
```
Disadvantage: it does not capture the continuity and differentiability of the desired prediction
<br>
<br>
## Random forest regression
Advantages:
- better generalization than individual trees
- less sensitive to outliers in the dataset
- don't require much parameter tuning
```
X = df.iloc[:, :-1].values
y = df['MEDV'].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=1)
from sklearn.ensemble import RandomForestRegressor
forest = RandomForestRegressor(n_estimators=1000,
criterion='mse',
random_state=1,
n_jobs=-1)
forest.fit(X_train, y_train)
y_train_pred = forest.predict(X_train)
y_test_pred = forest.predict(X_test)
print('MSE train: %.3f, test: %.3f' % (
mean_squared_error(y_train, y_train_pred),
mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
r2_score(y_train, y_train_pred),
r2_score(y_test, y_test_pred)))
plt.scatter(y_train_pred,
y_train_pred - y_train,
c='black',
marker='o',
s=35,
alpha=0.5,
label='Training data')
plt.scatter(y_test_pred,
y_test_pred - y_test,
c='lightgreen',
marker='s',
s=35,
alpha=0.7,
label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='red')
plt.xlim([-10, 50])
plt.tight_layout()
# plt.savefig('./figures/slr_residuals.png', dpi=300)
plt.show()
```
<br>
<br>
# Summary
- Univariate and multivariate linear models
- RANSAC to deal with outliers
- Regularization: control model complexity to avoid overfitting
| github_jupyter |
420-A52-SF - Algorithmes d'apprentissage supervisé - Hiver 2020 - Spécialisation technique en Intelligence Artificielle<br/>
MIT License - Copyright (c) 2020 Mikaël Swawola
<br/>

<br/>
**Objectif:** cette séance de travaux pratiques a pour objectif la mise en oeuvre des techniques suivantes:
* Bagging
* Forêts aléatoires
* Gradient Boosting
* AdaBoost
* XGBoost
* LightGBM
Le jeu de données utilisée sera **Heart**
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
```
## Exercice 1 - Chargement et préparation des données
```
import pandas as pd
HRT = pd.read_csv('../../data/Heart.csv', index_col=[0])
HRT = HRT.dropna()
HRT_onehot = pd.get_dummies(HRT, columns=['ChestPain','Thal'], prefix = ['cp','thal'], drop_first=True)
X = HRT_onehot.drop(['AHD'], axis=1)
y = (HRT['AHD'] == "Yes").astype(int)
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.7, random_state=2020)
```
## Exercice 2 - Arbres de classification (avec élagage)
```
from sklearn.tree import DecisionTreeClassifier
```
#### Définition du modèle et entraînement
```
clf_tree = DecisionTreeClassifier(random_state=2020, ccp_alpha=0.05)
clf_tree.fit(X_train, y_train)
```
#### Prédictions (train et val)
```
y_train_pred_proba_tree = clf_tree.predict_proba(X_train)[:,1]
y_val_pred_proba_tree = clf_tree.predict_proba(X_val)[:,1]
```
#### Aire sous la courbe
```
from sklearn.metrics import roc_auc_score
print(f'AUC Train = {roc_auc_score(y_train, y_train_pred_proba_tree)}')
print(f'AUC Val = {roc_auc_score(y_val, y_val_pred_proba_tree)}')
```
## Exercice 3 - Bagging
```
from sklearn.ensemble import BaggingClassifier
```
[class sklearn.ensemble.BaggingClassifier(base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=None, random_state=None, verbose=0)](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html)
#### Définition du modèle et entraînement
```
base_tree = DecisionTreeClassifier(random_state=2020, ccp_alpha=0.01)
clf_bag = BaggingClassifier(base_estimator=base_tree, n_estimators=1000, random_state=2020)
clf_bag.fit(X_train, y_train)
```
#### Prédictions (train et val)
```
y_train_pred_proba_bag = clf_bag.predict_proba(X_train)[:,1]
y_val_pred_proba_bag = clf_bag.predict_proba(X_val)[:,1]
```
#### Aire sous la courbe
```
print(f'AUC Train = {roc_auc_score(y_train, y_train_pred_proba_bag)}')
print(f'AUC Val = {roc_auc_score(y_val, y_val_pred_proba_bag)}')
```
## Exercice 4 - Forêts aléatoires
```
from sklearn.ensemble import RandomForestClassifier
```
[class sklearn.ensemble.RandomForestClassifier(n_estimators=100, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, class_weight=None, ccp_alpha=0.0, max_samples=None)](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
#### Définition du modèle et entraînement
```
clf_rf = RandomForestClassifier(random_state=2020, ccp_alpha=0.001)
clf_rf.fit(X_train, y_train)
```
#### Prédictions (train et val)
```
y_train_pred_proba_rf = clf_rf.predict_proba(X_train)[:,1]
y_val_pred_proba_rf = clf_rf.predict_proba(X_val)[:,1]
```
#### Aire sous la courbe
```
print(f'AUC Train = {roc_auc_score(y_train, y_train_pred_proba_rf)}')
print(f'AUC Val = {roc_auc_score(y_val, y_val_pred_proba_rf)}')
```
## Exercice 5 - AdaBoost
```
from sklearn.ensemble import AdaBoostClassifier
```
[class sklearn.ensemble.AdaBoostClassifier(base_estimator=None, n_estimators=50, learning_rate=1.0, algorithm='SAMME.R', random_state=None)](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html)
#### Définition du modèle et entraînement
```
clf_ada = AdaBoostClassifier(n_estimators=100, random_state=2020)
clf_ada.fit(X_train, y_train)
```
#### Prédiction (train et val)
```
y_train_pred_proba_ada = clf_ada.predict_proba(X_train)[:,1]
y_val_pred_proba_ada = clf_ada.predict_proba(X_val)[:,1]
```
#### Aire sous la courbe
```
print(f'AUC Train = {roc_auc_score(y_train, y_train_pred_proba_ada)}')
print(f'AUC Val = {roc_auc_score(y_val, y_val_pred_proba_ada)}')
```
## Exercice 6 - Gradient Boosting
```
from sklearn.ensemble import GradientBoostingClassifier
```
[class sklearn.ensemble.GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, min_impurity_decrease=0.0, min_impurity_split=None, init=None, random_state=None, max_features=None, verbose=0, max_leaf_nodes=None, warm_start=False, presort='deprecated', validation_fraction=0.1, n_iter_no_change=None, tol=0.0001, ccp_alpha=0.0)](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html)
#### Définition du modèle et entraînement
```
clf_gb = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=3, random_state=2020)
clf_gb.fit(X_train, y_train)
```
#### Prédiction (train et val)
```
y_train_pred_proba_gb = clf_gb.predict_proba(X_train)[:,1]
y_val_pred_proba_gb = clf_gb.predict_proba(X_val)[:,1]
```
#### Aire sous la courbe
```
print(f'AUC Train = {roc_auc_score(y_train, y_train_pred_proba_gb)}')
print(f'AUC Val = {roc_auc_score(y_val, y_val_pred_proba_gb)}')
```
## Exercice 7 - XGBoost
```
#!pip install xgboost
import xgboost as xgb
```
[XGBoost Scikit-learn API](https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn)
#### Définition du modèle et entraînement
```
clf_xgb = xgb.XGBClassifier(objective='binary:logistic',
colsample_bytree=0.3,
learning_rate=1.1,
max_depth=5,
reg_alpha=0.1,
n_estimators=100)
clf_xgb.fit(X_train, y_train)
```
#### Prédictions (train et val)
```
y_train_pred_proba_xgb = clf_xgb.predict_proba(X_train)[:,1]
y_val_pred_proba_xgb = clf_xgb.predict_proba(X_val)[:,1]
```
#### Aire sous la courbe
```
print(f'AUC Train = {roc_auc_score(y_train, y_train_pred_proba_xgb)}')
print(f'AUC Val = {roc_auc_score(y_val, y_val_pred_proba_xgb)}')
```
## Exercice 8 - LightGBM
```
!pip install lightgbm
import lightgbm as lgb
```
#### Définition du modèle et entraînement
```
clf_lgbm = lgb.LGBMClassifier(num_leaves=6, learning_rate=0.1, n_estimators=200)
clf_lgbm.fit(X_train, y_train)
```
#### Prédictions (train et val)
```
y_train_pred_proba_lgbm = clf_lgbm.predict_proba(X_train)[:,1]
y_val_pred_proba_lgbm = clf_lgbm.predict_proba(X_val)[:,1]
```
#### Aire sous la courbe
```
print(f'AUC Train = {roc_auc_score(y_train, y_train_pred_proba_lgbm)}')
print(f'AUC Val = {roc_auc_score(y_val, y_val_pred_proba_lgbm)}')
```
## Exercice 9 - Évaluation des modèles
```
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve

# Overlay the validation-set ROC curves of every fitted model.
# Replaces seven copy-pasted roc_curve/plot pairs (which also kept clobbering
# a shared `thresholds` variable) with a single loop over a label -> proba map.
model_probas = {
    'Decision Tree': y_val_pred_proba_tree,
    'Bagging': y_val_pred_proba_bag,
    'Random Forest': y_val_pred_proba_rf,
    'AdaBoost': y_val_pred_proba_ada,
    'Gradient Boosting': y_val_pred_proba_gb,
    'XGBoost': y_val_pred_proba_xgb,
    'LightGBM': y_val_pred_proba_lgbm,
}
fig = plt.figure(1, figsize=(12, 12))
plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
for label, proba in model_probas.items():
    fpr, tpr, _ = roc_curve(y_val, proba)
    plt.plot(fpr, tpr, label=label)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend()
```
## Exercice 10 - Importance des variables explicatives
```
# Per-feature importances from the fitted XGBoost model.
imp = clf_xgb.feature_importances_
fig = plt.figure(2, figsize=(12, 12))
# NOTE(review): labels come from X.columns while the model was fit on X_train —
# confirm both share the same column order.
plt.barh(X.columns, imp)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/alastra32/DS-Unit-2-Applied-Modeling/blob/master/module4/assignment_applied_modeling_4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Lambda School Data Science, Unit 2: Predictive Modeling
# Applied Modeling, Module 4
You will use your portfolio project dataset for all assignments this sprint.
## Assignment
Complete these tasks for your project, and document your work.
- [ ] Continue to iterate on your project: data cleaning, exploratory visualization, feature engineering, modeling.
- [ ] Make a Shapley force plot to explain at least 1 individual prediction.
- [ ] Share at least 1 visualization on Slack.
(If you haven't completed an initial model yet for your portfolio project, then do today's assignment using your Tanzania Waterpumps model.)
## Stretch Goals
- [ ] Make Shapley force plots to explain at least 4 individual predictions.
- If your project is Binary Classification, you can do a True Positive, True Negative, False Positive, False Negative.
- If your project is Regression, you can do a high prediction with low error, a low prediction with low error, a high prediction with high error, and a low prediction with high error.
- [ ] Use Shapley values to display verbal explanations of individual predictions.
- [ ] Use the SHAP library for other visualization types.
The [SHAP repo](https://github.com/slundberg/shap) has examples for many visualization types, including:
- Force Plot, individual predictions
- Force Plot, multiple predictions
- Dependence Plot
- Summary Plot
- Summary Plot, Bar
- Interaction Values
- Decision Plots
We just did the first type during the lesson. The [Kaggle microcourse](https://www.kaggle.com/dansbecker/advanced-uses-of-shap-values) shows two more. Experiment and see what you can learn!
## Links
- [Kaggle / Dan Becker: Machine Learning Explainability — SHAP Values](https://www.kaggle.com/learn/machine-learning-explainability)
- [Christoph Molnar: Interpretable Machine Learning — Shapley Values](https://christophm.github.io/interpretable-ml-book/shapley.html)
- [SHAP repo](https://github.com/slundberg/shap) & [docs](https://shap.readthedocs.io/en/latest/)
## Setup
```
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
    # Install required python packages:
    # category_encoders, version >= 2.0
    # pandas-profiling, version >= 2.0
    # plotly, version >= 4.0
    !pip install --upgrade category_encoders pandas-profiling plotly
import pandas as pd
from sklearn.model_selection import train_test_split
# merge train_features.csv & train_labels.csv (pd.merge joins on the columns
# the two files share)
trainandval = pd.merge(pd.read_csv('https://raw.githubusercontent.com/alastra32/DS-Unit-2-Kaggle-Challenge/master/data/tanzania/train_features.csv'),
                       pd.read_csv('https://raw.githubusercontent.com/alastra32/DS-Unit-2-Kaggle-Challenge/master/data/tanzania/train_labels.csv'))
# read test_features.csv & sample_submission.csv
test = pd.read_csv('https://raw.githubusercontent.com/alastra32/DS-Unit-2-Kaggle-Challenge/master/data/tanzania/test_features.csv')
sample_submission = pd.read_csv('https://raw.githubusercontent.com/alastra32/DS-Unit-2-Kaggle-Challenge/master/data/tanzania/sample_submission.csv')
# import block
# NOTE(review): train_test_split is imported twice (also above) — harmless but redundant.
pd.set_option('display.max_columns', None)
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
import category_encoders as ce
from xgboost import XGBClassifier
# train/validation split: 95/5, stratified on the target so class balance is preserved
train, val = train_test_split(trainandval, train_size=0.95, test_size=0.05,
                              stratify=trainandval['status_group'], random_state=42)
train.shape, val.shape, test.shape
```
## Manual Mode
```
# We need a function that returns the mode of a given series for the imputer function.
def manual_mode(feature):
    """Return the most frequent value of ``feature``, or None if there is none.

    pd.Series.mode() returns an empty Series for an empty/all-null input, so
    indexing element 0 raises; return None in that case instead of letting the
    error propagate.  (Was a bare ``except: pass``, which silently swallowed
    *every* exception; only the empty-mode lookup errors are expected here.)
    """
    try:
        return feature.mode()[0]
    except (IndexError, KeyError):
        return None
```
## Imputer
```
# imputes by the lowest non-null geographic measure available
def fill_nulls(df, feature, method):
    """Fill nulls in ``feature`` group-wise, widening the geographic scope.

    Imputes from ward first, then lga, region and basin, stopping as soon as
    the column has no nulls left.  ``method`` is an aggregation name such as
    'mean'/'median', or 'mode' (which dispatches to the manual_mode helper).
    """
    df = df.copy()  # work on a copy to avoid SettingWithCopy warnings
    if method == 'mode':
        method = manual_mode
    for scope in ('ward', 'lga', 'region', 'basin'):
        if not df[feature].isnull().any():
            break  # nothing left to impute
        group_fill = df.groupby(scope)[feature].transform(method)
        df[feature] = df[feature].fillna(group_fill)
    return df[feature]
def impute(df, features, method):
    """Impute every column in ``features`` with the given method.

    ``method`` is forwarded to fill_nulls ('mean'/'median'/'mode'); the input
    frame is left untouched and a filled copy is returned.
    """
    result = df.copy()
    for column in features:
        result[column] = fill_nulls(result, column, method)
    return result
```
## Wrangler
```
def flag_missing_values(df):
    '''Add a boolean "<FEATURE>_MISSING" column for every column containing nulls.

    Returns a new DataFrame; the input is not modified.
    BUG FIX: the original called df.copy() without assigning the result, so the
    flag columns were silently written into the caller's DataFrame.
    '''
    df = df.copy()
    columns_with_nulls = df.columns[df.isna().any()]
    for col in columns_with_nulls:
        df[col+'_MISSING'] = df[col].isna()
    return df
def convert_dummy_nulls(df):
    '''Treat dummy zero values as missing (NaN) in selected columns.'''
    df = df.copy()
    # Some missing latitudes are encoded as -2e-08; normalize them to zero so
    # they fall through the zero -> NaN replacement below.
    df['latitude'] = df['latitude'].replace(-2e-08, 0)
    zero_columns = ['longitude', 'latitude', 'construction_year', 'gps_height',
                    'population']
    df[zero_columns] = df[zero_columns].replace(0, np.nan)
    return df
def clean_text_columns(df):
    '''Lowercase text columns, strip non-alphanumerics, map "unknown"-style values to NaN.'''
    df = df.copy()
    # Columns in which every single value is a string.
    text_columns = df[df.columns[(df.applymap(type) == str).all(0)]]
    unknowns = ['unknown', 'notknown', 'none', 'nan', '']
    for col in text_columns:
        # BUG FIX: pandas >= 2.0 defaults str.replace to regex=False, so the
        # pattern '\W' was matched as a literal two-character substring.
        # Request regex matching explicitly and use a raw string.
        df[col] = df[col].str.lower().str.replace(r'\W', '', regex=True)
        df[col] = df[col].replace(unknowns, np.nan)
    return df
def get_distances_to_population_centers(df):
    '''Add a Euclidean lat/lon distance column to each major population center.'''
    df = df.copy()
    population_centers = {'dar': (6.7924, 39.2083),
                          'mwanza': (2.5164, 32.9175),
                          'dodoma': (6.1630, 35.7516)}
    for city, (lat, lon) in population_centers.items():
        delta_lat = df['latitude'] - lat
        delta_lon = df['longitude'] - lon
        df[city + '_distance'] = np.sqrt(delta_lat**2 + delta_lon**2)
    return df
def engineer_date_features(df):
    '''Derive date components and the construction-to-inspection interval.'''
    df = df.copy()
    # Parse date_recorded to datetime.  (Dropped `infer_datetime_format=True`:
    # it is deprecated and a no-op in pandas >= 2.0.)
    df['date_recorded'] = pd.to_datetime(df.date_recorded)
    # extract components from date_recorded
    df['year_recorded'] = df['date_recorded'].dt.year
    df['month_recorded'] = df['date_recorded'].dt.month
    df['day_recorded'] = df['date_recorded'].dt.day
    # years elapsed between construction and the recorded inspection
    df['inspection_interval'] = df['year_recorded'] - df['construction_year']
    return df
def wrangle(df):
    '''Full cleaning/feature-engineering pipeline for one dataset split.

    Applies null conversion, text cleaning, distance and date features and
    missing-value flags, drops constant/identifier columns, then imputes
    numeric columns by the median and non-numeric ones by the mode.
    '''
    df = df.copy()
    pipeline = (convert_dummy_nulls,
                clean_text_columns,
                get_distances_to_population_centers,
                engineer_date_features,
                flag_missing_values)
    for step in pipeline:
        df = step(df)
    # recorded_by is constant, id is an identifier, date_recorded is now redundant
    df = df.drop(columns=['recorded_by', 'id', 'date_recorded'])
    # Apply imputation per dtype group.
    numeric_columns = df.select_dtypes(include='number').columns
    nonnumeric_columns = df.select_dtypes(exclude='number').columns
    df = impute(df, numeric_columns, 'median')
    df = impute(df, nonnumeric_columns, 'mode')
    return df
```
## Engineer, Pipe, and Train
```
# clean and engineer all datasets
train_wrangled = wrangle(train)
val_wrangled = wrangle(val)
test_wrangled = wrangle(test)
# arrange data into X features matrix and y target vector
target = 'status_group'
X_train = train_wrangled.drop(columns=target)
y_train = train_wrangled[target]
X_val = val_wrangled.drop(columns=target)
y_val = val_wrangled[target]
X_test = test_wrangled
# Use Ordinal Encoder, outside of a pipeline
encoder = ce.OrdinalEncoder()
X_train_encoded = encoder.fit_transform(X_train)
X_val_encoded= encoder.fit_transform(X_val)
model = RandomForestClassifier(n_estimators=129, max_depth=29, min_samples_leaf=2,
random_state=42, min_impurity_decrease=2.22037e-16, n_jobs=-1)
model.fit(X_train_encoded, y_train)
#score
model.score(X_val_encoded,y_val)
row = X_test.iloc[[3232]]
row
!pip install shap
import shap
# processor = pipeline[:-1]
explainer = shap.TreeExplainer(model)
row_process = encoder.transform(row)
shap_values = explainer.shap_values(row_process)
shap.initjs()
shap.force_plot(
base_value=explainer.expected_value[0],
shap_values=shap_values[0],
features=row
)
feature_names = row.columns
feature_values = row.values[0]
shaps = pd.Series(shap_values[0][0], zip(feature_names,feature_values))
shaps.sort_values().plot.barh(color='grey', figsize=(15,20));
```
| github_jupyter |
```
from keras.models import Sequential
from keras.layers import Dense, Input, Reshape
from keras.models import Model
from keras.layers.core import Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling2D
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.core import Flatten, Dropout
from keras.optimizers import Adam
from keras.datasets import mnist
import numpy as np
from PIL import Image
import argparse
import math
EPOCH = 30       # number of passes over the training set
PNG_CREATE = 20  # dump a preview PNG every this many batches
N_CLASS = 10     # MNIST digit classes
# Generator: a convolutional autoencoder that produces the fake/adversarial images.
def auto_encoder_generator_model():
    """Build a convolutional autoencoder mapping 28x28x1 images to 28x28x1 images."""
    input_img = Input(shape=(28, 28, 1))  # adapt this if using `channels_first` image data format
    # --- Encoder: three conv+pool stages, 28x28 -> 4x4 with 8 channels ---
    h = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    h = MaxPooling2D((2, 2), padding='same')(h)
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(h)
    h = MaxPooling2D((2, 2), padding='same')(h)
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(h)
    encoded = MaxPooling2D((2, 2), padding='same')(h)
    # at this point the representation is (4, 4, 8) i.e. 128-dimensional
    # --- Decoder: mirror the encoder with upsampling back to 28x28 ---
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
    h = UpSampling2D((2, 2))(h)
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(h)
    h = UpSampling2D((2, 2))(h)
    # valid padding trims 16x16 -> 14x14 so the final upsample restores 28x28
    h = Conv2D(16, (3, 3), activation='relu')(h)
    h = UpSampling2D((2, 2))(h)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(h)
    autoencoder = Model(input_img, decoded)
    print('*** Auto encoder generator model ***')
    autoencoder.summary()
    return autoencoder
# Discriminator: a CNN that classifies an input image into N_CLASS outputs.
def discriminator_model():
    """Build the convolutional discriminator/classifier (28x28x1 -> N_CLASS)."""
    model = Sequential()
    model.add(Conv2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(LeakyReLU(0.2))
    # BUG FIX: `subsample` is the Keras 1 argument name; Keras 2 (imported
    # here as keras.layers.convolutional.Conv2D) renamed it to `strides`.
    # A stride of 2 downsamples the feature map.
    model.add(Conv2D(128, (5, 5), strides=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.5))
    model.add(Dense(N_CLASS))
    model.add(Activation('sigmoid'))
    print('*** discriminator model ***')
    model.summary()
    return model
def generator_containing_discriminator(g, d):
    """Stack generator and discriminator for adversarial training of g.

    The discriminator is frozen (trainable=False) so that training this
    stacked model only updates the generator's weights.
    """
    d.trainable = False
    stacked = Sequential()
    stacked.add(g)
    stacked.add(d)
    stacked.summary()
    return stacked
def combine_images(generated_images):
    """Tile a batch of (N, H, W, 1) images into one 2-D image grid.

    The grid has int(sqrt(N)) columns and as many rows as needed; unused
    cells stay zero.  Returns a 2-D array with the batch's dtype.
    """
    num = generated_images.shape[0]
    cols = int(math.sqrt(num))
    rows = int(math.ceil(float(num) / cols))
    h, w = generated_images.shape[1:3]
    canvas = np.zeros((rows * h, cols * w), dtype=generated_images.dtype)
    for idx, img in enumerate(generated_images):
        r, c = divmod(idx, cols)
        canvas[r * h:(r + 1) * h, c * w:(c + 1) * w] = img[:, :, 0]
    return canvas
def train(BATCH_SIZE):
    """Adversarially train the autoencoder generator against the classifier.

    The discriminator is trained to give real images their true digit label
    and generated images their shifted "target" label; the generator is then
    trained through the frozen discriminator to make its outputs receive the
    target labels.

    Args:
        BATCH_SIZE (int): number of images per training batch.
    """
    ####
    # Load the data
    ####
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    print ("データのサイズ")
    print ("X_train: %s, y_train:%s" % (X_train.shape, y_train.shape))
    print ("X_test: %s, y_test: %s" % (X_test.shape, y_test.shape))
    # Scale pixel values from [0, 255] to [-1, 1] and add a channel axis.
    X_train = (X_train.astype(np.float32) - 127.5)/127.5
    X_train = X_train[:, :, :, None]
    X_test = X_test[:, :, :, None]

    def create_label_data(in_y_data):
        # One-hot encode the digit labels: row d of the identity matrix is the
        # one-hot vector for digit d (replaces the original 10-way if/elif chain).
        return np.eye(N_CLASS, dtype=int)[np.asarray(in_y_data)]

    # One-hot encode y.
    ylabel_train = create_label_data(y_train)
    ylabel_test = create_label_data(y_test)

    def create_target_data(in_ylabel_data):
        # Labels we want the discriminator to assign to generated images:
        # digit d is mapped to (d + 1) % 10 (the target choice is arbitrary).
        digits = np.argmax(in_ylabel_data, axis=1)
        return np.eye(N_CLASS, dtype=int)[(digits + 1) % N_CLASS]

    # Build the target-label data.
    target_ylabel_train = create_target_data(ylabel_train)
    target_ylabel_test = create_target_data(ylabel_test)

    ####
    # Build the models
    ####
    d = discriminator_model()
    g = auto_encoder_generator_model()
    # Stack generator and discriminator for adversarial training
    # (inside d_on_g the discriminator is frozen: d.trainable = False).
    d_on_g = generator_containing_discriminator(g, d)
    # Adam optimizers for the discriminator, generator and stacked model.
    d_optim = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    g_optim = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    d_on_g_optim = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    g.compile(loss='binary_crossentropy', optimizer=g_optim)
    d_on_g.compile(loss='binary_crossentropy', optimizer=d_on_g_optim)
    d.trainable = True  # re-enable discriminator updates for its own training
    d.compile(loss='binary_crossentropy', optimizer=d_optim)

    ####
    # Epoch loop
    ####
    for epoch in range(EPOCH):
        print("Epoch is", epoch)
        print("Number of batches", int( X_train.shape[0] / BATCH_SIZE ))
        ####
        # Batch loop
        ####
        for index in range(int( X_train.shape[0] / BATCH_SIZE )):
            # Slice out one batch of images and the matching label arrays.
            image_batch = X_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]
            ylabel_train_batch = ylabel_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]
            target_ylabel_train_batch = target_ylabel_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]
            # Generate adversarial images with the autoencoder.
            generated_images = g.predict(image_batch, verbose=0)
            ####
            # Periodically dump a PNG to monitor progress
            ####
            if index % PNG_CREATE == 0:
                image = combine_images(generated_images)
                image = image*127.5+127.5  # back to [0, 255]
                print('*** Generate Image by Auto encoder ***')
                Image.fromarray(image.astype(np.uint8)).save(str(epoch)+"_"+str(index)+".png")
            ####
            # Train the discriminator
            ####
            print ("image_batch: %s, generated_images:%s" % (image_batch.shape, generated_images.shape))
            print ("ylabel_train_batch: %s, target_ylabel_train_batch:%s" % (ylabel_train_batch.shape, target_ylabel_train_batch.shape))
            X = np.concatenate((image_batch, generated_images))                  # real + generated images
            y = np.concatenate((ylabel_train_batch, target_ylabel_train_batch))  # true + target labels
            # Can the discriminator reproduce the labels attached to each image?
            d_loss = d.train_on_batch(X, y)
            print("batch %d D_loss : %f" % (index, d_loss))
            # Train the generator through the frozen discriminator.
            d.trainable = False
            # BUG FIX: the original passed the full `ylabel_train` (all 60000
            # true labels) for a BATCH_SIZE-image batch — a sample-count
            # mismatch, and the wrong label set: per the comments above, the
            # generator should push predictions toward the batch's TARGET labels.
            g_loss = d_on_g.train_on_batch(image_batch, target_ylabel_train_batch)
            d.trainable = True
            print("batch %d G_loss : %f" % (index, g_loss))
            # Checkpoint the weights every 10 batches.
            if index % 10 == 9:
                g.save_weights('generator', True)
                d.save_weights('discriminator', True)
train(BATCH_SIZE=100)
def generate(BATCH_SIZE, nice=False):
    """Generate images with the saved generator and write them to a PNG.

    Args:
        BATCH_SIZE (int): number of images to produce.
        nice (bool): if True, over-generate 20x and keep the BATCH_SIZE images
            the discriminator scores highest.

    NOTE(review): the generator built here is an autoencoder expecting
    (28, 28, 1) image input, but this function feeds it (BATCH_SIZE, 100)
    noise vectors — confirm the intended input before relying on this path.
    """
    g = auto_encoder_generator_model()
    g_optim = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    g.compile(loss='binary_crossentropy', optimizer=g_optim)
    g.load_weights('generator')
    if nice:
        d = discriminator_model()
        d_optim = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        # BUG FIX: the original compiled d with g_optim; use the discriminator's
        # own optimizer (sharing one optimizer instance across models is fragile).
        d.compile(loss='binary_crossentropy', optimizer=d_optim)
        d.load_weights('discriminator')
        noise = np.random.uniform(-1, 1, (BATCH_SIZE*20, 100))
        generated_images = g.predict(noise, verbose=1)
        d_pret = d.predict(generated_images, verbose=1)
        # Rank generated images by discriminator score and keep the best.
        index = np.arange(0, BATCH_SIZE*20)
        index.resize((BATCH_SIZE*20, 1))
        pre_with_index = list(np.append(d_pret, index, axis=1))
        pre_with_index.sort(key=lambda x: x[0], reverse=True)
        nice_images = np.zeros((BATCH_SIZE,) + generated_images.shape[1:3], dtype=np.float32)
        nice_images = nice_images[:, :, :, None]
        for i in range(BATCH_SIZE):
            idx = int(pre_with_index[i][1])
            nice_images[i, :, :, 0] = generated_images[idx, :, :, 0]
        image = combine_images(nice_images)
    else:
        noise = np.random.uniform(-1, 1, (BATCH_SIZE, 100))
        generated_images = g.predict(noise, verbose=1)
        image = combine_images(generated_images)
    image = image*127.5+127.5  # back to [0, 255]
    Image.fromarray(image.astype(np.uint8)).save("generated_image.png")
generate(BATCH_SIZE=100, nice=False)
```
| github_jupyter |
# Loss Functions
This python script illustrates the different loss functions for regression and classification.
We start by loading the necessary libraries and resetting the computational graph.
```
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
# Clear any previously-built graph so the notebook can be re-run cleanly (TF1 graph mode).
ops.reset_default_graph()
```
### Create a Graph Session
```
sess = tf.Session()
```
## Numerical Predictions
---------------------------------
To start with our investigation of loss functions, we begin by looking at numerical loss functions. To do so, we must create a sequence of predictions around a target. For this exercise, we consider the target to be zero.
```
# Various Predicted X-values
x_vals = tf.linspace(-1., 1., 500)  # 500 evenly spaced "predictions" in [-1, 1]
# Create our target of zero
target = tf.constant(0.)
```
### L2 Loss
The L2 loss is one of the most common regression loss functions. Here we show how to create it in TensorFlow and we evaluate it for plotting later.
```
# L2 loss
# L = (pred - actual)^2
l2_y_vals = tf.square(target - x_vals)
l2_y_out = sess.run(l2_y_vals)  # evaluate to a numpy array for plotting
```
### L1 Loss
An alternative loss function to consider is the L1 loss. This is very similar to L2 except that we take the `absolute value` of the difference instead of squaring it.
```
# L1 loss
# L = abs(pred - actual)
l1_y_vals = tf.abs(target - x_vals)
l1_y_out = sess.run(l1_y_vals)  # evaluate to a numpy array for plotting
```
### Pseudo-Huber Loss
The pseudo-Huber loss function is a smooth approximation to the L1 loss as the (predicted - target) values get larger. When the predicted values are close to the target, the pseudo-Huber loss behaves similarly to the L2 loss.
```
# L = delta^2 * (sqrt(1 + ((pred - actual)/delta)^2) - 1)
# delta controls where the loss transitions from the L2-like to the L1-like regime.
# Pseudo-Huber with delta = 0.25
delta1 = tf.constant(0.25)
phuber1_y_vals = tf.multiply(tf.square(delta1), tf.sqrt(1. + tf.square((target - x_vals)/delta1)) - 1.)
phuber1_y_out = sess.run(phuber1_y_vals)
# Pseudo-Huber with delta = 5
delta2 = tf.constant(5.)
phuber2_y_vals = tf.multiply(tf.square(delta2), tf.sqrt(1. + tf.square((target - x_vals)/delta2)) - 1.)
phuber2_y_out = sess.run(phuber2_y_vals)
```
### Plot the Regression Losses
Here we use Matplotlib to plot the L1, L2, and Pseudo-Huber Losses.
```
# Evaluate the x grid once and overlay the four regression losses.
x_array = sess.run(x_vals)
plt.plot(x_array, l2_y_out, 'b-', label='L2 Loss')
plt.plot(x_array, l1_y_out, 'r--', label='L1 Loss')
plt.plot(x_array, phuber1_y_out, 'k-.', label='P-Huber Loss (0.25)')
plt.plot(x_array, phuber2_y_out, 'g:', label='P-Huber Loss (5.0)')
plt.ylim(-0.2, 0.4)
plt.legend(loc='lower right', prop={'size': 11})
plt.show()
```
## Categorical Predictions
-------------------------------
We now consider categorical loss functions. Here, the predictions will be around the target of 1.
```
# Various predicted X values
x_vals = tf.linspace(-3., 5., 500)
# Target of 1.0
target = tf.constant(1.)
# A 500-element vector of 1.0 targets for losses needing one label per prediction.
targets = tf.fill([500,], 1.)
```
### Hinge Loss
The hinge loss is useful for categorical predictions. Here it is `max(0, 1-(pred*actual))`.
```
# Hinge loss
# Use for predicting binary (-1, 1) classes
# L = max(0, 1 - (pred * actual))
hinge_y_vals = tf.maximum(0., 1. - tf.multiply(target, x_vals))
hinge_y_out = sess.run(hinge_y_vals)  # evaluate to a numpy array for plotting
```
### Cross Entropy Loss
The cross entropy loss is a very popular way to measure the loss between categorical targets and output model logits. You can read about the details more here: https://en.wikipedia.org/wiki/Cross_entropy
```
# Cross entropy loss
# L = -actual * (log(pred)) - (1-actual)(log(1-pred))
# NOTE(review): x_vals spans [-3, 5] but log() is only defined for predictions
# in (0, 1); entries outside that range evaluate to NaN, so only the (0, 1)
# portion of the plotted curve is meaningful.
xentropy_y_vals = - tf.multiply(target, tf.log(x_vals)) - tf.multiply((1. - target), tf.log(1. - x_vals))
xentropy_y_out = sess.run(xentropy_y_vals)
```
### Sigmoid Entropy Loss
TensorFlow also has a sigmoid-entropy loss function. This is very similar to the above cross-entropy function except that we take the sigmoid of the predictions in the function.
```
# L = -actual * (log(sigmoid(pred))) - (1-actual)(log(1-sigmoid(pred)))
# or the numerically stable form TensorFlow uses internally:
# L = max(pred, 0) - pred * actual + log(1 + exp(-abs(pred)))
x_val_input = tf.expand_dims(x_vals, 1)      # shape (500, 1): one logit per example
target_input = tf.expand_dims(targets, 1)
# BUG FIX: the original called tf.nn.softmax_cross_entropy_with_logits here.
# With a single logit per row the softmax is identically 1, so that loss
# collapses to a constant; the sigmoid variant matches the section title,
# the formula above, and the variable names.
xentropy_sigmoid_y_vals = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_val_input,
                                                                  labels=target_input)
xentropy_sigmoid_y_out = sess.run(xentropy_sigmoid_y_vals)
```
### Weighted (Softmax) Cross Entropy Loss
Tensorflow also has a similar function to the `sigmoid cross entropy` loss function above, but we take the softmax of the actuals and weight the predicted output instead.
```
# Weighted sigmoid cross entropy loss: like sigmoid cross entropy, but the
# positive-class term is scaled by pos_weight.
# L = -actual * (log(pred)) * weights - (1-actual)(log(1-pred))
# or
# L = (1 - pred) * actual + (1 + (weights - 1) * pred) * log(1 + exp(-actual))
weight = tf.constant(0.5)
# BUG FIX: the TF 1.x signature is (targets, logits, pos_weight), so the
# original positional call tf.nn.weighted_cross_entropy_with_logits(x_vals,
# targets, weight) bound x_vals as the TARGETS and targets as the LOGITS.
# Use keyword arguments to bind each tensor to the right parameter.
xentropy_weighted_y_vals = tf.nn.weighted_cross_entropy_with_logits(targets=targets,
                                                                    logits=x_vals,
                                                                    pos_weight=weight)
xentropy_weighted_y_out = sess.run(xentropy_weighted_y_vals)
```
### Plot the Categorical Losses
```
# Plot the output
x_array = sess.run(x_vals)
plt.plot(x_array, hinge_y_out, 'b-', label='Hinge Loss')
plt.plot(x_array, xentropy_y_out, 'r--', label='Cross Entropy Loss')
plt.plot(x_array, xentropy_sigmoid_y_out, 'k-.', label='Cross Entropy Sigmoid Loss')
plt.plot(x_array, xentropy_weighted_y_out, 'g:', label='Weighted Cross Entropy Loss (x0.5)')
plt.ylim(-1.5, 3)
#plt.xlim(-1, 3)
plt.legend(loc='lower right', prop={'size': 11})
plt.show()
```
### Softmax entropy and Sparse Entropy
Since it is hard to graph multiclass loss functions, we will show how to get the output instead
```
# Softmax entropy loss
# L = -actual * (log(softmax(pred))) - (1-actual)(log(1-softmax(pred)))
unscaled_logits = tf.constant([[1., -3., 10.]])
target_dist = tf.constant([[0.1, 0.02, 0.88]])  # full probability distribution over 3 classes
softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=unscaled_logits,
                                                           labels=target_dist)
print(sess.run(softmax_xentropy))
# Sparse entropy loss
# Use when classes and targets have to be mutually exclusive
# L = sum( -actual * log(pred) )
unscaled_logits = tf.constant([[1., -3., 10.]])
sparse_target_dist = tf.constant([2])  # the true class INDEX instead of a distribution
sparse_xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=unscaled_logits,
                                                                 labels=sparse_target_dist)
print(sess.run(sparse_xentropy))
```
| github_jupyter |
# Academic Integrity Statement
As a matter of Departmental policy, **we are required to give you a 0** unless you **type your name** after the following statement:
> *I certify on my honor that I have neither given nor received any help, or used any non-permitted resources, while completing this evaluation.*
\[TYPE YOUR NAME HERE\]
# Problem 1 (50 points)
Rampant disinformation---often called "fake news"---has emerged as one of the fundamental crises over our time.
<figure class="image" style="width:30%">
<img src="https://s3.amazonaws.com/libapps/accounts/63707/images/21392935.jpg" alt="A portrait of Willy Wonka, wearing a purple suit and brown top hat. His face is condescending. The caption reads: 'Oh, so you read it on the internet? Well then I guess it must be true.'">
<figcaption><i></i></figcaption>
</figure>
There is a growing movement for online platforms to regulate fake news. Doing so at scale requires combing through millions of news items every day, making it very expensive to do by hand. Can an algorithm do it instead?
The following two URLs each contain part of a data set.
- **Fake news items**: `https://raw.githubusercontent.com/PhilChodrow/PIC16A/master/datasets/fake_news/Fake.csv`
- **Real news items**: `https://raw.githubusercontent.com/PhilChodrow/PIC16A/master/datasets/fake_news/true.csv`
Use the data at these urls to **construct a fake news classifier.**
1. Your model must be able to **make predictions** about whether or not an unseen news item is fake or real.
2. Because fake news models must be able to make millions of predictions per day, it must be able to make predictions very quickly. More columns mean more computation time. **Your final model should use no more than 50 columns.**
You are free to create any columns that you need, and to use any functions that we have or have not covered in the course. You may also use any machine learning model.
Please use Markdown headers with \#\# signs to clearly distinguish the different stages of your solution.
### Requirements
1. Any operations that you perform multiple times (such as processing that you perform on both the training and test sets) must be contained in a function with informative docstrings. Comments and explanations are expected throughout. It is especially important to explain how you chose the columns to use in your final model.
2. You should not use for-loops to iterate over the rows of data frames or arrays.
3. You must fit your model on the training data, and not use the test data for fitting.
### Hints
- `pd.concat()` is a good way to combine data frames.
- Try fitting a model with as many columns as you want first. See if you can get a representation of which columns are important, and then select your final columns from this list.
- In class, we talked about greedy stagewise feature selection and exhaustive enumeration for determining a good set of columns. Neither of these methods are recommended for this problem.
- If you want to be creative about your model choice, then please go for it. If you want a safe option, try logistic regression.
- If a model takes too long to fit on the full data set, try fitting it on, say, 10% of the data.
- You might find the some of the [cheatsheets](https://philchodrow.github.io/PIC16A/resources/) to be helpful.
### Rubric
- (**15 points**): clearly written code that makes economical use of skills from the course to manipulate data.
- (**15 points**): comments, explanatory surrounding text, and docstrings for any functions and classes.
- (**20 points**): computed according to the formula `20 x score`, where `score` is your model's prediction performance on unseen data. Models that use more than 50 columns can receive up to 15 of these points. Scores will be rounded up. For example, if you obtain an 84% predictive performance with 50 columns, then the score of `20 x 0.84 = 16.8` will be rounded up to 17 points.
| github_jupyter |
```
import pandas as pd
import ast
from collections import Counter
import csv
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from factor_analyzer.factor_analyzer import calculate_kmo
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from advanced_pca import CustomPCA
import gensim
import scipy
import seaborn as sns; sns.set()
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.manifold import TSNE
import matplotlib.cm as cm
from sklearn.cluster import DBSCAN
```
The data is used from the 'Mediacloud_Analysis.ipynb' (<a href='Mediacloud_Analysis.ipynb'>link</a>). It already contains preprocessed and tokenized text for each article. Also it has a column with corona terms specifically and their frequency.
```
#reading the dataframes with pre-processed tokens (one file per month: Feb, May, Sep)
df = pd.read_csv("preprocessed_results/mediacloud_parsed_corona_df_feb.csv")
temp = pd.read_csv("preprocessed_results/mediacloud_parsed_corona_df_may.csv")
temp_2 = pd.read_csv("preprocessed_results/mediacloud_parsed_corona_df_sep.csv")
df = pd.concat([df,temp])
df = pd.concat([df,temp_2])
# NOTE(review): the concatenated frame keeps the per-file indices (duplicates);
# later positional access assumes a clean RangeIndex — consider reset_index.
df = df[~df.Text.isnull()] #removing rows with no text
df['tokens'] = df['tokens'].apply(ast.literal_eval) #transforming string of tokens to list
df.head()
# sample = df.sample(n=1)
# Spot-check one random story (url/title/date) from the raw Mediacloud URL export.
temp = pd.read_csv('ncov-or-cov-19-or-covid-or-all-story-urls-20201012133126.csv')
sample = temp.sample(n=1)
print(sample['url'].values[0])
print(sample['title'].values[0])
print(sample['publish_date'])
```
For further procedures we use the 500 most frequent tokens, which are later manually reviewed. All names, countries, and dates, as well as words that do not carry any strong meaning, are excluded. The reviewed tokens are saved to the 'most_frequent_tokens.csv' file.
```
#finding 500 most frequent tokens across the whole corpus
flatten_tokens = [token for sublist in df['tokens'].tolist() for token in sublist]
counter_tokens = Counter(flatten_tokens)
most_frequent = counter_tokens.most_common(500)
#saving them to csv file (one "token,count" row each) for manual review
with open('most_frequent_tokens.csv', "w") as the_file:
    csv.register_dialect("custom", delimiter=",", skipinitialspace=True)
    writer = csv.writer(the_file, dialect="custom")
    for tup in most_frequent:
        writer.writerow(tup)
#finding 500 most frequent tokens for SEPTEMBER
# NOTE(review): 85298 is a hard-coded row offset marking the start of the
# September articles, and 'bigrams' is not created in the code shown here —
# confirm both against the preprocessing notebook.
flatten_tokens = [token for sublist in df['bigrams'][85298:].tolist() for token in sublist]
counter_tokens = Counter(flatten_tokens)
most_frequent = counter_tokens.most_common(500)
#saving them to csv file
with open('most_frequent_bigrams_SEP.csv', "w") as the_file:
    csv.register_dialect("custom", delimiter=",", skipinitialspace=True)
    writer = csv.writer(the_file, dialect="custom")
    for tup in most_frequent:
        writer.writerow(tup)
```
## Unigrams
Reading file with reviewed tokens (<a href="most_frequent_tokens_cleaned_v2.csv">file link</a>)
```
# Load the manually reviewed token list (names, countries, dates and other
# low-meaning words already removed).
tokens = pd.read_csv('most_frequent_tokens_cleaned_v2.csv', header=None, names=['token', 'frequency'])
#tokens['tfidf'] = 0
```
First, the original tokenized texts are converted to tf-idf scores. The result is a sparse tf-idf matrix. After that, for each row only the tf-idf scores of the frequent tokens are kept (for each sparse vector we match the id of the tf-idf value with the dictionary token and check whether this token is in the clean list). As a result, each row in the dataframe has a vector of length n (the number of cleaned frequent tokens) with tf-idf values.
```
def dummy_fun(doc):
    """Identity analyzer/tokenizer: the texts are already tokenized lists."""
    return doc

# Bag-of-words counts over the pre-tokenized texts, then tf-idf weighting.
cv = CountVectorizer(analyzer='word',
                     tokenizer=dummy_fun,
                     preprocessor=dummy_fun,
                     token_pattern=None)
data = cv.fit_transform(df['tokens'])
tfidf_transformer = TfidfTransformer()
tfidf_matrix = tfidf_transformer.fit_transform(data)
tfidf_dict = cv.get_feature_names()  # vocabulary: all tokens seen in the corpus

# For each document keep only the tf-idf scores of the reviewed frequent tokens.
# The membership test is hoisted into a set: the original rebuilt
# tokens['token'].tolist() for every (row, vocabulary-entry) pair, which is
# quadratic; the per-row debug print is dropped as well.
frequent_tokens = set(tokens['token'])
df['transformed_tokens'] = np.empty((len(df), 0)).tolist()
for i in range(tfidf_matrix.shape[0]):
    row = tfidf_matrix[i].toarray()[0]
    df.at[i, 'transformed_tokens'] = [
        row[j] for j in range(len(tfidf_dict)) if tfidf_dict[j] in frequent_tokens
    ]

# Stack the per-document vectors into a dense (n_docs, n_frequent_tokens) array.
temp = df['transformed_tokens'].tolist()
temp = [np.array(x) for x in temp]
tfidf_frequent = np.array(temp)
tfidf_frequent.shape
with open("tfidf_transformed_tokens.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows(temp)
```
KMO score is calculated (according to the <a href="https://www.tandfonline.com/doi/full/10.1080/1369183X.2017.1282813">paper</a>). KMO is a measure for sampling adequacy applied in factor analysis. It informs about the general strength of the relationship among items and thus indicates whether an item (i.e. a word) should be included in a factor analysis or not. Following Backhaus et al. (2006), terms with a KMO value below .50 were subsequently excluded.
```
# KMO sampling adequacy: kmo_all holds one value per term (column),
# kmo_model the overall statistic.
kmo_all, kmo_model = calculate_kmo(tfidf_frequent)
kmo_model
# Keep only terms (columns) with KMO > 0.5 (Backhaus et al., 2006); low-KMO
# columns are zeroed so the matrix keeps its shape for the later PCA/term
# lookup. BUG FIX: the original assigned *rows* (features_pca[i] = ...) while
# iterating over column indices, mixing up samples and features.
features_pca = np.zeros((tfidf_frequent.shape[0], len(kmo_all)))
for i in range(len(kmo_all)):
    if kmo_all[i] > 0.5:
        features_pca[:, i] = tfidf_frequent[:, i]
print(len(features_pca), tfidf_frequent.shape)
```
Running PCA on the filtered tokens. PCA is applied using <a href="https://pypi.org/project/advanced-pca/"> advanced PCA package</a>. For each number of components factor loadings are calculated (for each term) based on the <a href="https://www.r-bloggers.com/p-is-for-principal-components-analysis-pca/">tutorial here</a>. Only significant terms are taken (with a threshold of 0.1)
```
# Standardize the KMO-filtered features and sweep n_components from 3 to 20,
# recording explained variance and the significant terms per component.
scaler = StandardScaler()
features_pca_scaled = scaler.fit_transform(features_pca)
pca_results = {'Num_of_components': [],
               'Explained_variance': [],
               'Sum_Explained_variance': [],
               'Terms':[]
               }
for n in range (3, 21):
    pca_model = (CustomPCA(n_components=n)
                 .fit(features_pca_scaled))
    pca_results['Num_of_components'].append(n)
    pca_results['Explained_variance'].append(pca_model.explained_variance_ratio_)
    pca_results['Sum_Explained_variance'].append(sum(pca_model.explained_variance_ratio_))
    all_terms = []
    for i in range(n):
        # A loading is "significant" when, rounded to 1 dp, it is > 0.1 or < -0.1.
        scores = [score for score in pca_model.components_[i].round(1) if score>0.1 or score<-0.1]
        # tokens_sign = (pca_model.components_[i].round(1)>0.1) or (pca_model.components_[i].round(1)<-0.1)
        terms = tokens.token[(pca_model.components_[i].round(1)>0.1) | (pca_model.components_[i].round(1)<-0.1)]
        all_terms.append(list(zip(terms, scores)))
    pca_results['Terms'].append(all_terms)
pca_results_df = pd.DataFrame(pca_results)
```
Example with a custom PCA with 5 components, printing the variance ratio for each component and the factor loadings:
```
# Worked example: a 5-component CustomPCA — per-component explained-variance
# ratio, the loadings of component 1, and the terms recorded for the first
# sweep entry (index 0 -> n_components = 3).
pca_model = (CustomPCA(n_components=5)
             .fit(features_pca_scaled))
print(pca_model.explained_variance_ratio_)
pca_model.components_[1]
pca_results_df['Terms'][0]
```
Saving results of the PCA to the csv file 'results/mediacloud_pca_results_shortlist.csv'. Plot the sum of explained variance based on the number of components:
```
# Persist the PCA sweep results and plot the summed explained variance
# against the number of components.
pca_results_df.to_csv('results/mediacloud_pca_results_shortlist.csv')
pca_results_df.plot.line(x='Num_of_components', y='Sum_Explained_variance')
```
Save the 'significant' terms for all components (each n of components) with corresponding factor loadings to csv file 'results/pca_terms.csv':
```
# Save the significant terms (with loadings) for every sweep entry;
# index 5 corresponds to n_components = 8 (the sweep starts at 3).
pca_results_df['Terms'].to_csv('results/pca_terms.csv')
print(pca_results_df['Terms'][5])
```
A plot that shows cumulative explained variance and explained variance of each component (with max 20):
```
# Fit a 20-component PCA and plot per-component vs cumulative explained variance.
cumulative_pca = PCA(n_components=20).fit(features_pca_scaled)
variance_ratios = cumulative_pca.explained_variance_ratio_
component_ids = range(1, cumulative_pca.n_components_ + 1)

fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(component_ids, variance_ratios, lw=2, label='explained variance')
ax.plot(component_ids, np.cumsum(variance_ratios), lw=2, label='cumulative explained variance')
ax.set_title('PCA on filtered tokens : explained variance of components')
ax.set_xlabel('principal component')
ax.set_ylabel('explained variance')
plt.show()
```
## Bigrams
Creating bigrams from the original texts. The bigrams are then saved to file 'most_frequent_tokens_bigrams.csv' and reviewed the same way as the unigrams in the file 'most_frequent_tokens_bigrams.csv' (<a href='most_frequent_tokens_bigrams.csv'>link</a>). The final list contains 87 terms
```
# Detect collocations with gensim Phrases; a higher threshold merges fewer phrases.
bigram = gensim.models.Phrases(df['tokens'], min_count=3, threshold=50) # higher threshold fewer phrases.
bigram_mod = gensim.models.phrases.Phraser(bigram)
def make_bigrams(texts):
    """Merge detected collocations into single tokens for each document."""
    return [bigram_mod[doc] for doc in texts]
df['bigrams'] = make_bigrams(df['tokens'])
df['bigrams']
# 500 most frequent bigram tokens over the whole corpus.
flatten_bigrams = [token for sublist in df['bigrams'].tolist() for token in sublist]
counter_bigrams = Counter(flatten_bigrams)
most_frequent = counter_bigrams.most_common(500)
# Save (token, count) pairs to CSV for manual review.
with open('most_frequent_tokens_bigrams.csv', "w") as the_file:
    csv.register_dialect("custom", delimiter=",", skipinitialspace=True)
    writer = csv.writer(the_file, dialect="custom")
    for tup in most_frequent:
        writer.writerow(tup)
# Reviewed shortlist of frequent bigrams: (token, frequency). BUG FIX: this
# read was commented out, leaving ``tokens_bigrams`` undefined below.
tokens_bigrams = pd.read_csv('most_frequent_tokens_bigrams.csv', header=None, names=['token', 'frequency'])
cv = CountVectorizer(analyzer='word',
                     tokenizer=dummy_fun,
                     preprocessor=dummy_fun,
                     token_pattern=None)
data = cv.fit_transform(df['bigrams'])
tfidf_transformer = TfidfTransformer()
tfidf_matrix = tfidf_transformer.fit_transform(data)
tfidf_matrix
tfidf_dict_bigrams = cv.get_feature_names()  # vocabulary: all bigram tokens in the corpus
# Per document, keep the tf-idf scores of the reviewed bigrams only. The
# membership set is hoisted out of the loop (the original rebuilt the list
# for every row/column pair); the per-row debug print is dropped.
frequent_bigrams = set(tokens_bigrams['token'])
df['transformed_tokens_bigrams'] = np.empty((len(df), 0)).tolist()
for i in range(tfidf_matrix.shape[0]):
    row = tfidf_matrix[i].toarray()[0]
    df.at[i, 'transformed_tokens_bigrams'] = [
        row[j] for j in range(len(tfidf_dict_bigrams)) if tfidf_dict_bigrams[j] in frequent_bigrams
    ]
# Persist per-document bigram tf-idf vectors.
with open("tfidf_transformed_bigrams.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows(df['transformed_tokens_bigrams'].tolist())
# Stack into a dense (n_docs, n_bigrams) array.
temp = df['transformed_tokens_bigrams'].tolist()
temp = [np.array(x) for x in temp]
tfidf_frequent_bigrams = np.array(temp)
tfidf_frequent_bigrams.shape
# KMO sampling adequacy for the bigram matrix (one value per term + overall).
kmo_all_bi, kmo_model_bi = calculate_kmo(tfidf_frequent_bigrams)
kmo_model_bi
# Keep only terms (columns) with KMO > 0.5; low-KMO columns are zeroed so the
# matrix keeps its shape. BUG FIX: the original assigned rows while iterating
# over column indices, mixing up samples and features.
features_bigrams = np.zeros((tfidf_frequent_bigrams.shape[0], len(kmo_all_bi)))
for i in range(len(kmo_all_bi)):
    if kmo_all_bi[i] > 0.5:
        features_bigrams[:, i] = tfidf_frequent_bigrams[:, i]
print(len(features_bigrams), tfidf_frequent_bigrams.shape)
# Standardize the bigram features and sweep n_components from 3 to 20.
scaler = StandardScaler()
features_bi_scaled = scaler.fit_transform(features_bigrams)
pca_results_bi = {'Num_of_components': [],
                  'Explained_variance': [],
                  'Terms':[]
                  }
for n in range (3, 21):
    pca_model = (CustomPCA(n_components=n)
                 .fit(features_bi_scaled))
    pca_results_bi['Num_of_components'].append(n)
    # Here 'Explained_variance' stores the *summed* ratio across components.
    pca_results_bi['Explained_variance'].append(sum(pca_model.explained_variance_ratio_))
    all_terms = []
    for i in range(n):
        # NOTE(review): unlike the unigram sweep, only positive loadings
        # (> 0.1) are kept here — negative loadings are silently dropped.
        # Confirm whether that is intentional.
        scores = [score for score in pca_model.components_[i].round(1) if score>0.1]
        # tokens_sign = (pca_model.components_[i].round(1)>0.1) or (pca_model.components_[i].round(1)<-0.1)
        terms = tokens_bigrams.token[pca_model.components_[i].round(1)>0.1]
        all_terms.append(list(zip(terms, scores)))
    pca_results_bi['Terms'].append(all_terms)
pca_results_bi_df = pd.DataFrame(pca_results_bi)
# Worked example: 3-component PCA on the bigram features.
pca_model = (CustomPCA(n_components=3)
             .fit(features_bi_scaled))
print(pca_model.explained_variance_ratio_)
pca_model.components_[1]
pca_results_bi_df['Terms'][0]
# Build a (token x component) loading matrix for sweep entry 17
# (n_components = 20, since the sweep starts at 3): a token gets its loading
# where significant and 0 elsewhere.
temp = tokens_bigrams['token'].tolist()
pca_dict = {}
for token in temp:
    pca_dict[token] = []
    for topic in pca_results_bi_df['Terms'][17]:
        if token in [term[0] for term in topic]:
            pca_dict[token].append([term[1] for term in topic if term[0]==token][0])
        else:
            pca_dict[token].append(0)
pca_df = pd.DataFrame(pca_dict).transpose()
# Tokens with a non-zero loading on component 5.
pca_df[pca_df[5]!=0]
# Persist the bigram PCA results and plot explained variance vs n_components.
pca_results_bi_df.to_csv('results/mediacloud_pca_bigrams_results_shortlist.csv')
pca_results_bi_df.plot.line(x='Num_of_components', y='Explained_variance')
# Per-component and cumulative explained variance for a 20-component PCA.
cummulative_pca = PCA(n_components=20).fit(features_bi_scaled)
fig, ax = plt.subplots(figsize=(8,6))
x_values = range(1, cummulative_pca.n_components_+1)
ax.plot(x_values, cummulative_pca.explained_variance_ratio_, lw=2, label='explained variance')
ax.plot(x_values, np.cumsum(cummulative_pca.explained_variance_ratio_), lw=2, label='cumulative explained variance')
ax.set_title('PCA on filtered tokens : explained variance of components')
ax.set_xlabel('principal component')
ax.set_ylabel('explained variance')
plt.show()
```
## Toy example
The perfect curated list is created, that contains 39 words for 4 frames: economic, medical, travel and restrictions/prevention. The list is available <a href="most_frequent_tokens_toy.csv">here</a>
```
# Curated toy word list (4 frames) and the reviewed September bigrams.
tokens_toy = pd.read_csv('most_frequent_tokens_toy.csv', header=None, names=['token', 'frequency'])
toy = tokens_toy['token'].sort_values().tolist()
bigrams_sep = pd.read_csv('most_frequent_bigrams_SEP.csv', header=None, names=['token', 'frequency'])
tokens_sep = bigrams_sep['token'].sort_values().tolist()
# tokens_bigrams = pd.read_csv('most_frequent_tokens_bigrams.csv', header=None, names=['token', 'frequency'])
def dummy_fun(doc):
    """Identity analyzer/tokenizer: the texts are already tokenized lists."""
    return doc
# tf-idf over the September slice only (rows from 85298 on).
cv = CountVectorizer(analyzer='word',
                     tokenizer=dummy_fun,
                     preprocessor=dummy_fun,
                     token_pattern=None)
data = cv.fit_transform(df['bigrams'][85298:])
tfidf_transformer = TfidfTransformer()
tfidf_matrix = tfidf_transformer.fit_transform(data)
# Flatten the index and drop leftover CSV index columns.
df.reset_index(inplace=True)
df.drop(['Unnamed: 0','Unnamed: 0.1'],axis=1,inplace=True)
tfidf_matrix.shape[0]
# tfidf_dict = cv.get_feature_names() #all tokens there are in the original texts
tfidf_dict_bigrams = cv.get_feature_names()
# tf-idf vectors restricted to the 86 reviewed September bigrams; membership
# set hoisted out of the loop and the per-row debug print dropped.
transformed_tokens_sep = np.empty((tfidf_matrix.shape[0], 86))
frequent_sep = set(tokens_sep)
for i in range(0, tfidf_matrix.shape[0]):
    row = tfidf_matrix[i].toarray()[0]
    transformed_tokens_sep[i] = [
        row[j] for j in range(len(tfidf_dict_bigrams)) if tfidf_dict_bigrams[j] in frequent_sep
    ]
# NOTE(review): 'transformed_tokens_toy2' is never created in this notebook,
# so this write raises as-is — it presumably should dump
# ``transformed_tokens_sep``; confirm before relying on the CSV.
with open("tfidf_transformed_toy_sep.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows(df['transformed_tokens_toy2'][1136:].tolist())
# Read the vectors back as strings and stack them.
with open("tfidf_transformed_toy_sep.csv", newline='') as csvfile:
    data = list(csv.reader(csvfile))
temp = data
temp = [np.array(x) for x in temp]
tfidf_frequent_toy = np.array(temp)
tfidf_frequent_toy.shape
# BUG FIX: np.float was deprecated in NumPy 1.20 and later removed;
# use the builtin float instead.
tfidf_frequent_sep = transformed_tokens_sep[:1136].astype(float)
kmo_all_toy, kmo_model_toy = calculate_kmo(tfidf_frequent_sep)
kmo_model_toy
# Keep only terms (columns) with KMO > 0.5; low-KMO columns are zeroed so the
# matrix keeps its shape. BUG FIX: the original assigned rows while iterating
# over column indices, mixing up samples and features.
features_sep = np.zeros((tfidf_frequent_sep.shape[0], len(kmo_all_toy)))
for i in range(len(kmo_all_toy)):
    if kmo_all_toy[i] > 0.5:
        features_sep[:, i] = tfidf_frequent_sep[:, i]
print(len(features_sep), tfidf_frequent_sep.shape)
```
Kmeans clustering. For each number of k model is created and fitted on above features (consisting of 36 manually chosen words). Number of texts assigned to each cluster is printed below. Then top words are presented and a tsne graph of them in 2d
```
# K-means with a fixed seed for reproducibility; 3 clusters.
random_state = 20
k = 3
model = KMeans(n_clusters=k, random_state=random_state)
clusters = model.fit_predict(features_sep)
# tsne = TSNE().fit_transform(features_sep)
# Cluster sizes.
Counter(clusters)
# max_items = np.random.choice(range(features_toy.shape[0]), size=10000, replace=False)
#FEBRUARY
def get_top_keywords(data, clusters, labels, n_terms):
df = pd.DataFrame(data).groupby(clusters).mean()
for i,r in df.iterrows():
print('\nCluster {}'.format(i))
print(','.join([labels[t] for t in np.argsort(r)[-n_terms:]]))
# Top 5 keywords per cluster.
# NOTE(review): ``features_toy`` is not defined anywhere in this notebook
# (only ``features_sep`` is) — this call raises a NameError as written.
# Also, tokens_toy is a DataFrame where a per-feature label sequence is
# expected; confirm the intended argument.
get_top_keywords(features_toy, clusters, tokens_toy, 5)
#SEPTEMBER
get_top_keywords(features_sep, clusters, tokens_sep, 5)
#kmeans and dbscan, 3 to 5 k
def plot_tsne_pca(tsne, labels):
    """Scatter a 2-D t-SNE embedding, one HSV colour per cluster label."""
    top = max(labels)
    colours = [cm.hsv(label / top) for label in labels]
    plt.scatter(tsne[:, 0], tsne[:, 1], c=colours)
    plt.title('TSNE Cluster Plot')
# Plot the non-noise clusters. NOTE(review): the ``tsne`` embedding is
# commented out above, so this raises a NameError until it is recomputed.
plot_tsne_pca(tsne[clusters!=0], clusters[clusters!=0])
# plot_tsne_pca(tsne, clusters)
```
DBSCAN.
```
# Grid search over DBSCAN hyper-parameters. The initial eps/min_samples
# values are immediately overwritten by the loop variables.
eps = 3
min_samples = 3
dbscan = {
    'eps':[],
    'min_samples':[],
    'labels':[]
}
for eps in np.arange(0.01,0.05, 0.01):
    for min_samples in range (3, 10, 1):
        # NOTE(review): ``features_toy`` is not defined in this notebook —
        # presumably this should run on ``features_sep``; confirm.
        db1 = DBSCAN(eps=eps, min_samples=min_samples).fit(features_toy)
        labels1 = db1.labels_
        print(f"eps: {eps}, min samples: {min_samples}")
        # Label -1 marks noise points.
        print(Counter(labels1))
        dbscan['eps'].append(eps)
        dbscan['min_samples'].append(min_samples)
        dbscan['labels'].append(labels1)
```
PCA. Number of components ranging from 3 to 5, printing explained variance ratio, factor loading matrix and significant terms for each component.
```
# Standardize and fit a 4-component CustomPCA on the September toy features.
scaler = StandardScaler()
features_toy_scaled = scaler.fit_transform(features_sep)
pca_model_toy = (CustomPCA(n_components=4)
                 .fit(features_toy_scaled))
print(pca_model_toy.explained_variance_ratio_)
pca_model_toy.components_[1]
# Collect, per component, the terms whose loading (rounded to 2 dp) is >= 0.2.
all_terms = []
for i in range(4):
    scores = [score for score in pca_model_toy.components_[i].round(2) if score>=0.2]
    print(scores)
    terms = bigrams_sep.token[pca_model_toy.components_[i].round(2)>=0.2]
    all_terms.append(list(zip(terms, scores)))
all_terms
pca_model_toy.components_[3]
```
| github_jupyter |
<a href="https://colab.research.google.com/github/MasakazuNaganuma/WhirlwindTourOfPython/blob/master/08-Defining-Functions.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
<!--BOOK_INFORMATION-->
<img align="left" style="padding-right:10px;" src="https://github.com/MasakazuNaganuma/WhirlwindTourOfPython/blob/master/fig/cover-small.jpg?raw=1">
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).*
*The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).*
<!--NAVIGATION-->
< [Control Flow](07-Control-Flow-Statements.ipynb) | [Contents](Index.ipynb) | [Errors and Exceptions](09-Errors-and-Exceptions.ipynb) >
# 関数の定義と使用法
# Defining and Using Functions
So far, our scripts have been simple, single-use code blocks.
One way to organize our Python code and to make it more readable and reusable is to factor-out useful pieces into reusable *functions*.
Here we'll cover two ways of creating functions: the ``def`` statement, useful for any type of function, and the ``lambda`` statement, useful for creating short anonymous functions.
これまでのところ、私たちのスクリプトはシンプルな単一のコードブロックでした。
Pythonのコードを整理し、より読みやすく再利用可能なものにするための一つの方法は、便利な部分を再利用可能な*関数*に分解することです。
ここでは、関数を作成するための2つの方法について説明します: ``def`` 文はどんなタイプの関数にも使え、``lambda`` 文は短い無名関数を作成するのに便利です。
## 関数を使う
## Using Functions
Functions are groups of code that have a name, and can be called using parentheses.
We've seen functions before. For example, ``print`` in Python 3 is a function:
関数は名前がついていて、括弧を使って呼び出すことができるコードのグループです。
関数は以前にも見たことがあります。例えば、Python 3 の ``print`` は関数です。
```
print('abc')
```
Here ``print`` is the function name, and ``'abc'`` is the function's *argument*.
In addition to arguments, there are *keyword arguments* that are specified by name.
One available keyword argument for the ``print()`` function (in Python 3) is ``sep``, which tells what character or characters should be used to separate multiple items:
ここで ``print`` は関数名、``'abc'`` は関数の *引数* です。
引数の他に、名前で指定される*キーワード引数*もあります。
(Python 3では) ``print()``関数で利用可能なキーワード引数の1つは ``sep`` で、複数の項目を区切る際にどの文字を使うかを指定します。
```
print(1, 2, 3)
print(1, 2, 3, sep='--')
```
When non-keyword arguments are used together with keyword arguments, the keyword arguments must come at the end.
キーワード以外の引数をキーワード引数と一緒に使用する場合、キーワード引数は最後に来る必要があります。
## 関数の定義
## Defining Functions
Functions become even more useful when we begin to define our own, organizing functionality to be used in multiple places.
In Python, functions are defined with the ``def`` statement.
For example, we can encapsulate a version of our Fibonacci sequence code from the previous section as follows:
関数は、複数の場所で使えるように機能を整理して定義するようになると、さらに便利になります。
Pythonでは、関数は ``def`` 文で定義されます。
例えば、先ほどのフィボナッチ数列のコードを以下のようにカプセル化することができます。
```
def fibonacci(N):
    """Return a list of the first N Fibonacci numbers (1, 1, 2, 3, ...)."""
    sequence = []
    previous, current = 0, 1
    while len(sequence) < N:
        previous, current = current, previous + current
        sequence.append(previous)
    return sequence
```
Now we have a function named ``fibonacci`` which takes a single argument ``N``, does something with this argument, and ``return``s a value; in this case, a list of the first ``N`` Fibonacci numbers:
ここでは ``fibonacci`` という名前の関数があります。これは一つの引数 ``N`` を受け取り、この引数で何かを行い、 ``return`` は値を返します。
```
fibonacci(10)
```
If you're familiar with strongly-typed languages like ``C``, you'll immediately notice that there is no type information associated with the function inputs or outputs.
Python functions can return any Python object, simple or compound, which means constructs that may be difficult in other languages are straightforward in Python.
For example, multiple return values are simply put in a tuple, which is indicated by commas:
もしあなたが ``C`` のような型付けの強い言語に慣れているなら、関数の入力や出力に型情報がないことにすぐに気づくでしょう。
Pythonの関数はシンプルなものでも複合的なものでも、どんなPythonオブジェクトでも返すことができるので、他の言語では難しいかもしれませんが、Pythonでは簡単にできます。
例えば、複数の戻り値は単純にタプルに入れられ、カンマで示されます。
```
def real_imag_conj(val):
    """Return (real part, imaginary part, complex conjugate) of ``val``."""
    return (val.real, val.imag, val.conjugate())

# Multiple return values arrive as a tuple and unpack directly.
r, i, c = real_imag_conj(3 + 4j)
print(r, i, c)
```
## デフォルトの引数値
## Default Argument Values
Often when defining a function, there are certain values that we want the function to use *most* of the time, but we'd also like to give the user some flexibility.
In this case, we can use *default values* for arguments.
Consider the ``fibonacci`` function from before.
What if we would like the user to be able to play with the starting values?
We could do that as follows:
関数を定義する際には、関数が常に*ほとんど*使ってほしい値がありますが、ユーザーにも柔軟性を与えたいと思うことがよくあります。
この場合、引数に *default 値* を使うことができます。
先ほどの ``fibonacci`` 関数を考えてみましょう。
ユーザが開始値を操作できるようにしたいとしたらどうでしょうか?
次のようにします。
```
def fibonacci(N, a=0, b=1):
    """Return the first N Fibonacci-like numbers seeded with ``a`` and ``b``."""
    out = []
    while len(out) < N:
        a, b = b, a + b
        out.append(a)
    return out
```
With a single argument, the result of the function call is identical to before:
1つの引数で、関数呼び出しの結果は以前と同じです。
```
fibonacci(10)
```
But now we can use the function to explore new things, such as the effect of new starting values:
しかし、新しい開始値の効果など、新しいことを探求するためにこの関数を使うことができるようになりました。
```
fibonacci(10, 0, 2)
```
The values can also be specified by name if desired, in which case the order of the named values does not matter:
必要に応じて値を名前で指定することもできますが、その場合は名前のついた値の順番は問題になりません。
```
fibonacci(10, b=3, a=1)
```
## ``*args`` と ``**kwargs``. 柔軟な引数
## ``*args`` and ``**kwargs``: Flexible Arguments
Sometimes you might wish to write a function in which you don't initially know how many arguments the user will pass.
In this case, you can use the special form ``*args`` and ``**kwargs`` to catch all arguments that are passed.
Here is an example:
時には、ユーザが何個の引数を渡すかわからないような関数を書きたいと思うことがあるかもしれません。
このような場合には ``*args`` と ``**kwargs`` という特殊な形式を使って、渡されたすべての引数をキャッチすることができます。
以下に例を示します。
```
def catch_all(*args, **kwargs):
    """Demonstrate flexible arguments: echo the positional and keyword args."""
    for label, captured in [("args =", args), ("kwargs = ", kwargs)]:
        print(label, captured)

catch_all(1, 2, 3, a=4, b=5)
catch_all('a', keyword=2)
```
Here it is not the names ``args`` and ``kwargs`` that are important, but the ``*`` characters preceding them.
``args`` and ``kwargs`` are just the variable names often used by convention, short for "arguments" and "keyword arguments".
The operative difference is the asterisk characters: a single ``*`` before a variable means "expand this as a sequence", while a double ``**`` before a variable means "expand this as a dictionary".
In fact, this syntax can be used not only with the function definition, but with the function call as well!
ここで重要なのは ``args`` や ``kwargs`` という名前ではなく、それらの前にある ``*`` という文字です。
``args``と``kwargs``は規約でよく使われる変数名で、「引数」と「キーワード引数」の略です。
操作上の違いはアスタリスク文字です。変数の前にある単一の ``*`` は「シーケンスとして展開する」ことを意味し、変数の前にある二重の ``**`` は「辞書として展開する」ことを意味します。
実際、この構文は関数定義だけでなく、関数呼び出しでも使用できます。
```
inputs = (1, 2, 3)
keywords = {'pi': 3.14}
catch_all(*inputs, **keywords)
```
## 匿名 (``lambda``) 関数
## Anonymous (``lambda``) Functions
Earlier we quickly covered the most common way of defining functions, the ``def`` statement.
You'll likely come across another way of defining short, one-off functions with the ``lambda`` statement.
It looks something like this:
先ほど、関数を定義する最も一般的な方法である ``def`` 文について簡単に説明しました。
また、他にも ``lambda`` 文を使った短い一回限りの関数を定義する方法に出くわすこともあるでしょう。
これは次のようなものです。
```
add = lambda x, y: x + y
add(1, 2)
```
このラムダ関数は、大まかには
This lambda function is roughly equivalent to
```
def add(x, y):
return x + y
```
So why would you ever want to use such a thing?
Primarily, it comes down to the fact that *everything is an object* in Python, even functions themselves!
That means that functions can be passed as arguments to functions.
As an example of this, suppose we have some data stored in a list of dictionaries:
では、なぜそのようなものを使いたいのでしょうか?
主に、Pythonでは*すべてのものがオブジェクト*であるという事実に起因しています。
つまり、関数は関数の引数として渡すことができるということです。
この例として、あるデータが辞書のリストに保存されているとします。
```
# Sample records: one dict per person (name parts + year of birth).
data = [{'first':'Guido', 'last':'Van Rossum', 'YOB':1956},
        {'first':'Grace', 'last':'Hopper', 'YOB':1906},
        {'first':'Alan', 'last':'Turing', 'YOB':1912}]
```
Now suppose we want to sort this data.
Python has a ``sorted`` function that does this:
さて、このデータをソートしたいとしましょう。
Pythonにはこれを行う ``sorted`` 関数があります。
```
sorted([2,4,3,5,1,6])
```
But dictionaries are not orderable: we need a way to tell the function *how* to sort our data.
We can do this by specifying the ``key`` function, a function which given an item returns the sorting key for that item:
しかし、辞書はそのままでは順序付けできません。データをどのようにソートするかを関数に伝える方法が必要です。
これは ``key`` 関数を指定することで実現できます。
```
# sort alphabetically by first name
sorted(data, key=lambda item: item['first'])
# sort by year of birth
sorted(data, key=lambda item: item['YOB'])
```
While these key functions could certainly be created by the normal, ``def`` syntax, the ``lambda`` syntax is convenient for such short one-off functions like these.
これらの主要な関数は通常の ``def`` 構文で作成することができますが、 ``lambda`` 構文はこのような短い単発の関数を作成するのに便利です。
<!--NAVIGATION-->
< [Control Flow](07-Control-Flow-Statements.ipynb) | [Contents](Index.ipynb) | [Errors and Exceptions](09-Errors-and-Exceptions.ipynb) >
| github_jupyter |
graded = 9/9
# Homework assignment #3
These problem sets focus on using the Beautiful Soup library to scrape web pages.
## Problem Set #1: Basic scraping
I've made a web page for you to scrape. It's available [here](http://static.decontextualize.com/widgets2016.html). The page concerns the catalog of a famous [widget](http://en.wikipedia.org/wiki/Widget) company. You'll be answering several questions about this web page. In the cell below, I've written some code so that you end up with a variable called `html_str` that contains the HTML source code of the page, and a variable `document` that stores a Beautiful Soup object.
```
# Fetch the exercise page and parse it; ``document`` is the BeautifulSoup tree.
from bs4 import BeautifulSoup
from urllib.request import urlopen
html_str = urlopen("http://static.decontextualize.com/widgets2016.html").read()
document = BeautifulSoup(html_str, "html.parser")
```
Now, in the cell below, use Beautiful Soup to write an expression that evaluates to the number of `<h3>` tags contained in `widgets2016.html`.
```
# Number of <h3> tags on the page. find_all returns a list, so len() gives
# the count directly — no manual counter loop needed.
h3_tags = document.find_all('h3')
h3_tags_count = len(h3_tags)
print(h3_tags_count)
```
Now, in the cell below, write an expression or series of statements that displays the telephone number beneath the "Widget Catalog" header.
```
# Inspecting the webpage with developer tools shows the telephone number is
# stored in an <a> tag with the class 'tel'.
a_tags = document.find_all('a', {'class':'tel'})
for tag in a_tags:
    print(tag.string)
# A comprehension ([tag.string for tag in a_tags]) would collect the values
# instead of printing them.
```
In the cell below, use Beautiful Soup to write some code that prints the names of all the widgets on the page. After your code has executed, `widget_names` should evaluate to a list that looks like this (though not necessarily in this order):
```
Skinner Widget
Widget For Furtiveness
Widget For Strawman
Jittery Widget
Silver Widget
Divided Widget
Manicurist Widget
Infinite Widget
Yellow-Tipped Widget
Unshakable Widget
Self-Knowledge Widget
Widget For Cinema
```
```
# Widget names live in <td class="wname"> cells inside <table class="widgetlist">.
search_table = document.find_all('table',{'class': 'widgetlist'})
#print(search_table)
# Calling a tag like table('td', ...) is shorthand for table.find_all('td', ...).
tables_content = [table('td', {'class':'wname'}) for table in search_table]
#print(tables_content)
for table in tables_content:
    for single_table in table:
        print(single_table.string)
```
## Problem set #2: Widget dictionaries
For this problem set, we'll continue to use the HTML page from the previous problem set. In the cell below, I've made an empty list and assigned it to a variable called `widgets`. Write code that populates this list with dictionaries, one dictionary per widget in the source file. The keys of each dictionary should be `partno`, `wname`, `price`, and `quantity`, and the value for each of the keys should be the value for the corresponding column for each row. After executing the cell, your list should look something like this:
```
[{'partno': 'C1-9476',
'price': '$2.70',
'quantity': u'512',
'wname': 'Skinner Widget'},
{'partno': 'JDJ-32/V',
'price': '$9.36',
'quantity': '967',
'wname': u'Widget For Furtiveness'},
...several items omitted...
{'partno': '5B-941/F',
'price': '$13.26',
'quantity': '919',
'wname': 'Widget For Cinema'}]
```
And this expression:
widgets[5]['partno']
... should evaluate to:
LH-74/O
```
widgets = []
# STEP 1: each widget is one <tr class="winfo"> row.
for tr_tags in document.find_all('tr', {'class': 'winfo'}):
    # STEP 2: map each <td>'s class name (partno/wname/price/quantity) to its text.
    tr_dict ={}
    for td_tags in tr_tags.find_all('td'):
        # The 'class' attribute is a list of class names; use each as a key.
        td_tags_class = td_tags['class']
        for tag in td_tags_class:
            tr_dict[tag] = td_tags.string
    # STEP 3: one dict per widget.
    widgets.append(tr_dict)
widgets
#widgets[5]['partno']
```
In the cell below, duplicate your code from the previous question. Modify the code to ensure that the values for `price` and `quantity` in each dictionary are floating-point numbers and integers, respectively. I.e., after executing the cell, your code should display something like this:
[{'partno': 'C1-9476',
'price': 2.7,
'quantity': 512,
'widgetname': 'Skinner Widget'},
{'partno': 'JDJ-32/V',
'price': 9.36,
'quantity': 967,
'widgetname': 'Widget For Furtiveness'},
... some items omitted ...
{'partno': '5B-941/F',
'price': 13.26,
'quantity': 919,
'widgetname': 'Widget For Cinema'}]
(Hint: Use the `float()` and `int()` functions. You may need to use string slices to convert the `price` field to a floating-point number.)
```
# Same scrape as above, but with price cast to float (dropping the leading
# '$') and quantity cast to int. Variables carry an N suffix so the previous
# cell's output is not clobbered.
widgetsN = []
for trN_tags in document.find_all('tr', {'class': 'winfo'}):
    trN_dict ={}
    for tdN_tags in trN_tags.find_all('td'):
        tdN_tags_class = tdN_tags['class']
        for tagN in tdN_tags_class:
            if tagN == 'price':
                # Strip the '$' before converting to float.
                sliced_tag_string = tdN_tags.string[1:]
                trN_dict[tagN] = float(sliced_tag_string)
            elif tagN == 'quantity':
                trN_dict[tagN] = int(tdN_tags.string)
            else:
                trN_dict[tagN] = tdN_tags.string
    widgetsN.append(trN_dict)
widgetsN
```
Great! I hope you're having fun. In the cell below, write an expression or series of statements that uses the `widgets` list created in the cell above to calculate the total number of widgets that the factory has in its warehouse.
Expected output: `7928`
```
# Total widgets in the warehouse: sum the quantities directly with a
# generator expression — no intermediate list needed.
sum(element['quantity'] for element in widgetsN)
```
In the cell below, write some Python code that prints the names of widgets whose price is above $9.30.
Expected output:
```
Widget For Furtiveness
Jittery Widget
Silver Widget
Infinite Widget
Widget For Cinema
```
```
# Names of widgets priced above $9.30.
for widget in widgetsN:
    if widget['price'] > 9.30:
        print(widget['wname'])
```
## Problem set #3: Sibling rivalries
In the following problem set, you will yet again be working with the data in `widgets2016.html`. In order to accomplish the tasks in this problem set, you'll need to learn about Beautiful Soup's `.find_next_sibling()` method. Here's some information about that method, cribbed from the notes:
Often, the tags we're looking for don't have a distinguishing characteristic, like a class attribute, that allows us to find them using `.find()` and `.find_all()`, and the tags also aren't in a parent-child relationship. This can be tricky! For example, take the following HTML snippet, (which I've assigned to a string called `example_html`):
```
# Two cheeses: each an <h2> heading followed by a sibling <p> description.
example_html = """
<h2>Camembert</h2>
<p>A soft cheese made in the Camembert region of France.</p>
<h2>Cheddar</h2>
<p>A yellow cheese made in the Cheddar region of... France, probably, idk whatevs.</p>
"""
```
If our task was to create a dictionary that maps the name of the cheese to the description that follows in the `<p>` tag directly afterward, we'd be out of luck. Fortunately, Beautiful Soup has a `.find_next_sibling()` method, which allows us to search for the next tag that is a sibling of the tag you're calling it on (i.e., the two tags share a parent), that also matches particular criteria. So, for example, to accomplish the task outlined above:
```
# Map each cheese name (<h2>) to the description in the <p> that follows it.
example_doc = BeautifulSoup(example_html, "html.parser")
cheese_dict = {
    heading.string: heading.find_next_sibling('p').string
    for heading in example_doc.find_all('h2')
}
cheese_dict
```
With that knowledge in mind, let's go back to our widgets. In the cell below, write code that uses Beautiful Soup, and in particular the `.find_next_sibling()` method, to print the part numbers of the widgets that are in the table *just beneath* the header "Hallowed Widgets."
Expected output:
```
MZ-556/B
QV-730
T1-9731
5B-941/F
```
```
# Part numbers in the table directly after the "Hallowed widgets" header,
# found via the header's next <table> sibling.
for h3_tags in document.find_all('h3'):
    if h3_tags.string == 'Hallowed widgets':
        hallowed_table = h3_tags.find_next_sibling('table')
        for element in hallowed_table.find_all('td', {'class':'partno'}):
            print(element.string)
```
Okay, now, the final task. If you can accomplish this, you are truly an expert web scraper. I'll have little web scraper certificates made up and I'll give you one, if you manage to do this thing. And I know you can do it!
In the cell below, I've created a variable `category_counts` and assigned to it an empty dictionary. Write code to populate this dictionary so that its keys are "categories" of widgets (e.g., the contents of the `<h3>` tags on the page: "Forensic Widgets", "Mood widgets", "Hallowed Widgets") and the value for each key is the number of widgets that occur in that category. I.e., after your code has been executed, the dictionary `category_counts` should look like this:
```
{'Forensic Widgets': 3,
'Hallowed widgets': 4,
'Mood widgets': 2,
'Wondrous widgets': 3}
```
```
# Count widgets per category: each <h3> heading is followed by a table whose
# <tr class="winfo"> rows are the widgets in that category.
category_counts = {}
for x_tags in document.find_all('h3'):
    x_table = x_tags.find_next_sibling('table')
    tr_info_tags = x_table.find_all('tr', {'class':'winfo'})
    category_counts[x_tags.string] = len(tr_info_tags)
category_counts
```
Congratulations! You're done.
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# 정형 데이터 다루기
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/structured_data/feature_columns">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
TensorFlow.org에서 보기</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/tutorials/structured_data/feature_columns.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
구글 코랩(Colab)에서 실행하기</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/tutorials/structured_data/feature_columns.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
깃허브(GitHub) 소스 보기</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/tutorials/structured_data/feature_columns.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도
불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.
이 번역에 개선할 부분이 있다면
[tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n/) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.
문서 번역이나 리뷰에 참여하려면
[docs-ko@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로
메일을 보내주시기 바랍니다.
이 튜토리얼은 정형 데이터(structured data)를 다루는 방법을 소개합니다(예를 들어 CSV에서 읽은 표 형식의 데이터). [케라스](https://www.tensorflow.org/guide/keras)를 사용하여 모델을 정의하고 [특성 열](https://www.tensorflow.org/guide/feature_columns)(feature column)을 사용하여 CSV의 열을 모델 훈련에 필요한 특성으로 매핑하겠습니다. 이 튜토리얼은 다음 내용을 포함합니다:
* [판다스](https://pandas.pydata.org/)(Pandas)를 사용하여 CSV 파일을 읽기
* [tf.data](https://www.tensorflow.org/guide/datasets)를 사용하여 행을 섞고 배치로 나누는 입력 파이프라인(pipeline)을 만들기
* CSV의 열을 feature_column을 사용해 모델 훈련에 필요한 특성으로 매핑하기
* 케라스를 사용하여 모델 구축, 훈련, 평가하기
## 데이터셋
클리블랜드(Cleveland) 심장병 재단에서 제공한 작은 [데이터셋](https://archive.ics.uci.edu/ml/datasets/heart+Disease)을 사용하겠습니다. 이 CSV 파일은 수백 개의 행으로 이루어져 있습니다. 각 행은 환자 한 명을 나타내고 각 열은 환자에 대한 속성 값입니다. 이 정보를 사용해 환자의 심장병 발병 여부를 예측해 보겠습니다. 즉 이 데이터셋은 이진 분류 문제입니다.
다음은 이 데이터셋에 대한 [설명](https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/heart-disease.names)입니다. 수치형과 범주형 열이 모두 있다는 점을 주목하세요.
>열| 설명| 특성 타입 | 데이터 타입
>------------|--------------------|----------------------|-----------------
>Age | 나이 | 수치형 | 정수
>Sex | (1 = 남성; 0 = 여성) | 범주형 | 정수
>CP | 가슴 통증 유형 (0, 1, 2, 3, 4) | 범주형 | 정수
>Trestbpd | 안정 혈압 (병원 입원시 mm Hg) | 수치형 | 정수
>Chol | 혈청 콜레스테롤 (mg/dl) | 수치형 | 정수
>FBS | (공복 혈당 > 120 mg/dl) (1 = true; 0 = false) | 범주형 | 정수
>RestECG | 안정 심전도 결과 (0, 1, 2) | 범주형 | 정수
>Thalach | 최대 심박동수 | 수치형 | 정수
>Exang | 협심증 유발 운동 (1 = yes; 0 = no) | 범주형 | 정수
>Oldpeak | 비교적 안정되기까지 운동으로 유발되는 ST depression | 수치형 | 정수
>Slope | 최대 운동 ST segment의 기울기 | 수치형 | 실수
>CA | 형광 투시된 주요 혈관의 수 (0-3) | 수치형 | 정수
>Thal | 3 = 보통; 6 = 해결된 결함; 7 = 해결가능한 결함 | 범주형 | 문자열
>Target | 심장병 진단 (1 = true; 0 = false) | 분류 | 정수
## 텐서플로와 필요한 라이브러리 임포트하기
```
!pip install sklearn
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import feature_column
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
```
## 판다스로 데이터프레임 만들기
[판다스](https://pandas.pydata.org/)는 정형 데이터를 읽고 조작하는데 유용한 유틸리티 함수를 많이 제공하는 파이썬 라이브러리입니다. 판다스를 이용해 URL로부터 데이터를 다운로드하여 읽은 다음 데이터프레임으로 변환하겠습니다.
```
URL = 'https://storage.googleapis.com/applied-dl/heart.csv'
dataframe = pd.read_csv(URL)
dataframe.head()
```
## 데이터프레임을 훈련 세트, 검증 세트, 테스트 세트로 나누기
하나의 CSV 파일에서 데이터셋을 다운로드했습니다. 이를 훈련 세트, 검증 세트, 테스트 세트로 나누겠습니다.
```
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), '훈련 샘플')
print(len(val), '검증 샘플')
print(len(test), '테스트 샘플')
```
## tf.data를 사용하여 입력 파이프라인 만들기
그다음 [tf.data](https://www.tensorflow.org/guide/datasets)를 사용하여 데이터프레임을 감싸겠습니다. 이렇게 하면 특성 열을 사용하여 판다스 데이터프레임의 열을 모델 훈련에 필요한 특성으로 매핑할 수 있습니다. 아주 큰 CSV 파일(메모리에 들어갈 수 없을 정도로 큰 파일)을 다룬다면 tf.data로 디스크 디렉토리에서 데이터를 읽을 수 있습니다. 이런 내용은 이 튜토리얼에 포함되어 있지 않습니다.
```
# 판다스 데이터프레임으로부터 tf.data 데이터셋을 만들기 위한 함수
def df_to_dataset(dataframe, shuffle=True, batch_size=32):
    """Build a batched tf.data.Dataset from a pandas DataFrame.

    The 'target' column is split off as the label; the remaining columns
    are fed as a dict of feature tensors. When ``shuffle`` is True the
    buffer spans the whole frame, so each epoch sees a fresh ordering.
    """
    frame = dataframe.copy()
    labels = frame.pop('target')
    dataset = tf.data.Dataset.from_tensor_slices((dict(frame), labels))
    if shuffle:
        dataset = dataset.shuffle(buffer_size=len(frame))
    return dataset.batch(batch_size)
batch_size = 5 # 예제를 위해 작은 배치 크기를 사용합니다.
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
```
## 입력 파이프라인 이해하기
앞서 만든 입력 파이프라인을 호출하여 반환되는 데이터 포맷을 확인해 보겠습니다. 간단하게 출력하기 위해 작은 배치 크기를 사용합니다.
```
for feature_batch, label_batch in train_ds.take(1):
print('전체 특성:', list(feature_batch.keys()))
print('나이 특성의 배치:', feature_batch['age'])
print('타깃의 배치:', label_batch )
```
이 데이터셋은 (데이터프레임의) 열 이름을 키로 갖는 딕셔너리를 반환합니다. 데이터프레임 열의 값이 매핑되어 있습니다.
## 여러 종류의 특성 열 알아 보기
텐서플로는 여러 종류의 특성 열을 제공합니다. 이 절에서 몇 가지 특성 열을 만들어서 데이터프레임의 열을 변환하는 방법을 알아 보겠습니다.
```
# 특성 열을 시험해 보기 위해 샘플 배치를 만듭니다.
example_batch = next(iter(train_ds))[0]
# 특성 열을 만들고 배치 데이터를 변환하는 함수
def demo(feature_column):
    """Apply a single feature column to the sample batch and print the
    resulting dense tensor (uses the module-level ``example_batch``)."""
    dense_layer = layers.DenseFeatures(feature_column)
    transformed = dense_layer(example_batch)
    print(transformed.numpy())
```
### 수치형 열
특성 열의 출력은 모델의 입력이 됩니다(앞서 정의한 함수를 사용하여 데이터프레임의 각 열이 어떻게 변환되는지 알아 볼 것입니다). [수치형 열](https://www.tensorflow.org/api_docs/python/tf/feature_column/numeric_column)은 가장 간단한 종류의 열입니다. 이 열은 실수 특성을 표현하는데 사용됩니다. 이 열을 사용하면 모델은 데이터프레임 열의 값을 변형시키지 않고 그대로 전달 받습니다.
```
age = feature_column.numeric_column("age")
demo(age)
```
심장병 데이터셋 데이터프레임의 대부분 열은 수치형입니다.
### 버킷형 열
종종 모델에 수치 값을 바로 주입하기 원치 않을 때가 있습니다. 대신 수치 값의 구간을 나누어 이를 기반으로 범주형으로 변환합니다. 원본 데이터가 사람의 나이를 표현한다고 가정해 보죠. 나이를 수치형 열로 표현하는 대신 [버킷형 열](https://www.tensorflow.org/api_docs/python/tf/feature_column/bucketized_column)(bucketized column)을 사용하여 나이를 몇 개의 버킷(bucket)으로 분할할 수 있습니다. 다음에 원-핫 인코딩(one-hot encoding)된 값은 각 열이 매칭되는 나이 범위를 나타냅니다.
```
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
demo(age_buckets)
```
### 범주형 열
이 데이터셋에서 thal 열은 문자열입니다(예를 들어 'fixed', 'normal', 'reversible'). 모델에 문자열을 바로 주입할 수 없습니다. 대신 문자열을 먼저 수치형으로 매핑해야 합니다. 범주형 열(categorical column)을 사용하여 문자열을 원-핫 벡터로 표현할 수 있습니다. 문자열 목록은 [categorical_column_with_vocabulary_list](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list)를 사용하여 리스트로 전달하거나 [categorical_column_with_vocabulary_file](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_file)을 사용하여 파일에서 읽을 수 있습니다.
```
thal = feature_column.categorical_column_with_vocabulary_list(
'thal', ['fixed', 'normal', 'reversible'])
thal_one_hot = feature_column.indicator_column(thal)
demo(thal_one_hot)
```
더 복잡한 데이터셋에는 범주형(예를 들면 문자열)인 열이 많을 수 있습니다. 특성 열은 범주형 데이터를 다룰 때 진가가 발휘됩니다. 이 데이터셋에는 범주형 열이 하나 뿐이지만 다른 데이터셋에서 사용할 수 있는 여러 종류의 특성 열을 소개하겠습니다.
### 임베딩 열
가능한 문자열이 몇 개가 있는 것이 아니라 범주마다 수천 개 이상의 값이 있는 경우를 상상해 보겠습니다. 여러 가지 이유로 범주의 개수가 늘어남에 따라 원-핫 인코딩을 사용하여 신경망을 훈련시키는 것이 불가능해집니다. 임베딩 열(embedding column)을 사용하면 이런 제한을 극복할 수 있습니다. 고차원 원-핫 벡터로 데이터를 표현하는 대신 [임베딩 열](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column)을 사용하여 저차원으로 데이터를 표현합니다. 이 벡터는 0 또는 1이 아니라 각 원소에 어떤 숫자도 넣을 수 있는 밀집 벡터(dense vector)입니다. 임베딩의 크기(아래 예제에서는 8입니다)는 튜닝 대상 파라미터입니다.
핵심 포인트: 범주형 열에 가능한 값이 많을 때는 임베딩 열을 사용하는 것이 최선입니다. 여기에서는 예시를 목적으로 하나를 사용하지만 완전한 예제이므로 나중에 다른 데이터셋에 수정하여 적용할 수 있습니다.
```
# 임베딩 열의 입력은 앞서 만든 범주형 열입니다.
thal_embedding = feature_column.embedding_column(thal, dimension=8)
demo(thal_embedding)
```
### 해시 특성 열
가능한 값이 많은 범주형 열을 표현하는 또 다른 방법은 [categorical_column_with_hash_bucket](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_hash_bucket)을 사용하는 것입니다. 이 특성 열은 입력의 해시(hash) 값을 계산한 다음 `hash_bucket_size` 크기의 버킷 중 하나를 선택하여 문자열을 인코딩합니다. 이 열을 사용할 때는 어휘 목록을 제공할 필요가 없고 공간을 절약하기 위해 실제 범주의 개수보다 훨씬 작게 해시 버킷(bucket)의 크기를 정할 수 있습니다.
핵심 포인트: 이 기법의 큰 단점은 다른 문자열이 같은 버킷에 매핑될 수 있다는 것입니다. 그럼에도 실전에서는 일부 데이터셋에서 잘 작동합니다.
```
thal_hashed = feature_column.categorical_column_with_hash_bucket(
'thal', hash_bucket_size=1000)
demo(feature_column.indicator_column(thal_hashed))
```
### 교차 특성 열
여러 특성을 연결하여 하나의 특성으로 만드는 것을 [교차 특성](https://developers.google.com/machine-learning/glossary/#feature_cross)(feature cross)이라고 합니다. 모델이 특성의 조합에 대한 가중치를 학습할 수 있습니다. 이 예제에서는 age와 thal의 교차 특성을 만들어 보겠습니다. `crossed_column`은 모든 가능한 조합에 대한 해시 테이블을 만들지 않고 `hashed_column` 매개변수를 사용하여 해시 테이블의 크기를 선택합니다.
```
crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
demo(feature_column.indicator_column(crossed_feature))
```
## 사용할 열 선택하기
여러 가지 특성 열을 사용하는 방법을 보았으므로 이제 이를 사용하여 모델을 훈련하겠습니다. 이 튜토리얼의 목적은 특성 열을 사용하는 완전한 코드(예를 들면 작동 방식)를 제시하는 것이므로 임의로 몇 개의 열을 선택하여 모델을 훈련하겠습니다.
핵심 포인트: 제대로 된 모델을 만들어야 한다면 대용량의 데이터셋을 사용하고 어떤 특성을 포함하는 것이 가장 의미있는지, 또 어떻게 표현해야 할지 신중하게 생각하세요.
```
feature_columns = []
# 수치형 열
for header in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'slope', 'ca']:
feature_columns.append(feature_column.numeric_column(header))
# 버킷형 열
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
feature_columns.append(age_buckets)
# 범주형 열
thal = feature_column.categorical_column_with_vocabulary_list(
'thal', ['fixed', 'normal', 'reversible'])
thal_one_hot = feature_column.indicator_column(thal)
feature_columns.append(thal_one_hot)
# 임베딩 열
thal_embedding = feature_column.embedding_column(thal, dimension=8)
feature_columns.append(thal_embedding)
# 교차 특성 열
crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
crossed_feature = feature_column.indicator_column(crossed_feature)
feature_columns.append(crossed_feature)
```
### 특성 층 만들기
특성 열을 정의하고 나면 [DenseFeatures](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/DenseFeatures) 층을 사용해 케라스 모델에 주입할 수 있습니다.
```
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
```
앞서 특성 열의 작동 예를 보이기 위해 작은 배치 크기를 사용했습니다. 여기에서는 조금 더 큰 배치 크기로 입력 파이프라인을 만듭니다.
```
batch_size = 32
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
```
## 모델 생성, 컴파일, 훈련
```
model = tf.keras.Sequential([
feature_layer,
layers.Dense(128, activation='relu'),
layers.Dense(128, activation='relu'),
layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
model.fit(train_ds,
validation_data=val_ds,
epochs=5)
loss, accuracy = model.evaluate(test_ds)
print("정확도", accuracy)
```
핵심 포인트: 일반적으로 크고 복잡한 데이터셋일 경우 딥러닝 모델에서 최선의 결과를 얻습니다. 이런 작은 데이터셋에서는 기본 모델로 결정 트리(decision tree)나 랜덤 포레스트(random forest)를 사용하는 것이 권장됩니다. 이 튜토리얼의 목적은 정확한 모델을 훈련하는 것이 아니라 정형 데이터를 다루는 방식을 설명하는 것입니다. 실전 데이터셋을 다룰 때 이 코드를 시작점으로 사용하세요.
## 그 다음엔
정형 데이터를 사용한 분류 작업에 대해 배우는 가장 좋은 방법은 직접 실습하는 것입니다. 실험해 볼 다른 데이터셋을 찾아서 위와 비슷한 코드를 사용해 모델을 훈련해 보세요. 정확도를 향상시키려면 모델에 포함할 특성과 표현 방법을 신중하게 생각하세요.
| github_jupyter |
# About
This kernel applies the techniques from [fastai's deep learning for coders](http://course.fast.ai) course to the dogbreed dataset
The resulting Kaggle score is **0.22623** which roughly translates to a position in the top 30%.
# Setup
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import numpy as np
import pandas as pd
import os
from fastai.conv_learner import *
# make sure CUDA is available and enabled
print(torch.cuda.is_available(), torch.backends.cudnn.enabled)
# set competition name
comp_name = "dogbreed"
# use with custom environment
user = "ec2-user"
input_path = f"/home/{user}/data/{comp_name}/"
wd = f"/home/{user}/kaggle/{comp_name}/"
# use only with kaggle kernels
#input_path = "../input/"
#wd = "/kaggle/working/"
# create symlinks for easy data handling
!ln -fs {input_path}labels.csv {wd}labels.csv
!ln -fs {input_path}sample_submission.csv {wd}sample.csv
!ln -fs {input_path}train {wd}train
!ln -fs {input_path}test {wd}test
!ls -alh
```
## Helper functions to deal with Kaggle's file system limitations
```
def create_symlnk(src_dir, src_name, dst_name, dst_dir=wd, target_is_dir=False):
    """Create symlink dst_dir/dst_name -> src_dir/src_name unless it exists.

    Paths are built by plain string concatenation, so the directory
    arguments are expected to end with a trailing slash (as ``wd`` does).
    """
    link_path = dst_dir + dst_name
    if os.path.exists(link_path):
        return
    os.symlink(src=src_dir + src_name, dst=link_path, target_is_directory=target_is_dir)
def clean_up(wd=wd):
    """Delete all temporary directories and symlinks in working directory (wd).

    Symlinks (files or dirs) are unlinked; real directories are removed
    recursively; regular files are left in place and reported.

    Bug fix: ``os.walk`` yields entry *names* relative to ``root``, so each
    name is joined with ``root`` before testing/removing it — the original
    operated on bare names, which only worked when the process cwd happened
    to be the directory being walked.
    """
    for root, dirs, files in os.walk(wd):
        try:
            for d in dirs:
                path = os.path.join(root, d)
                if os.path.islink(path):
                    os.unlink(path)
                else:
                    shutil.rmtree(path)
            for f in files:
                path = os.path.join(root, f)
                if os.path.islink(path):
                    os.unlink(path)
                else:
                    # keep regular files, just report them
                    print(f)
        except FileNotFoundError as e:
            print(e)
# only use with kaggle kernels
#create_symlnk(input_path, "labels.csv", "labels.csv")
#create_symlnk(input_path, "sample_submission.csv", "sample.csv")
#create_symlnk(input_path, "train", "train", target_is_dir=True)
#create_symlnk(input_path, "test", "test", target_is_dir=True)
# perform sanity check
#!ls -alh
```
# Exploration
```
label_df = pd.read_csv(f"{wd}labels.csv")
label_df.head()
label_df.shape
label_df.pivot_table(index="breed", aggfunc=len).sort_values("id", ascending=False)
```
# Preprocess data
```
# define architecture
arch = resnext101_64
sz = 224
bs = 64
# create indexes for validation dataset
val_idxs = get_cv_idxs(label_df.shape[0])
def get_data(sz=sz):
    """Build an ImageClassifierData object for the dogbreed images.

    Uses side-on augmentations with a small max zoom. Images requested at
    more than 300 px are served as-is; smaller sizes are loaded from a
    340 px resized copy to speed up training.
    """
    transforms = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1)
    image_data = ImageClassifierData.from_csv(
        path=wd,
        folder="train",
        csv_fname=f"{wd}labels.csv",
        tfms=transforms,
        val_idxs=val_idxs,
        suffix=".jpg",
        test_name="test",
    )
    if sz > 300:
        return image_data
    return image_data.resize(340, new_path=wd)
data = get_data()
[print(len(e)) for e in [data.trn_ds, data.val_ds, data.test_ds]]
# look at an actual image
fn = wd + data.trn_ds.fnames[-1]
img = PIL.Image.open(fn); img
img.size
```
# Model
## Baseline
```
learn = ConvLearner.pretrained(arch, data, ps=0.5, precompute=True)
lrf = learn.lr_find()
learn.sched.plot()
lr = 1e-1
# fit baseline model without data augmentation
learn.fit(lr, 3)
# disable precompute and fit model with data augmentation
learn.precompute=False
learn.fit(lr, 3, cycle_len=1, cycle_mult=2)
learn.save(f"{comp_name}_{arch.__name__}_{sz}_base")
learn.load(f"{comp_name}_{arch.__name__}_{sz}_base")
```
## Increase image size
```
sz = 299
learn.set_data(get_data(sz))
learn.fit(lr, 3, cycle_len=1)
learn.sched.plot_loss()
learn.save(f"{comp_name}_{arch.__name__}_{sz}")
learn.load(f"{comp_name}_{arch.__name__}_{sz}")
```
## Prediction on validation set
```
from sklearn.metrics import log_loss
log_preds, y = learn.TTA()
probs = np.mean(np.exp(log_preds), 0)
accuracy_np(probs, y), log_loss(y, probs)
```
## Prediction on test set
```
log_preds_test, y_test = learn.TTA(is_test=True)
probs_test = np.mean(np.exp(log_preds_test), 0)
np.save(f"{comp_name}_probs_test", probs_test, allow_pickle=True)
probs_test = np.load(f"{comp_name}_probs_test.npy")
```
# Submission
```
df = pd.DataFrame(probs_test)
df.columns = data.classes
# insert clean ids - without folder prefix and .jpg suffix - of images as first column
df.insert(0, "id", [e[5:-4] for e in data.test_ds.fnames])
df.to_csv(f"sub_{comp_name}_{arch.__name__}.csv", index=False)
# only use with kaggle kernels
#clean_up()
```
| github_jupyter |
### Entrepreneurial Competency Analysis and Predict
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib as mat
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
data = pd.read_csv('entrepreneurial competency.csv')
data.head()
data.describe()
data.corr()
list(data)
data.shape
data_reasons = pd.DataFrame(data.ReasonsForLack.value_counts())
data_reasons
data.ReasonsForLack.value_counts().idxmax()
data.isnull().sum()[data.isnull().sum()>0]
data['ReasonsForLack'] = data.ReasonsForLack.fillna('Desconhecido')
fill_na = pd.DataFrame(data.ReasonsForLack.value_counts())
fill_na.head(5)
edu_sector = data.EducationSector.value_counts().sort_values(ascending=False)
edu_sector
edu_sector_pd = pd.DataFrame(edu_sector, columns = ['Sector', 'Amount'])
edu_sector_pd.Sector = edu_sector.index
edu_sector_pd.Amount = edu_sector.values
edu_sector_pd
perc_sec = round(data.EducationSector.value_counts()/data.EducationSector.shape[0],2)
edu_sector_pd['Percentual'] = perc_sec.values *100
edu_sector_pd
labels = [str(edu_sector_pd['Sector'][i])+' '+'['+str(round(edu_sector_pd['Percentual'][i],2)) +'%'+']' for i in edu_sector_pd.index]
from matplotlib import cm
cs = cm.Set3(np.arange(100))
f = plt.figure()
plt.pie(edu_sector_pd['Amount'], labeldistance = 1, radius = 3, colors = cs, wedgeprops = dict(width = 0.8))
plt.legend(labels = labels, loc = 'center', prop = {'size':12})
plt.title("Students distribution based on Education Sector - General Analysis", loc = 'Center', fontdict = {'fontsize':20,'fontweight':20})
plt.show()
rank_edu_sec = data.EducationSector.value_counts().sort_values(ascending=False)
rank = pd.DataFrame(rank_edu_sec, columns=['Sector', 'Amount'])
rank.Sector = rank_edu_sec.index
rank.Amount = rank_edu_sec.values
rank_3 = rank.head(3)
rank_3
fig, ax = plt.subplots(figsize=(8,5))
colors = ["#00e600", "#ff8c1a", "#a180cc"]
sns.barplot(x="Sector", y="Amount", palette=colors, data=rank_3)
ax.set_title("Sectors with largest students number",fontdict= {'size':12})
ax.xaxis.set_label_text("Sectors",fontdict= {'size':12})
ax.yaxis.set_label_text("Students amount",fontdict= {'size':12})
plt.show()
fig, ax = plt.subplots(figsize=(8,6))
sns.histplot(data["Age"], color="#33cc33",kde=True, ax=ax)
ax.set_title('Students distribution based on Age', fontsize= 15)
plt.ylabel("Density (KDE)", fontsize= 15)
plt.xlabel("Age", fontsize= 15)
plt.show()
fig = plt.figure(figsize=(10,5))
plt.boxplot(data.Age)
plt.show()
gender = data.Gender.value_counts()
gender
perc_gender = round((data.Gender.value_counts()/data.Gender.shape[0])*100, 2)
perc_gender
df_gender = pd.DataFrame(gender, columns=['Gender','Absolut_Value', 'Percent_Value'])
df_gender.Gender = gender.index
df_gender.Absolut_Value = gender.values
df_gender.Percent_Value = perc_gender.values
df_gender
fig, ax = plt.subplots(figsize=(8,6))
sns.histplot(data["Gender"], color="#33cc33", ax=ax)
ax.set_title('Students distribution by gender', fontsize= 15)
plt.ylabel("Amount", fontsize= 15)
plt.xlabel("Gender", fontsize= 15)
plt.show()
```
# Education Sector, Gender and Age Analyses, where Target = 1
```
data_y = data[data.y == 1]
data_y.head()
data_y.shape
edu_sector_y = data_y.EducationSector.value_counts().sort_values(ascending=False)
edu_sector_y
edu_sector_ypd = pd.DataFrame(edu_sector_y, columns = ['Sector', 'Amount'])
edu_sector_ypd.Sector = edu_sector_y.index
edu_sector_ypd.Amount = edu_sector_y.values
edu_sector_ypd
perc_sec_y = round(data_y.EducationSector.value_counts()/data_y.EducationSector.shape[0],2)
edu_sector_ypd['Percent'] = perc_sec_y.values *100
edu_sector_ypd
labels = [str(edu_sector_ypd['Sector'][i])+' '+'['+str(round(edu_sector_ypd['Percent'][i],2)) +'%'+']' for i in edu_sector_ypd.index]
cs = cm.Set3(np.arange(100))
f = plt.figure()
plt.pie(edu_sector_ypd['Amount'], labeldistance = 1, radius = 3, colors = cs, wedgeprops = dict(width = 0.8))
plt.legend(labels = labels, loc = 'center', prop = {'size':12})
plt.title("Students distribution based on Education Sector - Target Analysis", loc = 'Center', fontdict = {'fontsize':20,'fontweight':20})
plt.show()
fig, ax = plt.subplots(figsize=(8,6))
sns.histplot(data_y["Age"], color="#1f77b4",kde=True, ax=ax)
ax.set_title('Students distribution based on Age - Target Analysis', fontsize= 15)
plt.ylabel("Density (KDE)", fontsize= 15)
plt.xlabel("Age", fontsize= 15)
plt.show()
gender_y = data_y.Gender.value_counts()
perc_gender_y = round((data_y.Gender.value_counts()/data_y.Gender.shape[0])*100, 2)
df_gender_y = pd.DataFrame(gender_y, columns=['Gender','Absolut_Value', 'Percent_Value'])
df_gender_y.Gender = gender_y.index
df_gender_y.Absolut_Value = gender_y.values
df_gender_y.Percent_Value = perc_gender_y.values
df_gender_y
fig, ax = plt.subplots(figsize=(8,6))
sns.histplot(data_y["Gender"], color="#9467bd", ax=ax)
ax.set_title('Students distribution by gender', fontsize= 15)
plt.ylabel("Amount", fontsize= 15)
plt.xlabel("Gender", fontsize= 15)
plt.show()
pcy= round(data_y.IndividualProject.value_counts()/data_y.IndividualProject.shape[0]*100,2)
pcy
pc= round(data.IndividualProject.value_counts()/data.IndividualProject.shape[0]*100,2)
pc
fig = plt.figure(figsize=(15,5)) #tamanho do frame
plt.subplots_adjust(wspace= 0.5) #espaço entre os graficos
plt.suptitle('Comparation between Idividual Project on "y general" and "y == 1"')
plt.subplot(1,2,2)
plt.bar(data_y.IndividualProject.unique(), pcy, color = 'green')
plt.title("Individual Project Distribution - y==1")
plt.subplot(1,2,1)
plt.bar(data.IndividualProject.unique(), pc, color = 'grey')
plt.title("Individual Project Distribution - Full dataset")
plt.show()
round(data.Influenced.value_counts()/data.Influenced.shape[0],2)*100
round(data_y.Influenced.value_counts()/data_y.Influenced.shape[0],2)*100
```
Here we can observe that the categorical features have almost no influence on the target: each feature's distribution barely changes between the full dataset ("y general") and the filtered subset ("y == 1"). In other words, we should use the numerical features as the predictive parameters.
```
data.head()
list(data)
data_num = data.drop(['EducationSector', 'Age', 'Gender', 'City','MentalDisorder'], axis = 1)
data_num.head()
data_num.corr()
plt.hist(data_num.GoodPhysicalHealth, bins = 30)
plt.title("Good Physical Health distribution")
plt.show()
data_num_fil1 = data_num[data_num.y == 1]
plt.hist(data_num_fil1.GoodPhysicalHealth, bins = 30)
plt.title("Good Physical Health distribution, where target == 1")
plt.show()
pers_fil = round(data_num.GoodPhysicalHealth.value_counts()/data_num.GoodPhysicalHealth.shape[0],2)
pers_fil1 = round(data_num_fil1.GoodPhysicalHealth.value_counts()/data_num_fil1.GoodPhysicalHealth.shape[0],2)
pers_fil
pers_fil1
list(data_num)
def plot_features(df, df_filtered, columns):
    """For each column, plot its relative frequency distribution in the
    full dataset (left, grey) next to the filtered y == 1 subset
    (right, green).

    Fixes over the original version:
    - the right ("y == 1") panel now plots the *filtered* series — it
      previously plotted the full dataset under that title;
    - bar heights come from the same value_counts() index used for the
      x positions, so categories and heights line up (the original mixed
      ``unique()`` order with ``value_counts()`` order);
    - each series is normalized by its own length (the original divided
      the full-dataset counts by the filtered length).
    """
    df_original = df.copy()
    df2 = df_filtered.copy()
    for column in columns:
        full_dist = round(df_original[column].value_counts() / df_original.shape[0], 2)
        filt_dist = round(df2[column].value_counts() / df2.shape[0], 2)
        fig = plt.figure(figsize=(15, 5))  # frame size
        plt.subplots_adjust(wspace=0.5)    # space between the plots
        plt.suptitle('Comparation between Different Features on "y general" and "y == 1"')
        plt.subplot(1, 2, 2)
        plt.bar(filt_dist.index, filt_dist.values, color='green')
        plt.title("Comparation between " + column + " on 'y == 1'")
        plt.subplot(1, 2, 1)
        plt.bar(full_dist.index, full_dist.values, color='grey')
        plt.title("Comparation between " + column + " Full dataset")
        plt.show()
plot_features(data_num,data_num_fil1,columns=['Influenced',
'Perseverance',
'DesireToTakeInitiative',
'Competitiveness',
'SelfReliance',
'StrongNeedToAchieve',
'SelfConfidence'])
```
### Data Transformation and Preprocessing
```
data_num.shape
data_num.dtypes
from sklearn.preprocessing import OneHotEncoder
X = data_num.drop(['y', 'Influenced', 'ReasonsForLack'], axis = 1)
def ohe_drop(data, columns):
    """One-hot encode each listed column and drop the original column.

    For every name in ``columns``, the column is encoded with sklearn's
    OneHotEncoder, the indicator columns (named after the category values)
    are concatenated onto the frame, and the source column is removed.

    Args:
        data (pd.DataFrame): Input frame; not modified (a copy is used).
        columns (list): Column names to encode.

    Returns:
        pd.DataFrame: New frame with encoded columns appended and the
        originals dropped.
    """
    df = data.copy()
    ohe = OneHotEncoder()
    for column in columns:
        values = df[column].values.reshape(-1, 1)
        ohe.fit(values)
        # single transform call — the original called transform twice and
        # discarded the first result
        encoded = pd.DataFrame(
            ohe.transform(values).toarray(),
            columns=ohe.categories_[0].tolist(),
        )
        df = pd.concat([df, encoded], axis=1)
        df = df.drop([column], axis=1)
    return df
X = ohe_drop(data_num, columns =['Perseverance',
'DesireToTakeInitiative',
'Competitiveness',
'SelfReliance',
'StrongNeedToAchieve',
'SelfConfidence',
'GoodPhysicalHealth',
'Influenced',
'KeyTraits'] )
X
X = X.drop(['y', 'ReasonsForLack', 'IndividualProject'], axis = 1)
y = np.array(data_num.y)
X.shape
y.shape
X = np.array(X)
type(X)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.30, random_state = 0)
X_train.shape
X_test.shape
y_train.shape
y_test.shape
```
### Logistic Regression
```
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
logreg.predict(X_train)
logreg.predict(X_train)[:20]
y_train[:20]
```
### Performance metrics calculation
Accuracy Score
Transform the dataframe into a matrix
```
from sklearn.metrics import accuracy_score
accuracy_score(y_true = y_train, y_pred = logreg.predict(X_train))
```
Cross Validation
```
from sklearn.model_selection import KFold
kf = KFold(n_splits = 3)
classif= LogisticRegression()
train_accuracy_list = []
val_accuracy_list = []
for train_idx, val_idx in kf.split(X_train, y_train):
Xtrain_folds = X_train[train_idx]
ytrain_folds = y_train[train_idx]
Xval_fold = X_train[val_idx]
yval_fold = y_train[val_idx]
classif.fit(Xtrain_folds,ytrain_folds)
train_pred = classif.predict(Xtrain_folds)
pred_validacao = classif.predict(Xval_fold)
train_accuracy_list.append(accuracy_score(y_pred = train_pred, y_true = ytrain_folds))
val_accuracy_list.append(accuracy_score(y_pred = pred_validacao, y_true = yval_fold))
print("acurácias em treino: \n", train_accuracy_list, " \n| média: ", np.mean(train_accuracy_list))
print()
print("acurácias em validação: \n", val_accuracy_list, " \n| média: ", np.mean(val_accuracy_list))
from sklearn.metrics import confusion_matrix
confusion_matrix(y_true = y_train, y_pred = logreg.predict(X_train))
cm = confusion_matrix(y_true = y_train, y_pred = logreg.predict(X_train))
cm[1,1] / cm[1, :].sum()
cm[1,1] / cm[:, 1].sum()
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import f1_score
f1_score(y_true = y_train, y_pred = logreg.predict(X_train))
```
### Y test Predict
```
logreg.predict(X_test)
f1_score(y_true = y_test, y_pred = logreg.predict(X_test))
```
The prediction score on "y_test" is too low, so I'll optimize the model
### Model Optimization
```
from sklearn.feature_selection import SelectKBest, chi2
def try_k(x, y, n):
    """Select the n best features by chi2, refit the module-level logreg
    on them, and return (predictions, f1, precision, recall) on (x, y)."""
    selector = SelectKBest(score_func=chi2, k=n)
    fitted = selector.fit(x, y)
    selected = fitted.transform(x)
    logreg.fit(selected, y)
    predictions = logreg.predict(selected)
    return (
        predictions,
        f1_score(y_true=y, y_pred=predictions),
        precision_score(y_true=y, y_pred=predictions),
        recall_score(y_true=y, y_pred=predictions),
    )
# Try several feature-count settings and report the resulting scores.
# Fix: `n_list` was previously undefined, which raised a NameError; the
# bare `f1` / `precision` / `recall` expressions inside the loop printed
# nothing (only a cell's last expression is echoed), so the values were
# silently discarded.
n_list = [5, 10, 15, 20, 25, 30]
for n in n_list:
    preds, f1, precision, recall = try_k(X_test, y_test, n)
    print(f"k={n}: f1={f1:.3f} precision={precision:.3f} recall={recall:.3f}")
from sklearn.metrics import classification_report, plot_confusion_matrix, plot_roc_curve
the_best = SelectKBest(score_func=chi2, k=30)
fit = the_best.fit(X_test, y_test)
feature = fit.transform(X_test)
preds = logreg.predict(feature)
# Fix: use the transformed matrix `feature` — the original referenced an
# undefined name `features` (a local of try_k, not visible here).
plot_confusion_matrix(logreg, feature, y_test)
plot_roc_curve(logreg, feature, y_test)
print(classification_report(y_test, preds))
```
| github_jupyter |
# Mean Shift using Standard Scaler
This code template performs cluster analysis with the Mean Shift algorithm (centroid-based clustering using a flat kernel), applies feature scaling with Standard Scaler, and includes 2D and 3D visualizations of the resulting clusters.
### Required Packages
```
!pip install plotly
import operator
import warnings
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import plotly.express as px
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import plotly.graph_objects as go
from sklearn.cluster import MeanShift, estimate_bandwidth
warnings.filterwarnings("ignore")
```
### Initialization
Filepath of CSV file
```
file_path = ""
```
List of features which are required for model training
```
features=[]
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
```
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections
It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X.
```
X = df[features]
```
### Data Preprocessing
Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.
```
def NullClearner(df):
    """Fill missing values in a pandas Series in place and return it.

    Numeric (float64/int64) series are filled with their mean; any other
    series with their mode. Non-Series inputs are returned untouched.
    """
    if not isinstance(df, pd.Series):
        return df
    if df.dtype in ["float64", "int64"]:
        fill_value = df.mean()
    else:
        fill_value = df.mode()[0]
    df.fillna(fill_value, inplace=True)
    return df
def EncodeX(df):
    """Return ``df`` with string/categorical columns one-hot encoded
    (numeric columns pass through unchanged)."""
    encoded = pd.get_dummies(df)
    return encoded
```
Calling preprocessing functions on the feature and target set.
```
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
X.head()
```
####Feature Scaling
Standard Scaler - Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using transform.<br>
[For more information click here](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
```
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
```
### Model
Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover “blobs” in a smooth density of samples. It is a centroid-based algorithm, which works by updating candidates for centroids to be the mean of the points within a given region. These candidates are then filtered in a post-processing stage to eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
[More information](https://analyticsindiamag.com/hands-on-tutorial-on-mean-shift-clustering-algorithm/)
#### Tuning Parameters
1. bandwidthfloat, default=None
> Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using sklearn.cluster.estimate_bandwidth
2. seedsarray-like of shape (n_samples, n_features), default=None
> Seeds used to initialize kernels. If not set, the seeds are calculated by clustering.get_bin_seeds with bandwidth as the grid size and default values for other parameters.
3. bin_seedingbool, default=False
> If true, initial kernel locations are not locations of all points, but rather the location of the discretized version of points, where points are binned onto a grid whose coarseness corresponds to the bandwidth.
4. min_bin_freqint, default=1
> To speed up the algorithm, accept only those bins with at least min_bin_freq points as seeds.
5. cluster_allbool, default=True
> If true, then all points are clustered, even those orphans that are not within any kernel. Orphans are assigned to the nearest kernel. If false, then orphans are given cluster label -1
6. n_jobsint, default=None
> The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors.
7. max_iterint, default=300
> Maximum number of iterations, per seed point before the clustering operation terminates
[For more detail on API](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.MeanShift.html)
<br>
<br>
####Estimate Bandwidth
Estimate the bandwidth to use with the mean-shift algorithm.
Note that this function takes time at least quadratic in n_samples. For large datasets, it's wise to set the quantile parameter to a small value.
```
bandwidth = estimate_bandwidth(X_scaled, quantile=0.15)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X_scaled)
y_pred = ms.predict(X_scaled)
```
### Cluster Analysis
First, we add the cluster labels from the trained model into the copy of the data frame for cluster analysis/visualization.
```
ClusterDF = X.copy()
ClusterDF['ClusterID'] = y_pred
ClusterDF.head()
```
#### Cluster Records
The below bar graphs show the number of data points in each available cluster.
```
ClusterDF['ClusterID'].value_counts().plot(kind='bar')
```
#### Cluster Plots
Below written functions get utilized to plot 2-Dimensional and 3-Dimensional cluster plots on the available set of features in the dataset. Plots include different available clusters along with cluster centroid.
```
def Plot2DCluster(X_Cols, df):
    """Draw one 2-D scatter plot per pair of feature columns, coloring
    points by cluster id and marking each centroid of the module-level
    fitted MeanShift model ``ms`` with a black triangle."""
    for col_x, col_y in itertools.combinations(X_Cols, 2):
        plt.rcParams["figure.figsize"] = (8, 6)
        idx_x = df.columns.get_loc(col_x)
        idx_y = df.columns.get_loc(col_y)
        for cluster_id in df['ClusterID'].unique():
            members = df[df.ClusterID == cluster_id]
            plt.scatter(members[col_x], members[col_y], cmap=plt.cm.Accent, label=cluster_id)
        plt.scatter(
            ms.cluster_centers_[:, idx_x],
            ms.cluster_centers_[:, idx_y],
            marker="^", color="black", label="centroid",
        )
        plt.xlabel(col_x)
        plt.ylabel(col_y)
        plt.legend()
        plt.show()
def Plot3DCluster(X_Cols,df):
    """Draw one 3-D scatter plot per triple of feature columns, coloring
    points by cluster id and marking centroids with black triangles.

    NOTE(review): relies on the module-level fitted MeanShift model ``ms``
    and on matplotlib's mplot3d toolkit imported at file level.
    """
    for i in list(itertools.combinations(X_Cols, 3)):
        # positional indices of the triple, used to slice ms.cluster_centers_
        xi,yi,zi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]),df.columns.get_loc(i[2])
        fig,ax = plt.figure(figsize = (16, 10)),plt.axes(projection ="3d")
        ax.grid(b = True, color ='grey',linestyle ='-.',linewidth = 0.3,alpha = 0.2)
        for j in df['ClusterID'].unique():
            # rows belonging to cluster j
            DFC=df[df.ClusterID==j]
            ax.scatter3D(DFC[i[0]],DFC[i[1]],DFC[i[2]],alpha = 0.8,cmap=plt.cm.Accent,label=j)
        # overlay all cluster centroids
        ax.scatter3D(ms.cluster_centers_[:,xi],ms.cluster_centers_[:,yi],ms.cluster_centers_[:,zi],
        marker="^",color="black",label="centroid")
        ax.set_xlabel(i[0])
        ax.set_ylabel(i[1])
        ax.set_zlabel(i[2])
        plt.legend()
        plt.show()
def Plotly3D(X_Cols,df):
    """Render one interactive plotly 3-D scatter per triple of feature
    columns: centroid markers overlaid on cluster-colored data points.

    NOTE(review): depends on the module-level fitted MeanShift model ``ms``
    and on plotly express/graph_objects (px, go) imported at file level.
    """
    for i in list(itertools.combinations(X_Cols,3)):
        # positional indices of the triple, used to slice ms.cluster_centers_
        xi,yi,zi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]),df.columns.get_loc(i[2])
        # trace 1: centroids only
        fig1 = px.scatter_3d(ms.cluster_centers_,x=ms.cluster_centers_[:,xi],y=ms.cluster_centers_[:,yi],
        z=ms.cluster_centers_[:,zi])
        # trace 2: data points colored by cluster id
        fig2=px.scatter_3d(df, x=i[0], y=i[1],z=i[2],color=df['ClusterID'])
        # merge both traces into a single titled figure
        fig3 = go.Figure(data=fig1.data + fig2.data,
        layout=go.Layout(title=go.layout.Title(text="x:{}, y:{}, z:{}".format(i[0],i[1],i[2])))
        )
        fig3.show()
sns.set_style("whitegrid")
sns.set_context("talk")
plt.rcParams["lines.markeredgewidth"] = 1
sns.pairplot(data=ClusterDF, hue='ClusterID', palette='Dark2', height=5)
Plot2DCluster(X.columns,ClusterDF)
Plot3DCluster(X.columns,ClusterDF)
Plotly3D(X.columns,ClusterDF)
```
#### [Created by Anu Rithiga](https://github.com/iamgrootsh7)
| github_jupyter |
```
# Analysis dependencies for the SugarScape trade-price experiments.
import pickle
import matplotlib.pyplot as plt
from scipy.stats.mstats import gmean
import seaborn as sns
from statistics import stdev
from math import log
import numpy as np
from scipy import stats
%matplotlib inline
# Per-run trade prices: clustered (hierarchy) variant and the baseline.
price_100c = pickle.load(open("total_price_non.p", "rb"))
# BUG FIX: the old literal mixed "\\" and "\" separators and relied on the
# invalid escape sequences \G and \I being kept verbatim (a SyntaxWarning
# now, a SyntaxError in future Python). A raw string yields the exact same
# path bytes and is always valid.
price_100 = pickle.load(open(r"C:\Users\ymamo\Google Drive\1. PhD\Dissertation\SugarScape\Initial\NetScape_Elegant\total_price1.p", "rb"))
from collections import defaultdict
def make_distro(price_100):
    """Flatten per-run, per-step trade prices into a list of SDLM values.

    For every step of every run, computes the standard deviation of the
    logarithm of that step's trade prices (SDLM). Steps with fewer than
    two trades are skipped, since a standard deviation is undefined there.

    Args:
        price_100: Mapping of run id -> {step -> list of trade prices}.
            Prices must be positive (their log is taken).

    Returns:
        list: SDLM value for every qualifying step, across all runs.
    """
    all_stds = []
    # The original also accumulated a per-run `total_log` defaultdict that
    # was never returned or read; that dead code has been removed.
    for output in price_100.values():
        for prices in output.values():
            log_pr = [log(p) for p in prices]
            # stdev() requires at least two data points.
            if len(log_pr) >= 2:
                all_stds.append(stdev(log_pr))
    return all_stds
# SDLM distributions for both experiment variants, overlaid as histograms.
price_cluster = make_distro(price_100c)
price_norm = make_distro(price_100)
fig7, ax7 = plt.subplots(figsize = (7,7))
ax7.hist(price_cluster, 500, label = "Hierarchy")
ax7.hist(price_norm, 500, label = "No Hierarchy")
plt.title("Network Approach:\nPrice Distribution of SDLM of 100 Runs", fontsize = 20, fontweight = "bold")
plt.xlabel("SDLM of Step", fontsize = 15, fontweight = "bold")
plt.ylabel("Frequency of SDLM", fontsize = 15, fontweight = "bold")
#plt.xlim(.75,2)
#plt.ylim(0,5)
plt.legend()
from statistics import mean
# Survivor counts (_s) and run times (_t) for the three approaches.
stan_multi_s = pickle.load(open("C:\\Users\\ymamo\\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_Standard\\stan_multi_sur.p", "rb"))
stan_multi_t = pickle.load(open("C:\\Users\\ymamo\\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_Standard\\stan_multi_time.p", "rb"))
brute_multi_s = pickle.load(open("C:\\Users\\ymamo\\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_Brute\\brute_multi_sur.p", "rb"))
brute_multi_t = pickle.load(open("C:\\Users\\ymamo\\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_Brute\\brute_multi_time.p", "rb"))
net_multi_s = pickle.load(open("net_multi_sur_non.p", "rb"))
net_multi_t =pickle.load(open("net_multi_time_non.p", "rb"))
# Mean survivors per approach, and mean run times rounded to 2 decimals.
net_mean = mean(net_multi_s)
brute_mean = mean(brute_multi_s)
stan_mean = mean(stan_multi_s)
net_time = round(mean(net_multi_t),2)
brute_time = round(mean(brute_multi_t),2)
stan_time = round(mean(stan_multi_t),2)
# Two-sample t-tests of survivor counts: standard vs explicit (brute)
# and standard vs network.
t, p = stats.ttest_ind(stan_multi_s,brute_multi_s)
brute_p = round(p * 2, 3)
t2, p2 = stats.ttest_ind(stan_multi_s,net_multi_s)
# BUG FIX: this previously reused `p` from the brute comparison; the
# network p-value is `p2`.
net_p = round(p2 * 2, 3)
print (net_p, brute_p)
# Survivor-count histograms for the three approaches, with mean summary.
fig5, ax5 = plt.subplots(figsize=(7,7))
plt.hist(net_multi_s, label = "Network Approach")
plt.hist(stan_multi_s, label = "Standard Approach")
plt.hist(brute_multi_s, label = "Explicit Approach")
# BUG FIX: the "Explicit mean" line previously printed stan_mean.
plt.text(56.5, 28.5, "Network mean: "+str(net_mean) +"\nStandard mean: " + str(stan_mean)+ "\nExplicit mean: "+str(brute_mean))
plt.legend()
plt.title("Survivor Histogram of 100 Runs, 1000 Steps \nLink Threshold 10; with Hierarchy", fontweight = "bold", fontsize = 15)
# Two-sample t-tests of run times: standard vs explicit and standard vs
# network.
t, p = stats.ttest_ind(stan_multi_t,brute_multi_t)
# BUG FIX: these were accidentally tuples `(p * 2, 10)`; round() was
# clearly intended, matching the survivor-count tests above.
brute_t_p = round(p * 2, 10)
t2, p2 = stats.ttest_ind(stan_multi_t,net_multi_t)
# BUG FIX: use p2 from the network comparison, not p from the brute one.
net_t_p = round(p2 * 2, 10)
print (net_t_p, brute_t_p)
# Run-time histograms for the three approaches, with mean summary text.
fig6, ax6 = plt.subplots(figsize=(7,7))
plt.hist(net_multi_t, label = "Network Approach")
plt.hist(stan_multi_t, label = "Standard Approach")
plt.hist(brute_multi_t, label = "Explicit Approach")
#plt.text(78, 25, "Network p-value: "+str(net_t_p) +"\nExplicit p-value: "+str(brute_t_p))
plt.legend()
plt.title("Time Histogram of 100 Runs, 1000 steps \nLink Threshold 10; with Hierarchy", fontweight = "bold", fontsize = 15)
plt.text(70, 24, "\nNetwork Mean: "+str(net_time) +"\nStandard Mean: "+str(stan_time) + "\nExplicit Approach: "+str(brute_time))
# Single-run drill-down: geometric-mean trade price over time for Run95.
ind_e = price_100c["Run95"]
## Calculate price
x = []
y =[]
for st, pr in ind_e.items():
    #if step <=400:
    x.append(st)
    # Geometric mean of the step's trade prices.
    y.append(gmean(pr))
y[0]
fig, ax = plt.subplots(figsize = (7,7))
ax.scatter(x,y)
plt.title("Network Approach with Hierarchy:\nMean Trade Price", fontsize = 20, fontweight = "bold")
plt.xlabel("Time", fontsize = 15, fontweight = "bold")
plt.ylabel("Price", fontsize = 15, fontweight = "bold")
# Trade volume per step (number of trades) for the same run.
x_vol = []
y_vol = []
total = 0
for s, p in ind_e.items():
    #if step <=400:
    x_vol.append(s)
    y_vol.append(len(p))
    total += len(p)
total
# Histogram of per-step trade volumes.
fig2, ax2 = plt.subplots(figsize = (7,7))
ax2.hist(y_vol, 100)
plt.title("Network Approach with Hierarchy:\nTrade Volume Histogram", fontsize = 20, fontweight = "bold")
plt.xlabel("Trade Volume of Step", fontsize = 15, fontweight = "bold")
plt.ylabel("Frequency Trade Volume", fontsize = 15, fontweight = "bold")
#plt.ylim(0,400)
# Trade volume as a time series.
fig2, ax2 = plt.subplots(figsize = (7,7))
ax2.plot(x_vol, y_vol)
plt.title("Network Approach with Hierarchy:\nTrade Volume", fontsize = 20, fontweight = "bold")
plt.xlabel("Time", fontsize = 15, fontweight = "bold")
plt.ylabel("Volume", fontsize = 15, fontweight = "bold")
#ax2.text(600,300, "Total Trade Volume: \n "+str(total), fontsize = 15, fontweight = 'bold')
#plt.ylim(0,400)
# Per-step SDLM time series for the same run: scatter every log price and
# overlay the per-step standard deviation.
from statistics import stdev
from math import log
x_dev =[]
y_dev = []
x_all = []
y_all = []
log_prices = {}
for step, prices in ind_e.items():
    log_prices[step] = [log(p) for p in prices]
for step, log_p in log_prices.items():
    #if step <= 400:
    if len(log_p) <2:
        # stdev undefined for fewer than two trades; skip the step.
        pass
    else:
        for each in log_p:
            x_all.append(step)
            y_all.append(each)
        x_dev.append(step)
        y_dev.append(stdev(log_p))
from numpy.polynomial.polynomial import polyfit
fig3, ax3 = plt.subplots(figsize=(7,7))
ax3.scatter(x_all,y_all)
plt.plot(x_dev,y_dev,'-', color ='red')
plt.title("Network Approach with Hierarchy:\nStandard Deviation of Logarithmic Mean", fontsize = 20, fontweight = "bold")
plt.xlabel("Time", fontsize = 15, fontweight = "bold")
plt.ylabel("Logarithmic Price", fontsize = 15, fontweight = "bold")
# Emergent agent-type data for one run at the final step.
net_emergent =pickle.load(open("type_df_non.p", "rb"))
net_emergent["Run67"][999]
```
| github_jupyter |
# Learning a LJ potential [](https://colab.research.google.com/github/Teoroo-CMC/PiNN/blob/master/docs/notebooks/Learn_LJ_potential.ipynb)
This notebook showcases the usage of PiNN with a toy problem of learning a Lennard-Jones
potential with a hand-generated dataset.
It serves as a basic test, and demonstration of the workflow with PiNN.
```
# Install PiNN
!pip install git+https://github.com/Teoroo-CMC/PiNN
%matplotlib inline
import os, warnings
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from ase import Atoms
from ase.calculators.lj import LennardJones
# Force CPU execution for this toy example.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
# Silence the noisy TF IndexedSlices conversion warning.
index_warning = 'Converting sparse IndexedSlices'
warnings.filterwarnings('ignore', index_warning)
```
## Reference data
```
# Helper function: get the position given PES dimension(s)
def three_body_sample(atoms, a, r):
    """Place the three atoms for one (angle, distance) PES sample.

    Atom 0 sits at the origin, atom 1 at (0, 2, 0); atom 2 is placed at
    distance ``r`` from the origin, rotated ``a`` degrees in the y-z
    plane. The atoms object is modified in place and returned.
    """
    theta = a * np.pi / 180
    positions = [
        [0, 0, 0],
        [0, 2, 0],
        [0, r * np.cos(theta), r * np.sin(theta)],
    ]
    atoms.set_positions(positions)
    return atoms
# Ground-truth PES grid and random training samples from the LJ calculator.
atoms = Atoms('H3', calculator=LennardJones())
na, nr = 50, 50
arange = np.linspace(30,180,na)   # bond angle in degrees
rrange = np.linspace(1,3,nr)      # bond distance
# Truth
agrid, rgrid = np.meshgrid(arange, rrange)
egrid = np.zeros([na, nr])
for i in range(na):
    for j in range(nr):
        atoms = three_body_sample(atoms, arange[i], rrange[j])
        egrid[i,j] = atoms.get_potential_energy()
# Samples
nsample = 100
asample, rsample = [], []
distsample = []
data = {'e_data':[], 'f_data':[], 'elems':[], 'coord':[]}
for i in range(nsample):
    # Draw a random (angle, distance) pair from the grid values.
    a, r = np.random.choice(arange), np.random.choice(rrange)
    atoms = three_body_sample(atoms, a, r)
    # Keep only the non-zero pairwise distances (drop the diagonal).
    dist = atoms.get_all_distances()
    dist = dist[np.nonzero(dist)]
    data['e_data'].append(atoms.get_potential_energy())
    data['f_data'].append(atoms.get_forces())
    data['coord'].append(atoms.get_positions())
    data['elems'].append(atoms.numbers)
    asample.append(a)
    rsample.append(r)
    distsample.append(dist)
# True PES with the sampled points marked.
plt.pcolormesh(agrid, rgrid, egrid, shading='auto')
plt.plot(asample, rsample, 'rx')
plt.colorbar()
```
## Dataset from numpy arrays
```
from pinn.io import sparse_batch, load_numpy
# Convert the sample lists to arrays and build an 80/20 train/test split.
# The lambdas make each call produce a fresh TF dataset.
data = {k:np.array(v) for k,v in data.items()}
dataset = lambda: load_numpy(data, splits={'train':8, 'test':2})
train = lambda: dataset()['train'].shuffle(100).repeat().apply(sparse_batch(100))
test = lambda: dataset()['test'].repeat().apply(sparse_batch(100))
```
## Training
### Model specification
```
import pinn
# Hyper-parameters for the PiNet network and the potential model.
params={
    'model_dir': '/tmp/PiNet',
    'network': {
        'name': 'PiNet',
        'params': {
            'ii_nodes':[8,8],
            'pi_nodes':[8,8],
            'pp_nodes':[8,8],
            'out_nodes':[8,8],
            'depth': 4,
            'rc': 3.0,  # cutoff radius
            'atom_types':[1]}},
    'model':{
        'name': 'potential_model',
        'params': {
            'e_dress': {1:-0.3}, # element-specific energy dress
            'e_scale': 2, # energy scale for prediction
            'e_unit': 1.0, # output unit of energy
            'log_e_per_atom': True, # log e_per_atom and its distribution
            'use_force': True}}} # include force in Loss function
model = pinn.get_model(params)
# Start from a clean model directory, then train for 5000 steps.
%rm -rf /tmp/PiNet
train_spec = tf.estimator.TrainSpec(input_fn=train, max_steps=5e3)
eval_spec = tf.estimator.EvalSpec(input_fn=test, steps=10)
tf.estimator.train_and_evaluate(model, train_spec, eval_spec)
```
## Validate the results
### PES analysis
```
# Re-evaluate the PES grid with the trained PiNN calculator.
atoms = Atoms('H3', calculator=pinn.get_calc(model))
epred = np.zeros([na, nr])
for i in range(na):
    for j in range(nr):
        a, r = arange[i], rrange[j]
        atoms = three_body_sample(atoms, a, r)
        epred[i,j] = atoms.get_potential_energy()
plt.pcolormesh(agrid, rgrid, epred, shading='auto')
plt.colorbar()
plt.title('NN predicted PES')
plt.figure()
# Absolute error against the LJ ground truth, with sample locations.
plt.pcolormesh(agrid, rgrid, np.abs(egrid-epred), shading='auto')
plt.plot(asample, rsample, 'rx')
plt.title('NN Prediction error and sampled points')
plt.colorbar()
```
### Pairwise potential analysis
```
# Compare predicted vs true H2 pair potential along a distance scan.
atoms1 = Atoms('H2', calculator=pinn.get_calc(model))
atoms2 = Atoms('H2', calculator=LennardJones())
nr2 = 100
rrange2 = np.linspace(1,1.9,nr2)
epred = np.zeros(nr2)
etrue = np.zeros(nr2)
for i in range(nr2):
    pos = [[0, 0, 0],
           [rrange2[i], 0, 0]]
    atoms1.set_positions(pos)
    atoms2.set_positions(pos)
    epred[i] = atoms1.get_potential_energy()
    etrue[i] = atoms2.get_potential_energy()
f, (ax1, ax2) = plt.subplots(2,1, gridspec_kw = {'height_ratios':[3, 1]})
ax1.plot(rrange2, epred)
ax1.plot(rrange2, etrue,'--')
ax1.legend(['Prediction', 'Truth'], loc=4)
# Histogram of the sampled pair distances: shows where the model saw data.
_=ax2.hist(np.concatenate(distsample,0), 20, range=(1,1.9))
```
## Molecular dynamics with ASE
```
from ase import units
from ase.io import Trajectory
from ase.md.nvtberendsen import NVTBerendsen
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
# 5x5x5 periodic supercell of H with small random displacements.
atoms = Atoms('H', cell=[2, 2, 2], pbc=True)
atoms = atoms.repeat([5,5,5])
atoms.rattle()
# NOTE(review): set_calculator is deprecated in newer ASE releases in
# favour of `atoms.calc = ...` — confirm against the pinned ASE version.
atoms.set_calculator(pinn.get_calc(model))
MaxwellBoltzmannDistribution(atoms, 300*units.kB)
# NVT (Berendsen) run at 300 K, 0.5 fs timestep, snapshot every 10 steps.
dyn = NVTBerendsen(atoms, 0.5 * units.fs, 300, taut=0.5*100*units.fs)
dyn.attach(Trajectory('ase_nvt.traj', 'w', atoms).write, interval=10)
dyn.run(5000)
```
| github_jupyter |
<h1><center>Assessment 5 on Advanced Data Analysis using Pandas</center></h1>
## **Project 2: Correlation Between the GDP Rate and Unemployment Rate (2019)**
```
import warnings
warnings.simplefilter('ignore', FutureWarning)
import pandas as pd
pip install pandas_datareader
```
# Getting the Datasets
We got the two datasets we will be considering in this project from the Worldbank website. The first dataset, available at http://data.worldbank.org/indicator/NY.GDP.MKTP.CD, lists the GDP of the world's countries in current US dollars, for various years. The use of a common currency allows us to compare GDP values across countries. The other dataset, available at https://data.worldbank.org/indicator/SL.UEM.TOTL.NE.ZS, lists the unemployment rate of the world's countries. The datasets were downloaded as Excel files in June 2021.
```
# World Bank indicator code for GDP in current US dollars.
GDP_INDICATOR = 'NY.GDP.MKTP.CD'
# below are the first five rows of the first dataset, the GDP indicator.
gdpReset= pd.read_excel("API_NY.GDP.MKTP.CD.xls")
gdpReset.head()
# below are the last five rows of the first dataset, the GDP indicator.
gdpReset.tail()
# World Bank indicator code for the national unemployment rate.
UNEMPLOYMENT_INDICATORS = 'SL.UEM.TOTL.NE.ZS'
# below are the first five rows of the second dataset, the unemployment rate indicator.
UnemployReset= pd.read_excel('API_SL.UEM.TOTL.NE.ZS.xls')
UnemployReset.head()
# below are the last five rows of the second dataset, the unemployment rate indicator.
UnemployReset.tail()
```
# Cleaning the data
Inspecting the data with head() and tail() methods shows that for some countries the GDP and unemployment rate values are missing. The data is, therefore, cleaned by removing the rows with unavailable values using the dropna() method.
```
# Drop countries (rows) with missing GDP values.
gdpCountries = gdpReset[0:].dropna()
gdpCountries
# Drop countries (rows) with missing unemployment-rate values.
UnemployCountries = UnemployReset[0:].dropna()
UnemployCountries
```
# Transforming the data
The World Bank reports GDP in US dollars and cents. To make the data easier to read, the GDP is converted to millions of British pounds with the following auxiliary functions, using the average 2020 dollar-to-pound conversion rate provided by http://www.ukforex.co.uk/forex-tools/historical-rate-tools/yearly-average-rates.
```
def roundToMillions(value):
    """Convert a currency amount to whole millions (nearest integer)."""
    millions = value / 1000000
    return round(millions)
def usdToGBP(usd):
    """Convert US dollars to British pounds at the average 2020 rate."""
    # Average 2020 exchange rate: US dollars per British pound.
    usd_per_gbp = 1.284145
    return usd / usd_per_gbp
# New column: GDP converted from current US$ to millions of GBP.
GDP = 'GDP (£m)'
gdpCountries[GDP] = gdpCountries[GDP_INDICATOR].apply(usdToGBP).apply(roundToMillions)
gdpCountries.head()
```
The unnecessary columns can be dropped.
```
# Keep only the country name and the converted GDP column.
COUNTRY = 'Country Name'
headings = [COUNTRY, GDP]
gdpClean = gdpCountries[headings]
gdpClean.head()
```
```
# Column label for the rounded unemployment-rate values.
# BUG FIX: the label was misspelt 'Unemploymet Rate'; all later uses go
# through this constant, so correcting it here fixes every axis/column
# label consistently.
UNEMPLOYMENT = 'Unemployment Rate'
UnemployCountries[UNEMPLOYMENT] = UnemployCountries[UNEMPLOYMENT_INDICATORS].apply(round)
headings = [COUNTRY, UNEMPLOYMENT]
UnempClean = UnemployCountries[headings]
UnempClean.head()
```
# Combining the data
The tables are combined through an inner join merge method on the common 'Country Name' column.
```
# Inner join keeps only countries present in both datasets.
gdpVsUnemp = pd.merge(gdpClean, UnempClean, on=COUNTRY, how='inner')
gdpVsUnemp.head()
```
# Calculating the correlation
To measure if the unemployment rate and the GDP grow together or not, the Spearman rank correlation coefficient is used.
```
from scipy.stats import spearmanr
# Spearman rank correlation between GDP and unemployment rate.
gdpColumn = gdpVsUnemp[GDP]
UnemployColumn = gdpVsUnemp[UNEMPLOYMENT]
(correlation, pValue) = spearmanr(gdpColumn, UnemployColumn)
print('The correlation is', correlation)
# Conventional 5% significance threshold.
if pValue < 0.05:
    print('It is statistically significant.')
else:
    print('It is not statistically significant.')
```
The value shows an inverse (negative) correlation, i.e. richer countries tend to have a lower unemployment rate. A rise by one percentage point of unemployment will reduce real GDP growth by 0.26 percentage points with a delay of 7 lags. Studies have shown that the higher the GDP growth rate of a country, the higher the employment rate; this in turn results in a lower unemployment rate. In general, a negative or inverse correlation between two variables indicates that one variable increases while the other decreases, and vice-versa.
# Visualizing the Data
Measures of correlation can be misleading, so it is best to view the overall picture with a scatterplot. The GDP axis uses a logarithmic scale to better display the vast range of GDP values, from a few million to several billion (million of million) pounds.
```
%matplotlib inline
# Log GDP axis copes with the huge range of country sizes.
gdpVsUnemp.plot(x=GDP, y=UNEMPLOYMENT, kind='scatter', grid=True, logx=True, figsize=(10, 4))
```
The plot shows there is no clear correlation: there are some poor countries with a low unemployment rate and very few moderately rich countries with a high employment rate. However, most extremely rich countries have a low unemployment rate. Besides, countries with around 10 thousand (10^4) to a million (10^6) million pounds GDP have almost the full range of values, from below 5 to over 10 percent, but there are still some countries with more than 100 thousand (10^5) million pounds GDP with a high unemployment rate.
Comparing the 10 poorest countries and the 10 countries with the lowest unemployment rate shows that total GDP is a rather crude measure. The population size should be taken into consideration for a more precise definition of what 'poor' and 'rich' means.
```
# the 10 countries with the lowest GDP
gdpVsUnemp.sort_values(GDP).head(10)
# the 10 countries with the lowest unemployment rate
gdpVsUnemp.sort_values(UNEMPLOYMENT).head(10)
```
# Conclusion
The correlation between real GDP growth and unemployment is very important for policy makers in order to obtain a sustainable rise in living standards. If GDP growth rate is below its natural rate it is indicated to promote employment because this rise in total income will not generate inflationary pressures. In contrast, if the GDP growth is above its natural level, policy makers will decide not to intensively promote the creation of new jobs in order to obtain a sustainable growth rate which will not generate inflation. The correlation coefficient shows that the variables are negatively correlated as predicted by the theory. These values are particularly important for policy makers in order to obtain an optimal relation between unemployment and real GDP growth.
| github_jupyter |
# T1557.001 - LLMNR/NBT-NS Poisoning and SMB Relay
By responding to LLMNR/NBT-NS network traffic, adversaries may spoof an authoritative source for name resolution to force communication with an adversary controlled system. This activity may be used to collect or relay authentication materials.
Link-Local Multicast Name Resolution (LLMNR) and NetBIOS Name Service (NBT-NS) are Microsoft Windows components that serve as alternate methods of host identification. LLMNR is based upon the Domain Name System (DNS) format and allows hosts on the same local link to perform name resolution for other hosts. NBT-NS identifies systems on a local network by their NetBIOS name. (Citation: Wikipedia LLMNR) (Citation: TechNet NetBIOS)
Adversaries can spoof an authoritative source for name resolution on a victim network by responding to LLMNR (UDP 5355)/NBT-NS (UDP 137) traffic as if they know the identity of the requested host, effectively poisoning the service so that the victims will communicate with the adversary controlled system. If the requested host belongs to a resource that requires identification/authentication, the username and NTLMv2 hash will then be sent to the adversary controlled system. The adversary can then collect the hash information sent over the wire through tools that monitor the ports for traffic or through [Network Sniffing](https://attack.mitre.org/techniques/T1040) and crack the hashes offline through [Brute Force](https://attack.mitre.org/techniques/T1110) to obtain the plaintext passwords. In some cases where an adversary has access to a system that is in the authentication path between systems or when automated scans that use credentials attempt to authenticate to an adversary controlled system, the NTLMv2 hashes can be intercepted and relayed to access and execute code against a target system. The relay step can happen in conjunction with poisoning but may also be independent of it. (Citation: byt3bl33d3r NTLM Relaying)(Citation: Secure Ideas SMB Relay)
Several tools exist that can be used to poison name services within local networks such as NBNSpoof, Metasploit, and [Responder](https://attack.mitre.org/software/S0174). (Citation: GitHub NBNSpoof) (Citation: Rapid7 LLMNR Spoofer) (Citation: GitHub Responder)
## Atomic Tests:
Currently, no tests are available for this technique.
## Detection
Monitor <code>HKLM\Software\Policies\Microsoft\Windows NT\DNSClient</code> for changes to the "EnableMulticast" DWORD value. A value of “0” indicates LLMNR is disabled. (Citation: Sternsecurity LLMNR-NBTNS)
Monitor for traffic on ports UDP 5355 and UDP 137 if LLMNR/NetBIOS is disabled by security policy.
Deploy an LLMNR/NBT-NS spoofing detection tool.(Citation: GitHub Conveigh) Monitoring of Windows event logs for event IDs 4697 and 7045 may help in detecting successful relay techniques.(Citation: Secure Ideas SMB Relay)
| github_jupyter |
# Use scikit-learn to recognize hand-written digits with `ibm-watson-machine-learning`
This notebook contains steps and code to demonstrate how to persist and deploy locally trained scikit-learn model in Watson Machine Learning Service. This notebook contains steps and code to work with [ibm-watson-machine-learning](https://pypi.python.org/pypi/ibm-watson-machine-learning) library available in PyPI repository. This notebook introduces commands for getting model and training data, persisting model, deploying model, scoring it, updating the model and redeploying it.
Some familiarity with Python is helpful. This notebook uses Python 3.7 with the ibm-watson-machine-learning package.
## Learning goals
The learning goals of this notebook are:
- Train sklearn model.
- Persist trained model in Watson Machine Learning repository.
- Deploy model for online scoring using client library.
- Score sample records using client library.
## Contents
This notebook contains the following parts:
1. [Setup](#setup)
2. [Explore data and create scikit-learn model](#train)
3. [Persist externally created scikit model](#upload)
4. [Deploy and score](#deploy)
5. [Clean up](#cleanup)
6. [Summary and next steps](#summary)
<a id="setup"></a>
## 1. Set up the environment
Before you use the sample code in this notebook, you must perform the following setup tasks:
- Contact your Cloud Pak for Data administrator and ask for your account credentials
### Connection to WML
Authenticate the Watson Machine Learning service on IBM Cloud Pack for Data. You need to provide platform `url`, your `username` and `password`.
```
# Credentials for Watson Machine Learning on Cloud Pak for Data.
# NOTE(review): never commit real credentials; these are placeholders.
username = 'PASTE YOUR USERNAME HERE'
password = 'PASTE YOUR PASSWORD HERE'
url = 'PASTE THE PLATFORM URL HERE'
wml_credentials = {
    "username": username,
    "password": password,
    "url": url,
    "instance_id": 'openshift',
    "version": '3.5'
}
```
### Install and import the `ibm-watson-machine-learning` package
**Note:** `ibm-watson-machine-learning` documentation can be found <a href="http://ibm-wml-api-pyclient.mybluemix.net/" target="_blank" rel="noopener no referrer">here</a>.
```
!pip install -U ibm-watson-machine-learning
from ibm_watson_machine_learning import APIClient
client = APIClient(wml_credentials)
```
### Working with spaces
First of all, you need to create a space that will be used for your work. If you do not have space already created, you can use `{PLATFORM_URL}/ml-runtime/spaces?context=icp4data` to create one.
- Click New Deployment Space
- Create an empty space
- Go to space `Settings` tab
- Copy `space_id` and paste it below
**Tip**: You can also use SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Space%20management.ipynb).
**Action**: Assign space ID below
```
space_id = 'PASTE YOUR SPACE ID HERE'
```
You can use `list` method to print all existing spaces.
```
client.spaces.list(limit=10)
```
To be able to interact with all resources available in Watson Machine Learning, you need to set **space** which you will be using.
```
client.set.default_space(space_id)
```
<a id="train"></a>
## 2. Explore data and create scikit-learn model
In this section, you will prepare and train handwritten digits model using scikit-learn library.
### 2.1 Explore data
As a first step, you will load the data from scikit-learn sample datasets and perform a basic exploration.
```
import sklearn
from sklearn import datasets
# Load the 8x8 hand-written digits toy dataset.
digits = datasets.load_digits()
```
Loaded toy dataset consists of 8x8 pixels images of hand-written digits.
Let's display first digit data and label using **data** and **target**.
```
print(digits.data[0].reshape((8, 8)))
digits.target[0]
```
In next step, you will count data examples.
```
samples_count = len(digits.images)
print("Number of samples: " + str(samples_count))
```
### 2.2. Create a scikit-learn model
**Prepare data**
In this step, you'll split your data into three datasets:
- train
- test
- score
```
# Split: first 70% train, next 20% test, final 10% reserved for scoring.
train_data = digits.data[: int(0.7*samples_count)]
train_labels = digits.target[: int(0.7*samples_count)]
test_data = digits.data[int(0.7*samples_count): int(0.9*samples_count)]
test_labels = digits.target[int(0.7*samples_count): int(0.9*samples_count)]
score_data = digits.data[int(0.9*samples_count): ]
print("Number of training records: " + str(len(train_data)))
print("Number of testing records : " + str(len(test_data)))
print("Number of scoring records : " + str(len(score_data)))
```
**Create pipeline**
Next, you'll create scikit-learn pipeline.
In this step, you will import the scikit-learn machine learning packages that will be needed in the next cells.
```
from sklearn.pipeline import Pipeline
from sklearn import preprocessing
from sklearn import svm, metrics
```
Standardize features by removing the mean and scaling to unit variance.
```
scaler = preprocessing.StandardScaler()
```
Next, define estimators you want to use for classification. Support Vector Machines (SVM) with radial basis function as kernel is used in the following example.
```
clf = svm.SVC(kernel='rbf')
```
Let's build the pipeline now. This pipeline consists of transformer and an estimator.
```
pipeline = Pipeline([('scaler', scaler), ('svc', clf)])
```
**Train model**
Now, you can train your SVM model by using the previously defined **pipeline** and **train data**.
```
model = pipeline.fit(train_data, train_labels)
```
**Evaluate model**
You can check your **model quality** now. To evaluate the model, use **test data**.
```
predicted = model.predict(test_data)
print("Evaluation report: \n\n%s" % metrics.classification_report(test_labels, predicted))
```
You can tune your model now to achieve better accuracy. For simplicity of this example tuning section is omitted.
<a id="upload"></a>
## 3. Persist locally created scikit-learn model
In this section, you will learn how to store your model in Watson Machine Learning repository by using the IBM Watson Machine Learning SDK.
### 3.1: Publish model
#### Publish model in Watson Machine Learning repository on Cloud.
Define the model name, author name, and email.
```
# Look up the software specification for the Python 3.7 runtime.
sofware_spec_uid = client.software_specifications.get_id_by_name("default_py3.7")
metadata = {
    client.repository.ModelMetaNames.NAME: 'Scikit model',
    client.repository.ModelMetaNames.TYPE: 'scikit-learn_0.23',
    client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid
}
# Store the trained pipeline in the WML repository; passing the training
# data lets the service capture the input schema.
published_model = client.repository.store_model(
    model=model,
    meta_props=metadata,
    training_data=train_data,
    training_target=train_labels)
```
### 3.2: Get model details
```
import json
# NOTE(review): get_model_uid is deprecated in newer clients in favour of
# get_model_id — confirm against the installed client version.
published_model_uid = client.repository.get_model_uid(published_model)
model_details = client.repository.get_details(published_model_uid)
print(json.dumps(model_details, indent=2))
```
### 3.3 Get all models
```
models_details = client.repository.list_models()
```
<a id="deploy"></a>
## 4. Deploy and score
In this section you will learn how to create online scoring and to score a new data record by using the IBM Watson Machine Learning SDK.
### 4.1: Create model deployment
#### Create online deployment for published model
```
# Configuration for an online (REST scoring) deployment of the model.
metadata = {
    client.deployments.ConfigurationMetaNames.NAME: "Deployment of scikit model",
    client.deployments.ConfigurationMetaNames.ONLINE: {}
}
created_deployment = client.deployments.create(published_model_uid, meta_props=metadata)
```
**Note**: Here we use the deployment URL saved in the published_model object. In the next section, we show how to retrieve the deployment URL from the Watson Machine Learning instance.
```
deployment_uid = client.deployments.get_uid(created_deployment)
```
Now you can print an online scoring endpoint.
```
scoring_endpoint = client.deployments.get_scoring_href(created_deployment)
print(scoring_endpoint)
```
You can also list existing deployments.
```
client.deployments.list()
```
### 4.2: Get deployment details
```
client.deployments.get_details(deployment_uid)
```
<a id="score"></a>
### 4.3: Score
You can use the following method to do test scoring request against deployed model.
**Action**: Prepare scoring payload with records to score.
```
# Two records from the held-out scoring split, wrapped in the WML
# scoring payload format.
score_0 = list(score_data[0])
score_1 = list(score_data[1])
scoring_payload = {"input_data": [{"values": [score_0, score_1]}]}
```
Use ``client.deployments.score()`` method to run scoring.
```
predictions = client.deployments.score(deployment_uid, scoring_payload)
print(json.dumps(predictions, indent=2))
```
<a id="cleanup"></a>
## 5. Clean up
If you want to clean up all created assets:
- experiments
- trainings
- pipelines
- model definitions
- models
- functions
- deployments
please follow up this sample [notebook](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Machine%20Learning%20artifacts%20management.ipynb).
<a id="summary"></a>
## 6. Summary and next steps
You successfully completed this notebook! You learned how to use scikit-learn machine learning as well as Watson Machine Learning for model creation and deployment.
Check out our [Online Documentation](https://dataplatform.cloud.ibm.com/docs/content/analyze-data/wml-setup.html) for more samples, tutorials, documentation, how-tos, and blog posts.
### Authors
**Daniel Ryszka**, Software Engineer
Copyright © 2020, 2021 IBM. This notebook and its source code are released under the terms of the MIT License.
| github_jupyter |
### Introduction
This notebook contains a working example to show usage of the API for visual saliency map generation for image blackbox classifiers.
This example will follow an application-like use-case where we define a functionally rigid process that transforms an input image into a number of saliency heat-maps based on our black-box classifier's output, visualizing the heat-maps over the input image.
We will show that it is easy to change which of our API implementations are used in the application without impacting the application's successful execution, first using a sliding-window, occlusion-based algorithm and then using RISE algorithms.
This will necessarily require us to define some black-box classification model for us to introspect the saliency of.
We will fill this role here with a PyTorch Imagenet-pretrained ResNet18 network.
This will be wrapped up in an implementation of the `ClassifyImage` interface for input to our "application."
This sub-classing is a means of standardizing classifier operation with our API in order to support the varying ways classification is performed across toolkits and applications.
### Table of Contents
* [The test image](#The-test-image)
* [The "application"](#The-"application")
* [Black-box Classifier](#Black-box-Classifier)
* [`xaitk_saliency` Swappable Implementations](#xaitk_saliency-Swappable-Implementations)
* [Calling the Application](#Calling-the-Application)
### Miscellaneous
License for test image used may be found in 'COCO-LICENSE.txt'.
#### References
1. Zeiler, Matthew D., and Rob Fergus. "Visualizing and understanding convolutional networks." European conference on computer vision. Springer, Cham, 2014.
2. Petsiuk, Vitali, Abir Das, and Kate Saenko. "Rise: Randomized input sampling for explanation of black-box models." arXiv preprint arXiv:1806.07421 (2018).
<br>
To run this notebook in Colab, please use the link below:
[](https://colab.research.google.com/github/XAITK/xaitk-saliency/blob/master/examples/OcclusionSaliency.ipynb)
# Setup environment
```
# Install runtime dependencies only if they are missing from the kernel.
!python -c "import xaitk_saliency" || pip install xaitk-saliency
!python -c "import torch" || pip install "torch==1.9.0"
!python -c "import torchvision" || pip install "torchvision==0.10.0"
```
# The test image
We will test this application on the following image.
We know that this image contains the ImageNet classes of "boxer" and "tiger cat".
```
import PIL.Image
import matplotlib.pyplot as plt
import urllib.request
# Use JPEG format for inline visualizations here.
%config InlineBackend.figure_format = "jpeg"
# Fetch the test image (contains a boxer and a tiger cat) to disk.
urllib.request.urlretrieve('https://farm1.staticflickr.com/74/202734059_fcce636dcd_z.jpg', "catdog.jpg")
test_image_filename = 'catdog.jpg'
plt.figure(figsize=(12, 8))
plt.axis('off')
_ = plt.imshow(PIL.Image.open(test_image_filename))
```
# The "Application"
The `xaitk-saliency` package provides a relatively high-level API interface for visual saliency map generation.
This interface defines the following input requirements:
* a reference image
* a black-box classifier that performs inference over images
As mentioned above, our high-level API accepts black-box classifiers in terms of the `ClassifyImage` interface.
```
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
from smqtk_classifier import ClassifyImage
from xaitk_saliency import GenerateImageClassifierBlackboxSaliency
def app(
    image_filepath: str,
    # Assuming outputs `nClass` length arrays.
    blackbox_classify: ClassifyImage,
    gen_bb_sal: GenerateImageClassifierBlackboxSaliency,
):
    """Generate and visualize class saliency maps for a single image.

    The image at ``image_filepath`` is loaded into an array, saliency
    maps are produced by ``gen_bb_sal`` using ``blackbox_classify`` as
    the black-box scorer, and the resulting heat-maps are plotted.
    """
    # Decode the image file into an ndarray for the saliency generator.
    reference = np.asarray(PIL.Image.open(image_filepath))
    # One saliency heat-map per class reported by the classifier.
    maps = gen_bb_sal(reference, blackbox_classify)
    print(f"Saliency maps: {maps.shape}")
    visualize_saliency(reference, maps)
def visualize_saliency(ref_image: np.ndarray, sal_maps: np.ndarray) -> None:
    """Plot positive and negative saliency half-maps over the reference image.

    Top row: the reference image followed by the positive-half overlay for
    each class map. Bottom row: the corresponding negative-half overlays.
    Saliency values are clipped to [0, 1] (positive) and [-1, 0] (negative)
    before display.
    """
    # Visualize the saliency heat-maps
    # One subplot column per class map, plus one for the reference image.
    sub_plot_ind = len(sal_maps) + 1
    plt.figure(figsize=(12, 6))
    plt.subplot(2, sub_plot_ind, 1)
    plt.imshow(ref_image)
    plt.axis('off')
    plt.title('Test Image')
    # Some magic numbers here to get colorbar to be roughly the same height
    # as the plotted image.
    colorbar_kwargs = {
        "fraction": 0.046*(ref_image.shape[0]/ref_image.shape[1]),
        "pad": 0.04,
    }
    for i, class_sal_map in enumerate(sal_maps):
        print(f"Class {i} saliency map range: [{class_sal_map.min()}, {class_sal_map.max()}]")
        # Positive half saliency: overlay clipped-to-positive values on the
        # semi-transparent reference image.
        plt.subplot(2, sub_plot_ind, 2+i)
        plt.imshow(ref_image, alpha=0.7)
        plt.imshow(
            np.clip(class_sal_map, 0, 1),
            cmap='jet', alpha=0.3
        )
        plt.clim(0, 1)
        plt.colorbar(**colorbar_kwargs)
        plt.title(f"Class #{i+1} Pos Saliency")
        plt.axis('off')
        # Negative half saliency: same overlay, reversed colormap, for the
        # clipped-to-negative values in the second row.
        plt.subplot(2, sub_plot_ind, sub_plot_ind+2+i)
        plt.imshow(ref_image, alpha=0.7)
        plt.imshow(
            np.clip(class_sal_map, -1, 0),
            cmap='jet_r', alpha=0.3
        )
        plt.clim(-1, 0)
        plt.colorbar(**colorbar_kwargs)
        plt.title(f"Class #{i+1} Neg Saliency")
        plt.axis('off')
```
# Black-box Classifier
In this example we will use a basic PyTorch-based pretrained ResNet18 model and use its softmax output as classification confidences.
Since this model normally outputs 1000 classes, we will, for simplicity of example, constrain the output to the two classes that we happen to know are relevant for our test image.
```
# Set up our "black box" classifier using PyTorch and its ImageNet pretrained ResNet18.
# We will constrain the output of our classifier here to the two classes that are relevant
# to our test image for the purposes of this example.
import os
import torch
from torch.utils.data import DataLoader, Dataset
import torchvision.models as models
import torchvision.transforms as transforms
CUDA_AVAILABLE = torch.cuda.is_available()
model = models.resnet18(pretrained=True)
model = model.eval()
if CUDA_AVAILABLE:
model = model.cuda()
# These are some simple helper functions to perform prediction with this model
model_input_size = (224, 224)
model_mean = [0.485, 0.456, 0.406]
model_loader = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(model_input_size),
transforms.ToTensor(),
transforms.Normalize(
mean=model_mean,
std=[0.229, 0.224, 0.225]
),
])
# Grabbing the class labels associated with this model.
if not os.path.isfile('imagenet_classes.txt'):
!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt -O imagenet_classes.txt
f = open("imagenet_classes.txt", "r")
categories = [s.strip() for s in f.readlines()]
# For this test, we will use an image with both a cat and a dog in it.
# Let's only consider the saliency of two class predictions.
sal_class_labels = ['boxer', 'tiger cat']
sal_class_idxs = [categories.index(lbl) for lbl in sal_class_labels]
class TorchResnet18 (ClassifyImage):
    """ Blackbox model to output the two focus classes. """

    def get_labels(self):
        # The fixed pair of class labels this classifier reports
        # confidences for (defined at module level above).
        return sal_class_labels

    @torch.no_grad()
    def classify_images(self, image_iter):
        # Input may either be an NDArray, or some arbitrary iterable of NDArray images.
        for img in image_iter:
            # Preprocess (resize + normalize) and add a batch dimension.
            image_tensor = model_loader(img).unsqueeze(0)
            if CUDA_AVAILABLE:
                image_tensor = image_tensor.cuda()
            feature_vec = model(image_tensor)
            # Converting feature extractor output to probabilities.
            class_conf = torch.nn.functional.softmax(feature_vec, dim=1).cpu().detach().numpy().squeeze()
            # Only return the confidences for the focus classes
            yield dict(zip(sal_class_labels, class_conf[sal_class_idxs]))

    def get_config(self):
        # Required by a parent class.
        return {}
blackbox_classifier = TorchResnet18()
blackbox_fill = np.uint8(np.asarray(model_mean) * 255)
```
# `xaitk_saliency` Swappable Implementations
Here we will manually import and construct a number of `GenerateImageClassifierBlackboxSaliency` implementations.
Since these all implement the same parent [abstract] class, they effectively all promise to abide by the API it defines.
Thus, we should be able to use them all interchangeably, at least at the functional level.
Their implementations may of course produce different results, as is the point of having varying implementations, but the types and form of the inputs and outputs should be the same.
Diving deeper, the implementations used here follow a perturbation-occlusion sub-pipeline, as shown in this diagram.

As mentioned before, we will first construct a sliding-window, occlusion-based implementation and then RISE implementations.
```
from xaitk_saliency.impls.gen_image_classifier_blackbox_sal.rise import RISEStack
from xaitk_saliency.impls.gen_image_classifier_blackbox_sal.slidingwindow import SlidingWindowStack
gen_slidingwindow = SlidingWindowStack((50, 50), (20, 20), threads=4)
gen_rise = RISEStack(1000, 8, 0.5, seed=0, threads=4, debiased=False)
gen_rise_debiased = RISEStack(1000, 8, 0.5, seed=0, threads=4, debiased=True)
```
# Calling the Application
In the below cells, we will show that we can invoke the same "application" (function) with different `xaitk-saliency` API interface implementations while still successfully executing, visualizing the different results that are generated.
## Sliding Window Method
```
# This generator implementation has a slot for filling background occlusion,
# which our choice of classifier should ideally take for best operation.
gen_slidingwindow.fill = blackbox_fill
app(
test_image_filename,
blackbox_classifier,
gen_slidingwindow,
)
```
## RISE
```
# This generator implementation has a slot for filling background occlusion,
# which our choice of classifier should ideally take for best operation.
gen_rise.fill = blackbox_fill
app(
test_image_filename,
blackbox_classifier,
gen_rise
)
```
## RISE with Debiasing
```
# This generator implementation has a slot for filling background occlusion,
# which our choice of classifier should ideally take for best operation.
gen_rise_debiased.fill = blackbox_fill
app(
test_image_filename,
blackbox_classifier,
gen_rise_debiased,
)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/yohanesnuwara/66DaysOfData/blob/main/D01_PCA.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Principal Component Analysis
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.datasets import load_digits, fetch_lfw_people
from sklearn.preprocessing import StandardScaler
rng = np.random.RandomState(1)
X = np.dot(rng.rand(2, 5), rng.randn(5, 200)).T
plt.scatter(X[:, 0], X[:, 1])
plt.axis('equal')
plt.show()
pca = PCA(n_components=2)
pca.fit(X)
```
PCA components are called eigenvectors.
```
print(pca.components_)
print(pca.explained_variance_)
def draw_vector(v0, v1, ax=None):
    """Draw an arrow from point ``v0`` to point ``v1``.

    Uses the supplied axes, falling back to the current axes when
    ``ax`` is not given.
    """
    if not ax:
        ax = plt.gca()
    arrow_style = dict(
        arrowstyle='->',
        linewidth=2,
        shrinkA=0,
        shrinkB=0,
    )
    ax.annotate('', v1, v0, arrowprops=arrow_style)
# plot data
plt.scatter(X[:, 0], X[:, 1])
for length, vector in zip(pca.explained_variance_, pca.components_):
v = vector * 3 * np.sqrt(length)
draw_vector(pca.mean_, pca.mean_ + v)
plt.axis('equal');
```
## PCA to reduce dimension.
```
pca = PCA(n_components=1)
pca.fit(X)
X_pca = pca.transform(X)
print("original shape: ", X.shape)
print("transformed shape:", X_pca.shape)
X_new = pca.inverse_transform(X_pca)
plt.scatter(X[:, 0], X[:, 1], alpha=0.2)
plt.scatter(X_new[:, 0], X_new[:, 1], alpha=0.8)
plt.axis('equal')
plt.show()
```
## PCA for digit classification.
```
digits = load_digits()
print(digits.data.shape)
pca = PCA(2) # project from 64 to 2 dimensions
projected = pca.fit_transform(digits.data)
print(digits.data.shape)
print(projected.shape)
plt.scatter(projected[:, 0], projected[:, 1],
c=digits.target, edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('jet', 10))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar()
plt.show()
```
Here, PCA can be used to approximate a digit. For instance, a 64-pixel image can be approximated by a dimensionality reduced 8-pixel image. Reconstructing using PCA as a basis function:
$$\text{image}(x) = \text{mean} + x_1 \cdot (\text{basis } 1) + x_2 \cdot (\text{basis } 2) + x_3 \cdot (\text{basis } 3) + \cdots$$
```
def plot_pca_components(x, coefficients=None, mean=0, components=None,
                        imshape=(8, 8), n_components=8, fontsize=12,
                        show_mean=True):
    """Visualize ``x`` as a running sum of mean plus weighted components.

    The top row shows the true image, the (optional) mean, and each basis
    component; the bottom row shows the cumulative approximation after
    adding each coefficient-weighted component.

    Returns the created matplotlib figure.
    """
    # With no coefficients/components given, fall back to the identity
    # basis so the image is reconstructed pixel-by-pixel.
    if coefficients is None:
        coefficients = x
    if components is None:
        components = np.eye(len(coefficients), len(x))
    # Broadcast a scalar mean to the shape of x.
    mean = np.zeros_like(x) + mean
    fig = plt.figure(figsize=(1.2 * (5 + n_components), 1.2 * 2))
    # Grid: 2 rows; columns for true image, optional mean, components,
    # and the final approximation.
    g = plt.GridSpec(2, 4 + bool(show_mean) + n_components, hspace=0.3)

    def show(i, j, x, title=None):
        # Helper: draw image `x` (reshaped to `imshape`) at grid cell (i, j).
        ax = fig.add_subplot(g[i, j], xticks=[], yticks=[])
        ax.imshow(x.reshape(imshape), interpolation='nearest', cmap='binary')
        if title:
            ax.set_title(title, fontsize=fontsize)

    # True image spans both rows of the two leftmost columns.
    show(slice(2), slice(2), x, "True")
    approx = mean.copy()
    # `counter` tracks the next free column index for component panels.
    counter = 2
    if show_mean:
        show(0, 2, np.zeros_like(x) + mean, r'$\mu$')
        show(1, 2, approx, r'$1 \cdot \mu$')
        counter += 1
    for i in range(n_components):
        # Accumulate the next weighted basis component into the approximation.
        approx = approx + coefficients[i] * components[i]
        show(0, i + counter, components[i], r'$c_{0}$'.format(i + 1))
        show(1, i + counter, approx,
             r"${0:.2f} \cdot c_{1}$".format(coefficients[i], i + 1))
        # Draw a '+' between consecutive panels of the running sum.
        if show_mean or i > 0:
            plt.gca().text(0, 1.05, '$+$', ha='right', va='bottom',
                           transform=plt.gca().transAxes, fontsize=fontsize)
    # Final approximation spans both rows of the two rightmost columns.
    show(slice(2), slice(-2, None), approx, "Approx")
    return fig
pca = PCA(n_components=8)
Xproj = pca.fit_transform(digits.data)
fig = plot_pca_components(digits.data[3], Xproj[3],
pca.mean_, pca.components_, show_mean=False)
```
Choose the optimum number of components. 20 is good to account over 90% of variance.
```
pca = PCA().fit(digits.data)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance')
plt.show()
```
## PCA for noise filtering
```
def plot_digits(data):
    """Render the first 40 8x8 digit images of ``data`` in a 4x10 grid."""
    fig, axes = plt.subplots(
        4, 10,
        figsize=(10, 4),
        subplot_kw={'xticks': [], 'yticks': []},
        gridspec_kw=dict(hspace=0.1, wspace=0.1),
    )
    for idx, panel in enumerate(axes.flat):
        # Each row of `data` is a flattened 64-pixel digit image.
        panel.imshow(
            data[idx].reshape(8, 8),
            cmap='binary',
            interpolation='nearest',
            clim=(0, 16),
        )
plot_digits(digits.data)
```
Add random noise.
```
np.random.seed(42)
noisy = np.random.normal(digits.data, 5) # Tweak this number as level of noise
plot_digits(noisy)
```
Make the PCA preserve 50% of the variance. Twelve components are needed to preserve this much variance.
```
pca = PCA(0.50).fit(noisy)
print(pca.n_components_)
# See the number of components given % preservations
x = np.linspace(0.1, 0.9, 19)
comp = [(PCA(i).fit(noisy)).n_components_ for i in x]
plt.plot(x, comp)
plt.xlabel('Preservation')
plt.ylabel('Number of components fit')
plt.show()
components = pca.transform(noisy)
filtered = pca.inverse_transform(components)
plot_digits(filtered)
```
## Eigenfaces
```
faces = fetch_lfw_people(min_faces_per_person=60)
print(faces.target_names)
print(faces.images.shape)
```
There are 3,000 dimensions. Take a look at first 150 components.
```
pca = PCA(150)
pca.fit(faces.data)
```
Look at the first 24 components (eigenvectors or "eigenfaces").
```
fig, axes = plt.subplots(3, 8, figsize=(9, 4),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
ax.imshow(pca.components_[i].reshape(62, 47), cmap='bone')
```
150 is good to account for 90% of variance. Using these 150 components, we would recover most of the essential characteristics of the data.
```
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
# Compute the components and projected faces
pca = PCA(150).fit(faces.data)
components = pca.transform(faces.data)
projected = pca.inverse_transform(components)
```
Reconstructing the full 3,000 pixel input image reduced to 150.
```
# Plot the results
fig, ax = plt.subplots(2, 10, figsize=(10, 2.5),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i in range(10):
ax[0, i].imshow(faces.data[i].reshape(62, 47), cmap='binary_r')
ax[1, i].imshow(projected[i].reshape(62, 47), cmap='binary_r')
ax[0, 0].set_ylabel('full-dim\ninput')
ax[1, 0].set_ylabel('150-dim\nreconstruction');
```
## Feature selection
```
df = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/wine/wine.data',
header=None)
df.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue',
'OD280/OD315 of diluted wines', 'Proline']
df.head()
X, y = df.iloc[:, 1:], df.iloc[:, 0]
scaler = StandardScaler()
X_std = scaler.fit_transform(X)
pca=PCA()
Xt = pca.fit_transform(X_std)
pca.explained_variance_ratio_
```
From the bar plot below, the first 6 principal components are needed to reach 90% of the variance (red curve).
```
plt.bar(range(1,14),pca.explained_variance_ratio_,label='Variance Explained')
plt.step(range(1,14),np.cumsum(pca.explained_variance_ratio_),label='CumSum Variance Explained',c='r')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal component index')
plt.legend(loc='best')
plt.tight_layout()
plt.show()
```
References:
* https://jakevdp.github.io/PythonDataScienceHandbook/05.10-manifold-learning.html
* https://github.com/dishaaagarwal/Dimensionality-Reduction-Techniques
* Other resources:
* https://www.ritchieng.com/machine-learning-dimensionality-reduction-feature-transform/
* https://medium.com/analytics-vidhya/implementing-pca-in-python-with-sklearn-4f757fb4429e
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Custom training: basics
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/customization/custom_training"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/customization/custom_training.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/customization/custom_training.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/customization/custom_training.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
In the previous tutorial, you covered the TensorFlow APIs for automatic differentiation—a basic building block for machine learning.
In this tutorial, you will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.
TensorFlow also includes `tf.keras`—a high-level neural network API that provides useful abstractions to reduce boilerplate and makes TensorFlow easier to use without sacrificing flexibility and performance. We strongly recommend the [tf.Keras API](../../guide/keras/overview.ipynb) for development. However, in this short tutorial you will learn how to train a neural network from first principles to establish a strong foundation.
## Setup
```
import tensorflow as tf
```
## Variables
Tensors in TensorFlow are immutable stateless objects. Machine learning models, however, must have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state, which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language:
```
# Using Python state
x = tf.zeros([10, 10])
x += 2 # This is equivalent to x = x + 2, which does not mutate the original
# value of x
print(x)
```
TensorFlow has stateful operations built-in, and these are often easier than using low-level Python representations for your state. Use `tf.Variable` to represent weights in a model.
A `tf.Variable` object stores a value and implicitly reads from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc.) that manipulate the value stored in a TensorFlow variable.
```
v = tf.Variable(1.0)
# Use Python's `assert` as a debugging statement to test the condition
assert v.numpy() == 1.0
# Reassign the value `v`
v.assign(3.0)
assert v.numpy() == 3.0
# Use `v` in a TensorFlow `tf.square()` operation and reassign
v.assign(tf.square(v))
assert v.numpy() == 9.0
```
Computations using `tf.Variable` are automatically traced when computing gradients. For variables that represent embeddings, TensorFlow will do sparse updates by default, which are more computation and memory efficient.
A `tf.Variable` is also a way to show a reader of your code that a piece of state is mutable.
## Fit a linear model
Let's use the concepts you have learned so far—`Tensor`, `Variable`, and `GradientTape`—to build and train a simple model. This typically involves a few steps:
1. Define the model.
2. Define a loss function.
3. Obtain training data.
4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.
Here, you'll create a simple linear model, `f(x) = x * W + b`, which has two variables: `W` (weights) and `b` (bias). You'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`.
### Define the model
Let's define a simple class to encapsulate the variables and the computation:
```
class Model(object):
    """Simple linear model ``f(x) = W * x + b`` with trainable variables."""

    def __init__(self):
        # Initialize the weights to `5.0` and the bias to `0.0`
        # In practice, these should be initialized to random values (for example, with `tf.random.normal`)
        self.W = tf.Variable(5.0)
        self.b = tf.Variable(0.0)

    def __call__(self, x):
        # Forward pass: element-wise linear transform of `x`.
        return self.W * x + self.b
model = Model()
assert model(3.0).numpy() == 15.0
```
### Define a loss function
A loss function measures how well the output of a model for a given input matches the target output. The goal is to minimize this difference during training. Let's use the standard L2 loss, also known as the least square errors:
```
def loss(target_y, predicted_y):
    """Mean squared error (standard L2 loss) between targets and predictions."""
    squared_residuals = tf.square(target_y - predicted_y)
    return tf.reduce_mean(squared_residuals)
```
### Obtain training data
First, synthesize the training data by adding random Gaussian (Normal) noise to the inputs:
```
TRUE_W = 3.0
TRUE_b = 2.0
NUM_EXAMPLES = 1000
inputs = tf.random.normal(shape=[NUM_EXAMPLES])
noise = tf.random.normal(shape=[NUM_EXAMPLES])
outputs = inputs * TRUE_W + TRUE_b + noise
```
Before training the model, visualize the loss value by plotting the model's predictions in red and the training data in blue:
```
import matplotlib.pyplot as plt
plt.scatter(inputs, outputs, c='b')
plt.scatter(inputs, model(inputs), c='r')
plt.show()
print('Current loss: %1.6f' % loss(model(inputs), outputs).numpy())
```
### Define a training loop
With the network and training data, train the model using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent) to update the weights variable (`W`) and the bias variable (`b`) to reduce the loss. There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer`—our recommended implementation. But in the spirit of building from first principles, here you will implement the basic math yourself with the help of `tf.GradientTape` for automatic differentiation and `tf.assign_sub` for decrementing a value (which combines `tf.assign` and `tf.sub`):
```
def train(model, inputs, outputs, learning_rate):
    """Run one gradient-descent step, updating ``model.W`` and ``model.b``.

    Records the forward pass under a GradientTape, then applies
    ``assign_sub`` updates scaled by ``learning_rate``.
    """
    with tf.GradientTape() as t:
        current_loss = loss(outputs, model(inputs))
    # Gradients of the loss with respect to the two trainable variables.
    dW, db = t.gradient(current_loss, [model.W, model.b])
    model.W.assign_sub(learning_rate * dW)
    model.b.assign_sub(learning_rate * db)
```
Finally, let's repeatedly run through the training data and see how `W` and `b` evolve.
```
model = Model()
# Collect the history of W-values and b-values to plot later
Ws, bs = [], []
epochs = range(10)
for epoch in epochs:
Ws.append(model.W.numpy())
bs.append(model.b.numpy())
current_loss = loss(outputs, model(inputs))
train(model, inputs, outputs, learning_rate=0.1)
print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' %
(epoch, Ws[-1], bs[-1], current_loss))
# Let's plot it all
plt.plot(epochs, Ws, 'r',
epochs, bs, 'b')
plt.plot([TRUE_W] * len(epochs), 'r--',
[TRUE_b] * len(epochs), 'b--')
plt.legend(['W', 'b', 'True W', 'True b'])
plt.show()
```
## Next steps
This tutorial used `tf.Variable` to build and train a simple linear model.
In practice, the high-level APIs—such as `tf.keras`—are much more convenient to build neural networks. `tf.keras` provides higher level building blocks (called "layers"), utilities to save and restore state, a suite of loss functions, a suite of optimization strategies, and more. Read the [TensorFlow Keras guide](../../guide/keras/overview.ipynb) to learn more.
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
import xgboost as xgb
from sklearn.metrics import mean_absolute_error
from datetime import date
import warnings
warnings.filterwarnings(action='ignore')
# set the seed of random number generator, which is useful for creating simulations
# or random objects that can be reproduced.
import random
SEED=3
random.seed(SEED)
np.random.seed(SEED)
```
```
# Load Train Data
train = pd.read_pickle('../data/processed/train_nochanel_uniqueidpos_x_envios_feateng.pkl')
train.shape
train['fecha_venta_norm'] = pd.to_datetime(train['fecha_venta_norm'])
train['fecha_venta_norm'] = train['fecha_venta_norm'].dt.date
```
```
predictors = [
'id_pos',
#'canal',
'competidores',
'ingreso_mediana',
'ingreso_promedio',
'densidad_poblacional',
'pct_0a5',
'pct_5a9',
'pct_10a14',
'pct_15a19',
'pct_20a24',
'pct_25a29',
'pct_30a34',
'pct_35a39',
'pct_40a44',
'pct_45a49',
'pct_50a54',
'pct_55a59',
'pct_60a64',
'pct_65a69',
'pct_70a74',
'pct_75a79',
'pct_80a84',
'pct_85ainf',
'pct_bachelors',
'pct_doctorados',
'pct_secundario',
'pct_master',
'pct_bicicleta',
'pct_omnibus',
'pct_subtes',
'pct_taxi',
'pct_caminata',
'mediana_valor_hogar',
#'unidades_despachadas_sum',
#'unidades_despachadas_max',
#'unidades_despachadas_min',
#'unidades_despachadas_avg',
#'cantidad_envios_max',
#'cantidad_envios_min',
#'cantidad_envios_avg',
#'num_cantidad_envios',
#'unidades_despachadas_sum_acum',
#'unidades_despachadas_sum_acum_3p',
#'unidades_despachadas_sum_acum_6p',
#'unidades_despachadas_max_acum',
#'unidades_despachadas_min_acum',
#'num_cantidad_envios_acum',
#'num_cantidad_envios_acum_3per',
#'num_cantidad_envios_acum_6per',
#'diff_dtventa_dtenvio',
'unidades_before',
'num_ventas_before',
'rel_unidades_num_ventas',
'unidades_acum',
'num_ventas_acum',
'countacum', 'unidades_mean',
'num_ventas_mean',
'unidades_2time_before',
'unidades_diff',
'month',
'diff_dtventa_dtventa_before',
'unidades_pend',
]
train = train[predictors]
```
#### encode catvars
```
# Fit a label encoder for the sales-channel column, with an explicit
# 'NAN' class so unseen/missing channels can be encoded at predict time.
le = preprocessing.LabelEncoder()
# NOTE(review): 'canal' is commented out of `predictors` above, so after
# `train = train[predictors]` this column no longer exists and the two
# `train['canal']` accesses below would raise KeyError — confirm whether
# this cell should be skipped when the channel feature is disabled.
classes = train['canal'].unique()
classes = [i for i in classes]
classes.append('NAN')
le.fit(classes)
# Persist the fitted classes so the same encoding can be reused at inference.
np.save('../models/canal_le.npy', le.classes_)
train['canal'] = le.transform(train['canal'].values)
# Split features (all but last column) from the target (last column).
X, y = train.iloc[:,:-1],train.iloc[:,-1]
```
#### Building final model
```
model = xgb.XGBRegressor(seed = SEED)
model.set_params(objective = 'reg:squarederror')
model.set_params(gpu_id = 0)
model.set_params(max_bin= 16)
model.set_params(tree_method='gpu_hist')
model.set_params(learning_rate = 0.01)
model.set_params(n_estimators = 273)
model.set_params(max_depth = 4)
model.set_params(min_child_weight = 5)
model.set_params(gamma = 0.0)
model.set_params(colsample_bytree = 0.9)
model.set_params(subsample = 0.8)
model.set_params(reg_alpha = 1)
model.fit(X, y)
y_pred = model.predict(X)
print("MAE unidades: ",mean_absolute_error(y, y_pred))
print("median unidades: ", np.median(y))
print("median unidades pred: ", np.median(y_pred))
import pickle
#save model
pickle.dump(model, open("../models/xgboost_013.pkl","wb"))
```
| github_jupyter |
# Lecture 10 - eigenvalues and eigenvectors
An eigenvector $\boldsymbol{x}$ and corresponding eigenvalue $\lambda$ of a square matrix $\boldsymbol{A}$ satisfy
$$
\boldsymbol{A} \boldsymbol{x} = \lambda \boldsymbol{x}
$$
Rearranging this expression,
$$
\left( \boldsymbol{A} - \lambda \boldsymbol{I}\right) \boldsymbol{x} = \boldsymbol{0}
$$
The above equation has solutions (other than $\boldsymbol{x} = \boldsymbol{0}$) if
$$
\det \left( \boldsymbol{A} - \lambda \boldsymbol{I}\right) = 0
$$
Computing the determinant of an $n \times n$ matrix requires solution of an $n$th degree polynomial. It is known how to compute roots of polynomials up to and including degree four (e.g., see <http://en.wikipedia.org/wiki/Quartic_function>). For matrices with $n > 4$, numerical methods must be used to compute eigenvalues and eigenvectors.
An $n \times n$ matrix will have $n$ eigenvalue/eigenvector pairs (eigenpairs).
## Computing eigenvalues with NumPy
NumPy provides a function to compute eigenvalues and eigenvectors. To demonstrate how to compute eigpairs, we first create a $5 \times 5$ symmetric matrix:
```
# Import NumPy and seed random number generator to make generated matrices deterministic
import numpy as np
np.random.seed(1)
# Create a symmetric matrix with random entries
A = np.random.rand(5, 5)
A = A + A.T
print(A)
```
We can compute the eigenvectors and eigenvalues using the NumPy function `linalg.eig`
```
# Compute eigenvectors of A
evalues, evectors = np.linalg.eig(A)
print("Eigenvalues: {}".format(evalues))
print("Eigenvectors: {}".format(evectors))
```
The $i$th column of `evectors` is the $i$th eigenvector.
## Symmetric matrices
Note that the above eigenvalues and the eigenvectors are real valued. This is always the case for symmetric matrices. Another feature of symmetric matrices is that the eigenvectors are orthogonal. We can verify this for the above matrix:
We can also check that the second eigenpair is indeed an eigenpair (Python/NumPy use base 0, so the second eigenpair has index 1):
```
import itertools
# Build pairs (0,0), (0,1), . . . (0, n-1), (1, 2), (1, 3), . . .
pairs = itertools.combinations_with_replacement(range(len(evectors)), 2)
# Compute dot product of eigenvectors x_{i} \cdot x_{j}
for p in pairs:
e0, e1 = p[0], p[1]
print ("Dot product of eigenvectors {}, {}: {}".format(e0, e1, evectors[:, e0].dot(evectors[:, e1])))
print("Testing Ax and (lambda)x: \n {}, \n {}".format(A.dot(evectors[:,1]), evalues[1]*evectors[:,1]))
```
## Non-symmetric matrices
In general, the eigenvalues and eigenvectors of a non-symmetric, real-valued matrix are complex. We can see this by example:
```
B = np.random.rand(5, 5)
evalues, evectors = np.linalg.eig(B)
print("Eigenvalues: {}".format(evalues))
print("Eigenvectors: {}".format(evectors))
```
Unlike symmetric matrices, the eigenvectors are in general not orthogonal, which we can test:
```
# Compute dot product of eigenvectors x_{i} \cdot x_{j}
pairs = itertools.combinations_with_replacement(range(len(evectors)), 2)
for p in pairs:
e0, e1 = p[0], p[1]
print ("Dot product of eigenvectors {}, {}: {}".format(e0, e1, evectors[:, e0].dot(evectors[:, e1])))
```
| github_jupyter |
<a href="http://landlab.github.io"><img style="float: left" src="../../../landlab_header.png"></a>
# Components for modeling overland flow erosion
*(G.E. Tucker, July 2021)*
There are two related components that calculate erosion resulting from surface-water flow, a.k.a. overland flow: `DepthSlopeProductErosion` and `DetachmentLtdErosion`. They were originally created by Jordan Adams to work with the `OverlandFlow` component, which solves for water depth across the terrain. They are similar to the `StreamPowerEroder` and `FastscapeEroder` components in that they calculate erosion resulting from water flow across a topographic surface, but whereas these components require a flow-routing algorithm to create a list of node "receivers", the `DepthSlopeProductErosion` and `DetachmentLtdErosion` components only require a user-identified slope field together with an at-node depth or discharge field (respectively).
## `DepthSlopeProductErosion`
This component represents the rate of erosion, $E$, by surface water flow as:
$$E = k_e (\tau^a - \tau_c^a)$$
where $k_e$ is an erodibility coefficient (with dimensions of velocity per stress$^a$), $\tau$ is bed shear stress, $\tau_c$ is a minimum bed shear stress for any erosion to occur, and $a$ is a parameter that is commonly treated as unity.
For steady, uniform flow,
$$\tau = \rho g H S$$,
with $\rho$ being fluid density, $g$ gravitational acceleration, $H$ local water depth, and $S$ the (positive-downhill) slope gradient (an approximation of the sine of the slope angle).
The component uses a user-supplied slope field (at nodes) together with the water-depth field `surface_water__depth` to calculate $\tau$, and then the above equation to calculate $E$. The component will then modify the `topographic__elevation` field accordingly. If the user wishes to apply material uplift relative to baselevel, an `uplift_rate` parameter can be passed on initialization.
We can learn more about this component by examining its internal documentation. To get an overview of the component, we can examine its *header docstring*: internal documentation provided in the form of a Python docstring that sits just below the class declaration in the source code. This text can be displayed as shown here:
```
from landlab.components import DepthSlopeProductErosion
print(DepthSlopeProductErosion.__doc__)
```
A second useful source of internal documentation for this component is its *init docstring*: a Python docstring that describes the component's class `__init__` method. In Landlab, the init docstrings for components normally provide a list of that component's parameters. Here's how to display the init docstring:
```
print(DepthSlopeProductErosion.__init__.__doc__)
```
### Example
In this example, we load the topography of a small drainage basin, calculate a water-depth field by running overland flow over the topography using the `KinwaveImplicitOverlandFlow` component, and then calculating the resulting erosion.
Note that in order to accomplish this, we need to identify which variable we wish to use for slope gradient. This is not quite as simple as it may sound. An easy way to define slope is as the slope between two adjacent grid nodes. But using this approach means that slope is defined on the grid *links* rathter than *nodes*. To calculate slope magnitude at *nodes*, we'll define a little function below that uses Landlab's `calc_grad_at_link` method to calculate gradients at grid links, then use the `map_link_vector_components_to_node` method to calculate the $x$ and $y$ vector components at each node. With that in hand, we just use the Pythagorean theorem to find the slope magnitude from its vector components.
First, though, some imports we'll need:
```
import copy
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from landlab import imshow_grid
from landlab.components import KinwaveImplicitOverlandFlow
from landlab.grid.mappers import map_link_vector_components_to_node
from landlab.io import read_esri_ascii
def slope_magnitude_at_node(grid, elev):
    """Return the slope magnitude and its x/y components at grid nodes.

    The elevation gradient is computed on grid links, zeroed on inactive
    links, mapped to vector components at the nodes, and finally combined
    into a magnitude with the Pythagorean theorem.
    """
    # Elevation gradient along each link.
    gradient = grid.calc_grad_at_link(elev)
    # Inactive links (attached to a closed-boundary node at either end,
    # or connecting two boundary nodes of any type) must not contribute.
    inactive = grid.status_at_link != grid.BC_LINK_IS_ACTIVE
    gradient[inactive] = 0.0
    # Vector components of the slope at each adjacent node.
    slope_x, slope_y = map_link_vector_components_to_node(grid, gradient)
    # Magnitude from the x and y components.
    magnitude = (slope_x * slope_x + slope_y * slope_y) ** 0.5
    return magnitude, slope_x, slope_y
```
(See [here](https://landlab.readthedocs.io/en/latest/reference/grid/gradients.html#landlab.grid.gradients.calc_grad_at_link) to learn how `calc_grad_at_link` works, and [here](https://landlab.readthedocs.io/en/latest/reference/grid/raster_mappers.html#landlab.grid.raster_mappers.map_link_vector_components_to_node_raster) to learn how
`map_link_vector_components_to_node` works.)
Next, define some parameters we'll need.
To estimate the erodibility coefficient $k_e$, one source is:
[http://milford.nserl.purdue.edu/weppdocs/comperod/](http://milford.nserl.purdue.edu/weppdocs/comperod/)
which reports experiments in rill erosion on agricultural soils. Converting their data into $k_e$, its values are on the order of 1 to 10 $\times 10^{-6}$ (m / s Pa), with threshold ($\tau_c$) values on the order of a few Pa.
```
# Process parameters
n = 0.1  # roughness coefficient, (s/m^(1/3))
dep_exp = 5.0 / 3.0  # depth exponent
R = 72.0  # runoff rate, mm/hr
k_e = 4.0e-6  # erosion coefficient (m/s)/(kg/ms^2)
tau_c = 3.0  # erosion threshold shear stress, Pa

# Run-control parameters
rain_duration = 240.0  # duration of rainfall, s
run_time = 480.0  # duration of run, s
dt = 10.0  # time-step size, s
dem_filename = "../hugo_site_filled.asc"  # DEM relative to the notebook folder

# Derived parameters
num_steps = int(run_time / dt)

# set up arrays to hold discharge and time
# NOTE(review): rain_duration, time_since_storm_start, and discharge are
# defined but not used in the cells shown for this example — confirm
# whether they are leftovers from a longer run script.
time_since_storm_start = np.arange(0.0, dt * (2 * num_steps + 1), dt)
discharge = np.zeros(2 * num_steps + 1)
```
Read an example digital elevation model (DEM) into a Landlab grid and set up the boundaries so that water can only exit out the right edge, representing the watershed outlet.
```
# Read the DEM file as a grid with a 'topographic__elevation' field
(grid, elev) = read_esri_ascii(dem_filename, name="topographic__elevation")
# Configure the boundaries: valid right-edge nodes will be open;
# all NODATA (= -9999) nodes will be closed.
grid.status_at_node[grid.nodes_at_right_edge] = grid.BC_NODE_IS_FIXED_VALUE
grid.status_at_node[np.isclose(elev, -9999.0)] = grid.BC_NODE_IS_CLOSED
```
Now we'll calculate the slope vector components and magnitude, and plot the vectors as quivers on top of a shaded image of the topography:
```
slp_mag, slp_x, slp_y = slope_magnitude_at_node(grid, elev)
imshow_grid(grid, elev)
plt.quiver(grid.x_of_node, grid.y_of_node, slp_x, slp_y)
```
Let's take a look at the slope magnitudes:
```
imshow_grid(grid, slp_mag, colorbar_label="Slope gradient (m/m)")
```
Now we're ready to instantiate a `KinwaveImplicitOverlandFlow` component, with a specified runoff rate and roughness:
```
# Instantiate the component
olflow = KinwaveImplicitOverlandFlow(
grid, runoff_rate=R, roughness=n, depth_exp=dep_exp
)
```
The `DepthSlopeProductErosion` component requires there to be a field called `slope_magnitude` that contains our slope-gradient values, so we will create this field and assign `slp_mag` to it (the `clobber` keyword says it's ok to overwrite this field if it already exists, which prevents generating an error message if you run this cell more than once):
```
grid.add_field("slope_magnitude", slp_mag, at="node", clobber=True)
```
Now we're ready to instantiate a `DepthSlopeProductErosion` component:
```
dspe = DepthSlopeProductErosion(grid, k_e=k_e, tau_crit=tau_c, slope="slope_magnitude")
```
Next, we'll make a copy of the starting terrain for later comparison, then run overland flow and erosion:
```
# Keep a copy of the initial topography so cumulative erosion can be
# computed by differencing afterwards.
starting_elev = elev.copy()

for i in range(num_steps):
    olflow.run_one_step(dt)  # route overland flow, updating water depth
    dspe.run_one_step(dt)    # erode the bed from depth-slope product
    # Refresh the slope values after each erosion step. Writing through
    # slp_mag[:] updates the array in place — presumably the same array
    # held by the 'slope_magnitude' field, so the erosion component sees
    # fresh slopes (TODO confirm add_field kept a reference, not a copy).
    slp_mag[:], slp_x, slp_y = slope_magnitude_at_node(grid, elev)
```
We can visualize the instantaneous erosion rate at the end of the run, in m/s:
```
imshow_grid(grid, dspe._E, colorbar_label="erosion rate (m/s)")
```
We can also inspect the cumulative erosion during the event by differencing the before and after terrain:
```
imshow_grid(grid, starting_elev - elev, colorbar_label="cumulative erosion (m)")
```
Note that because this is a bumpy DEM, much of the erosion has occurred on (probably digital) steps in the channels. But we can see some erosion across the slopes as well.
## `DetachmentLtdErosion`
This component is similar to `DepthSlopeProductErosion` except that it calculates erosion rate from discharge and slope rather than depth and slope. The vertical incision rate, $I$ (equivalent to $E$ in the above; here we are following the notation in the component's documentation) is:
$$I = K Q^m S^n - I_c$$
where $K$ is an erodibility coefficient (with dimensions of velocity per discharge$^m$; specified by parameter `K_sp`), $Q$ is volumetric discharge, $I_c$ is a threshold with dimensions of velocity, and $m$ and $n$ are exponents. (In the erosion literature, the exponents are sometimes treated as empirical parameters, and sometimes set to particular values on theoretical grounds; here we'll just set them to unity.)
The component uses the fields `surface_water__discharge` and `topographic__slope` for $Q$ and $S$, respectively. The component will modify the `topographic__elevation` field accordingly. If the user wishes to apply material uplift relative to baselevel, an `uplift_rate` parameter can be passed on initialization.
Here are the header and constructor docstrings:
```
from landlab.components import DetachmentLtdErosion
print(DetachmentLtdErosion.__doc__)
print(DetachmentLtdErosion.__init__.__doc__)
```
The example below uses the same approach as the previous example, but now using `DetachmentLtdErosion`. Note that the value for parameter $K$ (`K_sp`) is just a guess. Use of exponents $m=n=1$ implies the use of total stream power.
```
# Process parameters
n = 0.1  # roughness coefficient, (s/m^(1/3))
dep_exp = 5.0 / 3.0  # depth exponent
R = 72.0  # runoff rate, mm/hr
K_sp = 1.0e-7  # erosion coefficient (m/s)/(m3/s)
m_sp = 1.0  # discharge exponent
n_sp = 1.0  # slope exponent
I_c = 0.0001  # erosion threshold, m/s

# Run-control parameters
rain_duration = 240.0  # duration of rainfall, s
run_time = 480.0  # duration of run, s
dt = 10.0  # time-step size, s
dem_filename = "../hugo_site_filled.asc"

# Derived parameters
num_steps = int(run_time / dt)

# set up arrays to hold discharge and time
# NOTE(review): time_since_storm_start, discharge, and rain_duration are
# not used later in this cell — confirm whether they can be dropped.
time_since_storm_start = np.arange(0.0, dt * (2 * num_steps + 1), dt)
discharge = np.zeros(2 * num_steps + 1)

# Read the DEM file as a grid with a 'topographic__elevation' field
(grid, elev) = read_esri_ascii(dem_filename, name="topographic__elevation")

# Configure the boundaries: valid right-edge nodes will be open;
# all NODATA (= -9999) nodes will be closed.
grid.status_at_node[grid.nodes_at_right_edge] = grid.BC_NODE_IS_FIXED_VALUE
grid.status_at_node[np.isclose(elev, -9999.0)] = grid.BC_NODE_IS_CLOSED

# DetachmentLtdErosion reads slope from the 'topographic__slope' field,
# so compute node slopes and store them under that name.
slp_mag, slp_x, slp_y = slope_magnitude_at_node(grid, elev)
grid.add_field("topographic__slope", slp_mag, at="node", clobber=True)

# Instantiate the components
olflow = KinwaveImplicitOverlandFlow(
    grid, runoff_rate=R, roughness=n, depth_exp=dep_exp
)
dle = DetachmentLtdErosion(
    grid, K_sp=K_sp, m_sp=m_sp, n_sp=n_sp, entrainment_threshold=I_c
)

# Run overland flow and erosion, refreshing the slope field in place
# (slp_mag[:]) after each erosion step.
starting_elev = elev.copy()
for i in range(num_steps):
    olflow.run_one_step(dt)
    dle.run_one_step(dt)
    slp_mag[:], slp_x, slp_y = slope_magnitude_at_node(grid, elev)

imshow_grid(grid, starting_elev - elev, colorbar_label="cumulative erosion (m)")
```
<hr>
<small>For more Landlab tutorials, click here: <a href="https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html">https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html</a></small>
<hr>
| github_jupyter |
```
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler

# Train on 2019 loans, test on 2020-Q1 loans (out-of-time evaluation).
train_df = pd.read_csv(Path('Resources/2019loans.csv'))
test_df = pd.read_csv(Path('Resources/2020Q1loans.csv'))
train_df.head()
test_df.head()

# Separate target feature for training data
x_train = train_df.drop("loan_status", axis=1)
y_train = train_df["loan_status"]

# Convert categorical data to numeric and separate target feature for training data
y_train = LabelEncoder().fit_transform(train_df["loan_status"])
x_train_numeric = pd.get_dummies(x_train)
x_train_numeric.head()

# Separate target feature for testing data
x_test = test_df.drop("loan_status", axis=1)
y_test = test_df["loan_status"]

# Convert categorical data to numeric and separate target feature for testing data
# NOTE(review): fitting a *second* LabelEncoder on the test labels assumes
# both files contain the same label values (encoded in the same sorted
# order) — confirm, otherwise train/test encodings may disagree.
y_test = LabelEncoder().fit_transform(test_df["loan_status"])
x_test_numeric = pd.get_dummies(x_test)

# Locate dummy columns present in training but absent from testing
# (categories that only occur in the 2019 data).
for variable in x_train_numeric.columns:
    if variable not in x_test_numeric.columns:
        print(variable)
        # add missing dummy variables to testing set, as all-zero columns
        # NOTE(review): this assignment must run inside the `if` so only
        # missing columns are added — confirm indentation in the notebook.
        x_test_numeric[variable] = 0
x_test_numeric.head()
```
## Initial Thought
After reading and learning more about the differences between Random Forests and Logistic Regression, I believe Random Forests will perform better on categorical data, identifying the more important features while increasing the overall accuracy of the result.
```
# Train the Logistic Regression model on the unscaled data and print the model score
# NOTE(review): with the default max_iter, LogisticRegression may warn
# about non-convergence on unscaled features — confirm whether this is
# acceptable for the comparison being made here.
LogisticModel = LogisticRegression().fit(x_train_numeric, y_train)
print(f'training score: {LogisticModel.score(x_train_numeric, y_train)}')
print(f'testing score: {LogisticModel.score(x_test_numeric, y_test)}')

# Train a Random Forest Classifier model and print the model score
ForestModel = RandomForestClassifier().fit(x_train_numeric, y_train)
print(f'training score: {ForestModel.score(x_train_numeric, y_train)}')
print(f'testing score: {ForestModel.score(x_test_numeric, y_test)}')
```
## Result before Scaling
It appears that Random Forest Classifier model performed better than Logistic Regression; However, we can clearly see that our training score for the Forest Model is 1.0, a sign of over fitting!
## After Scaling Thoughts
Once the data is scaled, I believe both models will perform better than in the previous results.
```
# Scale the data: fit the scaler on the training features only, then apply
# the same transform to both sets so no test information leaks into training.
scaler = StandardScaler().fit(x_train_numeric)
x_train_scaled = scaler.transform(x_train_numeric)
x_test_scaled = scaler.transform(x_test_numeric)

# Train the Logistic Regression model on the scaled data and print the model score
LogisticModel = LogisticRegression().fit(x_train_scaled, y_train)
print(f'training score: {LogisticModel.score(x_train_scaled, y_train)}')
print(f'testing score: {LogisticModel.score(x_test_scaled, y_test)}')

# Train a Random Forest Classifier model on the scaled data and print the model score
# (tree-based models are insensitive to feature scaling, so little change
# is expected here relative to the unscaled run)
ForestModel = RandomForestClassifier().fit(x_train_scaled, y_train)
print(f'training score: {ForestModel.score(x_train_scaled, y_train)}')
print(f'testing score: {ForestModel.score(x_test_scaled, y_test)}')
```
## After Scaling Results
After scaling the data and running our models, we can see that the Logistic Regression model performs better than before scaling. On the other hand, the Random Forest Classifier has performed worse than it did before scaling the data, and we can observe how the training data is still overfitting!
| github_jupyter |
# Transfer Learning
Most of the time you won't want to train a whole convolutional network yourself. Modern ConvNets training on huge datasets like ImageNet take weeks on multiple GPUs. Instead, most people use a pretrained network either as a fixed feature extractor, or as an initial network to fine tune. In this notebook, you'll be using [VGGNet](https://arxiv.org/pdf/1409.1556.pdf) trained on the [ImageNet dataset](http://www.image-net.org/) as a feature extractor. Below is a diagram of the VGGNet architecture.
<img src="assets/cnnarchitecture.jpg" width=700px>
VGGNet is great because it's simple and has great performance, coming in second in the ImageNet competition. The idea here is that we keep all the convolutional layers, but replace the final fully connected layers with our own classifier. This way we can use VGGNet as a feature extractor for our images then easily train a simple classifier on top of that. What we'll do is take the first fully connected layer with 4096 units, including thresholding with ReLUs. We can use those values as a code for each image, then build a classifier on top of those codes.
You can read more about transfer learning from [the CS231n course notes](http://cs231n.github.io/transfer-learning/#tf).
## Pretrained VGGNet
We'll be using a pretrained network from https://github.com/machrisaa/tensorflow-vgg. Make sure to clone this repository to the directory you're working from. You'll also want to rename it so it has an underscore instead of a dash.
```
git clone https://github.com/machrisaa/tensorflow-vgg.git tensorflow_vgg
```
This is a really nice implementation of VGGNet, quite easy to work with. The network has already been trained and the parameters are available from this link. **You'll need to clone the repo into the folder containing this notebook.** Then download the parameter file using the next cell.
```
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm

vgg_dir = 'tensorflow_vgg/'
# Make sure the cloned tensorflow_vgg repo exists before downloading into it.
if not isdir(vgg_dir):
    raise Exception("VGG directory doesn't exist!")

class DLProgress(tqdm):
    """tqdm progress bar whose ``hook`` matches urlretrieve's reporthook."""

    # Number of blocks reported on the previous callback.
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        # urlretrieve calls this as hook(block_num, block_size, total_size).
        self.total = total_size
        # Advance the bar by the bytes downloaded since the last call.
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num

# Download the pretrained VGG16 parameters once; skip if already present.
if not isfile(vgg_dir + "vgg16.npy"):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:
        urlretrieve(
            'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',
            vgg_dir + 'vgg16.npy',
            pbar.hook)
else:
    print("Parameter file already exists!")
```
## Flower power
Here we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. This dataset comes from the [TensorFlow inception tutorial](https://www.tensorflow.org/tutorials/image_retraining).
```
import tarfile

dataset_folder_path = 'flower_photos'

class DLProgress(tqdm):
    """tqdm progress bar whose ``hook`` matches urlretrieve's reporthook."""

    # Number of blocks reported on the previous callback.
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        self.total = total_size
        # Advance by the number of bytes received since the last call.
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num

# Download the flowers archive once; skip if it is already on disk.
if not isfile('flower_photos.tar.gz'):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:
        urlretrieve(
            'http://download.tensorflow.org/example_images/flower_photos.tgz',
            'flower_photos.tar.gz',
            pbar.hook)

# Extract only if the dataset folder does not exist yet.
if not isdir(dataset_folder_path):
    with tarfile.open('flower_photos.tar.gz') as tar:
        tar.extractall()
        tar.close()  # redundant: the `with` block already closes the archive
```
## ConvNet Codes
Below, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier.
Here we're using the `vgg16` module from `tensorflow_vgg`. The network takes images of size $224 \times 224 \times 3$ as input. Then it has 5 sets of convolutional layers. The network implemented here has this structure (copied from [the source code](https://github.com/machrisaa/tensorflow-vgg/blob/master/vgg16.py)):
```
self.conv1_1 = self.conv_layer(bgr, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
self.pool3 = self.max_pool(self.conv3_3, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.pool4 = self.max_pool(self.conv4_3, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.pool5 = self.max_pool(self.conv5_3, 'pool5')
self.fc6 = self.fc_layer(self.pool5, "fc6")
self.relu6 = tf.nn.relu(self.fc6)
```
So what we want are the values of the first fully connected layer, after being ReLUd (`self.relu6`). To build the network, we use
```
with tf.Session() as sess:
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
```
This creates the `vgg` object, then builds the graph with `vgg.build(input_)`. Then to get the values from the layer,
```
feed_dict = {input_: images}
codes = sess.run(vgg.relu6, feed_dict=feed_dict)
```
```
import os
import numpy as np
import tensorflow as tf
from tensorflow_vgg import vgg16
from tensorflow_vgg import utils
data_dir = 'flower_photos/'
contents = os.listdir(data_dir)
classes = [each for each in contents if os.path.isdir(data_dir + each)]
```
Below I'm running images through the VGG network in batches.
> **Exercise:** Below, build the VGG network. Also get the codes from the first fully connected layer (make sure you get the ReLUd values).
```
# Set the batch size higher if it fits in your GPU memory
batch_size = 10
codes_list = []  # NOTE(review): appears unused — codes accumulate in `codes` below
labels = []      # one class label per processed image, parallel to `codes` rows
batch = []       # images collected for the current batch

codes = None

with tf.Session() as sess:
    # Build the VGG16 graph once; every batch below reuses it.
    vgg = vgg16.Vgg16()
    input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
    with tf.name_scope("content_vgg"):
        vgg.build(input_)

    for each in classes:
        print("Starting {} images".format(each))
        class_path = data_dir + each
        files = os.listdir(class_path)
        for ii, file in enumerate(files, 1):
            # Add images to the current batch
            # utils.load_image crops the input images for us, from the center
            img = utils.load_image(os.path.join(class_path, file))
            batch.append(img.reshape((1, 224, 224, 3)))
            labels.append(each)

            # Run a full batch, or the final partial batch of this class
            if ii % batch_size == 0 or ii == len(files):
                # Image batch to pass to VGG network
                images = np.concatenate(batch)

                # Values of the first fully connected layer, post-ReLU
                feed_dict = {input_: images}
                codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)

                # Accumulate the codes row-wise.
                # NOTE(review): repeated np.concatenate copies the growing
                # array each time; collecting batches in a list and
                # concatenating once at the end would scale better.
                if codes is None:
                    codes = codes_batch
                else:
                    codes = np.concatenate((codes, codes_batch))

                # Reset to start building the next batch
                batch = []
                print('{} images processed'.format(ii))

# write codes to file (raw float32 bytes via ndarray.tofile)
with open('codes', 'w') as f:
    codes.tofile(f)

# write labels to file, one label per line
import csv
with open('labels', 'w') as f:
    writer = csv.writer(f, delimiter='\n')
    writer.writerow(labels)
```
## Building the Classifier
Now that we have codes for all the images, we can build a simple classifier on top of them. The codes behave just like normal input into a simple neural network. Below I'm going to have you do most of the work.
```
# read codes and labels from file
import csv

with open('labels') as f:
    # The labels file was written with '\n' as the csv delimiter, so each
    # row comes back as a single-element list; squeeze() flattens that away.
    reader = csv.reader(f, delimiter='\n')
    labels = np.array([each for each in reader if len(each) > 0]).squeeze()
with open('codes') as f:
    # Raw float32 bytes written by ndarray.tofile; reshape to one code row
    # per label.
    # NOTE(review): the codes file is opened in text mode here and when
    # written — binary mode ('rb'/'wb') would be more robust; confirm.
    codes = np.fromfile(f, dtype=np.float32)
    codes = codes.reshape((len(labels), -1))
```
### Data prep
As usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels!
> **Exercise:** From scikit-learn, use [LabelBinarizer](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelBinarizer.html) to create one-hot encoded vectors from the labels.
```
labels[:10]

from sklearn import preprocessing

# One-hot encode the string labels: one column per flower class.
lb=preprocessing.LabelBinarizer()
lb.fit(labels)
labels_vecs = lb.transform(labels)  # one-hot encoded labels array
labels_vecs[:10]
```
Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same distribution of classes as the whole data set. The easiest way to accomplish both these goals is to use [`StratifiedShuffleSplit`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) from scikit-learn.
You can create the splitter like so:
```
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
```
Then split the data with
```
splitter = ss.split(x, y)
```
`ss.split` returns a generator of indices. You can pass the indices into the arrays to get the split sets. The fact that it's a generator means you either need to iterate over it, or use `next(splitter)` to get the indices. Be sure to read the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) and the [user guide](http://scikit-learn.org/stable/modules/cross_validation.html#random-permutations-cross-validation-a-k-a-shuffle-split).
> **Exercise:** Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets.
```
from sklearn.model_selection import train_test_split

# NOTE(review): the exercise suggests StratifiedShuffleSplit; plain
# train_test_split shuffles but does not stratify, so class proportions
# can drift slightly between the splits — confirm this is acceptable.
train_x, val_test_x, train_y, val_test_y = train_test_split(codes, labels_vecs, test_size=0.2)
# Split the held-out 20% evenly into validation and test sets.
val_x, test_x, val_y, test_y = train_test_split(val_test_x, val_test_y, test_size=0.5)

print("Train shapes (x, y):", train_x.shape, train_y.shape)
print("Validation shapes (x, y):", val_x.shape, val_y.shape)
print("Test shapes (x, y):", test_x.shape, test_y.shape)
print(test_y) # should be shuffled
```
If you did it right, you should see these sizes for the training sets:
```
Train shapes (x, y): (2936, 4096) (2936, 5)
Validation shapes (x, y): (367, 4096) (367, 5)
Test shapes (x, y): (367, 4096) (367, 5)
```
### Classifier layers
Once you have the convolutional codes, you just need to build a classifier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network.
> **Exercise:** With the codes and labels loaded, build the classifier. Consider the codes as your inputs, each of them are 4096D vectors. You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost.
```
print(codes.shape[1])
print(labels_vecs.shape[1])
def fully_connected_layer(inputs_tensor, num_outputs, activation_fn=tf.nn.relu):
    """Build a dense layer on top of ``inputs_tensor``.

    Weights and biases are initialized from a truncated normal with
    stddev 0.05. When ``activation_fn`` is None the raw affine output
    (the logits) is returned instead of the activated values.
    """
    input_dim = inputs_tensor.get_shape().as_list()[1]
    weights = tf.Variable(tf.truncated_normal([input_dim, num_outputs], stddev=0.05))
    bias = tf.Variable(tf.truncated_normal([num_outputs], stddev=0.05))
    pre_activation = tf.add(tf.matmul(inputs_tensor, weights), bias)
    if activation_fn is None:
        return pre_activation
    return activation_fn(pre_activation)
# Placeholders for the 4096-d VGG codes and one-hot labels.
inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]])

# Classifier layers and operations
fc = fully_connected_layer(inputs_, 256)  # hidden layer: 256 units, ReLU
logits = fully_connected_layer(fc, labels_vecs.shape[1], activation_fn=None)  # output layer logits
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits)  # tensor of per-example cross-entropy values
cost = tf.reduce_mean(cross_entropy)  # cross entropy loss
optimizer = tf.train.AdamOptimizer().minimize(cost)  # training optimizer

# Operations for validation/test accuracy
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
```
### Batches!
Here is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data.
```
def get_batches(x, y, n_batches=10):
    """Yield up to ``n_batches`` (X, Y) batches covering all of x and y.

    The last batch is extended to include any leftover samples so no data
    is dropped. Fix: the original computed ``batch_size = len(x)//n_batches``
    unconditionally, which is 0 when ``len(x) < n_batches`` and made
    ``range`` raise "arg 3 must not be zero"; we now cap ``n_batches`` at
    the number of samples and return immediately for empty input.
    """
    if len(x) == 0:
        return
    # Never ask for more batches than there are samples.
    n_batches = min(n_batches, len(x))
    batch_size = len(x) // n_batches
    for ii in range(0, n_batches * batch_size, batch_size):
        # If we're not on the last batch, grab data with size batch_size
        if ii != (n_batches - 1) * batch_size:
            X, Y = x[ii: ii + batch_size], y[ii: ii + batch_size]
        # On the last batch, grab the rest of the data
        else:
            X, Y = x[ii:], y[ii:]
        yield X, Y
```
### Training
Here, we'll train the network.
> **Exercise:** So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help. Use the `get_batches` function I wrote before to get your batches like `for x, y in get_batches(train_x, train_y)`. Or write your own!
```
epochs = 10
iteration = 0
saver = tf.train.Saver()  # checkpoints the classifier variables

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(epochs):
        for X, y in get_batches(train_x, train_y):
            # One optimizer step on the current mini-batch.
            loss, _ = sess.run([cost, optimizer], feed_dict={
                inputs_: X,
                labels_: y
            })
            print("Epoch: {}/{}".format(epoch+1, epochs),
                  "Iteration: {}".format(iteration),
                  "Training loss: {:.5f}".format(loss))
            iteration += 1

            # Every 5 iterations, evaluate on the full validation set.
            if iteration % 5 == 0:
                val_accuracy = sess.run(accuracy, feed_dict={
                    inputs_: val_x,
                    labels_: val_y
                })
                print("Epoch: {}/{}".format(epoch+1, epochs),
                      "Iteration: {}".format(iteration),
                      "Validation Acc: {:.4f}".format(val_accuracy))
    # Save the trained classifier for the test/prediction cells below.
    saver.save(sess, "checkpoints/flowers.ckpt")
```
### Testing
Below you see the test accuracy. You can also see the predictions returned for images.
```
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: test_x,
labels_: test_y}
test_acc = sess.run(accuracy, feed_dict=feed)
print("Test accuracy: {:.4f}".format(test_acc))
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.ndimage import imread
```
Below, feel free to choose images and see how the trained classifier predicts the flowers in them.
```
# Pick an image to classify and show it.
test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)

# Run this cell if you don't have a vgg graph built
if 'vgg' in globals():
    print('"vgg" object already exists.  Will not create again.')
else:
    # create vgg
    # NOTE(review): the feed below references `input_`, so this assumes
    # `input_` is also still defined whenever `vgg` already exists —
    # confirm both were created together in the earlier cell.
    with tf.Session() as sess:
        input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
        vgg = vgg16.Vgg16()
        vgg.build(input_)

# Run the test image through VGG to get its 4096-d code.
with tf.Session() as sess:
    img = utils.load_image(test_img_path)
    img = img.reshape((1, 224, 224, 3))

    feed_dict = {input_: img}
    code = sess.run(vgg.relu6, feed_dict=feed_dict)

# Restore the trained classifier and predict class probabilities.
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))

    feed = {inputs_: code}
    prediction = sess.run(predicted, feed_dict=feed).squeeze()

# Show the image alongside a horizontal bar chart of the probabilities,
# labeled with the class names learned by the LabelBinarizer.
plt.imshow(test_img)

plt.barh(np.arange(5), prediction)
_ = plt.yticks(np.arange(5), lb.classes_)
```
| github_jupyter |
```
from pymongo import MongoClient
import re, string, nltk, csv
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt

print("Connecting to MongoDB ...")
client = MongoClient('localhost:27017')
db = client['comments']  # the 'comments' database
rawComments = db['rawComments'].find()  # cursor over all raw comment documents
def translate_numbers(word):
    """Replace Arabic-chat-alphabet digits with Latin phonetic letters.

    Tunisian dialect written in Latin script commonly uses digits for
    Arabic sounds that have no single Latin letter, e.g. '7' for 'h'
    and '5' for 'kh'. A single translate() pass is equivalent to the
    chained replacements because no substitution produces a digit.
    """
    phonetic_map = str.maketrans({
        '2': 'a',
        '3': 'a',
        '5': 'kh',
        '7': 'h',
        '8': 'gh',
        '9': 'k',
    })
    return word.translate(phonetic_map)
# Any character followed by one or more repeats of itself.
# Compiled once at module level so calls in the per-comment loop don't
# pay the pattern-cache lookup on every word.
_REPEATED_CHARS = re.compile(r'(.)\1+')

def remove_redundant_letters(word):
    """Collapse runs of a repeated character to a single occurrence.

    e.g. "mahleeeeeh" -> "mahleh". Used to normalize the elongated
    spellings common in social-media comments.
    """
    return _REPEATED_CHARS.sub(r'\1', word)
def cleanComment(comment):
    """Normalize a raw comment into a cleaned, space-joined string.

    Pipeline per word: lowercase, digit-to-phonetic translation,
    punctuation removal, repeated-letter collapsing, then dropping
    words of length <= 2 (generally insignificant, and fewer words
    speeds up the later dictionary matching).
    """
    punctuation_table = str.maketrans('', '', string.punctuation)
    cleaned_words = []
    for raw_word in comment.split():
        # ignoring case by converting to lowercase
        word = raw_word.lower()
        # translate arabic phonetic numbers used in tunisian dialect
        # (for example: '7' --> 'h', '5' --> "kh")
        word = translate_numbers(word)
        # remove punctuation
        word = word.translate(punctuation_table)
        # collapse elongated spellings (e.g. "mahleeeeeh" --> "mahleh")
        word = remove_redundant_letters(word)
        if len(word) > 2:
            cleaned_words.append(word)
    return " ".join(cleaned_words)
def checkSimilarity(word1, word2):
    """Return True when the two words are at most one edit apart."""
    distance = nltk.edit_distance(word1, word2)
    return distance <= 1
def sentimentScore(words, dictionary):
    """Average the dictionary sentiment scores of the matched words.

    Each word is matched against the first dictionary entry within edit
    distance 1; an entry with an empty score string ends the search for
    that word without contributing to the average. Returns 0 when no
    word matched a scored entry.
    """
    total_score = 0
    scored_matches = 0
    for word in words:
        for token, score in dictionary:
            if not checkSimilarity(word, token):
                continue
            if score != "":
                total_score += int(score)
                scored_matches += 1
            # Stop at the first similar dictionary token for this word.
            break
    if scored_matches == 0:
        return total_score
    return total_score / scored_matches
# Load the (token, score) sentiment dictionary from CSV.
dictionary = []
with open('C:/Users/INFOTEC/Desktop/PI/cleanDictionary.csv', 'r', newline='',encoding="utf8") as dictionaryFile:
    dictionaryReader = csv.reader(dictionaryFile, delimiter=',')
    i = 0  # NOTE(review): unused counter — confirm it can be removed
    for row in dictionaryReader:
        # NOTE(review): csv yields strings, so `row[1] == 0` is never True
        # and this filter is dead code — was `row[1] == "0"` (or == "")
        # intended? Confirm before changing, it alters which rows load.
        if (row[1] == 0):
            continue
        dictionary.append([row[0], row[1]])

print("Cleaning comments")
for comment in rawComments:
    # Skip comments already cleaned and stored.
    # NOTE(review): Cursor.count() and Collection.insert() are removed in
    # modern pymongo (use count_documents / insert_one) — confirm the
    # pymongo version pinned for this project.
    existant = db['cleanComments'].find({"id": comment["id"]}).count()
    if existant:
        continue
    cleancomment = cleanComment(comment["review"])
    words = cleancomment.split()
    score = sentimentScore(words, dictionary)
    db.cleanComments.insert({
        "_id": comment["_id"],
        "id": comment["id"],
        "review": cleancomment,
        "score": score
    })
print(rawComments)

# NOTE(review): `cleancomment` still holds only the *last* processed
# comment here, so the word cloud is built from a single comment —
# confirm whether all cleaned comments were meant to be joined instead.
wordcloud = WordCloud(width = 800, height = 800,
                background_color ='black',
                min_font_size = 10).generate(cleancomment)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
wordcloud.to_file('C:/Users/INFOTEC/Desktop/PI/world.png')
```
| github_jupyter |
# Evolution of CRO disclosure over time
```
import sys
import math
from datetime import date
from dateutil.relativedelta import relativedelta
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates
from matplotlib.ticker import MaxNLocator
import seaborn as sns
sys.path.append('../..')
from data import constants
# Setup seaborn
sns.set_theme(style="ticks", rc={'text.usetex' : True})
sns.set_context("paper")
# Read main file
# NOTE(review): absolute local path; presumably one row per annual-report
# paragraph with per-category prediction columns — confirm against the pickle.
df = pd.read_pickle("/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Data/stoxx_inference/Firm_AnnualReport_Paragraphs_with_actual_back.pkl")
# Index by paragraph id; duplicates would silently corrupt later groupbys.
df = df.set_index(["id"])
assert df.index.is_unique, "Index is not unique. Check the data!"
# Read master for scaling
df_master = pd.read_csv("/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Data/stoxx_inference/Firm_AnnualReport.csv")
# Reports per year — assumes is_inferred is a 0/1 flag (TODO confirm).
df_reports_count = df_master.groupby('year')['is_inferred'].sum()
```
## Config
```
# CRO category labels and codes shared project-wide via the constants module.
category_labels = constants.cro_category_labels
category_codes = constants.cro_category_codes
# One sequential color per category.
colors = sns.color_palette("GnBu", len(category_codes))
# Base year for rescaling to levels — used only by the commented-out
# levelizing code in the next cell.
levelize_year = 2015
```
## Evolution over the years
Shows the level of *average number of predicted CRO's per report* (ACROR) over time, in 2015 *levels* (i.e. 2015 scaled to 1).
1. divide by amount of reports in each year
2. then report the levels by dividing by 2015 values
Why 2015? Because of the Paris Agreement, and simply because of missing values otherwise...
```
# Create yearly bins for each category
# After the .T, rows are categories and columns are years.
df_years = df.groupby('year')[[f"{c}_predicted" for c in category_codes]].sum().T
# 1. Divide by number of reports in each year
df_years = df_years / df_reports_count
df_years = df_years.T
# 2. Divide by the first column to get levels
# level_column = df_years[levelize_year]
# df_years = df_years.T / level_column
# df_years = df_years.T
df_years.rename(columns={'PR_predicted': 'Physical risks', 'TR_predicted': 'Transition risks', 'OP_predicted': 'Opportunities (rhs)'}, inplace=True)
# Plot
ax = sns.lineplot(data=df_years[['Physical risks', 'Transition risks']])
# Opportunities are plotted on a secondary y axis (different scale).
ax2 = ax.twinx()
ln2 = sns.lineplot(data=df_years[['Opportunities (rhs)']], ax=ax2, palette=["green"])
# Merge both axes' legend entries into a single legend on the left axis.
h1, l1 = ax.get_legend_handles_labels()
h2, l2 = ln2.get_legend_handles_labels()
ax.legend(h1+h2, l1+l2, loc=0)
ln2.legend_.remove()
ax.set_xlabel('')
# NOTE(review): the bare plt.xlim() call is a no-op (it only returns the
# current limits); the next line sets them.
plt.xlim()
plt.xlim(min(df_years.index), max(df_years.index))
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.show()
fig = ax.get_figure()
fig.savefig('/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Thesis/figures/abs_acror_years.pdf', format='pdf', bbox_inches='tight')
```
## Evolution by country
```
# NOTE(review): index_id and results appear unused in the rest of this
# notebook — kept for safety, but candidates for removal.
index_id = 'country'
results = {}
# Reports per country and year, used below to scale CRO counts.
# The original also evaluated an identical, unassigned pivot_table
# expression here; that dead statement was removed.
reports_count = pd.pivot_table(df_master, values="is_inferred", index=['country'], columns=['year'], aggfunc=np.sum, fill_value = 0)
def plot_grid_by_group(groups, group_column, y_max_values=(20, 500), no_columns=4):
    """Plot a grid of per-group CRO-evolution panels.

    Args:
        groups: Iterable of group values (e.g. country codes or industries).
        group_column: Column of ``df``/``df_master`` to filter/group by.
        y_max_values: (left-axis max, right-axis max), shared by all panels.
            Changed from a mutable list default to a tuple (same values).
        no_columns: Number of subplot columns.

    Returns:
        The matplotlib Figure containing the grid.
    """
    # Reports per group and year, used to scale counts to per-report averages.
    reports_count = pd.pivot_table(df_master, values="is_inferred",
                                   index=[group_column], columns=['year'],
                                   aggfunc=np.sum, fill_value=0)
    rows = math.ceil(len(groups) / no_columns)
    fig, axs = plt.subplots(rows, no_columns,
                            figsize=(12, 15 if rows > 1 else 5),
                            sharex=False,
                            sharey='row')
    axs = axs.ravel()
    legend_handles, legend_labels = [], []
    for idx, group in enumerate(groups):
        ax = axs[idx]
        df_group = df.query(f"{group_column} == @group")
        # Yearly totals per CRO category for this group (categories as rows).
        df_years = df_group.groupby('year')[[f"{c}_predicted" for c in category_codes]].sum().T
        # 1. Divide by number of reports in each year
        df_years = df_years / reports_count.loc[group]
        df_years = df_years.T
        df_years.rename(columns={'PR_predicted': 'Physical risks',
                                 'TR_predicted': 'Transition risks',
                                 'OP_predicted': 'Opportunities (rhs)'}, inplace=True)
        ax = sns.lineplot(data=df_years[['Physical risks', 'Transition risks']], ax=ax)
        # Opportunities go on a secondary axis with its own scale.
        ax2 = ax.twinx()
        ln2 = sns.lineplot(data=df_years[['Opportunities (rhs)']], ax=ax2, palette=["green"])
        h1, l1 = ax.get_legend_handles_labels()
        h2, l2 = ln2.get_legend_handles_labels()
        legend_handles, legend_labels = h1 + h2, l1 + l2
        ax.legend_.remove()
        ln2.legend_.remove()
        ax.set_ylim(0, y_max_values[0])
        ax2.set_ylim(0, y_max_values[1])
        ax.set_xlim(min(df_group.year), max(df_group.year))
        # Two-letter country codes are upper-cased; longer names kept as-is.
        ax.title.set_text(group.upper() if len(group) == 2 else group)
        ax.set_xlabel('')
        # Mimic sharey for the secondary axis: only the rightmost column keeps labels.
        if ((idx + 1) % no_columns) != 0:
            ax2.set_yticklabels([])
    # One shared figure legend. The original called fig.legend inside the
    # loop, stacking one identical legend artist per panel; it is now added
    # once, and the unused local max_y_axis_val was removed.
    if legend_handles:
        fig.legend(legend_handles, legend_labels, loc="upper center",
                   ncol=len(legend_handles))
    fig.subplots_adjust(bottom=0.05 if rows > 2 else 0.25)
    return fig
# Grid over every country, then a zoomed-in grid for four selected markets.
all_countries = sorted(df_master.country.unique())
all_countries_fig = plot_grid_by_group(all_countries, 'country', y_max_values=[20, 500])
all_countries_fig.savefig('/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Thesis/figures/abs_acror_countries.pdf', format='pdf', bbox_inches='tight')
# Selected countries get a higher left-axis cap (more disclosures per report).
selected_countries_fig = plot_grid_by_group(["de", "ch", "fr", "gb"], 'country', y_max_values=[50, 500])
selected_countries_fig.savefig('/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Thesis/figures/abs_acror_selected_countries.pdf', format='pdf', bbox_inches='tight')
```
## Industry
```
# Same grids as above, but grouped by ICB industry classification.
all_industries = sorted(df_master.icb_industry.unique())
all_inudustries_fig = plot_grid_by_group(all_industries, 'icb_industry', y_max_values=[50, 500])
all_inudustries_fig.savefig('/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Thesis/figures/abs_acror_industries.pdf', format='pdf', bbox_inches='tight')
selected_industries_fig = plot_grid_by_group(["10 Technology", "30 Financials", "60 Energy", "65 Utilities"], 'icb_industry', y_max_values=[50, 500])
selected_industries_fig.savefig('/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Thesis/figures/abs_acror_selected_industries.pdf', format='pdf', bbox_inches='tight')
```
| github_jupyter |
# Lesson 3. Coordinate Reference Systems (CRS) & Map Projections
Building off of what we learned in the previous notebook, we'll get to understand an integral aspect of geospatial data: Coordinate Reference Systems.
- 3.1 California County Shapefile
- 3.2 USA State Shapefile
- 3.3 Plot the Two Together
- 3.4 Coordinate Reference System (CRS)
- 3.5 Getting the CRS
- 3.6 Setting the CRS
- 3.7 Transforming or Reprojecting the CRS
- 3.8 Plotting States and Counties Together
- 3.9 Recap
- **Exercise**: CRS Management
<br>
<font color='grey'>
<b>Instructor Notes</b>
- Datasets used
- ‘notebook_data/california_counties/CaliforniaCounties.shp’
- ‘notebook_data/us_states/us_states.shp’
- ‘notebook_data/census/Places/cb_2018_06_place_500k.zip’
- Expected time to complete
- Lecture + Questions: 45 minutes
- Exercises: 10 minutes
</font>
### Import Libraries
```
import pandas as pd
import geopandas as gpd
import matplotlib # base python plotting library
import matplotlib.pyplot as plt # submodule of matplotlib
# To display plots, maps, charts etc in the notebook
%matplotlib inline
```
## 3.1 California County shapefile
Let's go ahead and bring back in our California County shapefile. As before, we can read the file in using `gpd.read_file` and plot it straight away.
```
# Read the CA county polygons and render them in a single color.
counties = gpd.read_file('notebook_data/california_counties/CaliforniaCounties.shp')
counties.plot(color='darkgreen')
```
Even if we have an awesome map like this, sometimes we want to have more geographical context, or we just want additional information. We're going to try **overlaying** our counties GeoDataFrame on our USA states shapefile.
## 3.2 USA State shapefile
We're going to bring in our states geodataframe, and let's do the usual operations to start exploring our data.
```
# Read in states shapefile
states = gpd.read_file('notebook_data/us_states/us_states.shp')
# Look at the first few rows
# NOTE(review): in a notebook only the LAST expression of a cell is
# displayed, so head() and shape produce no visible output here.
states.head()
# Count how many rows and columns we have
states.shape
# Plot our states data
states.plot()
```
You might have noticed that our plot extends beyond the 50 states (which we also saw when we executed the `shape` method). Let's double check what states we have included in our data.
```
states['STATE'].values
```
Beyond the 50 states we seem to have American Samoa, Puerto Rico, Guam, Commonwealth of the Northern Mariana Islands, and United States Virgin Islands included in this geodataframe. To make our map cleaner, let's limit the states to the contiguous states (so we'll also exclude Alaska and Hawaii).
```
# Territories and non-contiguous states to drop from the map
non_contiguous_us = [
    'American Samoa', 'Puerto Rico', 'Guam',
    'Commonwealth of the Northern Mariana Islands',
    'United States Virgin Islands', 'Alaska', 'Hawaii',
]
# Keep only the rows whose STATE value is NOT in the list above
keep_mask = ~states['STATE'].isin(non_contiguous_us)
states_limited = states.loc[keep_mask]
# Draw the contiguous states
states_limited.plot()
```
To prepare for our mapping overlay, let's make our states a nice, light grey color.
```
states_limited.plot(color='lightgrey', figsize=(10,10))
```
## 3.3 Plot the two together
Now that we have both geodataframes in our environment, we can plot both in the same figure.
**NOTE**: To do this, note that we're getting a Matplotlib Axes object (`ax`), then explicitly adding each our layers to it
by providing the `ax=ax` argument to the `plot` method.
```
# Overlay: draw both layers onto the same Axes object.
fig, ax = plt.subplots(figsize=(10,10))
counties.plot(color='darkgreen',ax=ax)
states_limited.plot(color='lightgrey', ax=ax)
```
Oh no, what happened here?
<img src="http://www.pngall.com/wp-content/uploads/2016/03/Light-Bulb-Free-PNG-Image.png" width="20" align=left > **Question** Without looking ahead, what do you think happened?
<br>
<br>
If you look at the numbers we have on the x and y axes in our two plots, you'll see that the county data has much larger numbers than our states data. It's represented in some different type of unit other than decimal degrees!
In fact, that means if we zoom in really close into our plot we'll probably see the states data plotted.
```
%matplotlib inline
# Same overlay, but zoom the view to decimal-degree extents so the states
# layer (which is in degrees) becomes visible.
fig, ax = plt.subplots(figsize=(10,10))
counties.plot(color='darkgreen',ax=ax)
states_limited.plot(color='lightgrey', ax=ax)
ax.set_xlim(-140,-50)
ax.set_ylim(20,50)
```
This is a key issue that you'll have to resolve time and time again when working with geospatial data!
It all revolves around **coordinate reference systems** and **projections**.
----------------------------
## 3.4 Coordinate Reference Systems (CRS)
<img src="http://www.pngall.com/wp-content/uploads/2016/03/Light-Bulb-Free-PNG-Image.png" width="20" align=left > **Question** Do you have experience with Coordinate Reference Systems?
<br><br>As a refresher, a CRS describes how the coordinates in a geospatial dataset relate to locations on the surface of the earth.
A `geographic CRS` consists of:
- a 3D model of the shape of the earth (a **datum**), approximated as a sphere or spheroid (aka ellipsoid)
- the **units** of the coordinate system (e.g, decimal degrees, meters, feet) and
- the **origin** (i.e. the 0,0 location), specified as the meeting of the **equator** and the **prime meridian**
A `projected CRS` consists of
- a geographic CRS
- a **map projection** and related parameters used to transform the geographic coordinates to `2D` space.
- a map projection is a mathematical model used to transform coordinate data
### A Geographic vs Projected CRS
<img src ="https://www.e-education.psu.edu/natureofgeoinfo/sites/www.e-education.psu.edu.natureofgeoinfo/files/image/projection.gif" height="100" width="500">
#### There are many, many CRSs
Theoretically the number of CRSs is unlimited!
Why? Primarily, because there are many different definitions of the shape of the earth, multiplied by many different ways to cast its surface into 2 dimensions. Our understanding of the earth's shape and our ability to measure it has changed greatly over time.
#### Why are CRSs Important?
- You need to know the data about your data (or `metadata`) to use it appropriately.
- All projected CRSs introduce distortion in shape, area, and/or distance. So understanding what CRS best maintains the characteristics you need for your area of interest and your analysis is important.
- Some analysis methods expect geospatial data to be in a projected CRS
- For example, `geopandas` expects a geodataframe to be in a projected CRS for area or distance based analyses.
- Some Python libraries, but not all, implement dynamic reprojection from the input CRS to the required CRS and assume a specific CRS (WGS84) when a CRS is not explicitly defined.
- Most Python spatial libraries, including Geopandas, require geospatial data to be in the same CRS if they are being analysed together.
#### What you need to know when working with CRSs
- What CRSs used in your study area and their main characteristics
- How to identify, or `get`, the CRS of a geodataframe
- How to `set` the CRS of geodataframe (i.e. define the projection)
- How to `transform` the CRS of a geodataframe (i.e. reproject the data)
### Codes for CRSs commonly used with CA data
CRSs are typically referenced by an [EPSG code](http://wiki.gis.com/wiki/index.php/European_Petroleum_Survey_Group).
It's important to know the commonly used CRSs and their EPSG codes for your geographic area of interest.
For example, below is a list of commonly used CRSs for California geospatial data along with their EPSG codes.
##### Geographic CRSs
-`4326: WGS84` (units decimal degrees) - the most commonly used geographic CRS
-`4269: NAD83` (units decimal degrees) - the geographic CRS customized to best fit the USA. This is used by all Census geographic data.
> `NAD83 (epsg:4269)` are approximately the same as `WGS84(epsg:4326)` although locations can differ by up to 1 meter in the continental USA and elsewhere up to 3m. That is not a big issue with census tract data as these data are only accurate within +/-7meters.
##### Projected CRSs
-`5070: CONUS NAD83` (units meters) projected CRS for mapping the entire contiguous USA (CONUS)
-`3857: Web Mercator` (units meters) conformal (shape preserving) CRS used as the default in web mapping
-`3310: CA Albers Equal Area, NAD83` (units meters) projected CRS for CA statewide mapping and spatial analysis
-`26910: UTM Zone 10N, NAD83` (units meters) projected CRS for northern CA mapping & analysis
-`26911: UTM Zone 11N, NAD83` (units meters) projected CRS for Southern CA mapping & analysis
-`102641 to 102646: CA State Plane zones 1-6, NAD83` (units feet) projected CRS used for local analysis.
You can find the full CRS details on the website https://www.spatialreference.org
## 3.5 Getting the CRS
### Getting the CRS of a gdf
GeoPandas GeoDataFrames have a `crs` attribute that returns the CRS of the data.
```
# NOTE(review): only the last expression of a cell is displayed, so the
# counties CRS printout is lost here; run them in separate cells to see both.
counties.crs
states_limited.crs
```
As we can clearly see from those two printouts (even if we don't understand all the content!),
the CRSs of our two datasets are different! **This explains why we couldn't overlay them correctly!**
-----------------------------------------
The above CRS definition specifies
- the name of the CRS (`WGS84`),
- the axis units (`degree`)
- the shape (`datum`),
- and the origin (`Prime Meridian`, and the equator)
- and the area for which it is best suited (`World`)
> Notes:
> - `geocentric` latitude and longitude assume a spherical (round) model of the shape of the earth
> - `geodetic` latitude and longitude assume a spheroidal (ellipsoidal) model, which is closer to the true shape.
> - `geodesy` is the study of the shape of the earth.
**NOTE**: If you print a `crs` call, Python will just display the EPSG code used to initiate the CRS object. Depending on your versions of Geopandas and its dependencies, this may or may not look different from what we just saw above.
```
print(states_limited.crs)
```
## 3.6 Setting the CRS
You can also set the CRS of a gdf using the `crs` attribute. You would set the CRS if is not defined or if you think it is incorrectly defined.
> In desktop GIS terminology setting the CRS is called **defining the CRS**
As an example, let's set the CRS of our data to `None`
```
# first set the CRS to None
# (demonstration only — this erases the CRS metadata, not the coordinates)
states_limited.crs = None
# Check it again
states_limited.crs
```
...hummm...
If a variable has a null value (None) then displaying it without printing it won't display anything!
```
# Check it again
print(states_limited.crs)
```
Now we'll set it back to its correct CRS.
```
# Set it to 4326
# (restores the metadata only; the coordinate values are untouched)
states_limited.crs = "epsg:4326"
# Show it
states_limited.crs
```
**NOTE**: You can set the CRS to anything you like, but **that doesn't make it correct**! This is because setting the CRS does not change the coordinate data; it just tells the software how to interpret it.
## 3.7 Transforming or Reprojecting the CRS
You can transform the CRS of a geodataframe with the `to_crs` method.
> In desktop GIS terminology transforming the CRS is called **projecting the data** (or **reprojecting the data**)
When you do this you want to save the output to a new GeoDataFrame.
```
states_limited_utm10 = states_limited.to_crs( "epsg:26910")
```
Now take a look at the CRS.
```
states_limited_utm10.crs
```
You can see the result immediately by plotting the data.
```
# plot geographic gdf
# NOTE(review): in a single cell only the final plot renders; split into
# separate cells to compare the two projections side by side.
states_limited.plot();
plt.axis('square');
# plot utm gdf
states_limited_utm10.plot();
plt.axis('square')
# Your thoughts here
```
<div style="display:inline-block;vertical-align:top;">
<img src="http://www.pngall.com/wp-content/uploads/2016/03/Light-Bulb-Free-PNG-Image.png" width="30" align=left >
</div>
<div style="display:inline-block;">
#### Questions
</div>
1. What two key differences do you see between the two plots above?
1. Do either of these plotted USA maps look good?
1. Try looking at the common CRS EPSG codes above and see if any of them look better for the whole country than what we have now. Then try transforming the states data to the CRS that you think would be best and plotting it. (Use the code cell two cells below.)
```
# YOUR CODE HERE
```
**Double-click to see solution!**
<!--
#SOLUTION
states_limited_conus = states_limited.to_crs("epsg:5070")
states_limited_conus.plot();
plt.axis('square')
-->
## 3.8 Plotting states and counties together
Now that we know what a CRS is and how we can set them, let's convert our counties GeoDataFrame to match up with our states' CRS.
```
# Convert counties data to UTM Zone 10N, NAD83 (EPSG:26910) so it matches
# the reprojected states layer
counties_utm10 = counties.to_crs("epsg:26910")
counties_utm10.plot()
# Plot it together!
fig, ax = plt.subplots(figsize=(10,10))
states_limited_utm10.plot(color='lightgrey', ax=ax)
counties_utm10.plot(color='darkgreen',ax=ax)
```
Since we know that the best CRS to plot the contiguous US from the above question is 5070, let's also transform and plot everything in that CRS.
```
# Reproject the counties to CONUS Albers (EPSG:5070) and overlay.
# NOTE(review): states_limited_conus is only defined in the hidden exercise
# solution above — this cell fails unless that solution was run first.
counties_conus = counties.to_crs("epsg:5070")
fig, ax = plt.subplots(figsize=(10,10))
states_limited_conus.plot(color='lightgrey', ax=ax)
counties_conus.plot(color='darkgreen',ax=ax)
```
## 3.9 Recap
In this lesson we learned about...
- Coordinate Reference Systems
- Getting the CRS of a geodataframe
- `crs`
- Transforming/reprojecting CRS
- `to_crs`
- Overlaying maps
## Exercise: CRS Management
Now it's time to take a crack at managing the CRS of a new dataset. In the code cell below, write code to:
1. Bring in the CA places data (`notebook_data/census/Places/cb_2018_06_place_500k.zip`)
2. Check if the CRS is EPSG code 26910. If not, transform the CRS
3. Plot the California counties and places together.
To see the solution, double-click the Markdown cell below.
```
# YOUR CODE HERE
```
## Double-click to see solution!
<!--
# SOLUTION
# 1. Bring in the CA places data
california_places = gpd.read_file('zip://notebook_data/census/Places/cb_2018_06_place_500k.zip')
california_places.head()
# 2. Check and transform the CRS if needed
california_places.crs
california_places_utm10 = california_places.to_crs( "epsg:26910")
# 3. Plot the California counties and places together
fig, ax = plt.subplots(figsize=(10,10))
counties_utm10.plot(color='lightgrey', ax=ax)
california_places_utm10 .plot(color='purple',ax=ax)
-->
---
<div style="display:inline-block;vertical-align:middle;">
<a href="https://dlab.berkeley.edu/" target="_blank"><img src ="assets/images/dlab_logo.png" width="75" align="left">
</a>
</div>
<div style="display:inline-block;vertical-align:middle;">
<div style="font-size:larger"> D-Lab @ University of California - Berkeley</div>
<div> Team Geo</div>
</div>
| github_jupyter |
**Chapter 5 – Support Vector Machines**
_This notebook contains all the sample code and solutions to the exercises in chapter 5._
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/jtao/handson-ml2/blob/master/05_support_vector_machines.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
</table>
# Setup
First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20.
```
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
# NOTE(review): lexicographic string comparison of version numbers is
# fragile (e.g. "0.100" < "0.20" as strings); fine for the book's era of
# releases, but packaging.version would be robust.
import sklearn
assert sklearn.__version__ >= "0.20"
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "svm"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure as IMAGES_PATH/<fig_id>.<fig_extension>."""
    target = os.path.join(IMAGES_PATH, f"{fig_id}.{fig_extension}")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
```
# Large margin classification
The next few code cells generate the first figures in chapter 5. The first actual code sample comes after:
```
from sklearn.svm import SVC
from sklearn import datasets
# Binary subset of iris: setosa (0) vs versicolor (1), two petal features.
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)] # petal length, petal width
y = iris["target"]
setosa_or_versicolor = (y == 0) | (y == 1)
X = X[setosa_or_versicolor]
y = y[setosa_or_versicolor]
# SVM Classifier model
# C=inf makes this a hard-margin classifier (no margin violations allowed).
svm_clf = SVC(kernel="linear", C=float("inf"))
svm_clf.fit(X, y)
# Bad models
# Three arbitrary separating lines used for the "bad models" panel.
x0 = np.linspace(0, 5.5, 200)
pred_1 = 5*x0 - 20
pred_2 = x0 - 1.8
pred_3 = 0.1 * x0 + 0.5
def plot_svc_decision_boundary(svm_clf, xmin, xmax):
    """Draw a fitted linear SVC's decision line, margin gutters and support vectors."""
    weights = svm_clf.coef_[0]
    bias = svm_clf.intercept_[0]
    # On the boundary w0*x0 + w1*x1 + b = 0, hence x1 = -(w0*x0 + b) / w1
    xs = np.linspace(xmin, xmax, 200)
    boundary = -(weights[0] * xs + bias) / weights[1]
    margin = 1 / weights[1]
    # Highlight support vectors first so the boundary lines draw on top of them
    support = svm_clf.support_vectors_
    plt.scatter(support[:, 0], support[:, 1], s=180, facecolors='#FFAAAA')
    plt.plot(xs, boundary, "k-", linewidth=2)
    plt.plot(xs, boundary + margin, "k--", linewidth=2)
    plt.plot(xs, boundary - margin, "k--", linewidth=2)
# Left panel: three arbitrary "bad" separators; right panel: the SVM's
# large-margin boundary.
fig, axes = plt.subplots(ncols=2, figsize=(10,2.7), sharey=True)
plt.sca(axes[0])
plt.plot(x0, pred_1, "g--", linewidth=2)
plt.plot(x0, pred_2, "m-", linewidth=2)
plt.plot(x0, pred_3, "r-", linewidth=2)
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs", label="Iris versicolor")
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo", label="Iris setosa")
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="upper left", fontsize=14)
plt.axis([0, 5.5, 0, 2])
plt.sca(axes[1])
plot_svc_decision_boundary(svm_clf, 0, 5.5)
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs")
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo")
plt.xlabel("Petal length", fontsize=14)
plt.axis([0, 5.5, 0, 2])
save_fig("large_margin_classification_plot")
plt.show()
```
# Sensitivity to feature scales
```
# Toy dataset with very different feature scales to show how scaling
# changes the SVM's margin.
Xs = np.array([[1, 50], [5, 20], [3, 80], [5, 60]]).astype(np.float64)
ys = np.array([0, 0, 1, 1])
svm_clf = SVC(kernel="linear", C=100)
svm_clf.fit(Xs, ys)
plt.figure(figsize=(9,2.7))
plt.subplot(121)
plt.plot(Xs[:, 0][ys==1], Xs[:, 1][ys==1], "bo")
plt.plot(Xs[:, 0][ys==0], Xs[:, 1][ys==0], "ms")
plot_svc_decision_boundary(svm_clf, 0, 6)
plt.xlabel("$x_0$", fontsize=20)
plt.ylabel("$x_1$ ", fontsize=20, rotation=0)
plt.title("Unscaled", fontsize=16)
plt.axis([0, 6, 0, 90])
from sklearn.preprocessing import StandardScaler
# Refit on standardized features and plot again for comparison.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(Xs)
svm_clf.fit(X_scaled, ys)
plt.subplot(122)
plt.plot(X_scaled[:, 0][ys==1], X_scaled[:, 1][ys==1], "bo")
plt.plot(X_scaled[:, 0][ys==0], X_scaled[:, 1][ys==0], "ms")
plot_svc_decision_boundary(svm_clf, -2, 2)
plt.xlabel("$x'_0$", fontsize=20)
plt.ylabel("$x'_1$ ", fontsize=20, rotation=0)
plt.title("Scaled", fontsize=16)
plt.axis([-2, 2, -2, 2])
save_fig("sensitivity_to_feature_scales_plot")
```
# Sensitivity to outliers
```
# Two class-0 outliers: the first makes the data linearly inseparable,
# the second drastically shrinks the hard-margin boundary.
X_outliers = np.array([[3.4, 1.3], [3.2, 0.8]])
y_outliers = np.array([0, 0])
Xo1 = np.concatenate([X, X_outliers[:1]], axis=0)
yo1 = np.concatenate([y, y_outliers[:1]], axis=0)
Xo2 = np.concatenate([X, X_outliers[1:]], axis=0)
yo2 = np.concatenate([y, y_outliers[1:]], axis=0)
# Huge C approximates a hard-margin SVM.
svm_clf2 = SVC(kernel="linear", C=10**9)
svm_clf2.fit(Xo2, yo2)
fig, axes = plt.subplots(ncols=2, figsize=(10,2.7), sharey=True)
plt.sca(axes[0])
plt.plot(Xo1[:, 0][yo1==1], Xo1[:, 1][yo1==1], "bs")
plt.plot(Xo1[:, 0][yo1==0], Xo1[:, 1][yo1==0], "yo")
plt.text(0.3, 1.0, "Impossible!", fontsize=24, color="red")
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.annotate("Outlier",
xy=(X_outliers[0][0], X_outliers[0][1]),
xytext=(2.5, 1.7),
ha="center",
arrowprops=dict(facecolor='black', shrink=0.1),
fontsize=16,
)
plt.axis([0, 5.5, 0, 2])
plt.sca(axes[1])
plt.plot(Xo2[:, 0][yo2==1], Xo2[:, 1][yo2==1], "bs")
plt.plot(Xo2[:, 0][yo2==0], Xo2[:, 1][yo2==0], "yo")
plot_svc_decision_boundary(svm_clf2, 0, 5.5)
plt.xlabel("Petal length", fontsize=14)
plt.annotate("Outlier",
xy=(X_outliers[1][0], X_outliers[1][1]),
xytext=(3.2, 0.08),
ha="center",
arrowprops=dict(facecolor='black', shrink=0.1),
fontsize=16,
)
plt.axis([0, 5.5, 0, 2])
save_fig("sensitivity_to_outliers_plot")
plt.show()
```
# Large margin *vs* margin violations
This is the first code example in chapter 5:
```
import numpy as np
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
# Binary task: virginica (1.0) vs the rest (0.0).
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)] # petal length, petal width
y = (iris["target"] == 2).astype(np.float64) # Iris virginica
# Scale then fit a soft-margin linear SVM (C=1).
svm_clf = Pipeline([
("scaler", StandardScaler()),
("linear_svc", LinearSVC(C=1, loss="hinge", random_state=42)),
])
svm_clf.fit(X, y)
svm_clf.predict([[5.5, 1.7]])
```
Now let's generate the graph comparing different regularization settings:
```
# Fit the same linear SVM with a low (C=1) and a high (C=100) penalty, then
# convert the learned parameters back to the unscaled feature space so both
# boundaries can be drawn over the raw data.
scaler = StandardScaler()
svm_clf1 = LinearSVC(C=1, loss="hinge", random_state=42)
svm_clf2 = LinearSVC(C=100, loss="hinge", random_state=42)
scaled_svm_clf1 = Pipeline([
("scaler", scaler),
("linear_svc", svm_clf1),
])
scaled_svm_clf2 = Pipeline([
("scaler", scaler),
("linear_svc", svm_clf2),
])
scaled_svm_clf1.fit(X, y)
scaled_svm_clf2.fit(X, y)
# Convert to unscaled parameters
# The decision function at the unscaled origin gives the unscaled intercept.
b1 = svm_clf1.decision_function([-scaler.mean_ / scaler.scale_])
b2 = svm_clf2.decision_function([-scaler.mean_ / scaler.scale_])
w1 = svm_clf1.coef_[0] / scaler.scale_
w2 = svm_clf2.coef_[0] / scaler.scale_
svm_clf1.intercept_ = np.array([b1])
svm_clf2.intercept_ = np.array([b2])
svm_clf1.coef_ = np.array([w1])
svm_clf2.coef_ = np.array([w2])
# Find support vectors (LinearSVC does not do this automatically)
# t is the ±1 label; points with t*(w·x+b) < 1 lie on or inside the margin.
t = y * 2 - 1
support_vectors_idx1 = (t * (X.dot(w1) + b1) < 1).ravel()
support_vectors_idx2 = (t * (X.dot(w2) + b2) < 1).ravel()
svm_clf1.support_vectors_ = X[support_vectors_idx1]
svm_clf2.support_vectors_ = X[support_vectors_idx2]
fig, axes = plt.subplots(ncols=2, figsize=(10,2.7), sharey=True)
plt.sca(axes[0])
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "g^", label="Iris virginica")
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bs", label="Iris versicolor")
plot_svc_decision_boundary(svm_clf1, 4, 5.9)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="upper left", fontsize=14)
plt.title("$C = {}$".format(svm_clf1.C), fontsize=16)
plt.axis([4, 5.9, 0.8, 2.8])
plt.sca(axes[1])
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "g^")
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bs")
plot_svc_decision_boundary(svm_clf2, 4, 5.99)
plt.xlabel("Petal length", fontsize=14)
plt.title("$C = {}$".format(svm_clf2.C), fontsize=16)
plt.axis([4, 5.9, 0.8, 2.8])
save_fig("regularization_plot")
```
# Non-linear classification
```
# Show that a 1-D dataset that is not linearly separable becomes separable
# after adding the squared feature x1^2.
X1D = np.linspace(-4, 4, 9).reshape(-1, 1)
X2D = np.c_[X1D, X1D**2]
y = np.array([0, 0, 1, 1, 1, 1, 1, 0, 0])
plt.figure(figsize=(10, 3))
plt.subplot(121)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.plot(X1D[:, 0][y==0], np.zeros(4), "bs")
plt.plot(X1D[:, 0][y==1], np.zeros(5), "g^")
plt.gca().get_yaxis().set_ticks([])
plt.xlabel(r"$x_1$", fontsize=20)
plt.axis([-4.5, 4.5, -0.2, 0.2])
plt.subplot(122)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.plot(X2D[:, 0][y==0], X2D[:, 1][y==0], "bs")
plt.plot(X2D[:, 0][y==1], X2D[:, 1][y==1], "g^")
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"$x_2$ ", fontsize=20, rotation=0)
plt.gca().get_yaxis().set_ticks([0, 4, 8, 12, 16])
# A horizontal line in the lifted space separates the classes.
plt.plot([-4.5, 4.5], [6.5, 6.5], "r--", linewidth=3)
plt.axis([-4.5, 4.5, -1, 17])
plt.subplots_adjust(right=1)
save_fig("higher_dimensions_plot", tight_layout=False)
plt.show()
from sklearn.datasets import make_moons
# NOTE: this rebinds X and y to the moons dataset for the rest of the chapter.
X, y = make_moons(n_samples=100, noise=0.15, random_state=42)
def plot_dataset(X, y, axes):
    """Scatter a 2-D binary dataset: blue squares for class 0, green triangles for class 1."""
    negatives = X[y == 0]
    positives = X[y == 1]
    plt.plot(negatives[:, 0], negatives[:, 1], "bs")
    plt.plot(positives[:, 0], positives[:, 1], "g^")
    plt.axis(axes)
    plt.grid(True, which='both')
    plt.xlabel(r"$x_1$", fontsize=20)
    plt.ylabel(r"$x_2$", fontsize=20, rotation=0)
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.show()
from sklearn.datasets import make_moons
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
# Explicit degree-3 polynomial feature expansion + linear SVM — the
# "feature-engineering" alternative to the polynomial kernel below.
polynomial_svm_clf = Pipeline([
("poly_features", PolynomialFeatures(degree=3)),
("scaler", StandardScaler()),
("svm_clf", LinearSVC(C=10, loss="hinge", random_state=42))
])
polynomial_svm_clf.fit(X, y)
def plot_predictions(clf, axes):
    """Shade a classifier's predicted classes and decision-function contours over `axes`."""
    grid_x0, grid_x1 = np.meshgrid(np.linspace(axes[0], axes[1], 100),
                                   np.linspace(axes[2], axes[3], 100))
    points = np.c_[grid_x0.ravel(), grid_x1.ravel()]
    predictions = clf.predict(points).reshape(grid_x0.shape)
    decision = clf.decision_function(points).reshape(grid_x0.shape)
    # Hard class regions first, then the softer decision-function shading on top
    plt.contourf(grid_x0, grid_x1, predictions, cmap=plt.cm.brg, alpha=0.2)
    plt.contourf(grid_x0, grid_x1, decision, cmap=plt.cm.brg, alpha=0.1)
plot_predictions(polynomial_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
save_fig("moons_polynomial_svc_plot")
plt.show()
from sklearn.svm import SVC
# Same idea via the kernel trick: polynomial kernel, low vs high degree.
poly_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="poly", degree=3, coef0=1, C=5))
])
poly_kernel_svm_clf.fit(X, y)
poly100_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="poly", degree=10, coef0=100, C=5))
])
poly100_kernel_svm_clf.fit(X, y)
fig, axes = plt.subplots(ncols=2, figsize=(10.5, 4), sharey=True)
plt.sca(axes[0])
plot_predictions(poly_kernel_svm_clf, [-1.5, 2.45, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.4, -1, 1.5])
plt.title(r"$d=3, r=1, C=5$", fontsize=18)
plt.sca(axes[1])
plot_predictions(poly100_kernel_svm_clf, [-1.5, 2.45, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.4, -1, 1.5])
plt.title(r"$d=10, r=100, C=5$", fontsize=18)
plt.ylabel("")
save_fig("moons_kernelized_polynomial_svc_plot")
plt.show()
def gaussian_rbf(x, landmark, gamma):
    """Gaussian RBF similarity exp(-gamma * ||x - landmark||^2), evaluated
    row-wise: one similarity value per sample in ``x``."""
    squared_distances = np.linalg.norm(x - landmark, axis=1) ** 2
    return np.exp(-gamma * squared_distances)
# --- RBF similarity-features illustration ---------------------------------
# NOTE(review): X1D is a small 1-D dataset (9 points, judging by yk below)
# defined in an earlier cell -- confirm.
gamma = 0.3
x1s = np.linspace(-4.5, 4.5, 200).reshape(-1, 1)
x2s = gaussian_rbf(x1s, -2, gamma)  # similarity to the landmark at x = -2
x3s = gaussian_rbf(x1s, 1, gamma)   # similarity to the landmark at x = +1
# Map each 1-D sample to two similarity features (one per landmark).
XK = np.c_[gaussian_rbf(X1D, -2, gamma), gaussian_rbf(X1D, 1, gamma)]
yk = np.array([0, 0, 1, 1, 1, 1, 1, 0, 0])
plt.figure(figsize=(10.5, 4))
# Left panel: the original 1-D data with the two Gaussian similarity curves.
plt.subplot(121)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.scatter(x=[-2, 1], y=[0, 0], s=150, alpha=0.5, c="red")
plt.plot(X1D[:, 0][yk==0], np.zeros(4), "bs")
plt.plot(X1D[:, 0][yk==1], np.zeros(5), "g^")
plt.plot(x1s, x2s, "g--")
plt.plot(x1s, x3s, "b:")
plt.gca().get_yaxis().set_ticks([0, 0.25, 0.5, 0.75, 1])
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"Similarity", fontsize=14)
plt.annotate(r'$\mathbf{x}$',
             xy=(X1D[3, 0], 0),
             xytext=(-0.5, 0.20),
             ha="center",
             arrowprops=dict(facecolor='black', shrink=0.1),
             fontsize=18,
            )
plt.text(-2, 0.9, "$x_2$", ha="center", fontsize=20)
plt.text(1, 0.9, "$x_3$", ha="center", fontsize=20)
plt.axis([-4.5, 4.5, -0.1, 1.1])
# Right panel: the data in the 2-D similarity space, now linearly separable.
plt.subplot(122)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.plot(XK[:, 0][yk==0], XK[:, 1][yk==0], "bs")
plt.plot(XK[:, 0][yk==1], XK[:, 1][yk==1], "g^")
plt.xlabel(r"$x_2$", fontsize=20)
plt.ylabel(r"$x_3$ ", fontsize=20, rotation=0)
plt.annotate(r'$\phi\left(\mathbf{x}\right)$',
             xy=(XK[3, 0], XK[3, 1]),
             xytext=(0.65, 0.50),
             ha="center",
             arrowprops=dict(facecolor='black', shrink=0.1),
             fontsize=18,
            )
# A separating line drawn by hand in the transformed space.
plt.plot([-0.1, 1.1], [0.57, -0.1], "r--", linewidth=3)
plt.axis([-0.1, 1.1, -0.1, 1.1])
plt.subplots_adjust(right=1)
save_fig("kernel_method_plot")
plt.show()
# Print the two similarity features of one example point.
x1_example = X1D[3, 0]
for landmark in (-2, 1):
    k = gaussian_rbf(np.array([[x1_example]]), np.array([[landmark]]), gamma)
    print("Phi({}, {}) = {}".format(x1_example, landmark, k))
# Gaussian RBF-kernel SVM on the moons data.
rbf_kernel_svm_clf = Pipeline([
    ("scaler", StandardScaler()),
    ("svm_clf", SVC(kernel="rbf", gamma=5, C=0.001))
])
rbf_kernel_svm_clf.fit(X, y)
from sklearn.svm import SVC
# Grid of (gamma, C) combinations to visualize their effect.
gamma1, gamma2 = 0.1, 5
C1, C2 = 0.001, 1000
hyperparams = (gamma1, C1), (gamma1, C2), (gamma2, C1), (gamma2, C2)
svm_clfs = []
for gamma, C in hyperparams:
    rbf_kernel_svm_clf = Pipeline([
        ("scaler", StandardScaler()),
        ("svm_clf", SVC(kernel="rbf", gamma=gamma, C=C))
    ])
    rbf_kernel_svm_clf.fit(X, y)
    svm_clfs.append(rbf_kernel_svm_clf)
# 2x2 grid of decision boundaries, one per hyperparameter combination.
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10.5, 7), sharex=True, sharey=True)
for i, svm_clf in enumerate(svm_clfs):
    plt.sca(axes[i // 2, i % 2])
    plot_predictions(svm_clf, [-1.5, 2.45, -1, 1.5])
    plot_dataset(X, y, [-1.5, 2.45, -1, 1.5])
    gamma, C = hyperparams[i]
    plt.title(r"$\gamma = {}, C = {}$".format(gamma, C), fontsize=16)
    if i in (0, 1):
        plt.xlabel("")
    if i in (1, 3):
        plt.ylabel("")
save_fig("moons_rbf_svc_plot")
plt.show()
```
# Regression
```
# --- Linear SVM regression: toy data y = 4 + 3x + noise -------------------
np.random.seed(42)
m = 50
X = 2 * np.random.rand(m, 1)
y = (4 + 3 * X + np.random.randn(m, 1)).ravel()
from sklearn.svm import LinearSVR
svm_reg = LinearSVR(epsilon=1.5, random_state=42)
svm_reg.fit(X, y)
# Two regressors with different tube widths (epsilon) for comparison.
svm_reg1 = LinearSVR(epsilon=1.5, random_state=42)
svm_reg2 = LinearSVR(epsilon=0.5, random_state=42)
svm_reg1.fit(X, y)
svm_reg2.fit(X, y)
def find_support_vectors(svm_reg, X, y):
    """Return the indices of samples lying on or outside the fitted SVM
    regressor's epsilon-insensitive tube, i.e. where |y - y_hat| >= epsilon."""
    residuals = np.abs(y - svm_reg.predict(X))
    outside_tube = residuals >= svm_reg.epsilon
    return np.argwhere(outside_tube)
# Attach the off-tube sample indices so plot_svm_regression can highlight them.
svm_reg1.support_ = find_support_vectors(svm_reg1, X, y)
svm_reg2.support_ = find_support_vectors(svm_reg2, X, y)
# Point at which the epsilon arrow will be drawn in the left plot below.
eps_x1 = 1
eps_y_pred = svm_reg1.predict([[eps_x1]])
def plot_svm_regression(svm_reg, X, y, axes):
    """Plot an SVM regressor's prediction line, its epsilon-tube (dashed),
    its highlighted support vectors and the training points.

    Expects ``svm_reg.support_`` to hold the support-vector indices
    (as attached by ``find_support_vectors``).
    """
    grid = np.linspace(axes[0], axes[1], 100).reshape(100, 1)
    predictions = svm_reg.predict(grid)
    plt.plot(grid, predictions, "k-", linewidth=2, label=r"$\hat{y}$")
    plt.plot(grid, predictions + svm_reg.epsilon, "k--")
    plt.plot(grid, predictions - svm_reg.epsilon, "k--")
    plt.scatter(X[svm_reg.support_], y[svm_reg.support_], s=180, facecolors='#FFAAAA')
    plt.plot(X, y, "bo")
    plt.xlabel(r"$x_1$", fontsize=18)
    plt.legend(loc="upper left", fontsize=18)
    plt.axis(axes)
# Side-by-side fits with epsilon = 1.5 (wide tube) vs epsilon = 0.5 (narrow).
fig, axes = plt.subplots(ncols=2, figsize=(9, 4), sharey=True)
plt.sca(axes[0])
plot_svm_regression(svm_reg1, X, y, [0, 2, 3, 11])
plt.title(r"$\epsilon = {}$".format(svm_reg1.epsilon), fontsize=18)
plt.ylabel(r"$y$", fontsize=18, rotation=0)
#plt.plot([eps_x1, eps_x1], [eps_y_pred, eps_y_pred - svm_reg1.epsilon], "k-", linewidth=2)
# Double-headed arrow marking the tube half-width epsilon.
plt.annotate(
    '', xy=(eps_x1, eps_y_pred), xycoords='data',
    xytext=(eps_x1, eps_y_pred - svm_reg1.epsilon),
    textcoords='data', arrowprops={'arrowstyle': '<->', 'linewidth': 1.5}
)
plt.text(0.91, 5.6, r"$\epsilon$", fontsize=20)
plt.sca(axes[1])
plot_svm_regression(svm_reg2, X, y, [0, 2, 3, 11])
plt.title(r"$\epsilon = {}$".format(svm_reg2.epsilon), fontsize=18)
save_fig("svm_regression_plot")
plt.show()
# Quadratic toy data for the kernelized (polynomial) SVR below.
np.random.seed(42)
m = 100
X = 2 * np.random.rand(m, 1) - 1
y = (0.2 + 0.1 * X + 0.5 * X**2 + np.random.randn(m, 1)/10).ravel()
```
**Note**: to be future-proof, we set `gamma="scale"`, as this will be the default value in Scikit-Learn 0.22.
```
from sklearn.svm import SVR
# Polynomial-kernel SVR; gamma="scale" pins the future sklearn default.
svm_poly_reg = SVR(kernel="poly", degree=2, C=100, epsilon=0.1, gamma="scale")
svm_poly_reg.fit(X, y)
from sklearn.svm import SVR
# High-C (little regularization) vs low-C (strong regularization) comparison.
svm_poly_reg1 = SVR(kernel="poly", degree=2, C=100, epsilon=0.1, gamma="scale")
svm_poly_reg2 = SVR(kernel="poly", degree=2, C=0.01, epsilon=0.1, gamma="scale")
svm_poly_reg1.fit(X, y)
svm_poly_reg2.fit(X, y)
fig, axes = plt.subplots(ncols=2, figsize=(9, 4), sharey=True)
plt.sca(axes[0])
plot_svm_regression(svm_poly_reg1, X, y, [-1, 1, 0, 1])
plt.title(r"$degree={}, C={}, \epsilon = {}$".format(svm_poly_reg1.degree, svm_poly_reg1.C, svm_poly_reg1.epsilon), fontsize=18)
plt.ylabel(r"$y$", fontsize=18, rotation=0)
plt.sca(axes[1])
plot_svm_regression(svm_poly_reg2, X, y, [-1, 1, 0, 1])
plt.title(r"$degree={}, C={}, \epsilon = {}$".format(svm_poly_reg2.degree, svm_poly_reg2.C, svm_poly_reg2.epsilon), fontsize=18)
save_fig("svm_with_polynomial_kernel_plot")
plt.show()
```
# Under the hood
```
# Iris data restricted to two features; binary target = "is Iris virginica".
# NOTE(review): `datasets` is imported from sklearn in a later cell of this
# export -- it was presumably already in scope; confirm execution order.
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)]  # petal length, petal width
y = (iris["target"] == 2).astype(np.float64)  # Iris virginica
from mpl_toolkits.mplot3d import Axes3D
def plot_3D_decision_function(ax, w, b, x1_lim=[4, 6], x2_lim=[0.8, 2.8]):
    """Draw a linear SVM's decision function h(x) = w.x + b as a 3-D surface
    over the (petal length, petal width) plane, with the decision boundary
    (h = 0, solid) and the margins (h = +/-1, dashed) drawn on the z = 0 plane.

    Args:
        ax: a 3-D matplotlib Axes.
        w, b: weight vector (length 2) and intercept of a trained linear SVM.
        x1_lim, x2_lim: plotting limits for the two features.

    Relies on the globals X and y (Iris data) for the scatter points.
    """
    # Keep only samples inside the x1 plotting window.
    x1_in_bounds = (X[:, 0] > x1_lim[0]) & (X[:, 0] < x1_lim[1])
    X_crop = X[x1_in_bounds]
    y_crop = y[x1_in_bounds]
    x1s = np.linspace(x1_lim[0], x1_lim[1], 20)
    x2s = np.linspace(x2_lim[0], x2_lim[1], 20)
    x1, x2 = np.meshgrid(x1s, x2s)
    xs = np.c_[x1.ravel(), x2.ravel()]
    # Decision-function values over the grid.
    df = (xs.dot(w) + b).reshape(x1.shape)
    m = 1 / np.linalg.norm(w)  # margin half-width (not used below)
    # Lines where h = 0 (boundary) and h = +/-1 (margins), solved for x2.
    boundary_x2s = -x1s*(w[0]/w[1])-b/w[1]
    margin_x2s_1 = -x1s*(w[0]/w[1])-(b-1)/w[1]
    margin_x2s_2 = -x1s*(w[0]/w[1])-(b+1)/w[1]
    # z = 0 reference plane. NOTE(review): x1s here is 1-D while x2 is the
    # 2-D mesh; this matches the original notebook -- confirm it renders as
    # intended with the installed matplotlib version.
    ax.plot_surface(x1s, x2, np.zeros_like(x1),
                    color="b", alpha=0.2, cstride=100, rstride=100)
    ax.plot(x1s, boundary_x2s, 0, "k-", linewidth=2, label=r"$h=0$")
    ax.plot(x1s, margin_x2s_1, 0, "k--", linewidth=2, label=r"$h=\pm 1$")
    ax.plot(x1s, margin_x2s_2, 0, "k--", linewidth=2)
    ax.plot(X_crop[:, 0][y_crop==1], X_crop[:, 1][y_crop==1], 0, "g^")
    ax.plot_wireframe(x1, x2, df, alpha=0.3, color="k")
    ax.plot(X_crop[:, 0][y_crop==0], X_crop[:, 1][y_crop==0], 0, "bs")
    ax.axis(x1_lim + x2_lim)
    ax.text(4.5, 2.5, 3.8, "Decision function $h$", fontsize=16)
    ax.set_xlabel(r"Petal length", fontsize=16, labelpad=10)
    ax.set_ylabel(r"Petal width", fontsize=16, labelpad=10)
    ax.set_zlabel(r"$h = \mathbf{w}^T \mathbf{x} + b$", fontsize=18, labelpad=5)
    ax.legend(loc="upper left", fontsize=16)
# Render the 3-D view using the weights of svm_clf2.
# NOTE(review): svm_clf2 is defined in a later cell of this export -- it was
# presumably fitted before this cell ran; confirm execution order.
fig = plt.figure(figsize=(11, 6))
ax1 = fig.add_subplot(111, projection='3d')
plot_3D_decision_function(ax1, w=svm_clf2.coef_[0], b=svm_clf2.intercept_[0])
save_fig("iris_3D_plot")
plt.show()
```
# Small weight vector results in a large margin
```
def plot_2D_decision_function(w, b, ylabel=True, x1_lim=[-3, 3]):
    """Illustrate how a smaller weight w yields a larger margin: plot the
    1-D decision function w*x1 + b together with the points where it
    crosses +/-1 (at x1 = +/-1/w) and the resulting margin segment."""
    xs = np.linspace(x1_lim[0], x1_lim[1], 200)
    decision_values = w * xs + b
    margin = 1 / w
    plt.plot(xs, decision_values)
    plt.plot(x1_lim, [1, 1], "k:")
    plt.plot(x1_lim, [-1, -1], "k:")
    plt.axhline(y=0, color='k')
    plt.axvline(x=0, color='k')
    plt.plot([margin, margin], [0, 1], "k--")
    plt.plot([-margin, -margin], [0, -1], "k--")
    plt.plot([-margin, margin], [0, 0], "k-o", linewidth=3)
    plt.axis(x1_lim + [-2, 2])
    plt.xlabel(r"$x_1$", fontsize=16)
    if ylabel:
        plt.ylabel(r"$w_1 x_1$ ", rotation=0, fontsize=16)
    plt.title(r"$w_1 = {}$".format(w), fontsize=16)
# Smaller w_1 -> the +/-1/w_1 margin points move apart -> larger margin.
fig, axes = plt.subplots(ncols=2, figsize=(9, 3.2), sharey=True)
plt.sca(axes[0])
plot_2D_decision_function(1, 0)
plt.sca(axes[1])
plot_2D_decision_function(0.5, 0, ylabel=False)
save_fig("small_w_large_margin_plot")
plt.show()
from sklearn.svm import SVC
from sklearn import datasets
# Refit a linear SVC on the Iris virginica task and classify one flower.
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)]  # petal length, petal width
y = (iris["target"] == 2).astype(np.float64)  # Iris virginica
svm_clf = SVC(kernel="linear", C=1)
svm_clf.fit(X, y)
svm_clf.predict([[5.3, 1.3]])
```
# Hinge loss
```
# Hinge loss: max(0, 1 - t) as a function of the margin score t.
t = np.linspace(-2, 4, 200)
h = np.where(1 - t < 0, 0, 1 - t)  # max(0, 1-t)
plt.figure(figsize=(5,2.8))
plt.plot(t, h, "b-", linewidth=2, label="$max(0, 1 - t)$")
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.yticks(np.arange(-1, 2.5, 1))
plt.xlabel("$t$", fontsize=16)
plt.axis([-2, 4, -1, 2.5])
plt.legend(loc="upper right", fontsize=16)
save_fig("hinge_plot")
plt.show()
```
# Extra material
## Training time
```
# How SVC training time varies with the stopping tolerance `tol`.
X, y = make_moons(n_samples=1000, noise=0.4, random_state=42)
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bs")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "g^")
import time
tol = 0.1
tols = []
times = []
for i in range(10):
    # Each iteration trains with a 10x smaller tolerance than the last.
    svm_clf = SVC(kernel="poly", gamma=3, C=10, tol=tol, verbose=1)
    t1 = time.time()
    svm_clf.fit(X, y)
    t2 = time.time()
    times.append(t2-t1)
    tols.append(tol)
    print(i, tol, t2-t1)
    tol /= 10
plt.semilogx(tols, times, "bo-")
plt.xlabel("Tolerance", fontsize=16)
plt.ylabel("Time (seconds)", fontsize=16)
plt.grid(True)
plt.show()
```
## Linear SVM classifier implementation using Batch Gradient Descent
```
# Training set: two Iris features; column-vector target (1 = Iris virginica)
# as required by the batch-gradient-descent implementation below.
X = iris["data"][:, (2, 3)]  # petal length, petal width
y = (iris["target"] == 2).astype(np.float64).reshape(-1, 1)  # Iris virginica
from sklearn.base import BaseEstimator
class MyLinearSVC(BaseEstimator):
    """Linear SVM classifier trained by batch gradient descent on the
    (sub)gradient of the hinge-loss objective
    J = 1/2 ||w||^2 + C * sum over margin violators of (1 - t_i (w.x_i + b)).

    Args:
        C: hinge-loss weight (inverse regularization strength).
        eta0, eta_d: learning-rate schedule eta(epoch) = eta0 / (epoch + eta_d).
        n_epochs: number of full-batch gradient steps.
        random_state: seed for the random weight initialization.
    """
    def __init__(self, C=1, eta0=1, eta_d=10000, n_epochs=1000, random_state=None):
        self.C = C
        self.eta0 = eta0
        self.n_epochs = n_epochs
        self.random_state = random_state
        self.eta_d = eta_d
    def eta(self, epoch):
        """Decaying learning rate for the given epoch."""
        return self.eta0 / (epoch + self.eta_d)
    def fit(self, X, y):
        """Fit on X of shape (m, n) and y of shape (m, 1), labels in {0, 1}."""
        # Random initialization
        if self.random_state:
            np.random.seed(self.random_state)
        w = np.random.randn(X.shape[1], 1) # n feature weights
        b = 0
        m = len(X)
        t = y * 2 - 1  # -1 if t==0, +1 if t==1
        X_t = X * t    # rows pre-multiplied by their targets: t_i * x_i
        self.Js=[]     # cost at each epoch, kept for plotting convergence
        # Training
        for epoch in range(self.n_epochs):
            # Margin violators (t_i (w.x_i + b) < 1) drive the subgradient.
            support_vectors_idx = (X_t.dot(w) + t * b < 1).ravel()
            X_t_sv = X_t[support_vectors_idx]
            t_sv = t[support_vectors_idx]
            J = 1/2 * np.sum(w * w) + self.C * (np.sum(1 - X_t_sv.dot(w)) - b * np.sum(t_sv))
            self.Js.append(J)
            w_gradient_vector = w - self.C * np.sum(X_t_sv, axis=0).reshape(-1, 1)
            b_derivative = -self.C * np.sum(t_sv)
            w = w - self.eta(epoch) * w_gradient_vector
            b = b - self.eta(epoch) * b_derivative
        self.intercept_ = np.array([b])
        # NOTE(review): coef_ here has shape (1, n, 1) -- unlike sklearn's
        # (1, n) -- and downstream code appears to rely on this shape.
        self.coef_ = np.array([w])
        support_vectors_idx = (X_t.dot(w) + t * b < 1).ravel()
        self.support_vectors_ = X[support_vectors_idx]
        return self
    def decision_function(self, X):
        """Signed score w.x + b for each row of X."""
        return X.dot(self.coef_[0]) + self.intercept_[0]
    def predict(self, X):
        """Predict class 1.0 where the decision function is non-negative."""
        return (self.decision_function(X) >= 0).astype(np.float64)
# Train our implementation and compare it with sklearn's SVC and SGDClassifier.
C=2
svm_clf = MyLinearSVC(C=C, eta0 = 10, eta_d = 1000, n_epochs=60000, random_state=2)
svm_clf.fit(X, y)
svm_clf.predict(np.array([[5, 2], [4, 1]]))
# Training-cost curve over the epochs.
plt.plot(range(svm_clf.n_epochs), svm_clf.Js)
plt.axis([0, svm_clf.n_epochs, 0, 100])
print(svm_clf.intercept_, svm_clf.coef_)
svm_clf2 = SVC(kernel="linear", C=C)
svm_clf2.fit(X, y.ravel())
print(svm_clf2.intercept_, svm_clf2.coef_)
yr = y.ravel()
# Decision boundaries: MyLinearSVC (left) vs sklearn SVC (right).
# NOTE(review): plot_svc_decision_boundary is defined in an earlier cell.
fig, axes = plt.subplots(ncols=2, figsize=(11, 3.2), sharey=True)
plt.sca(axes[0])
plt.plot(X[:, 0][yr==1], X[:, 1][yr==1], "g^", label="Iris virginica")
plt.plot(X[:, 0][yr==0], X[:, 1][yr==0], "bs", label="Not Iris virginica")
plot_svc_decision_boundary(svm_clf, 4, 6)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.title("MyLinearSVC", fontsize=14)
plt.axis([4, 6, 0.8, 2.8])
plt.legend(loc="upper left")
plt.sca(axes[1])
plt.plot(X[:, 0][yr==1], X[:, 1][yr==1], "g^")
plt.plot(X[:, 0][yr==0], X[:, 1][yr==0], "bs")
plot_svc_decision_boundary(svm_clf2, 4, 6)
plt.xlabel("Petal length", fontsize=14)
plt.title("SVC", fontsize=14)
plt.axis([4, 6, 0.8, 2.8])
from sklearn.linear_model import SGDClassifier
# Same task with SGD + hinge loss (approximates a linear SVM).
sgd_clf = SGDClassifier(loss="hinge", alpha=0.017, max_iter=1000, tol=1e-3, random_state=42)
sgd_clf.fit(X, y.ravel())
m = len(X)
t = y * 2 - 1  # -1 if t==0, +1 if t==1
X_b = np.c_[np.ones((m, 1)), X]  # Add bias input x0=1
X_b_t = X_b * t
sgd_theta = np.r_[sgd_clf.intercept_[0], sgd_clf.coef_[0]]
print(sgd_theta)
# Attach support vectors and C so the plotting helper can render margins.
support_vectors_idx = (X_b_t.dot(sgd_theta) < 1).ravel()
sgd_clf.support_vectors_ = X[support_vectors_idx]
sgd_clf.C = C
plt.figure(figsize=(5.5,3.2))
plt.plot(X[:, 0][yr==1], X[:, 1][yr==1], "g^")
plt.plot(X[:, 0][yr==0], X[:, 1][yr==0], "bs")
plot_svc_decision_boundary(sgd_clf, 4, 6)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.title("SGDClassifier", fontsize=14)
plt.axis([4, 6, 0.8, 2.8])
```
# Exercise solutions
## 1. to 7.
See appendix A.
# 8.
_Exercise: train a `LinearSVC` on a linearly separable dataset. Then train an `SVC` and a `SGDClassifier` on the same dataset. See if you can get them to produce roughly the same model._
Let's use the Iris dataset: the Iris Setosa and Iris Versicolor classes are linearly separable.
```
from sklearn import datasets
# Linearly separable subset: Iris setosa (0) vs versicolor (1), two features.
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)]  # petal length, petal width
y = iris["target"]
setosa_or_versicolor = (y == 0) | (y == 1)
X = X[setosa_or_versicolor]
y = y[setosa_or_versicolor]
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler
C = 5
# alpha chosen so SGD's regularization strength matches the SVMs' C.
alpha = 1 / (C * len(X))
lin_clf = LinearSVC(loss="hinge", C=C, random_state=42)
svm_clf = SVC(kernel="linear", C=C)
sgd_clf = SGDClassifier(loss="hinge", learning_rate="constant", eta0=0.001, alpha=alpha,
                        max_iter=1000, tol=1e-3, random_state=42)
# Scale the features once and train all three models on the same scaled data.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
lin_clf.fit(X_scaled, y)
svm_clf.fit(X_scaled, y)
sgd_clf.fit(X_scaled, y)
print("LinearSVC: ", lin_clf.intercept_, lin_clf.coef_)
print("SVC: ", svm_clf.intercept_, svm_clf.coef_)
print("SGDClassifier(alpha={:.5f}):".format(sgd_clf.alpha), sgd_clf.intercept_, sgd_clf.coef_)
```
Let's plot the decision boundaries of these three models:
```
# Compute the slope and bias of each decision boundary
# (the line x2 = w*x1 + b in the scaled feature space).
w1 = -lin_clf.coef_[0, 0]/lin_clf.coef_[0, 1]
b1 = -lin_clf.intercept_[0]/lin_clf.coef_[0, 1]
w2 = -svm_clf.coef_[0, 0]/svm_clf.coef_[0, 1]
b2 = -svm_clf.intercept_[0]/svm_clf.coef_[0, 1]
w3 = -sgd_clf.coef_[0, 0]/sgd_clf.coef_[0, 1]
b3 = -sgd_clf.intercept_[0]/sgd_clf.coef_[0, 1]
# Transform the decision boundary lines back to the original scale
line1 = scaler.inverse_transform([[-10, -10 * w1 + b1], [10, 10 * w1 + b1]])
line2 = scaler.inverse_transform([[-10, -10 * w2 + b2], [10, 10 * w2 + b2]])
line3 = scaler.inverse_transform([[-10, -10 * w3 + b3], [10, 10 * w3 + b3]])
# Plot all three decision boundaries
plt.figure(figsize=(11, 4))
plt.plot(line1[:, 0], line1[:, 1], "k:", label="LinearSVC")
plt.plot(line2[:, 0], line2[:, 1], "b--", linewidth=2, label="SVC")
plt.plot(line3[:, 0], line3[:, 1], "r-", label="SGDClassifier")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs") # label="Iris versicolor"
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo") # label="Iris setosa"
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="upper center", fontsize=14)
plt.axis([0, 5.5, 0, 2])
plt.show()
```
Close enough!
# 9.
_Exercise: train an SVM classifier on the MNIST dataset. Since SVM classifiers are binary classifiers, you will need to use one-versus-all to classify all 10 digits. You may want to tune the hyperparameters using small validation sets to speed up the process. What accuracy can you reach?_
First, let's load the dataset and split it into a training set and a test set. We could use `train_test_split()` but people usually just take the first 60,000 instances for the training set, and the last 10,000 instances for the test set (this makes it possible to compare your model's performance with others):
**Warning:** since Scikit-Learn 0.24, `fetch_openml()` returns a Pandas `DataFrame` by default. To avoid this, we use `as_frame=False`.
```
from sklearn.datasets import fetch_openml
# MNIST: 70k 28x28 digit images; as_frame=False keeps NumPy arrays.
mnist = fetch_openml('mnist_784', version=1, cache=True, as_frame=False)
X = mnist["data"]
y = mnist["target"].astype(np.uint8)  # labels arrive as strings
# Conventional split: first 60k for training, last 10k for testing.
X_train = X[:60000]
y_train = y[:60000]
X_test = X[60000:]
y_test = y[60000:]
```
Many training algorithms are sensitive to the order of the training instances, so it's generally good practice to shuffle them first. However, the dataset is already shuffled, so we do not need to do it.
Let's start simple, with a linear SVM classifier. It will automatically use the One-vs-All (also called One-vs-the-Rest, OvR) strategy, so there's nothing special we need to do. Easy!
**Warning**: this may take a few minutes depending on your hardware.
```
# Baseline: one-vs-rest linear SVM on the raw pixel values.
lin_clf = LinearSVC(random_state=42)
lin_clf.fit(X_train, y_train)
```
Let's make predictions on the training set and measure the accuracy (we don't want to measure it on the test set yet, since we have not selected and trained the final model yet):
```
from sklearn.metrics import accuracy_score
# Training-set accuracy of the unscaled baseline.
y_pred = lin_clf.predict(X_train)
accuracy_score(y_train, y_pred)
```
Okay, 89.5% accuracy on MNIST is pretty bad. This linear model is certainly too simple for MNIST, but perhaps we just needed to scale the data first:
```
# Standardize the pixels (fit on the training set only);
# float32 halves memory use versus float64.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float32))
X_test_scaled = scaler.transform(X_test.astype(np.float32))
```
**Warning**: this may take a few minutes depending on your hardware.
```
# Same linear SVM, now on the scaled data.
lin_clf = LinearSVC(random_state=42)
lin_clf.fit(X_train_scaled, y_train)
y_pred = lin_clf.predict(X_train_scaled)
accuracy_score(y_train, y_pred)
```
That's much better (we cut the error rate by about 25%), but still not great at all for MNIST. If we want to use an SVM, we will have to use a kernel. Let's try an `SVC` with an RBF kernel (the default).
**Note**: to be future-proof we set `gamma="scale"` since it will be the default value in Scikit-Learn 0.22.
```
# RBF-kernel SVM trained on a 10k subset (kernel SVMs scale poorly with m).
svm_clf = SVC(gamma="scale")
svm_clf.fit(X_train_scaled[:10000], y_train[:10000])
y_pred = svm_clf.predict(X_train_scaled)
accuracy_score(y_train, y_pred)
```
That's promising, we get better performance even though we trained the model on 6 times less data. Let's tune the hyperparameters by doing a randomized search with cross validation. We will do this on a small dataset just to speed up the process:
```
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
# Randomized search over gamma (log-uniform on [0.001, 0.1]) and
# C (uniform on [1, 11]), on a 1k subset to keep it fast.
param_distributions = {"gamma": reciprocal(0.001, 0.1), "C": uniform(1, 10)}
rnd_search_cv = RandomizedSearchCV(svm_clf, param_distributions, n_iter=10, verbose=2, cv=3)
rnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000])
rnd_search_cv.best_estimator_
rnd_search_cv.best_score_
```
This looks pretty low but remember we only trained the model on 1,000 instances. Let's retrain the best estimator on the whole training set:
**Warning**: the following cell may take hours to run, depending on your hardware.
```
# Retrain the best model on the full (scaled) training set.
rnd_search_cv.best_estimator_.fit(X_train_scaled, y_train)
y_pred = rnd_search_cv.best_estimator_.predict(X_train_scaled)
accuracy_score(y_train, y_pred)
```
Ah, this looks good! Let's select this model. Now we can test it on the test set:
```
# Final evaluation on the held-out test set.
y_pred = rnd_search_cv.best_estimator_.predict(X_test_scaled)
accuracy_score(y_test, y_pred)
```
Not too bad, but apparently the model is overfitting slightly. It's tempting to tweak the hyperparameters a bit more (e.g. decreasing `C` and/or `gamma`), but we would run the risk of overfitting the test set. Other people have found that the hyperparameters `C=5` and `gamma=0.005` yield even better performance (over 98% accuracy). By running the randomized search for longer and on a larger part of the training set, you may be able to find this as well.
## 10.
_Exercise: train an SVM regressor on the California housing dataset._
Let's load the dataset using Scikit-Learn's `fetch_california_housing()` function:
```
from sklearn.datasets import fetch_california_housing
# Features: 8 numeric district attributes; target: median house value.
housing = fetch_california_housing()
X = housing["data"]
y = housing["target"]
```
Split it into a training set and a test set:
```
from sklearn.model_selection import train_test_split
# 80/20 train/test split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```
Don't forget to scale the data:
```
from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training data only, then apply to both splits.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
```
Let's train a simple `LinearSVR` first:
```
from sklearn.svm import LinearSVR
# Baseline: linear support vector regression with default hyperparameters.
lin_svr = LinearSVR(random_state=42)
lin_svr.fit(X_train_scaled, y_train)
```
Let's see how it performs on the training set:
```
from sklearn.metrics import mean_squared_error
# Training-set MSE of the linear baseline.
y_pred = lin_svr.predict(X_train_scaled)
mse = mean_squared_error(y_train, y_pred)
mse
```
Let's look at the RMSE:
```
np.sqrt(mse)  # RMSE, in the same units as the target
```
In this training set, the targets are the districts' median house values, expressed in hundreds of thousands of dollars (so a target of 2.0 means $200,000). The RMSE gives a rough idea of the kind of error you should expect (with a higher weight for large errors): so with this model we can expect errors somewhere around $100,000. Not great. Let's see if we can do better with an RBF Kernel. We will use randomized search with cross validation to find the appropriate hyperparameter values for `C` and `gamma`:
```
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
# Randomized search over (gamma, C) for an RBF-kernel SVR.
param_distributions = {"gamma": reciprocal(0.001, 0.1), "C": uniform(1, 10)}
rnd_search_cv = RandomizedSearchCV(SVR(), param_distributions, n_iter=10, verbose=2, cv=3, random_state=42)
rnd_search_cv.fit(X_train_scaled, y_train)
rnd_search_cv.best_estimator_
```
Now let's measure the RMSE on the training set:
```
# RMSE of the tuned model on the training set.
y_pred = rnd_search_cv.best_estimator_.predict(X_train_scaled)
mse = mean_squared_error(y_train, y_pred)
np.sqrt(mse)
```
Looks much better than the linear model. Let's select this model and evaluate it on the test set:
```
# Final RMSE on the held-out test set.
y_pred = rnd_search_cv.best_estimator_.predict(X_test_scaled)
mse = mean_squared_error(y_test, y_pred)
np.sqrt(mse)
```
| github_jupyter |
[](https://www.pythonista.io)
# Cliente de la API con requests.
En esta notebook se encuentra el código de un cliente capaz de consumir los servicios de los servidores creados en este curso.
Es necesario que el servidor en la notebook se encuentre en ejecución.
```
!pip install requests PyYAML
from requests import put, get, post, delete, patch
import yaml
# host = "http://localhost:5000"  # local development server
host = "https://py221-2111.uc.r.appspot.com"
```
## Acceso a la raíz de la API.
Regresará el listado completo de la base de datos en formato JSON.
```
# GET the API root: prints the full database listing when JSON is returned.
with get(f'{host}/api/') as r:
    print(r.url)
    print(r.status_code)
    if r.headers['Content-Type'] == 'application/json':
        print(r.json())
    else:
        print("Sin contenido JSON.")
```
## Búsqueda por número de cuenta mediante GET.
* Regresará los datos en formato JSON del registro cuyo campo 'Cuenta' coincida con el número que se ingrese en la ruta.
* En caso de que no exista un registro con ese número de cuenta, regresará un mensaje 404.
```
# GET a single record by account number (404 if it does not exist).
with get(f'{host}/api/1231267') as r:
    print(r.url)
    print(r.status_code)
    if r.headers['Content-Type'] == 'application/json':
        print(r.json())
    else:
        print("Sin contenido JSON.")
```
## Creación de un nuevo registro mediante POST.
* Creará un nuevo registro con la estructura de datos enviada en caso de que no exista un registro cuyo contenido del campo 'Cuenta' coincida con el numero ingresado en la URL y regresará los datos completos de dicho registro en formato JSON.
* En caso de que exista un registro cuyo contenido del campo 'Cuenta' coincida con el numero ingresado en la URL, regresará un mensaje 409.
* En caso de que los datos no sean correctos, estén incompletos o no se apeguen a la estructura de datos, regresará un mensaje 400.
```
# POST a new record (409 if the account already exists, 400 on a bad payload).
data ={'al_corriente': True,
       'carrera': 'Sistemas',
       'nombre': 'Laura',
       'primer_apellido': 'Robles',
       'promedio': 9.2,
       'semestre': 1}
with post(f'{host}/api/1231268', json=data) as r:
    print(r.url)
    print(r.status_code)
    if r.headers['Content-Type'] == 'application/json':
        print(r.json())
    else:
        print("Sin contenido JSON.")
```
## Sustitución de un registro existente mediante PUT.
* Sustituirá por completo un registro cuyo contenido del campo 'Cuenta' coincida con el numero ingresado en la URL con los datos enviados y regresará los datos completos del nuevo registro en formato JSON.
* En caso de que no exista un registro cuyo contenido del campo 'Cuenta' coincida con el numero ingresado en la URL, regresará un mensaje 404.
* En caso de que los datos no sean correctos, no estén completos o no se apeguen a la estructura de datos, regresará un mensaje 400.
```
# PUT: replace an existing record wholesale (404 if missing, 400 on bad data).
data = {'al_corriente': True,
        'carrera': 'Sistemas',
        'nombre': 'Laura',
        'primer_apellido': 'Robles',
        'segundo_apellido': 'Sánchez',
        'promedio': 10,
        'semestre': 2}
with put(f'{host}/api/1231268', json=data) as r:
    print(r.url)
    print(r.status_code)
    if r.headers['Content-Type'] == 'application/json':
        print(r.json())
    else:
        print("Sin contenido JSON.")
```
## Enmienda de un registro existente con el método ```PATCH```.
```
# PATCH: partial update of an existing record (only the listed fields change).
data = {'al_corriente': True,
        'semestre': 10}
with patch(f'{host}/api/1231268', json=data) as r:
    print(r.url)
    print(r.status_code)
    if r.headers['Content-Type'] == 'application/json':
        print(r.json())
    else:
        print("Sin contenido JSON.")
```
## Eliminación de un registro existente mediante DELETE.
* Eliminará un registro cuyo contenido del campo 'Cuenta' coincida con el numero ingresado en la URL y regresará los datos completos del registro eliminado en formato JSON.
* En caso de que no exista un registro cuyo contenido del campo 'Cuenta' coincida con el numero ingresado en la URL, regresará un mensaje 404.
```
# DELETE a record by account number (404 if it does not exist).
with delete(f'{host}/api/1231268') as r:
    print(r.url)
    print(r.status_code)
    if r.headers['Content-Type'] == 'application/json':
        print(r.json())
    else:
        print("Sin contenido JSON.")
```
## La documentación de *Swagger*.
```
# GET the Swagger spec and pretty-print it as YAML.
# FIX: the original line read get('f'{host}/swagger/'), which is a syntax
# error (misplaced quote); the intended call builds the URL with an f-string.
with get(f'{host}/swagger/') as r:
    print(r.url)
    print(r.status_code)
    if r.headers['Content-Type'] == 'application/json':
        print(yaml.dump(r.json()))
    else:
        print("Sin contenido JSON.")
```
<p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Licencia Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Esta obra está bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p>
<p style="text-align: center">© José Luis Chiquete Valdivieso. 2022.</p>
| github_jupyter |
**Estimación puntual**
```
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import random
import math
# Simulated population: mixture of two shifted Poisson age distributions
# (1.5M "older" + 1M "younger" individuals).
np.random.seed(2020)
population_ages_1 = stats.poisson.rvs(loc = 18, mu = 35, size = 1500000)
population_ages_2 = stats.poisson.rvs(loc = 18, mu = 10, size = 1000000)
population_ages = np.concatenate((population_ages_1, population_ages_2))
print(population_ages_1.mean())
print(population_ages_2.mean())
print(population_ages.mean())
pd.DataFrame(population_ages).hist(bins = 60, range = (17.5, 77.5), figsize = (10,10))
stats.skew(population_ages)
stats.kurtosis(population_ages)
# Point estimate: the mean of a 500-element sample vs the population mean.
np.random.seed(42)
sample_ages = np.random.choice(population_ages, 500)
print(sample_ages.mean())
population_ages.mean() - sample_ages.mean()  # sampling error of the estimate
# Categorical population: race proportions, estimated from a sample of 1000.
population_races = (["blanca"]*1000000) + (["negra"]*500000) + (["hispana"]*500000) + (["asiatica"]*250000) + (["otros"]*250000)
for race in set(population_races):
    print("Proporción de "+race)
    print(population_races.count(race) / 2500000)
random.seed(31)
race_sample = random.sample(population_races, 1000)
for race in set(race_sample):
    print("Proporción de "+race)
    print(race_sample.count(race) / 1000)
pd.DataFrame(population_ages).hist(bins = 60, range = (17.5, 77.5), figsize = (10,10))
pd.DataFrame(sample_ages).hist(bins = 60, range = (17.5, 77.5), figsize = (10,10))
# Reseed NumPy's RNG so the sampling experiment below is reproducible.
# FIX: the original called np.random.sample(1988), which draws (and discards)
# 1988 random numbers instead of seeding; np.random.seed(1988) matches the
# seeding pattern used everywhere else in this notebook.
np.random.seed(1988)
# Sampling distribution of the mean: 200 point estimates, each the mean of
# a 500-element random sample drawn from the population.
point_estimates = []
for x in range(200):
    sample = np.random.choice(population_ages, size = 500)
    point_estimates.append(sample.mean())
pd.DataFrame(point_estimates).plot(kind = "density", figsize = (9,9), xlim = (40, 46) )
# The mean of the point estimates is close to the population mean.
np.array(point_estimates).mean()
```
**Si conocemos la desviación típica**
```
# 95% z-interval for the mean when the population sigma is known.
np.random.seed(10)
n = 1000
alpha = 0.05
sample = np.random.choice(population_ages, size = n)
sample_mean = sample.mean()
z_critical = stats.norm.ppf(q = 1-alpha/2)  # two-sided critical value
sigma = population_ages.std()## population standard deviation
sample_error = z_critical * sigma / math.sqrt(n)
ci = (sample_mean - sample_error, sample_mean + sample_error)
ci
# Coverage illustration: 100 samples -> 100 confidence intervals, plotted
# against the true population mean (red line).
np.random.seed(10)
n = 1000
alpha = 0.05
intervals = []
sample_means = []
z_critical = stats.norm.ppf(q = 1-alpha/2)
sigma = population_ages.std()## population standard deviation
sample_error = z_critical * sigma / math.sqrt(n)
for sample in range(100):
    sample = np.random.choice(population_ages, size = n)
    sample_mean = sample.mean()
    sample_means.append(sample_mean)
    ci = (sample_mean - sample_error, sample_mean + sample_error)
    intervals.append(ci)
plt.figure(figsize=(10,10))
plt.errorbar(x = np.arange(0.1, 100, 1), y = sample_means, yerr=[(top-bottom)/2 for top, bottom in intervals], fmt='o')
plt.hlines(xmin = 0, xmax = 100, y = population_ages.mean(), linewidth=2.0, color="red")
```
**Si la desviación típica no es conocida...**
```
# 95% t-interval for the mean when sigma is unknown (small sample, n = 25).
np.random.seed(10)
n = 25
alpha = 0.05
sample = np.random.choice(population_ages, size = n)
sample_mean = sample.mean()
t_critical = stats.t.ppf(q = 1-alpha/2, df = n-1)
sample_sd = sample.std(ddof=1)## sample standard deviation (Bessel-corrected)
sample_error = t_critical * sample_sd / math.sqrt(n)
ci = (sample_mean - sample_error, sample_mean + sample_error)
ci
# The t quantile exceeds the normal one for small df and converges as df grows.
stats.t.ppf(q = 1-alpha, df = n-1) - stats.norm.ppf(1-alpha)
stats.t.ppf(q = 1-alpha, df = 999) - stats.norm.ppf(1-alpha)
# NOTE(review): modern SciPy renamed t.interval's first parameter from
# `alpha` to `confidence` -- confirm the installed version accepts `alpha`.
stats.t.interval(alpha = 0.95, df = 24, loc = sample_mean, scale = sample_sd/math.sqrt(n))
```
**Intervalo para la proporción poblacional**
```
# 95% normal-approximation interval for a population proportion.
alpha = 0.05
n = 1000
z_critical = stats.norm.ppf(q=1-alpha/2)
p_hat = race_sample.count("blanca") / n  # sample proportion of "blanca"
sample_error = z_critical * math.sqrt((p_hat*(1-p_hat)/n))
ci = (p_hat - sample_error, p_hat + sample_error)
ci
stats.norm.interval(alpha = 0.95, loc = p_hat, scale = math.sqrt(p_hat*(1-p_hat)/n))
```
**Cómo interpretar el intervalo de confianza**
```
# Gamma population (mean 4, sd 2*sqrt(2)) to visualize the CLT and the
# meaning of a confidence interval.
shape, scale = 2.0, 2.0 #mean = 4, std = 2*sqrt(2)
s = np.random.gamma(shape, scale, 1000000)
mu = shape*scale
sigma = scale*np.sqrt(shape)
print(mu)
print(sigma)
# Sampling distribution of the mean: 50,000 samples of size 500.
meansample = []
sample_size = 500
for i in range(0,50000):
    # random.choices samples WITH replacement.
    sample = random.choices(s, k=sample_size)
    meansample.append(sum(sample)/len(sample))
plt.figure(figsize=(20,10))
plt.hist(meansample, 200, density=True, color="lightblue")
plt.show()
# Same histogram with the true mean (green) and the theoretical 95% band
# mu +/- 1.96*sigma/sqrt(n) (navy).
plt.figure(figsize=(20,10))
plt.hist(meansample, 200, density=True, color="lightblue")
plt.plot([mu,mu], [0, 3.5], 'k-', lw=4, color='green')
plt.plot([mu-1.96*sigma/np.sqrt(sample_size), mu-1.96*sigma/np.sqrt(sample_size)], [0, 3.5], 'k-', lw=2, color="navy")
plt.plot([mu+1.96*sigma/np.sqrt(sample_size), mu+1.96*sigma/np.sqrt(sample_size)], [0, 3.5], 'k-', lw=2, color="navy")
plt.show()
# One concrete sample and its own 95% interval (red) overlaid on the band.
sample_data = np.random.choice(s, size = sample_size)
x_bar = sample_data.mean()
ss = sample_data.std()
plt.figure(figsize=(20,10))
plt.hist(meansample, 200, density=True, color="lightblue")
plt.plot([mu,mu], [0, 3.5], 'k-', lw=4, color='green')
plt.plot([mu-1.96*sigma/np.sqrt(sample_size), mu-1.96*sigma/np.sqrt(sample_size)], [0, 3.5], 'k-', lw=2, color="navy")
plt.plot([mu+1.96*sigma/np.sqrt(sample_size), mu+1.96*sigma/np.sqrt(sample_size)], [0, 3.5], 'k-', lw=2, color="navy")
plt.plot([x_bar, x_bar], [0,3.5], 'k-', lw=2, color="red")
plt.plot([x_bar-1.96*ss/np.sqrt(sample_size), x_bar-1.96*ss/np.sqrt(sample_size)], [0, 3.5], 'k-', lw=1, color="red")
plt.plot([x_bar+1.96*ss/np.sqrt(sample_size), x_bar+1.96*ss/np.sqrt(sample_size)], [0, 3.5], 'k-', lw=1, color="red")
plt.gca().add_patch(plt.Rectangle((x_bar-1.96*ss/np.sqrt(sample_size), 0), 2*(1.96*ss/np.sqrt(sample_size)), 3.5, fill=True, fc=(0.9, 0.1, 0.1, 0.15)))
plt.show()
# Empirical coverage: how many of 100 intervals contain the true mean mu?
interval_list = []
z_critical = 1.96 #z_0.975
sample_size = 5000
c = 0
error = z_critical*sigma/np.sqrt(sample_size)
for i in range(0,100):
    rs = random.choices(s, k=sample_size)
    mean = np.mean(rs)
    ub = mean + error
    lb = mean - error
    interval_list.append([lb, mean, ub])
    if ub >= mu and lb <= mu:
        c += 1
c
print("Número de intervalos de confianza que contienen el valor real de mu: ",c)
plt.figure(figsize = (20, 10))
plt.boxplot(interval_list)
plt.plot([1,100], [mu, mu], 'k-', lw=2, color="red")
plt.show()
```
| github_jupyter |
```
from urllib.request import urlopen
html=urlopen('http://pythonscraping.com/pages/page1.html')
print(html.read())
from bs4 import BeautifulSoup
html = urlopen("http://www.pythonscraping.com/pages/page1.html")
bs = BeautifulSoup(html.read())
print(bs)
print(bs.h1)
print(bs.html.body.h1)
print(bs.body.h1)
html = urlopen("http://www.pythonscraping.com/pages/page1.html")
# lxml Has more advantage When parsing messy HTML < NO body / head section . unclosed tags. More faster
bs = BeautifulSoup(html.read(),'lxml')
print(bs)
```
## Sometimes we need to understand what is really going on when scraping, and we need to do all that work by hand
```
html = urlopen("http://www.pythonscraping.com/pages/page1.html")
# 2 things could go wrong
# page Not Found
# Server is not found
print(html)
# To handle the error we do this
from urllib.error import HTTPError
from urllib.error import URLError
try:
html = urlopen("http://www.pythonscraping.com/pages/page1.html")
except HTTPError as e:
print(e)
except URLError as Ur:
print("Server could not be found")
else :
bs = BeautifulSoup(html.read(),"lxml")
try:
badContent = bs.nonExistingTag.h1
except AttributeError as ae :
print("Tag Was not Found")
else :
print(badContent)
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
def getTitle(url):
    """Fetch *url* and return the first <h1> inside <body>, or None on failure.

    None is returned when the HTTP request fails (page not found or the
    server is unreachable) or when the parsed document has no <body>/<h1>.
    """
    try:
        html = urlopen(url)
    except (HTTPError, URLError):
        # Page-not-found and server-not-found both count as "no title";
        # previously URLError propagated and crashed the caller.
        return None
    try:
        bs = BeautifulSoup(html.read(), 'lxml')
        title = bs.body.h1
    except AttributeError:
        # bs.body is None when the document has no <body> section.
        return None
    return title
title = getTitle("http://www.pythonscraping.com/pages/page1.html")
if (title == None):
print("Title Could not found")
else :
print(title)
html = urlopen("http://www.pythonscraping.com/pages/page1.html")
bs = BeautifulSoup(html, "lxml")
print(bs)
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("http://www.pythonscraping.com/pages/warandpeace.html")
bs = BeautifulSoup(html, "lxml")
nameListGreen = bs.findAll("span", {"class" : ["green"]})
storyListRed = bs.findAll("span" , {"class" : ["red"]})
for name in nameListGreen:
print(name.get_text())
for name in storyListRed:
print(name.get_text())
alltags = set(tag.name for tag in bs.findAll())
print(alltags)
random = bs.findAll(["body","div"])
for r in random:
print(r.getText()) # Without Tags
print(r) # With Tags
break
hlist = bs.findAll(["h1","h2"])
for h in hlist:
print(h)
hList = bs.findAll(["h1","h2","h3","h4","h5","h6"])
for h in hList:
print(h)
nameList =bs.findAll(["span"],text="the prince")
for name in nameList:
print(name)
nameList = bs.find(class_="red")
nameList2 = bs.findAll("span",{"class":"green"})
print(nameList)
print(nameList2)
```
Objects on Beautiful soup
- BeautifulSoup
- Tag Objects List or Single value depends on (findAll and find)
- NavigableString objs < Represent text within tags >
- Comment obj < Represent the Comments>
```
# Navigating Treess
html = urlopen("http://www.pythonscraping.com/pages/page3.html")
bs = BeautifulSoup(html,"lxml")
print(bs.html.head)
```
In Beautiful Soup all treated as descendant , ya intinya descendant tuh kek Cucu dan seterunya lah
example :
ada ayah namanya badang
anaknya rino
rino nikah punya anak sambir
nah si sambir ini disebut descendant
bs.findAll juga sama makenya descendant juga
```
#contoh descendant
print(bs.h1)
print(bs.html.h1)
print(bs.body.h1)
print(bs.findAll('img'))
# find children
for child in bs.find("table", {"id" : "giftList"}).children:
print(child)
# Dealing with siblings
# Jadi ini setiap masing masing masuk ke tr , lalu ambil seluruh child yang ada di tr
for sibling in bs.find("table", {"id" : "giftList"}).tr.next_siblings:
print(sibling)
print("\n\n")
# Dealing with parents
# Cara bacanya adalah : Get si parent dari pada tag yang dicari terus navigasi , ya gunain previous sibling
for parent in bs.find("img",{"src" : "../img/gifts/img1.jpg"}).parent.previous_sibling.previous_sibling.next_sibling:
print(parent)
```
| github_jupyter |
# Approximate q-learning
In this notebook you will teach a lasagne neural network to do Q-learning.
__Frameworks__ - we'll accept this homework in any deep learning framework. For example, it translates to TensorFlow almost line-to-line. However, we recommend you to stick to theano/lasagne unless you're certain about your skills in the framework of your choice.
```
%env THEANO_FLAGS='floatX=float32'
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY"))==0:
!bash ../xvfb start
%env DISPLAY=:1
import gym
import numpy as np, pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
env = gym.make("CartPole-v0")
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
plt.imshow(env.render("rgb_array"))
```
# Approximate (deep) Q-learning: building the network
In this section we will build and train naive Q-learning with theano/lasagne
First step is initializing input variables
```
import theano
import theano.tensor as T
#create input variables. We'll support multiple states at once
current_states = T.matrix("states[batch,units]")
actions = T.ivector("action_ids[batch]")
rewards = T.vector("rewards[batch]")
next_states = T.matrix("next states[batch,units]")
is_end = T.ivector("vector[batch] where 1 means that session just ended")
import lasagne
from lasagne.layers import *
#input layer
l_states = InputLayer((None,)+state_dim)
<Your architecture. Please start with a single-layer network>
#output layer
l_qvalues = DenseLayer(<previous_layer>,num_units=n_actions,nonlinearity=None)
```
#### Predicting Q-values for `current_states`
```
#get q-values for ALL actions in current_states
predicted_qvalues = get_output(l_qvalues,{l_states:current_states})
#compiling agent's "GetQValues" function
get_qvalues = <compile a function that takes current_states and returns predicted_qvalues>
#select q-values for chosen actions
predicted_qvalues_for_actions = predicted_qvalues[T.arange(actions.shape[0]),actions]
```
#### Loss function and `update`
Here we write a function similar to `agent.update`.
```
#predict q-values for next states
predicted_next_qvalues = get_output(l_qvalues,{l_states:<theano input with for states>})
#Computing target q-values under
gamma = 0.99
target_qvalues_for_actions = <target Q-values using rewards and predicted_next_qvalues>
#zero-out q-values at the end
target_qvalues_for_actions = (1-is_end)*target_qvalues_for_actions
#don't compute gradient over target q-values (consider constant)
target_qvalues_for_actions = theano.gradient.disconnected_grad(target_qvalues_for_actions)
#mean squared error loss function
loss = <mean squared between target_qvalues_for_actions and predicted_qvalues_for_actions>
#all network weights
all_weights = get_all_params(l_qvalues,trainable=True)
#network updates. Note the small learning rate (for stability)
updates = lasagne.updates.sgd(loss,all_weights,learning_rate=1e-4)
#Training function that resembles agent.update(state,action,reward,next_state)
#with 1 more argument meaning is_end
train_step = theano.function([current_states,actions,rewards,next_states,is_end],
updates=updates)
```
### Playing the game
```
epsilon = 0.25 #initial epsilon
def generate_session(t_max=1000):
    """play env with approximate q-learning agent and train it at the same time

    Runs one episode of at most ``t_max`` steps, calling ``train_step`` on
    every transition, and returns the total (undiscounted) episode reward.
    Uses the module-level ``env``, ``get_qvalues`` and ``train_step``.
    """
    total_reward = 0
    s = env.reset()
    for t in range(t_max):
        #get action q-values from the network
        q_values = get_qvalues([s])[0]
        # TODO(template): assignment placeholder -- replace with an
        # epsilon-greedy choice over q_values (random action with
        # probability epsilon, argmax otherwise). Invalid syntax as-is.
        a = <sample action with epsilon-greedy strategy>
        new_s,r,done,info = env.step(a)
        #train agent one step. Note that we use one-element arrays instead of scalars
        #because that's what function accepts.
        train_step([s],[a],[r],[new_s],[done])
        total_reward+=r
        s = new_s
        if done: break
    return total_reward
for i in range(100):
rewards = [generate_session() for _ in range(100)] #generate new sessions
epsilon*=0.95
print ("mean reward:%.3f\tepsilon:%.5f"%(np.mean(rewards),epsilon))
if np.mean(rewards) > 300:
print ("You Win!")
break
assert epsilon!=0, "Please explore environment"
```
### Video
```
epsilon=0 #Don't forget to reset epsilon back to initial value if you want to go on training
#record sessions
import gym.wrappers
env = gym.wrappers.Monitor(env,directory="videos",force=True)
sessions = [generate_session() for _ in range(100)]
env.close()
#unwrap
env = env.env.env
#upload to gym
#gym.upload("./videos/",api_key="<your_api_key>") #you'll need me later
#Warning! If you keep seeing error that reads something like"DoubleWrapError",
#run env=gym.make("CartPole-v0");env.reset();
#show video
from IPython.display import HTML
import os
video_names = list(filter(lambda s:s.endswith(".mp4"),os.listdir("./videos/")))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./videos/"+video_names[-1])) #this may or may not be _last_ video. Try other indices
```
| github_jupyter |
均线定投
```
import pandas as pd
from datetime import datetime
import trdb2py
import numpy as np
isStaticImg = False
width = 960
height = 768
pd.options.display.max_columns = None
pd.options.display.max_rows = None
trdb2cfg = trdb2py.loadConfig('./trdb2.yaml')
# 具体基金
asset = 'jqdata.000300_XSHG|1d'
# baselineasset = 'jrj.510310'
# asset = 'jrj.110011'
# baselineasset = 'jqdata.000300_XSHG|1d'
# 起始时间,0表示从最开始算起
tsStart = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# 结束时间,-1表示到现在为止
tsEnd = -1
tsEnd = int(trdb2py.str2timestamp('2020-12-31', '%Y-%m-%d'))
# 初始资金池
paramsinit = trdb2py.trading2_pb2.InitParams(
money=10000,
)
# 买入参数,用全部的钱来买入(也就是复利)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
perHandMoney=1,
)
# 买入参数,用全部的钱来买入(也就是复利)
paramsbuy2 = trdb2py.trading2_pb2.BuyParams(
perHandMoney=0.5,
)
# 卖出参数,全部卖出
paramssell = trdb2py.trading2_pb2.SellParams(
perVolume=1,
)
paramsaip = trdb2py.trading2_pb2.AIPParams(
money=10000,
type=trdb2py.trading2_pb2.AIPTT_MONTHDAY,
day=1,
)
# 止盈参数,120%止盈
paramstakeprofit = trdb2py.trading2_pb2.TakeProfitParams(
perVolume=1,
isOnlyProfit=True,
# isFinish=True,
)
# 止盈参数,120%止盈
paramstakeprofit1 = trdb2py.trading2_pb2.TakeProfitParams(
perVolume=1,
# isOnlyProfit=True,
# isFinish=True,
)
# 卖出参数,全部卖出
paramssell7 = trdb2py.trading2_pb2.SellParams(
# perVolume=1,
keepTime=7 * 24 * 60 * 60,
)
lststart = [1, 2, 3, 4, 5]
lsttitle = ['周一', '周二', '周三', '周四', '周五']
def calcweekday2val2(wday, offday):
    """Convert a trading-day offset to a calendar-day offset.

    ``wday`` is the weekday (1 = Monday .. 5 = Friday) and ``offday`` the
    desired offset in trading days; when the target would land on a
    weekend, the returned calendar offset is stretched to skip it.
    """
    if offday == 1 and wday == 5:
        return 3
    if offday == 2 and wday >= 4:
        return 4
    if offday == 3 and wday >= 3:
        return 5
    if offday == 4 and wday >= 2:
        return 6
    # No weekend in the way: the offsets coincide.
    return offday
asset = 'jrj.110011'
# asset = 'jqdata.000036_XSHG|1d'
# asset = 'jqdata.000032_XSHG|1d'
asset = 'jqdata.000300_XSHG|1d'
# baseline
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='buyandhold',
)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
perHandMoney=1,
)
paramsinit = trdb2py.trading2_pb2.InitParams(
money=10000,
)
s0.buy.extend([buy0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsInit.CopyFrom(paramsinit)
p0 = trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='沪深300',
)
pnlBaseline = trdb2py.simTrading(trdb2cfg, p0)
trdb2py.showPNL(pnlBaseline, toImg=isStaticImg, width=width, height=height)
lstparams = []
for i in range(2, 181):
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='monthdayex',
vals=[1],
)
buy1 = trdb2py.trading2_pb2.CtrlCondition(
name='waittostart',
vals=[i],
)
buy2 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['up'],
strVals=['ta-sma.{}'.format(i)],
)
sell0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['downcross'],
strVals=['ta-sma.{}'.format(i)],
)
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
# paramsaip = trdb2py.trading2_pb2.AIPParams(
# money=10000,
# type=trdb2py.trading2_pb2.AIPTT_WEEKDAY,
# day=1,
# )
s0.buy.extend([buy0, buy1, buy2])
s0.sell.extend([sell0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
# s0.paramsInit.CopyFrom(paramsinit)
s0.paramsAIP.CopyFrom(paramsaip)
lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='{}定投'.format(i),
))
lstaippnl = trdb2py.simTradings(trdb2cfg, lstparams, ignoreTotalReturn=1.5)
trdb2py.showPNLs(lstaippnl + [pnlBaseline], toImg=isStaticImg, width=width, height=height)
```
我们看到不管是每个月的几号买入,最终其实都差异不大
```
dfpnl1b = trdb2py.buildPNLReport(lstaippnl + [pnlBaseline])
dfpnl1b[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility', 'variance']].sort_values(by='totalReturns', ascending=False)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/dhruvsheth-ai/hydra-openvino-sensors/blob/master/hydra_openvino_pi.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
**Install the latest OpenVino for Raspberry Pi OS package from Intel OpenVino Distribution Download**
For my case, I have Installed 2020.4 version.
```
l_openvino_toolkit_runtime_raspbian_p_2020.4.28
```
This is the latest version available with the model zoo. Since the below code is executed on a Jupyter Notebook, terminal syntaxes may be different.

```
cd ~/Downloads/
!sudo mkdir -p /opt/intel/openvino
!sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_<version>.tgz --strip 1 -C /opt/intel/openvino
!sudo apt install cmake
!echo "source /opt/intel/openvino/bin/setupvars.sh" >> ~/.bashrc
```
Your output on new terminal will be:
```
[setupvars.sh] OpenVINO environment initialized
```
```
# Add the current user to the "users" group so it can access the NCS2 USB device.
# Fix: `"$(raspberry-pi)"` tried to run a nonexistent command; the OpenVINO
# install guide substitutes the current username via `"$(whoami)"`.
!sudo usermod -a -G users "$(whoami)"
```
The below are the USB rules for Intel Neural Compute Stick 2:
```
!sh /opt/intel/openvino/install_dependencies/install_NCS_udev_rules.sh
```
Once this is set up, move to the `hydra-openvino-yolo.ipynb` file for running the model.
| github_jupyter |
<a href="https://colab.research.google.com/github/RichardFreedman/CRIM_Collab_Notebooks/blob/main/CRIM_Data_Search.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import requests
import pandas as pd
```
# Markdown for descriptive text
## level two
### Structure notebook for various sections and TOC
Plain text is just normal
- list
- list item with dashes
or numbers
1. this
2. that
3. another
- Still other
- Still more
- And yet more
# Markdown vs Code
Pick Markdown for type of cell above. **Shift + return** to enter these
# Formatting
Italics is *before* **bold**
Escape then B to create new cells (and pick cell types later)
# Fill
Tab to auto fill within cell
# Requests
Requests in fact has several functions after the "." Like Get, or whatever
Requests.get plus (), then Shift+Tab to see all the parameters that must be passed.
Response object allows you to extract what you need, like JSON
For Obs_1_json = response.json() we **need** the parenths to run the function
# Dictionaries and Types
Dictionary= Key>Value Pairs (Key is MEI Links, value is the link)
Note that Values can themselves contain dictionary
Python Types
Dictionary (Pairs; can contain other Dictionaries)
String (thing in a quote)
List (always in square brackets, and can contain dictionaries and lists within them)
indexing of items in a list start at ZERO
last item is "-1", etc
# Get Key
To get an individual KEY from top level:
Obs_ema_1 = Obs_1_json["ema"]
This allows you to dig deeper in nested lists or dictionaries. In this case piece is top level in JSON, the MEI link is next. The number allows you to pick from items IN a list: Obs_1_json["piece"]["mei_links"][0]
```
Obs_1_url = "https://crimproject.org/data/observations/1/"
Obs_1_url
response = requests.get(Obs_1_url)
response
type(response)
Obs_1_json = response.json()
Obs_1_json
type(Obs_1_json)
example_list_1 = [5, 3, "this", "that"]
example_list_1[3]
Obs_1_json.keys()
Obs_ema_1 = Obs_1_json["ema"]
Obs_ema_1
type(Obs_ema_1)
print("here is a print statement")
Obs_1_json["musical_type"]
Obs_1_mt = Obs_1_json["musical_type"]
Obs_1_mt
Obs_1_piece = Obs_1_json["piece"]
Obs_1_piece
Obs_1_mei = Obs_1_piece["mei_links"]
Obs_1_mei
len(Obs_1_mei)
Obs_1_mei[0]
Obs_1_json["piece"]["mei_links"][0]
Obs_1_json["ema"]
```
# Loops
```
test_list = [1,5,2,5,6]
for i, observation_id in enumerate(test_list):
# do stuff
print(i, observation_id)
for number in range(1,10):
print(number)
def myfunction():
    """Tiny demo: prints a message so you can see the call actually ran."""
    status_message = "it is running"
    print(status_message)
myfunction
myfunction()
def adder(num_1, num_2):
    """Return the sum (or concatenation) of *num_1* and *num_2*."""
    total = num_1 + num_2
    return total
adder(5,9)
def get_ema_for_observation_id(obs_id):
    """Build and return the CRIM API URL for the observation *obs_id*."""
    return "https://crimproject.org/data/observations/{}/".format(obs_id)
def get_ema_for_observation_id(obs_id):
    """Fetch one CRIM observation and return a flat dict of selected fields.

    Downloads the observation JSON from the CRIM API and extracts the id,
    musical type, interval descriptors, EMA address, and the first MEI and
    PDF links of the associated piece.
    """
    url = "https://crimproject.org/data/observations/{}/".format(obs_id)
    obs_json = requests.get(url).json()
    record = {
        "id": obs_json["id"],
        "musical type": obs_json["musical_type"],
        "int": obs_json["mt_fg_int"],
        "tint": obs_json["mt_fg_tint"],
        "ema": obs_json["ema"],
        "mei": obs_json["piece"]["mei_links"][0],
        "pdf": obs_json["piece"]["pdf_links"][0],
    }
    print(f'Got: {obs_id}')
    return record
get_ema_for_observation_id(20)
output = get_ema_for_observation_id(20)
pd.Series(output).to_csv("output.csv")
# this holds the output as a LIST of DICTS
obs_data_list = []
# this is the list of IDs to call
obs_call_list = [1,3,5,17,21]
# this is the LOOP that runs through the list aboe
# for observ in obs_call_list:
for observ in range(1,11):
call_list_output = get_ema_for_observation_id(observ)
# the print command simply puts the output in the notebook terminal.
#Later we will put it in the List of Dicts.
# print(call_list_output)
obs_data_list.append(call_list_output)
# list includes APPEND function that will allow us to add one item after each loop.
# EX blank_list = [1,5,6] (note that these are in square brackets as LIST)
# blank_list.append(89)
# range would in parenths as in: range(1,11)
# here we make a LIST object that contains the Range.
# This allows it to iterate over the range
# since the range could be HUGE We can ONLY append a number to a LIST!
Obs_range = list(range(1,11))
# blank_list.append(76)
blank_list
obs_data_list
pd.Series(obs_data_list).to_csv("obs_data_list.csv")
# Pandas DataFrame interprets the series of items in each Dict
# as separate 'cells' (a tab structure)
DF_output = pd.DataFrame(obs_data_list)
DF_output
DF_output.to_csv("obs_data_list.csv")
# two = means check for equality
# for 'contains' use str.contains("letter")
# can also use regex in this (for EMA range)
# Filter_by_Type = (DF_output["musical type"]=="Fuga") & (DF_output["id"]==8)
Filter_by_Type = DF_output["musical type"].str.contains("Fuga")
#
DF_output[Filter_by_Type]
# here is a string of text with numbers in it
my_num = 5
f"here is a string of text with numbers in it: {my_num}"
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import datetime
import os
import glob, os
import time
```
O que vou fazer amanhã:
- Abrir todos os bancos do granular activity/opens com o código que gera uma coluna com o nome do arquivo.
- Ordenar por data e dropar duplicados, assim estimarei a data de envio do email (com a proxy da data da primeira abertura)
- Terei um banco com todas as campanhas com o nome delas (nome do arquivo) no mesmo formato que no aggregated activity
- Depois tenho que juntar todos os bancos do aggregated activity/opened e aggregated activity/not_opened
- Criar uma coluna em cada um desses que especifíque se é de aberto ou fechado
- Em seguida, concatenar opened com not opened
- Mergir com o banco de campanhas
- Ordenar por email e data de envio da campanha(descendente)!
- Daí crio uma contagem que reseta com emails, onde o último email recebido pela pessoa é 1, o segundo 2, assim por diante...
- Depois é apagar com filtros compostos: Se o email 1 (mais recente) é não aberto e o 2, 3,4 e 5
```
files = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/granular_activity/opens/*.csv')
#df = pd.concat([pd.read_csv(fp, encoding='latin-1',nrows=1 ).assign(New=os.path.basename(fp) ) for fp in files], sort=False)
# Não quero me confiar no nrows, que o pressuposto é que a primeira linha de cada arquivo é o timestamp da primeira abertura
df = pd.concat([pd.read_csv(fp, encoding='latin-1').assign(New=os.path.basename(fp) ) for fp in files], sort=False)
df = df.sort_values(['New', 'Timestamp'])
df = df[['New', 'Timestamp']].drop_duplicates('New')
```
Existem 6190 arquivos, mas só 4980 estão sendo lidos. Curiosamente, alguns deles são os que não tem. 2 horas pra descobrir que tudo isso era pq são arquivos vazios. A maior parte são testes ab de 100%, então óbvio que ninguém recebe o combo vencedor. Um caso que se eu fosse irresponsável, não ia dar em nada. Pq a perda não existe de vdd, é só a redução natural de categorias, já que algums não tem observações. O código abaixo (a versão limpa, claro), foi pra idenficcar isso.
```
#paths = pd.DataFrame({'caminhos':files})
#paths['caminhos'] = paths['caminhos'].str[len('C:/Users/ander/Documents/Nossas_mailchimp/granular_activity/opens\\'):] #slicing
#erros = pd.merge(df, paths, left_on='New', right_on='caminhos',how='outer', indicator=True)
#erros[erros['_merge'] != 'both'].caminhos[5000]
#df[df['New'].str.contains('340630_-pdm-solidariedade-nas-ruas-e-ilera-con')]
```
pd.read_csv('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/opened/308509_-rioacess-vel.csv',
encoding='latin-1', usecols = [0,1,2,34, 36]).columns
```
# Load every "opened" aggregate-activity export into one frame, tagging each
# row with its source file name (column `New`).
files_opened = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/opened/*.csv')
opened = pd.concat(
    [pd.read_csv(fp, encoding='latin-1', usecols=[0, 1, 2]).assign(New=os.path.basename(fp))
     for fp in files_opened],
    sort=False,
)
opened.shape  # fixed: was `opended`, an undefined name (NameError)

# Same for the "not opened" exports.
files_notopened = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/not_opened/*.csv')
not_opened = pd.concat(
    [pd.read_csv(fp, encoding='latin-1', usecols=[0, 1, 2]).assign(New=os.path.basename(fp))
     for fp in files_notopened],
    sort=False,
)
not_opened.shape

# Persist the full extracts, then draw small random samples for faster iteration.
not_opened.to_csv('all_not_opened.csv', index=False)
opened.to_csv('all_opened.csv', index=False)  # fixed: was `opended`
df.to_csv('all_emails.csv', index=False)
s_nopened = not_opened.sample(frac=0.01)
s_opened = opened.sample(frac=0.02)  # fixed: was `opended`
s_opened.shape
```
### Dia 2 - Reiniciei o kernel e vou fazer as operações agora sem a memória pesada
open_1 = s_opened[['Email', 'Nome', 'Sobrenome', 'New']]
open_2 = s_opened[['Email Address', 'First Name', 'Last Name', 'New']]
open_3 = s_opened[['E-mail', 'First Name do eleitor', 'New']]
open_3['Sobrenome'] = ''
open_2.columns = ['Email', 'Nome', 'Sobrenome', 'New']
open_3.columns = ['Email', 'Nome', 'New', 'Sobrenome']
open_3 = open_3[['Email', 'Nome', 'Sobrenome', 'New']]
opens = pd.concat([open_1, open_2, open_3])
opens = opens.dropna(subset=['Email'])
opens = opens.merge(df,on='New')
opens['Atividade'] = 'abertura'
n_open_1 = s_nopened[['Email', 'Nome', 'Sobrenome', 'New']]
n_open_2 = s_nopened[['Email Address', 'First Name', 'Last Name', 'New']]
n_open_3 = s_nopened[['E-mail', 'First Name do eleitor', 'New']]
n_open_3['Sobrenome'] = ''
n_open_2.columns = ['Email', 'Nome', 'Sobrenome', 'New']
n_open_3.columns = ['Email', 'Nome', 'New', 'Sobrenome']
n_open_3 = open_3[['Email', 'Nome', 'Sobrenome', 'New']]
n_opens = pd.concat([n_open_1, n_open_2, n_open_3])
n_opens = n_opens.dropna(subset=['Email'])
n_opens = n_opens.merge(df,on='New')
n_opens['Atividade'] = 'não abertura'
```
start_time = time.time()
#not_opened = pd.read_csv('all_not_opened.csv')
#opened = pd.read_csv('all_opened.csv')
emails = pd.read_csv('all_emails.csv')
print("--- %s seconds ---" % (time.time() - start_time))
```
opens
```
open_1 = opened[['Email', 'Nome', 'Sobrenome', 'New']]
open_2 = opened[['Email Address', 'First Name', 'Last Name', 'New']]
open_3 = opened[['E-mail', 'First Name do eleitor', 'New']]
open_3['Sobrenome'] = ''
open_2.columns = ['Email', 'Nome', 'Sobrenome', 'New']
open_3.columns = ['Email', 'Nome', 'New', 'Sobrenome']
open_3 = open_3[['Email', 'Nome', 'Sobrenome', 'New']]
opens = pd.concat([open_1, open_2, open_3])
opens = opens.dropna(subset=['Email'])
opens = opens.merge(emails,on='New')
opens['Atividade'] = 'abertura'
recent_opens = opens.head(10000000)[opens.head(10000000)['Timestamp'] >'2019-01-01 12:12:48']
## por algum motivo, quando eu uso head e não o banco inteiro, vai mais rápido.
recent_opens.to_csv('recent_opens.csv', index=False)
# Já foi rodado
n_opened = not_opened.merge(emails, on='New')
start_time = time.time()
n_opened.to_csv('n_opened.csv', index=False)
print("--- %s seconds ---" % (time.time() - start_time))
```
Recomeçar daqui: filtrar acima de 2019 e depois salvar (sábado). Na segunda volto e reorganizo, concateno com opens, ordeno e faço os cortes
Estratégia nova: impossível utilizar todas as linhas de não aberturas, então criei uma pasta só pros arquivos a partir de 2019.
Basicamnte recomecei tudo a partir daqui
```
start_time = time.time()
files_notopened = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/not_opened_recentes/*.csv')
n_opens = pd.concat([pd.read_csv(fp, encoding='latin-1',usecols = [0,1,2]
).assign(New=os.path.basename(fp) ) for fp in files_notopened], sort=False)
print("--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
n_open_1 = n_opens[['Email', 'Nome', 'Sobrenome', 'New']].dropna(subset=['Email'])
n_open_2 = n_opens[['Email Address', 'First Name', 'Last Name', 'New']].dropna(subset=['Email Address'])
n_open_2.columns = ['Email', 'Nome', 'Sobrenome', 'New']
n_opens = pd.concat([n_open_1, n_open_2])
#n_opens = n_opens.dropna(subset=['Email'])
n_opens = n_opens.merge(emails,on='New')
n_opens['Atividade'] = 'não abertura'
print("--- %s seconds ---" % (time.time() - start_time))
```
Tática do opens abaixo. Mas pro n_opens, vai ser primeiro merge (já feito), depois corte de datas e só aí reorganizo e concateno. Depois, vou concatenar com o opens, reordenar e fazer os cortes
```
n_opens.to_csv('recent_n_opens.csv', index=False)
opens = pd.read_csv('recent_opens.csv')
opens.shape[0] + n_opens.shape[0]
type(all_activities['Timestamp'])
all_activities = pd.concat([opens, n_opens])
all_activities = all_activities.sort_values(['Email','Timestamp'])
all_activities.to_csv('all_recent_activities.csv', index=False)
all_activities.to_json('all_recent_activities.json', index=False)
```
tirar média e desvio padrão do número de emails de cada um e do % de abertura de cada pessoa. Agora é só lazer.
porra, agora ainda vai ter que juntar com as inscrições de cada pessoa. Se é meu rio, ms, mapa..
## Recomeço aqui
```
df = pd.read_csv('all_recent_activities.csv')
files_notopened = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/not_opened_recentes/*.csv')
n_opens = pd.concat([pd.read_csv(fp, encoding='latin-1'
).assign(New=os.path.basename(fp) ) for fp in files_notopened], sort=False)
n_opens = n_opens[['Email', 'Nome', 'Sobrenome', 'Inscrições', 'Interesses', 'Member Rating', 'New', 'Email Address',
'First Name', 'Last Name']].dropna(subset=['Inscrições'])
files_notopened = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/opened_recentes/*.csv')
opens = pd.concat([pd.read_csv(fp, encoding='latin-1'
).assign(New=os.path.basename(fp) ) for fp in files_notopened], sort=False)
opens = opens[['Email', 'Nome', 'Sobrenome', 'Inscrições', 'Interesses', 'Member Rating', 'New', 'Email Address',
'First Name', 'Last Name']].dropna(subset=['Inscrições'])
opens.Email.nunique()
n_opens = n_opens[['Email', 'Inscrições', 'Member Rating', 'New']].drop_duplicates('Email', keep='last')
opens = opens[['Email', 'Inscrições', 'Member Rating', 'New']].drop_duplicates('Email', keep='last')
inscricoes = pd.concat([n_opens, opens])
inscricoes.shape
inscricoes = inscricoes.merge(emails, on='New')
inscricoes = inscricoes.sort_values(['Email', 'Timestamp'])
inscricoes = inscricoes.drop_duplicates("Email")
inscricoes.columns = ['Email', 'Inscrições', 'Menber Rating', 'New', 'Timestamp']
df = df.merge(inscricoes[['Email', 'Inscrições', 'Menber Rating']], on='Email', how='outer', indicator=True)
df.to_csv('all_recent_activities_inscricoes.csv', index=False)
df = pd.read_csv('all_recent_activities_inscricoes.csv')
df = df.drop('_merge', axis=1)
```
Criar banco no nível do usuário
```
user_nopen = df[df['Atividade'] == 'não abertura'].groupby('Email', as_index=False).agg({'Atividade': "count"})
user_open = df[df['Atividade'] == 'abertura'].groupby('Email', as_index=False).agg({'Atividade': "count"})
user_geral = df.groupby('Email', as_index=False).agg({"Timestamp":"first","New" : "first" ,"Inscrições":"last","Menber Rating":"last"})
user_nopen.columns = ['Email','n_open']
user_open.columns = ['Email', 'open']
user_geral.columns = ['Email', 'First Email', 'New', 'Inscrições', 'Member Rating']
user = pd.merge(user_nopen, user_open, on='Email', how='outer', indicator=True)
user = user.merge(user_geral, on='Email')
#Taxa de abertura Geral
user.open.sum() /(user.open.sum() + user.n_open.sum())
user['Inscrições'] = user['Inscrições'].fillna('0')
user['corte'] = np.where((user['Inscrições'].str.contains('Meu Recife') |user['Inscrições'].str.contains('Minha Jampa')
| user['Inscrições'].str.contains('Minha Campinas') | user['Inscrições'].str.contains('Minha Porto Alegre'))
, "imune", "elegível")
imunes = user[user['corte'] == 'imune']
user = user[user['corte'] == 'elegível']
#user[user['Email'] =='enrica@nossas.org']
# Primeiras exclusões
nunca_abriu = user[user['_merge'] =='left_only']
apagar_1 = nunca_abriu[nunca_abriu['n_open'] >= 3] # quem nunca abriu mesmo já recebendo mais de 3 emails
# nunca abriu, recebeu menos que 2, mas é antigo
apagar_2 = nunca_abriu[(nunca_abriu['n_open'] < 3) & (nunca_abriu['First Email'] < '2019-07-01 00:00:01')]
# pessoas que não abriram nenhum email, mas receberam 1 ou 2 e entraram há menos de 1 ano na base
alerta = nunca_abriu[(nunca_abriu['n_open'] < 3) & (nunca_abriu['First Email'] > '2019-07-01 00:00:01')]
apagar_1.to_csv('nunca_abriu_1.csv')
apagar_2.to_csv('nunca_abriu_2.csv')
alerta.to_csv('nunca_abriu_alerta.csv')
import pandas as pd
df = pd.read_csv('nunca_abriu_1.csv')
df['Member Rating'].value_counts(dropna=False)
apagar_1.shape
apagar_2.shape
alerta.shape
nunca_abriu.n_open.sum()
df = df.merge(nunca_abriu[['Email']], on='Email', how='outer', indicator=True)
df = df[df['_merge'] != "both"]
df['corte'] = np.where((df['Inscrições'].str.contains('Meu Recife') |df['Inscrições'].str.contains('Minha Jampa')
| df['Inscrições'].str.contains('Minha Campinas') | df['Inscrições'].str.contains('Minha Porto Alegre'))
, "imune", "elegível")
df = df[df['corte'] == 'elegível']
```
CARALHO EU SEMPRE QUIS ESSE CÓDIGO (HACK PRA DROPAR DUPLICADOS MANTENDO N LINHAS)
```
df =df.sort_values(['Email', 'Timestamp'])
df['Inscrições'] = df['Inscrições'].fillna('0')
df_3 = df.groupby('Email').tail(3) #last 3 rows
df_5 = df.groupby('Email').tail(5) #last 5 rows
df_10 = df.groupby('Email').tail(10) #last 10 rows
df_3['abertura'] = np.where((df_3['Atividade'] =='abertura') ,1, 0)
df_3['não abertura'] = np.where((df_3['Atividade'] == 'não abertura'), 1, 0)
df_5['abertura'] = np.where((df_5['Atividade'] =='abertura') ,1, 0)
df_5['não abertura'] = np.where((df_5['Atividade'] == 'não abertura'), 1, 0)
df_10['abertura'] = np.where((df_10['Atividade'] =='abertura') ,1, 0)
df_10['não abertura'] = np.where((df_10['Atividade'] == 'não abertura'), 1, 0)
df_3 = df_3.groupby(['Email', 'Inscrições'],as_index=False).agg({'Atividade': "count", 'abertura': 'sum', 'não abertura':'sum'})
df_5 = df_5.groupby(['Email', 'Inscrições'],as_index=False).agg({'Atividade': "count", 'abertura': 'sum', 'não abertura':'sum'})
df_10=df_10.groupby(['Email', 'Inscrições'],as_index=False).agg({'Atividade': "count", 'abertura': 'sum', 'não abertura':'sum'})
apagar_3 = df_3[(df_3['Atividade'] == 3) & (df_3['não abertura'] == 3)]
apagar_5 = df_5[(df_5['Atividade'] == 5) & (df_5['não abertura'] == 5)]
apagar_10 = df_10[(df_10['Atividade'] == 10) & (df_10['não abertura'] == 10)]
apagar_3.to_csv('apagar_3.csv', index=False)
apagar_5.to_csv('apagar_5.csv', index=False)
apagar_10.to_csv('apagar_10.csv', index=False)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import requests
import bs4 as bs
import urllib.request
```
## Extracting features of 2020 movies from Wikipedia
```
# Scrape the list of 2020 American films from Wikipedia.
link = "https://en.wikipedia.org/wiki/List_of_American_films_of_2020"
source = urllib.request.urlopen(link).read()
soup = bs.BeautifulSoup(source,'lxml')
# The quarterly film tables on the page all share this CSS class.
tables = soup.find_all('table',class_='wikitable sortable')
len(tables)
type(tables[0])
df1 = pd.read_html(str(tables[0]))[0]
df2 = pd.read_html(str(tables[1]))[0]
df3 = pd.read_html(str(tables[2]))[0]
# The 4th table contains a malformed quoted attribute; patch it before parsing.
df4 = pd.read_html(str(tables[3]).replace("'1\"\'",'"1"'))[0]
df = df1.append(df2.append(df3.append(df4,ignore_index=True),ignore_index=True),ignore_index=True)
df
# Keep only the columns used for feature extraction.
df_2020 = df[['Title','Cast and crew']]
df_2020
!pip install tmdbv3api
from tmdbv3api import TMDb
import json
import requests
tmdb = TMDb()
# NOTE(review): the TMDB API key is blank -- it must be filled in before the
# get_genre() lookups below can succeed.
tmdb.api_key = ''
from tmdbv3api import Movie
tmdb_movie = Movie()
def get_genre(x):
    """Look up movie title ``x`` on TMDB and return its genres.

    Args:
        x (str): Movie title to search for.

    Returns:
        str: Genre names joined by a single space, or ``np.nan`` when the
        title is not found or has no genres listed.
    """
    # Search TMDB for the title; an empty result list means "not found".
    result = tmdb_movie.search(x)
    if not result:
        return np.nan  # np.NaN alias was removed in NumPy 2.0; use np.nan
    movie_id = result[0].id
    response = requests.get('https://api.themoviedb.org/3/movie/{}?api_key={}'.format(movie_id, tmdb.api_key))
    data_json = response.json()
    if data_json['genres']:
        # Join all genre names with a single space, e.g. "Action Comedy".
        return " ".join(genre['name'] for genre in data_json['genres'])
    return np.nan
# Look up genres for every 2020 title via the TMDB API (slow: one HTTP request per film).
df_2020['genres'] = df_2020['Title'].map(lambda x: get_genre(str(x)))
df_2020
def get_director(x):
    """Extract the director's name from a Wikipedia 'Cast and crew' string.

    The name is everything before the first matching credit marker; if no
    marker is present the input is returned unchanged.
    """
    for marker in (" (director)", " (directors)"):
        if marker in x:
            return x.split(marker)[0]
    # Fallback: combined director/screenplay credit (or no marker at all).
    return x.split(" (director/screenplay)")[0]
df_2020['director_name'] = df_2020['Cast and crew'].map(lambda x: get_director(str(x)))
def get_actor1(x):
    """Return the first listed cast member from a 'Cast and crew' string."""
    # Everything after the "...screenplay); " credit prefix is the cast list.
    cast_part = x.split("screenplay); ")[-1]
    return cast_part.split(", ")[0]
df_2020['actor_1_name'] = df_2020['Cast and crew'].map(lambda x: get_actor1(str(x)))
def get_actor2(x):
    """Return the second listed cast member from a 'Cast and crew' string.

    Returns:
        str: The second comma-separated name after the credit prefix, or
        ``np.nan`` when fewer than two names are listed.
    """
    cast = (x.split("screenplay); ")[-1]).split(", ")
    if len(cast) < 2:
        return np.nan  # np.NaN alias was removed in NumPy 2.0; use np.nan
    return cast[1]
df_2020['actor_2_name'] = df_2020['Cast and crew'].map(lambda x: get_actor2(str(x)))
def get_actor3(x):
    """Return the third listed cast member from a 'Cast and crew' string.

    Returns:
        str: The third comma-separated name after the credit prefix, or
        ``np.nan`` when fewer than three names are listed.
    """
    cast = (x.split("screenplay); ")[-1]).split(", ")
    if len(cast) < 3:
        return np.nan  # np.NaN alias was removed in NumPy 2.0; use np.nan
    return cast[2]
# Third-billed cast member for each film (NaN when fewer than three names).
df_2020['actor_3_name'] = df_2020['Cast and crew'].map(lambda x: get_actor3(str(x)))
df_2020
df_2020 = df_2020.rename(columns={'Title':'movie_title'})
new_df20 = df_2020.loc[:,['director_name','actor_1_name','actor_2_name','actor_3_name','genres','movie_title']]
new_df20
# Concatenate the text features into a single 'comb' column (used downstream for similarity).
new_df20['comb'] = new_df20['actor_1_name'] + ' ' + new_df20['actor_2_name'] + ' '+ new_df20['actor_3_name'] + ' '+ new_df20['director_name'] +' ' + new_df20['genres']
new_df20.isna().sum()
# Drop films with any missing feature (a NaN makes the whole 'comb' string NaN).
new_df20 = new_df20.dropna(how='any')
new_df20.isna().sum()
new_df20['movie_title'] = new_df20['movie_title'].str.lower()
new_df20
# Append the 2020 films to the existing dataset and persist the combined file.
old_df = pd.read_csv('final_data.csv')
old_df
final_df = old_df.append(new_df20,ignore_index=True)
final_df
final_df.to_csv('main_data.csv',index=False)
```
| github_jupyter |
# Importing the libraries
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score,recall_score, precision_score, f1_score
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, average_precision_score
```
# Load and Explore Data
```
# Load the Australian weather dataset and take a first look.
dataset=pd.read_csv('weatherAUS.csv')
dataset.head()
dataset.describe()
# find categorical variables (pandas object dtype 'O')
categorical = [var for var in dataset.columns if dataset[var].dtype=='O']
print('There are {} categorical variables : \n'.format(len(categorical)), categorical)
# view the categorical variables
dataset[categorical].head()
# check and print categorical variables containing missing values
nullCategorical = [var for var in categorical if dataset[var].isnull().sum()!=0]
print(dataset[nullCategorical].isnull().sum())
```
Number of labels: cardinality
The number of labels within a categorical variable is known as cardinality. A high number of labels within a variable is known as high cardinality. High cardinality may pose some serious problems in the machine learning model. So, I will check for high cardinality.
```
# check for cardinality in categorical variables
for var in categorical:
    print(var, ' contains ', len(dataset[var].unique()), ' labels')
# Feature Extraction
dataset['Date'].dtypes
# parse the dates, currently coded as strings, into datetime format
dataset['Date'] = pd.to_datetime(dataset['Date'])
dataset['Date'].dtypes
# extract year from date
dataset['Year'] = dataset['Date'].dt.year
# extract month from date
dataset['Month'] = dataset['Date'].dt.month
# extract day from date
dataset['Day'] = dataset['Date'].dt.day
dataset.info()
# drop the original Date variable (its information now lives in Year/Month/Day)
dataset.drop('Date', axis=1, inplace = True)
dataset.head()
```
## Explore Categorical Variables
```
# Inspect each categorical variable and preview its one-hot (dummy) encoding.
# dummy_na=True adds an extra indicator column for missing values.
# Explore Location variable
dataset.Location.unique()
# check frequency distribution of values in Location variable
dataset.Location.value_counts()
# let's do One Hot Encoding of Location variable
# get k-1 dummy variables after One Hot Encoding
pd.get_dummies(dataset.Location, drop_first=True).head()
# Explore WindGustDir variable
dataset.WindGustDir.unique()
# check frequency distribution of values in WindGustDir variable
dataset.WindGustDir.value_counts()
# let's do One Hot Encoding of WindGustDir variable
# get k-1 dummy variables after One Hot Encoding
# also add an additional dummy variable to indicate there was missing data
pd.get_dummies(dataset.WindGustDir, drop_first=True, dummy_na=True).head()
# sum the number of 1s per boolean variable over the rows of the dataset --> it will tell us how many observations we have for each category
pd.get_dummies(dataset.WindGustDir, drop_first=True, dummy_na=True).sum(axis=0)
# Explore WindDir9am variable
dataset.WindDir9am.unique()
dataset.WindDir9am.value_counts()
pd.get_dummies(dataset.WindDir9am, drop_first=True, dummy_na=True).head()
# sum the number of 1s per boolean variable over the rows of the dataset -- it will tell us how many observations we have for each category
pd.get_dummies(dataset.WindDir9am, drop_first=True, dummy_na=True).sum(axis=0)
# Explore WindDir3pm variable
dataset['WindDir3pm'].unique()
dataset['WindDir3pm'].value_counts()
pd.get_dummies(dataset.WindDir3pm, drop_first=True, dummy_na=True).head()
pd.get_dummies(dataset.WindDir3pm, drop_first=True, dummy_na=True).sum(axis=0)
# Explore RainToday variable
dataset['RainToday'].unique()
dataset.RainToday.value_counts()
pd.get_dummies(dataset.RainToday, drop_first=True, dummy_na=True).head()
pd.get_dummies(dataset.RainToday, drop_first=True, dummy_na=True).sum(axis=0)
```
## Explore Numerical Variables
```
# find numerical variables (anything that is not pandas object dtype)
numerical = [var for var in dataset.columns if dataset[var].dtype!='O']
print('There are {} numerical variables : \n'.format(len(numerical)), numerical)
# view the numerical variables
dataset[numerical].head()
# check missing values in numerical variables
dataset[numerical].isnull().sum()
# view summary statistics in numerical variables to check for outliers
# Bug fix: the original wrote `print(round(...),2)`, which rounded to 0 decimals
# and printed a stray "2"; the 2 belongs inside round() as the ndigits argument.
print(round(dataset[numerical].describe(), 2))
# plot box plot to check outliers
plt.figure(figsize=(10,15))
plt.subplot(2, 2, 1)
fig = sns.boxplot(y=dataset['Rainfall'])
fig.set_ylabel('Rainfall')
plt.subplot(2, 2, 2)
fig = sns.boxplot(y=dataset["Evaporation"])
fig.set_ylabel('Evaporation')
plt.subplot(2, 2, 3)
fig = sns.boxplot(y=dataset['WindSpeed9am'])
fig.set_ylabel('WindSpeed9am')
plt.subplot(2, 2, 4)
fig = sns.boxplot(y=dataset['WindSpeed3pm'])
fig.set_ylabel('WindSpeed3pm')
# plot histogram to check distribution
# NOTE(review): the y-label 'RainTomorrow' on these histograms is misleading --
# the y-axis actually shows bin counts of each variable.
plt.figure(figsize=(10,15))
plt.subplot(2, 2, 1)
fig = dataset.Rainfall.hist(bins=10)
fig.set_xlabel('Rainfall')
fig.set_ylabel('RainTomorrow')
plt.subplot(2, 2, 2)
fig = dataset.Evaporation.hist(bins=10)
fig.set_xlabel('Evaporation')
fig.set_ylabel('RainTomorrow')
plt.subplot(2, 2, 3)
fig = dataset.WindSpeed9am.hist(bins=10)
fig.set_xlabel('WindSpeed9am')
fig.set_ylabel('RainTomorrow')
plt.subplot(2, 2, 4)
fig = dataset.WindSpeed3pm.hist(bins=10)
fig.set_xlabel('WindSpeed3pm')
fig.set_ylabel('RainTomorrow')
# Compute extreme-outlier fences at 3*IQR (instead of the usual 1.5*IQR)
# and count values that fall outside them for each skewed variable.
# find outliers for Rainfall variable
IQR = dataset.Rainfall.quantile(0.75) - dataset.Rainfall.quantile(0.25)
Rainfall_Lower_fence = dataset.Rainfall.quantile(0.25) - (IQR * 3)
Rainfall_Upper_fence = dataset.Rainfall.quantile(0.75) + (IQR * 3)
print('Outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=Rainfall_Lower_fence, upperboundary=Rainfall_Upper_fence))
print('Number of outliers are {}'. format(dataset[(dataset.Rainfall> Rainfall_Upper_fence) | (dataset.Rainfall< Rainfall_Lower_fence)]['Rainfall'].count()))
# find outliers for Evaporation variable
IQR = dataset.Evaporation.quantile(0.75) - dataset.Evaporation.quantile(0.25)
Evaporation_Lower_fence = dataset.Evaporation.quantile(0.25) - (IQR * 3)
Evaporation_Upper_fence = dataset.Evaporation.quantile(0.75) + (IQR * 3)
print('Outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=Evaporation_Lower_fence, upperboundary=Evaporation_Upper_fence))
print('Number of outliers are {}'. format(dataset[(dataset.Evaporation> Evaporation_Upper_fence) | (dataset.Evaporation< Evaporation_Lower_fence)]['Evaporation'].count()))
# find outliers for WindSpeed9am variable
IQR = dataset.WindSpeed9am.quantile(0.75) - dataset.WindSpeed9am.quantile(0.25)
WindSpeed9am_Lower_fence = dataset.WindSpeed9am.quantile(0.25) - (IQR * 3)
WindSpeed9am_Upper_fence = dataset.WindSpeed9am.quantile(0.75) + (IQR * 3)
print('Outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=WindSpeed9am_Lower_fence, upperboundary=WindSpeed9am_Upper_fence))
print('Number of outliers are {}'. format(dataset[(dataset.WindSpeed9am> WindSpeed9am_Upper_fence) | (dataset.WindSpeed9am< WindSpeed9am_Lower_fence)]['WindSpeed9am'].count()))
# find outliers for WindSpeed3pm variable
IQR = dataset.WindSpeed3pm.quantile(0.75) - dataset.WindSpeed3pm.quantile(0.25)
WindSpeed3pm_Lower_fence = dataset.WindSpeed3pm.quantile(0.25) - (IQR * 3)
WindSpeed3pm_Upper_fence = dataset.WindSpeed3pm.quantile(0.75) + (IQR * 3)
print('Outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=WindSpeed3pm_Lower_fence, upperboundary=WindSpeed3pm_Upper_fence))
# Bug fix: the original comparisons were inverted (> Lower_fence | < Upper_fence),
# which counted nearly every row as an outlier. Count values OUTSIDE the fences.
print('Number of outliers are {}'. format(dataset[(dataset.WindSpeed3pm> WindSpeed3pm_Upper_fence) | (dataset.WindSpeed3pm< WindSpeed3pm_Lower_fence)]['WindSpeed3pm'].count()))
def max_value(dataset, variable, top):
    """Return `dataset[variable]` with all values above `top` capped at `top`."""
    column = dataset[variable]
    return np.where(column > top, top, column)
# Cap (winsorize) each skewed variable at its upper fence to tame extreme outliers.
dataset['Rainfall'] = max_value(dataset, 'Rainfall', Rainfall_Upper_fence)
dataset['Evaporation'] = max_value(dataset, 'Evaporation', Evaporation_Upper_fence)
dataset['WindSpeed9am'] = max_value(dataset, 'WindSpeed9am', WindSpeed9am_Upper_fence)
# NOTE(review): 57 is a hard-coded cap for WindSpeed3pm; presumably it should be
# WindSpeed3pm_Upper_fence like the others -- confirm before relying on it.
dataset['WindSpeed3pm'] = max_value(dataset, 'WindSpeed3pm', 57)
# Re-count outliers to verify the capping worked.
print('Number of outliers are {}'. format(dataset[(dataset.Rainfall> Rainfall_Upper_fence) | (dataset.Rainfall< Rainfall_Lower_fence)]['Rainfall'].count()))
print('Number of outliers are {}'. format(dataset[(dataset.Evaporation> Evaporation_Upper_fence) | (dataset.Evaporation< Evaporation_Lower_fence)]['Evaporation'].count()))
print('Number of outliers are {}'. format(dataset[(dataset.WindSpeed9am> WindSpeed9am_Upper_fence) | (dataset.WindSpeed9am< WindSpeed9am_Lower_fence)]['WindSpeed9am'].count()))
# Bug fix: the original comparisons were inverted (> Lower_fence | < Upper_fence).
print('Number of outliers are {}'. format(dataset[(dataset.WindSpeed3pm> WindSpeed3pm_Upper_fence) | (dataset.WindSpeed3pm< WindSpeed3pm_Lower_fence)]['WindSpeed3pm'].count()))
# Replace NaN with default values
nullValues = [var for var in dataset.columns if dataset[var].isnull().sum()!=0]
print(dataset[nullValues].isnull().sum())
categorical = [var for var in nullValues if dataset[var].dtype=='O']
from sklearn.impute import SimpleImputer
# Categorical NaNs: strategy='constant' with no fill_value fills with the
# imputer's default placeholder string.
categoricalImputer = SimpleImputer(missing_values=np.nan,strategy='constant')
categoricalImputer.fit(dataset[categorical])
dataset[categorical]=categoricalImputer.transform(dataset[categorical])
print(dataset.head())
numerical = [var for var in dataset.columns if dataset[var].dtype!='O']
from sklearn.impute import SimpleImputer
# Numerical NaNs are replaced with the column mean.
numericalImputer = SimpleImputer(missing_values=np.nan,strategy='mean')
numericalImputer.fit(dataset[numerical])
dataset[numerical]=numericalImputer.transform(dataset[numerical])
print(dataset.head())
```
# Split data for model
```
# Separate features (x) from the target (y).
x = dataset.drop(['RainTomorrow'], axis=1) # get all row data expect RainTomorrow
y = dataset['RainTomorrow'] # get the RainTomorrow column depentant variable data for all rows
print(x.head())
print(y[:10])
```
# Encoding categorical data
```
#encoding independent variable
x = pd.get_dummies(x)
print(x.head())
## Encoding dependent variable
# use LabelEncoder to replace purchased (dependent variable) with 0 and 1
from sklearn.preprocessing import LabelEncoder
y= LabelEncoder().fit_transform(y)
print(y[:10])
```
# Splitting the dataset into training and test set
```
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state = 0) # func returns train and test data. It takes dataset and then split size test_size =0.3 means 30% data is for test and rest for training and random_state
print(x_train.head())
print(x_test.head())
print(y_train[:10])
print(y_test[:10])
```
# Feature scaling
```
# Scale features to [0, 1]; fit on the training set only to avoid leakage.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x_train= scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
print(x_train[:10,:])
print(x_test[:10,:])
```
# Build Model
```
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(solver='liblinear', random_state=0)
classifier.fit(x_train,y_train)
#predicting the test set results
y_pred = classifier.predict(x_test)
```
# Evaluate Model
```
# Confusion matrix, per-class report, and summary metrics for the classifier.
cm = confusion_matrix(y_test,y_pred)
print(cm)
cr = classification_report(y_test,y_pred)
print(cr)
accuracy_score(y_test,y_pred)
average_precision= average_precision_score(y_test,y_pred)
print(average_precision)
recall_score(y_test,y_pred)
precision_score(y_test,y_pred)
f1_score(y_test,y_pred)
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import plot_precision_recall_curve
disp = plot_precision_recall_curve(classifier, x_test, y_test)
disp.ax_.set_title('2-class Precision-Recall curve: '
'AP={0:0.2f}'.format(average_precision))
```
| github_jupyter |
```
# default_exp callback.PredictionDynamics
```
# PredictionDynamics
> Callback used to visualize model predictions during training.
This is an implementation created by Ignacio Oguiza (timeseriesAI@gmail.com) based on the blog post "A Recipe for Training Neural Networks" (https://karpathy.github.io/2019/04/25/recipe/) by Andrej Karpathy that I really liked. One of the things he mentioned was this:
>"**visualize prediction dynamics**. I like to visualize model predictions on a fixed test batch during the course of training. The “dynamics” of how these predictions move will give you incredibly good intuition for how the training progresses. Many times it is possible to feel the network “struggle” to fit your data if it wiggles too much in some way, revealing instabilities. Very low or very high learning rates are also easily noticeable in the amount of jitter." A. Karpathy
```
#export
from fastai.callback.all import *
from tsai.imports import *
# export
class PredictionDynamics(Callback):
    """Plot how model predictions on the validation set evolve across training epochs."""
    # order=65 runs this callback after the recorder; run_valid gathers validation batches.
    order, run_valid = 65, True

    def __init__(self, show_perc=1., figsize=(10,6), alpha=.3, size=30, color='lime', cmap='gist_rainbow', normalize=False,
                 sensitivity=None, specificity=None):
        """
        Args:
            show_perc: percent of samples from the valid set that will be displayed. Default: 1 (all).
                You can reduce it if the number is too high and the chart is too busy.
            alpha: level of transparency. Default:.3. 1 means no transparency.
            figsize: size of the chart. You may want to expand it if too many classes.
            size: size of each sample in the chart. Default:30. You may need to decrease it a bit if too many classes/ samples.
            color: color used in regression plots.
            cmap: color map used in classification plots.
            normalize: flag to normalize histograms displayed in binary classification.
            sensitivity: (aka recall or True Positive Rate) if you pass a float between 0. and 1. the sensitivity threshold will be plotted in the chart.
                Only used in binary classification.
            specificity: (or True Negative Rate) if you pass a float between 0. and 1. it will be plotted in the chart. Only used in binary classification.
            The red line in classification tasks indicate the average probability of true class.
        """
        # fastai helper: stores every __init__ argument as a same-named attribute.
        store_attr()

    def before_fit(self):
        # Skip during lr_find and get_preds runs.
        self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds")
        if not self.run:
            return
        # cat: classification task (dls.c holds the number of classes).
        self.cat = True if (hasattr(self.dls, "c") and self.dls.c > 1) else False
        if self.cat:
            self.binary = self.dls.c == 2
        if self.show_perc != 1:
            # Pre-sample a fixed subset of validation indices to display every epoch.
            valid_size = len(self.dls.valid.dataset)
            self.show_idxs = np.random.choice(valid_size, int(round(self.show_perc * valid_size)), replace=False)
        # Prepare ground truth container
        self.y_true = []

    def before_epoch(self):
        # Prepare empty pred container in every epoch
        self.y_pred = []

    def after_pred(self):
        # Only collect predictions on validation batches.
        if self.training:
            return
        # Get y_true in epoch 0
        if self.epoch == 0:
            self.y_true.extend(self.y.cpu().flatten().numpy())
        # Gather y_pred for every batch
        if self.cat:
            if self.binary:
                # Binary: keep the probability of the positive class (column 1).
                y_pred = F.softmax(self.pred, -1)[:, 1].reshape(-1, 1).cpu()
            else:
                # Multiclass: keep the predicted probability of each sample's TRUE class.
                y_pred = torch.gather(F.softmax(self.pred, -1), -1, self.y.reshape(-1, 1).long()).cpu()
        else:
            # Regression: raw predictions.
            y_pred = self.pred.cpu()
        self.y_pred.extend(y_pred.flatten().numpy())

    def after_epoch(self):
        # Ground truth
        if self.epoch == 0:
            self.y_true = np.array(self.y_true)
            if self.show_perc != 1:
                self.y_true = self.y_true[self.show_idxs]
            self.y_bounds = (np.min(self.y_true), np.max(self.y_true))
            self.min_x_bounds, self.max_x_bounds = np.min(self.y_true), np.max(self.y_true)
        self.y_pred = np.array(self.y_pred)
        if self.show_perc != 1:
            self.y_pred = self.y_pred[self.show_idxs]
        if self.cat:
            neg_thr = None
            pos_thr = None
            if self.specificity is not None:
                # Threshold below which the requested fraction of negatives falls.
                inp0 = self.y_pred[self.y_true == 0]
                neg_thr = np.sort(inp0)[-int(len(inp0) * (1 - self.specificity))]
            if self.sensitivity is not None:
                # Threshold above which the requested fraction of positives falls.
                inp1 = self.y_pred[self.y_true == 1]
                pos_thr = np.sort(inp1)[-int(len(inp1) * self.sensitivity)]
            self.update_graph(self.y_pred, self.y_true, neg_thr=neg_thr, pos_thr=pos_thr)
        else:
            # Adjust bounds during validation
            self.min_x_bounds = min(self.min_x_bounds, np.min(self.y_pred))
            self.max_x_bounds = max(self.max_x_bounds, np.max(self.y_pred))
            x_bounds = (self.min_x_bounds, self.max_x_bounds)
            self.update_graph(self.y_pred, self.y_true, x_bounds=x_bounds, y_bounds=self.y_bounds)

    def update_graph(self, y_pred, y_true, x_bounds=None, y_bounds=None, neg_thr=None, pos_thr=None):
        # One-time setup of figure, display handles and per-class plotting artefacts.
        if not hasattr(self, 'graph_fig'):
            self.df_out = display("", display_id=True)
            if self.cat:
                self._cl_names = self.dls.vocab
                self._classes = L(self.dls.vocab.o2i.values())
                self._n_classes = len(self._classes)
                if self.binary:
                    self.bins = np.linspace(0, 1, 101)
                else:
                    _cm = plt.get_cmap(self.cmap)
                    self._color = [_cm(1. * c/self._n_classes) for c in range(1, self._n_classes + 1)][::-1]
                    self._h_vals = np.linspace(-.5, self._n_classes - .5, self._n_classes + 1)[::-1]
                    # Fixed vertical jitter per class so points don't overlap on a line.
                    self._rand = []
                    for i, c in enumerate(self._classes):
                        self._rand.append(.5 * (np.random.rand(np.sum(y_true == c)) - .5))
            self.graph_fig, self.graph_ax = plt.subplots(1, figsize=self.figsize)
            self.graph_out = display("", display_id=True)
        self.graph_ax.clear()
        if self.cat:
            if self.binary:
                # Overlaid histograms of P(positive) for the two true classes.
                self.graph_ax.hist(y_pred[y_true == 0], bins=self.bins, density=self.normalize, color='red', label=self._cl_names[0],
                                   edgecolor='black', alpha=self.alpha)
                self.graph_ax.hist(y_pred[y_true == 1], bins=self.bins, density=self.normalize, color='blue', label=self._cl_names[1],
                                   edgecolor='black', alpha=self.alpha)
                self.graph_ax.axvline(.5, lw=1, ls='--', color='gray')
                if neg_thr is not None:
                    self.graph_ax.axvline(neg_thr, lw=2, ls='--', color='red', label=f'specificity={(self.specificity):.3f}')
                if pos_thr is not None:
                    self.graph_ax.axvline(pos_thr, lw=2, ls='--', color='blue', label=f'sensitivity={self.sensitivity:.3f}')
                self.graph_ax.set_xlabel(f'probability of class {self._cl_names[1]}', fontsize=12)
                self.graph_ax.legend()
            else:
                # One jittered scatter row per class; red vline marks the class mean.
                for i, c in enumerate(self._classes):
                    self.graph_ax.scatter(y_pred[y_true == c], y_true[y_true == c] + self._rand[i], color=self._color[i],
                                          edgecolor='black', alpha=self.alpha, lw=.5, s=self.size)
                    self.graph_ax.vlines(np.mean(y_pred[y_true == c]), i - .5, i + .5, color='r')
                self.graph_ax.vlines(.5, min(self._h_vals), max(self._h_vals), lw=.5)
                self.graph_ax.hlines(self._h_vals, 0, 1, lw=.5)
                self.graph_ax.set_ylim(min(self._h_vals), max(self._h_vals))
                self.graph_ax.set_yticks(self._classes)
                self.graph_ax.set_yticklabels(self._cl_names)
                self.graph_ax.set_ylabel('true class', fontsize=12)
                self.graph_ax.set_xlabel('probability of true class', fontsize=12)
            self.graph_ax.set_xlim(0, 1)
            self.graph_ax.set_xticks(np.linspace(0, 1, 11))
            self.graph_ax.grid(axis='x', color='gainsboro', lw=.2)
        else:
            # Regression: y_pred vs y_true with the identity line as reference.
            self.graph_ax.scatter(y_pred, y_true, color=self.color, edgecolor='black', alpha=self.alpha, lw=.5, s=self.size)
            self.graph_ax.set_xlim(*x_bounds)
            self.graph_ax.set_ylim(*y_bounds)
            self.graph_ax.plot([*x_bounds], [*x_bounds], color='gainsboro')
            self.graph_ax.set_xlabel('y_pred', fontsize=12)
            self.graph_ax.set_ylabel('y_true', fontsize=12)
            self.graph_ax.grid(color='gainsboro', lw=.2)
        self.graph_ax.set_title(f'Prediction Dynamics \nepoch: {self.epoch + 1}/{self.n_epoch}')
        # Refresh the metrics table and the chart in place (display_id handles).
        self.df_out.update(pd.DataFrame(np.stack(self.learn.recorder.values)[-1].reshape(1,-1),
                                        columns=self.learn.recorder.metric_names[1:-1], index=[self.epoch]))
        self.graph_out.update(self.graph_ax.figure)
        if self.epoch == self.n_epoch - 1:
            plt.close(self.graph_ax.figure)
# Demo: train an InceptionTime classifier on the NATOPS UCR dataset with the callback attached.
from tsai.basics import *
from tsai.models.InceptionTime import *
dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, split_data=False)
check_data(X, y, splits, False)
tfms = [None, [Categorize()]]
batch_tfms = [TSStandardize(by_var=True)]
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
learn = ts_learner(dls, InceptionTime, metrics=accuracy, cbs=PredictionDynamics())
learn.fit_one_cycle(2, 3e-3)
#hide
# Export this notebook to a python script (nbdev-style).
from tsai.imports import *
from tsai.export import *
nb_name = get_nb_name()
# nb_name = "064_callback.PredictionDynamics.ipynb"
create_scripts(nb_name);
```
| github_jupyter |
# Identifying special matrices
## Instructions
In this assignment, you shall write a function that will test if a 4×4 matrix is singular, i.e. to determine if an inverse exists, before calculating it.
You shall use the method of converting a matrix to echelon form, and testing if this fails by leaving zeros that can’t be removed on the leading diagonal.
Don't worry if you've not coded before, a framework for the function has already been written.
Look through the code, and you'll be instructed where to make changes.
We'll do the first two rows, and you can use this as a guide to do the last two.
### Matrices in Python
In the *numpy* package in Python, matrices are indexed using zero for the top-most column and left-most row.
I.e., the matrix structure looks like this:
```python
A[0, 0] A[0, 1] A[0, 2] A[0, 3]
A[1, 0] A[1, 1] A[1, 2] A[1, 3]
A[2, 0] A[2, 1] A[2, 2] A[2, 3]
A[3, 0] A[3, 1] A[3, 2] A[3, 3]
```
You can access the value of each element individually using,
```python
A[n, m]
```
which will give the n'th row and m'th column (starting with zero).
You can also access a whole row at a time using,
```python
A[n]
```
Which you will see will be useful when calculating linear combinations of rows.
A final note - Python is sensitive to indentation.
All the code you should complete will be at the same level of indentation as the instruction comment.
### How to submit
Edit the code in the cell below to complete the assignment.
Once you are finished and happy with it, press the *Submit Assignment* button at the top of this notebook.
Please don't change any of the function names, as these will be checked by the grading script.
```
# GRADED FUNCTION
import numpy as np
# Our function will go through the matrix replacing each row in order turning it into echelon form.
# If at any point it fails because it can't put a 1 in the leading diagonal,
# we will return the value True, otherwise, we will return False.
# There is no need to edit this function.
def isSingular(A):
    """Return True if the 4x4 matrix A is singular (no inverse exists)."""
    B = np.array(A, dtype=np.float_) # Make B as a copy of A, since we're going to alter it's values.
    try:
        # Convert B to echelon form row by row; each helper raises on failure.
        fixRowZero(B)
        fixRowOne(B)
        fixRowTwo(B)
        fixRowThree(B)
    except MatrixIsSingular:
        # A row fixer could not place a 1 on the diagonal -> matrix is singular.
        return True
    return False
# This next line defines our error flag. For when things go wrong if the matrix is singular.
# There is no need to edit this line.
class MatrixIsSingular(Exception): pass  # Raised when echelon conversion fails.
# For Row Zero, all we require is the first element is equal to 1.
# We'll divide the row by the value of A[0, 0].
# This will get us in trouble though if A[0, 0] equals 0, so first we'll test for that,
# and if this is true, we'll add one of the lower rows to the first one before the division.
# We'll repeat the test going down each lower row until we can do the division.
# There is no need to edit this function.
def fixRowZero(A):
    """Normalize row 0 of A in place so its leading element is 1.

    If A[0, 0] is zero, lower rows are added one at a time until it is not;
    raises MatrixIsSingular if the whole first column is effectively zero.
    """
    for lower in (1, 2, 3):
        if A[0, 0] != 0:
            break
        A[0] = A[0] + A[lower]
    if A[0, 0] == 0:
        raise MatrixIsSingular()
    A[0] = A[0] / A[0, 0]
    return A
# First we'll set the sub-diagonal elements to zero, i.e. A[1,0].
# Next we want the diagonal element to be equal to one.
# We'll divide the row by the value of A[1, 1].
# Again, we need to test if this is zero.
# If so, we'll add a lower row and repeat setting the sub-diagonal elements to zero.
# There is no need to edit this function.
def fixRowOne(A):
    """Eliminate the sub-diagonal entry of row 1 and scale its diagonal to 1.

    Assumes row 0 has already been fixed. Adds lower rows (and re-eliminates)
    when the diagonal lands on zero; raises MatrixIsSingular if it stays zero.
    """
    A[1] = A[1] - A[1, 0] * A[0]
    for lower in (2, 3):
        if A[1, 1] != 0:
            break
        A[1] = A[1] + A[lower]
        A[1] = A[1] - A[1, 0] * A[0]
    if A[1, 1] == 0:
        raise MatrixIsSingular()
    A[1] = A[1] / A[1, 1]
    return A
# This is the first function that you should complete.
# Follow the instructions inside the function at each comment.
def fixRowTwo(A):
    """Zero the two sub-diagonal entries of row 2 and scale its diagonal to 1.

    Assumes rows 0 and 1 are already fixed. If the diagonal ends up zero,
    row 3 is added and the elimination repeated; raises MatrixIsSingular
    if the diagonal still cannot be made non-zero.
    """
    def eliminate():
        # Remove the row-0 and row-1 components from row 2.
        A[2] = A[2] - A[2, 0] * A[0]
        A[2] = A[2] - A[2, 1] * A[1]
    eliminate()
    if A[2, 2] == 0:
        A[2] = A[2] + A[3]
        eliminate()
    if A[2, 2] == 0:
        raise MatrixIsSingular()
    A[2] = A[2] / A[2, 2]
    return A
# You should also complete this function
# Follow the instructions inside the function at each comment.
def fixRowThree(A):
    """Zero the three sub-diagonal entries of row 3 and scale its diagonal to 1.

    Assumes rows 0-2 are already fixed. Raises MatrixIsSingular when the
    remaining diagonal element is zero (the matrix has no inverse).
    """
    for col in range(3):
        A[3] = A[3] - A[3, col] * A[col]
    if A[3, 3] == 0:
        raise MatrixIsSingular()
    A[3] = A[3] / A[3, 3]
    return A
```
## Test your code before submission
To test the code you've written above, run the cell (select the cell above, then press the play button [ ▶| ] or press shift-enter).
You can then use the code below to test out your function.
You don't need to submit this cell; you can edit and run it as much as you like.
Try out your code on tricky test cases!
```
# Rows 2 and 3 are proportional, so this matrix is singular: expect True.
A = np.array([
    [2, 0, 0, 0],
    [0, 3, 0, 0],
    [0, 0, 4, 4],
    [0, 0, 5, 5]
], dtype=np.float_)
isSingular(A)
# Step through the row fixers on a matrix with a zero leading element.
A = np.array([
    [0, 7, -5, 3],
    [2, 8, 0, 4],
    [3, 12, 0, 5],
    [1, 3, 1, 3]
], dtype=np.float_)
fixRowZero(A)
fixRowOne(A)
fixRowTwo(A)
fixRowThree(A)
```
| github_jupyter |
# Label Roads
For machine learning, a set of road labels is needed for the downloaded aerial images. That is, for each aerial image, a mask image the same size is needed with each pixel having value 1 or 0 to indicate the presence or absence of a road.
<table><tr><td><img src='/img/notebook/label_example_img.png'></td><td><img src='/img/notebook/label_example_label.png'></td></tr></table>
Here, we use Open Street Map (OSM) data to create binary road masks for the aerial images as shown above. The OSM data is in the form of lines denoted by sequences of geographic coordinates, and the aerial images are georeferenced, meaning each pixel can be mapped to a coordinate pair. Thus, assigning labels is relatively straightforward by mapping the road coordinates to the pixels in the images. There are two notable shortcomings of this approach:
1. OSM data may sometimes be incomplete or inaccurate.
2. OSM gives only the location of the center of the road and not the full extent of the road width.
The first issue is hard to correct, but with enough data a neural net can hopefully overcome the noise.
The second issue can be approached by assigning road labels more liberally. Rather than only assigning the centerline pixel as a road, one can label the adjacent neighboring pixels as roads as well. Methodical refinements of this procedure include expanding the neighborhood based on road type (e.g. highways have a larger neighborhood than residential streets) or by assigning a probability distribution to neighboring pixels rather than hard 1's. However, for this project, it is sufficient simply to expand the road labels by a fixed amount (this has already been applied in the example above). Compare the undilated (left) and dilated (right) label examples below.
<table><tr><td><img src='/img/web/labels_no_dilation.png'></td><td><img src='/img/web/labels_dilation.png'></td></tr></table>
In this rest of this notebook, a label image (i.e. a binary mask) is generated for each NAIP image downloaded previously. These images are of course the same size as the NAIP image and stored locally. Then, for the large city (Phoenix, AZ) which serves as the training and benchmark set, each image/mask pair is broken up into smaller tiles (say, 512x512x3 pixels) that will be fed as input to a neural net. These tilings are saved as datasets in the hdf5 format.
```
import rasterio
import fiona
import json
import h5py
import cv2
import os
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from rasterio.features import rasterize
from helpers import make_tiles
from pyproj import Proj
from PIL import Image
%matplotlib inline
```
First, we need to figure out which coordinate reference system (CRS) / projections we're working with. Different images may have different projections depending on their location, so the road coordinates need to be mapped with the correct projection.
It's a little overkill, but here we simply project all roads in Arizona for each CRS we find. If memory were a constrained resource, we could limit it to only roads within the cities that were downloaded, but the projections for a single state are managable.
```
from importlib import reload
import helpers
reload(helpers)
from helpers import make_tiles
# Load the metadata written by the earlier NAIP download step.
with open('data/naip/download_info.json', 'r') as places_in:
    places = json.load(places_in)
## Get all GeoTiff paths as a flat list
tif_paths_in = [place_info['img_paths'] for _, place_info in places.items()]
tif_paths_in = [path_in for paths_in in tif_paths_in for path_in in paths_in]
## Get projections
# Collect the distinct coordinate reference systems used by the GeoTIFFs.
projections = []
for tif_path_in in tif_paths_in:
    with rasterio.open(tif_path_in) as tif_in:
        projections.append(tif_in.crs['init'])
projections = list(set(projections))
print(projections)
## Getting shapes for all roads in AZ
shape_path = 'data/osm/arizona-latest-free_shp/gis.osm_roads_free_1.shp'
roads_map = {} # Key is projection CRS, value is list of projected roads
# Project every OSM road once per CRS so any image can look up pre-projected roads.
for projection in projections:
    ## Get transformation
    proj = Proj(init = projection)
    ## Project road coordinates
    roads = []
    for i, feat in enumerate(fiona.open(shape_path, 'r')):
        lons, lats = zip(*feat['geometry']['coordinates'])
        xx, yy = proj(lons, lats)
        road = {'type': 'LineString','coordinates': list(zip(xx,yy))} # In meters
        roads.append(road)
    roads_map[projection] = roads
print('Found {} roads'.format(len(roads_map[projections[0]])))
```
Next, loop through each image, get its CRS, and overlay the roads with the corresponding projection. A dilation from the OpenCV library is used to expand road labels.
```
## Save labels as .PNG images
## Writing roads within bounds of a source geotiff.
labels_dir = 'data/naip/img/labels/'
kernel = np.ones((3,3), np.uint8) # For label dilation (3x3 structuring element)
## Make one output label per input image
for tif_path_in in tif_paths_in:
    labels_name_out = tif_path_in.split('/')[-1].replace('.tif', '_labels.png')
    labels_path_out = labels_dir + labels_name_out
    ## Skip if we've already made it
    if os.path.isfile(labels_path_out):
        continue
    with rasterio.open(tif_path_in) as tif_in:
        ## Use the road list already projected into this image's CRS.
        roads = roads_map[tif_in.crs['init']]
        ## Rasterize a mask: burn road lines as 1 over a 0 background,
        ## marking every pixel a line touches (all_touched=True).
        labels = rasterize(
            roads,
            out_shape = tif_in.shape,
            transform = tif_in.transform,
            default_value = 1,
            fill = 0,
            all_touched=True
        )
    ## Widen the thin centerlines (two passes of the 3x3 kernel), then
    ## scale {0,1} to {0,255} so the mask is visible as a grayscale PNG.
    labels = cv2.dilate(labels, kernel, iterations = 2)
    labels_img = Image.fromarray(labels * 255)
    labels_img.save(labels_path_out)
```
The data from Phoenix is used as the train/test/dev sets and will be stored in a hdf5 file. Two helper functions will accomplish this. First, `make_tiles` takes an image and chunks it up into smaller sizes that can be input to the neural net. Further, we can specify if there should be any padding which there should be for the input image because the neural net reduces the size of the input. In this case, the padding comes from reflecting the edges of the input. We tile both the aerial image and the corresponding label image. The code is in `helpers.py`.
Then, `make_hdf5_set` defined below takes a list of multiple aerial/label image pairs, splits each into tiles (called chunks in the code), and randomly assigns the tiles to the train/dev/test sets in specified proportions.
```
def _append_chunks(dset_x, dset_y, X, Y):
    """Grow a paired input/label dataset along axis 0 and append X/Y at the end."""
    start = dset_x.shape[0]
    end = start + X.shape[0]
    dset_x.resize(end, axis=0)
    dset_y.resize(end, axis=0)
    dset_x[start:] = X
    dset_y[start:] = Y


def make_hdf5_set(
    hdf5_path,
    img_paths,
    frac_train = .80,
    frac_dev = .10,
    frac_test = .10,
    train_input_name = 'X_train',
    train_label_name = 'Y_train',
    dev_input_name = 'X_dev',
    dev_label_name = 'Y_dev',
    test_input_name = 'X_test',
    test_label_name = 'Y_test'
):
    """Tile aerial/label image pairs and write train/dev/test splits to HDF5.

    Args:
        hdf5_path (str): Output HDF5 file; opened in 'w' mode, so any
            existing file at this path is overwritten.
        img_paths (list): Paths to input GeoTIFFs. The matching label PNG
            path is derived by replacing 'download' -> 'labels' and
            '.tif' -> '_labels.png'.
        frac_train / frac_dev / frac_test (float): Split fractions; must
            sum to 1 (within floating-point tolerance).
        *_input_name / *_label_name (str): Dataset names inside the file.

    Side effects: creates six resizable datasets in ``hdf5_path`` and
    prints a one-line summary.
    """
    # Tolerant float comparison: .80 + .10 + .10 is not exactly 1.0 in
    # binary floating point, so a strict `== 1` assert can fail spuriously.
    assert abs(frac_train + frac_dev + frac_test - 1.0) < 1e-9, \
        'frac_train + frac_dev + frac_test must sum to 1'
    with h5py.File(hdf5_path, 'w') as data:
        chunk_counter = 0
        for i, img_path in enumerate(img_paths):
            ## Chunk the image and the corresponding label mask.
            labels_path = img_path.replace('download', 'labels').replace('.tif', '_labels.png')
            X_chunks, _, _ = make_tiles(img_path, pad = 64)
            labels_chunks, _, _ = make_tiles(labels_path)
            ## Normalize labels to {0, 1}; skip the division for an
            ## all-background mask (max == 0) to avoid NaNs from 0/0.
            max_label = labels_chunks.max()
            if max_label > 0:
                labels_chunks = labels_chunks / max_label
            labels_chunks = np.expand_dims(labels_chunks, 3).astype(np.int8)
            chunk_counter = chunk_counter + X_chunks.shape[0]
            ## Split into train/dev/test (fixed seeds keep splits reproducible).
            X_train, X_test, Y_train, Y_test = train_test_split(
                X_chunks, labels_chunks, test_size=frac_test, random_state=40)
            X_train, X_dev, Y_train, Y_dev = train_test_split(
                X_train, Y_train, train_size=frac_train/(frac_train+frac_dev), random_state=30)
            if i == 0:
                ## First image: create resizable datasets. maxshape is derived
                ## from the actual tile shapes instead of hard-coded sizes
                ## (the label tiles have 1 channel, not 3).
                def _maxshape(arr):
                    return (None,) + arr.shape[1:]
                dset_x_train = data.create_dataset(train_input_name, X_train.shape, maxshape=_maxshape(X_train), data=X_train)
                dset_x_dev = data.create_dataset(dev_input_name, X_dev.shape, maxshape=_maxshape(X_dev), data=X_dev)
                dset_x_test = data.create_dataset(test_input_name, X_test.shape, maxshape=_maxshape(X_test), data=X_test)
                dset_y_train = data.create_dataset(train_label_name, Y_train.shape, maxshape=_maxshape(Y_train), data=Y_train)
                dset_y_dev = data.create_dataset(dev_label_name, Y_dev.shape, maxshape=_maxshape(Y_dev), data=Y_dev)
                dset_y_test = data.create_dataset(test_label_name, Y_test.shape, maxshape=_maxshape(Y_test), data=Y_test)
            else:
                ## Later images: append their tiles to the existing datasets.
                _append_chunks(dset_x_train, dset_y_train, X_train, Y_train)
                _append_chunks(dset_x_dev, dset_y_dev, X_dev, Y_dev)
                _append_chunks(dset_x_test, dset_y_test, X_test, Y_test)
    print('Saved {} input/output pairs to {}'.format(chunk_counter, hdf5_path))
```
Since the whole Phoenix dataset is rather large (~25GB HDF5 file), for development purposes we'll create a smaller set based on only a few input tiles that we manually specify. Then we'll do the same for the whole dataset.
```
## Hand-picked subset of Phoenix NAIP tiles for a small development set.
img_paths = [
    'm_3311117_ne_12_1_20150601',
    'm_3311117_sw_12_1_20150529',
    'm_3311117_nw_12_1_20150529',
    'm_3311117_se_12_1_20150601',
    'm_3311125_ne_12_1_20150601',
    'm_3311125_nw_12_1_20150529',
    'm_3311125_se_12_1_20150601',
    'm_3311125_sw_12_1_20150529',
    'm_3311133_ne_12_1_20150601',
    'm_3311133_nw_12_1_20150529',
    'm_3311133_se_12_1_20150601',
    'm_3311133_sw_12_1_20150529'
]
## Expand the bare tile names into full GeoTIFF paths.
img_paths = ['data/naip/img/download/' + img_path + '.tif' for img_path in img_paths]
hdf5_path = 'data/naip/hdf5/phoenix_subset.h5'
make_hdf5_set(hdf5_path, img_paths)
## Full Phoenix dataset (~25GB HDF5) using the download metadata.
img_paths = places['Phoenix']['img_paths']
hdf5_path = 'data/naip/hdf5/phoenix.h5'
make_hdf5_set(hdf5_path, img_paths)
```
| github_jupyter |
# Homework03: Topic Modeling with Latent Semantic Analysis
Latent Semantic Analysis (LSA) is a method for finding latent similarities between documents treated as a bag of words by using a low rank approximation. It is used for document classification, clustering and retrieval. For example, LSA can be used to search for prior art given a new patent application. In this homework, we will implement a small library for simple latent semantic analysis as a practical example of the application of SVD. The ideas are very similar to PCA. SVD is also used in recommender systems in a similar fashion (for an SVD-based recommender system library, see [Surprise](http://surpriselib.com)).
We will implement a toy example of LSA to get familiar with the ideas. If you want to use LSA or similar methods for statistical language analysis, the most efficient Python libraries are probably [gensim](https://radimrehurek.com/gensim/) and [spaCy](https://spacy.io) - these also provide an online algorithm - i.e. the training information can be continuously updated. Other useful functions for processing natural language can be found in the [Natural Language Toolkit](http://www.nltk.org/).
**Note**: The SVD from scipy.linalg performs a full decomposition, which is inefficient since we only need to decompose until we get the first k singular values. If the SVD from `scipy.linalg` is too slow, please use the `sparsesvd` function from the [sparsesvd](https://pypi.python.org/pypi/sparsesvd/) package to perform SVD instead. You can install in the usual way with
```
!pip install sparsesvd
```
Then import the following
```python
from sparsesvd import sparsesvd
from scipy.sparse import csc_matrix
```
and use as follows
```python
sparsesvd(csc_matrix(M), k=10)
```
**Exercise 1 (20 points)**. Calculating pairwise distance matrices.
Suppose we want to construct a distance matrix between the rows of a matrix. For example, given the matrix
```python
M = np.array([[1,2,3],[4,5,6]])
```
the distance matrix using Euclidean distance as the measure would be
```python
[[ 0.000 1.414 2.828]
[ 1.414 0.000 1.414]
[ 2.828 1.414 0.000]]
```
if $M$ was a collection of column vectors.
Write a function to calculate the pairwise-distance matrix given the matrix $M$ and some arbitrary distance function. Your functions should have the following signature:
```
def func_name(M, distance_func):
pass
```
0. Write a distance function for the Euclidean, squared Euclidean and cosine measures.
1. Write the function using looping for M as a collection of row vectors.
2. Write the function using looping for M as a collection of column vectors.
3. Write the function using broadcasting for M as a collection of row vectors.
4. Write the function using broadcasting for M as a collection of column vectors.
For 3 and 4, try to avoid using transposition (but if you get stuck, there will be no penalty for using transposition). Check that all four functions give the same result when applied to the given matrix $M$.
**Exercise 2 (20 points)**.
**Exercise 2 (20 points)**. Write 3 functions to calculate the term frequency (tf), the inverse document frequency (idf) and the product (tf-idf). Each function should take a single argument `docs`, which is a dictionary of (key=identifier, value=document text) pairs, and return an appropriately sized array. Convert '-' to ' ' (space), remove punctuation, convert text to lowercase and split on whitespace to generate a collection of terms from the document text.
- tf = the number of occurrences of term $i$ in document $j$
- idf = $\log \frac{n}{1 + \text{df}_i}$ where $n$ is the total number of documents and $\text{df}_i$ is the number of documents in which term $i$ occurs.
Print the table of tf-idf values for the following document collection
```
s1 = "The quick brown fox"
s2 = "Brown fox jumps over the jumps jumps jumps"
s3 = "The the the lazy dog elephant."
s4 = "The the the the the dog peacock lion tiger elephant"
docs = {'s1': s1, 's2': s2, 's3': s3, 's4': s4}
```
**Exercise 3 (20 points)**.
1. Write a function that takes a matrix $M$ and an integer $k$ as arguments, and reconstructs a reduced matrix using only the $k$ largest singular values. Use the `scipy.linagl.svd` function to perform the decomposition. This is the least squares approximation to the matrix $M$ in $k$ dimensions.
2. Apply the function you just wrote to the following term-frequency matrix for a set of $9$ documents using $k=2$ and print the reconstructed matrix $M'$.
```
M = np.array([[1, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 2, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1]])
```
3. Calculate the pairwise correlation matrix for the original matrix M and the reconstructed matrix using $k=2$ singular values (you may use [scipy.stats.spearmanr](http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html) to do the calculations). Consider the first 5 sets of documents as one group $G1$ and the last 4 as another group $G2$ (i.e. first 5 and last 4 columns). What is the average within group correlation for $G1$, $G2$ and the average cross-group correlation for G1-G2 using either $M$ or $M'$. (Do not include self-correlation in the within-group calculations.).
**Exercise 4 (40 points)**. Clustering with LSA
1. Begin by loading a PubMed database of selected article titles using 'pickle'. With the following:
```import pickle
docs = pickle.load(open('pubmed.pic', 'rb'))```
Create a tf-idf matrix for every term that appears at least once in any of the documents. What is the shape of the tf-idf matrix?
2. Perform SVD on the tf-idf matrix to obtain $U \Sigma V^T$ (often written as $T \Sigma D^T$ in this context with $T$ representing the terms and $D$ representing the documents). If we set all but the top $k$ singular values to 0, the reconstructed matrix is essentially $U_k \Sigma_k V_k^T$, where $U_k$ is $m \times k$, $\Sigma_k$ is $k \times k$ and $V_k^T$ is $k \times n$. Terms in this reduced space are represented by $U_k \Sigma_k$ and documents by $\Sigma_k V^T_k$. Reconstruct the matrix using the first $k=10$ singular values.
3. Use agglomerative hierarchical clustering with complete linkage to plot a dendrogram and comment on the likely number of document clusters with $k = 100$. Use the dendrogram function from [SciPy ](https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.cluster.hierarchy.dendrogram.html).
4. Determine how similar each of the original documents is to the new document `data/mystery.txt`. Since $A = U \Sigma V^T$, we also have $V = A^T U S^{-1}$ using orthogonality and the rule for transposing matrix products. This suggests that in order to map the new document to the same concept space, first find the tf-idf vector $v$ for the new document - this must contain all (and only) the terms present in the existing tf-idx matrix. Then the query vector $q$ is given by $v^T U_k \Sigma_k^{-1}$. Find the 10 documents most similar to the new document and the 10 most dissimilar.
**Notes on the Pubmed articles**
These were downloaded with the following script.
```python
from Bio import Entrez, Medline
Entrez.email = "YOUR EMAIL HERE"
import cPickle
try:
docs = cPickle.load(open('pubmed.pic'))
except Exception, e:
print e
docs = {}
for term in ['plasmodium', 'diabetes', 'asthma', 'cytometry']:
handle = Entrez.esearch(db="pubmed", term=term, retmax=50)
result = Entrez.read(handle)
handle.close()
idlist = result["IdList"]
handle2 = Entrez.efetch(db="pubmed", id=idlist, rettype="medline", retmode="text")
result2 = Medline.parse(handle2)
for record in result2:
title = record.get("TI", None)
abstract = record.get("AB", None)
if title is None or abstract is None:
continue
docs[title] = '\n'.join([title, abstract])
print title
handle2.close()
cPickle.dump(docs, open('pubmed.pic', 'w'))
docs.values()
```
| github_jupyter |
# Pytorch Basics - Regressão Linear
> Tutorial de como realizar um modelo de regressão linear no Pytorch.
- toc: false
- badges: true
- comments: true
- categories: [pytorch, regressaolinear]
- image: images/pytorch.png
O objetivo desse breve trabalho é apresentar como é realizado um modelo de regressão linear utilizando pytorch. Muitas das vezes utiliza-se regressão linear como uma primeira hipotese, devido a sua simplicidade, antes de partir para modelos mais complexos.
## Carregando as bibliotecas necessárias
```
#Carregando o Pytorch
import torch
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
```
## Carregando o conjunto de dados
Para carregar o bando de dados que está em .csv, utilizamos o pandas, o qual consegue ler um arquivo localmente ou em um nuvem (url deve ser do raw do .csv)
```
df = pd.read_csv('https://raw.githubusercontent.com/lucastiagooliveira/lucas_repo/master/Kaggle/Revisiting%20a%20Concrete%20Strength%20regression/datasets_31874_41246_Concrete_Data_Yeh.csv')
```
Mostrando as 5 primeiras linhas do dataframe carregado, isso é importante para verificarmos o se o dataframe está correto.
```
df.head()
```
Apresentando um resumo estatístico dos dataframe por coluna, tais como: quantidade de dados, média, desvio padrão, mínimo, primeiro ao terceiro quartil e valor máximo.
```
df.describe()
```
## Plotando os gráficos de todas as váriaveis
Para visualização da relação entre as váriaveis é interessante fazer a visualização gráfica da relação entre as variáveis. Para isso usamos a função PairGrid da biblioteca Seaborn aliado com um scatterplot da biblioteca MatplotLib.
```
sns.set(style="darkgrid")
g = sns.PairGrid(df)
g.map(plt.scatter)
```
## Correlação linear
Para entendimento da correlação linear das variáveis entre si, temos a função "built-in" do Pandas que nos retorna o coeficiente de correlação que tem por padrão o método Pearson.
```
df.corr()
```
Escolhendo as variáveis que serão utilizadas para criação do modelo.
```
var_used = ['cement', 'superplasticizer', 'age', 'water']
train = df[var_used]
target = df['csMPa']
```
Tabela com somente as variáveis que serão utilizadas.
```
train.head()
```
Para iniciarmos um modelo temos que fazer a transformação da base de dados que está com o tipo de DataFrame para tensor, que é utilizado pelo Pytorch. Todavia, uma das maneiras de fazer essa transformação é antes fazer a transformação da base de dados para um vetor do Numpy e depois transformar para um tensor do Pytorch.
Obs.: Foi criado o vetor de uns para ser adicionado ao tensor dos parâmetros, pois essa coluna deverá multiplicar a constante da expressão (b), conforme o exemplo abaixo.
Y = a*X + b
```
train = np.asarray(train)
a = np.ones((train.shape[0],1))
train = torch.tensor(np.concatenate((train, a), axis=1))
target = torch.tensor(np.asarray(target))
train.shape
```
## Criando o modelo
Para iniciarmos precisamos criar uma função a qual definirá a equação da regressão linear a qual utilizará a função matmul para realizar a multiplicação entre os dois tensores dos parâmetros e variáveis dependentes.
```
def model(x, params):
    """Linear model: matrix-multiply the design matrix ``x`` by ``params``."""
    return x @ params
```
Função que calcula o erro quadrático médio (MSE).
Para saber mais sobre como é calculado acesso o link: https://pt.qwe.wiki/wiki/Mean_squared_error
```
def mse(pred, labels):
    """Mean squared error between predictions and targets."""
    squared_error = (pred - labels) ** 2
    return squared_error.mean()
```
Para iniciar o treino do modelo primeiramente temos que criar um tensor o qual receberá os valores dos parâmetros que serão atualizados a cada iteração, quedo assim precisamos utilizar o método requires_grad_ assim será possível calcular o gradiente desse tensor quando necessário.
Observe que o tipo do objeto criado é torch.float64.
```
params = torch.randn(5,1, dtype=torch.float64).requires_grad_()
params.dtype
```
**Primeiro passo:** realizar as predições do modelo
```
pred = model(train, params)
```
**Segundo passo:** calcular como o nosso modelo performou, ou seja, calcular MSE para averiguação da performace do modelo.
Observe que o modelo vai apresentar um erro acentuado, pois os parâmetros ainda não foram *treinados*.
```
loss = mse(pred, target)
loss
```
**Terceiro passo:** realizar o gradiente descente.
Conceito do algoritmo de gradiente descendente: http://cursos.leg.ufpr.br/ML4all/apoio/Gradiente.html
```
loss.backward()
params.grad
```
**Quarto passo:** Atualização dos parâmetros, para isso utiliza-se o valor do gradiente por meio do algoritmo descendente e é escalado (multiplicado) pelo taxa de aprendizado (*learning rate*).
Após a realização da atulização dos parâmetros deve-se resetar o gradiente.
```
lr = 1e-5
params.data -= lr * params.grad.data
params.grad = None
```
Primeira iteração realizada, pode-se observar que o valor do erro do nosso modelo reduziu. A tendência é ocorrer uma diminuição a cada iteração, até a estabilização do modelo.
```
pred = model(train, params)
loss = mse(pred, target)
loss
```
Foi criada uma função que realiza todos os passos acima realizados.
```
def step(train, target, params, lr = 1e-6):
    """Run one gradient-descent update on ``params`` and report the loss.

    Returns the predictions computed before the update together with the
    (in-place) updated parameter tensor.
    """
    ## Forward pass: predictions and their mean-squared error.
    predictions = model(train, params)
    loss = mse(predictions, target)
    ## Backward pass: autograd fills in params.grad.
    loss.backward()
    ## Gradient-descent update scaled by the learning rate,
    ## then clear the gradient so it does not accumulate.
    params.data -= lr * params.grad.data
    params.grad = None
    ## Report progress.
    print('Loss:', loss.item())
    return predictions, params
```
Criando um loop para realizar as iterações, é possível verificar a diminuição do erro a cada iteração, ou seja, se realizadas mais iterações pode-se chegar a um resultado plausível (neste caso não chegamos a um, pois o modelo de regressão linear não é um modelo adequado para esses dados, somente como hipótese inicial).
```
for i in range(10): loss, params = step(train, target, params)
```
Este é o resultado dos parâmetros que serão utilizados para o modelo realizar futuras predições.
```
parameters = params
parameters #parametros do modelo
```
| github_jupyter |
```
## Import dependencies
import numpy as np
import pandas as pd
from pathlib import Path
from getpass import getpass
from sqlalchemy import create_engine
import psycopg2
from sklearn.preprocessing import LabelEncoder
## Load the data
file_path = Path("Resources/DisneylandReviews.csv")
disney_raw_df = pd.read_csv(file_path)
# Inspect data
disney_raw_df
# Inspect counts
disney_raw_df.count()
# Inspect data types
disney_raw_df.dtypes
# Check length of reviews (word count via whitespace split)
disney_raw_df["Review_Text"].astype('str').str.split().str.len()
# Check first entry to confirm results
disney_raw_df["Review_Text"].loc[0]
disney_raw_df["Review_Text"].astype('str').str.len().loc[0]
# Add columns for review lengths: words and characters
disney_raw_df["Review_Words"] = disney_raw_df["Review_Text"].astype('str').str.split().str.len()
disney_raw_df["Review_Letters"] = disney_raw_df["Review_Text"].astype('str').str.len()
disney_raw_df.describe()
# Remove data with missing time values (the literal string 'missing')
disney_raw_df = disney_raw_df[disney_raw_df["Year_Month"]!='missing']
# Split year/month column ('YYYY-M') into two columns
disney_raw_df[["Year", "Month"]] = disney_raw_df["Year_Month"].str.split(pat="-", expand = True)
disney_raw_df["Year_Month"].value_counts()
# Check for nulls
disney_raw_df.isna().sum()
# Check unique locations
locations = disney_raw_df["Reviewer_Location"].unique()
sorted(locations)
# Replace locations with missing characters.
# NOTE: the mojibake literals ("Cura�ao", etc.) must stay byte-identical —
# they match the raw data as it appears in the CSV.
disney_raw_df["Reviewer_Location"] = disney_raw_df["Reviewer_Location"].replace(["Cura�ao", "C�te d'Ivoire", "�land Islands"],["Curacao", "Cote d'Ivoire", "Aland Islands"])
# Check which disney parks were visited
disney_raw_df["Branch"].unique()
# Decide whether a reviewer was a local or a tourist (in broad terms).
def tourist(row):
    """Return 0 when the reviewer's country matches the park's home market, else 1."""
    # Each Disneyland branch mapped to the country it is located in.
    home_market = {
        "Disneyland_HongKong": "Hong Kong",
        "Disneyland_California": "United States",
        "Disneyland_Paris": "France",
    }
    is_local = home_market.get(row["Branch"]) == row["Reviewer_Location"]
    return 0 if is_local else 1
# Create tourism column: 1 is a reviewer from another country, 0 is a reviewer from the same country
disney_raw_df["Tourist"] = disney_raw_df.apply(tourist, axis=1)
# Check results
disney_raw_df[disney_raw_df["Tourist"]==0]
# Check counts of tourist vs local
disney_raw_df["Tourist"].value_counts()
# Change data types
disney_raw_df["Tourist"] = disney_raw_df["Tourist"].astype(int)
disney_raw_df["Month"] = disney_raw_df["Month"].astype(int)
disney_raw_df["Year"] = disney_raw_df["Year"].astype(int)
disney_raw_df["Year_Month"] = pd.to_datetime(disney_raw_df["Year_Month"])
disney_raw_df.dtypes
# Look at range of years
sorted(disney_raw_df["Year"].unique())
# Look for duplicate rows
disney_raw_df["Review_ID"].duplicated().sum()
# Drop duplicate rows
disney_raw_df = disney_raw_df.drop_duplicates(subset="Review_ID", keep="first")
# We may have to bin locations; check number/distribution of unique entries
disney_raw_df["Reviewer_Location"].value_counts()
# Create instance of labelencoder
labelencoder = LabelEncoder()
# Encode categorical data. Reusing one LabelEncoder is fine here because
# fit_transform refits from scratch for each column.
disney_raw_df["Branch_Encoded"] = labelencoder.fit_transform(disney_raw_df["Branch"])
disney_raw_df["Location_Encoded"] = labelencoder.fit_transform(disney_raw_df["Reviewer_Location"])
# View encoded branches
disney_raw_df.groupby(["Branch_Encoded", "Branch"]).size()
disney_raw_df.groupby(["Tourist", "Branch"]).size()
# View encoded locations
disney_raw_df.groupby(["Location_Encoded", "Reviewer_Location"]).size()
# NOTE(review): this is an alias, not a copy — both names refer to the
# same DataFrame object from here on.
disney_clean_df = disney_raw_df
# Reset index
disney_clean_df.reset_index(inplace=True, drop=True)
## Now we upload our dataframe to SQL
# Build the connection string
protocol = 'postgresql'
user = 'postgres'
location = 'localhost'
port = '5432'
db = 'disney_db'
password = getpass('Enter database password')
# Store string as variable
db_string = f'{protocol}://{user}:{password}@{location}:{port}/{db}'
# Create database engine
engine = create_engine(db_string)
# Send to database (replaces the table if it already exists)
disney_clean_df.to_sql(name='disneyland_reviews', con=engine, if_exists='replace')
# Export to csv
disney_clean_df.to_csv("Resources/disney_clean.csv", index=False)
```
| github_jupyter |
```
from bs4 import BeautifulSoup
import requests
# NOTE(review): dead code — this partial `stock` class is an abandoned first
# draft: its __init__ only performs local imports and stores nothing. The
# real implementation is the `Stock` class below, and this name is later
# shadowed anyway by `stock = Stock(...)` at the bottom of the cell.
class stock:
    def __init__(self,*stock_num):
        from bs4 import BeautifulSoup
        import requests
import pymysql
import openpyxl
from openpyxl.styles import Font
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from webdriver_manager.chrome import ChromeDriverManager
import time
class Stock:
    """Scrape Taiwan stock quotes from Yahoo/TWSE and persist them to
    MySQL, Excel, or Google Sheets.

    NOTE(review): all scraping depends on the exact page layout at the
    time of writing (table positions, cell order) — verify before reuse,
    since these markups change often.
    """

    def __init__(self, *stock_numbers):
        # Ticker symbols as strings, e.g. '2451'.
        self.stock_numbers = stock_numbers

    def scrape(self):
        """Fetch the current quote row for each ticker from Yahoo.

        Returns a list of tuples: (report date, followed by the first 11
        table cells of Yahoo's quote table for that ticker).
        """
        result = list()
        for stock_number in self.stock_numbers:
            response = requests.get(
                "https://tw.stock.yahoo.com/q/q?s=" + stock_number)
            # Strip the "add to portfolio" link text ("加到投資組合") so it
            # does not pollute the parsed cell values.
            soup = BeautifulSoup(response.text.replace("加到投資組合", ""), "lxml")
            stock_date = soup.find(
                "font", {"class": "tt"}).getText().strip()[-9:] # report date = last 9 chars
            tables = soup.find_all("table")[2] # the third table on the page holds the quotes
            tds = tables.find_all("td")[0:11] # the first 11 cells of that table
            result.append((stock_date,) +
                          tuple(td.getText().strip() for td in tds))
        return result

    def save(self, stocks):
        """Insert scraped quote tuples into the MySQL `stock.market` table."""
        db_settings = {
            "host": "127.0.0.1",
            "port": 3306,
            "user": "root",
            "password": "******",  # placeholder — set locally, do not commit real credentials
            "db": "stock",
            "charset": "utf8"
        }
        try:
            conn = pymysql.connect(**db_settings)
            with conn.cursor() as cursor:
                # Column order must match the tuple layout produced by scrape().
                sql = """INSERT INTO market(
                    market_date,
                    stock_name,
                    market_time,
                    final_price,
                    buy_price,
                    sell_price,
                    ups_and_downs,
                    lot,
                    yesterday_price,
                    opening_price,
                    highest_price,
                    lowest_price)
                VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
                for stock in stocks:
                    cursor.execute(sql, stock)
                conn.commit()
        except Exception as ex:
            print("Exception:", ex)

    def export(self, stocks):
        """Export scraped quotes to yahoostock.xlsx, coloring the change
        column red for gains (△) and green for losses (▽)."""
        wb = openpyxl.Workbook()
        sheet = wb.create_sheet("Yahoo股市", 0)
        # Re-fetch one quote page just to read the table header titles.
        response = requests.get(
            "https://tw.stock.yahoo.com/q/q?s=2451")
        soup = BeautifulSoup(response.text, "lxml")
        tables = soup.find_all("table")[2]
        ths = tables.find_all("th")[0:11]
        titles = ("資料日期",) + tuple(th.getText() for th in ths)
        sheet.append(titles)
        for index, stock in enumerate(stocks):
            sheet.append(stock)
            # Row index offset by 2: one for the header row, one because
            # openpyxl rows are 1-based.
            if "△" in stock[6]:
                sheet.cell(row=index+2, column=7).font = Font(color='FF0000')
            elif "▽" in stock[6]:
                sheet.cell(row=index+2, column=7).font = Font(color='00A600')
        wb.save("yahoostock.xlsx")

    def gsheet(self, stocks):
        """Append scraped quotes to the first worksheet of a Google Sheet."""
        scopes = ["https://spreadsheets.google.com/feeds"]
        credentials = ServiceAccountCredentials.from_json_keyfile_name(
            "credentials.json", scopes)
        client = gspread.authorize(credentials)
        sheet = client.open_by_key(
            "YOUR GOOGLE SHEET KEY").sheet1  # placeholder key — set before use
        # Re-fetch one quote page just to read the table header titles.
        response = requests.get(
            "https://tw.stock.yahoo.com/q/q?s=2451")
        soup = BeautifulSoup(response.text, "lxml")
        tables = soup.find_all("table")[2]
        ths = tables.find_all("th")[0:11]
        titles = ("資料日期",) + tuple(th.getText() for th in ths)
        sheet.append_row(titles, 1)
        for stock in stocks:
            sheet.append_row(stock)

    def daily(self, year, month):
        """Scrape the TWSE daily closing-average table for each ticker in
        the given year/month (via Selenium) and print the collected rows."""
        browser = webdriver.Chrome(ChromeDriverManager().install())
        browser.get(
            "https://www.twse.com.tw/zh/page/trading/exchange/STOCK_DAY_AVG.html")
        select_year = Select(browser.find_element_by_name("yy"))
        select_year.select_by_value(year) # select the requested year
        select_month = Select(browser.find_element_by_name("mm"))
        select_month.select_by_value(month) # select the requested month
        stockno = browser.find_element_by_name("stockNo") # locate the ticker input box
        result = []
        for stock_number in self.stock_numbers:
            stockno.clear() # clear the ticker input box
            stockno.send_keys(stock_number)
            stockno.submit()
            time.sleep(2) # crude wait for the page to render the result table
            soup = BeautifulSoup(browser.page_source, "lxml")
            table = soup.find("table", {"id": "report-table"})
            elements = table.find_all(
                "td", {"class": "dt-head-center dt-body-center"})
            data = (stock_number,) + tuple(element.getText()
                                           for element in elements)
            result.append(data)
        print(result)
stock = Stock('2451', '2454', '2369') # Create a Stock object for three tickers
stock.daily("2019", "7") # Scrape each ticker's daily closing averages for the given year/month
# stock.gsheet(stock.scrape()) # Write today's scraped quotes to a Google Sheet
# stock.export(stock.scrape()) # Export today's scraped quotes to an Excel file
# stock.save(stock.scrape()) # Save today's scraped quotes into the MySQL database
```
| github_jupyter |
# Introduction to Kubernetes
**Learning Objectives**
* Create GKE cluster from command line
* Deploy an application to your cluster
* Cleanup, delete the cluster
## Overview
Kubernetes is an open source project (available on [kubernetes.io](kubernetes.io)) which can run on many different environments, from laptops to high-availability multi-node clusters; from public clouds to on-premise deployments; from virtual machines to bare metal.
The goal of this lab is to provide a short introduction to Kubernetes (k8s) and some basic functionality.
## Create a GKE cluster
A cluster consists of at least one cluster master machine and multiple worker machines called nodes. Nodes are Compute Engine virtual machine (VM) instances that run the Kubernetes processes necessary to make them part of the cluster.
**Note**: Cluster names must start with a letter and end with an alphanumeric, and cannot be longer than 40 characters.
We'll call our cluster `asl-cluster`.
```
import os
CLUSTER_NAME = "asl-cluster"
ZONE = "us-central1-a"
os.environ["CLUSTER_NAME"] = CLUSTER_NAME
os.environ["ZONE"] = ZONE
```
We'll set our default compute zone to `us-central1-a` and use `gcloud container clusters create ...` to create the GKE cluster. Let's first look at all the clusters we currently have.
```
!gcloud container clusters list
```
**Exercise**
Use `gcloud container clusters create` to create a new cluster using the `CLUSTER_NAME` we set above. This takes a few minutes...
```
%%bash
gcloud container clusters create $CLUSTER_NAME --zone $ZONE
```
Now when we list our clusters again, we should see the cluster we created.
```
!gcloud container clusters list
```
## Get authentication credentials and deploy and application
After creating your cluster, you need authentication credentials to interact with it. Use `get-credentials` to authenticate the cluster.
**Exercise**
Use `gcloud container clusters get-credentials` to authenticate the cluster you created.
```
%%bash
gcloud container clusters get-credentials $CLUSTER_NAME --zone $ZONE
```
You can now deploy a containerized application to the cluster. For this lab, you'll run `hello-app` in your cluster.
GKE uses Kubernetes objects to create and manage your cluster's resources. Kubernetes provides the [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) object for deploying stateless applications like web servers. [Service](https://kubernetes.io/docs/concepts/services-networking/service/) objects define rules and load balancing for accessing your application from the internet.
**Exercise**
Use the `kubectl create` command to create a new Deployment `hello-server` from the `hello-app` container image. The `--image` flag to specify a container image to deploy. The `kubectl create` command pulls the example image from a Container Registry bucket. Here, use [gcr.io/google-samples/hello-app:1.0](gcr.io/google-samples/hello-app:1.0) to indicate the specific image version to pull. If a version is not specified, the latest version is used.
```
%%bash
# Fix: the image tag had a stray "pp" ("hello-app:1.0pp"), which is not a
# published tag and would make the image pull (and the deployment) fail.
kubectl create deployment hello-server --image=gcr.io/google-samples/hello-app:1.0
```
This Kubernetes command creates a Deployment object that represents `hello-server`. To create a Kubernetes Service, which is a Kubernetes resource that lets you expose your application to external traffic, run the `kubectl expose` command.
**Exercise**
Use the `kubectl expose` to expose the application. In this command,
* `--port` specifies the port that the container exposes.
* `type="LoadBalancer"` creates a Compute Engine load balancer for your container.
```
%%bash
kubectl expose deployment hello-server --type=LoadBalancer --port 8080
```
Use the `kubectl get service` command to inspect the `hello-server` Service.
**Note**: It might take a minute for an external IP address to be generated. Run the previous command again if the `EXTERNAL-IP` column for `hello-server` status is pending.
```
!kubectl get service
```
You can now view the application from your web browser, open a new tab and enter the following address, replacing `EXTERNAL IP` with the EXTERNAL-IP for `hello-server`:
```bash
http://[EXTERNAL_IP]:8080
```
You should see a simple page which displays
```bash
Hello, world!
Version: 1.0.0
Hostname: hello-server-5bfd595c65-7jqkn
```
## Cleanup
Delete the cluster using `gcloud` to free up those resources. Use the `--quiet` flag if you are executing this in a notebook. Deleting the cluster can take a few minutes.
**Exercise**
Delete the cluster. Use the `--quiet` flag since we're executing in a notebook.
```
%%bash
gcloud container clusters --quiet delete ${CLUSTER_NAME} --zone $ZONE
```
Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| github_jupyter |
<a href="https://colab.research.google.com/github/ipavlopoulos/toxic_spans/blob/master/ToxicSpans_SemEval21.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Download the data and the code
```
from ast import literal_eval
import pandas as pd
import random
!git clone https://github.com/ipavlopoulos/toxic_spans.git
from toxic_spans.evaluation.semeval2021 import f1
tsd = pd.read_csv("toxic_spans/data/tsd_trial.csv")
tsd.spans = tsd.spans.apply(literal_eval)
tsd.head(1)
```
### Run a random baseline
* Returns random offsets as toxic per text
```
# make an example with a taboo word
taboo_word = "fucking"
template = f"This is a {taboo_word} example."
# build a random baseline (yields offsets at random)
random_baseline = lambda text: [i for i, char in enumerate(text) if random.random()>0.5]
predictions = random_baseline(template)
# find the ground truth indices and print
gold = list(range(template.index(taboo_word), template.index(taboo_word)+len(taboo_word)))
print(f"Gold\t\t: {gold}")
print(f"Predicted\t: {predictions}")
tsd["random_predictions"] = tsd.text.apply(random_baseline)
tsd["f1_scores"] = tsd.apply(lambda row: f1(row.random_predictions, row.spans), axis=1)
tsd.head()
from scipy.stats import sem
_ = tsd.f1_scores.plot(kind="box")
print (f"F1 = {tsd.f1_scores.mean():.2f} ± {sem(tsd.f1_scores):.2f}")
```
### Prepare the text file with the scores
* Name it as `spans-pred.txt`.
* Align the scores with the rows.
```
# make sure that the ids match the ones of the scores
predictions = tsd.random_predictions.to_list()
ids = tsd.index.to_list()
# write in a prediction file named "spans-pred.txt"
with open("spans-pred.txt", "w") as out:
for uid, text_scores in zip(ids, predictions):
out.write(f"{str(uid)}\t{str(text_scores)}\n")
! head spans-pred.txt
```
### Zip the predictions
* Take extra care to verify that only the predictions text file is included.
* The text file should **not** be within any directory.
* No other file should be included; the zip should only contain the txt file.
```
! zip -r random_predictions.zip ./spans-pred.*
```
###### Check by unzipping it: only a `spans-pred.txt` file should be created
```
! rm spans-pred.txt
! unzip random_predictions.zip
```
### Download the zip and submit it to be assessed
```
from google.colab import files
files.download("random_predictions.zip")
```
### When the submission is finished click the `Download output from scoring step`
* The submission may take a while, so avoid late submissions.
* Download the output_file.zip and see your score in the respective file.
```
```
| github_jupyter |
# Counts, Frequencies, and Ngram Models
Before you proceed, make sure to run the cell below.
This will once again read in the cleaned up text files and store them as tokenized lists in the variables `hamlet`, `faustus`, and `mars`.
If you get an error, make sure that you did the previous notebook and that this notebook is in a folder containing the files `hamlet_clean.txt`, `faustus_clean.txt`, and `mars_clean.txt` (which should be the case if you did the previous notebook).
```
from google.colab import files
# Import the three cleaned text files, one upload prompt per file.
# NOTE: upload1 is prompted first, so upload the files in this order:
# hamlet_clean.txt, then faustus_clean.txt, then mars_clean.txt.
upload1 = files.upload()
upload2 = files.upload()
upload3 = files.upload()
# BUGFIX: the keys were previously swapped, so `hamlet_full` held the text
# of Doctor Faustus and vice versa. Each variable now reads its own file.
hamlet_full = upload1['hamlet_clean.txt'].decode('utf-8')
faustus_full = upload2['faustus_clean.txt'].decode('utf-8')
mars_full = upload3['mars_clean.txt'].decode('utf-8')
import re
def tokenize(the_string):
    """Return the list of word tokens (maximal runs of word characters) in ``the_string``."""
    tokens = re.findall(r"\w+", the_string)
    return tokens
# define a variable for each token list
hamlet = tokenize(hamlet_full)
faustus = tokenize(faustus_full)
mars = tokenize(mars_full)
```
**Caution.**
If you restart the kernel at any point, make sure to run all these previous cells again so that the variables `hamlet`, `faustus`, and `mars` are defined.
## Counting words
Python makes it very easy to count how often an element occurs in a list: the `collections` library provides a function `Counter` that does the counting for us.
The `Counter` function takes as its only argument a list (like the ones produced by `re.findall` for tokenization).
It then converts the list into a *Counter*.
Here is what this looks like with a short example string.
```
import re
from collections import Counter # this allows us to use Counter instead of collections.Counter
test_string = "FTL is short for faster-than-light; we probably won't ever have space ships capable of FTL-travel."
# tokenize the string
tokens = re.findall(r"\w+", str.lower(test_string))
print("The list of tokens:", tokens)
# add an empty line
print()
# and now do the counting
counts = Counter(tokens)
print("Number of tokens for each word type:", counts)
```
Let's take a quick peek at what the counts look like for each text.
We don't want to do this with something like `print(counts_hamlet)`, because the output would be so large that your browser might actually choke on it (it has happened to me sometimes).
Instead, we will look at the 100 most common words.
We can do this with the function `Counter.most_common`, which takes two arguments: a Counter, and a positive number.
```
from collections import Counter
# construct the counters
counts_hamlet = Counter(hamlet)
counts_faustus = Counter(faustus)
counts_mars = Counter(mars)
print("Most common Hamlet words:", Counter.most_common(counts_hamlet, 100))
print()
print("Most common Faustus words:", Counter.most_common(counts_faustus, 100))
print()
print("Most common John Carter words:", Counter.most_common(counts_mars, 100))
```
**Exercise.**
The code below uses `import collections` instead of `from collections import Counter`.
As you can test for yourself, the code now produces various errors.
Fix the code so that the cell runs correctly.
You must not change the `import` statement.
```
import collections
# construct the counters
counts_hamlet = Counter(hamlet)
counts_faustus = Counter(faustus)
counts_mars = Counter(mars)
print("Most common Hamlet words:", Counter.most_common(counts_hamlet, 100))
print()
print("Most common Faustus words:", Counter.most_common(counts_faustus, 100))
print()
print("Most common John Carter words:", Counter.most_common(counts_mars, 100))
```
Python's output for `Counter.most_common` doesn't look too bad, but it is a bit convoluted.
We can use the function `pprint` from the `pprint` library to have each word on its own line.
The name *pprint* is short for *pretty-print*.
```
from pprint import pprint # we want to use pprint instead of pprint.pprint
from collections import Counter
# construct the counters
counts_hamlet = Counter(hamlet)
counts_faustus = Counter(faustus)
counts_mars = Counter(mars)
# we have to split lines now because pprint cannot take multiple arguments like print
print("Most common Hamlet words:")
pprint(Counter.most_common(counts_hamlet, 100))
print()
print("Most common Faustus words:")
pprint(Counter.most_common(counts_faustus, 100))
print()
print("Most common John Carter words:")
pprint(Counter.most_common(counts_mars, 100))
```
**Exercise.**
What is the difference between the following two pieces of code?
How do they differ in their output, and why?
```
from collections import Counter
counts = Counter(hamlet[:50])
print(counts)
from collections import Counter
count = Counter(hamlet)
print(Counter.most_common(count, 50))
```
## A problem
If you look at the lists of 100 most common words for each text, you'll notice that they are fairly similar.
For instance, all of them have *a*, *the*, and *to* among the most frequent ones.
That's not a peculiarity of these few texts, it's a general property of English texts.
This is because of **Zipf's law**: ranking words by their frequency, the n-th most frequent word has a relative frequency proportional to 1/n.
So the most common word is twice as frequent as the second most common one, three times more frequent than the third most common one, and so on.
As a result, a handful of words make up over 50% of all words in a text.
Zipf's law means that word frequencies in a text give rise to a peculiar shape that we might call the Zipf dinosaur.
A super-high neck, followed by a very long tail.
For English texts, the distribution usually resembles the one below, and that's even though this graph only shows the most common words.
```
from IPython.display import HTML
# Youtube
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/fCn8zs912OE" frameborder="0" allowfullscreen></iframe>')
```
There is precious little variation between English texts with respect to which words are at the top.
These common but uninformative words are called **stop words**.
If we want to find any interesting differences between *Hamlet*, *Doctor Faustus*, and *Princess of Mars*, we have to filter out all these stop words.
That's not something we can do by hand, but our existing box of tricks doesn't really seem to fit either.
We could use a regular expression to delete all these words from the string before it even gets tokenized.
But that's not the best solution:
1. A minor mistake in the regular expression might accidentally delete many things we want to keep.
Odds are that this erroneous deletion would go unnoticed, possibly invalidating our stylistic analysis.
1. There's hundreds of stop words, so the regular expression would be very long.
Ideally, our code should be compact and easy to read.
A super-long regular expression is the opposite of that, and it's no fun to type either.
And of course, the longer a regular expression, the higher the chance that you make a typo (which takes us back to point 1).
1. While regular expressions are fast, they are not as fast as most of the operations Python can perform on lists and counters.
If there is an easy alternative to a regular expression, that alternative is worth exploring.
Alright, so if regexes aren't the best solution, what's the alternative?
Why, it's simple: 0.
## Changing counts
The values in a Python counter can be changed very easily.
```
from collections import Counter
from pprint import pprint
# define a test counter and show its values
test = Counter(["John", "said", "that", "Mary", "said", "that", "Bill", "stinks"])
pprint(test)
# 'that' is a stop word; set its count to 0
test["that"] = 0
pprint(test)
```
The code above uses the new notation `test['that']`.
Counters are a subclass of dictionaries, so `test["that"]` points to the value for `"that"` in the counter `test`.
We also say that `"that"` is a **key** that points to a specific **value**.
The line
```python
test["that"] = 0
```
instructs Python to set the value for the key `"that"` to `0`.
**Exercise.**
Look at the code cell below.
For each line, add a comment that briefly describes what it does (for instance, *set value of 'that' to 0*).
If the line causes an error, fix the error and add two commments:
1. What caused the error?
1. What does the corrected line do?
You might want to use `pprint` to look at how the counter changes after each line.
```
from collections import Counter
# define a test counter and show its values
test = Counter(["John", "said", "that", "Mary", "said", "that", "Bill", "stinks"])
test["that"] = 0 # set value of 'that' to 0
test["Mary"] = test["that"]
test[John] = 10
test["said"] = test["John' - 'said"]
test["really"] = 0
```
Since we can change the values of keys in counters, stop words become very easy to deal with.
Recall that the problem with stop words is not so much that they occur in the counter, but that they make up the large majority of high frequency words.
Our intended fix was to delete them from the counter.
But instead, we can just set the count of each stop word to 0.
Then every stop word is still technically contained by the counter, but since its frequency is 0 it will no longer show up among the most common words, which is what we really care about.
Alright, let's do that.
**Exercise.**
Together with this notebook you found a figure which shows you the most common stop words of English (except for *whale*, you can ignore that one).
Extend the code below so that the count for each one of the stop words listed in the figure is set to 0.
Compare the output before and after stop word removal and ask yourself whether there has been significant progress.
```
from collections import Counter
# construct the counters
counts_hamlet = Counter(hamlet)
# output with stop words
print("Most common Hamlet words before clean-up:\n", Counter.most_common(counts_hamlet, 25))
# set stop word counts to 0
# put your code here
# output without stop words
print("Most common Hamlet words after clean-up:\n", Counter.most_common(counts_hamlet, 25))
```
Okay, this is an improvement, but it's really tedious.
You have to write the same code over and over again, changing only the key.
And you aren't even done yet, there's still many more stop words to be removed.
But don't despair, you don't have to add another 100 lines of code.
No, repetitive tasks like that are exactly why programming languages have **`for` loops**.
With a `for`-loop, setting the counts of stop words to 0 becomes a matter of just a few lines.
```
from collections import Counter
# construct the counters
counts_hamlet = Counter(hamlet)
counts_faustus = Counter(faustus)
counts_mars = Counter(mars)
stopwords = ["the", "of", "and", "a", "to", "in",
"that", "his", "it", "he", "but", "as",
"is", "with", "was", "for", "all", "this",
"at", "while", "by", "not", "from", "him",
"so", "be", "one", "you", "there", "now",
"had", "have", "or", "were", "they", "which",
"like"]
for word in stopwords:
counts_hamlet[word] = 0
counts_faustus[word] = 0
counts_mars[word] = 0
```
Okay, now we can finally compare the three texts based on their unigram counts.
You can use the `Counter.most_common` function to see which words are most common in each text.
We can also compare the overall frequency distribution.
The code below will plot the counters, giving you a graphical representation of the frequency distribution, similar to the Zipf figures above.
(Don't worry about what any of the code below does.
Just run the cell and look at the pretty output.)
```
%matplotlib inline
# import relevant matplotlib code
import matplotlib.pyplot as plt
# figsize(20, 10)
plt.figure(figsize=(20,10))
# the lines above are needed for Jupyter to display the plots in your browser
# do not remove them
# a little bit of preprocessing so that the data is ordered by frequency
def plot_preprocess(the_counter, n):
    """Format counter data for plotting: return the ``n`` most common items.

    Returns a pair ``(words, counts)`` of tuples, ordered by descending count.
    """
    # rank all (word, count) pairs by count, highest first
    ranked = sorted(the_counter.items(), key=lambda pair: pair[1], reverse=True)
    top_items = ranked[:n]
    # split the pairs into parallel tuples of words and counts
    words, counts = zip(*top_items)
    return words, counts
for text in [counts_hamlet, counts_faustus, counts_mars]:
# you can change the max words value to look at more or fewer words in one plot
max_words = 10
words = plot_preprocess(text, max_words)[0]
counts = plot_preprocess(text, max_words)[1]
plt.bar(range(len(counts)), counts, align="center")
plt.xticks(range(len(words)), words)
plt.show()
```
So there you have it.
Your first, fairly simple quantitative analysis of writing style.
You can compare the three texts among several dimensions:
1. What are the most common words in each text?
1. Are the distributions very different?
Perhaps one of them keeps repeating the same words over and over, whereas another author varies their vocabulary more and thus has a smoother curve that's not as much tilted towards the left?
| github_jupyter |
# Model import using the Petab format
In this notebook, we illustrate how to use [pyPESTO](https://github.com/icb-dcm/pypesto.git) together with [PEtab](https://github.com/petab-dev/petab.git) and [AMICI](https://github.com/icb-dcm/amici.git). We employ models from the [benchmark collection](https://github.com/benchmarking-initiative/benchmark-models-petab), which we first download:
```
import pypesto
import amici
import petab
import os
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
!git clone --depth 1 https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git tmp/benchmark-models || (cd tmp/benchmark-models && git pull)
folder_base = "tmp/benchmark-models/Benchmark-Models/"
```
## Import
### Manage PEtab model
A PEtab problem comprises all the information on the model, the data and the parameters to perform parameter estimation. We import a model as a `petab.Problem`.
```
# a collection of models that can be simulated
#model_name = "Zheng_PNAS2012"
model_name = "Boehm_JProteomeRes2014"
#model_name = "Fujita_SciSignal2010"
#model_name = "Sneyd_PNAS2002"
#model_name = "Borghans_BiophysChem1997"
#model_name = "Elowitz_Nature2000"
#model_name = "Crauste_CellSystems2017"
#model_name = "Lucarelli_CellSystems2018"
#model_name = "Schwen_PONE2014"
#model_name = "Blasi_CellSystems2016"
# the yaml configuration file links to all needed files
yaml_config = os.path.join(folder_base, model_name, model_name + '.yaml')
# create a petab problem
petab_problem = petab.Problem.from_yaml(yaml_config)
```
### Import model to AMICI
The model must be imported to pyPESTO and AMICI. Therefore, we create a `pypesto.PetabImporter` from the problem, and create an AMICI model.
```
importer = pypesto.PetabImporter(petab_problem)
model = importer.create_model()
# some model properties
print("Model parameters:", list(model.getParameterIds()), '\n')
print("Model const parameters:", list(model.getFixedParameterIds()), '\n')
print("Model outputs: ", list(model.getObservableIds()), '\n')
print("Model states: ", list(model.getStateIds()), '\n')
```
### Create objective function
To perform parameter estimation, we need to define an objective function, which integrates the model, data, and noise model defined in the PEtab problem.
```
import libsbml
converter_config = libsbml.SBMLLocalParameterConverter()\
.getDefaultProperties()
petab_problem.sbml_document.convert(converter_config)
obj = importer.create_objective()
# for some models, hyperparamters need to be adjusted
#obj.amici_solver.setMaxSteps(10000)
#obj.amici_solver.setRelativeTolerance(1e-7)
#obj.amici_solver.setAbsoluteTolerance(1e-7)
```
We can request variable derivatives via `sensi_orders`, or function values or residuals as specified via `mode`. Passing `return_dict`, we obtain the direct result of the AMICI simulation.
```
ret = obj(petab_problem.x_nominal_scaled, mode='mode_fun', sensi_orders=(0,1), return_dict=True)
print(ret)
```
The problem defined in PEtab also defines the fixing of parameters, and parameter bounds. This information is contained in a `pypesto.Problem`.
```
problem = importer.create_problem(obj)
```
In particular, the problem accounts for the fixing of parameters.
```
print(problem.x_fixed_indices, problem.x_free_indices)
```
The problem creates a copy of the objective function that takes into account the fixed parameters. The objective function is able to calculate function values and derivatives. A finite difference check shows whether the computed gradient is accurate:
```
objective = problem.objective
ret = objective(petab_problem.x_nominal_free_scaled, sensi_orders=(0,1))
print(ret)
eps = 1e-4
def fd(x):
    """Approximate the gradient of ``objective`` at ``x`` by central finite differences.

    Relies on the file-level names ``objective`` (the pyPESTO objective) and
    ``eps`` (the finite-difference step size) defined above.

    Args:
        x: Parameter vector (numpy array) at which to approximate the gradient.

    Returns:
        numpy array of the same shape as ``x`` with the approximated gradient.
    """
    grad = np.zeros_like(x)
    # perturb one coordinate at a time and take the symmetric difference quotient
    # (the original code kept a second index ``j`` that always equaled ``i``)
    for i in range(len(x)):
        mask = np.zeros_like(x)
        mask[i] += eps
        valinc, _ = objective(x+mask, sensi_orders=(0,1))
        valdec, _ = objective(x-mask, sensi_orders=(0,1))
        grad[i] = (valinc - valdec) / (2*eps)
    return grad
fdval = fd(petab_problem.x_nominal_free_scaled)
print("fd: ", fdval)
print("l2 difference: ", np.linalg.norm(ret[1] - fdval))
```
### In short
All of the previous steps can be shortened by directly creating an importer object and then a problem:
```
importer = pypesto.PetabImporter.from_yaml(yaml_config)
problem = importer.create_problem()
```
## Run optimization
Given the problem, we can perform optimization. We can specify an optimizer to use, and a parallelization engine to speed things up.
```
optimizer = pypesto.ScipyOptimizer()
# engine = pypesto.SingleCoreEngine()
engine = pypesto.MultiProcessEngine()
# do the optimization
result = pypesto.minimize(problem=problem, optimizer=optimizer,
n_starts=10, engine=engine)
```
## Visualize
The results are contained in a `pypesto.Result` object. It contains e.g. the optimal function values.
```
result.optimize_result.get_for_key('fval')
```
We can use the standard pyPESTO plotting routines to visualize and analyze the results.
```
import pypesto.visualize
ref = pypesto.visualize.create_references(x=petab_problem.x_nominal_scaled, fval=obj(petab_problem.x_nominal_scaled))
pypesto.visualize.waterfall(result, reference=ref, scale_y='lin')
pypesto.visualize.parameters(result, reference=ref)
```
| github_jupyter |
```
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import transforms, datasets
```
**NOTE**: it is recommended to watch [this link](https://drive.google.com/file/d/1jARX0gjNZwpkcMloOnE8HmngIYDQ6sIB/view?usp=sharing) about "Introduction of how to code in Pytorch" instructed by Rassa Ghavami beforehand.
### What is Tensor?
tensor is mostly same as numpy array (even its applications like broadcasting operation, indexing, slicing and etc), except for it brings us the opportunity to run operations on faster hardwares like GPU. let's see some tensor defintion
```
arr = torch.zeros((256, 256), dtype=torch.int32)
# tensors are defined by default at CPU
print(arr.device)
# keep 'size', 'dtype' and 'device' same as arr, but fill with 1
arr2 = torch.ones_like(arr)
# keep 'dtype' and 'device' same as arr, but fill data arbitrarily
arr3 = arr.new_tensor([[1, 2], [3, 4]])
```
in order to feed tensors to deep-learning models, they should follow a customary shape form; `B C H W` for 4D tensors where `B` is batch size, `C` is channel dimension and `H W` are spatial dimensions.
#### Device determination
first we need to determine which device all torch tensors (including the input, learning weights and etc) are going to be allocated. basically, GPU is the first priority.
```
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
```
#### Pseudo random generation
it is often recommended to generate **pseudo** random numbers as it provides fair comparison between different configs of deep learning model(s). torch provides this by `torch.manual_seed`.
```
np.random.seed(12345)
# same seed on all devices; both CPU and CUDA
torch.manual_seed(12345)
```
## Build a CNN model
from now on, you will learn how to build and train a CNN model.
pytorch models are defined as python classes inherited from `torch.nn.Module`. two functions are essential for model creation:
1. learning weights (parameters) and network layers are defined within `__init__()`.
2. forwarding procedure of the model is developed within `forward()`.
so let's create a multi-classification CNN model (with ten ground-truth labels) containing the following layers: `Conv` -> `ReLU` -> `Batchnorm` -> `Conv` -> `ReLU` -> `Batchnorm` -> `Adaptive average pooling` -> `dropout` -> `fully connected`. suppose the input has only one channel and `forward()` will only return output of the model.
```
class Model(nn.Module):
def __init__(self):
super().__init__()
# your code here
def forward(self, x: torch.Tensor) -> torch.Tensor:
# your code here
return x
```
#### set model device
Previously, we have determined which device (GPU or CPU) is going to be used, although it has not been allocated yet to parameters of the model. Pytorch `.to(device)` Api provides this for us.
```
model = Model()
model.to(device)
```
#### Model phases
there are two phases for a Pytorch model: `.train()` and `.eval()`. models are by default at `.train()` phase, however the difference between these two is that in `eval()` phase, some layers change their behavior during inference; for instance dropout will be deactivated and batch normalization will not update estimated mean and variance and they will be used only for normalization, hence please note **`.eval()` will not block parameters to be updated**. therefore during evaluation, besides `model.eval()` we should assure that back propagation is temporarily deactivated and this is possible by `torch.no_grad()`. indeed disabling the gradient calculation enables us to use bigger batch sizes as it speeds up the computation and reduces memory usage.
## Data processing
Before training, we need to prepare and process our dataset which is MNIST here.
#### Data transformation
PIL images should first be transformed to torch tensors. `torchvision.transforms.Compose` provides a pipeline of transforms. in the following 'converting to tensors' is only applied.
```
transform = transforms.Compose([
transforms.ToTensor()
])
```
#### Download data
as evaluation is not purpose of this notebook, you only need to load **train** set of MNIST dataset using `torchvision.datasets.MNIST`.
```
# your code here
train = None
```
#### Data loader
define train loader using `torch.utils.data.DataLoader`.
```
batch_size = 32
# your code here
train_loader = None
```
## Training
here we are going to develop training process of MNIST classification.
#### Optimizer
define your optimizer, use `torch.optim`.
```
# your code here
optimizer = None
```
#### Procedure
implement the procedure of training in the following cell. please note **evaluation is not purpose of this notebook**, therefore only report the training loss changes which ought to be descending in general. consider cross entropy as loss function and compute it without using pre-defined APIs.
the backpropagation consists of three sub-parts:
1. gradient computation
2. updating learning parameters
3. removing current computed gradients for next iteration
fortunately we don't need to implement them from scratch as pytorch provides APIs for them.
```
num_epochs = 3
num_iters = len(train_loader)
train_losses = np.zeros((num_epochs, num_iters), dtype=np.float32)
for epoch in range(num_epochs):
for it, (X, y) in enumerate(train_loader):
## forward model
## compute loss
## backpropagation
```
| github_jupyter |
# Top Charts Exploratory Data Analysis
## Loading Dependencies
```
import pandas as pd
from collections import Counter
import altair as alt
import nltk
import regex as re
```
## Loading in Data
```
df = pd.read_csv('cleaned_data/all_top_songs_with_genres_nolist.csv')
# preview of dataframe
df.head()
```
## Cleaning Up List of Genres
```
# cleaning up the genres column on a copy of the dataframe:
# turn the comma-separated genre string into a list of genre names
df_ = df.copy()
df_['genre'] = df_['genre'].str.split(", ")
# flatten every row's genre list into one list (the enumerate index used
# previously was never needed)
genres_list = []
for value in df_['genre']:
    genres_list.extend(value)
```
### Adding in Columns for genres
```
df_['pop'] = df.genre.str.contains('pop')==True
df_['rb'] = df.genre.str.contains('r-b')==True
df_['rap'] = df.genre.str.contains('rap')==True
df_['rock'] = df.genre.str.contains('rock')==True
df_['non-music'] = df.genre.str.contains('non-music')==True
df_['country'] = df.genre.str.contains('country')==True
df_['no_genre'] = df.genre.str.contains('m')==True
df_['pop'] = df_['pop'].astype(int)
df_['rb'] = df_['rb'].astype(int)
df_['rap'] = df_['rap'].astype(int)
df_['rock'] = df_['rock'].astype(int)
df_['non-music'] = df_['non-music'].astype(int)
df_['country'] = df_['country'].astype(int)
df_['no_genre'] = df_['no_genre'].astype(int)
df_.head()
### Saving to CSV
df_.to_csv('cleaned_data/OHE_all_top_songs.csv', index=False)
df_[df_['non-music'] == 1]['artist']
# drop non-music and bc they are all either having another genre or missing a genre
df_ = df_.drop(columns=['non-music'])
missing_genres = []
for i in range(len(df_.artist)):
if sum(df_.iloc[i,6:11]) > 0:
item = 0
missing_genres.append(item)
else:
item = 1
missing_genres.append(item)
df_['no_genre'] = missing_genres
```
## Visualizations
```
genre_frequencies = dict(Counter(genres_list))
genre_frequencies
genre_frequencies_df = pd.DataFrame.from_records([genre_frequencies])
genre_frequencies_df = genre_frequencies_df.rename(index={0:'counts'}).T.reset_index().rename(columns={'index':'genres'})
genre_frequencies_df = genre_frequencies_df[genre_frequencies_df['genres'].isin(['r-b', 'pop', 'rap', 'rock', 'country'])]
genre_frequencies_df.to_csv('cleaned_data/genre_song_counts.csv', index = False)
bars = alt.Chart(data=genre_frequencies_df).mark_bar().encode(
x= 'genres',
y = 'counts',
color = 'genres'
)
text = bars.mark_text(
align='center',
# baseline='top',
dy=-10
).encode(
text='counts:Q',
)
(bars + text).properties(height=500, width = 400,title = "Frequency of Genres on Top 200 Charts").configure_range(
category={'scheme': 'tableau10'}
)
```
There seems to be data labeled as non-music, which is strange because there shouldn't be anything labeled non-music. If there is another genre listed, remove non-music.
# Keyword Extraction of all Genres
```
### Importing More Dependencies
from resources.word_extraction.text_cleaning import lem_stem_text
from resources.word_extraction.stopwords import remove_stopw, get_stopwords
from resources.analyze import find_keywords, find_instances
df_['cleaned_lyrics'] = df_['lyrics'].str.replace('[^\w\s]','')
df_['cleaned_lyrics'] = df_['cleaned_lyrics'].str.replace('missing lyrics','')
df_['cleaned_lyrics'] = df_['cleaned_lyrics'].apply(remove_stopw)
df_['cleaned_lyrics'] = df_['cleaned_lyrics'].apply(lem_stem_text)
df_['cleaned_lyrics'] = df_.cleaned_lyrics.str.strip().str.split(' ')
df_
## getting a list of all lemmed and stemmed keywords without stopwords
lyrics_wordlist = df_['cleaned_lyrics'].tolist()
words_list = []
for i in lyrics_wordlist:
words_list.extend(i)
len(words_list)
# Creating a DataFrame of the Word Counts
lyric_word_frequencies = pd.DataFrame.from_dict(Counter(words_list), orient = 'index').reset_index()
lyric_word_frequencies = lyric_word_frequencies.rename(columns={'index':'word', 0:'count'})
lyric_word_frequencies = lyric_word_frequencies.sort_values(by = "count", ascending = False)
lyric_word_frequencies
lyric_word_frequencies.head(20)
lyric_word_frequencies.to_csv('cleaned_data/lyric_word_frequencies.csv', index = False)
top_100 = lyric_word_frequencies[:100]
top_100
```
## Top Words by Genre
```
pd.Series(genres_list).unique()
pop = df_[df_['pop'] == 1]
rb = df_[df_['rb'] == 1]
rap = df_[df_['rap'] == 1]
rock = df_[df_['rock'] == 1]
country = df_[df_['country'] == 1]
m = df_[df_['no_genre'] == 1]
def top_lyrics(df, dfname):
    """Find the top lyric unigrams in a dataframe of tokenized lyrics.

    Args:
        df: DataFrame with a ``cleaned_lyrics`` column holding lists of tokens
            (already lemmatized/stemmed, stopwords removed).
        dfname: Genre label to store in the output's ``genre`` column.

    Returns:
        DataFrame with columns ``word``, ``count`` and ``genre``, sorted by
        descending count.
    """
    # flatten all per-song token lists into one list of words
    # (the original also had a dead no-op ``len(words_list)`` statement)
    words_list = []
    for tokens in df['cleaned_lyrics']:
        words_list.extend(tokens)
    # tabulate word frequencies and reshape into a tidy dataframe
    lyric_word_frequencies = pd.DataFrame.from_dict(Counter(words_list), orient='index').reset_index()
    lyric_word_frequencies = lyric_word_frequencies.rename(columns={'index': 'word', 0: 'count'})
    lyric_word_frequencies = lyric_word_frequencies.sort_values(by="count", ascending=False)
    lyric_word_frequencies['genre'] = dfname
    return lyric_word_frequencies
rb_lyrics = top_lyrics(rb, 'r-b')[:15]
rb_lyrics
pop_lyrics = top_lyrics(pop, 'pop')[:15]
country_lyrics = top_lyrics(country, 'country')[:15]
rock_lyrics = top_lyrics(rock, 'rock')[:15]
rap_lyrics = top_lyrics(rap, 'rap')[:15]
full_lyrics = pd.concat([pop_lyrics,country_lyrics,rock_lyrics,rap_lyrics,rb_lyrics])
full_lyrics
full_lyrics.to_csv('cleaned_data/lyric_frequencies/top15_all_genres_lyric_frequencies.csv', index = False)
```
## Top Songs By Genre
I forgot to get the top songs by genre streams so I am re importing the top 200 files and the previously created OHE (one-hot-encoded) df to create a new df with the streams
```
import pandas as pd
## OTHER MISC DATA CLEANING
# Re-load the four yearly top-200 chart exports (absolute local paths —
# this cell only runs on the original author's machine).
df1 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2017_weekly_all_locations_top200.csv')
df2 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2018_weekly_all_locations_top200.csv')
df3 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2019_weekly_all_locations_top200.csv')
df4 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2020_weekly_all_locations_top200.csv')
df = pd.concat([df1, df2, df3, df4])
# Stream counts arrive as strings with thousands separators ("1,234,567").
df['streams'] = df['streams'].str.replace(",", '').astype(int)
# Keep only the Global chart and total the streams per track.
global_df = df[df['country_chart'].str.contains("Global")]
global_df_total = global_df.groupby(["track", 'spotify_link']).sum().reset_index()
lyrics_df = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/OHE_all_top_songs.csv')
# Join the one-hot genre flags onto the global stream totals.
merged_df = pd.merge(lyrics_df, global_df_total, "inner", on = "track")
merged_df = merged_df.rename(columns={'streams': "total_streams"})
merged_df
# Per genre: most-streamed tracks. The slice sizes differ (11/12/13)
# presumably to allow for duplicate tracks being trimmed later — TODO confirm.
pop = merged_df[merged_df['pop'] == 1][['track', 'artist', 'total_streams', 'spotify_link']].reset_index(drop=True).sort_values(by=['total_streams'], ascending = False)[:11]
pop['genre'] = 'pop'
rb = merged_df[merged_df['rb'] == 1][['track', 'artist', 'total_streams', 'spotify_link']].reset_index(drop=True).sort_values(by=['total_streams'], ascending = False)[:11]
rb['genre'] = 'r-b'
rap = merged_df[merged_df['rap'] == 1][['track', 'artist', 'total_streams', 'spotify_link']].reset_index(drop=True).sort_values(by=['total_streams'], ascending = False)[:13]
rap['genre'] = 'rap'
rock = merged_df[merged_df['rock'] == 1][['track', 'artist', 'total_streams', 'spotify_link']].reset_index(drop=True).sort_values(by=['total_streams'], ascending = False)[:13]
rock['genre'] = 'rock'
country = merged_df[merged_df['country'] == 1][['track', 'artist', 'total_streams', 'spotify_link']].reset_index(drop=True).sort_values(by=['total_streams'], ascending = False)[:12]
country['genre'] = 'country'
df_output = pd.concat([pop, rb, rap, rock, country])
df_output
# Spot-check one spotify_link value.
df_output.iloc[59][3]
# Change all links to embed links
df_output.to_csv('../cleaned_data/top10_by_genre_all_time.csv', index = False)
```
### Creating All Topic Songs With Years
```
import pandas as pd
# Re-load the yearly top-200 exports, tagging each row with its chart year.
df1 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2017_weekly_all_locations_top200.csv')
df1['year'] = '2017'
df2 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2018_weekly_all_locations_top200.csv')
df2['year'] = '2018'
df3 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2019_weekly_all_locations_top200.csv')
df3['year'] = '2019'
df4 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2020_weekly_all_locations_top200.csv')
df4['year'] = '2020'
df = pd.concat([df1, df2, df3, df4])
# Peak (max) weekly streams per track/artist/chart/year combination.
all_locations_df_max = df.groupby(["track", 'artist','country_chart', 'year']).max().reset_index()[['track','artist',"year", 'streams', "country_chart",'spotify_link']]
all_locations_df_max
all_locations_df_max.to_csv("cleaned_data/2017_2020_all_locations_max_streams.csv", index = False)
```
| github_jupyter |
# Quickstart
A quick introduction on how to use the OQuPy package to compute the dynamics of a quantum system that is possibly strongly coupled to a structured environment. We illustrate this by applying the TEMPO method to the strongly coupled spin boson model.
**Contents:**
* Example - The spin boson model
* 1. The model and its parameters
* 2. Create system, correlations and bath objects
* 3. TEMPO computation
First, let's import OQuPy and some other packages we are going to use
```
# Make the local OQuPy checkout importable ahead of any installed copy.
import sys
sys.path.insert(0,'..')
import oqupy
import numpy as np
import matplotlib.pyplot as plt
```
and check what version of tempo we are using.
```
# Show which OQuPy version this notebook was run with.
oqupy.__version__
```
Let's also import some shorthands for the spin Pauli operators and density matrices.
```
# Shorthands for the Pauli operators and the spin-up/down density matrices.
sigma_x = oqupy.operators.sigma("x")
sigma_y = oqupy.operators.sigma("y")
sigma_z = oqupy.operators.sigma("z")
up_density_matrix = oqupy.operators.spin_dm("z+")
down_density_matrix = oqupy.operators.spin_dm("z-")
```
-------------------------------------------------
## Example - The spin boson model
As a first example let's try to reconstruct one of the lines in figure 2a of [Strathearn2018] ([Nat. Comm. 9, 3322 (2018)](https://doi.org/10.1038/s41467-018-05617-3) / [arXiv:1711.09641v3](https://arxiv.org/abs/1711.09641)). In this example we compute the time evolution of a spin which is strongly coupled to an ohmic bath (spin-boson model). Before we go through this step by step below, let's have a brief look at the script that will do the job - just to have an idea where we are going:
```
# Spin-boson model parameters, in units of the system frequency Omega.
Omega = 1.0
omega_cutoff = 5.0
alpha = 0.3
# System Hamiltonian H_S = Omega/2 * sigma_x.
system = oqupy.System(0.5 * Omega * sigma_x)
# Ohmic spectral density (zeta=1) with an exponential cutoff.
correlations = oqupy.PowerLawSD(alpha=alpha,
                                zeta=1,
                                cutoff=omega_cutoff,
                                cutoff_type='exponential')
# The bath couples to the system through sigma_z/2.
bath = oqupy.Bath(0.5 * sigma_z, correlations)
tempo_parameters = oqupy.TempoParameters(dt=0.1, dkmax=30, epsrel=10**(-4))
# Propagate the up state from t=0 to t=15 (units of 1/Omega).
dynamics = oqupy.tempo_compute(system=system,
                               bath=bath,
                               initial_state=up_density_matrix,
                               start_time=0.0,
                               end_time=15.0,
                               parameters=tempo_parameters)
# Plot <S_z>(t).
t, s_z = dynamics.expectations(0.5*sigma_z, real=True)
plt.plot(t, s_z, label=r'$\alpha=0.3$')
plt.xlabel(r'$t\,\Omega$')
plt.ylabel(r'$<S_z>$')
plt.legend()
```
### 1. The model and its parameters
We consider a system Hamiltonian
$$ H_{S} = \frac{\Omega}{2} \hat{\sigma}_x \mathrm{,}$$
a bath Hamiltonian
$$ H_{B} = \sum_k \omega_k \hat{b}^\dagger_k \hat{b}_k \mathrm{,}$$
and an interaction Hamiltonian
$$ H_{I} = \frac{1}{2} \hat{\sigma}_z \sum_k \left( g_k \hat{b}^\dagger_k + g^*_k \hat{b}_k \right) \mathrm{,}$$
where $\hat{\sigma}_i$ are the Pauli operators, and the $g_k$ and $\omega_k$ are such that the spectral density $J(\omega)$ is
$$ J(\omega) = \sum_k |g_k|^2 \delta(\omega - \omega_k) = 2 \, \alpha \, \omega \, \exp\left(-\frac{\omega}{\omega_\mathrm{cutoff}}\right) \mathrm{.} $$
Also, let's assume the initial density matrix of the spin is the up state
$$ \rho(0) = \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} $$
and the bath is initially at zero temperature.
For the numerical simulation it is advisable to choose a characteristic frequency and express all other physical parameters in terms of this frequency. Here, we choose $\Omega$ for this and write:
* $\Omega = 1.0 \Omega$
* $\omega_c = 5.0 \Omega$
* $\alpha = 0.3$
```
# Model parameters, expressed in units of the system frequency Omega.
Omega = 1.0
omega_cutoff = 5.0
alpha = 0.3
```
### 2. Create system, correlations and bath objects
#### System
$$ H_{S} = \frac{\Omega}{2} \hat{\sigma}_x \mathrm{,}$$
```
# System Hamiltonian H_S = Omega/2 * sigma_x.
system = oqupy.System(0.5 * Omega * sigma_x)
```
#### Correlations
$$ J(\omega) = 2 \, \alpha \, \omega \, \exp\left(-\frac{\omega}{\omega_\mathrm{cutoff}}\right) $$
Because the spectral density is of the standard power-law form,
$$ J(\omega) = 2 \alpha \frac{\omega^\zeta}{\omega_c^{\zeta-1}} X(\omega,\omega_c) $$
with $\zeta=1$ and $X$ of the type ``'exponential'`` we define the spectral density with:
```
# Ohmic spectral density (power law with zeta=1) and exponential cutoff.
correlations = oqupy.PowerLawSD(alpha=alpha,
                                zeta=1,
                                cutoff=omega_cutoff,
                                cutoff_type='exponential')
```
#### Bath
The bath couples with the operator $\frac{1}{2}\hat{\sigma}_z$ to the system.
```
# The bath couples to the system through the operator sigma_z/2.
bath = oqupy.Bath(0.5 * sigma_z, correlations)
```
### 3. TEMPO computation
Now, that we have the system and the bath objects ready we can compute the dynamics of the spin starting in the up state, from time $t=0$ to $t=5\,\Omega^{-1}$
```
# Compute dynamics up to t=5/Omega. Only a tolerance is given, so the
# TEMPO parameters are estimated automatically (this triggers a warning).
dynamics_1 = oqupy.tempo_compute(system=system,
                                 bath=bath,
                                 initial_state=up_density_matrix,
                                 start_time=0.0,
                                 end_time=5.0,
                                 tolerance=0.01)
```
and plot the result:
```
# Plot <S_z>(t) for the automatically parameterised run.
t_1, z_1 = dynamics_1.expectations(0.5*sigma_z, real=True)
plt.plot(t_1, z_1, label=r'$\alpha=0.3$')
plt.xlabel(r'$t\,\Omega$')
plt.ylabel(r'$<S_z>$')
plt.legend()
```
Yay! This looks like the plot in figure 2a [Strathearn2018].
Let's have a look at the above warning. It said:
```
WARNING: Estimating parameters for TEMPO calculation. No guarantie that resulting TEMPO calculation converges towards the correct dynamics! Please refere to the TEMPO documentation and check convergence by varying the parameters for TEMPO manually.
```
We got this message because we didn't tell the package what parameters to use for the TEMPO computation, but instead only specified a `tolerance`. The package tries its best by implicitly calling the function `oqupy.guess_tempo_parameters()` to find parameters that are appropriate for the spectral density and system objects given.
#### TEMPO Parameters
There are **three key parameters** to a TEMPO computation:
* `dt` - Length of a time step $\delta t$ - It should be small enough such that a trotterisation between the system Hamiltonian and the environment is valid, and the environment auto-correlation function is reasonably well sampled.
* `dkmax` - Number of time steps $K \in \mathbb{N}$ - It must be large enough such that $\delta t \times K$ is larger than the necessary memory time $\tau_\mathrm{cut}$.
* `epsrel` - The maximal relative error $\epsilon_\mathrm{rel}$ in the singular value truncation - It must be small enough such that the numerical compression (using tensor network algorithms) does not truncate relevant correlations.
To choose the right set of initial parameters, we recommend to first use the `oqupy.guess_tempo_parameters()` function and then check with the helper function `oqupy.helpers.plot_correlations_with_parameters()` whether it satisfies the above requirements:
```
# Estimate TEMPO parameters, then visually check how well the bath
# auto-correlation function is sampled under them.
parameters = oqupy.guess_tempo_parameters(system=system,
                                          bath=bath,
                                          start_time=0.0,
                                          end_time=5.0,
                                          tolerance=0.01)
print(parameters)
fig, ax = plt.subplots(1,1)
oqupy.helpers.plot_correlations_with_parameters(bath.correlations, parameters, ax=ax)
```
In this plot you see the real and imaginary part of the environment's auto-correlation as a function of the delay time $\tau$ and the sampling of it corresponding to the chosen parameters. The spacing and the number of sampling points are given by `dt` and `dkmax` respectively. We can see that the auto-correlation function is close to zero for delay times larger than approx. $2 \Omega^{-1}$ and that the sampling points follow the curve reasonably well. Thus this is a reasonable set of parameters.
We can choose a set of parameters by hand and bundle them into a `TempoParameters` object,
```
# Hand-picked parameters bundled into a TempoParameters object.
tempo_parameters = oqupy.TempoParameters(dt=0.1, dkmax=30, epsrel=10**(-4), name="my rough parameters")
print(tempo_parameters)
```
and check again with the helper function:
```
# Re-check the correlation sampling with the hand-picked parameters.
fig, ax = plt.subplots(1,1)
oqupy.helpers.plot_correlations_with_parameters(bath.correlations, tempo_parameters, ax=ax)
```
We could feed this object into the `oqupy.tempo_compute()` function to get the dynamics of the system. However, instead of that, we can split up the work that `oqupy.tempo_compute()` does into several steps, which allows us to resume a computation to get later system dynamics without having to start over. For this we start with creating a `Tempo` object:
```
# A Tempo object lets us later resume the computation where it stopped,
# instead of starting over via tempo_compute().
tempo = oqupy.Tempo(system=system,
                    bath=bath,
                    parameters=tempo_parameters,
                    initial_state=up_density_matrix,
                    start_time=0.0)
```
We can start by computing the dynamics up to time $5.0\,\Omega^{-1}$,
```
# Propagate the dynamics up to t=5/Omega.
tempo.compute(end_time=5.0)
```
then get and plot the dynamics of expectation values,
```
# Extract and plot <S_z>(t) computed so far.
dynamics_2 = tempo.get_dynamics()
plt.plot(*dynamics_2.expectations(0.5*sigma_z, real=True), label=r'$\alpha=0.3$')
plt.xlabel(r'$t\,\Omega$')
plt.ylabel(r'$<S_z>$')
plt.legend()
```
then continue the computation to $15.0\,\Omega^{-1}$,
```
# Resume the same computation, extending it up to t=15/Omega.
tempo.compute(end_time=15.0)
```
and then again get and plot the dynamics of expectation values.
```
# Extract and plot the extended dynamics.
dynamics_2 = tempo.get_dynamics()
plt.plot(*dynamics_2.expectations(0.5*sigma_z, real=True), label=r'$\alpha=0.3$')
plt.xlabel(r'$t\,\Omega$')
plt.ylabel(r'$<S_z>$')
plt.legend()
```
Finally, we note: to validate the accuracy of the result **it is vital to check the convergence of such a simulation by varying all three computational parameters!** For this we recommend repeating the same simulation with slightly "better" parameters (smaller `dt`, larger `dkmax`, smaller `epsrel`) and considering the difference of the results as an estimate of the upper bound of the accuracy of the simulation.
-------------------------------------------------
| github_jupyter |
```
import jieba
import matplotlib.pyplot as plt
import pandas as pd
from wordcloud import (WordCloud, get_single_color_func,STOPWORDS)
import re
class SimpleGroupedColorFunc(object):
    """Color function that assigns EXACT colors to listed words.

    Words found in ``color_to_words`` get their group's color; every
    other word falls back to ``default_color``.
    """

    def __init__(self, color_to_words, default_color):
        # Invert {color: [words]} into a flat {word: color} lookup table.
        lookup = {}
        for color, words in color_to_words.items():
            for word in words:
                lookup[word] = color
        self.word_to_color = lookup
        self.default_color = default_color

    def __call__(self, word, **kwargs):
        # WordCloud invokes this per word; extra drawing kwargs are ignored.
        return self.word_to_color.get(word, self.default_color)
class GroupedColorFunc(object):
    """Color function assigning DIFFERENT SHADES of a color per word group.

    Each word listed in ``color_to_words`` is colored with shades of its
    group's color via ``wordcloud.get_single_color_func``; any other word
    uses shades of ``default_color``.
    """

    def __init__(self, color_to_words, default_color):
        # Pre-build one single-color function per group, paired with the
        # set of words it applies to (sets give O(1) membership tests).
        self.color_func_to_words = [
            (get_single_color_func(color), set(words))
            for color, words in color_to_words.items()
        ]
        self.default_color_func = get_single_color_func(default_color)

    def get_color_func(self, word):
        """Return the single_color_func associated with ``word``."""
        for color_func, words in self.color_func_to_words:
            if word in words:
                return color_func
        return self.default_color_func

    def __call__(self, word, **kwargs):
        return self.get_color_func(word)(word, **kwargs)
def content_preprocess(csv):
    """Extract the Douban comment column ('评论') into content4wordcloud.csv."""
    # preprocess: extract comment content
    df = pd.read_csv(csv)
    preprocessed_data = df[['评论']]
    # Index and columns are not saved
    preprocessed_data.to_csv('content4wordcloud.csv',header = 0,index = 0)
# Run the extraction on the scraped Douban comments.
content_preprocess('douban_comment.csv')
# content_preprocess('douban_comment.csv')
def word_cloud_creation(filename):
    '''create word cloud and split the words'''
    # NOTE(review): file handle is never closed; acceptable in a notebook.
    text = open(filename, encoding = 'utf-8', errors = 'ignore').read()
    # jieba segments the Chinese text; cut_all=True yields every possible word.
    word_list = jieba.cut(text, cut_all = True)
    # Join with spaces so WordCloud can tokenize the segmented text.
    wl = ' '.join(word_list)
    return wl
# Combine two stopword files plus a few hand-picked Chinese filler words.
stoptext1 = open('stopword.txt',encoding='utf-8').read()
stopwords = stoptext1.split('\n')
stoptext2 = open('stopword2.txt',encoding='utf-8').read()
stopwords = stopwords+stoptext2.split('\n')
stopwords = stopwords+['一部','这部','看过','真的','感觉','一种']
def word_cloud_setting():
    """Build the shared WordCloud renderer for the Douban cloud.

    Uses the module-level ``stopwords`` list; ``collocations=False``
    avoids counting bigrams and ``repeat=True`` fills the canvas up to
    ``max_words``. The font path is now a raw string so the backslashes
    in the Windows path are not treated as escape sequences (the original
    non-raw literal relied on ``\W``/``\F`` being invalid escapes, which
    emits a DeprecationWarning and will become a SyntaxError).
    """
    wc = WordCloud(max_words=500, collocations=False, repeat=True,
                   background_color='white', scale=1.5, stopwords=stopwords,
                   height=1080, width=1920,
                   font_path=r'C:\Windows\Fonts\simsun.ttc')
    return wc
def word_cloud_implementation(wl,wc):
    '''Generate word cloud and display'''
    my_words = wc.generate(wl)
    plt.imshow(my_words)
    plt.axis('off')
    # Save to disk before plt.show() so the rendered cloud is persisted.
    wc.to_file('word_cloud.png')
    plt.show()
# Build and render the Douban word cloud.
wl = word_cloud_creation('content4wordcloud.csv')
wc = word_cloud_setting()
word_cloud_implementation(wl,wc)
# This Part: Emphasize what's most focused and professional
# Map highlight colors to curated groups of Chinese review vocabulary.
color_to_words = {
    # words below will be colored with a single color function
    # focus on the film itself
    'red': ['电影', '导演', '故事', '剧情', '配乐', '剧本', '表演','角色','镜头', '音乐','主角','观众','片子'],
    # talk about something else or feeling/attitude
    'green': ['真的', '感觉','精彩','感动','喜欢','特别','人生', '世界', '生活','人性','经典']
}
# Words that are not in any of the color_to_words values
# will be colored with a grey single color function
default_color = 'grey'
# Create a color function with single tone
# grouped_color_func = SimpleGroupedColorFunc(color_to_words, default_color)
# Create a color function with multiple tones
grouped_color_func = GroupedColorFunc(color_to_words, default_color)
# Apply our color function
wc.recolor(color_func=grouped_color_func)
wc.to_file('word_cloud_emphasized.png')
plt.figure()
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.show()
# Peek at the IMDB review column before extracting it.
df = pd.read_csv('imdb_movie_review_info.csv')
preprocessed_data = df[['userReview']]
preprocessed_data
def content_preprocess(csv):
    """Extract the IMDB 'userReview' column into content4wordcloud.csv."""
    # preprocess: extract comment content
    df = pd.read_csv(csv)
    preprocessed_data = df[['userReview']]
    # Index and columns are not saved
    preprocessed_data.to_csv('content4wordcloud.csv',header = 0,index = 0)
# Overwrites the intermediate file previously created for the Douban cloud.
content_preprocess('imdb_movie_review_info.csv')
# content_preprocess('douban_comment.csv')
def word_cloud_creation(filename):
    """Read the exported English reviews and clean them for WordCloud.

    Args:
        filename (str): UTF-8 text file with the exported reviews.

    Returns:
        str: Review text with HTML tags removed and the filler word
        'the' stripped.
    """
    # Use a context manager so the file handle is closed deterministically.
    with open(filename, encoding='utf-8', errors='ignore') as fh:
        wl = fh.read()
    # Drop residual HTML tags such as <br/>.
    wl = re.sub('<.*?>', '', wl)
    # Remove 'the' only as a whole word: the original pattern 'the' also
    # mangled words containing it ('there' -> 're', 'theater' -> 'ater').
    wl = re.sub(r'\bthe\b', '', wl)
    return wl
# Rebuild the stopword list, adding English filler terms for the IMDB cloud.
stoptext1 = open('stopword.txt',encoding='utf-8').read()
stopwords = stoptext1.split('\n')
stoptext2 = open('stopword2.txt',encoding='utf-8').read()
stopwords = stopwords+stoptext2.split('\n')
stopwords = stopwords+['wa','a','i','time','make','watch']
def word_cloud_setting():
    """Build the WordCloud renderer for the IMDB reviews.

    Uses the module-level English ``stopwords`` list. The font path is
    now a raw string so the backslashes in the Windows path are not
    treated as escape sequences (the original non-raw literal emits a
    DeprecationWarning and will become a SyntaxError).
    """
    wc = WordCloud(max_words=500, collocations=False, repeat=True,
                   background_color='white', scale=1.5, stopwords=stopwords,
                   height=1080, width=1920,
                   font_path=r'C:\Windows\Fonts\simsun.ttc')
    return wc
def word_cloud_implementation(wl, wc, out_file='word_cloud_imdb.png'):
    """Generate a word cloud from text, display it and save it to disk.

    Args:
        wl (str): Cleaned review text to render.
        wc (WordCloud): Configured WordCloud instance.
        out_file (str): PNG path for the rendered cloud. The default
            keeps the original hard-coded filename for backward
            compatibility.
    """
    my_words = wc.generate(wl)
    plt.imshow(my_words)
    plt.axis('off')
    wc.to_file(out_file)
    plt.show()
# Clean the exported review text, configure the cloud and render it.
wl = word_cloud_creation('content4wordcloud.csv')
wc = word_cloud_setting()
word_cloud_implementation(wl,wc)
# This Part: Emphasize what's most focused and professional
# Color groups: 'red' = words about the movie itself,
# 'green' = words about personal feelings/experience.
color_to_words = {
# words below will be colored with a single color function
'red': ['movie', 'film', 'character', 'performance', 'story', 'shot','actor','scene', 'director','plot','acting'],
# talk about something else or feeling/attitude
'green': ['life', 'people','good','like','bad','love', 'great', 'feel','world','excellent','perfect','real','classic']
}
# Words that are not in any of the color_to_words values
# will be colored with a grey single color function
default_color = 'grey'
# Create a color function with multiple tones
# NOTE(review): GroupedColorFunc comes from an earlier notebook cell.
grouped_color_func = GroupedColorFunc(color_to_words, default_color)
# Apply our color function
wc.recolor(color_func=grouped_color_func)
wc.to_file('word_cloud_emphasized_imdb.png')
# Render the recolored cloud inline.
plt.figure()
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.show()
```
In this part, we will find out what people tend to talk about in the movie reviews on douban and imdb separately. We do this with WordCloud, a fascinating approach for figuring out what frequently occurs in people's reviews. In a word cloud, the most frequent words are drawn larger than the others, so we can instantly see what we should pay attention to. Here are the two word cloud figures for the reviews of the top 250 movies on imdb and douban. We chose the top 100 rated reviews, which are basically positive; they can tell us what people love about the movies.
Of course, stopwords list must be added to filter out some meaningless words for example, 'the', 'than', 'that'.
I noticed that people on douban may be accustomed to making comments that are based more on self-feelings and experience, while people on imdb tend to talk about the movie itself. To see this feature more clearly, I have marked the words concerning the movies **red** and the words about self experience and emotional feelings **green**. Basically, red words are more objective and green words are more subjective.
So I chose these words:
Indeed, if you take a closer look, you will find that many comments on douban are more likely to talk about world, life, and whether they like the movie, which makes the cloud greener. However, imdb users tend to talk about performance, characters, and scenes (red). I can't help wondering if this suggests that Chinese people and the English-speaking world differ in thinking patterns or in the way they describe things. We Chinese like to focus on our own life and feelings, while the English-speaking community may prefer to start from something about the movie itself.
Well, this could also be the result of the difference in grammar. But I figure that this might not be the main reason.
Moreover, Chinese seldom use simple words like '赞,棒great' to directly express their feelings('好good' is in the stopwords, 'like' as well), though they start with something that's not closely related to the movies.(world, life) We prefer to say a movie is '感人touching', or '真实close to reality' if we think they are very good. On the other hand, imdb users describe a movie with 'excellent', 'perfect'. They use these words as the highest praise.
For further research on reviews, my teammate Haoyun has done some research on prediction about genres by reviews.
douban:
'red': '电影', '导演', '故事', '剧情', '配乐', '剧本', '表演','角色','镜头', '音乐','主角','观众','片子'
meaning: movie, director, story, plot, soundtrack, script, performance, character, shot, music, main character, audience, film(another)
'green': '真的', '感觉','精彩','感动','喜欢','特别','人生', '世界', '生活','人性','经典','现实'
meaning: really, feel, excellent, touching, like, special(particularly), life, world, living(daily), humanity, classic, reality
imdb:
'red': 'movie', 'film', 'character', 'performance', 'story', 'shot','actor','scene', 'director','plot','acting'
'green': 'life', 'people','good','like','bad','love', 'great', 'feel','world','excellent','perfect','real'
| github_jupyter |
<img src='./img/EU-Copernicus-EUM_3Logos.png' alt='Logo EU Copernicus EUMETSAT' align='right' width='50%'></img>
<br>
<br>
<a href="./index_ltpy.ipynb"><< Index</a><span style="float:right;"><a href="./12_ltpy_WEkEO_harmonized_data_access_api.ipynb">12 - WEkEO Harmonized Data Access API >></a></span>
# 1.1 Atmospheric composition data - Overview and data access
This module gives an overview of the following atmospheric composition data services:
* [EUMETSAT AC SAF - The EUMETSAT Satellite Application Facility on Atmospheric Composition Monitoring](#ac_saf)
* [Copernicus Sentinel-5 Precursor (Sentinel-5P)](#sentinel_5p)
* [Copernicus Sentinel-3](#sentinel3)
* [Copernicus Atmosphere Monitoring Service (CAMS)](#cams)
<br>
## <a id="ac_saf"></a>EUMETSAT AC SAF - The EUMETSAT Satellite Application Facility on Atmospheric Composition Monitoring
<span style=float:left><img src='./img/ac_saf_logo.png' alt='Logo EU Copernicus EUMETSAT' align='left' width='90%'></img></span>
The [EUMETSAT Satellite Application Facility on Atmospheric Composition Monitoring (EUMETSAT AC SAF)](http://acsaf.org/) is one of eight EUMETSAT Satellite Application Facilities (SAFs). <br>
SAFs generate and disseminate operational EUMETSAT products and services and are an integral part of the distributed EUMETSAT Application Ground Segment.
AC SAF processes data on ozone, other trace gases, aerosols and ultraviolet data, obtained from satellite instrumentation.
<br>
### Available AC SAF products
AC-SAF offers three different product types: <br>
|<font size='+0.2'><center>[Near real-time products](#nrt)</center></font> | <font size='+0.2'><center>[Offline products](#offline)</center></font> | <font size='+0.2'><center>[Data records](#records)</center></font> |
|-----|-----|------|
<img src='./img/nrt_no2_example.png' alt='Near-real time product - NO2' align='middle' width='60%'></img>|<img src='./img/offline_ozone_example.png' alt='Logo EU Copernicus EUMETSAT' align='middle' width='60%'></img>|<img src='./img/ac_saf_level3.png' alt='Logo EU Copernicus EUMETSAT' align='middle' width='100%'></img>|
<br>
Near real-time and offline products are often referred to as Level 2 data. Data records are referred to as Level 3 data.
AC SAF products are sensed from two instruments onboard the Metop satellites:
* [Global Ozone Monitoring Experiment-2 (GOME-2) instrument](https://acsaf.org/gome-2.html) <br>
GOME-2 can measure a range of atmospheric trace constituents, with the emphasis on global ozone distributions. Furthermore, cloud properties and intensities of ultraviolet radiation are retrieved. These data are crucial for monitoring the atmospheric composition and the detection of pollutants. <br>
* [Infrared Atmospheric Sounding Interferometer (IASI) instrument](https://acsaf.org/iasi.html)
The [Metop satellites](https://acsaf.org/metop.html) are a series of three satellites that were launched in October 2006 (Metop-A), September 2012 (Metop-B) and November 2018 (Metop-C) respectively.
All AC SAF products are disseminated under the [AC SAF Data policy](https://acsaf.org/data_policy.html).
<br>
#### <a id="nrt"></a>Near-real time (NRT) products
NRT products are Level 2 products and are available to users in 3 hours from sensing at the latest and available for the past two months. NRT products are disseminated in HDF5 format.
| <img width=100>NRT Product type name</img> | Description | Unit | <img width=80>Satellite</img> | Instrument |
| ---- | ----- | ----- | ---- | -----|
| Total Ozone (O<sub>3</sub>) column | NRT total ozone column product provided information about vertical column densities of ozone in the atmosphere | Dobson Units (DU) | Metop-A<br>Metop-B | GOME-2 |
| Total and tropospheric NO<sub>2</sub> columns | NRT total and tropospheric NO2 column products provide information about vertical column densities of nitrogen dioxide in the atmosphere. | molecules/cm2 | Metop-A<br>Metop-B | GOME-2 |
| Total SO<sub>2</sub> column | NRT total SO2 column product provides information about vertical column densities of the sulfur dioxide in the atmosphere. | Dobson Units (DU) | Metop-A<br>Metop-B | GOME-2
| Total HCHO column | NRT HCHO column product provides information about vertical column densities of formaldehyde in the atmosphere. | molecules/cm2 | Metop-A<br>Metop-B | GOME-2 |
| High-resolution vertical ozone profile | NRT high-resolution vertical ozone profile product provides an ozone profile from the GOME-2 nadir scanning mode. | Partial ozone columns in Dobson Units in 40 layers from the surface up to 0.001 hPa| Metop-A<br>Metop-B | GOME-2 |
| Global tropospheric ozone column | The global tropospheric ozone column product provides information about vertical column densities of ozone in the troposphere, <br>from the surface to the tropopause and from the surface to 500 hPa (∼5km). | Dobson Units (DU) | Metop-A<br>Metop-B | GOME-2 |
<br>
#### <a id="offline"></a>Offline products
Offline products are Level 2 products and are available to users in 15 days of sensing. Typical delay is 2-3 days. Offline products are disseminated in HDF5 format.
| Offline Product type name | Description | Unit | <img width=80>Satellite</img> | Instrument | <img width=150px>Time period</img> |
| ---- | ----- | ----- | ---- | -----|----|
| Total Ozone (O<sub>3</sub>) column | Offline total ozone column product provided information about vertical column densities of ozone in the atmosphere | Dobson Units (DU) | Metop-A<br>Metop-B | GOME-2 | 1 Jan 2008 - almost NRT<br>13 Dec 2012 - almost NRT |
| Total and tropospheric NO<sub>2</sub> columns | Offline total and tropospheric NO2 column products provide information about vertical column densities of nitrogen dioxide in the atmosphere. | molecules/cm2 | Metop-A<br>Metop-B | GOME-2 | 1 Jan 2008 - almost NRT<br>13 Dec 2012 - almost NRT |
| Total SO<sub>2</sub> column | Offline total SO2 column product provides information about vertical column densities of the sulfur dioxide in the atmosphere. | Dobson Units (DU) | Metop-A<br>Metop-B | GOME-2 | 1 Jan 2008 - almost NRT<br>13 Dec 2012 - almost NRT |
| Total HCHO column | Offline HCHO column product provides information about vertical column densities of formaldehyde in the atmosphere. | molecules/cm2 | Metop-A<br>Metop-B | GOME-2 | 1 Jan 2008 - almost NRT<br>13 Dec 2012 - almost NRT |
| High-resolution vertical ozone profile | Offline high-resolution vertical ozone profile product provides an ozone profile from the GOME-2 nadir scanning mode. | Partial ozone columns in Dobson Units in 40 layers from the surface up to 0.001 hPa| Metop-A<br>Metop-B | GOME-2 | 1 Jan 2008 - almost NRT<br>13 Dec 2012 - almost NRT |
| Global tropospheric ozone column | The offline global tropospheric ozone column product provides information about vertical column densities of ozone in the troposphere, from the surface to the tropopause and and from the surface to 500 hPa (∼5km). | Dobson Units (DU) | Metop-A<br>Metop-B | GOME-2 | 1 Jan 2008 - almost NRT<br>13 Dec 2012 - almost NRT |
<br>
#### <a id="records"></a>Data records
Data records are reprocessed, gridded Level 3 data. Data records are monthly aggregated products, regridded on a regular latitude-longitude grid. Data records are disseminated in NetCDF format.
| Data record name | Description | Unit | <img width=80>Satellite</img> | Instrument | <img width=150>Time period</img> |
| ---- | ----- | ----- | ---- | -----|----|
| Reprocessed **tropospheric O<sub>3</sub>** column data record for the Tropics | Tropospheric ozone column data record for the Tropics provides long-term information <br>about vertical densities of ozone in the atmosphere for the tropics. | Dobson Units (DU) | Metop-A<br>Metop-B | GOME-2 | Jan 2007- Dec 2018<br>Jan 2013- Jun 2019 |
| Reprocessed **total column and tropospheric NO<sub>2</sub>** data record | Total and tropospheric NO2 column data record provides long-term information about vertical column densities of nitrogen dioxide in the atmosphere. | molecules/cm2 | Metop-A<br>Metop-B | GOME-2 | Jan 2007 - Nov 2017<br>Jan 2013 - Nov 2017 |
| Reprocessed **total H<sub>2</sub>O column** data record | Total H2O column data record provides long-term information about vertical column densities of water vapour in the atmosphere. | kg/m2 | Metop-A<br>Metop-B | GOME-2 | Jan 2007 - Nov 2017<br>Jan 2013 - Nov 2017 |
<br>
### <a id="ac_saf_access"></a>How to access AC SAF products
AC SAF products can be accessed via different dissemination channels. There are channels where Level 2 and Level 3 are available for download. Other sources allow to browse through images and maps of the data. This is useful to see for what dates e.g. Level 2 data were sensed.
#### DLR ftp server
All near-real time, offline and reprocessed total column data are available at [DLR's ATMOS FTP-server](https://atmos.eoc.dlr.de/products/). Accessing data is a two step process:
1. [Register](https://acsaf.org/registration_form.html) as a user of AC SAF products
2. [Log in](https://atmos.eoc.dlr.de/products/) (with the user name and password that is emailed to you after registration)
Once logged in, you find data folders for GOME-2 products from Metop-A in the directory *'gome2a/'* and GOME-2 products from Metop-B in the directory: *'gome2b/'* respectively. In each GOME-2 directory, you find the following sub-directories: <br>
* **`near_real_time/`**,
* **`offline/`**, and
* **`level3/`**.
<br>
<div style='text-align:center;'>
<figure><img src='./img/dlr_ftp_directory.png' width='50%'/>
<figcaption><i>Example of the directory structure of DLR's ATMOS FTP-server</i></figcaption>
</figure>
</div>
<br>
#### EUMETSAT Data Centre
The EUMETSAT Data Centre provides a long-term archive of data and generated products from EUMETSAT, which can be ordered online. Ordering data is a two step process:
1. [Create an account](https://eoportal.eumetsat.int/userMgmt/register.faces) at the EUMETSAT Earth Observation Portal
2. [Log in](https://eoportal.eumetsat.int/userMgmt/login.faces) (with the user name and password that is emailed to you after registration)
Once successfully logged in, go to (1) Data Centre. You will be re-directed to (2) the User Services Client. Type in *'GOME'* as search term and you can get a list of all available GOME-2 products.
<div style='text-align:center;'>
<figure><img src='./img/eumetsat_data_centre.png' width='50%' />
<figcaption><i>Example of the directory structure of EUMETSAT's Data Centre</i></figcaption>
</figure>
</div>
<br>
#### Web-based services
There are two web-based services, [DLR's ATMOS webserver](https://atmos.eoc.dlr.de/app/missions/gome2) and the [TEMIS service by KNMI](http://temis.nl/index.php) that offer access to GOME-2/MetOp browse products. These services are helpful to see the availability of data for specific days, especially for AC SAF Level-2 parameters.
<br>
| <font size='+0.2'>[DLR's ATMOS webserver](https://atmos.eoc.dlr.de/app/missions/gome2)</font> | <font size='+0.2'>[TEMIS - Tropospheric Emission Monitoring Internet Service](http://temis.nl/index.php)</font> |
| - | - |
| <br>ATMOS (Atmospheric Parameters Measured by in-Orbit Spectroscopy) is a webserver operated by DLR's Remote Sensing Technology Institute (IMF). The webserver provides access to browse products from GOME-2/Metop Products, both in NRT and offline mode. <br><br> | <br>TEMIS is a web-based service to browse and download atmospheric satellite data products maintained by KNMI. The data products consist mainly of tropospheric trace gases and aerosol concentrations, but also UV products, cloud information and surface albedo climatologies are provided. <br><br> |
| <center><img src='./img/atmos_service.png' width='70%'></img></center> | <center><img src='./img/temis_service.png' width='70%'></img></center> |
<br>
## <a id="sentinel_5p"></a>Copernicus Sentinel-5 Precursor (Sentinel-5P)
[Sentinel-5 Precursor (Sentinel-5P)](https://sentinels.copernicus.eu/web/sentinel/missions/sentinel-5p) is the first Copernicus mission dedicated to monitoring our atmosphere. The satellite carries the state-of-the-art TROPOMI instrument to map a multitude of trace gases.
Sentinel-5P was developed to reduce data gaps between the ENVISAT satellite - in particular the Sciamachy instrument - and the launch of Sentinel-5, and to complement GOME-2 on MetOp. In the future, both the geostationary Sentinel-4 and polar-orbiting Sentinel-5 missions will monitor the composition of the atmosphere for Copernicus Atmosphere Services. Both missions will be carried on meteorological satellites operated by [EUMETSAT](https://eumetsat.int).
### Available data products and trace gas information
<span style=float:right><img src='./img/sentinel_5p_data_products.jpg' alt='Sentinel-5p data prodcuts' align='right' width='90%'></img></span>
Data products from Sentinel-5P’s Tropomi instrument are distributed to users at two levels:
* `Level-1B`: provides geo-located and radiometrically corrected top of the atmosphere Earth radiances in all spectral bands, as well as solar irradiances.
* `Level-2`: provides atmospheric geophysical parameters.
`Level-2` products are disseminated within three hours after sensing. This `near-real-time`(NRT) services disseminates the following products:
* `Ozone`
* `Sulphur dioxide`
* `Nitrogen dioxide`
* `Formaldehyde`
* `Carbon monoxide`
* `Vertical profiles of ozone` and
* `Cloud / Aerosol distributions`
`Level-1B` products are disseminated within 12 hours after sensing.
`Methane`, `tropospheric ozone` and `corrected total nitrogen dioxide columns` are available within 5 days after sensing.
<br>
### <a id="sentinel5p_access"></a>How to access Sentinel-5P data
Sentinel-5P data can be accessed via different dissemination channels. The data is accessible via the `Copernicus Open Access Hub` and `EUMETSAT's EUMETCast`.
#### Copernicus Open Access Hub
Sentinel-5P data is available for browsing and downloading via the [Copernicus Open Access Hub](https://scihub.copernicus.eu/). The Copernicus Open Access Hub provides complete, free and open access to Sentinel-1, Sentinel-2, Sentinel-3 and Sentinel-5P data.
<div style='text-align:center;'>
<figure><img src='./img/open_access_hub.png' alt='Sentinel-5p data products' align='middle' width='50%'/>
<figcaption><i>Interface of the Copernicus Open Access Hub and the Sentinel-5P Pre-Operations Data Hub</i></figcaption>
</figure>
</div>
#### EUMETSAT's EUMETCast
Since August 2019, Sentinel-5p `Level 1B` and `Level 2` are as well available on EUMETSAT's EUMETCast:
* **Level 1B** products will be distributed on EUMETCast Terrestrial
* **Level 2** products are distributed on EUMETCast Europe, High Volume Service Transponder 2 (HVS-2)
Sentinel-5P data on EUMETCast can be accessed via [EUMETSAT's Earth Observation Portal (EOP)](https://eoportal.eumetsat.int/userMgmt/login.faces).
#### TEMIS
[TEMIS - Tropospheric Emission Monitoring Internet Service](http://temis.nl/airpollution/no2.html) provides access to selected Sentinel-5P parameters, e.g. `NO`<sub>2</sub>.
<br>
## <a id='sentinel3'></a>Copernicus Sentinel-3 - Ocean and Land Colour (OLCI)
<span style=float:right><img src='./img/sentinel3.png' alt='Sentinel-5p data prodcuts' align='right' width='90%'></img></span>
The Sentinel-3 is the Copernicus mission to monitor and measure sea surface topography, sea and land surface temperature and ocean and land surface.
The Sentinel-3 mission carries five different instruments aboard the satellites and offers four different data product types:
- [Ocean and Land Colour Instrument (OLCI)](https://sentinel.esa.int/web/sentinel/missions/sentinel-3/data-products/olci)
- [Sea and Land Surface Temperature Radiometer (SLSTR)](https://sentinel.esa.int/web/sentinel/missions/sentinel-3/data-products/slstr)
- [Synergy](https://sentinel.esa.int/web/sentinel/missions/sentinel-3/data-products/synergy), and
- [Altimetry](https://sentinel.esa.int/web/sentinel/missions/sentinel-3/data-products/altimetry).
The Sentinel-3 OLCI mission supports maritime monitoring, land mapping and monitoring, atmospheric monitoring and climate change monitoring.
### Available OLCI data products
OLCI product types are divided in three main categories:
- #### Level-1B products
Two different Level-1B products can be obtained:
- OL_1_EFR - output during EO processing mode for Full Resolution
- OL_1_ERR -output during EO processing mode for Reduced Resolution
The Level-1B products in EO processing mode contain calibrated, ortho-geolocated and spatially re-sampled Top Of Atmosphere (TOA) radiances for [21 OLCI spectral bands](https://sentinel.esa.int/web/sentinel/user-guides/sentinel-3-olci/resolutions/radiometric). In Full Resolution products (i.e. at native instrument spatial resolution), these parameters are provided for each re-gridded pixel on the product image and for each removed pixel. In Reduced Resolution products (i.e. at a resolution four times coarser), the parameters are only provided on the product grid.
- #### Level-2 Land products and Water products
The level-2 land product provides land and atmospheric geophysical parameters. The Level-2 water product provides water and atmospheric geophysical parameters. All products are computed for full and reduced resolution:
- OL_2_LFR - Land Full Resolution
- OL_2_LRR - Land Reduced Resolution
There are two timeframes for the delivery of the products:
- **Near-Real-Time (NRT)**: delivered to the users less than three hours after acquisition of the data by the sensor
- **Non-Time Critical (NTC)**: delivered no later than one month after acquisition or from long-term archives. Typically, the product is available within 24 or 48 hours.
The data is disseminated in .zip archive containing free-standing `NetCDF4` product files.
### How to access Sentinel-3 data
Sentinel-3 data can be accessed via different dissemination channels. The data is accessible via the `Copernicus Open Access Hub` and `WEkEO's Harmonized Data Access API`.
#### Copernicus Open Access Hub
Sentinel-3 data is available for browsing and downloading via the Copernicus Open Access Hub. The Copernicus Open Access Hub provides complete, free and open access to Sentinel-1, Sentinel-2, Sentinel-3 and Sentinel-5P data. See an example of the Copernicus Open Access Hub interface [here](#sentinel5p_access).
#### WEkEO's Harmonized Data Access API
<span style=float:left><img src='./img/wekeo_logo2.png' alt='Logo WEkEO' align='center' width='90%'></img></span>
[WEkEO](https://www.wekeo.eu/) is the EU Copernicus DIAS (Data and Information Access Service) reference service for environmental data, virtual processing environments and skilled user support.
WEkEO offers access to a variety of data, including different parameters sensored from Sentinel-1, Sentinel-2 and Sentinel-3. It further offers access to climate reanalysis and seasonal forecast data.
The [Harmonized Data Access (HDA) API](https://www.wekeo.eu/documentation/using_jupyter_notebooks), a REST interface, allows users to subset and download datasets from WEkEO.
Please see [here](./12_ltpy_WEkEO_harmonized_access_api.ipynb) a practical example how you can retrieve Sentinel-3 data from WEkEO using the Harmonized Data Access API.
<br>
<br>
## <a id="cams"></a>Copernicus Atmosphere Monitoring Service (CAMS)
<span style=float:left><img src='./img/cams_logo_2.png' alt='Copernicus Atmosphere Monitoring Service' align='left' width='95%'></img></span>
[The Copernicus Atmosphere Monitoring Service (CAMS)](https://atmosphere.copernicus.eu/) provides consistent and quality-controlled information related to `air pollution and health`, `solar energy`, `greenhouse gases` and `climate forcing`, everywhere in the world.
CAMS is one of six services that form [Copernicus, the European Union's Earth observation programme](https://www.copernicus.eu/en).
CAMS is implemented by the [European Centre for Medium-Range Weather Forecasts (ECMWF)](http://ecmwf.int/) on behalf of the European Commission. ECMWF is an independent intergovernmental organisation supported by 34 states. It is both a research institute and a 24/7 operational service, producing and disseminating numerical weather predictions to its member states.
<br>
### Available data products
CAMS offers four different data product types:
|<font size='+0.2'><center>[CAMS Global <br>Reanalysis](#cams_reanalysis)</center></font></img> | <font size='+0.2'><center>[CAMS Global Analyses <br>and Forecasts](#cams_an_fc)</center></font> | <img width=30><font size='+0.2'><center>[CAMS Global Fire Assimilation System (GFAS)](#cams_gfas)</center></font></img> | <img width=30><font size='+0.2'><center>[CAMS Greenhouse Gases Flux Inversions](#cams_greenhouse_flux)</center></font></img> |
|-----|-----|------|------|
<img src='./img/cams_reanalysis.png' alt='CAMS reanalysis' align='middle' width='100%'></img>|<img src='./img/cams_forecast.png' alt='CAMS Forecast' align='middle' width='100%'></img>|<img src='./img/cams_gfas.png' alt='CAMS GFAS' align='middle' width='100%'></img>|<img src='./img/cams_greenhouse_fluxes.png' alt='CAMS greenhous flux inversions' align='middle' width='100%'></img>|
#### <a id="cams_reanalysis"></a>CAMS Global Reanalysis
CAMS reanalysis data set provides consistent information on aerosols and reactive gases from 2003 to 2017. CAMS global reanalysis dataset has a global horizontal resolution of approximately 80 km and a refined temporal resolution of 3 hours. CAMS reanalysis are available in GRIB and NetCDF format.
| Parameter family | Time period | <img width=80>Spatial resolution</img> | Temporal resolution |
| ---- | ----- | ----- | -----|
| [CAMS global reanalysis of total aerosol optical depth<br> at multiple wavelengths](https://atmosphere.copernicus.eu/catalogue#/product/urn:x-wmo:md:int.ecmwf::copernicus:cams:prod:rean:black-carbon-aod_dust-aod_organic-carbon-aod_sea-salt-aod_sulphate-aod_total-aod_warning_multiple_species:pid469) | 2003-2017 | ~80km | 3-hourly |
| [CAMS global reanalysis of aerosol concentrations](https://atmosphere.copernicus.eu/catalogue#/product/urn:x-wmo:md:int.ecmwf::copernicus:cams:prod:rean:black-carbon-concentration_dust-concentration_organic-carbon-concentration_pm1_pm10_pm2.5_sea-salt-concentration_sulfates-concentration_warning_multiple_species:pid467) | 2003-2017 | ~80km | 3-hourly |
| [CAMS global reanalysis chemical species](https://atmosphere.copernicus.eu/catalogue#/product/urn:x-wmo:md:int.ecmwf::copernicus:cams:prod:rean:ald2_c10h16_c2h4_c2h5oh_c2h6_c2o3_c3h6_c3h8_c5h8_ch3coch3_ch3cocho_ch3o2_ch3oh_ch3ooh_ch4_co_dms_h2o2_hcho_hcooh_hno3_ho2_ho2no2_mcooh_msa_n2o5_nh2_nh3_nh4_no_no2_no3_no3_a_nox_o3_oh_ole_onit_pan_par_pb_rooh_ror_ra_so2_so4_warning_multiple_species:pid468) | 2003-2017 | ~80km | 3-hourly |
#### <a id="cams_an_fc"></a>CAMS Global analyses and forecasts
CAMS daily global analyses and forecast data set provides daily global forecasts of atmospheric composition parameters up to five days in advance. CAMS analyses and forecast data are available in GRIB and NetCDF format.
The forecast consists of 56 reactive trace gases in the troposphere, stratospheric ozone and five different types of aerosol (desert dust, sea salt, organic matter, black carbon and sulphate).
| Parameter family | Time period | <img width=80>Spatial resolution</img> | Forecast step |
| ---- | ----- | ----- | -----|
| CAMS global forecasts of aerosol optical depths | Jul 2012- 5 days in advance | ~40km | 3-hour |
| CAMS global forecasts of aerosols | Jul 2012 - 5 days in advance | ~40km | 3-hour |
| CAMS global forecasts of chemical species | Jul 2012- 5 days in advance | ~40km | 3-hour |
| CAMS global forecasts of greenhouse gases | Jul 2012- 5 days in advance | ~9km | 3-hour |
#### <a id="cams_gfas"></a>CAMS Global Fire Assimilation System (GFAS)
CAMS GFAS assimilates fire radiative power (FRP) observations from satellite-based sensors to produce daily estimates of wildfire and biomass burning emissions. The GFAS output includes spatially gridded Fire Radiative Power (FRP), dry matter burnt and biomass burning emissions for a large set of chemical, greenhouse gas and aerosol species. CAMS GFAS data are available in GRIB and NetCDF format.
A full list of CAMS GFAS parameters can be found in the [CAMS Global Fire Assimilation System (GFAS) data documentation](https://atmosphere.copernicus.eu/sites/default/files/2018-05/CAMS%20%20Global%20Fire%20Assimilation%20System%20%28GFAS%29%20data%20documentation.pdf).
| Parameter family | Time period | <img width=80>Spatial resolution</img> | Temporal resolution |
| ---- | ----- | ----- | ---- |
| CAMS GFAS analysis surface parameters | Jan 2003 - present | ~11km | daily |
| CAMS GFAS gridded satellite parameters | Jan 2003 - present | ~11km | daily |
#### <a id="cams_greenhouse_flux"></a>CAMS Greenhouse Gases Flux Inversions
CAMS Greenhouse Gases Flux Inversion reanalysis describes the variations, in space and in time, of the surface sources and sinks (fluxes) of the three major greenhouse gases that are directly affected by human activities: `carbon dioxide (CO2)`, `methane (CH4)` and `nitrous oxide (N2O)`. CAMS Greenhouse Gases Flux data is available in GRIB and NetCDF format.
| Parameter | Time period | <img width=80>Spatial resolution</img> | Frequency | Quantity |
| ---- | ----- | ----- | ---- | -----|
| Carbon Dioxide | Jan 1979 - Dec 2018 | ??? | 3 hourly<br>Monthly average | Concentration<br>Surface flux<br> Total colum |
| Methane | Jan 1990 - Dec 2017 | ??? | 6-hourly<br>Daily average<br>Monthly average | Concentration<br>Surface flux<br>Total column
| Nitrous Oxide | Jan 1995 - Dec 2017 | ???| 3-hourly<br>Monthly average | Concentration<br>Surface flux |
<br>
### <a id="cams_access"></a>How to access CAMS data
CAMS data can be accessed in two different ways: `ECMWF data archive` and `CAMS data catalogue of data visualizations`. A more detailed description of the different data access platforms can be found [here](https://confluence.ecmwf.int/display/CKB/Access+to+CAMS+global+forecast+data).
#### ECMWF data archive
ECMWF's data archive is called Meteorological and Archival Retrieval System (MARS) and provides access to ECMWF Public Datasets. The following CAMS data can be accessed through the ECMWF MARS archive: `CAMS reanalysis`, `CAMS GFAS data` (older than one day), and `CAMS global analyses and forecasts` (older than five days).
The archive can be accessed in two ways:
* via the [web interface](https://apps.ecmwf.int/datasets/) and
* via the [ECMWF Web API](https://confluence.ecmwf.int/display/WEBAPI/Access+ECMWF+Public+Datasets).
Subsequently, an example is shown how a MARS request can be executed within Python and data in either GRIB or netCDF can be downloaded on-demand.
#### 1. Register for an ECMWF user account
- Self-register at https://apps.ecmwf.int/registration/
- Login at https://apps.ecmwf.int/auth/login
#### 2. Install the `ecmwfapi` python library
`pip install ecmwf-api-client`
#### 3. Retrieve your API key
You can retrieve your API key at https://api.ecmwf.int/v1/key/. Add the `url`, `key` and `email` information, when you define the `ECMWFDataServer` (see below).
#### 4. Execute a MARS request and download data as `netCDF` file
Below, you see the principle of a `data retrieval` request. You can use the web interface to browse through the datasets. At the end, there is the option to let generate the `data retrieval` request for the API.
Additionally, you can have a look [here](./cams_ecmwfapi_example_requests.ipynb) at some example requests for different CAMS parameters.
**NOTE**: per default, ECMWF data is stored on a grid with longitudes going from 0 to 360 degrees. It can be reprojected to a regular geographic latitude-longitude grid, by setting the keyword argument `area` and `grid`. Per default, data is retrieved in `GRIB`. If you wish to retrieve the data in `netCDF`, you have to specify it by using the keyword argument `format`.
The example requests `Organic Matter Aerosol Optical Depth at 550 nm` forecast data for 3 June 2019 in `NetCDF`.
```
#!/usr/bin/env python
from ecmwfapi import ECMWFDataServer
server = ECMWFDataServer(url="https://api.ecmwf.int/v1", key="XXXXXXXXXXXXXXXX", email="XXXXXXXXXXXXXXXX")
# Retrieve data in NetCDF format
server.retrieve({
"class": "mc",
"dataset": "cams_nrealtime",
"date": "2019-06-03/to/2019-06-03",
"expver": "0001",
"levtype": "sfc",
"param": "210.210",
"step": "3",
"stream": "oper",
"time": "00:00:00",
"type": "fc",
"format": "netcdf",
"area": "90/-180/-90/180",
"grid": "0.4/0.4",
"target": "test.nc"
})
```
#### CAMS data catalogue of data visualizations
CAMS provides an extensive [catalogue of data visualizations](https://atmosphere.copernicus.eu/data) in the form of maps and charts. Products are updated daily and are available for selected parameters of `CAMS daily analyses and forecasts`.
<hr>
## Further information
* [EUMETSAT AC SAF - The EUMETSAT Application Facility on Atmospheric Composition Monitoring](https://acsaf.org/index.html)
* [AC SAF Data policy](https://acsaf.org/data_policy.html)
* [AC SAF Algorithm Theoretical Basis Documents (atbds)](https://acsaf.org/atbds.html)
* [DLR's ATMOS webserver](https://atmos.eoc.dlr.de/app/missions/gome2)
* [TEMIS - Tropospheric Emission Monitoring Internet Service](http://temis.nl/index.php)
* [Copernicus Open Access Hub](https://scihub.copernicus.eu/)
* [EUMETSAT Earth Observation Portal](https://eoportal.eumetsat.int/userMgmt/login.faces)
* [Sentinel-5P Mission information](https://sentinels.copernicus.eu/web/sentinel/missions/sentinel-5p)
* [Sentinel-3 Mission information](https://sentinel.esa.int/web/sentinel/missions/sentinel-3)
* [Sentinel-3 OLCI User Guide](https://sentinel.esa.int/web/sentinel/user-guides/sentinel-3-olci)
* [WEkEO](https://www.wekeo.eu/)
* [Copernicus Atmosphere Monitoring Service](https://atmosphere.copernicus.eu/)
* [ECMWF Web Interface](https://apps.ecmwf.int/datasets/)
* [ECMWF Web API](https://confluence.ecmwf.int/display/WEBAPI/Access+ECMWF+Public+Datasets)
* [CAMS catalogue of data visualizations](https://atmosphere.copernicus.eu/data)
* [CAMS Service Product Portfolio](https://atmosphere.copernicus.eu/sites/default/files/2018-12/CAMS%20Service%20Product%20Portfolio%20-%20July%202018.pdf)
<br>
<a href="./index_ltpy.ipynb"><< Index</a><span style="float:right;"><a href="./12_ltpy_WEkEO_harmonized_data_access_api.ipynb">12 - WEkEO Harmonized Data Access API >></a></span>
<hr>
<p style="text-align:left;">This project is licensed under the <a href="./LICENSE">MIT License</a> <span style="float:right;"><a href="https://gitlab.eumetsat.int/eumetlab/atmosphere/atmosphere">View on GitLab</a> | <a href="https://training.eumetsat.int/">EUMETSAT Training</a> | <a href=mailto:training@eumetsat.int>Contact</a></span></p>
| github_jupyter |
```
from erddapy import ERDDAP
import pandas as pd
import numpy as np

## settings (move to yaml file for routines)
server_url = 'http://akutan.pmel.noaa.gov:8080/erddap'  # PMEL ERDDAP endpoint
maxdepth = 0 #keep all data above this depth
site_str = 'M8'  # mooring site label, used for the output filename
region = 'bs'  # Bering Sea mooring search term
# NOTE(review): both entries are 'bs8' -- duplicates are harmless for the
# any(...) membership test below, but the second entry was presumably meant
# to be a different substring (the comment mentions M2); confirm.
substring = ['bs8','bs8'] #search substring useful for M2
prelim=[]  # dataset-id substrings to treat as preliminary (none by default)
#this elimnates bad salinity but
data_QC = True  # apply per-deployment date windows to preliminary data

e = ERDDAP(server=server_url)
# Full-text catalogue search for mooring datasets in the chosen region.
df = pd.read_csv(e.get_search_url(response='csv', search_for=f'datasets_Mooring AND {region}'))
#print(df['Dataset ID'].values)
from requests.exceptions import HTTPError

# Download each matching mooring deployment into dfs {dataset_id: DataFrame}.
dfs = {}
for dataset_id in sorted(df['Dataset ID'].values):
    # Skip hourly-averaged products; we want the native-resolution records.
    if ('1hr' in dataset_id):
        continue
    # Branch 1: fully QC'd ("final") datasets matching the site substrings.
    if any(x in dataset_id for x in substring) and not any(x in dataset_id for x in prelim) and ('final' in dataset_id):
        print(dataset_id)
        try:
            d = ERDDAP(server=server_url,
                       protocol='tabledap',
                       response='csv'
                       )
            d.dataset_id=dataset_id
            d.variables = ['latitude',
                           'longitude',
                           'depth',
                           'Chlorophyll_Fluorescence',
                           'time',
                           'timeseries_id']
            # Keep only records at or below maxdepth (0 == surface cutoff).
            d.constraints = {'depth>=':maxdepth}
        except HTTPError:
            # NOTE(review): if URL generation fails, `d` from a previous
            # iteration is reused by the block below -- confirm this is
            # acceptable (presumably failures are rare).
            print('Failed to generate url {}'.format(dataset_id))
        try:
            df_m = d.to_pandas(
                index_col='time (UTC)',
                parse_dates=True,
                skiprows=(1,) # units information can be dropped.
            )
            df_m.sort_index(inplace=True)
            # Strip units from column names: 'depth (m)' -> 'depth'.
            df_m.columns = [x[1].split()[0] for x in enumerate(df_m.columns)]
            dfs.update({dataset_id:df_m})
        except:
            # Bare except: any download/parse failure silently skips the
            # dataset (deliberate best-effort behaviour for batch pulls).
            pass
    # Branch 2: preliminary datasets (only used if listed in `prelim`).
    if any(x in dataset_id for x in prelim) and ('preliminary' in dataset_id):
        print(dataset_id)
        try:
            d = ERDDAP(server=server_url,
                       protocol='tabledap',
                       response='csv'
                       )
            d.dataset_id=dataset_id
            d.variables = ['latitude',
                           'longitude',
                           'depth',
                           'Chlorophyll_Fluorescence',
                           'time',
                           'timeseries_id']
            d.constraints = {'depth>=':maxdepth}
        except HTTPError:
            print('Failed to generate url {}'.format(dataset_id))
        try:
            df_m = d.to_pandas(
                index_col='time (UTC)',
                parse_dates=True,
                skiprows=(1,) # units information can be dropped.
            )
            df_m.sort_index(inplace=True)
            df_m.columns = [x[1].split()[0] for x in enumerate(df_m.columns)]
            #using preliminary for unfinished datasets - very simple qc
            if data_QC:
                # Trim each deployment to its in-water period.
                #overwinter moorings
                if '17bs2c' in dataset_id:
                    df_m=df_m['2017-10-3':'2018-5-1']
                if '16bs2c' in dataset_id:
                    df_m=df_m['2016-10-6':'2017-4-26']
                if '17bsm2a' in dataset_id:
                    df_m=df_m['2017-4-28':'2017-9-22']
                if '18bsm2a' in dataset_id:
                    df_m=df_m['2018-4-30':'2018-10-01']
                if '17bs8a' in dataset_id:
                    df_m=df_m['2017-9-30':'2018-10-1']
                if '18bs8a' in dataset_id:
                    df_m=df_m['2018-10-12':'2019-9-23']
                if '16bs4b' in dataset_id:
                    df_m=df_m['2016-9-26':'2017-9-24']
                if '17bs4b' in dataset_id:
                    df_m=df_m['2017-9-30':'2018-10-1']
                # NOTE(review): '2018-10-12':'2018-9-23' is a reversed
                # (empty) window -- the end year was presumably meant to be
                # 2019, as for 18bs8a above; confirm before relying on it.
                if '18bs4b' in dataset_id:
                    df_m=df_m['2018-10-12':'2018-9-23']
                if '13bs5a' in dataset_id:
                    df_m=df_m['2013-8-18':'2014-10-16']
                if '14bs5a' in dataset_id:
                    df_m=df_m['2014-10-16':'2015-9-24']
                if '16bs5a' in dataset_id:
                    df_m=df_m['2016-9-26':'2017-9-24']
                if '17bs5a' in dataset_id:
                    df_m=df_m['2017-9-30':'2018-10-1']
                # NOTE(review): same reversed-window concern as 18bs4b.
                if '18bs5a' in dataset_id:
                    df_m=df_m['2018-10-12':'2018-9-23']
            dfs.update({dataset_id:df_m})
        except:
            pass
# Stack all retrieved deployments into one time-indexed frame.
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# a single pd.concat over the collected frames is the supported (and
# non-quadratic) replacement and preserves dict-insertion order.
if dfs:
    df_merged = pd.concat(list(dfs.values()))
else:
    df_merged = pd.DataFrame()
df_merged.describe()
# Drop rows with any missing value (e.g. fluorescence gaps).
df_merged = df_merged.dropna()

import matplotlib as mpl
import matplotlib.pyplot as plt

# Depth/time scatter coloured by chlorophyll fluorescence (colour capped at 10).
plt.scatter(df_merged.index, y=df_merged['depth'], s=10, c=df_merged['Chlorophyll_Fluorescence'], vmin=0, vmax=10, cmap='inferno')
plt.plot(df_merged.index, df_merged['Chlorophyll_Fluorescence'])
# Persist the merged near-surface chlorophyll record for this site.
df_merged.to_csv(f'{site_str}_nearsfc_chlor.csv')
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf

from utils.plotting import plot_dataset  # project-local plotting helper
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split

# Load Dataset: expects a CSV with 'x' and 'y' columns (one 2-D point per row).
df = pd.read_csv('data/ex.csv')
dataset = df.copy()
X = dataset.values
x_cords = dataset['x'].values
y_cords = dataset['y'].values
plot_dataset(x_cords, y_cords, 'Full Dataset')
```
### Split the data into train and test
Now split the dataset into a training set and a test set.
Use the test set in the final evaluation of the model.
### Split features from labels
Separate the target value, the "label", from the features. This label is the value that you will train the model to predict.
```
# Train Test Split
# BUGFIX: the original passed random_state=np.random.seed(6); np.random.seed()
# returns None, so sklearn received random_state=None and reproducibility
# relied on the side effect of seeding the global RNG. Passing the integer
# seed directly is the intended, robust idiom.
x_train, x_test, y_train, y_test = train_test_split(
    x_cords, y_cords, test_size=0.20, random_state=6)
# Pair up the held-out coordinates as (n_test, 2) points.
X = np.stack((x_test, y_test), axis=1)
```
### Linear regression
Before building a DNN model, start with a linear regression.
One Variable
Start with a single-variable linear regression, to predict `y` from `x`.
Training a model with `tf.keras` typically starts by defining the model architecture.
In this case use a `keras.Sequential` model. This model represents a sequence of steps. In this case there are two steps:
- Normalize the input `x`.
- Apply a linear transformation $(y = mx+b)$ to produce 1 output using `layers.Dense`.
The number of inputs can either be set by the `input_shape` argument, or automatically when the model is run for the first time.
First create the horsepower `Normalization` layer:
```
# Build the sequential model
# NOTE(review): the narration above mentions a Normalization layer, but this
# model is a single Dense unit (y = m*x + b) applied to raw inputs --
# confirm whether normalization was intentionally omitted.
model = tf.keras.Sequential([
    layers.Dense(1, input_dim=1)  # one weight + one bias: linear regression
])
model.summary()
```
This model will predict `y` from `x`.
Run the untrained model on the first 10 `x` values. The output won't be good, but you'll see that it has the expected shape, (10,1):
```
# Run the untrained model on the first 10 x values; predictions are
# meaningless before training, but the output shape should be (10, 1).
# (The original called model.predict twice and discarded the first result;
# one call is sufficient.)
print(model.predict(x_cords[:10]))
```
Once the model is built, configure the training procedure using the `Model.compile()` method. The most important arguments to compile are the `loss` and the `optimizer`, since these define what will be optimized (here the `logcosh` loss) and how (using `optimizers.Adam`).
```
model.compile(
    # 'lr' is a deprecated alias in tf.keras optimizers (removed in newer
    # Keras releases); 'learning_rate' is the supported keyword.
    optimizer=tf.optimizers.Adam(learning_rate=1e-3),
    # log-cosh behaves like MSE for small errors and like MAE for outliers.
    loss='logcosh')
```
Once the training is configured, use `Model.fit()` to execute the training:
```
%%time
history = model.fit(
x_train, y_train,
epochs=100,
validation_split=0.2,
verbose=0)
```
Visualize the model's training progress using the stats stored in the history object.
```
# Training history as a DataFrame, one row per epoch.
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()

def plot_loss(history):
    """Plot training vs. validation loss per epoch (y-axis clipped to [0, 1])."""
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim([0, 1])
    plt.xlabel('Epoch')
    plt.ylabel('Error [Y]')
    plt.legend()
    plt.grid(True)

plot_loss(history)

# Final loss on the held-out test set.
test_results = model.evaluate(
    x_test, y_test, verbose=0)

# Evaluate the fitted line on an evenly spaced grid for plotting.
x = tf.linspace(-4.0, 4.0, 9)
y = model.predict(x)

def plot_model(x, y):
    """Overlay the model's predicted line on the training scatter."""
    plt.scatter(x_train, y_train, label='Data')
    plt.plot(x, y, color='k', label='Predictions')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.legend()

plot_model(x,y)
print(f"Loss: {test_results}")
```
| github_jupyter |
### Plot Cumulative Distribution Of Sportive Behavior Over Time
```
%load_ext autoreload
%autoreload 2
%matplotlib notebook
from sensible_raw.loaders import loader
from world_viewer.cns_world import CNSWorld
from world_viewer.synthetic_world import SyntheticWorld
from world_viewer.glasses import Glasses
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, PowerNorm
import math
import pandas as pd
import numpy as np
#import dask.dataframe as dd
import time
import seaborn as sns
# load data and restict timeseries
# data from "PreprocessOpinions/FitnessAsBehavior.ipynb"
data = pd.read_pickle("data/op_fitness.pkl")
#data.reset_index(inplace=True)
opinion = "op_fitness"
data = data[data.time >= CNSWorld.CNS_TIME_BEGIN]
data = data[data.time <= CNSWorld.CNS_TIME_END]
data.head()
# calc cumulative distribution function
def cdf_from_data(data, cdfx):
    """Empirical CDF of ``data`` evaluated at the grid points ``cdfx``.

    Args:
        data: 1-d array-like of sample values (NaNs count toward the total
            but never toward the "<= x" tally, as in the original loop).
        cdfx: 1-d array-like of evaluation points.

    Returns:
        pandas.DataFrame indexed by 'x' (= cdfx) with a 'cdf' column holding
        the fraction of samples <= x for each grid point.
    """
    # Sorting once and counting via searchsorted replaces the original
    # O(len(cdfx) * len(data)) boolean-mask loop with an O(n log n) lookup;
    # side='right' counts values <= x, matching `data[data <= i]`.
    sorted_vals = np.sort(np.asarray(data))
    counts = np.searchsorted(sorted_vals, np.asarray(cdfx), side='right')
    y_values = counts / len(sorted_vals)
    return pd.DataFrame({'x': cdfx, 'cdf': y_values}).set_index("x")
cdfx = np.linspace(start=0,stop=4,num=400)
cdf = data.groupby("time")[opinion + "_abs"].apply(lambda d: cdf_from_data(d, cdfx))#
# load cdf if previously calculated
#cdf = pd.read_pickle("tmp/cdf_fitness.pkl")
# plot cdf as heatmap (fig.: 3.3)
fig, ax = plt.subplots(1, 1)
num_ticks = 5
# the index positions of the yticks along the cdfx grid
# (np.int was removed in NumPy 1.24; the builtin int is the correct dtype)
yticks = np.linspace(0, len(cdfx) - 1, num_ticks, dtype=int)
# the labels shown at those yticks (rounded grid values)
yticklabels = [round(cdfx[idx]) for idx in yticks]
cmap = sns.cubehelix_palette(60, hue=0.05, rot=0, light=0.9, dark=0, as_cmap=True)
# NOTE(review): 'df2' is not defined anywhere in this notebook -- presumably
# the CDF table pivoted to an (x grid) x (time) matrix, e.g.
# cdf.reset_index().pivot(index='x', columns='time', values='cdf').
# Confirm against the original analysis before re-running this cell.
ax = sns.heatmap(df2, cmap=cmap, xticklabels=80, yticklabels=yticklabels, vmin=0.4, vmax=1, cbar_kws={'label': 'cumulative distribution function'})#, norm=LogNorm(vmin=0.1, vmax=1))
#ax.hlines([300], *ax.get_xlim(), linestyles="dashed")
ax.set_yticks(yticks)
ax.invert_yaxis()
plt.xticks(rotation=70)
plt.yticks(rotation=0)
plt.ylabel(r"$\bar b(t)$")
#ax.set_yscale('log')
#sns.heatmap(cdf.cdf, annot=False)
fig.savefig("test.png", dpi=600, bbox_inches='tight')

# plot cdf for a single timestep (survival form: 1 - CDF, log y-axis)
fig, ax = plt.subplots(1, 1)
ax.plot(cdf.loc["2014-02-09"].reset_index().x, 1 - cdf.loc["2014-11-30", "cdf"].values)
ax.set_yscale('log')
```
| github_jupyter |
# Finding Conserved Patterns Across Two Time Series
## AB-Joins
This tutorial is adapted from the [Matrix Profile I](https://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf) paper and replicates Figures 9 and 10.
Previously, we had introduced a concept called [time series motifs](https://stumpy.readthedocs.io/en/latest/Tutorial_STUMPY_Basics.html), which are conserved patterns found within a single time series, $T$, that can be discovered by computing its [matrix profile](https://stumpy.readthedocs.io/en/latest/Tutorial_The_Matrix_Profile.html) using STUMPY. This process of computing a matrix profile with one time series is commonly known as a "self-join" since the subsequences within time series $T$ are only being compared with itself. However, what do you do if you have two time series, $T_{A}$ and $T_{B}$, and you want to know if there are any subsequences in $T_{A}$ that can also be found in $T_{B}$? By extension, a motif discovery process involving two time series is often referred to as an "AB-join" since all of the subsequences within time series $T_{A}$ are compared to all of the subsequences in $T_{B}$.
It turns out that "self-joins" can be trivially generalized to "AB-joins" and the resulting matrix profile, which annotates every subsequence in $T_{A}$ with its nearest subsequence neighbor in $T_{B}$, can be used to identify similar (or unique) subsequences across any two time series. Additionally, as long as $T_{A}$ and $T_{B}$ both have lengths that are greater than or equal to the subsequence length, $m$, there is no requirement that the two time series must be the same length.
In this short tutorial we will demonstrate how to find a conserved pattern across two independent time series using STUMPY.
## Getting Started
Let's import the packages that we'll need to load, analyze, and plot the data.
```
%matplotlib inline
import stumpy
import pandas as pd
import numpy as np
from IPython.display import IFrame
import matplotlib.pyplot as plt
plt.style.use('stumpy.mplstyle')
```
## Finding Similarities in Music Using STUMPY
In this tutorial we are going to analyze two songs, “Under Pressure” by Queen and David Bowie as well as “Ice Ice Baby” by Vanilla Ice. For those who are unfamiliar, in 1990, Vanilla Ice was alleged to have sampled the bass line from "Under Pressure" without crediting the original creators and the copyright claim was later settled out of court. Have a look at this short video and see if you can hear the similarities between the two songs:
```
IFrame(width="560", height="315", src="https://www.youtube.com/embed/HAA__AW3I1M")
```
The two songs certainly share some similarities! But, before we move forward, imagine if you were the judge presiding over this court case. What analysis result would you need to see in order to be convinced, beyond a shadow of a doubt, that there was wrongdoing?
## Loading the Music Data
To make things easier, instead of using the raw music audio from each song, we're only going to use audio that has been pre-converted to a single frequency channel (i.e., the 2nd MFCC channel sampled at 100Hz).
```
queen_df = pd.read_csv("https://zenodo.org/record/4294912/files/queen.csv?download=1")
vanilla_ice_df = pd.read_csv("https://zenodo.org/record/4294912/files/vanilla_ice.csv?download=1")
print("Length of Queen dataset : " , queen_df.size)
print("Length of Vanilla ice dataset : " , vanilla_ice_df.size)
```
## Visualizing the Audio Frequencies
It was very clear in the earlier video that there are strong similarities between the two songs. However, even with this prior knowledge, it's incredibly difficult to spot the similarities (below) due to the sheer volume of the data:
```
fig, axs = plt.subplots(2, sharex=True, gridspec_kw={'hspace': 0})
plt.suptitle('Can You Spot The Pattern?', fontsize='30')
axs[0].set_title('Under Pressure', fontsize=20, y=0.8)
axs[1].set_title('Ice Ice Baby', fontsize=20, y=0)
axs[1].set_xlabel('Time')
axs[0].set_ylabel('Frequency')
axs[1].set_ylabel('Frequency')
ylim_lower = -25
ylim_upper = 25
axs[0].set_ylim(ylim_lower, ylim_upper)
axs[1].set_ylim(ylim_lower, ylim_upper)
axs[0].plot(queen_df['under_pressure'])
axs[1].plot(vanilla_ice_df['ice_ice_baby'], c='orange')
plt.show()
```
## Performing an AB-Join with STUMPY
Fortunately, using the `stumpy.stump` function, we can quickly compute the matrix profile by performing an AB-join and this will help us easily identify and locate the similar subsequence(s) between these two songs:
```
m = 500
queen_mp = stumpy.stump(T_A = queen_df['under_pressure'],
m = m,
T_B = vanilla_ice_df['ice_ice_baby'],
ignore_trivial = False)
```
Above, we call `stumpy.stump` by specifying our two time series `T_A = queen_df['under_pressure']` and `T_B = vanilla_ice_df['ice_ice_baby']`. Following the original published work, we use a subsequence window length of `m = 500` and, since this is not a self-join, we set `ignore_trivial = False`. The resulting matrix profile, `queen_mp`, essentially serves as an annotation for `T_A` so, for every subsequence in `T_A`, we find its closest subsequence in `T_B`.
As a brief reminder of the matrix profile data structure, each row of `queen_mp` corresponds to each subsequence within `T_A`, the first column in `queen_mp` records the matrix profile value for each subsequence in `T_A` (i.e., the distance to its nearest neighbor in `T_B`), and the second column in `queen_mp` keeps track of the index location of the nearest neighbor subsequence in `T_B`.
One additional side note is that AB-joins are not symmetrical in general. That is, unlike a self-join, the order of the input time series matter. So, an AB-join will produce a different matrix profile than a BA-join (i.e., for every subsequence in `T_B`, we find its closest subsequence in `T_A`).
## Visualizing the Matrix Profile
Just as we've done [in the past](https://stumpy.readthedocs.io/en/latest/Tutorial_STUMPY_Basics.html), we can now look at the matrix profile, `queen_mp`, computed from our AB-join:
```
queen_motif_index = queen_mp[:, 0].argmin()
plt.xlabel('Subsequence')
plt.ylabel('Matrix Profile')
plt.scatter(queen_motif_index,
queen_mp[queen_motif_index, 0],
c='red',
s=100)
plt.plot(queen_mp[:,0])
plt.show()
```
Now, to discover the global motif (i.e., the most conserved pattern), `queen_motif_index`, all we need to do is identify the index location of the lowest distance value in the `queen_mp` matrix profile (see red circle above).
```
queen_motif_index = queen_mp[:, 0].argmin()
print(f'The motif is located at index {queen_motif_index} of "Under Pressure"')
```
In fact, the index location of its nearest neighbor in "Ice Ice Baby" is stored in `queen_mp[queen_motif_index, 1]`:
```
vanilla_ice_motif_index = queen_mp[queen_motif_index, 1]
print(f'The motif is located at index {vanilla_ice_motif_index} of "Ice Ice Baby"')
```
## Overlaying The Best Matching Motif
After identifying the motif and retrieving the index location from each song, let's overlay both of these subsequences and see how similar they are to each other:
```
plt.plot(queen_df.iloc[queen_motif_index : queen_motif_index + m].values, label='Under Pressure')
plt.plot(vanilla_ice_df.iloc[vanilla_ice_motif_index:vanilla_ice_motif_index+m].values, label='Ice Ice Baby')
plt.xlabel('Time')
plt.ylabel('Frequency')
plt.legend()
plt.show()
```
Wow, the resulting overlay shows really strong correlation between the two subsequences! Are you convinced?
## Summary
And that's it! In just a few lines of code, you learned how to compute a matrix profile for two time series using STUMPY and identified the top-most conserved behavior between them. While this tutorial has focused on audio data, there are many further applications such as detecting imminent mechanical issues in sensor data by comparing to known experimental or historical failure datasets or finding matching movements in commodities or stock prices, just to name a few.
You can now import this package and use it in your own projects. Happy coding!
## Resources
[Matrix Profile I](https://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf)
[STUMPY Documentation](https://stumpy.readthedocs.io/en/latest/)
[STUMPY Matrix Profile Github Code Repository](https://github.com/TDAmeritrade/stumpy)
| github_jupyter |
# PCMark benchmark on Android
The goal of this experiment is to run benchmarks on a Pixel device running Android with an EAS kernel and collect results. The analysis phase will consist in comparing EAS with other schedulers, that is comparing *sched* governor with:
- interactive
- performance
- powersave
- ondemand
The benchmark we will be using is ***PCMark*** (https://www.futuremark.com/benchmarks/pcmark-android). You will need to **manually install** the app on the Android device in order to run this Notebook.
When opening PCMark for the first time, you will need to install the work benchmark from inside the app.
```
import logging
from conf import LisaLogging
LisaLogging.setup()
%pylab inline
import copy
import os
from time import sleep
from subprocess import Popen
import pandas as pd
# Support to access the remote target
import devlib
from env import TestEnv
# Support for trace events analysis
from trace import Trace
# Suport for FTrace events parsing and visualization
import trappy
```
## Test environment setup
For more details on this please check out **examples/utils/testenv_example.ipynb**.
In case more than one Android device is connected to the host, you must specify the ID of the device you want to target in `my_target_conf`. Run `adb devices` on your host to get the ID. Also, you have to specify the path to your Android SDK in ANDROID_HOME.
```
# Setup a target configuration
my_target_conf = {
# Target platform and board
"platform" : 'android',
# Add target support
"board" : 'pixel',
# Device ID
"device" : "HT6670300102",
"ANDROID_HOME" : "/home/vagrant/lisa/tools/android-sdk-linux/",
# Define devlib modules to load
"modules" : [
'cpufreq' # enable CPUFreq support
],
}
my_tests_conf = {
# Folder where all the results will be collected
"results_dir" : "Android_PCMark",
# Platform configurations to test
"confs" : [
{
"tag" : "pcmark",
"flags" : "ftrace", # Enable FTrace events
"sched_features" : "ENERGY_AWARE", # enable EAS
},
],
}
# Initialize a test environment using:
# the provided target configuration (my_target_conf)
# the provided test configuration (my_test_conf)
te = TestEnv(target_conf=my_target_conf, test_conf=my_tests_conf)
target = te.target
```
## Support Functions
This set of support functions will help us running the benchmark using different CPUFreq governors.
```
def set_performance():
    """Pin every CPU to the 'performance' cpufreq governor."""
    target.cpufreq.set_all_governors('performance')

def set_powersave():
    """Pin every CPU to the 'powersave' cpufreq governor."""
    target.cpufreq.set_all_governors('powersave')

def set_interactive():
    """Pin every CPU to the 'interactive' cpufreq governor."""
    target.cpufreq.set_all_governors('interactive')

def set_sched():
    """Pin every CPU to the 'sched' (sched-freq / EAS) governor."""
    target.cpufreq.set_all_governors('sched')

def set_ondemand():
    """Pin every CPU to 'ondemand' and use its fastest sampling rate."""
    target.cpufreq.set_all_governors('ondemand')
    for cpu in target.list_online_cpus():
        tunables = target.cpufreq.get_governor_tunables(cpu)
        # Use the minimum sampling period so ondemand reacts as quickly
        # as the platform allows.
        target.cpufreq.set_governor_tunables(
            cpu,
            'ondemand',
            **{'sampling_rate' : tunables['sampling_rate_min']}
        )
# CPUFreq configurations to test
confs = {
'performance' : {
'label' : 'prf',
'set' : set_performance,
},
#'powersave' : {
# 'label' : 'pws',
# 'set' : set_powersave,
#},
'interactive' : {
'label' : 'int',
'set' : set_interactive,
},
#'sched' : {
# 'label' : 'sch',
# 'set' : set_sched,
#},
#'ondemand' : {
# 'label' : 'odm',
# 'set' : set_ondemand,
#}
}
# The set of results for each comparison test
results = {}
# Check if PCMark is available on the device
def check_packages(pkgname):
    """Raise RuntimeError unless ``pkgname`` is installed on the target.

    Args:
        pkgname (str): Package name (used as a case-insensitive grep pattern).
    """
    try:
        # grep exits non-zero when nothing matches, which makes
        # target.execute() raise -- that is the "not installed" signal.
        # (The original bound the output to an unused variable.)
        target.execute('pm list packages -f | grep -i {}'.format(pkgname))
    except Exception:
        raise RuntimeError('Package: [{}] not available on target'.format(pkgname))

# Check for specified PKG name being available on target
check_packages('com.futuremark.pcmark.android.benchmark')
# Function that helps run a PCMark experiment
def pcmark_run(exp_dir):
    """Run the PCMark 'work' benchmark once on the target and collect scores.

    Args:
        exp_dir (str): Directory where log.txt and score.txt are written.

    Returns:
        str: Path to the score file (one 'PCMA_*_SCORE value' pair per line).
    """
    # Unlock device screen (assume no password required)
    target.execute('input keyevent 82')
    # Start PCMark on the target device
    target.execute('monkey -p com.futuremark.pcmark.android.benchmark -c android.intent.category.LAUNCHER 1')
    # Wait few seconds to make sure the app is loaded
    sleep(5)
    # Flush entire log
    target.clear_logcat()
    # Run performance workload (assume screen is vertical)
    target.execute('input tap 750 1450')
    # Wait for completion (10 minutes in total) and collect log
    log_file = os.path.join(exp_dir, 'log.txt')
    # Wait 5 minutes
    sleep(300)
    # Start collecting the log
    with open(log_file, 'w') as log:
        # BUGFIX: with shell=True, Popen passes only the FIRST list element
        # to the shell as the command; the logcat filter arguments were
        # silently dropped. A single command string applies the filters
        # (VirtualMachineState messages only) as intended.
        logcat = Popen('adb logcat com.futuremark.pcmandroid.VirtualMachineState:* *:S',
                       stdout=log,
                       shell=True)
        # Wait an additional five minutes for the benchmark to complete
        sleep(300)
        # Terminate logcat
        logcat.kill()
    # Get scores from logcat
    score_file = os.path.join(exp_dir, 'score.txt')
    os.popen('grep -o "PCMA_.*_SCORE .*" {} | sed "s/ = / /g" | sort -u > {}'.format(log_file, score_file))
    # Close application
    target.execute('am force-stop com.futuremark.pcmark.android.benchmark')
    return score_file
# Function that helps run PCMark for different governors
def experiment(governor, exp_dir):
    """Run PCMark under ``governor`` and return its parsed scores.

    Args:
        governor (str): Key into the module-level ``confs`` dict.
        exp_dir (str): Output directory (created if missing).

    Returns:
        dict: {'dir': exp_dir, 'scores': {score_name: float}}.
    """
    os.system('mkdir -p {}'.format(exp_dir));
    logging.info('------------------------')
    logging.info('Run workload using %s governor', governor)
    # Switch all CPUs to the requested governor before the run.
    confs[governor]['set']()

    ### Run the benchmark ###
    score_file = pcmark_run(exp_dir)

    # Save the score as a dictionary ("NAME value" per line in score_file)
    scores = dict()
    with open(score_file, 'r') as f:
        lines = f.readlines()
        for l in lines:
            info = l.split()
            scores.update({info[0] : float(info[1])})

    # return all the experiment data
    return {
        'dir' : exp_dir,
        'scores' : scores,
    }
```
## Run PCMark and collect scores
```
# Run the benchmark in all the configured governors
for governor in confs:
test_dir = os.path.join(te.res_dir, governor)
res = experiment(governor, test_dir)
results[governor] = copy.deepcopy(res)
```
After running the benchmark for the specified governors we can show and plot the scores:
```
# Create results DataFrame: one column per governor, one row per score name.
data = {}
for governor in confs:
    data[governor] = {}
    # NOTE(review): dict.iteritems() is Python 2 only -- this notebook
    # predates Python 3 (see %pylab usage); change to .items() if porting.
    for score_name, score in results[governor]['scores'].iteritems():
        data[governor][score_name] = score
df = pd.DataFrame.from_dict(data)
df

# Grouped bar chart comparing governors across all PCMark sub-scores.
df.plot(kind='bar', rot=45, figsize=(16,8),
        title='PCMark scores vs SchedFreq governors');
```
| github_jupyter |
```
import numpy as numpy
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
train = pd.read_csv('titanic_train.csv')
train.head()
```
### Missing Data
```
train.isnull()
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
```
Roughly 20 percent of the Age data is missing. The proportion of Age missing is likely small enough for reasonable replacement with some form of imputation. Looking at the Cabin column, it looks like we are just missing too much of that data to do something useful with at a basic level. We'll probably drop this later, or change it to another feature like "Cabin Known: 1 or 0"
```
sns.set_style('whitegrid')
sns.countplot(x='Survived',data=train)
sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Sex',data=train,palette='RdBu_r')
sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Pclass',data=train,palette='rainbow')
sns.distplot(train['Age'].dropna(),kde=False,color='darkred',bins=40)
train['Age'].hist(bins=30,color='darkred',alpha=0.3)
sns.countplot(x='SibSp',data=train)
train['Fare'].hist(color='green',bins=40,figsize=(8,4))
```
### Cufflinks for plots
```
import cufflinks as cf
cf.go_offline()
train['Fare'].iplot(kind='hist',bins=30,color='green')
```
### Data Cleaning
We want to fill in missing age data instead of just dropping the missing age data rows. One way to do this is by filling in the mean age of all the passengers (imputation). However we can be smarter about this and check the average age by passenger class.
```
plt.figure(figsize=(12, 7))
sns.boxplot(x='Pclass',y='Age',data=train,palette='winter')
```
We can see the wealthier passengers in the higher classes tend to be older, which makes sense. We'll use these average age values to impute based on Pclass for Age.
```
def impute_age(cols):
    """Fill a missing Age with the typical age for the passenger's class.

    Expects ``cols`` as (Age, Pclass); real ages pass through unchanged,
    NaNs are replaced by the class-typical age read off the boxplot above.
    """
    age, pclass = cols[0], cols[1]
    if not pd.isnull(age):
        return age
    # Typical ages per passenger class; any other class falls back to 24.
    class_ages = {1: 37, 2: 29}
    return class_ages.get(pclass, 24)
```
Now apply that function!
```
train['Age'] = train[['Age','Pclass']].apply(impute_age,axis=1)
```
Now let's check that heat map again!
```
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
```
Great! Let's go ahead and drop the Cabin column and the row in Embarked that is NaN.
```
train.drop('Cabin',axis=1,inplace=True)
train.head()
train.dropna(inplace=True)
```
### Converting Categorical Features¶
We'll need to convert categorical features to dummy variables using pandas! Otherwise our machine learning algorithm won't be able to directly take in those features as inputs.
```
train.info()
pd.get_dummies(train['Embarked'],drop_first=True).head()
sex = pd.get_dummies(train['Sex'],drop_first=True)
embark = pd.get_dummies(train['Embarked'],drop_first=True)
train.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)
train.head()
train = pd.concat([train,sex,embark],axis=1)
train.head()
```
| github_jupyter |
```
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score

# Toy corpus for demonstrating the bag-of-words representation.
documents = ['This is the first sentence.',
             'This one is the second sentence.',
             'And this is the third one.',
             'Is this the first sentence?']

vectorizer = CountVectorizer()
X = vectorizer.fit_transform(documents)
# X.toarray() is the bag-of-words matrix (documents x vocabulary counts)
print(X.toarray())
# Get the unique words
print(vectorizer.get_feature_names())
# the above array represents the number of times each feature name
# appears in the sentence
# supervised learning vs unsupervised learning
#
# supervised learning includes linear regression, logistic regression, support vector machine
# this is called supervised because it infers a function from labeled training data
# consisting of a set of training examples
#
# unsupervised learning includes principal component analysis and clustering
# unsupervised learning attempts to find previously unknown patterns in data, without preexisting labels
from figures import plot_kmeans_interactive
plot_kmeans_interactive()
# BUGFIX: the private 'samples_generator' module was deprecated in
# scikit-learn 0.22 and removed in 0.24; make_blobs lives in
# sklearn.datasets proper.
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt

# Generate 300 points in 4 well-separated Gaussian blobs for the k-means demo.
X, y = make_blobs(n_samples=300, centers=4,
                  random_state=0, cluster_std=0.60)
plt.scatter(X[:, 0], X[:, 1])
from sklearn.cluster import KMeans
km = KMeans(n_clusters=4)
km.fit(X)
print(km.cluster_centers_)
import numpy as np
from scipy.spatial import distance
distortions = []
K = range(1, 10)
for k in K:
km = KMeans(n_clusters=k)
km.fit(X)
distortions.append(sum(np.min(distance.cdist(X, km.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0])
# Plot the elbow
plt.plot(K, distortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')
plt.show()
def optimal(dist_arr):
    """Pick the elbow k: the k at which the distortion curve bends the most.

    Args:
        dist_arr: Sequence of distortions for k = 1..len(dist_arr), in order.

    Returns:
        int: k with the largest change in slope (absolute second difference)
        among interior points; 0 if the sequence has fewer than 3 points or
        is perfectly straight.
    """
    best_delta = 0
    best_k = 0  # renamed from 'optimal', which shadowed the function name
    # Only interior points have both a previous and a next slope; the
    # original computed a zero delta for the endpoints, which never won.
    for index in range(1, len(dist_arr) - 1):
        prev_slope = dist_arr[index - 1] - dist_arr[index]
        next_slope = dist_arr[index] - dist_arr[index + 1]
        delta_slope = abs(prev_slope - next_slope)
        if delta_slope > best_delta:
            best_delta = delta_slope
            best_k = index + 1  # k is 1-based
    return best_k
optimal(distortions)
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score

# Toy corpus: roughly half the sentences mention cats and half mention
# Google products, so a 2-cluster model should separate the two topics.
documents = ["This little kitty came to play when I was eating at a restaurant.",
             "Merley has the best squooshy kitten belly.",
             "Google Translate app is incredible.",
             "If you open 100 tab in google you get a smiley face.",
             "Best cat photo I've ever taken.",
             "Climbing ninja cat.",
             "Impressed with google map feedback.",
             "Key promoter extension for Google Chrome."]
# vec = CountVectorizer()
vec = TfidfVectorizer(stop_words='english')
J = vec.fit_transform(documents)
print(J.toarray())  # this matrix is called a "bag of words"
# NOTE(review): get_feature_names() was deprecated in scikit-learn 1.0 and
# removed in 1.2 in favour of get_feature_names_out() -- update if upgrading.
print(vec.get_feature_names())
print(J.shape)

# 'k-means++' seeds the centroids far apart for faster, more stable runs.
model = KMeans(n_clusters=2, init='k-means++')
model.fit(J)

# Classify unseen sentences: transform them with the *fitted* vectorizer,
# then ask the model for the nearest cluster.
Y = vec.transform(["chrome browser to open."])
print('Y:')
print(Y.toarray())
prediction = model.predict(Y)
print(prediction)

Y = vec.transform(["My cat is hungry."])
prediction = model.predict(Y)
print(prediction)

model.get_params()

# beautiful
# Print each training sentence followed by the cluster it was assigned to.
for index, sentence in enumerate(documents):
    print(sentence)
    print(model.predict(J[index]))
```
| github_jupyter |
## Exercise 3
In the videos you looked at how you would improve Fashion MNIST using Convolutions. For your exercise see if you can improve MNIST to 99.8% accuracy or more using only a single convolutional layer and a single MaxPooling 2D. You should stop training once the accuracy goes above this amount. It should happen in less than 20 epochs, so it's ok to hard code the number of epochs for training, but your training must end once it hits the above metric. If it doesn't, then you'll need to redesign your layers.
I've started the code for you -- you need to finish it!
When 99.8% accuracy has been hit, you should print out the string "Reached 99.8% accuracy so cancelling training!"
```
import tensorflow as tf
from os import path, getcwd, chdir
# DO NOT CHANGE THE LINE BELOW. If you are developing in a local
# environment, then grab mnist.npz from the Coursera Jupyter Notebook
# and place it inside a local folder and edit the path to that location
path = f"{getcwd()}/../tmp2/mnist.npz"

# tf 1.x session configuration: allocate GPU memory on demand instead of
# grabbing it all up front (ConfigProto/Session are tf 1.x APIs).
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# GRADED FUNCTION: train_mnist_conv
def train_mnist_conv():
    """Train a single Conv2D + MaxPooling MNIST classifier.

    Training is stopped early by a Keras callback once training accuracy
    reaches 99.8%.

    Returns:
        tuple: (history.epoch, final training accuracy).
    """
    # Please write your code only where you are indicated.
    # please do not remove model fitting inline comments.

    # YOUR CODE STARTS HERE
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs=None):
            # tf 1.x reports the metric as 'acc', tf 2.x as 'accuracy'.
            # Guard with a default so a missing key yields 0.0 instead of
            # raising TypeError ("'>=' not supported ... NoneType").
            # (Original used a mutable default `logs={}`, an anti-pattern.)
            logs = logs or {}
            acc = logs.get('acc', logs.get('accuracy', 0.0))
            if acc >= 0.998:
                print('\nReached 99.8% accuracy so cancelling training!')
                self.model.stop_training = True
    # YOUR CODE ENDS HERE

    mnist = tf.keras.datasets.mnist
    (training_images, training_labels), (test_images, test_labels) = mnist.load_data(path=path)
    # YOUR CODE STARTS HERE
    # Add the channel dimension expected by Conv2D and scale pixels to [0, 1].
    training_images = training_images.reshape(60000, 28, 28, 1)
    training_images = training_images / 255.0
    callbacks = myCallback()
    # YOUR CODE ENDS HERE

    model = tf.keras.models.Sequential([
        # YOUR CODE STARTS HERE
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')
        # YOUR CODE ENDS HERE
    ])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    # model fitting
    history = model.fit(
        # YOUR CODE STARTS HERE
        training_images, training_labels, epochs=30, callbacks=[callbacks]
        # YOUR CODE ENDS HERE
    )
    # model fitting
    # NOTE(review): under tf 2.x the history key is 'accuracy', not 'acc'.
    return history.epoch, history.history['acc'][-1]
_, _ = train_mnist_conv()
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work and close the notebook
%%javascript
<!-- Save the notebook -->
IPython.notebook.save_checkpoint();
%%javascript
IPython.notebook.session.delete();
window.onbeforeunload = null
setTimeout(function() { window.close(); }, 1000);
```
| github_jupyter |
# Distributing standardized COMBINE archives with Tellurium
<div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/tellurium-and-libroadrunner.png" width="60%" style="padding: 20px"></div>
<div align='center' style='font-size:100%'>
Veronica L. Porubsky, BS
<div align='center' style='font-size:100%'>Sauro Lab PhD Student, Department of Bioengineering<br>
Head of Outreach, <a href="https://reproduciblebiomodels.org/dissemination-and-training/seminar/">Center for Reproducible Biomedical Modeling</a><br>
University of Washington, Seattle, WA USA
</div>
<hr>
To facilitate design and comprehension of their models, modelers should use standard systems biology formats for
model descriptions, simulation experiments, and to distribute stand-alone archives which can regenerate the modeling study. We will discuss three of these standards - the Systems Biology Markup Language (SBML), the Simulation Experiment Description Markup Language (SED-ML), and the COMBINE archive/ inline Open Modeling EXchange format (OMEX) format.
## TOC
* [Links to relevant resources](#relevant-resources)
* [Packages and Constants](#standardized-formats-packages-and-constants)
* [Import and export capabilities with Tellurium](#import-export)
* [Importing SBML directly from the BioModels Database for simulation](#import-from-biomodels)
* [Exporting SBML or Antimony models](#export-to-sbml-or-antimony)
* [Writing SED-ML with PhraSED-ML](#writing-phrasedml)
* [Exporting SED-ML](#exporting-sedml)
* [Generating a COMBINE archive](#combine-archive)
* [Exercises](#exercises)
# Links to relevant resources <a class="anchor" id="relevant-resources"></a>
<a href="http://model.caltech.edu/">SBML specification</a><br>
<a href="http://sbml.org/SBML_Software_Guide/SBML_Software_Matrix">SBML tool support</a><br>
<a href="https://sed-ml.org/">SED-ML specification</a><br>
<a href="https://sed-ml.org/showcase.html">SED-ML tool support</a><br>
<a href="http://phrasedml.sourceforge.net/phrasedml__api_8h.html">PhraSED-ML documentation</a><br>
<a href="http://phrasedml.sourceforge.net/Tutorial.html">PhraSED-ML tutorial</a><br>
<a href="https://tellurium.readthedocs.io/en/latest/">Tellurium documentation</a><br>
<a href="https://libroadrunner.readthedocs.io/en/latest/">libRoadRunner documentation</a><br>
<a href="https://tellurium.readthedocs.io/en/latest/antimony.html">Antimony documentation</a><br>
<a href="http://copasi.org/Download/">COPASI download</a><br>
# Packages and constants <a class="anchor" id="standardized-formats-packages-and-constants"></a>
```
!pip install tellurium -q
import tellurium as te
import phrasedml
```
# Import and export capabilities with Tellurium <a class="anchor" id="import-export"></a>
Models can be imported from the BioModels Database, given the appropriate BioModel ID using a standard URL format to programmatically access the model of interest.
We will use this model of respiratory oscillations in Saccharomyces cerevisae by <a href="https://www.ebi.ac.uk/biomodels/BIOMD0000000090">Jana Wolf et al. (2001)</a> </div> as an example:
<br>
<div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/wolf_publication.PNG" width="65%" style="padding: 20px"></div>
<br>
<div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/wolf_network.PNG" width="65%" style="padding: 20px"></div>
# Importing SBML directly from the BioModels Database for simulation <a class="anchor" id="import-from-biomodels"></a>
SBML is a software data format for describing computational biological models. Markup languages allow you to separate annotations and documentation about the content from the content itself, using standardized tags. So the model and annotations are stored in a single file, but tools that support SBML are designed to interpret these to perform tasks. SBML is independent of any particular software tool and is broadly applicable to the modeling domain. It is open and free, and widely supported. Tools might allow for writing the model, simulating the model, visualizing the network, etc.
We will demonstrate how Tellurium supports import and export of SBML model files.
```
# Import an SBML model from the BioModels Database using a url
wolf = te.loadSBMLModel("https://www.ebi.ac.uk/biomodels/model/download/BIOMD0000000090.2?filename=BIOMD0000000090_url.xml")
wolf.simulate(0, 200, 1000)
wolf.plot(figsize = (15, 10), xtitle = 'Time', ytitle = 'Concentration')
```
# Exporting SBML or Antimony models <a class="anchor" id="export-to-sbml-or-antimony"></a>
```
# Export the model you just accessed from BioModels to the current directory as an SBML string
wolf.reset()
wolf.exportToSBML('Wolf2001_Respiratory_Oscillations.xml', current = True)
# You can also export the model to the current directory as an Antimony string
# Let's take a look at the string first
print(wolf.getCurrentAntimony())
# Edit the Antimony string of Wolf et al.:
# Update model name for ease of use with PhraSED-ML
# Remove model name annotatations -- causes error with SED-ML export
wolf = te.loada("""
// Created by libAntimony v2.12.0
model wolf
// Compartments and Species:
compartment c0, c1, c2;
species $sul_ex in c0, $eth_ex in c0, $oxy_ex in c0, oxy in c2, $H2O in c2;
species A3c in c1, aps in c1, $PPi in c1, pap in c1, sul in c1, eth in c1;
species $A2c in c1, hyd in c1, cys in c1, N2 in c1, $N1 in c1, aco in c1;
species oah in c1, S1 in c2, $S2 in c2, $C1 in c2, $C2 in c2, $A2m in c2;
species A3m in c2, $Ho in c1, $Hm in c2;
// Assignment Rules:
A2c := Ac - A3c;
N1 := N - N2;
S2 := S - S1;
A2m := Am - A3m;
// Reactions:
v1: $sul_ex => sul; c0*k_v0/(1 + (cys/Kc)^n);
v13: $eth_ex => eth; c0*k_v13;
v2: sul + A3c => aps + $PPi; c1*k2*sul*A3c;
v10: $oxy_ex => oxy; c0*k_v10;
v14: oxy => $oxy_ex; c2*k14*oxy;
v3: aps + A3c => pap + $A2c; c1*k3*aps*A3c;
v4: pap + 3 N2 => hyd + 3 $N1; c1*k4*pap*N2;
v5: hyd + oah => cys; c1*k5*hyd*oah;
v6: cys => ; c1*k6*cys;
v7: eth + 2 $N1 => aco + 2 N2; c1*k7*eth*N1;
v15: aco => oah; c1*k15*aco;
v17: hyd => ; c1*k17*hyd;
v18: oah => ; c1*k18*oah;
v8: $S2 + aco => S1; c2*k8*aco*S2;
v9: S1 + 4 $N1 => $S2 + 4 N2; c2*k9*S1*N1;
v11a: $C1 + $Hm + N2 => $C2 + $Ho + $N1; c2*k11*N2*oxy/((a*N2 + oxy)*(1 + (hyd/Kh)^m));
v11a2: $C2 + oxy => $C1 + $H2O; c2*k11*N2*oxy/((a*N2 + oxy)*(1 + (hyd/Kh)^m));
v16: $A2c + A3m => $A2m + A3c; c2*k16*A3m*A2c;
v11b: $Ho + $A2m => $Hm + A3m; (c2*3*k11*N2*oxy/((a*N2 + oxy)*(1 + (hyd/Kh)^m)))*A2m/(Ka + A2m);
vLEAK: $Ho => $Hm; 0;
v12: A3c => $A2c; c1*k12*A3c;
// Species initializations:
sul_ex = 0;
eth_ex = 0;
oxy_ex = 0;
oxy = 7/c2;
oxy has substance_per_volume;
H2O = 0;
A3c = 1.5/c1;
A3c has substance_per_volume;
aps = 0.5/c1;
aps has substance_per_volume;
PPi = 0;
pap = 0.4/c1;
pap has substance_per_volume;
sul = 0.4/c1;
sul has substance_per_volume;
eth = 4/c1;
eth has substance_per_volume;
A2c has substance_per_volume;
hyd = 0.5/c1;
hyd has substance_per_volume;
cys = 0.3/c1;
cys has substance_per_volume;
N2 = 2/c1;
N2 has substance_per_volume;
N1 has substance_per_volume;
aco = 0.3/c1;
aco has substance_per_volume;
oah = 1.5/c1;
oah has substance_per_volume;
S1 = 1.5/c2;
S1 has substance_per_volume;
S2 has substance_per_volume;
C1 = 0;
C2 = 0;
A2m has substance_per_volume;
A3m = 1.5/c2;
A3m has substance_per_volume;
Ho = 0;
Hm = 0;
// Compartment initializations:
c0 = 1;
c1 = 1;
c2 = 1;
// Variable initializations:
Ac = 2;
N = 2;
S = 2;
Am = 2;
k_v0 = 1.6;
k2 = 0.2;
k3 = 0.2;
k4 = 0.2;
k5 = 0.1;
k6 = 0.12;
k7 = 10;
k8 = 10;
k9 = 10;
k_v10 = 80;
k11 = 10;
k12 = 5;
k_v13 = 4;
k14 = 10;
k15 = 5;
k16 = 10;
k17 = 0.02;
k18 = 1;
n = 4;
m = 4;
Ka = 1;
Kc = 0.1;
a = 0.1;
Kh = 0.5;
// Other declarations:
const c0, c1, c2, Ac, N, S, Am, k_v0, k2, k3, k4, k5, k6, k7, k8, k9, k_v10;
const k11, k12, k_v13, k14, k15, k16, k17, k18, n, m, Ka, Kc, a, Kh;
// Unit definitions:
unit substance = mole;
unit substance_per_volume = mole / litre;
// Display Names:
c0 is "external";
c1 is "cytosol";
c2 is "mitochondria";
sul_ex is "SO4_ex";
eth_ex is "EtOH_ex";
oxy_ex is "O2_ex";
oxy is "O2";
A3c is "ATP";
aps is "APS";
pap is "PAPS";
sul is "SO4";
eth is "EtOH";
A2c is "ADP";
hyd is "H2S";
cys is "CYS";
N2 is "NADH";
N1 is "NAD";
aco is "AcCoA";
oah is "OAH";
A2m is "ADP_mit";
A3m is "ATP_mit";
v11a is "vET1";
v11a2 is "vET2";
v11b is "vSYNT";
// CV terms:
c0 hypernym "http://identifiers.org/obo.go/GO:0005576"
c1 hypernym "http://identifiers.org/obo.go/GO:0005829"
c2 hypernym "http://identifiers.org/obo.go/GO:0005739"
sul_ex identity "http://identifiers.org/obo.chebi/CHEBI:16189"
eth_ex identity "http://identifiers.org/obo.chebi/CHEBI:16236"
oxy_ex identity "http://identifiers.org/obo.chebi/CHEBI:15379"
oxy identity "http://identifiers.org/obo.chebi/CHEBI:15379"
H2O identity "http://identifiers.org/obo.chebi/CHEBI:15377"
A3c identity "http://identifiers.org/obo.chebi/CHEBI:15422"
aps identity "http://identifiers.org/obo.chebi/CHEBI:17709"
PPi identity "http://identifiers.org/obo.chebi/CHEBI:18361"
pap identity "http://identifiers.org/obo.chebi/CHEBI:17980"
sul identity "http://identifiers.org/obo.chebi/CHEBI:16189"
eth identity "http://identifiers.org/obo.chebi/CHEBI:16236"
A2c identity "http://identifiers.org/obo.chebi/CHEBI:16761"
hyd identity "http://identifiers.org/obo.chebi/CHEBI:16136"
cys identity "http://identifiers.org/obo.chebi/CHEBI:17561"
N2 identity "http://identifiers.org/obo.chebi/CHEBI:16908"
N1 identity "http://identifiers.org/obo.chebi/CHEBI:15846"
aco identity "http://identifiers.org/obo.chebi/CHEBI:15351"
oah identity "http://identifiers.org/obo.chebi/CHEBI:16288"
S1 parthood "http://identifiers.org/obo.go/GO:0030062"
S2 parthood "http://identifiers.org/obo.go/GO:0030062"
C1 hypernym "http://identifiers.org/obo.go/GO:0005746"
C2 hypernym "http://identifiers.org/obo.go/GO:0005746"
A2m identity "http://identifiers.org/obo.chebi/CHEBI:16761"
A3m identity "http://identifiers.org/obo.chebi/CHEBI:15422"
Ho identity "http://identifiers.org/obo.chebi/CHEBI:24636"
Hm identity "http://identifiers.org/obo.chebi/CHEBI:24636"
v1 hypernym "http://identifiers.org/obo.go/GO:0015381"
v13 hypernym "http://identifiers.org/obo.go/GO:0015850"
v2 identity "http://identifiers.org/ec-code/2.7.7.4"
v3 identity "http://identifiers.org/ec-code/2.7.1.25"
v3 hypernym "http://identifiers.org/obo.go/GO:0004020"
v4 version "http://identifiers.org/ec-code/1.8.4.8",
"http://identifiers.org/ec-code/1.8.1.2"
v5 version "http://identifiers.org/ec-code/4.4.1.1",
"http://identifiers.org/ec-code/4.2.1.22",
"http://identifiers.org/ec-code/2.5.1.49"
v7 version "http://identifiers.org/ec-code/6.2.1.1",
"http://identifiers.org/ec-code/1.2.1.3",
"http://identifiers.org/ec-code/1.1.1.1"
v15 identity "http://identifiers.org/ec-code/2.3.1.31"
v8 parthood "http://identifiers.org/obo.go/GO:0006099"
v9 parthood "http://identifiers.org/obo.go/GO:0006099"
v11a identity "http://identifiers.org/obo.go/GO:0015990"
v11a parthood "http://identifiers.org/obo.go/GO:0042775"
v11a version "http://identifiers.org/obo.go/GO:0002082"
v11a2 parthood "http://identifiers.org/obo.go/GO:0042775"
v11a2 version "http://identifiers.org/obo.go/GO:0002082"
v11a2 identity "http://identifiers.org/obo.go/GO:0006123"
v16 identity "http://identifiers.org/obo.go/GO:0005471"
v11b parthood "http://identifiers.org/obo.go/GO:0042775"
v11b hypernym "http://identifiers.org/obo.go/GO:0006119"
v11b version "http://identifiers.org/obo.go/GO:0002082"
vLEAK hypernym "http://identifiers.org/obo.go/GO:0006810"
v12 hypernym "http://identifiers.org/obo.go/GO:0006200"
end
""")
# Export SBML and Antimony versions of the updated model to current working directory
wolf.exportToAntimony('wolf_antimony.txt')
wolf.exportToSBML('wolf.xml')
# Let's work with the species 'oxy'(CHEBI ID: 15379) - or dioxygen - going forward
wolf.simulate(0, 100, 1000, ['time', 'oxy']) # note that specific species can be selected for recording concentrations over the timecourse
wolf.plot(figsize = (10, 6), xtitle = 'Time', ytitle = 'Concentration')
```
# Writing SED-ML with PhraSED-ML <a class="anchor" id="writing-phrasedml"></a>
SED-ML encodes the information required by the minimal information about a simulation experiment guidelines (MIASE) to enable reproduction of simulation experiments in a computer-readable format.
The specification includes:
* selection of experimental data for the experiment
* models used for the experiment
* which simulation to run on which models
* which results to pass to output
* how results should be output
PhraSED-ML is a language and a library that provide a text-based way to read, summarize, and create SED-ML files as part of the greater Tellurium modeling environment we have discussed.
```
# Write phraSED-ML string specifying the simulation study
wolf_phrasedml = '''
// Set model
wolf = model "wolf.xml" # model_id = model source_model
// Deterministic simulation
det_sim = simulate uniform(0, 500, 1000) # sim_id = simulate simulation_type
wolf_det_sim = run det_sim on wolf # task_id = run sim_id on model_id
plot "Wolf et al. dynamics (Model ID: BIOMD0000000090)" time vs oxy # plot title_name x vs y
'''
# Generate SED-ML string from the phraSED-ML string
wolf.resetAll()
wolf_sbml = wolf.getSBML()
phrasedml.setReferencedSBML("wolf.xml", wolf_sbml)
wolf_sedml = phrasedml.convertString(wolf_phrasedml)
print(wolf_sedml)
```
# Exporting SED-ML <a class="anchor" id="exporting-sedml"></a>
```
# Save the SED-ML simulation experiment to your current working directory
te.saveToFile('wolf_sedml.xml', wolf_sedml)
# Load and run SED-ML script
te.executeSEDML('wolf_sedml.xml')
```
# Generating a COMBINE archive <a class="anchor" id="combine-archive"></a>
COMBINE archives package SBML models and SED-ML simulation experiment descriptions together to ensure complete modeling studies or experiments can be exchanged between software tools. Tellurium provides the inline Open Modeling EXchange format (OMEX) to edit contents of COMBINE archives in a human-readable format. Inline OMEX is essentially an Antimony description of the model joined to the PhraSED-ML experiment description.
```
# Read Antimony model into a string
wolf_antimony = te.readFromFile('wolf_antimony.txt')
# create an inline OMEX string
wolf_inline_omex = '\n'.join([wolf_antimony, wolf_phrasedml])
print(wolf_inline_omex)
# export to a COMBINE archive
te.exportInlineOmex(wolf_inline_omex, 'wolf.omex')
```
# Exercises <a class="anchor" id="exercises"></a>
## Exercise 1:
Download the <a href="http://www.ebi.ac.uk/biomodels-main/BIOMD0000000010 "> Kholodenko 2000 model</a> of ultrasensitivity and negative feedback oscillations in the MAPK cascade from the BioModels Database, and upload to your workspace. Simulate and plot simulation results for the model.
<div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/kholodenko_publication.PNG" width="75%"></div>
```
# Write your solution here
```
## Exercise 1 Solution:
```
# Solution
r = te.loadSBMLModel(
"https://www.ebi.ac.uk/biomodels/model/download/BIOMD0000000010?filename=BIOMD0000000010_url.xml")
r.simulate(0, 5000, 1000)
r.plot()
```
# Acknowledgements
<br>
<div align='left'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/acknowledgments.png" width="80%"></div>
<br>
<html>
<head>
</head>
<body>
<h1>Bibliography</h1>
<ol>
<li>
<p>K. Choi et al., <cite>Tellurium: An extensible python-based modeling environment for systems and synthetic biology</cite>, Biosystems, vol. 171, pp. 74–79, Sep. 2018.</p>
</li>
<li>
<p>E. T. Somogyi et al., <cite>libRoadRunner: a high performance SBML simulation and analysis library.,</cite>, Bioinformatics, vol. 31, no. 20, pp. 3315–21, Oct. 2015.</p>
<li>
<p>L. P. Smith, F. T. Bergmann, D. Chandran, and H. M. Sauro, <cite>Antimony: a modular model definition language</cite>, Bioinformatics, vol. 25, no. 18, pp. 2452–2454, Sep. 2009.</p>
</li>
<li>
<p>K. Choi, L. P. Smith, J. K. Medley, and H. M. Sauro, <cite>phraSED-ML: a paraphrased, human-readable adaptation of SED-ML</cite>, J. Bioinform. Comput. Biol., vol. 14, no. 06, Dec. 2016.</p>
</li>
<li>
<p> B.N. Kholodenko, O.V. Demin, G. Moehren, J.B. Hoek, <cite>Quantification of short term signaling by the epidermal growth factor receptor.</cite>, J Biol Chem., vol. 274, no. 42, Oct. 1999.</p>
</li>
</ol>
</body>
</html>
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
```
# 첫 번째 신경망 훈련하기: 기초적인 분류 문제
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/keras/classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ko/tutorials/keras/classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ko/tutorials/keras/classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub) 소스 보기</a>
</td>
</table>
Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도
불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.
이 번역에 개선할 부분이 있다면
[tensorflow/docs](https://github.com/tensorflow/docs) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.
문서 번역이나 리뷰에 참여하려면
[docs-ko@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로
메일을 보내주시기 바랍니다.
이 튜토리얼에서는 운동화나 셔츠 같은 옷 이미지를 분류하는 신경망 모델을 훈련합니다. 상세 내용을 모두 이해하지 못해도 괜찮습니다. 여기서는 완전한 텐서플로(TensorFlow) 프로그램을 빠르게 살펴 보겠습니다. 자세한 내용은 앞으로 배우면서 더 설명합니다.
여기에서는 텐서플로 모델을 만들고 훈련할 수 있는 고수준 API인 [tf.keras](https://www.tensorflow.org/guide/keras)를 사용합니다.
```
try:
# Colab only
%tensorflow_version 2.x
except Exception:
pass
# Python 2 compatibility shims (duplicate `unicode_literals` removed --
# the original listed it twice).
from __future__ import absolute_import, division, print_function, unicode_literals
# tensorflow와 tf.keras를 임포트합니다
import tensorflow as tf
from tensorflow import keras
# 헬퍼(helper) 라이브러리를 임포트합니다
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
```
## 패션 MNIST 데이터셋 임포트하기
10개의 범주(category)와 70,000개의 흑백 이미지로 구성된 [패션 MNIST](https://github.com/zalandoresearch/fashion-mnist) 데이터셋을 사용하겠습니다. 이미지는 해상도(28x28 픽셀)가 낮고 다음처럼 개별 옷 품목을 나타냅니다:
<table>
<tr><td>
<img src="https://tensorflow.org/images/fashion-mnist-sprite.png"
alt="Fashion MNIST sprite" width="600">
</td></tr>
<tr><td align="center">
<b>그림 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">패션-MNIST 샘플</a> (Zalando, MIT License).<br/>
</td></tr>
</table>
패션 MNIST는 컴퓨터 비전 분야의 "Hello, World" 프로그램격인 고전 [MNIST](http://yann.lecun.com/exdb/mnist/) 데이터셋을 대신해서 자주 사용됩니다. MNIST 데이터셋은 손글씨 숫자(0, 1, 2 등)의 이미지로 이루어져 있습니다. 여기서 사용하려는 옷 이미지와 동일한 포맷입니다.
패션 MNIST는 일반적인 MNIST 보다 조금 더 어려운 문제이고 다양한 예제를 만들기 위해 선택했습니다. 두 데이터셋은 비교적 작기 때문에 알고리즘의 작동 여부를 확인하기 위해 사용되곤 합니다. 코드를 테스트하고 디버깅하는 용도로 좋습니다.
네트워크를 훈련하는데 60,000개의 이미지를 사용합니다. 그다음 네트워크가 얼마나 정확하게 이미지를 분류하는지 10,000개의 이미지로 평가하겠습니다. 패션 MNIST 데이터셋은 텐서플로에서 바로 임포트하여 적재할 수 있습니다:
```
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
```
load_data() 함수를 호출하면 네 개의 넘파이(NumPy) 배열이 반환됩니다:
* `train_images`와 `train_labels` 배열은 모델 학습에 사용되는 *훈련 세트*입니다.
* `test_images`와 `test_labels` 배열은 모델 테스트에 사용되는 *테스트 세트*입니다.
이미지는 28x28 크기의 넘파이 배열이고 픽셀 값은 0과 255 사이입니다. *레이블*(label)은 0에서 9까지의 정수 배열입니다. 이 값은 이미지에 있는 옷의 *클래스*(class)를 나타냅니다:
<table>
<tr>
<th>레이블</th>
<th>클래스</th>
</tr>
<tr>
<td>0</td>
<td>T-shirt/top</td>
</tr>
<tr>
<td>1</td>
<td>Trouser</td>
</tr>
<tr>
<td>2</td>
<td>Pullover</td>
</tr>
<tr>
<td>3</td>
<td>Dress</td>
</tr>
<tr>
<td>4</td>
<td>Coat</td>
</tr>
<tr>
<td>5</td>
<td>Sandal</td>
</tr>
<tr>
<td>6</td>
<td>Shirt</td>
</tr>
<tr>
<td>7</td>
<td>Sneaker</td>
</tr>
<tr>
<td>8</td>
<td>Bag</td>
</tr>
<tr>
<td>9</td>
<td>Ankle boot</td>
</tr>
</table>
각 이미지는 하나의 레이블에 매핑되어 있습니다. 데이터셋에 *클래스 이름*이 들어있지 않기 때문에 나중에 이미지를 출력할 때 사용하기 위해 별도의 변수를 만들어 저장합니다:
```
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
## 데이터 탐색
모델을 훈련하기 전에 데이터셋 구조를 살펴보죠. 다음 코드는 훈련 세트에 60,000개의 이미지가 있다는 것을 보여줍니다. 각 이미지는 28x28 픽셀로 표현됩니다:
```
train_images.shape
```
비슷하게 훈련 세트에는 60,000개의 레이블이 있습니다:
```
len(train_labels)
```
각 레이블은 0과 9사이의 정수입니다:
```
train_labels
```
테스트 세트에는 10,000개의 이미지가 있습니다. 이 이미지도 28x28 픽셀로 표현됩니다:
```
test_images.shape
```
테스트 세트는 10,000개의 이미지에 대한 레이블을 가지고 있습니다:
```
len(test_labels)
```
## 데이터 전처리
네트워크를 훈련하기 전에 데이터를 전처리해야 합니다. 훈련 세트에 있는 첫 번째 이미지를 보면 픽셀 값의 범위가 0~255 사이라는 것을 알 수 있습니다:
```
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
```
신경망 모델에 주입하기 전에 이 값의 범위를 0~1 사이로 조정하겠습니다. 이렇게 하려면 255로 나누어야 합니다. *훈련 세트*와 *테스트 세트*를 동일한 방식으로 전처리하는 것이 중요합니다:
```
train_images = train_images / 255.0
test_images = test_images / 255.0
```
*훈련 세트*에서 처음 25개 이미지와 그 아래 클래스 이름을 출력해 보죠. 데이터 포맷이 올바른지 확인하고 네트워크 구성과 훈련할 준비를 마칩니다.
```
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
```
## 모델 구성
신경망 모델을 만들려면 모델의 층을 구성한 다음 모델을 컴파일합니다.
### 층 설정
신경망의 기본 구성 요소는 *층*(layer)입니다. 층은 주입된 데이터에서 표현을 추출합니다. 아마도 문제를 해결하는데 더 의미있는 표현이 추출될 것입니다.
대부분 딥러닝은 간단한 층을 연결하여 구성됩니다. `tf.keras.layers.Dense`와 같은 층들의 가중치(parameter)는 훈련하는 동안 학습됩니다.
```
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
```
이 네트워크의 첫 번째 층인 `tf.keras.layers.Flatten`은 2차원 배열(28 x 28 픽셀)의 이미지 포맷을 28 * 28 = 784 픽셀의 1차원 배열로 변환합니다. 이 층은 이미지에 있는 픽셀의 행을 펼쳐서 일렬로 늘립니다. 이 층에는 학습되는 가중치가 없고 데이터를 변환하기만 합니다.
픽셀을 펼친 후에는 두 개의 `tf.keras.layers.Dense` 층이 연속되어 연결됩니다. 이 층을 밀집 연결(densely-connected) 또는 완전 연결(fully-connected) 층이라고 부릅니다. 첫 번째 `Dense` 층은 128개의 노드(또는 뉴런)를 가집니다. 두 번째 (마지막) 층은 10개의 노드의 *소프트맥스*(softmax) 층입니다. 이 층은 10개의 확률을 반환하고 반환된 값의 전체 합은 1입니다. 각 노드는 현재 이미지가 10개 클래스 중 하나에 속할 확률을 출력합니다.
### 모델 컴파일
모델을 훈련하기 전에 필요한 몇 가지 설정이 모델 *컴파일* 단계에서 추가됩니다:
* *손실 함수*(Loss function)-훈련 하는 동안 모델의 오차를 측정합니다. 모델의 학습이 올바른 방향으로 향하도록 이 함수를 최소화해야 합니다.
* *옵티마이저*(Optimizer)-데이터와 손실 함수를 바탕으로 모델의 업데이트 방법을 결정합니다.
* *지표*(Metrics)-훈련 단계와 테스트 단계를 모니터링하기 위해 사용합니다. 다음 예에서는 올바르게 분류된 이미지의 비율인 *정확도*를 사용합니다.
```
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
```
## 모델 훈련
신경망 모델을 훈련하는 단계는 다음과 같습니다:
1. 훈련 데이터를 모델에 주입합니다-이 예에서는 `train_images`와 `train_labels` 배열입니다.
2. 모델이 이미지와 레이블을 매핑하는 방법을 배웁니다.
3. 테스트 세트에 대한 모델의 예측을 만듭니다-이 예에서는 `test_images` 배열입니다. 이 예측이 `test_labels` 배열의 레이블과 맞는지 확인합니다.
훈련을 시작하기 위해 `model.fit` 메서드를 호출하면 모델이 훈련 데이터를 학습합니다:
```
model.fit(train_images, train_labels, epochs=5)
```
모델이 훈련되면서 손실과 정확도 지표가 출력됩니다. 이 모델은 훈련 세트에서 약 0.88(88%) 정도의 정확도를 달성합니다.
## 정확도 평가
그다음 테스트 세트에서 모델의 성능을 비교합니다:
```
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\n테스트 정확도:', test_acc)
```
테스트 세트의 정확도가 훈련 세트의 정확도보다 조금 낮습니다. 훈련 세트의 정확도와 테스트 세트의 정확도 사이의 차이는 *과대적합*(overfitting) 때문입니다. 과대적합은 머신러닝 모델이 훈련 데이터보다 새로운 데이터에서 성능이 낮아지는 현상을 말합니다.
## 예측 만들기
훈련된 모델을 사용하여 이미지에 대한 예측을 만들 수 있습니다.
```
predictions = model.predict(test_images)
```
여기서는 테스트 세트에 있는 각 이미지의 레이블을 예측했습니다. 첫 번째 예측을 확인해 보죠:
```
predictions[0]
```
이 예측은 10개의 숫자 배열로 나타납니다. 이 값은 10개의 옷 품목에 상응하는 모델의 신뢰도(confidence)를 나타냅니다. 가장 높은 신뢰도를 가진 레이블을 찾아보죠:
```
np.argmax(predictions[0])
```
모델은 이 이미지가 앵클 부츠(`class_name[9]`)라고 가장 확신하고 있습니다. 이 값이 맞는지 테스트 레이블을 확인해 보죠:
```
test_labels[0]
```
10개 클래스에 대한 예측을 모두 그래프로 표현해 보겠습니다:
```
def plot_image(i, predictions_array, true_label, img):
    """Show test image *i* with predicted label, confidence and true label.

    The caption is blue when the prediction matches the true label and
    red otherwise. Relies on module-level ``class_names``.
    """
    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])

    plt.imshow(img, cmap=plt.cm.binary)

    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'  # correct prediction
    else:
        color = 'red'   # wrong prediction

    # Caption format: "<predicted> NN% (<true>)"
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                         100*np.max(predictions_array),
                                         class_names[true_label]),
               color=color)
def plot_value_array(i, predictions_array, true_label):
    """Draw a bar chart of the 10 class scores for sample *i*.

    The predicted class bar is coloured red and the true class bar blue,
    so a correct prediction shows a single blue bar.
    """
    scores = predictions_array[i]
    actual = true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    bars = plt.bar(range(10), scores, color="#777777")
    plt.ylim([0, 1])
    # Red first, then blue: when predicted == actual, blue wins.
    bars[np.argmax(scores)].set_color('red')
    bars[actual].set_color('blue')
```
0번째 원소의 이미지, 예측, 신뢰도 점수 배열을 확인해 보겠습니다.
```
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
plt.show()
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
plt.show()
```
몇 개의 이미지의 예측을 출력해 보죠. 올바르게 예측된 레이블은 파란색이고 잘못 예측된 레이블은 빨강색입니다. 숫자는 예측 레이블의 신뢰도 퍼센트(100점 만점)입니다. 신뢰도 점수가 높을 때도 잘못 예측할 수 있습니다.
```
# 처음 X 개의 테스트 이미지와 예측 레이블, 진짜 레이블을 출력합니다
# 올바른 예측은 파랑색으로 잘못된 예측은 빨강색으로 나타냅니다
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
plt.show()
```
마지막으로 훈련된 모델을 사용하여 한 이미지에 대한 예측을 만듭니다.
```
# 테스트 세트에서 이미지 하나를 선택합니다
img = test_images[0]
print(img.shape)
```
`tf.keras` 모델은 한 번에 샘플의 묶음 또는 *배치*(batch)로 예측을 만드는데 최적화되어 있습니다. 하나의 이미지를 사용할 때에도 2차원 배열로 만들어야 합니다:
```
# 이미지 하나만 사용할 때도 배치에 추가합니다
img = (np.expand_dims(img,0))
print(img.shape)
```
이제 이 이미지의 예측을 만듭니다:
```
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
```
`model.predict`는 2차원 넘파이 배열을 반환하므로 첫 번째 이미지의 예측을 선택합니다:
```
np.argmax(predictions_single[0])
```
이전과 마찬가지로 모델의 예측은 레이블 9입니다.
| github_jupyter |
```
%matplotlib inline
import pandas as pd
import numpy as np
import os
from plotnine import *
```
## Overview
* select 5'UTRs longer than 80 nt
* count reads aligned to these UTRs (pysam)
* plot utr reads -bcm vs utr reads + bcm
* select UTRs with increased number of reads upon addition of BCM (clustering?)
* compare selected UTRs with genes upregulated in the stationary phase as discovered by DESeq2
* compare selected UTRs with small RNA binding sites (pybedtools?)
### Sample table and barcodes
```
# Sample titles with corresponding barcodes (two replicate barcodes per sample)
samples = {
    's9': ['ATCACG', 'ACAGTG'],
    's9+bcm': ['CGATGT', 'GCCAAT'],
    's17': ['TTAGGC', 'GATCAG'],
    's17+bcm': ['TGACCA', 'TAGCTT'],
    's19': ['CAGATC','GGCTAC'],
    's19+bcm': ['ACTTGA', 'CTTGTA']
}

# Flat list of all barcodes.  Derived from `samples` so the two can never
# drift apart; dict insertion order preserves the original list's order.
barcodes = [bc for replicate_bcs in samples.values() for bc in replicate_bcs]
```
### Load counts for genes, calculate counts in UTRs longer than 80 nt
Gene counts were obtained using the `htseq` program against the standard NC_000913 .GFF file. The way I calculate reads in UTRs here is not strand-specific. So the numbers can be confounded if there is a transcript going in the opposite direction. We can solve this later if needed.
```
# Load the merged counts table (tab-separated) produced upstream.
dfm = pd.read_csv('../../data/dfm.csv', sep='\t')
dfm
```
### Normalize counts for feature length, log-transform, and take means for replicates
Pseudo-counts (+1) are added during UTR reads counting to make sure we can log-transform the data.
```
id_vars = ['TSS','TU_name','coord_5','coord_3','gene', 'UTR_length']
value_vars = ['s9','s17','s19','s9+bcm','s17+bcm','s19+bcm']

dfn = dfm.copy()

# Normalize raw counts by feature length: ORF counts by gene length, UTR
# counts by UTR length.  Vectorized column arithmetic replaces the original
# per-row .apply(lambda ...) calls, which are much slower on large frames
# and produce identical values (pandas division already yields floats).
orf_length = (dfn['first_gene_5'] - dfn['first_gene_3']).abs()
for barcode in barcodes:
    dfn['orf_{0}'.format(barcode)] = dfn[barcode] / orf_length
    dfn['utr_{0}'.format(barcode)] = dfn['utr_{0}'.format(barcode)] / dfn['UTR_length']

df = dfn[id_vars].copy()

# Average the replicate barcodes for each sample (per the `samples` dict)
# and log10-transform; pseudo-counts added upstream keep the log defined.
for sample, bcs in samples.items():
    df['orf_{0}'.format(sample)] = np.log10(dfn[['orf_{0}'.format(b) for b in bcs]].mean(axis=1))
    df['utr_{0}'.format(sample)] = np.log10(dfn[['utr_{0}'.format(b) for b in bcs]].mean(axis=1))
df
```
### Plot wild type with vs without BCM
Two clusters are apparent. We are after the UTRs that are upregulated by the addition of BCM (cloud of points in the left part of the plot along y=0 line and in general (significantly) above y=x line).
BTW, the point size is the length of UTR. No (apparent) correlation here.
```
# Scatter of normalized UTR read density: untreated s9 vs s9+BCM, and s9 vs s19.
# NOTE(review): in a notebook only the last expression of a cell is displayed,
# so the first ggplot below never renders unless printed explicitly.
(ggplot(df, aes(x='utr_s9', y='utr_s9+bcm', size='UTR_length'))
 + geom_point(size=0.5, alpha=0.1)
 + geom_abline(slope=1, intercept=0, size=.5, color='#586e75')
)
(ggplot(df, aes(x='utr_s9', y='utr_s19', size='UTR_length'))
 + geom_point(size=0.5, alpha=0.1)
 + geom_abline(slope=1, intercept=0, size=0.5, color='#586e75')
)
```
### Clustering
Now we need a way to split the points the way we want. Let's try a bunch of clustering algorithms from `scikit-learn.`
```
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import euclidean_distances
from sklearn.neighbors import kneighbors_graph
from sklearn import cluster
from sklearn import mixture

# Standardize the two features used for clustering.
X = df[['utr_s9', 'utr_s9+bcm']].to_numpy()
X = StandardScaler().fit_transform(X)

bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# Symmetrized k-NN connectivity graph for the agglomerative methods.
connectivity = kneighbors_graph(X, n_neighbors=20)
connectivity = 0.05 * (connectivity + connectivity.T)
#distances = euclidean_distances(X)

gmm = mixture.GaussianMixture(n_components=2, covariance_type='full')
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2, batch_size=200)
kmeans = cluster.KMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward', connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2, n_neighbors=20, eigen_solver='arpack', affinity='nearest_neighbors')
dbscan = cluster.DBSCAN(eps=.5)
affinity_propagation = cluster.AffinityPropagation(damping=.95, preference=-200)
average_linkage = cluster.AgglomerativeClustering(linkage='average', affinity='cityblock', n_clusters=2, connectivity=connectivity)

# Fit every algorithm and plot its labelling of the s9 vs s9+bcm plane.
for name, alg in [
    ('MiniBatchKMeans', two_means),
    ('KMeans', kmeans),
    ('AffinityPropagation', affinity_propagation),
    ('MeanShift', ms),
    ('GMM', gmm),
    ('SpectralClustering', spectral),
    ('Ward', ward),
    ('AgglomerativeClustering', average_linkage),
    ('DBSCAN', dbscan)
]:
    alg.fit(X)
    # Most estimators expose labels_ after fit; mixture models need predict().
    if hasattr(alg, 'labels_'):
        df['label'] = alg.labels_.astype(np.int32)
    else:
        df['label'] = alg.predict(X)
    p = ggplot(df, aes(x='utr_s9', y='utr_s9+bcm', color='label')) \
        + geom_point(size=0.5, alpha=0.5) \
        + ggtitle(name) \
        + geom_abline(slope=1, intercept=0, size=0.5, color='#586e75')
    print(p)
# Removed the original trailing `X = df.as_matrix`: DataFrame.as_matrix was
# deprecated and removed in pandas 1.0, and the call was missing its
# parentheses anyway (it bound the method object rather than producing data).
```
| github_jupyter |
```
# LSTM for Human Activity Recognition
Human activity recognition using smartphones dataset and an LSTM RNN. Classifying the type of movement amongst six categories:
- WALKING,
- WALKING_UPSTAIRS,
- WALKING_DOWNSTAIRS,
- SITTING,
- STANDING,
- LAYING.
## Video dataset overview
Follow this link to see a video of the 6 activities recorded in the experiment with one of the participants:
<a href="http://www.youtube.com/watch?feature=player_embedded&v=XOEN9W05_4A
" target="_blank"><img src="http://img.youtube.com/vi/XOEN9W05_4A/0.jpg"
alt="Video of the experiment" width="400" height="300" border="10" /></a>
<a href="https://youtu.be/XOEN9W05_4A"><center>[Watch video]</center></a>
## Details about input data
I will be using an LSTM on the data to learn (as a cellphone attached on the waist) to recognise the type of activity that the user is doing. The dataset's description goes like this:
> The sensor signals (accelerometer and gyroscope) were pre-processed by applying noise filters and then sampled in fixed-width sliding windows of 2.56 sec and 50% overlap (128 readings/window). The sensor acceleration signal, which has gravitational and body motion components, was separated using a Butterworth low-pass filter into body acceleration and gravity. The gravitational force is assumed to have only low frequency components, therefore a filter with 0.3 Hz cutoff frequency was used.
That said, I will use the almost raw data: only the gravity effect has been filtered out of the accelerometer as a preprocessing step for another 3D feature as an input to help learning.
## What is an RNN?
As explained in [this article](http://karpathy.github.io/2015/05/21/rnn-effectiveness/), an RNN takes many input vectors to process them and output other vectors. It can be roughly pictured like in the image below, imagining each rectangle has a vectorial depth and other special hidden quirks in the image below. **In our case, the "many to one" architecture is used**: we accept time series of feature vectors (one vector per time step) to convert them to a probability vector at the output for classification. Note that a "one to one" architecture would be a standard feedforward neural network.
<img src="http://karpathy.github.io/assets/rnn/diags.jpeg" />
An LSTM is an improved RNN. It is more complex, but easier to train, avoiding what is called the vanishing gradient problem and the exploding gradient problem.
## Results
Scroll on! Nice visuals awaits.
# All Includes
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf # Version r0.10
from sklearn import metrics
import os
# Useful Constants

# Those are separate normalised input features for the neural network:
# 3-axis body acceleration, 3-axis gyroscope, 3-axis total acceleration.
INPUT_SIGNAL_TYPES = [
    "body_acc_x_",
    "body_acc_y_",
    "body_acc_z_",
    "body_gyro_x_",
    "body_gyro_y_",
    "body_gyro_z_",
    "total_acc_x_",
    "total_acc_y_",
    "total_acc_z_"
]

# Output classes to learn how to classify (index = label - 1 in the dataset).
LABELS = [
    "WALKING",
    "WALKING_UPSTAIRS",
    "WALKING_DOWNSTAIRS",
    "SITTING",
    "STANDING",
    "LAYING"
]
```
## Let's start by downloading the data:
```
# Note: Linux bash commands start with a "!" inside those "ipython notebook" cells
DATA_PATH = "data/"

!pwd && ls
os.chdir(DATA_PATH)
!pwd && ls

# Fetch the UCI HAR dataset via the helper script shipped in data/.
!python download_dataset.py

!pwd && ls
os.chdir("..")
!pwd && ls

DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
```
## Preparing dataset:
```
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
    """Load the 9 inertial-signal files and stack them into one array.

    Each file holds one signal type: one sample per row, whitespace-separated
    float values (one per timestep).

    Args:
        X_signals_paths: list of file paths, one per signal type.

    Returns:
        float32 ndarray of shape (n_samples, n_timesteps, n_signals).
    """
    X_signals = []

    for signal_type_path in X_signals_paths:
        # Text mode + context manager: rows arrive as str and the handle is
        # closed even on error (the original opened 'rb' and never used
        # try/finally, which also breaks on Python 3 where bytes rows cannot
        # be .split() with str arguments).
        with open(signal_type_path, 'r') as file:
            # split() without arguments collapses any run of whitespace,
            # which the fixed-width dataset files contain.
            X_signals.append(
                [np.array(serie, dtype=np.float32) for serie in [
                    row.strip().split() for row in file
                ]]
            )

    # (signals, samples, steps) -> (samples, steps, signals)
    return np.transpose(np.array(X_signals), (1, 2, 0))
# Build the per-signal file paths for the train and test splits, then load both.
X_train_signals_paths = [
    DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
    DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]

X_train = load_X(X_train_signals_paths)
X_test = load_X(X_test_signals_paths)
# Load "y" (the neural network's training and testing outputs)
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
    """Load activity labels from a file with one 1-based integer per line.

    Args:
        y_path: path to y_train.txt / y_test.txt.

    Returns:
        int32 ndarray of shape (n_samples, 1) with 0-based class indices.
    """
    # Text mode + context manager: closes the file reliably and avoids the
    # bytes-vs-str mismatch the original 'rb' mode causes on Python 3.
    with open(y_path, 'r') as file:
        y_ = np.array(
            [row.strip().split() for row in file],
            dtype=np.int32
        )

    # Substract 1 to each output class for friendly 0-based indexing
    return y_ - 1
# Per-sample activity labels (1..6 in the files, shifted to 0..5 by load_y).
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"

y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
```
## Additional Parameters:
Here are some core parameter definitions for the training.
The whole neural network's structure could be summarised by enumerating those parameters and the fact an LSTM is used.
```
# Input Data
training_data_count = len(X_train)  # 7352 training series (with 50% overlap between each serie)
test_data_count = len(X_test)  # 2947 testing series
n_steps = len(X_train[0])  # 128 timesteps per series
n_input = len(X_train[0][0])  # 9 input parameters per timestep

# LSTM Neural Network's internal structure
n_hidden = 32  # Hidden layer num of features
n_classes = 6  # Total classes (should go up, or should go down)

# Training
learning_rate = 0.0025
lambda_loss_amount = 0.0015
training_iters = training_data_count * 300  # Loop 300 times on the dataset
batch_size = 1500
display_iter = 30000  # To show test set accuracy during training

# Some debugging info.  Using print() calls (not Python 2 print statements)
# keeps this cell runnable under both Python 2 and Python 3; with a single
# argument the output is identical on both.
print("Some useful info to get an insight on dataset's shape and normalisation:")
print("(X shape, y shape, every X's mean, every X's standard deviation)")
print(X_test.shape, y_test.shape, np.mean(X_test), np.std(X_test))
print("The dataset is therefore properly normalised, as expected, but not yet one-hot encoded.")
```
## Utility functions for training:
```
def LSTM_RNN(_X, _weights, _biases):
    """Build a two-layer stacked-LSTM classifier graph (TensorFlow r0.10 API).

    _X: input tensor of shape (batch_size, n_steps, n_input).
    Returns the unscaled class logits, shape (batch_size, n_classes).
    """
    # Function returns a tensorflow LSTM (RNN) artificial neural network from given parameters.
    # Moreover, two LSTM cells are stacked which adds deepness to the neural network.
    # Note, some code of this notebook is inspired from an slightly different
    # RNN architecture used on another dataset:
    # https://tensorhub.com/aymericdamien/tensorflow-rnn

    # (NOTE: This step could be greatly optimised by shaping the dataset once
    # input shape: (batch_size, n_steps, n_input)
    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
    # Reshape to prepare input to hidden activation
    _X = tf.reshape(_X, [-1, n_input])
    # new shape: (n_steps*batch_size, n_input)

    # Linear activation (ReLU on the input-to-hidden projection)
    _X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])
    # Split data because rnn cell needs a list of inputs for the RNN inner loop
    _X = tf.split(0, n_steps, _X)
    # new shape: n_steps * (batch_size, n_hidden)

    # Define two stacked LSTM cells (two recurrent layers deep) with tensorflow
    lstm_cell_1 = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
    lstm_cell_2 = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
    lstm_cells = tf.nn.rnn_cell.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
    # Get LSTM cell output
    outputs, states = tf.nn.rnn(lstm_cells, _X, dtype=tf.float32)

    # Get last time step's output feature for a "many to one" style classifier,
    # as in the image describing RNNs at the top of this page
    lstm_last_output = outputs[-1]

    # Linear activation: project the last hidden state to class logits
    return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']
def extract_batch_size(_train, step, batch_size):
    """Return the `step`-th batch of `batch_size` rows from `_train`.

    Indices wrap around modulo len(_train), so training can cycle over the
    dataset indefinitely.  The result is a float64 copy, matching the
    original np.empty-based implementation's dtype.
    """
    # Vectorized gather replaces the original per-row Python loop.
    start = (step - 1) * batch_size
    indices = (start + np.arange(batch_size)) % len(_train)
    return _train[indices].astype(np.float64)
def one_hot(y_):
    """One-hot encode integer class labels.

    e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
    Encoding depth is max(label) + 1; the result is a float array.
    """
    flat = y_.reshape(len(y_))
    depth = np.max(flat) + 1
    indices = np.array(flat, dtype=np.int32)
    identity = np.eye(depth)
    return identity[indices]
```
## Let's get serious and build the neural network:
```
# Graph input/output
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

# Graph weights
weights = {
    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])),  # Hidden layer weights
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
}
biases = {
    'hidden': tf.Variable(tf.random_normal([n_hidden])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

pred = LSTM_RNN(x, weights, biases)

# Loss, optimizer and evaluation
l2 = lambda_loss_amount * sum(
    tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
)  # L2 loss prevents this overkill neural network to overfit the data
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) + l2  # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)  # Adam Optimizer

# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
```
## Hooray, now train the neural network:
```
# To keep track of training's performance
test_losses = []
test_accuracies = []
train_losses = []
train_accuracies = []

# Launch the graph
# NOTE(review): log_device_placement=True logs the device of every op.
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
init = tf.initialize_all_variables()
sess.run(init)

# Perform Training steps with "batch_size" amount of example data at each loop
step = 1
while step * batch_size <= training_iters:
    batch_xs = extract_batch_size(X_train, step, batch_size)
    batch_ys = one_hot(extract_batch_size(y_train, step, batch_size))

    # Fit training using batch data
    _, loss, acc = sess.run(
        [optimizer, cost, accuracy],
        feed_dict={
            x: batch_xs,
            y: batch_ys
        }
    )
    train_losses.append(loss)
    train_accuracies.append(acc)

    # Evaluate network only at some steps for faster training:
    if (step*batch_size % display_iter == 0) or (step == 1) or (step * batch_size > training_iters):
        # To not spam console, show training accuracy/loss in this "if"
        print "Training iter #" + str(step*batch_size) + \
              ": Batch Loss = " + "{:.6f}".format(loss) + \
              ", Accuracy = {}".format(acc)
        # Evaluation on the test set (no learning made here - just evaluation for diagnosis)
        loss, acc = sess.run(
            [cost, accuracy],
            feed_dict={
                x: X_test,
                y: one_hot(y_test)
            }
        )
        test_losses.append(loss)
        test_accuracies.append(acc)
        print "PERFORMANCE ON TEST SET: " + \
              "Batch Loss = {}".format(loss) + \
              ", Accuracy = {}".format(acc)

    step += 1

print "Optimization Finished!"

# Accuracy for test data
# NOTE(review): `accuracy` (the graph op) is rebound here to the evaluated
# float; the op is no longer reachable under that name afterwards.
one_hot_predictions, accuracy, final_loss = sess.run(
    [pred, accuracy, cost],
    feed_dict={
        x: X_test,
        y: one_hot(y_test)
    }
)

test_losses.append(final_loss)
test_accuracies.append(accuracy)

print "FINAL RESULT: " + \
      "Batch Loss = {}".format(final_loss) + \
      ", Accuracy = {}".format(accuracy)
```
## Training is good, but having visual insight is even better:
Okay, let's plot this simply in the notebook for now.
```
# (Inline plots: )
%matplotlib inline

# Larger bold font for all plot text.
font = {
    'family' : 'Bitstream Vera Sans',
    'weight' : 'bold',
    'size' : 18
}
matplotlib.rc('font', **font)

width = 12
height = 12
plt.figure(figsize=(width, height))

# Training curves: one point per training batch.
indep_train_axis = np.array(range(batch_size, (len(train_losses)+1)*batch_size, batch_size))
plt.plot(indep_train_axis, np.array(train_losses), "b--", label="Train losses")
plt.plot(indep_train_axis, np.array(train_accuracies), "g--", label="Train accuracies")

# Test curves: one point per periodic evaluation (every display_iter
# iterations), with the last point pinned to the final iteration count.
indep_test_axis = np.array(range(batch_size, len(test_losses)*display_iter, display_iter)[:-1] + [training_iters])
plt.plot(indep_test_axis, np.array(test_losses), "b-", label="Test losses")
plt.plot(indep_test_axis, np.array(test_accuracies), "g-", label="Test accuracies")

plt.title("Training session's progress over iterations")
plt.legend(loc='upper right', shadow=True)
plt.ylabel('Training Progress (Loss or Accuracy values)')
plt.xlabel('Training iteration')
plt.show()
```
## And finally, the multi-class confusion matrix and metrics!
```
# Results
predictions = one_hot_predictions.argmax(1)

print "Testing Accuracy: {}%".format(100*accuracy)
print ""
print "Precision: {}%".format(100*metrics.precision_score(y_test, predictions, average="weighted"))
print "Recall: {}%".format(100*metrics.recall_score(y_test, predictions, average="weighted"))
print "f1_score: {}%".format(100*metrics.f1_score(y_test, predictions, average="weighted"))
print ""
print "Confusion Matrix:"
confusion_matrix = metrics.confusion_matrix(y_test, predictions)
print confusion_matrix
# Normalise to percentage of all test samples so cells are comparable
# regardless of per-class counts.
normalised_confusion_matrix = np.array(confusion_matrix, dtype=np.float32)/np.sum(confusion_matrix)*100
print ""
print "Confusion matrix (normalised to % of total test data):"
print normalised_confusion_matrix
print ("Note: training and testing data is not equally distributed amongst classes, "
       "so it is normal that more than a 6th of the data is correctly classifier in the last category.")

# Plot Results:
width = 12
height = 12
plt.figure(figsize=(width, height))
plt.imshow(
    normalised_confusion_matrix,
    interpolation='nearest',
    cmap=plt.cm.rainbow
)
plt.title("Confusion matrix \n(normalised to % of total test data)")
plt.colorbar()
tick_marks = np.arange(n_classes)
plt.xticks(tick_marks, LABELS, rotation=90)
plt.yticks(tick_marks, LABELS)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
sess.close()
```
## Conclusion
Outstandingly, **the accuracy is of 91%**!
This means that the neural network is almost always able to correctly identify the movement type! Remember, the phone is attached on the waist and each series to classify has just a 128 sample window of two internal sensors (a.k.a. 2.56 seconds at 50 FPS), so those predictions are extremely accurate.
I especially did not expect such good results for guessing between "WALKING", "WALKING_UPSTAIRS" and "WALKING_DOWNSTAIRS" from a cellphone alone. Though, it is still possible to see a little cluster on the matrix between those 3 classes. This is great.
It is also possible to see that it was hard to do the difference between "SITTING" and "STANDING". Those are seemingly almost the same thing from the point of view of a device placed on the belly, according to how the dataset was gathered.
I also tried my code without the gyroscope, using only the two 3D accelerometer's features (and not changing the training hyperparameters), and got an accuracy of 87%.
## Improvements
In [another repo of mine](https://github.com/guillaume-chevalier/HAR-stacked-residual-bidir-LSTMs), the accuracy is pushed up to 94% using a special deep bidirectional architecture, and this architecture is tested on another dataset. If you want to learn more about deep learning, I have built a list of ressources that I found to be useful [here](https://github.com/guillaume-chevalier/awesome-deep-learning-resources).
## References
The [dataset](https://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones) can be found on the UCI Machine Learning Repository.
> Davide Anguita, Alessandro Ghio, Luca Oneto, Xavier Parra and Jorge L. Reyes-Ortiz. A Public Domain Dataset for Human Activity Recognition Using Smartphones. 21th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning, ESANN 2013. Bruges, Belgium 24-26 April 2013.
If you want to cite my work, you can point to the URL of the GitHub repository:
> https://github.com/guillaume-chevalier/LSTM-Human-Activity-Recognition
## Connect with me
- https://ca.linkedin.com/in/chevalierg
- https://twitter.com/guillaume_che
- https://github.com/guillaume-chevalier/
```
# Let's convert this notebook to a README as the GitHub project's title page:
# ("!" lines are Jupyter shell escapes; run from the repository root.)
!jupyter nbconvert --to markdown LSTM.ipynb
!mv LSTM.md README.md
```
| github_jupyter |
# Build Clause Clusters with Book Boundaries
```
from tf.app import use
# Load the BHSA (Hebrew Bible) corpus through Text-Fabric.
bhsa = use('bhsa')
# Shorthand handles for Text-Fabric's feature / edge / text / locality APIs.
F, E, T, L = bhsa.api.F, bhsa.api.E, bhsa.api.T, bhsa.api.L
from pathlib import Path
# divide texts evenly into slices of 50 clauses
def cluster_clauses(N):
clusters = []
for book in F.otype.s('book'):
clauses = list(L.d(book,'clause'))
cluster = []
for i, clause in enumerate(clauses):
i += 1
cluster.append(clause)
# create cluster of 50
if (i and i % N == 0):
clusters.append(cluster)
cluster = []
# deal with final uneven clusters
elif i == len(clauses):
if (len(cluster) / N) < 0.6:
clusters[-1].extend(cluster) # add to last cluster
else:
clusters.append(cluster) # keep as cluster
return {
clause:i+1 for i,clust in enumerate(clusters)
for clause in clust
}
cluster_50 = cluster_clauses(50)
cluster_10 = cluster_clauses(10)
```
## Map Book-names to clause clusters
```
# map book names for visualizing
# map grouped book names
# Multi-part books (1/2 Samuel, etc.) collapse to a single label; the small
# books commented out here are grouped into composite corpora below
# (Twelve / Megilloth / Dan-Neh).
book_map = {
    'Genesis':'Gen',
    'Exodus':'Exod',
    'Leviticus':'Lev',
    'Numbers':'Num',
    'Deuteronomy':'Deut',
    'Joshua':'Josh',
    'Judges':'Judg',
    '1_Samuel':'Sam',
    '2_Samuel':'Sam',
    '1_Kings':'Kgs',
    '2_Kings':'Kgs',
    'Isaiah':'Isa',
    'Jeremiah':'Jer',
    'Ezekiel':'Ezek',
#     'Hosea':'Hos',
#     'Joel':'Joel',
#     'Amos':'Amos',
#     'Obadiah':'Obad',
#     'Jonah':'Jonah',
#     'Micah':'Mic',
#     'Nahum':'Nah',
#     'Habakkuk':'Hab',
#     'Zephaniah':'Zeph',
#     'Haggai':'Hag',
#     'Zechariah':'Zech',
#     'Malachi':'Mal',
    'Psalms':'Pss',
    'Job':'Job',
    'Proverbs':'Prov',
#     'Ruth':'Ruth',
#     'Song_of_songs':'Song',
#     'Ecclesiastes':'Eccl',
#     'Lamentations':'Lam',
#     'Esther':'Esth',
#     'Daniel':'Dan',
#     'Ezra':'Ezra',
#     'Nehemiah':'Neh',
    '1_Chronicles':'Chr',
    '2_Chronicles':'Chr'
}

# book of 12
for book in ('Hosea', 'Joel', 'Amos', 'Obadiah',
             'Jonah', 'Micah', 'Nahum', 'Habakkuk',
             'Zephaniah', 'Haggai', 'Zechariah',
             'Malachi'):
    book_map[book] = 'Twelve'

# Megilloth
for book in ('Ruth', 'Lamentations', 'Ecclesiastes',
             'Esther', 'Song_of_songs'):
    book_map[book] = 'Megil'

# Dan-Neh
for book in ('Ezra', 'Nehemiah', 'Daniel'):
    book_map[book] = 'Dan-Neh'
clustertypes = [cluster_50, cluster_10]
bookmaps = []
# For each clustering granularity, record per (grouped) book name the id of
# the first cluster whose first clause falls in that book -- used later to
# draw book boundaries.  Relies on clause nodes iterating in canonical order.
for clust in clustertypes:
    bookmap = {'Gen':1}
    prev_book = 'Gen'
    for cl in clust:
        book = T.sectionFromNode(cl)[0]
        mbook = book_map.get(book, book)
        if prev_book != mbook:
            bookmap[mbook] = clust[cl]
            prev_book = mbook
    bookmaps.append(bookmap)
```
# Export
```
import json

# Bundle both granularities with their book-boundary maps; JSON keys are the
# cluster sizes as strings.
data = {
    '50': {
        'clusters': cluster_50,
        'bookbounds': bookmaps[0],
    },
    '10': {
        'clusters': cluster_10,
        'bookbounds': bookmaps[1]
    },
}

outpath = Path('/Users/cody/github/CambridgeSemiticsLab/time_collocations/results/cl_clusters')

# parents=True creates the whole directory chain; exist_ok=True makes the
# call idempotent (the original's exists()/mkdir() pair raced and failed
# whenever an intermediate directory was missing).
outpath.mkdir(parents=True, exist_ok=True)

with open(outpath.joinpath('clusters.json'), 'w') as outfile:
    json.dump(data, outfile)
```
| github_jupyter |
# 转置卷积
:label:`sec_transposed_conv`
到目前为止,我们所见到的卷积神经网络层,例如卷积层( :numref:`sec_conv_layer`)和汇聚层( :numref:`sec_pooling`),通常会减少下采样输入图像的空间维度(高和宽)。
然而如果输入和输出图像的空间维度相同,在以像素级分类的语义分割中将会很方便。
例如,输出像素所处的通道维可以保有输入像素在同一位置上的分类结果。
为了实现这一点,尤其是在空间维度被卷积神经网络层缩小后,我们可以使用另一种类型的卷积神经网络层,它可以增加上采样中间层特征图的空间维度。
在本节中,我们将介绍
*转置卷积*(transposed convolution) :cite:`Dumoulin.Visin.2016`,
用于扭转下采样导致的空间尺寸减小。
```
import torch
from torch import nn
from d2l import torch as d2l
```
## 基本操作
让我们暂时忽略通道,从基本的转置卷积开始,设步幅为1且没有填充。
假设我们有一个$n_h \times n_w$的输入张量和一个$k_h \times k_w$的卷积核。
以步幅为1滑动卷积核窗口,每行$n_w$次,每列$n_h$次,共产生$n_h n_w$个中间结果。
每个中间结果都是一个$(n_h + k_h - 1) \times (n_w + k_w - 1)$的张量,初始化为0。
为了计算每个中间张量,输入张量中的每个元素都要乘以卷积核,从而使所得的$k_h \times k_w$张量替换中间张量的一部分。
请注意,每个中间张量被替换部分的位置与输入张量中元素的位置相对应。
最后,所有中间结果相加以获得最终结果。
例如, :numref:`fig_trans_conv` 解释了如何为$2\times 2$的输入张量计算卷积核为$2\times 2$的转置卷积。

:label:`fig_trans_conv`
我们可以对输入矩阵`X`和卷积核矩阵 `K`(**实现基本的转置卷积运算**)`trans_conv`。
```
def trans_conv(X, K):
    """Basic 2-D transposed convolution (stride 1, no padding).

    Each input element broadcasts a scaled copy of the kernel K into the
    output, so a (nh, nw) input with a (kh, kw) kernel produces an output
    of shape (nh + kh - 1, nw + kw - 1).
    """
    kh, kw = K.shape
    nh, nw = X.shape
    Y = torch.zeros((nh + kh - 1, nw + kw - 1))
    for r, row in enumerate(X):
        for c, x in enumerate(row):
            # Accumulate the kernel scaled by this input element.
            Y[r:r + kh, c:c + kw] += x * K
    return Y
```
与通过卷积核“减少”输入元素的常规卷积(在 :numref:`sec_conv_layer` 中)相比,转置卷积通过卷积核“广播”输入元素,从而产生大于输入的输出。
我们可以通过 :numref:`fig_trans_conv` 来构建输入张量 `X` 和卷积核张量 `K` 从而[**验证上述实现输出**]。
此实现是基本的二维转置卷积运算。
```
# Verify trans_conv against the worked example in the figure.
X = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
K = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
trans_conv(X, K)
```
或者,当输入`X`和卷积核`K`都是四维张量时,我们可以[**使用高级API获得相同的结果**]。
```
# Same computation via the high-level API; inputs get (batch, channel)
# dimensions because nn.ConvTranspose2d expects 4-D tensors.
X, K = X.reshape(1, 1, 2, 2), K.reshape(1, 1, 2, 2)
tconv = nn.ConvTranspose2d(1, 1, kernel_size=2, bias=False)
tconv.weight.data = K
tconv(X)
```
## [**填充、步幅和多通道**]
与常规卷积不同,在转置卷积中,填充被应用于的输出(常规卷积将填充应用于输入)。
例如,当将高和宽两侧的填充数指定为1时,转置卷积的输出中将删除第一和最后的行与列。
```
# In a transposed conv, padding=1 trims one row/column from each side of the OUTPUT.
tconv = nn.ConvTranspose2d(1, 1, kernel_size=2, padding=1, bias=False)
tconv.weight.data = K
tconv(X)
```
在转置卷积中,步幅被指定为中间结果(输出),而不是输入。
使用 :numref:`fig_trans_conv` 中相同输入和卷积核张量,将步幅从1更改为2会增加中间张量的高和宽,因此输出张量在 :numref:`fig_trans_conv_stride2` 中。

:label:`fig_trans_conv_stride2`
以下代码可以验证 :numref:`fig_trans_conv_stride2` 中步幅为2的转置卷积的输出。
```
# stride=2 spaces the kernel copies apart, enlarging the output.
tconv = nn.ConvTranspose2d(1, 1, kernel_size=2, stride=2, bias=False)
tconv.weight.data = K
tconv(X)
```
对于多个输入和输出通道,转置卷积与常规卷积以相同方式运作。
假设输入有 $c_i$ 个通道,且转置卷积为每个输入通道分配了一个 $k_h\times k_w$ 的卷积核张量。
当指定多个输出通道时,每个输出通道将有一个 $c_i\times k_h\times k_w$ 的卷积核。
同样,如果我们将 $\mathsf{X}$ 代入卷积层 $f$ 来输出 $\mathsf{Y}=f(\mathsf{X})$ ,并创建一个与 $f$ 具有相同的超参数、但输出通道数量是 $\mathsf{X}$ 中通道数的转置卷积层 $g$,那么 $g(Y)$ 的形状将与 $\mathsf{X}$ 相同。
下面的示例可以解释这一点。
```
# A transposed conv with the conv's hyperparameters (channels swapped)
# restores the spatial shape that the conv downsampled.
X = torch.rand(size=(1, 10, 16, 16))
conv = nn.Conv2d(10, 20, kernel_size=5, padding=2, stride=3)
tconv = nn.ConvTranspose2d(20, 10, kernel_size=5, padding=2, stride=3)
tconv(conv(X)).shape == X.shape
```
## [**与矩阵变换的联系**]
:label:`subsec-connection-to-mat-transposition`
转置卷积为何以矩阵变换命名呢?
让我们首先看看如何使用矩阵乘法来实现卷积。
在下面的示例中,我们定义了一个$3\times 3$的输入`X`和$2\times 2$卷积核`K`,然后使用`corr2d`函数计算卷积输出`Y`。
```
# 3x3 input and 2x2 kernel for the matrix-multiplication view of convolution.
X = torch.arange(9.0).reshape(3, 3)
K = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
Y = d2l.corr2d(X, K)
Y
```
接下来,我们将卷积核`K`重写为包含大量0的稀疏权重矩阵`W`。
权重矩阵的形状是($4$,$9$),其中非0元素来自卷积核`K`。
```
def kernel2matrix(K):
    """Rewrite a 2x2 kernel K as the sparse (4, 9) matrix W such that
    W @ vec(X) equals vec(corr2d(X, K)) for a 3x3 input X.

    The kernel rows are laid into a length-5 strip (with a zero gap for the
    input-row wrap), and that strip is shifted across W, one output position
    per row.
    """
    strip = torch.zeros(5)
    strip[0:2] = K[0, :]
    strip[3:5] = K[1, :]
    W = torch.zeros((4, 9))
    # Offsets of the strip for the four sliding-window output positions.
    for out_idx, off in enumerate((0, 1, 3, 4)):
        W[out_idx, off:off + 5] = strip
    return W
# Sparse (4, 9) weight matrix equivalent to convolving with K.
W = kernel2matrix(K)
W
```
逐行连接输入`X`,获得了一个长度为9的矢量。
然后,`W`的矩阵乘法和向量化的`X`给出了一个长度为4的向量。
重塑它之后,可以获得与上面的原始卷积操作所得相同的结果`Y`:我们刚刚使用矩阵乘法实现了卷积。
```
Y == torch.matmul(W, X.reshape(-1)).reshape(2, 2)
```
同样,我们可以使用矩阵乘法来实现转置卷积。
在下面的示例中,我们将上面的常规卷积$2 \times 2$的输出`Y`作为转置卷积的输入。
想要通过矩阵相乘来实现它,我们只需要将权重矩阵`W`的形状转置为$(9, 4)$。
```
# Transposed convolution as multiplication by W transpose.
Z = trans_conv(Y, K)
Z == torch.matmul(W.T, Y.reshape(-1)).reshape(3, 3)
```
抽象来看,给定输入向量 $\mathbf{x}$ 和权重矩阵 $\mathbf{W}$,卷积的前向传播函数可以通过将其输入与权重矩阵相乘并输出向量 $\mathbf{y}=\mathbf{W}\mathbf{x}$ 来实现。
由于反向传播遵循链规则和 $\nabla_{\mathbf{x}}\mathbf{y}=\mathbf{W}^\top$,卷积的反向传播函数可以通过将其输入与转置的权重矩阵 $\mathbf{W}^\top$ 相乘来实现。
因此,转置卷积层能够交换卷积层的正向传播函数和反向传播函数:它的正向传播和反向传播函数将输入向量分别与 $\mathbf{W}^\top$ 和 $\mathbf{W}$ 相乘。
## 小结
* 与通过卷积核减少输入元素的常规卷积相反,转置卷积通过卷积核广播输入元素,从而产生形状大于输入的输出。
* 如果我们将 $\mathsf{X}$ 输入卷积层 $f$ 来获得输出 $\mathsf{Y}=f(\mathsf{X})$ 并创造一个与 $f$ 有相同的超参数、但输出通道数是 $\mathsf{X}$ 中通道数的转置卷积层 $g$,那么 $g(Y)$ 的形状将与 $\mathsf{X}$ 相同。
* 我们可以使用矩阵乘法来实现卷积。转置卷积层能够交换卷积层的正向传播函数和反向传播函数。
## 练习
1. 在 :numref:`subsec-connection-to-mat-transposition` 中,卷积输入 `X` 和转置的卷积输出 `Z` 具有相同的形状。他们的数值也相同吗?为什么?
1. 使用矩阵乘法来实现卷积是否有效率?为什么?
[Discussions](https://discuss.d2l.ai/t/3302)
| github_jupyter |
### Analysis
1. From the tested treatments, Capomulin and Ramicane show the largest reduction in tumor volume. Given how similarly both treatments performed, further testing is necessary to determine which regimen will work the best.
2. The correlation coefficient for mouse weight and average tumor volume is approximately .83 meaning we have a very strong linear relationship between these two variables.
3. With an r-squared value of .6962, we know that approximately 70% variation from the mean is explained by our model. While this model provides a fairly strong capacity to predict tumor volume for a given weight, adding other variables like age, breed, and sex would likely increase its effectiveness.
### Import Depedencies and Read CSV Data
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
# Study data files
mouse_metadata_path = "Resources/Mouse_metadata.csv"
study_results_path = "Resources/Study_results.csv"

# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)

# Combine the data into a single dataset
combined_data_df = pd.merge(mouse_metadata, study_results, on = 'Mouse ID')

# Display the data table for preview
combined_data_df.head()

# Checking the number of mice.
mouse_count1 = combined_data_df['Mouse ID'].nunique()
mouse_count1

# check observation count
combined_data_df['Mouse ID'].count()

# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
# keep=False marks every row of each duplicated (mouse, timepoint) pair.
duplicated_vals = combined_data_df[combined_data_df.duplicated(subset = ['Mouse ID', 'Timepoint'], keep = False)]
duplicated_vals

# Create a clean DataFrame by dropping the duplicate mouse by its ID.
# NOTE(review): keep=False drops *all* rows of each duplicated pair rather
# than keeping one copy -- confirm that is the intended cleaning rule.
clean_df = combined_data_df.drop_duplicates(subset = ['Mouse ID', 'Timepoint'], keep = False)
clean_df.head()

# Checking the number of mice in the clean DataFrame.
clean_mouse_count = clean_df['Mouse ID'].nunique()
clean_mouse_count

# Check observation count on clean data
clean_df['Mouse ID'].count()
```
## Summary Statistics
```
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen: mean, median, variance, standard deviation, and SEM of the tumor volume.

# Group Dataframe by Drug Regimen
regimen_groups = clean_df.groupby(['Drug Regimen'])

# Find mean for each regimen group
regimen_mean = regimen_groups['Tumor Volume (mm3)'].mean()
# Find median for each regimen group
regimen_median = regimen_groups['Tumor Volume (mm3)'].median()
# Find variance for each regimen group
regimen_variance = regimen_groups['Tumor Volume (mm3)'].var()
# Find standard deviation for each regimen group
regimen_std = regimen_groups['Tumor Volume (mm3)'].std()
# Find sem (standard error of the mean) for each regimen group
regimen_sem = regimen_groups['Tumor Volume (mm3)'].sem()

# Assemble the resulting series into a single summary dataframe.
summary_table = pd.DataFrame({"Mean": regimen_mean,
                              "Median":regimen_median,
                              "Variance":regimen_variance,
                              "Standard Deviation": regimen_std,
                              "SEM": regimen_sem})
summary_table

# Using the aggregation method, produce the same summary statistics in a single line
aggregate_df = clean_df.groupby('Drug Regimen').aggregate({"Tumor Volume (mm3)": ['mean', 'median', 'var',
                                                                                  'std', 'sem']})
aggregate_df
```
## Bar and Pie Charts
```
# Count observations per drug regimen (computed once; the original computed
# value_counts() twice and the first `regimen_count` assignment was
# immediately shadowed by the plot call).
regimen_counts = clean_df['Drug Regimen'].value_counts()

# Generate a bar plot showing the total number of measurements taken on each
# drug regimen using pandas; .plot.bar returns the matplotlib Axes.
regimen_count = regimen_counts.plot.bar(width=0.5)

# Set labels for axes
regimen_count.set_xlabel("Drug Regimen")
regimen_count.set_ylabel("Number of Observations")
regimen_count.set_title("Treatment Regimen Observation Count")
regimen_count
# Generate a bar plot showing the total number of measurements taken on each drug regimen using pyplot.
# Determine number of data points
py_regimen_count = clean_df['Drug Regimen'].value_counts()

# Set X axis
x_axis = np.arange(len(py_regimen_count))

# Create bar plot
plt.bar(x_axis, py_regimen_count, width = 0.5)

# Set names for drug regimen groups
tick_locations = [value for value in x_axis]
plt.xticks(tick_locations, py_regimen_count.index.values)

# Change orientation of x labels
plt.xticks(rotation=90)

# Add labels and title
plt.xlabel("Drug Regimen")
plt.ylabel("Number of Observations")
plt.title('Treatment Regimen Observation Count')

# Display results
plt.show()

# Determine number of data points (shown as the cell's output)
py_regimen_count = clean_df['Drug Regimen'].value_counts()
py_regimen_count
# Pie chart of the female vs male mouse distribution (pandas API).
# Find distribution of mice by sex
sex_count = clean_df['Sex'].value_counts()
# Pie chart with percentage labels, starting at 90 degrees
sex_distribution_chart = sex_count.plot.pie(startangle=90, title='Distribution by Sex', autopct="%1.1f%%")
# Hide Y label to improve presentation
sex_distribution_chart.set_ylabel('')
# Pie chart of the female vs male mouse distribution (pyplot API).
# Identify distribution of data by sex
py_sex_distribution = clean_df['Sex'].value_counts()
# Pie chart with category labels and percentages, starting at 90 degrees
plt.pie(py_sex_distribution, labels=py_sex_distribution.index.values, startangle=90, autopct="%1.1f%%")
plt.title('Distribution by Sex')
# Display resulting plot
plt.show()
```
## Quartiles, Outliers and Boxplots
```
# Final tumor volume of each mouse across four regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin.
# Last (greatest) timepoint per mouse, merged back onto the data so every
# row knows its mouse's maximum timepoint.
maxtimept_df = pd.DataFrame(clean_df.groupby('Mouse ID')['Timepoint'].max()).reset_index().rename(columns={'Timepoint': 'Timepoint (Max)'})
clean_max_df = pd.merge(clean_df, maxtimept_df, on='Mouse ID')
clean_max_df.head()
regimens = ['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin']
regimen_values = []
for regimen in regimens:
    # Rows for this regimen only.
    selected_regimens_df = clean_max_df.loc[clean_max_df['Drug Regimen'] == regimen]
    # Keep only each mouse's final observation (timepoint == its max timepoint).
    results_df = selected_regimens_df.loc[selected_regimens_df['Timepoint'] == selected_regimens_df['Timepoint (Max)']]
    # Final tumor volumes for this regimen, collected for the box plot below.
    values = results_df['Tumor Volume (mm3)']
    regimen_values.append(values)
    # Quartiles and interquartile range.
    quartiles = values.quantile([0.25, 0.5, 0.75])
    upperquartile = quartiles[0.75]
    lowerquartile = quartiles[0.25]
    iqr = upperquartile - lowerquartile
    print(f" IQR for {regimen} is {iqr}")
    # 1.5 * IQR fences for flagging outliers.
    upper_bound = upperquartile + (1.5 * iqr)
    lower_bound = lowerquartile - (1.5 * iqr)
    print(f"Upper Bound for {regimen}: {upper_bound}")
    print(f"Lower Bound for {regimen}: {lower_bound}")
    # BUG FIX: the boolean mask must come from `values` (this regimen's final
    # volumes), not from the full clean_max_df — a boolean Series with a
    # different index cannot be used to filter `values` with .loc.
    outliers_count = (values.loc[(values >= upper_bound) |
                                 (values <= lower_bound)]).count()
    print(f" The {regimen} regimen has {outliers_count} outlier(s)")
# Box plot of the final tumor volumes across the four regimens of interest.
plt.boxplot(regimen_values)
plt.title('Tumor Volume by Drug')
plt.ylabel(' Tumor Volume (mm3)')
plt.xticks([1, 2, 3, 4], ['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin'])
```
## Line and Scatter Plots
```
# Line plot: tumor volume vs. timepoint for one Capomulin-treated mouse.
# Isolate Capomulin regimen observations
Capomulin_df = clean_df.loc[clean_df['Drug Regimen'] == 'Capomulin']
# Single mouse "b128" drives the line plot below.
Capomulin_mouse= Capomulin_df.loc[Capomulin_df['Mouse ID'] == "b128",:]
Capomulin_mouse.head()
#create chart
plt.plot(Capomulin_mouse['Timepoint'], Capomulin_mouse['Tumor Volume (mm3)'], marker = 'o')
# Add labels and title to plot
plt.xlabel("Time (days)")
plt.ylabel("Tumor Volume (mm3)")
plt.title("Capomulin Treatment for Mouse b128")
plt.show()
# Scatter plot: average tumor volume vs. mouse weight (Capomulin only).
# Isolate Capomulin regimen observations
capomulin_df = clean_df.loc[clean_df['Drug Regimen'] == 'Capomulin']
# Per-mouse mean tumor volume
Avg_Tumor_Vol = pd.DataFrame(capomulin_df.groupby('Mouse ID')['Tumor Volume (mm3)'].mean())
# Merge back onto the per-observation data; pandas suffixes the duplicate
# column with "_y", which is renamed to 'Avg. Tumor Volume'.
Average_Tumor_Volume_df =pd.merge(capomulin_df, Avg_Tumor_Vol, on = 'Mouse ID', how = "left").rename(columns = {'Tumor Volume (mm3)_y' : 'Avg. Tumor Volume'})
Average_Tumor_Volume_df.head()
# NOTE: x_axis and y_axis are reused by the regression cell further down.
x_axis = Average_Tumor_Volume_df['Weight (g)']
y_axis = Average_Tumor_Volume_df['Avg. Tumor Volume']
#Create scatter plot
plt.scatter(x_axis, y_axis)
# Add labels and title to plot
plt.xlabel("Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.title('Average Tumor Volume by Weight')
# Display plot
plt.show()
```
## Correlation and Regression
```
# Correlation and linear regression of average tumor volume on mouse weight
# for the Capomulin regimen (x_axis / y_axis come from the scatter-plot cell).
correlation = st.pearsonr(x_axis, y_axis)
print(f"""The correlation between weight and average tumor volume in the Capomulin regimen is {round((correlation[0]), 4)}.""")
# Least-squares fit: slope/intercept plus goodness-of-fit statistics.
(slope, intercept, rvalue, pvalue, stderr) = st.linregress(x_axis, y_axis)
fit_line = x_axis * slope + intercept
equation_label = f"y = {round(slope, 2)}x + {round(intercept, 2)}"
# Overlay the regression line on the scatter plot.
plt.scatter(x_axis, y_axis)
plt.plot(x_axis, fit_line, "r-")
# Axis labels and title.
plt.xlabel("Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.title('Average Tumor Volume by Weight')
# Annotate the fitted equation onto the chart.
plt.annotate(equation_label, (20, 37), fontsize=15, color="black")
plt.show()
# Coefficient of determination: how much of the variance in average tumor
# volume the weight-based model explains.
rsquared = round((rvalue**2), 4)
rsquared
```
| github_jupyter |
## TODO
* Add O2C and C2O seasonality
* Look at diff symbols
* Look at fund flows
## Key Takeaways
* ...
In the [first post](sell_in_may.html) of this short series, we covered several seasonality patterns for large cap equities (i.e, SPY), most of which continue to be in effect.
The findings of that exercise sparked interest in what similar seasonal patterns may exist in other asset classes. This post will pick up where that post left off, looking at "risk-off" assets which exhibit low (or negative) correlation to equities.
```
## Replace this section of imports with your preferred
## data download/access interface. This calls a
## proprietary set of methods (ie they won't work for you)
import sys
sys.path.append('/anaconda/')
import config
sys.path.append(config.REPO_ROOT+'data/')
from prices.eod import read
####### Below here are standard python packages ######
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from IPython import display
import seaborn as sns
from IPython.core.display import HTML,Image
## Load Data
# ETF universe: equities (SPY, IWM) plus "risk-off" candidates (AGG, LQD, IEF, MUB, GLD).
symbols = ['SPY','IWM','AGG','LQD','IEF','MUB','GLD']
#symbols = ['SPY','IWM','AGG','LQD','JNK','IEF']
# Adjusted daily closing prices from the proprietary data layer described above.
prices = read.get_symbols_close(symbols,adjusted=True)
# Simple daily returns, and daily log returns (log returns sum across periods).
returns = prices.pct_change()
log_ret = np.log(prices).diff()
```
### Month-of-year seasonality
Again, we'll start with month-of-year returns for several asset classes. Note that I'm making use of the seaborn library's excellent `clustermap()` method to both visually represent patterns in asset classes _and_ to group the assets by similarity (using Euclidean distance between the average monthly returns vectors of each column).
_Note that the values plotted are z-score values (important for accurate clustering)_.
```
# Monthly log returns: business-month-end resample of daily log returns.
by_month = log_ret.resample('BM').sum()
# Months before a fund's launch sum to exactly 0.0 — blank them (NaN) so they
# don't drag down the monthly averages.
by_month[by_month==0.0] = None
# because months prior to fund launch are summed to 0.0000
avg_monthly = by_month.groupby(by_month.index.month).mean()
# Cluster columns (assets) by similarity of their z-scored monthly profiles.
sns.clustermap(avg_monthly[symbols],row_cluster=False,z_score=True, metric='euclidean',\
cmap=sns.diverging_palette(10, 220, sep=20, n=7))
## Notes:
# should use either z_score =True or standard_scale = True for accurate clustering
# Uses Euclidean distance as metric for determining cluster
```
Clearly, the seasonal patterns we saw in the [last post](sell_in_may.html) do not generalize across all instruments - which is a very good thing! IWM (small cap equities) do more or less mimic the SPY patterns, but the "risk-off" assets generally perform well in the summer months of July and August, when equities had faltered.
We might consider a strategy of shifting from risk-on (e.g., SPY) to risk-off (e.g., IEF) for June to September.
```
# Hypothetical rotation: hold SPY Oct-May, switch to IEF for Jun-Sep.
rotation_results = pd.Series(index=avg_monthly.index)
rotation_results.loc[[1,2,3,4,5,10,11,12]] = avg_monthly['SPY']
rotation_results.loc[[6,7,8,9]] = avg_monthly['IEF']
#
print("Returns:")
# Sum of average monthly log returns: buy-and-hold SPY vs the rotation.
print(avg_monthly.SPY.sum())
print(rotation_results.sum())
print()
print("Sharpe:")
# Rough Sharpe-style ratios: annual return over annualized monthly std.
# NOTE(review): the SPY ratio uses by_month's std while the rotation uses the
# std of the 12 monthly averages — not strictly comparable; verify intent.
print(avg_monthly.SPY.sum()/(by_month.std()['SPY']*12**0.5))
print(rotation_results.sum()/(rotation_results.std()*12**0.5))
avg_monthly.SPY.std()*12**0.5
```
Next, I'll plot the same for day-of-month.
```
# Average daily log return grouped by calendar day of month (1-31).
avg_day_of_month = log_ret.groupby(log_ret.index.day).mean()
# Cluster columns by similarity of their z-scored day-of-month profiles.
sns.clustermap(avg_day_of_month[symbols],row_cluster=False,z_score= True,metric='euclidean',\
cmap=sns.diverging_palette(10, 220, sep=20, n=7))
```
This is a bit messy, but I think the dominant pattern is weakness within all "risk-off" assets (treasurys, etc...) for the first 1/3 to 1/2 of the month, followed by a very strong end of month rally.
Finally, plot a clustermap for day-of-week:
```
# Average daily log return by weekday; +1 shifts pandas' 0-based weekday to 1=Mon.
avg_day_of_week = log_ret.groupby(log_ret.index.weekday+1).mean()
# Cluster columns by similarity of their z-scored day-of-week profiles.
sns.clustermap(avg_day_of_week[symbols],row_cluster=False,z_score= True,metric='euclidean',\
cmap=sns.diverging_palette(10, 220, sep=20, n=7))
```
Again, a bit messy. However, the most consistent pattern is "avoid Thursday" for risk-off assets like AGG, LQD, and IEF. Anyone with a hypothesis as to why this might be, please do share!
### Observations
* Clusters form about as you'd expect. The "risk-off" assets like Treasurys (IEF), munis (MUB), gold (GLD), and long volatility (VXX) tend to cluster together. The "risk-on" assets like SPY, EEM, IXUS, and JNK tend to cluster together.
* Risk-off assets (Treasurys etc...) appear to follow the opposite of "sell in May", with weakness in November and December, when SPY and related were strongest.
* Within day-of-month, there are some _very_ strong patterns for fixed income, with negative days at the beginning of month and positive days at end of month.
* Day of week shows very strong clustering of risk-off assets (outperform on Fridays). There's an interesting clustering of underperformance on Mondays. This may be a false correlation since some of these funds have much shorter time histories than others and may be reflecting that
```
# Equal-weight composite of four risk-off ETFs: mean of their daily log returns.
risk_off_symbols = ['IEF','MUB','AGG','LQD']
# BUG FIX: `symbols_1` was undefined here (NameError); the composite is built
# from risk_off_symbols, defined just above.
df = log_ret[risk_off_symbols].mean(axis=1).dropna().to_frame(name='pct_chg')
# Monthly (business-month-end) log returns of the composite.
by_month = df.resample('BM').sum()
by_month['month'] = by_month.index.month
title='Avg Log Return (%): by Calendar Month \nfor Risk-off Symbols {}'.format(risk_off_symbols)
s = (by_month.groupby('month').pct_chg.mean()*100)
# Red = "sell" months, green = "hold" months in the sell-in-May framing.
my_colors = ['r','r','r','r','g','g','g','g','g','g','r','r',]
ax = s.plot(kind='bar',color=my_colors,title=title)
ax.axhline(y=0.00, color='grey', linestyle='--', lw=2)
```
Wow, maybe there's some truth to this myth! It appears that there is a strong difference between the summer months (June to September) and the rest.
From the above chart, it appears that we'd be well advised to sell on June 1st and buy back on September 30th. However, I'll follow the commonly used interpretation of selling on May 1st and repurchasing on Oct 31st. I'll group the data into those two periods and calculate the monthly average:
```
# Tag each month as the summer half ("may_oct") or the winter half ("nov_apr").
by_month['season'] = None
in_summer = by_month.month.between(5,10)
by_month.loc[in_summer,'season'] = 'may_oct'
by_month.loc[~in_summer,'season'] = 'nov_apr'
# Average monthly log return for each half of the year.
(by_month.groupby('season').pct_chg.mean()*100).plot.bar(
    title='Avg Monthly Log Return (%): \nMay-Oct vs Nov_Apr (1993-present)',
    color='grey')
```
A significant difference. The "winter" months are more than double the average return of the summer months. But has this anomaly been taken out of the market by genius quants and vampire squid? Let's look at this breakout by year:
Of these, the most interesting patterns, to me, are the day-of-week and day-of-month cycles.
### Day of Week
I'll repeat the same analysis pattern as developed in the prior post (["Sell in May"](sell_in_may.html)), using a composite of four generally "risk-off" assets. You may choose to create composites differently.
```
# Day-of-week profile for the equal-weight risk-off composite.
risk_off_symbols = ['IEF','MUB','AGG','LQD']
df = log_ret[risk_off_symbols].mean(axis=1).dropna().to_frame(name='pct_chg')
by_day = df
# +1 shifts pandas' 0-based weekday to 1=Mon ... 5=Fri.
by_day['day_of_week'] = by_day.index.weekday+ 1
# Average daily log return per weekday.
ax = (by_day.groupby('day_of_week').pct_chg.mean()*100).plot.bar(
    title='Avg Daily Log Return (%): by Day of Week \n for {}'.format(risk_off_symbols),
    color='grey')
plt.show()
# Split the week into Thursday vs every other weekday.
by_day['part_of_week'] = None
by_day.loc[by_day.day_of_week ==4,'part_of_week'] = 'thurs'
by_day.loc[by_day.day_of_week !=4,'part_of_week'] = 'fri_weds'
# BUG FIX: the title previously read 'Mon vs Tue-Fri', which did not match the
# actual grouping (Thursday vs Fri-Weds).
(by_day.groupby('part_of_week').pct_chg.mean()*100).plot.bar(
    title='Avg Daily Log Return (%): Thurs vs Fri-Weds \n for {}'.format(risk_off_symbols),
    color='grey')
# Four-year moving average of the Thursday-vs-rest split, year by year.
title='Avg Daily Log Return (%) by Part of Week\nFour Year Moving Average\n for {}'.format(risk_off_symbols)
by_day['year'] = by_day.index.year
ax = (by_day.groupby(['year','part_of_week']).pct_chg.mean().unstack().rolling(4).mean()*100).plot()
ax.axhline(y=0.00, color='grey', linestyle='--', lw=2)
ax.set_title(title)
```
The "avoid Thursday" for risk-off assets seemed to be remarkably useful until about 4 years ago, when it ceased to work. I'll call this one busted. Moving on to day-of-month, and following the same grouping and averaging approach:
```
# Day-of-month profile for the equal-weight risk-off composite.
risk_off_symbols = ['IEF','MUB','AGG','LQD']
by_day = log_ret[risk_off_symbols].mean(axis=1).dropna().to_frame(name='pct_chg')
by_day['day_of_month'] = by_day.index.day
# BUG FIX: the title referenced the undefined name `symbols_1` (NameError);
# risk_off_symbols is what the composite is actually built from.
title='Avg Daily Log Return (%): by Day of Month \nFor: {}'.format(risk_off_symbols)
ax = (by_day.groupby('day_of_month').pct_chg.mean()*100).plot.bar(xlim=(1,31),title=title,color='grey')
ax.axhline(y=0.00, color='grey', linestyle='--', lw=2)
```
Here we see the same pattern as appeared in the clustermap. I wonder if the end of month rally is being driven by the ex-div date, which I believe is usually the 1st of the month for these funds.
_Note: this data is dividend-adjusted so there is no valid reason for this - just dividend harvesting and behavioral biases, IMO._
```
# Tag each trading day: first 10 calendar days of the month vs the rest.
by_day['part_of_month'] = None
early_month = by_day.index.day <=10
by_day.loc[early_month,'part_of_month'] = 'first_10d'
by_day.loc[~early_month,'part_of_month'] = 'last_20d'
# Average daily log return for each part of the month.
bar_title = 'Avg Daily Log Return (%): \nDays 1-10 vs 11-31\nfor risk-off assets {}'.format(risk_off_symbols)
(by_day.groupby('part_of_month').pct_chg.mean()*100).plot.bar(title=bar_title, color='grey')
# Four-year moving average of the same split, computed per year.
title='Avg Daily Log Return (%) \nDays 1-10 vs 11-31\nfor risk-off assets {}'.format(risk_off_symbols)
by_day['year'] = by_day.index.year
rolling_split = by_day.groupby(['year','part_of_month']).pct_chg.mean().unstack().rolling(4).mean()*100
ax = rolling_split.plot(title=title)
ax.axhline(y=0.00, color='grey', linestyle='--', lw=2)
```
In contrast to the day-of-week anomaly, this day-of-month pattern seems to hold extremely well. It's also an extremely tradeable anomaly, considering that it requires only one round-trip per month.
```
# Annual log return: always-invested baseline vs holding only days 11-31.
baseline = by_day.resample('A').pct_chg.sum()
only_last_20 = by_day[by_day.part_of_month=='last_20d'].resample('A').pct_chg.sum()
# Build the comparison frame once, then plot it and print the per-column means.
comparison = pd.DataFrame({'baseline':baseline,'only_last_20':only_last_20})
comparison.plot.bar()
print(comparison.mean())
```
Going to cash in the first 10 days of each month actually _increased_ annualized returns (log) by about 0.60%, while simultaneously lowering capital employed and volatility of returns. Of the seasonality anomalies we've reviewed in this post and the previous, this appears to be the most robust and low risk.
## Conclusion
...
If the future looks anything like the past (insert standard disclaimer about past performance...) then rules of thumb might be:
* Sell on Labor Day and buy on Halloween - especially do this on election years! This assumes that you've got a productive use for the cash!
* Do your buying at Friday's close, do your selling at Wednesday's close
* Maximize your exposure at the end/beginning of months and during the early-middle part of the month, lighten up.
* Remember that, in most of these anomalies, _total_ return would decrease by only participating in part of the market since any positive return is better than sitting in cash. Risk-adjusted returns would be significantly improved by only participating in the most favorable periods. It's for each investor to decide what's important to them.
I had intended to extend this analysis to other asset classes, but will save that for a future post. I'd like to expand this to small caps, rest-of-world developed/emerging, fixed income, growth, value, etc...
### One last thing...
If you've found this post useful, please follow [@data2alpha](https://twitter.com/data2alpha) on twitter and forward to a friend or colleague who may also find this topic interesting.
Finally, take a minute to leave a comment below. Share your thoughts on this post or to offer an idea for future posts. Thanks for reading!
| github_jupyter |
# JWT-based authentication
In the API world, authentication is the process of verifying who a user is. In real-world applications, only authenticated users can access the API. Additionally, we may want to track how often a specific user queries an API.
To solve the complex issue of authentication, the current gold standard is the `JWT token`.
`JWT` stands for JSON Web Token.
The high level graph of the process:

1) The user requests a token, sending over his credentials (username and password).
2) The server checks the credentials and if they are correct, it generates a JWT token. The token gets sent back to the user.
3) Every time the user makes a request to any of the APIs on a certain server, it has to include the JWT token. Only the JWT token is used to authenticate the user.
# JWT token
A JWT token is just a string that has three parts separated by dots:
```
<header>.<payload>.<signature>
```
An example may look like this:
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c`
That's it — the above string is a JWT token that has a lot of information encoded into it. There are many libraries that can be used both to create and to decode a JWT token. In the subsequent chapters we will use Python implementations of JWT authentication and go through the details of the JWT token system.
# The authentication flow
All the code is in the `jwt-toke-example` directory. Be sure to run
```
docker-compose up
```
To spin up a PSQL server.
Additionally, start the API from the same directory:
```
uvicorn app:app --port 8000
```
## Step 1: Requesting a token
### User registration
In the JWT flow, we still cannot escape the good old username and password combination. We need to store this information somewhere on the server, and every time a user requests a new token, we need to check whether the user credentials are correct. For this, we need to create an endpoint for user registration and then one for token generation. For this reason, the whole process of authentication should ideally be done via HTTPS and not HTTP. For the purpose of this tutorial, we will use HTTP, because the concepts are exactly the same. HTTPS only adds a layer of encryption that protects the traffic between user and server.
The user database table is very straightforward. It contains the username, the password and the date it was created:
```
!cat jwt-token-example/models.py
```
The endpoint for user creation is `/users/register`. To register we need to send a POST request with the following data:
```
{
"username": <username>,
"password": <password>
}
```
```
# Importing the request-making lib
import requests
# Register a user against the local API (POST /users/register).
response = requests.post(
    "http://localhost:8000/users/register",
    json={"username": "eligijus", "password": "123456"}
)
# BUG FIX: the print statement lost its indentation in the export, which makes
# the `if` block a syntax error; 200/201 both indicate success.
if response.status_code in [200, 201]:
    print(f"Response: {response.json()}")
```
Now that we have a registered user we can start implementing the logic of JWT token creation.
## Step 2: Creating the JWT token
The library that creates the JWT token is called `pyjwt`. It is a Python library that can be used to create and decode JWT tokens. It is fully compliant with the [JSON Web Token standard](https://tools.ietf.org/html/rfc7519).
The token creation and inspection script is:
```
!cat jwt-token-example/jwt_tokens.py
```
The logic of creating the token is in the `create_token()` function. Remember the JWT token structure:
```
<header>.<payload>.<signature>
```
The `header` part encodes the algorithm and type needed to decode the token.
The `payload` part holds the dictionary of claims. The claims are the information that gets encoded into the token as a dictionary.
The `signature` part is the signature of the token. The Python library uses it to verify the token. The `_SECRET` constant is used to construct the signature. That is why it should be kept only as a runtime variable in an environment where no one can access it.
Lets query the endpoint `/token` using the credentials we used to register the user.
```
# Request a JWT from the API using the credentials registered above
response = requests.post(
"http://localhost:8000/token",
json={"username": "eligijus", "password": "123456"}
)
# Extract the signed token from the JSON response body
token = response.json().get('token')
# Print the received token; it is reused by the request cells below
print(f"Token: {token}")
```
The above token will be valid for 60 minutes and can be used to make requests to the API. If we make a request with a non existing user, we will get a `401 Unauthorized` error:
```
# Request a token with a wrong password; the API should reject it
response = requests.post(
"http://localhost:8000/token",
json={"username": "eligijus", "password": "12345"}
)
# Expect 401 Unauthorized for bad credentials
print(f"Response code: {response.status_code}")
```
## Step 3: Using the JWT token
Every time a user makes a request to the API, we need to include the JWT token in the request. We will use the `Authorization` header to include the token and will send a GET request to our well-known number-root-calculating API.
```
# Parameters for the root-calculation endpoint
number = 88
n = 0.88
# Authenticated request: the JWT goes in the Authorization header
response = requests.get(
f"http://localhost:8000/root?number={number}&n={n}",
headers={"Authorization": f"{token}"}
)
# Show the status code and the computed root
print(f"Response code: {response.status_code}")
print(f"Root {n} of {number} is: {response.json()}")
```
If we use a bad JWT code, a user does not exist in the database or the token has expired, we will get a 401 Unauthorized response error:
```
# Same request with a bogus token; the API should answer 401 Unauthorized
response = requests.get(
f"http://localhost:8000/root?number={number}&n={n}",
headers={"Authorization": "Hello I am a really legit token"}
)
# Show the status code and the (error) payload
print(f"Response code: {response.status_code}")
print(f"Root {n} of {number} is: {response.json()}")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/yarengozutok/HU-BBY162-2022/blob/main/Python_101-2022.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#Bölüm 00: Python'a Giriş
## Yazar Hakkında
Yaren Gözütok
##Çalışma Defteri Hakkında
Bu çalışma defteri Google'ın Jupyter Notebook platformuna benzer özellikler taşıyan Google Colab üzerinde oluşturulmuştur. Google Colab, herhangi bir altyapı düzenlemesine ihtiyaç duymadan Web tabanlı olarak Python kodları yazmanıza ve çalıştırmanıza imkan veren ücretsiz bir platformdur. Platform ile ilgili detaylı bilgiye [https://colab.research.google.com/notebooks/intro.ipynb](https://colab.research.google.com/notebooks/intro.ipynb) adresinden ulaşabilirsiniz.
Python'a giriş seviyesinde 10 dersten oluşan bu çalışma defteri daha önce kodlama deneyimi olmayan öğrenenler için hazırlanmıştır. Etkileşimli yapısından dolayı hem konu anlatımlarının hem de çalıştırılabilir örneklerin bir arada olduğu bu yapı, sürekli olarak güncellenebilecek bir altyapıya sahiptir. Bu açıdan çalışma defterinin güncel sürümünü aşağıdaki adresten kontrol etmenizi tavsiye ederim.
Sürüm 1.0: [Python 101](https://github.com/orcunmadran/Python101/blob/main/Python_101.ipynb)
İyi çalışmalar ve başarılar :)
## Kullanım Şartları
Bu çalışma defteri aşağıda belirtilen şartlar altında, katkıda bulunanlara Atıf vermek ve aynı lisansla paylaşmak kaydıyla ticari amaç dahil olmak üzere her şekilde dağıtabilir, paylaşabilir, üzerinde değişiklik yapılarak yeniden kullanılabilir.
---

Bu çalışma defteri Jetbrains'in "Introduction to Python" dersi temel alınarak hazırlanmış ve Creative Commons [Atıf-AynıLisanslaPaylaş 4.0 Uluslararası Lisansı](http://creativecommons.org/licenses/by-sa/4.0/) ile lisanslanmıştır.
---
# Bölüm 01: Giriş
Bu bölümde:
* İlk bilgisayar programımız,
* Yorumlar yer almaktadır.
## İlk Bilgisayar Programımız
Geleneksel olarak herhangi bir programlama dilinde yazılan ilk program "Merhaba Dünya!"'dır.
**Örnek Uygulama:**
```
print("Merhaba Dünya!")
```
```
# Run the example: the traditional first program
print("Merhaba Dünya!")
```
**Görev:** Kendinizi dünyaya tanıtacak ilk bilgisayar programını yazın!
```
print("Merhaba Python")
```
## Yorumlar
Python'daki yorumlar # "hash" karakteriyle başlar ve fiziksel çizginin sonuna kadar uzanır. Yorum yapmak için kullanılan # "hash" karakteri kod satırlarını geçici olarak devre dışı bırakmak amacıyla da kullanılabilir.
**Örnek Uygulama:**
```
# Bu ilk bilgisayar programım için ilk yorumum
print("# bu bir yorum değildir")
print("Merhaba!") # yorumlar kod satırının devamında da yapılabilir.
#print("Bu kod geçici olarak devre dışı bırakılmıştır.")
```
```
# Run the example: '#' starts a comment, but '#' inside a string is just text
print("# bu bir yorum değildir")
print("Merhaba!") # comments may also follow code on the same line
# print("Bu kod geçici olarak devre dışı bırakılmıştır.")
# A '#' in front of a line of code temporarily disables it (as above)
print("#Python öğreniyorum")
```
**Görev:** Python kodunuza yeni bir yorum ekleyin, mevcut satıra yorum ekleyin, yazılmış olan bir kod satırını geçici olarak devre dışı bırakın!
```
print("Bu satırın devamına bir yorum ekleyin") # task answer: inline comment added here
#print("Bu satırı devre dışı bırakın!")
```
# Bölüm 02: Değişkenler
Bu bölümde:
* Değişken nedir?,
* Değişken tanımlama,
* Değişken türleri,
* Değişken türü dönüştürme,
* Aritmetik operatörler,
* Artırılmış atama operatörleri,
* Boolean operatörleri,
* Karşılaştırma operatörleri yer almaktadır.
## Değişken Nedir?
Değişkenler değerleri depolamak için kullanılır. Böylece daha sonra bu değişkenler program içinden çağırılarak atanan değer tekrar ve tekrar kullanılabilir. Değişkenlere metinler ve / veya sayılar atanabilir. Sayı atamaları direkt rakamların yazılması ile gerçekleştirilirken, metin atamalarında metin tek tırnak içinde ( 'abc' ) ya da çift tırnak ( "abc" ) içinde atanır.
Değişkenler etiketlere benzer ve atama operatörü olarak adlandırılan eşittir ( = ) operatörü ile bir değişkene bir değer atanabilir. Bir değer ataması zincirleme şeklinde gerçekleştirilebilir. Örneğin: a = b = 2
**Örnek Uygulama 1**
Aşağıda bir "zincir atama" örneği yer almaktadır. Değer olarak atanan 2 hem "a" değişkenine, hem de "b" değişkenine atanmaktadır.
```
a = b = 2
print("a = " + str(a))
print("b = " + str(b))
```
"a" ve "b" değişkenleri başka metinler ile birlikte ekrana yazdırılmak istendiğinde metin formatına çevrilmesi gerekmektedir. Bu bağlamda kullanılan "str(a)" ve "str(b)" ifadeleri eğitimin ilerleyen bölümlerinde anlatılacaktır.
```
# Run the example: chained assignment binds both names to the same value
a = b = 2
print("a = " + str(a))
print("b = " + str(b))
# Rebinding: both names now hold 5
a = b = 5
print("a = " + str(a))
print("b = " + str(b))
```
**Örnek Uygulama 2**
```
adSoyad = "Orçun Madran"
print("Adı Soyadı: " + adSoyad)
```
```
# Run the example: assign a string to a variable and print it
adSoyad = "Orçun Madran"
print("Adı Soyadı: " + adSoyad)
# Note: AdSoyad (capital A) is a different variable — names are case-sensitive
AdSoyad = "Yaren Gözütok"
print("Adı Soyadı: " + AdSoyad)
```
**Görev:** "eposta" adlı bir değişken oluşturun. Oluşturduğunuz bu değişkene bir e-posta adresi atayın. Daha sonra atadığınız bu değeri ekrana yazdırın. Örneğin: "E-posta: orcun[at]madran.net"
```
# Task answer: store an e-mail address in a variable and print it
Eposta = "gztkyrn@gmail.com"
print("E-Posta Adresi: " + Eposta)
```
## Değişken Tanımlama
Değişken isimlerinde uyulması gereken bir takım kurallar vardır:
* Rakam ile başlayamaz.
* Boşluk kullanılamaz.
* Alt tire ( _ ) haricinde bir noktalama işareti kullanılamaz.
* Python içinde yerleşik olarak tanımlanmış anahtar kelimeler kullanılamaz (ör: print).
* Python 3. sürümden itibaren latin dışı karakter desteği olan "Unicode" desteği gelmiştir. Türkçe karakterler değişken isimlerinde kullanılabilir.
**Dikkat:** Değişken isimleri büyük-küçük harfe duyarlıdır. Büyük harfle başlanan isimlendirmeler genelde *sınıflar* için kullanılır. Değişken isimlerinin daha anlaşılır olması için deve notasyonu (camelCase) ya da alt tire kullanımı tavsiye edilir.
**Örnek Uygulama:**
```
degisken = 1
kullaniciAdi = "orcunmadran"
kul_ad = "rafet"
```
Henüz tanımlanmamış bir değişken kullanıldığında derleyicinin döndürdüğü hatayı kodu çalıştırarak gözlemleyin!
```
# Deliberate error demo: degisken2 was never defined, so this raises NameError
degisken1 = "Veri"
print(degisken2)
```
**Görev:** Tanımladığınız değişkeni ekrana yazdırın!
```
# Task answer: define a variable and print it
degisken3 = 'Yeni veri'
print("Değişkeni yaz: " + degisken3)
```
## Değişken Türleri
Python'da iki ana sayı türü vardır; tam sayılar ve ondalık sayılar.
**Dikkat:** Ondalık sayıların yazımında Türkçe'de *virgül* (,) kullanılmasına rağmen, programlama dillerinin evrensel yazım kuralları içerisinde ondalık sayılar *nokta* (.) ile ifade edilir.
**Örnek Uygulama:**
```
tamSayi = 5
print(type(tamSayi)) # prints the type of tamSayi (int)
ondalikSayi = 7.4
# BUG FIX: the original line was missing its closing parenthesis.
print(type(ondalikSayi)) # prints the type of ondalikSayi (float)
```
```
# Run the example: type() reports int for whole numbers, float for decimals
tamSayi = 5
print(type(tamSayi))
ondalikSayi = 7.4
print(type(ondalikSayi))
```
**Görev:** "sayi" değişkeninin türünü belirleyerek ekrana yazdırın!
```
# Task answer: 9.0 is a float literal, so type() prints <class 'float'>
sayi = 9.0
print(type(sayi))
```
## Değişken Türü Dönüştürme
Bir veri türünü diğerine dönüştürmenize izin veren birkaç yerleşik fonksiyon (built-in function) vardır. Bu fonksiyonlar ("int()", "str()", "float()") uygulandıkları değişkeni dönüştürerek yeni bir nesne döndürürler.
**Örnek Uygulama**
```
sayi = 6.5
print(type(sayi)) # "sayi" değişkeninin türünü ondalık olarak yazdırır
print(sayi)
sayi = int(sayi) # Ondalık sayı olan "sayi" değişkenini tam sayıya dönüştürür
print(type(sayi))
print(sayi)
sayi = float(sayi) # Tam sayı olan "sayi" değişkenini ondalık sayıya dönüştürür
print(type(sayi))
print(sayi)
sayi = str(sayi) # "sayi" değişkeni artık düz metin halini almıştır
print(type(sayi))
print(sayi)
```
```
# Run the example: convert 6.5 between float, int and str, printing type and value
sayi = 6.5
print(type(sayi))
print(sayi)
sayi = int(sayi)  # float -> int truncates: 6.5 becomes 6
print(type(sayi))
print(sayi)
sayi = float(sayi)  # int -> float: 6 becomes 6.0
print(type(sayi))
print(sayi)
sayi = str(sayi)  # float -> str: the value is now the text "6.0"
print(type(sayi))
print(sayi)
```
**Görev:** Ondalık sayıyı tam sayıya dönüştürün ve ekrana değişken türünü ve değeri yazdırın!
```
# Task answer: the same conversion chain with 3.14, then small age programs.
sayi = 3.14
print(type(sayi))
print(sayi)
sayi = int(sayi)  # 3.14 -> 3
print(type(sayi))
print(sayi)
sayi = float(sayi)  # 3 -> 3.0
print(type(sayi))
print(sayi)
sayi= str(sayi)  # now the text "3.0"
print(type(sayi))
print(sayi)
# input() always returns a string; int() is needed before arithmetic
Değer = input("Yaşınızı giriniz")
print(Değer)
print(type(Değer))
print(2022 - int(Değer))
# Birth-year program (compact form)
bulunduğumuzyıl = input("Bulunduğunuz yılı giriniz")
yaş = input("Yaşınızı giriniz")
print(int(bulunduğumuzyıl)- int(yaş))
# Birth-year program (step-by-step form)
# Get the current year
syil = input("İçinde bulunduğunuz yılı giriniz")
# Get the birth year
dtarih = input("Doğum tarihinizi giriniz")
# Convert both inputs from str to int
syil = int(syil)
dtarih = int(dtarih)
# Compute the age
yas = syil - dtarih
# Print the age
print("Yaşınız: " + str(yas))
```
## Aritmetik Operatörler
Diğer tüm programlama dillerinde olduğu gibi, toplama (+), çıkarma (-), çarpma (yıldız) ve bölme (/) operatörleri sayılarla kullanılabilir. Bunlarla birlikte Python'un üs (çift yıldız) ve mod (%) operatörleri vardır.
**Dikkat:** Matematik işlemlerinde geçerli olan aritmetik operatörlerin öncelik sıralamaları (çarpma, bölme, toplama, çıkarma) ve parantezlerin önceliği kuralları Python içindeki matematiksel işlemler için de geçerlidir.
**Örnek Uygulama:**
```
# Toplama işlemi
sayi = 7.0
sonuc = sayi + 3.5
print(sonuc)
# Çıkarma işlemi
sayi = 200
sonuc = sayi - 35
print(sonuc)
# Çarpma işlemi
sayi = 44
sonuc = sayi * 10
print(sonuc)
# Bölme işlemi
sayi = 30
sonuc = sayi / 3
print(sonuc)
# Üs alma işlemi
sayi = 30
sonuc = sayi ** 3
print(sonuc)
# Mod alma işlemi
sayi = 35
sonuc = sayi % 4
print(sonuc)
```
```
# Örnek uygulamayı çalıştır
# Toplama işlemi
sayi = 7.0
sonuc = sayi + 3.5
print(sonuc)
# Çıkarma işlemi
sayi = 200
sonuc = sayi - 35
print(sonuc)
# Çarpma işlemi
sayi = 44
sonuc = sayi * 10
print(sonuc)
# Bölme işlemi
sayi = 30
sonuc = sayi / 3
print(sonuc)
# Üs alma işlemi
sayi = 30
sonuc = sayi ** 3
print(sonuc)
# Mod alma işlemi
sayi = 35
sonuc = sayi % 4
print(sonuc)
```
**Görev:** Aşağıda değer atamaları tamamlanmış olan değişkenleri kullanarak ürünlerin peşin satın alınma bedelini TL olarak hesaplayınız ve ürün adı ile birlikte ekrana yazdırınız! İpucu: Ürün adını ve ürün bedelini tek bir satırda yazdırmak isterseniz ürün bedelini str() fonksiyonu ile düz metin değişken türüne çevirmeniz gerekir.
```
urunAdi = "Bisiklet"
urunBedeliAvro = 850
kurAvro = 10
urunAdet = input("Ürün adetini giriniz: ")
pesinAdetIndirimTL = 500
butce = 15000
hesapla = ((urunBedeliAvro* int(urunAdet)) * kurAvro) - pesinAdetIndirimTL
butceTamam = butce > hesapla
print(hesapla)
print("Alışveriş bütçeme uygun mu?" + str(butceTamam))
#Ürünlerin peşin satın alma bedelini TL olarak hesapla, ürün adı ile ekrana yazdır!
urunAdı = "Telefon"
urunBedeliAvro = 2000
kurAvro = 15
urunAdet = input("Ürün adetini giriniz: ")
pesinAdetindirimTL = 500
butce = 30000
hesapla = ((urunBedeliAvro * int(urunAdet)) * kurAvro) - pesinAdetindirimTL
butceTamam = butce > hesapla
print(hesapla)
print("Alışveriş bütçeme uygun mu?" + str(butceTamam))
```
## Artırılmış Atama Operatörleri
Artırılmış atama, bir değişkenin mevcut değerine belirlenen değerin eklenerek ( += ) ya da çıkartılarak ( -= ) atanması işlemidir.
**Örnek Uygulama**
```
sayi = 8
sayi += 4 # Mevcut değer olan 8'e 4 daha ekler.
print(sayi)
sayi -= 6 # Mevcut değer olan 12'den 6 eksiltir.
print("Sayı = " + str(sayi))
```
```
# Örnek uygulama çalıştır
sayi = 8
sayi += 4
print(sayi)
sayi -= 6
print("Sayı = " + str(sayi))
```
**Görev:** Artırılmış atama operatörleri kullanarak "sayi" değişkenine 20 ekleyip, 10 çıkartarak değişkenin güncel değerini ekrana yazdırın!
```
sayi = 55
sayi += 20
print(sayi)
sayi -= 10
print("Sayı = " + str(sayi))
```
## Boolean Operatörleri
Boolean, yalnızca **Doğru (True)** veya **Yanlış (False)** olabilen bir değer türüdür. Eşitlik (==) operatörleri karşılaştırılan iki değişkenin eşit olup olmadığını kontrol eder ve *True* ya da *False* değeri döndürür.
**Örnek Uygulama:**
```
deger1 = 10
deger2 = 10
esitMi = (deger1 == deger2) # Eşit olup olmadıkları kontrol ediliyor
print(esitMi) # Değişken "True" olarak dönüyor
deger1 = "Python"
deger2 = "Piton"
esitMi = (deger1 == deger2) # Eşit olup olmadıkları kontrol ediliyor
print(esitMi) # Değişken "False" olarak dönüyor
```
```
# Örnek uygulama çalıştır
deger1 = 10
deger2 = 10
esitMi = (deger1 == deger2)
print(esitMi)
deger1 = "Python"
deger2 = "Piton"
esitMi = (deger1 == deger2)
print(esitMi)
```
**Görev:** Atamaları yapılmış olan değişkenler arasındaki eşitliği kontrol edin ve sonucu ekrana yazdırın!
```
sifre = "Python2020"
sifreTekrar = "Piton2020"
sifrek = input("Şifrenizi giriniz: ")
print(sifrek==sifre)
#Kullanıcı adı ve şifre gir
Kullanıcıadı = "yarengozutok"
Sıfre = "tavsan23"
Kullanıcıadık = input("Kullanıcı adınızı giriniz: ")
Sıfrek = input("Şifrenizi giriniz: ")
print(Kullanıcıadık==Kullanıcıadı)
print(Sıfrek==Sıfre)
```
## Karşılaştırma Operatörleri
Python'da, >=, <= , >, < vb. dahil olmak üzere birçok operatör bulunmaktadır. Python'daki tüm karşılaştırma operatörleri aynı önceliğe sahiptir. Karşılaştırma sonucunda boole değerleri (*True* ya da *False*) döner. Karşılaştırma operatörleri isteğe bağlı olarak arka arkaya da (zincirlenerek) kullanılabilir.
**Örnek Uygulama:**
```
deger1 = 5
deger2 = 7
deger3 = 9
print(deger1 < deger2 < deger3) # Sonuç "True" olarak dönecektir
```
```
# Örnek uygulama çalıştır
deger1 = 5
deger2 = 7
deger3 = 9
print(deger1 < deger2 < deger3)
```
**Görev:** Aşağıda değer atamaları tamamlanmış olan değişkenleri kullanarak ürünlerin peşin satın alınma bedelini TL olarak hesaplayın. Toplam satın alma bedeli ile bütçenizi karşılaştırın. Satın alma bedelini ve bütçenizi ekrana yazdırın. Ödeme bütçenizi aşıyorsa ekrana "False", aşmıyorsa "True" yazdırın.
```
urunAdi = "Bisiklet"
urunBedeliAvro = 850
kurAvro = 10
urunAdet = 3
pesinAdetIndirimTL = 500
butce = 20000
hesapla= ((urunBedeliAvro*urunAdet)*kurAvro)- pesinAdetIndirimTL
butceTamam = butce > hesapla
print(hesapla)
print("Alışveriş bütçeme uygun mu? " + str(butceTamam))
yasLimiti = 13
yas = int(input( "Yaşınızı giriniz: "))
kontrol = yas >= yasLimiti
print("Youtube yayınlarını izleyebilir: " + str(kontrol))
```
# Bölüm 03: Metin Katarları
Bu bölümde:
* Birbirine bağlama,
* Metin katarı çarpımı,
* Metin katarı dizinleme,
* Metin katarı negatif dizinleme,
* Metin katarı dilimleme,
* In operatörü,
* Metin katarının uzunluğu,
* Özel karakterlerden kaçma,
* Basit metin katarı metodları,
* Metin katarı biçimlendirme yer almaktadır.
## Birbirine Bağlama
Birbirine bağlama artı (+) işlemini kullanarak iki metin katarının birleştirilmesi işlemine denir.
**Örnek Uygulama**
```
deger1 = "Merhaba"
deger2 = "Dünya"
selamlama = deger1 + " " + deger2
print(selamlama) # Çıktı: Merhaba Dünya
```
```
# Örnek uygulamayı çalıştır
deger1 = "Merhaba"
deger2 = "Dünya"
selamlama = deger1 + " " + deger2
print(selamlama)
```
**Görev:** *ad*, *soyad* ve *hitap* değişkenlerini tek bir çıktıda birleştirecek kodu yazın!
```
hitap = "Öğr. Gör."
ad = "Orçun"
soyad = "Madran"
çıktı = hitap + " " + ad + " " + soyad
print(çıktı)
# Çıktı: Öğr. Gör. Orçun Madran
```
## Metin Katarı Çarpımı
Python, metin katarlarının çarpım sayısı kadar tekrar ettirilmesini desteklemektedir.
**Örnek Uygulama**
```
metin = "Hadi! "
metniCarp = metin * 4
print(metniCarp) # Çıktı: Hadi! Hadi! Hadi! Hadi!
```
```
# Örnek uygulamayı çalıştır
metin = "Hadi! "
metniCarp = metin * 4
print(metniCarp)
```
**Görev:** Sizi sürekli bekleten arkadaşınızı uyarabilmek için istediğiniz sayıda "Hadi!" kelimesini ekrana yazdırın!
```
metin = "Hadi! "
metniCarp = metin*4
print(metniCarp)
# Çıktı: Hadi! Hadi! Hadi! Hadi! ... Hadi!
```
## Metin Katarı Dizinleme
Konumu biliniyorsa, bir metin katarındaki ilgili karaktere erişilebilir. Örneğin; str[index] metin katarındaki indeks numarasının karşılık geldiği karakteri geri döndürecektir. İndekslerin her zaman 0'dan başladığı unutulmamalıdır. İndeksler, sağdan saymaya başlamak için negatif sayılar da olabilir. -0, 0 ile aynı olduğundan, negatif indeksler -1 ile başlar.
**Örnek Uygulama**
```
metin = "Python Programlama Dili"
print("'h' harfini yakala: " + metin[3]) # Çıktı: 'h' harfini yakala: h"
```
```
# örnek uygulama çalıştır
metin = "Python Programlama Dili"
print("'h'harfini yakala: " + metin[3])
```
**Görev:** İndeks numarasını kullanarak metin katarındaki ikinci "P" harfini ekrana yazdırın!
```
#Çıktı = P
metin ="Python Programlama Dili"
print(metin[7]) # ikinci "P" harfi 7. indekste yer alır
```
## Metin Katarı Negatif Dizinleme
Metin katarının sonlarında yer alan bir karaktere daha rahat erişebilmek için indeks numarası negatif bir değer olarak belirlenebilir.
**Örnek Uygulama**
```
metin = "Python Programlama Dili"
dHarfi = metin[-4]
print(dHarfi) # Çıktı: D
```
```
# Örnek uygulama çalıştır
metin = "Python Programlama Dili"
dHarfi = metin[-4]
print(dHarfi)
```
**Görev:** Metin katarının sonunda yer alan "i" harfini ekrana yazdırın!
```
metin = "Python Programlama Dili"
print(metin[-1])
#Çıktı: i
```
## Metin Katarı Dilimleme
Dilimleme, bir metin katarından birden çok karakter (bir alt katar oluşturmak) almak için kullanılır. Söz dizimi indeks numarası ile bir karaktere erişmeye benzer, ancak iki nokta üst üste işaretiyle ayrılmış iki indeks numarası kullanılır. Ör: str[ind1:ind2].
Noktalı virgülün solundaki indeks numarası belirlenmezse ilk karakterden itibaren (ilk karakter dahil) seçimin yapılacağı anlamına gelir. Ör: str[:ind2]
Noktalı virgülün sağındaki indeks numarası belirlenmezse son karaktere kadar (son karakter dahil) seçimin yapılacağı anlamına gelir. Ör: str[ind1:]
**Örnek Uygulama**
```
metin = "Python Programlama Dili"
dilimle = metin[:6]
print(dilimle) # Çıktı: Python
metin = "Python Programlama Dili"
print(metin[7:]) # Çıktı: Programlama Dili
```
```
# Örnek uygulama çalıştır
metin = "Python Programlama Dili"
dilimle = metin[:6]
print(dilimle)
metin = "Python Programlama Dili"
print(metin[7:])
```
**Görev:** Metin katarını dilimleyerek katarda yer alan üç kelimeyi de ayrı ayrı (alt alta) ekrana yazdırın!
```
# Çıktı:
# Python
# Programlama
# Dili
metin = "Python Programlama Dili"
dilimle = metin[:6]
print(dilimle)
metin = "Python Programlama Dili"
print(metin[7:])
metin2 = "Python Programlama Dili"
dilimle2 = metin2[7:18]
print(dilimle2)
haber= "But I must explain to you how all this mistaken idea of denouncing pleasure and praising pain was born and I will give you a complete account of the system"
print(haber)
ozet = haber[:40] + " devamı için tıklayınız..."
print(ozet)
haber = "But I must explain to you how all this mistaken idea of denouncing pleasure praising pain was born"
baslangic = haber[:20]
bitis = haber[-20:]
print(baslangic + "......." + bitis)
```
## In Operatörü
Bir metin katarının belirli bir harf ya da bir alt katar içerip içermediğini kontrol etmek için, in anahtar sözcüğü kullanılır.
**Örnek Uygulama**
```
metin = "Python Programlama Dili"
print("Programlama" in metin) # Çıktı: True
```
**Görev:** Metin katarında "Python" kelimesinin geçip geçmediğini kontrol ederek ekrana yazdırın!
```
metin = "Python Programlama Dili"
arama = input("Arama yapılacak kelimeyi giriniz: ")
sonuç = arama in metin
print("Aradığınız kelime var: " + str(sonuç))
```
## Metin Katarının Uzunluğu
Bir metin katarının kaç karakter içerdiğini saymak için len() yerleşik fonksiyonu kullanılır.
**Örnek Uygulama**
```
metin = "Python programlama dili"
print(len(metin)) # Çıktı: 23
```
```
# Örnek uygulamayı çalıştır
metin = "Python programlama dili"
print(len(metin))
# 1-Bir girdiye > klavyeden
# 2-Klavyeden girilen bilginin uzunluğunu hesapla
# 3-Uzunluğu limit ile karşılaştır
# 4-Sonucu ekrana yaz
#Klavyeden girilen metnin 20 karakterden küçük ise false mesajı veren kod.
girilen = input("Metin giriniz: ")
print(girilen)
girilenKarakter = len(girilen)
print(girilenKarakter)
kontrol = girilenKarakter > 10
print(kontrol)
```
**Görev:** Metin katarındaki cümlenin ilk yarısını ekrana yazdırın! Yazılan kod cümlenin uzunluğundan bağımsız olarak cümleyi ikiye bölmelidir.
```
metin = "Python programlama dili, dünyada eğitim amacıyla en çok kullanılan programlama dillerinin başında gelir."
yarisi = len(metin) // 2 # cümlenin uzunluğundan bağımsız: tam sayı bölme ile orta noktayı bulur
print(metin[:yarisi])
# Çıktı: Python programlama dili, dünyada eğitim amacıyla en
```
## Özel Karakterlerden Kaçma
Metin katarları içerisinde tek ve çift tırnak kullanımı kimi zaman sorunlara yol açmaktadır. Bu karakterin metin katarları içerisinde kullanılabilmesi için "Ters Eğik Çizgi" ile birlikte kullanılırlar.
Örneğin: 'Önümüzdeki ay "Ankara'da Python Eğitimi" gerçekleştirilecek' cümlesindeki tek tırnak kullanımı soruna yol açacağından 'Önümüzdeki ay "Ankara\'da Python Eğitimi" gerçekleştirilecek' şeklinde kullanılmalıdır.
**İpucu:** Tek tırnaklı metin katarlarından kaçmak için çift tırnak ya da tam tersi kullanılabilir.
**Örnek Uygulama**
```
metin = 'Önümüzdeki ay "Ankara\'da Python Eğitimi" gerçekleştirilecektir.'
print(metin) #Çıktı: Önümüzdeki ay "Ankara'da Python Eğitimi" gerçekleştirilecektir.
metin = 'Önümüzdeki ay "Ankara'da Python Eğitimi" gerçekleştirilecektir.'
print(metin) # Çıktı: Geçersiz söz dizimi hatası dönecektir.
```
```
# Örnek uygulamayı çalıştır
metin = 'Önümüzdeki ay "Ankara\'da Python Eğitimi" gerçekleştirilecektir.'
print(metin)
# Örnek uygulamadaki hatayı gözlemle
metin = 'Önümüzdeki ay "Ankara'da Python Eğitimi" gerçekleştirilecektir.'
print(metin)
```
**Görev:** Metin katarındaki cümlede yer alan noktalama işaretlerinden uygun şekilde kaçarak cümleyi ekrana yazdırın!
```
metin = "Bilimsel çalışmalarda 'Python' kullanımı Türkiye'de çok yaygınlaştı!"
print(metin)
```
## Basit Metin Katarı Metodları
Python içinde birçok yerleşik metin katarı fonksiyonu vardır. En çok kullanılan fonksiyonlardan bazıları olarak;
* tüm harfleri büyük harfe dönüştüren *upper()*,
* tüm harfleri küçük harfe dönüştüren *lower()*,
* sadece cümlenin ilk harfini büyük hale getiren *capitalize()* sayılabilir.
**İpucu:** Python'daki yerleşik fonksiyonların bir listesini görüntüleyebilmek için metin katarından sonra bir nokta (.) koyulur ve uygun olan fonksiyonlar arayüz tarafından otomatik olarak listelenir. Bu yardımcı işlevi tetiklemek için CTRL + Boşluk tuş kombinasyonu da kullanılabilir.
**Örnek Uygulama**
```
metin = "Python Programlama Dili"
print(metin.lower()) # Çıktı: python programlama dili
print(metin.upper()) # Çıktı: PYTHON PROGRAMLAMA DILI
print(metin.capitalize()) # Çıktı: Python programlama dili
```
```
# Örnek uygulamayı çalıştır
metin = "Python Programlama Dili"
print(metin.lower())
print(metin.upper())
print(metin.capitalize())
```
**Görev:** *anahtarKelime* ve *arananKelime* değişkenlerinde yer alan metinler karşılaştırıldığında birbirlerine eşit (==) olmalarını sağlayın ve dönen değerin "True" olmasını sağlayın!
```
anahtarKelime = "Makine Öğrenmesi"
arananKelime = "makine öğrenmesi"
print(anahtarKelime.lower() == arananKelime) # Çıktı: True
print(anahtarKelime.lower())
```
## Metin Katarı Biçimlendirme
Bir metin katarından sonraki % operatörü, bir metin katarını değişkenlerle birleştirmek için kullanılır. % operatörü, bir metin katarındaki %s öğesini, arkasından gelen değişkenle değiştirir. %d sembolü ise, sayısal veya ondalık değerler için yer tutucu olarak kullanılır.
**Örnek Uygulama**
```
adsoyad = "Orçun Madran"
dogumTarihi = 1976
print("Merhaba, ben %s!" % adsoyad) # Çıktı: Merhaba, ben Orçun Madran!
print("Ben %d doğumluyum" % dogumTarihi) # Ben 1976 doğumluyum.
ad = "Orçun"
soyad = "Madran"
print("Merhaba, ben %s %s!" % (ad, soyad)) # Çıktı: Merhaba, ben Orçun Madran!
```
```
# Örnek uygulamayı çalıştır
adsoyad = "Orçun Madran"
dogumTarihi = 1976
print("Merhaba, ben %s!" % adsoyad)
print("Ben %d doğumluyum" % dogumTarihi)
# Örnek uygulamayı çalıştır
ad = "Orçun"
soyad = "Madran"
print("Merhaba, ben %s %s!" % (ad, soyad))
```
**Görev:** "Merhaba Orçun Madran, bu dönemki dersiniz 'Programlama Dilleri'. Başarılar!" cümlesini ekrana biçimlendirmeyi kullanarak (artı işaretini kullanmadan) yazdırın!
```
ad = "Orçun"
soyad = "Madran"
ders = "Programlama Dilleri"
print("Merhaba ben %s %s, bu dönemki dersiniz '%s'.Başarılar!" % (ad, soyad, ders))
# Çıktı: Merhaba Orçun Madran, bu dönemki dersiniz "Programlama Dilleri". Başarılar!
```
21 ŞUBAT 2022 PAZARTESİ (Buraya kadar geldik.)
---
```
ad = "Yaren"
soyad = "Gozutok"
ders = "Python"
print("Merhaba ben %s %s, bu dönemki dersim '%s' . Başarılı olacağım!" % (ad, soyad, ders))
```
# Bölüm 04: Veri Yapıları
Bu bölümde:
* Listeler,
* Liste işlemleri,
* Liste öğeleri,
* Demetler (Tuples),
* Sözlükler,
* Sözlük değerleri ve anahtarları,
* In anahtar kelimesinin kullanımı yer almaktadır.
## Listeler
Liste, birden fazla değeri tek bir değişken adı altında saklamak için kullanabileceğiniz bir veri yapısıdır. Bir liste köşeli parantez arasında virgülle ayrılmış değerler dizisi olarak yazılır. Ör: liste = [deger1, deger2].
Listeler farklı türden öğeler içerebilir, ancak genellikle listedeki tüm öğeler aynı türdedir. Metin katarları gibi listeler de dizine eklenebilir ve dilimlenebilir. (Bkz. Bölüm 3).
**Örnek Uygulama**
```
acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"] # acikListe adında yeni bir liste oluşturur
print(acikListe) # Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak']
```
```
# Örnek uygulamayı çalıştır
acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"]
print(acikListe)
```
**Görev 1:** acikListe içinde yer alan 3. liste öğesini ekrana yazdırın!
```
acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"]
print(acikListe[2])
```
**Görev 2:** acikListe içinde yer alan 4. ve 5. liste öğesini ekrana yazdırın!
```
acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"]
print(acikListe[3:5])
```
## Liste İşlemleri
append() fonksiyonunu kullanarak ya da artırılmış atama operatörü ( += ) yardımıyla listenin sonuna yeni öğeler (değerler) eklenebilir. Listelerin içindeki öğeler güncellenebilir, yani liste[indeksNo] = yeni_deger kullanarak içeriklerini değiştirmek mümkündür.
**Örnek Uygulama**
```
acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"] # acikListe adında yeni bir liste oluşturur
print(acikListe) # Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak']
acikListe += ["Açık Donanım", "Açık İnovasyon"] # listeye iki yeni öğe ekler
print(acikListe) # Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak', 'Açık Donanım', 'Açık İnovasyon']
acikListe.append("Açık Veri Gazeteciliği") # listeye yeni bir öğe ekler
print(acikListe) # Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak', 'Açık Donanım', 'Açık İnovasyon', 'Açık Veri Gazeteciliği']
acikListe[4] = "Açık Kaynak Kod" # listenin 5. öğesini değiştirir
print(acikListe) # Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak Kod', 'Açık Donanım', 'Açık İnovasyon', 'Açık Veri Gazeteciliği']
```
```
# Örnek uygulamayı çalıştır
acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"]
print(acikListe)
acikListe += ["Açık Donanım", "Açık İnovasyon"]
print(acikListe)
acikListe.append("Açık Veri Gazeteciliği")
print(acikListe)
acikListe[4] = "Açık Kaynak Kod"
print(acikListe)
#Arkadaş Listesi
Liste = ["Yaren"]
print(Liste)
yeni = input("Arkadaşının adı: ")
Liste.append(yeni)
yeni = input("Arkadaşının adı: ")
Liste.append(yeni)
print(Liste)
#Arkadaş Listesi
liste = ["Yaren Gozutok"]
ad = input("Adınızı giriniz: ")
soyad = input("Soyadınızı giriniz: ")
adsoyad = ad + " " + soyad
liste.append(adsoyad)
print(liste)
liste[0]= "Yeni Arkadaş"
print(liste)
```
**Görev:** bilgiBilim adlı bir liste oluşturun. Bu listeye bilgi bilim disiplini ile ilgili 3 adet anahtar kelime ya da kavram ekleyin. Bu listeyi ekrana yazdırın. Listeye istediğiniz bir yöntem ile (append(), +=) 2 yeni öğe ekleyin. Ekrana listenin son durumunu yazdırın. Listenizdeki son öğeyi değiştirin. Listenin son halini ekrana yazdırın.
```
#bilgiBilim
liste = ["Açık Erişim", "Açık Kaynak", "Açık Veri"]
print(liste)
yeni = input("Eklemek istediğiniz kelimeyi girin: ")
liste.append(yeni)
yeni = input("Eklemek istediğiniz kelimeyi girin: ")
liste.append(yeni)
print(liste)
```
## Liste Öğeleri
Liste öğelerini dilimleme (slice) yaparak da atamak mümkündür. Bu bir listenin boyutunu değiştirebilir veya listeyi tamamen temizleyebilir.
**Örnek Uygulama**
```
acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"] # acikListe adında yeni bir liste oluşturur
print(acikListe) # Çıktı: ['Açık Bilim', 'Açık Erişim', 'Açık Veri', 'Açık Eğitim', 'Açık Kaynak']
acikListe[2:4] = ["Açık İnovasyon"] # "Açık Veri" ve "Açık Eğitim" öğelerinin yerine tek bir öğe ekler
print(acikListe) #Çıktı: ["Açık Bilim", "Açık Erişim", "Açık İnovasyon", "Açık Kaynak"]
acikListe[:2] = [] # listenin ilk iki öğesini siler
print(acikListe) #Çıktı: ["Açık İnovasyon", "Açık Kaynak"]
acikListe[:] = [] # listeyi temizler
print(acikListe) #Çıktı: []
```
```
# Örnek uygulamayı çalıştır
acikListe = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Eğitim", "Açık Kaynak"]
print(acikListe)
acikListe[2:4] = ["Açık İnovasyon"]
print(acikListe)
acikListe[:2] = []
print(acikListe)
acikListe[:] = []
print(acikListe)
```
**Görev:** Önceki görevde oluşturulan "bilgiBilim" adlı listenin istediğiniz öğesini silerek listenin güncel halini ekrana yazdırın. Listeyi tamamen temizleyerek listenin güncel halini ekrana yazdırın.
```
#bilgiBilim
liste = ["Açık Erişim", "Açık Kaynak", "Açık Veri", "Açık Lisans", "Kamu Malı"]
print(liste)
liste [2:4] = ["Açık İnovasyon"]
print(liste)
liste [:2] = []
print(liste)
liste[:] = []
print(liste)
```
## Demetler (Tuples)
Demetler neredeyse listelerle aynı. Demetler ve listeler arasındaki tek önemli fark, demetlerin değiştirilememesidir. Demetlere öğe eklenmez, öğe değiştirilmez veya demetlerden öğe silinemez. Demetler, parantez içine alınmış bir virgül operatörü tarafından oluşturulur. Ör: demet = ("deger1", "deger2", "deger3"). Tek bir öğe demetinde ("d",) gibi bir virgül olmalıdır.
**Örnek Uygulama**
```
ulkeKodlari = ("TR", "US", "EN", "JP")
print(ulkeKodlari) # Çıktı: ('TR', 'US', 'EN', 'JP')
```
```
# Örnek uygulamayı çalıştır
ulkeKodlari = ("TR", "US", "EN", "JP")
print(ulkeKodlari)
```
**Görev:** Kongre Kütüphanesi konu başlıkları listesinin kodlarından oluşan bir demet oluşturun ve ekrana yazdırın! Oluşturulan demet içindeki tek bir öğeyi ekrana yazdırın!
```
#konuBasliklari
baslıkkodları = ("CB", "CC", "CT")
print(baslıkkodları)
print(baslıkkodları[2])
```
## Sözlükler
Sözlük, listeye benzer, ancak sözlük içindeki değerlere indeks numarası yerine bir anahtar ile erişilebilir. Bir anahtar herhangi bir metin katarı veya rakam olabilir. Sözlükler ayraç içine alınır. Ör: sozluk = {'anahtar1': "değer1", 'anahtar2': "değer2"}.
**Örnek Uygulama**
```
adresDefteri = {"Hacettepe Üniversitesi": "hacettepe.edu.tr", "ODTÜ": "odtu.edu.tr", "Bilkent Üniversitesi": "bilkent.edu.tr"} # yeni bir sözlük oluşturur
print(adresDefteri) # Çıktı: {'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.edu.tr'}
adresDefteri["Ankara Üniversitesi"] = "ankara.edu.tr" #sözlüğe yeni bir öğe ekler
print(adresDefteri) # Çıktı: {'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.edu.tr', 'Ankara Üniversitesi': 'ankara.edu.tr'}
del adresDefteri ["Ankara Üniversitesi"] #sözlükten belirtilen öğeyi siler
print(adresDefteri) # Çıktı: {'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.edu.tr'}
```
```
# Örnek uygulamayı çalıştır
adresDefteri = {"Hacettepe Üniversitesi": "hacettepe.edu.tr", "ODTÜ": "odtu.edu.tr", "Bilkent Üniversitesi": "bilkent.edu.tr"}
print(adresDefteri)
adresDefteri["Ankara Üniversitesi"] = "ankara.edu.tr"
print(adresDefteri)
del adresDefteri ["Ankara Üniversitesi"]
print(adresDefteri)
```
**Görev:** İstediğin herhangi bir konuda 5 öğeye sahip bir sözlük oluştur. Sözlüğü ekrana yazdır. Sözlükteki belirli bir öğeyi ekrana yazdır. Sözlükteki belirli bir öğeyi silerek sözlüğün güncel halini ekrana yazdır!
```
#Bilim Sözlüğü
sozluk = {"Açık Erişim": "Kamu Kaynakları..." , "Açık Veri": "Açık olarak..."}
print(sozluk)
sozluk["Açık İnovasyon"] = "Aİ......."
print(sozluk)
del sozluk["Açık Erişim"]
print(sozluk)
sozluk["Açık İnovasyon"] = "Aİ22......."
print(sozluk)
print(sozluk["Açık Veri"])
```
## Sözlük Değerleri ve Anahtarları
Sözlüklerde values() ve keys() gibi birçok yararlı fonksiyon vardır. Bir sözlük adı ve ardından noktadan sonra çıkan listeyi kullanarak geri kalan fonksiyonlar incelenebilir.
**Örnek Uygulama**
```
adresDefteri = {"Hacettepe Üniversitesi": "hacettepe.edu.tr", "ODTÜ": "odtu.edu.tr", "Bilkent Üniversitesi": "bilkent.edu.tr"} # yeni bir sözlük oluşturur
print(adresDefteri) # Çıktı: {'Hacettepe Üniversitesi': 'hacettepe.edu.tr', 'ODTÜ': 'odtu.edu.tr', 'Bilkent Üniversitesi': 'bilkent.edu.tr'}
print(adresDefteri.values()) # Çıktı: dict_values(['hacettepe.edu.tr', 'odtu.edu.tr', 'bilkent.edu.tr'])
print(adresDefteri.keys()) # Çıktı: dict_keys(['Hacettepe Üniversitesi', 'ODTÜ', 'Bilkent Üniversitesi'])
```
```
# Örnek uygulamayı çalıştır
adresDefteri = {"Hacettepe Üniversitesi": "hacettepe.edu.tr", "ODTÜ": "odtu.edu.tr", "Bilkent Üniversitesi": "bilkent.edu.tr"}
print(adresDefteri)
print(adresDefteri.values())
print(adresDefteri.keys())
```
**Görev:** İstediğin bir konuda istediğin öğe sayısına sahip bir sözlük oluştur. Sözlükler ile ilgili farklı fonksiyonları dene. Sonuçları ekrana yazdır!
```
#yeniSozluk
sozluk = {"Açık Erişim" : "Kamu kaynakları...", "Açık Veri": "Açık verilere erişim..."}
print(sozluk)
print(sozluk.values())
print(sozluk.keys())
```
## In Anahtar Kelimesi
"In" anahtar sözcüğü, bir listenin veya sözlüğün belirli bir öğe içerip içermediğini kontrol etmek için kullanılır. Daha önce metin katarlarındaki kullanıma benzer bir kullanımı vardır. "In" anahtar sözcüğü ile öğe kontrolü yapıldıktan sonra sonuç, öğe listede ya da sözlükte yer alıyorsa *True* yer almıyorsa *False* olarak geri döner.
**Dikkat**: Aranan öğe ile liste ya da sözlük içinde yer alan öğelerin karşılaştırılması sırasında büyük-küçük harf duyarlılığı bulunmaktadır. Ör: "Bilgi" ve "bilgi" iki farklı öğe olarak değerlendirilir.
**Örnek Uygulama**
```
bilgiKavramları = ["indeks", "erişim", "koleksiyon"] # yeni bir liste oluşturur
print("Erişim" in bilgiKavramları) # Çıktı: False
bilgiSozlugu = {"indeks": "index", "erişim": "access", "koleksiyon": "collection"} # yeni bir sozluk oluşturur
print("koleksiyon" in bilgiSozlugu.keys()) # çıktı: True
```
```
# Örnek uygulamayı çalıştır
bilgiKavramları = ["indeks", "erişim", "koleksiyon"]
print("Erişim" in bilgiKavramları)
bilgiSozlugu = {"indeks": "index", "erişim": "access", "koleksiyon": "collection"}
print("koleksiyon" in bilgiSozlugu.keys())
```
**Görev:** Bir liste ve bir sözlük oluşturun. Liste içinde istediğiniz kelimeyi aratın ve sonucunu ekrana yazdırın! Oluşturduğunuz sözlüğün içinde hem anahtar kelime (keys()) hem de değer (values()) kontrolü yaptırın ve sonucunu ekrana yazdırın!
```
#yeniListe
dersler = ["Bilgi Erişim", "Bilgi Hizmetleri", "Bilginin Düzenlenmesi"]
print("Bilgi Hizmetleri" in dersler)
#yeniSozluk
derssozlugu = {"Bilgi Erişim":"Bilgiye kolay eriştirme...", "Bilginin Düzenlenmesi": "AACR ve Marc..."}
print(derssozlugu.values())
print(derssozlugu.keys())
```
# Bölüm 05: Koşullu İfadeler
Bu bölümde:
* Mantıksal operatörler,
* If cümleciği,
* Else ve elif kullanımı yer almaktadır.
## Mantıksal Operatörler
Mantıksal operatörler ifadeleri karşılaştırır ve sonuçları *True* ya da *False* değerleriyle döndürür. Python'da üç tane mantıksal operatör bulunur:
1. "and" operatörü: Her iki yanındaki ifadeler doğru olduğunda *True* değerini döndürür.
2. "or" operatörü: Her iki tarafındaki ifadelerden en az bir ifade doğru olduğunda "True" değerini döndürür.
3. "not" operatörü: İfadenin tam tersi olarak değerlendirilmesini sağlar.
**Örnek Uygulama**
```
kullaniciAdi = "orcunmadran"
sifre = 123456
print(kullaniciAdi == "orcunmadran" and sifre == 123456) # Çıktı: True
kullaniciAdi = "orcunmadran"
sifre = 123456
print(kullaniciAdi == "orcunmadran" and not sifre == 123456) # Çıktı: False
cepTel = "05321234567"
ePosta = "orcunmadran@gmail.com"
print(cepTel == "" or ePosta == "orcunmadran@gmail.com" ) # Çıktı: True
```
```
# Örnek uygulamayı çalıştır
kullaniciAdi = "orcunmadran"
sifre = 123456
print(kullaniciAdi == "orcunmadran" and sifre == 123456)
kullaniciAdi = "orcunmadran"
sifre = 123456
print(kullaniciAdi == "orcunmadran" and not sifre == 123456)
cepTel = "05321234567"
ePosta = "orcunmadran@gmail.com"
print(cepTel == "" or ePosta == "orcunmadran@gmail.com" )
```
**Görev:** Klavyeden girilen kullanıcı adı ve şifrenin kayıtlı bulunan kullanıcı adı ve şifre ile uyuşup uyuşmadığını kontrol edin ve sonucu ekrana yazdırın!
```
#Sistemde yer alan bilgiler:
sisKulAdi = "yonetici"
sisKulSifre = "bby162"
#Klavyeden girilen bilgiler:
girKulAdi = input("Kullanıcı Adı: ")
girKulSifre = input("Şifre: ")
#Kontrol
sonuc = sisKulAdi == girKulAdi and sisKulSifre == girKulSifre
#Sonuç
print(sonuc)
kuladı = "yaren"
kulsifre = "12345"
girkuladı = input("Kullanıcı Adı: ")
girkulsifre = input("Şifre: ")
sonuc = kuladı == girkuladı and kulsifre == girkulsifre
print(sonuc)
```
Birden fazla koşulu and ile birleştirebiliyoruz
28 ŞUBAT 2022 PAZARTESİ (Buraya kadar geldik.)
## If Cümleciği
"If" anahtar sözcüğü, verilen ifadenin doğru olup olmadığını kontrol ettikten sonra belirtilen kodu çalıştıran bir koşullu ifade oluşturmak için kullanılır. Python'da kod bloklarının tanımlanması için girinti kullanır.
and ve or ile farklı kurgulanabiliyor.
**Örnek Uygulama**
```
acikKavramlar = ["bilim", "erişim", "veri", "eğitim"]
kavram = input("Bir açık kavramı yazın: ")
if kavram in acikKavramlar:
print(kavram + " açık kavramlar listesinde yer alıyor!")
```
: ile if cümleciği kapatılıyor.
kendine ait olan alt satırların devreye girip girmemesini kontrol ediyor.
```
#Örnek derste
deger= 1
deger2= 2
if deger == deger2:
print("birbirine eşit")
if deger != deger2:
print("birbirine eşit değil")
#kendi örneğim
deger = 1453
deger2 = 1071
if deger == deger2:
print("birbirine eşit")
if deger != deger2:
print("birbirine eşit değil")
# Örnek uygulamayı çalıştır
acikKavramlar = ["bilim", "erişim", "veri", "eğitim"]
kavram = input("Bir açık kavramı yazın: ")
if kavram in acikKavramlar:
print(kavram + " açık kavramlar listesinde yer alıyor!")
```
**Görev:** "acikSozluk" içinde yer alan anahtarları (keys) kullanarak eğer klavyeden girilen anahtar kelime sözlükte varsa açıklamasını ekrana yazdırın!
```
acikSozluk = {
"Açık Bilim" : "Bilimsel bilgi kamu malıdır. Bilimsel yayınlara ve verilere açık erişim bir haktır." ,
"Açık Erişim" : "Kamu kaynakları ile yapılan araştırmalar sonucunda üretilen yayınlara ücretsiz erişim" ,
"Açık Veri" : "Kamu kaynakları ile yapılan araştırma sonucunda üretilen verilere ücretsiz ve yeniden kullanılabilir biçimde erişim"
}
anahtar = input("Anahtar Kelime: ")
if anahtar in acikSozluk:
print(anahtar + " Açık sözlükte yer alıyor!")
#If
```
## Else ve Elif Kullanımı
"If" cümleciği içinde ikinci bir ifadenin doğruluğunun kontrolü için "Elif" ifadesi kullanılır. Doğruluğu sorgulanan ifadelerden hiçbiri *True* döndürmediği zaman çalışacak olan kod bloğu "Else" altında yer alan kod bloğudur.
**Örnek Uygulama**
```
gunler = ["Pazartesi", "Çarşamba", "Cuma"]
girilen = input("Gün giriniz: ")
if girilen == gunler[0]:
print("Programlama Dilleri")
elif girilen == gunler[1]:
print("Kataloglama")
elif girilen == gunler[2]:
print("Bilimsel İletişim")
else :
print("Kayıtlı bir gün bilgisi girmediniz!")
```
```
# Örnek uygulamayı çalıştır
gunler = ["Pazartesi", "Çarşamba", "Cuma"]
girilen = input("Gün giriniz: ")
if girilen == gunler[0]:
print("Programlama Dilleri")
elif girilen == gunler[1]:
print("Kataloglama")
elif girilen == gunler[2]:
print("Bilimsel İletişim")
else :
print("Kayıtlı bir gün bilgisi girmediniz!")
gunler = ["Pazartesi", "Salı", "Çarşamba", "Perşembe"]
girilen = input("Gün giriniz: ")
if girilen == gunler[0]:
print("Programlama Dilleri")
elif girilen == gunler[1]:
print("Türk Dili")
elif girilen == gunler[2]:
print("Bilimsel İletişim ve Bilgi Erişim")
elif girilen == gunler[3]:
print("Bilginin Düzenlenmesi")
else :
print("Kayıtlı bir gün girmediniz! ")
```
Elif birden fazla durum kontrol etmek için kullanılıyor.
**Görev:** Klavyeden girilen yaş bilgisini kullanarak ekrana aşağıdaki mesajları yazdır:
* 21 yaş altı ve 64 yaş üstü kişilere: "Sokağa çıkma yasağı bulunmaktadır!"
* Diğer tüm kişilere: "Sokağa çıkma yasağı yoktur!"
* Klavyeden yaş harici bir bilgi girişi yapıldığında: "Yaşınızı rakam olarak giriniz!"
```
yas = int(input("Yaşınızı giriniz: "))
if yas < 21:
print("Sokağa çıkma yasağı bulunmaktadır!")
elif yas > 64:
print("Sokağa çıkma yasağı bulunmaktadır!")
else:
print("Sokağa çıkma yasağı yoktur!")
```
7 MART PAZARTESİ (Buraya kadar geldik.)
# Bölüm 06: Döngüler
Bu bölümde:
* for döngüsü,
* Metin katarlarında for döngüsü kullanımı,
* while döngüsü,
* break anahtar kelimesi,
* continue anahtar kelimesi yer almaktadır.
## for Döngüsü
for döngüleri belirli komut satırını ya da satırlarını yinelemek (tekrar etmek) için kullanılır. Her yinelemede, for döngüsünde tanımlanan değişken listedeki bir sonraki değere otomatik olarak atanacaktır.
**Örnek Uygulama**
```
for i in range(5): # i değerine 0-4 arası indeks değerleri otomatik olarak atanır
print(i) # Çıktı: Bu komut satırı toplam 5 kere tekrarlanır ve her satırda yeni i değeri yazdırılır
konular = ["Açık Bilim", "Açık Erişim", "Açık Veri"] # yeni bir liste oluşturur
for konu in konular:
print(konu) #Çıktı: Her bir liste öğesi alt alta satırlara yazdırılır
```
```
# Örnek uygulmayı çalıştır
for i in range(5):
print(i+1) #Sıfırı ekranda görmemek için +1 ekledik.
#Ders örneği
liste = []
for i in range(3):
veri = input("Giriş yap: ")
liste.append(veri)
print(liste)
! range, for döngüsünde sıradaki indeks numaralarını atıyor.
range kullanmadığımızda listenin elemanlarını ekliyor
# Örnek uygulmayı çalıştır
konular = ["Açık Bilim", "Açık Erişim", "Açık Veri", "Açık Donanım"]
for konu in konular:
print(konu)
```
! Liste içindeki eleman sayısı kadar otomatik for döngüsü yapabilir.
**Görev:** Bir liste oluşturun. Liste öğelerini "for" döngüsü kullanarak ekrana yazdırın!
```
#liste
liste = ["elma", "armut", "kivi", "muz"]
for yazdır in liste:
print(yazdır)
```
## Metin Katarlarında for Döngüsü Kullanımı
Metin Katarları üzerinde gerçekleştirilebilecek işlemler Python'daki listelerle büyük benzerlik taşırlar. Metin Katarını oluşturan öğeler (harfler) liste elemanları gibi "for" döngüsü yardımıyla ekrana yazdırılabilir.
**Örnek Uygulama**
```
cumle = "Bisiklet hem zihni hem bedeni dinç tutar!"
for harf in cumle: # Cümledeki her bir harfi ekrana satır satır yazdırır
print(harf)
```
```
# Örnek uygulamayı çalıştır
cumle = "Bisiklet hem zihni, hem bedeni dinç tutar!"
for harf in cumle:
print(harf)
```
**Görev:** İçinde metin katarı bulunan bir değişken oluşturun. Bu değişkende yer alan her bir harfi bir satıra gelecek şekilde "for" döngüsü ile ekrana yazdırın!
```
#degisken
cumle = "Benim adım Yaren"
for harf in cumle:
print(harf)
```
## while Döngüsü
"While" döngüsü "if" cümleciğinin ifade şekline benzer. Koşul doğruysa döngüye bağlı kod satırı ya da satırları yürütülür (çalıştırılır). Temel fark, koşul doğru (True) olduğu sürece bağlı kod satırı ya da satırlarının çalışmaya devam etmesidir.
**Örnek Uygulama**
```
deger = 1
while deger <= 10:
print(deger) # Bu satır 10 kez tekrarlanacak
deger += 1 # Bu satır da 10 kez tekrarlanacak
print("Program bitti") # Bu satır sadece bir kez çalıştırılacak
```
```
# Örnek uygulamayı çalıştır
deger = 1
while deger <= 10:
print(deger)
deger += 1
print("Program bitti")
```
## break Anahtar Kelimesi
Asla bitmeyen döngüye sonsuz döngü adı verilir. Döngü koşulu daima doğru (True) olursa, böyle bir döngü sonsuz olur. "Break" anahtar kelimesi geçerli döngüden çıkmak için kullanılır.
**Örnek Uygulama**
```
sayi = 0
while True: # bu döngü sonsuz bir döngüdür
print(sayi)
sayi += 1
if sayi >= 5:
break # sayı değeri 5 olduğunda döngü otomatik olarak sonlanır
```
```
# Örnek Uygulamayı çalıştır
sayi = 0
while True:
print(sayi)
sayi += 1
if sayi >= 5:
break
```
## continue Anahtar Kelimesi
"continue" anahtar kelimesi, o anda yürütülen döngü için döngü içindeki kodun geri kalanını atlamak ve "for" veya "while" deyimine geri dönmek için kullanılır.
```
for i in range(5):
if i == 3:
continue # i değeri 3 olduğu anda altta yer alan "print" komutu atlanıyor.
print(i)
```
```
# Örnek Uygulamayı çalıştır
for i in range(5):
if i == 3:
continue
print(i)
```
Belirli bir kısmı atlamak için de kullanıyorduk.
**Görev: Tahmin Oyunu**
"while" döngüsü kullanarak bir tahmin oyunu tasarla. Bu tahmin oyununda, önceden belirlenmiş olan kelime ile klavyeden girilen kelime karşılaştırılmalı, tahmin doğru ise oyun "Bildiniz..!" mesajı ile sonlanmalı, yanlış ise tahmin hakkı bir daha verilmeli.
```
#Tahmin Oyunu
# Guessing game: the task requires granting another attempt after every
# wrong guess, so the prompt loops until the secret word is found.
kelime = "erişim"
print("Kelime tahmin oyununa hoş geldiniz! ")
oyuncuismi = input("İsminizi giriniz: ")
tahmin = input("Tahmininizi giriniz: " )
while tahmin != kelime:
    # Wrong guess: report it and ask again.
    print("Bilemediniz")
    tahmin = input("Tahmininizi giriniz: " )
print("Bildiniz")
```
# Bölüm 07: Fonksiyonlar
## Fonksiyon Tanımlama (Definition)
Fonksiyonlar, yazılan kodu faydalı bloklara bölmenin, daha okunabilir hale getirmenin ve tekrar kullanmaya yardımcı olmanın kullanışlı bir yoludur. Fonksiyonlar "def" anahtar sözcüğü ve ardından fonksiyonun adı kullanılarak tanımlanır.
**Örnek Uygulama**
```
def merhaba_dunya(): # fonksiyon tanımlama, isimlendirme
print("Merhaba Dünya!") #fonksiyona dahil kod satırları
for i in range(5):
merhaba_dunya() # fonksiyon 5 kere çağırılacak
```
```
# Örnek uygulamayı çalıştır
def merhaba_dunya():
    """Print a fixed greeting to standard output."""
    print("Merhaba Dünya!")

# Invoke the function five times in a row.
for _ in range(5):
    merhaba_dunya()
```
## Fonksiyonlarda Parametre Kullanımı
Fonksiyon parametreleri, fonksiyon adından sonra parantez () içinde tanımlanır. Parametre, iletilen bağımsız değişken için değişken adı görevi görür.
**Örnek Uygulama**
```
def foo(x): # x bir fonksiyon parametresidir
print("x = " + str(x))
foo(5) # 5 değeri fonksiyona iletilir ve değer olarak kullanılır.
```
```
# Örnek uygulamayı çalıştır
def foo(x):
    """Print the received argument in the form 'x = <value>'."""
    mesaj = "x = " + str(x)
    print(mesaj)

foo(5)
```
**Görev:** *karsila* fonksiyonunun tetiklenmesi için gerekli kod ve parametleri ekle!
```
def karsila(kAd, kSoyad):
    """Print a welcome line for the given first and last name."""
    mesaj = "Hoşgeldin, %s %s" % (kAd, kSoyad)
    print(mesaj)
```
##Return Değeri
Fonksiyonlar, "return" anahtar sözcüğünü kullanarak fonksiyon sonucunda bir değer döndürebilir. Döndürülen değer bir değişkene atanabilir veya sadece örneğin değeri yazdırmak için kullanılabilir.
**Örnek Uygulama**
```
def iki_sayi_topla(a, b):
return a + b # hesaplama işleminin sonucu değer olarak döndürülüyor
print(iki_sayi_topla(3, 12)) # ekrana işlem sonucu yazdırılacak
```
```
# Örnek uygulamayı çalıştır
def iki_sayi_topla(a, b):
    """Return the sum of the two arguments."""
    toplam = a + b
    return toplam

print(iki_sayi_topla(3, 12))
```
##Varsayılan Parametreler
Bazen bir veya daha fazla fonksiyon parametresi için varsayılan bir değer belirtmek yararlı olabilir. Bu, ihtiyaç duyulan parametrelerden daha az argümanla çağrılabilen bir fonksiyon oluşturur.
**Örnek Uygulama**
```
def iki_sayi_carp(a, b=2):
return a * b
print(iki_sayi_carp(3, 47)) # verilen iki degeri de kullanır
print(iki_sayi_carp(3)) # verilmeyen 2. değer yerine varsayılanı kullanır
```
```
# Örnek uygulamayı çalıştır
def iki_sayi_carp(a, b=2):
    """Return a * b; when b is omitted it defaults to 2."""
    return b * a

print(iki_sayi_carp(3, 47))
print(iki_sayi_carp(3))
```
**Örnek Uygulama: Sayısal Loto**
Aşağıda temel yapısı aynı olan iki *sayısal loto* uygulaması bulunmaktadır: Fonksiyonsuz ve fonksiyonlu.
İlk sayısal loto uygulamasında herhangi bir fonksiyon kullanımı yoktur. Her satırda 1-49 arası 6 adet sayının yer aldığı 6 satır oluşturur.
İkinci sayısal loto uygulamasında ise *tahminEt* isimli bir fonksiyon yer almaktadır. Bu fonksiyon varsayılan parametrelere sahiptir ve bu parametreler fonksiyon çağırılırken değiştirilebilir. Böylece ilk uygulamadan çok daha geniş seçenekler sunabilen bir hale gelmiştir.
```
#Sayısal Loto örnek uygulama (fonksiyonsuz)
from random import randint
i = 0
secilenler = [0,0,0,0,0,0]
for rastgele in secilenler:
while i < len(secilenler):
secilen = randint(1, 49)
if secilen not in secilenler:
secilenler[i] = secilen
i+=1
print(sorted(secilenler))
i=0
#Sayısal Loto örnek uygulama (fonksiyonlu)
from random import randint
def tahminEt(rakam=6, satir=6, baslangic=1, bitis=49):
i = 0
secilenler = []
for liste in range(rakam):
secilenler.append(0)
for olustur in range(satir):
while i < len(secilenler):
secilen = randint(baslangic, bitis)
if secilen not in secilenler:
secilenler[i] = secilen
i+=1
print(sorted(secilenler))
i=0
tahminEt(10,6,1,60)
```
**Görev:** Bu görev genel olarak fonksiyon bölümünü kapsamaktadır.
Daha önce yapmış olduğunuz "Adam Asmaca" projesini (ya da aşağıda yer alan örneği) fonksiyonlar kullanarak oyun bittiğinde tekrar başlatmaya gerek duyulmadan yeniden oynanabilmesine imkan sağlayacak şekilde yeniden kurgulayın.
Oyunun farklı sekansları için farklı fonksiyonlar tanımlayarak oyunu daha optimize hale getirmeye çalışın.
Aşağıda bir adam asmaca oyununun temel özelliklerine sahip bir örnek yer almaktadır.
```
# Hangman without functions: the player guesses letters of a randomly
# chosen word and loses a life for each letter not in the word.
#Fonksiyonsuz Adam Asmaca
from random import choice
adamCan = 3  # remaining lives
kelimeler = ["bisiklet", "triatlon", "yüzme", "koşu"]
secilenKelime = choice(kelimeler)
print(secilenKelime)  # NOTE(review): reveals the secret word — looks like leftover debug output
dizilenKelime = []
# Build the "_ _ _" placeholder list, one slot per letter of the word.
for diz in secilenKelime:
    dizilenKelime.append("_")
print(dizilenKelime)
while adamCan > 0:
    girilenHarf = input("Bir harf giriniz: ")
    # True when the guessed letter occurs anywhere in the word.
    canKontrol = girilenHarf in secilenKelime
    if canKontrol == False:
        adamCan-=1
    # Reveal every position where the guessed letter occurs.
    i = 0
    for kontrol in secilenKelime:
        if secilenKelime[i] == girilenHarf:
            dizilenKelime[i] = girilenHarf
        i+=1
    print(dizilenKelime)
    print("Kalan can: "+ str(adamCan))
    # NOTE(review): the loop only ends when lives reach 0 — there is no
    # win check once every letter is revealed. The task asks to rework
    # this with functions, which is the place to add that check.
#Fonksiyonlu Adam Asmaca
```
# Bölüm 08: Sınıflar ve Nesneler
Bu bölümde:
* Sınıf ve nesne tanımlama,
* Değişkenlere erişim,
* self parametresi,
* init metodu yer almaktadır.
## Sınıf ve Nesne Tanımlama
Bir nesne değişkenleri ve fonksiyonları tek bir varlıkta birleştirir. Nesneler değişkenlerini ve fonksiyonlarını sınıflardan alır. Sınıflar bir anlamda nesnelerinizi oluşturmak için kullanılan şablonlardır. Bir nesneyi, fonksiyonların yanı sıra veri içeren tek bir veri yapısı olarak düşünebilirsiniz. Nesnelerin fonksiyonlarına yöntem (metod) denir.
**İpucu:** Sınıf isimlerinin baş harfi büyük yazılarak Python içindeki diğer öğelerden (değişken, fonksiyon vb.) daha rahat ayırt edilmeleri sağlanır.
**Örnek Uygulama**
```
class BenimSinifim: # yeni bir sınıfın tanımlanması
bsDegisken = 4 # sınıf içinde yer alan bir değişken
def bsFonksiyon(self): #sınıf içinde yer alan bir fonksiyon
print("Benim sınıfımın fonksiyonundan Merhaba!")
benimNesnem = BenimSinifim()
```
##Değişkenlere ve Fonksiyonlara Erişim
Sınıftan örneklenen bir nesnenin içindeki bir değişkene ya da fonksiyona erişmek için öncelikle nesnenin adı daha sonra ise değişkenin ya da fonksiyonun adı çağırılmalıdır (Ör: nesneAdi.degiskenAdi). Bir sınıfın farklı örnekleri (nesneleri) içinde tanımlanan değişkenlerin değerleri değiştirilebilir.
**Örnek Uygulama 1**
```
class BenimSinifim: # yeni bir sınıf oluşturur
bsDegisken = 3 # sınıfın içinde bir değişken tanımlar
def bsFonksiyon(self): #sınıfın içinde bir fonksiyon tanımlar
print("Benim sınıfımın fonksiyonundan Merhaba!")
benimNesnem = BenimSinifim() #sınıftan yeni bir nesne oluşturur
for i in range(benimNesnem.bsDegisken): # oluşturulan nesne üzerinden değişkene ve fonksiyona ulaşılır
benimNesnem.bsFonksiyon()
benimNesnem.bsDegisken = 5 # sınıfın içinde tanımlanan değişkene yeni değer atanması
for i in range(benimNesnem.bsDegisken):
benimNesnem.bsFonksiyon()
```
```
# Örnek uygulama 1'i gözlemleyelim
class BenimSinifim:
    """Minimal demo class: one class attribute plus one method."""

    bsDegisken = 3  # read by the demo loops below to decide call counts

    def bsFonksiyon(self):
        """Print a fixed greeting line."""
        print("Benim sınıfımın fonksiyonundan Merhaba!")

benimNesnem = BenimSinifim()
# First pass: call the method once per unit of the class default (3 times).
for _ in range(benimNesnem.bsDegisken):
    benimNesnem.bsFonksiyon()
# Rebind the attribute on the instance, then call five more times.
benimNesnem.bsDegisken = 5
for _ in range(benimNesnem.bsDegisken):
    benimNesnem.bsFonksiyon()
```
Programın belirli bölümlerini tekrar kullanma ihtiyacı duyulduğunda sınıflar kullanılır. (Büyük kod parçaları için)
**Örnek Uygulama 2**
```
class Bisiklet:
renk = "Kırmızı"
vites = 1
def ozellikler(self):
ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites)
return ozellikDetay
bisiklet1 = Bisiklet()
bisiklet2 = Bisiklet()
print("Bisiklet 1: " + bisiklet1.ozellikler())
bisiklet2.renk = "Sarı"
bisiklet2.vites = 22
print("Bisiklet 2: " + bisiklet2.ozellikler())
```
```
# Örnek uygulama 2'i gözlemleyelim
class Bisiklet:
    """A bicycle described by a colour and a gear count."""

    renk = "Kırmızı"
    vites = 1

    def ozellikler(self):
        """Return a one-line description built from renk and vites."""
        return "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites)

bisiklet1 = Bisiklet()
bisiklet2 = Bisiklet()
print("Bisiklet 1: " + bisiklet1.ozellikler())
# Instance attributes shadow the class defaults for bisiklet2 only;
# bisiklet1 keeps the original values.
bisiklet2.renk = "Sarı"
bisiklet2.vites = 22
print("Bisiklet 2: " + bisiklet2.ozellikler())
```
##self Parametresi
"self" parametresi bir Python kuralıdır. "self", herhangi bir sınıf yöntemine iletilen ilk parametredir. Python, oluşturulan nesneyi belirtmek için self parametresini kullanır.
**Örnek Uygulama**
Aşağıdaki örnek uygulamada **Bisiklet** sınıfının değişkenleri olan *renk* ve *bisiklet*, sınıf içindeki fonksiyonda **self** parametresi ile birlikte kullanılmaktadır. Bu kullanım şekli sınıftan oluşturulan nesnelerin tanımlanmış değişkenlere ulaşabilmeleri için gereklidir.
```
class Bisiklet:
renk = "Kırmızı"
vites = 1
def ozellikler(self):
ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites)
return ozellikDetay
```
```
# Örnek uygulamada "self" tanımlaması yapılmadığı zaman döndürülen hata kodunu inceleyin
class Bisiklet:
renk = "Kırmızı"
vites = 1
def ozellikler(self):
ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (renk, vites) #tanımlama eksik
return ozellikDetay
bisiklet1 = Bisiklet()
bisiklet2 = Bisiklet()
print("Bisiklet 1: " + bisiklet1.ozellikler())
bisiklet2.renk = "Sarı"
bisiklet2.vites = 22
print("Bisiklet 2: " + bisiklet2.ozellikler())
```
##__init__ Metodu
__init__ fonksiyonu, oluşturduğu nesneleri başlatmak için kullanılır. init "başlat" ın kısaltmasıdır. __init__() her zaman yaratılan nesneye atıfta bulunan en az bir argüman alır: "self".
**Örnek Uygulama**
Aşağıdaki örnek uygulamada *sporDali* sınıfının içinde tanımlanan **init** fonksiyonu, sınıf oluşturulduğu anda çalışmaya başlamaktadır. Fonksiyonun ayrıca çağırılmasına gerek kalmamıştır.
```
class sporDali:
sporlar = ["Yüzme", "Bisiklet", "Koşu"]
def __init__(self):
for spor in self.sporlar:
print(spor + " bir triatlon branşıdır.")
triatlon = sporDali()
```
```
# Örnek uygulamayı çalıştır
class sporDali:
    """On construction, print each listed sport as a triathlon discipline."""

    sporlar = ["Yüzme", "Bisiklet", "Koşu"]

    def __init__(self):
        # __init__ runs automatically when the object is created, so the
        # output appears without any further method call.
        for spor in self.sporlar:
            print(spor + " bir triatlon branşıdır.")

triatlon = sporDali()
# Örnek uygulamayı > Duatlon
class sporDali:
sporlar = ["Yüzme", "Bisiklet", "Koşu"]
def __init__(self):
for spor in self.sporlar:
print(spor + " bir triatlon branşıdır.")
triatlon = sporDali()
```
#Bölüm 09: Modüller ve Paketler
##Modülün İçe Aktarılması
Python'daki modüller, Python tanımlarını (sınıflar, fonksiyonlar vb.) ve ifadelerini (değişkenler, listeler, sözlükler vb.) içeren .py uzantısına sahip Python dosyalarıdır.
Modüller, *import* anahtar sözcüğü ve uzantı olmadan dosya adı kullanılarak içe aktarılır. Bir modül, çalışan bir Python betiğine ilk kez yüklendiğinde, modüldeki kodun bir kez çalıştırılmasıyla başlatılır.
**Örnek Uygulama**
```
#bisiklet.py adlı modülün içeriği
"""
Bu modül içinde Bisiklet sınıfı yer almaktadır.
"""
class Bisiklet:
renk = "Kırmızı"
vites = 1
def ozellikler(self):
ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites)
return ozellikDetay
```
```
#bisikletler.py adlı Python dosyasının içeriği
import bisiklet
bisiklet1 = bisiklet.Bisiklet()
print("Bisiklet 1: " + bisiklet1.ozellikler())
```
**PyCharm Örneği**

bisiklet.py
---

bisikletler.py
##Colab'de Modülün İçe Aktarılması
Bir önceki bölümde (Modülün İçe Aktarılması) herhangi bir kişisel bilgisayarın sabit diski üzerinde çalışırken yerleşik olmayan (kendi yazdığımız) modülün içe aktarılması yer aldı.
Bu bölümde ise Colab üzerinde çalışırken yerleşik olmayan bir modülü nasıl içe aktarılacağı yer almakta.
**Örnek Uygulama**
Aşağıda içeriği görüntülenen *bisiklet.py* adlı Python dosyası Google Drive içerisinde "BBY162_Python_a_Giris.ipynb" dosyasının ile aynı klasör içinde bulunmaktadır.
```
#bisiklet.py adlı modülün içeriği
"""
Bu modül içinde Bisiklet sınıfı yer almaktadır.
"""
class Bisiklet:
renk = "Kırmızı"
vites = 1
def ozellikler(self):
ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites)
return ozellikDetay
```
```
# Google Drive'ın bir disk olarak görülmesi
from google.colab import drive
drive.mount('gdrive') # bağlanan diskin 'gdrive' adı ile tanımlanması.
import sys # bağlanan diskin fiziksel yolunun tespit edilmesi ve bağlantı yoluna eklenmesi
sys.path.append('/content/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/')
import bisiklet # bisiklet.py içerisindeki 'bisiklet' modülünün içe aktarılması
bisiklet1 = bisiklet.Bisiklet()
print("Bisiklet 1: " + bisiklet1.ozellikler())
```
##Yerleşik Modüller (built-in)
Python aşağıdaki bağlantıda yer alan standart modüllerle birlikte gelir. Bu modüllerin *import* anahtar kelimesi ile çağrılması yeterlidir. Ayrıca bu modüllerin yüklenmesine gerek yoktur.
[Python Standart Modülleri](https://docs.python.org/3/library/)
**Örnek Uygulama**
```
import datetime
print(datetime.datetime.today())
```
```
# Örnek uygulamayı çalıştır
import datetime
print(datetime.datetime.today())
```
##from import Kullanımı
İçe aktarma ifadesinin bir başka kullanım şekli *from* anahtar kelimesinin kullanılmasıdır. *from* ifadesi ile modül adları paketin içinde alınarak direkt kullanıma hazır hale getirilir. Bu şekilde, içe aktarılan modül, modül_adı öneki olmadan doğrudan kullanılır.
**Örnek Uygulama**
```
#bisiklet.py adlı modülün içeriği
"""
Bu modül içinde Bisiklet sınıfı yer almaktadır.
"""
class Bisiklet:
renk = "Kırmızı"
vites = 1
def ozellikler(self):
ozellikDetay = "Bu bisiklet %s renkli ve %d viteslidir." % (self.renk, self.vites)
return ozellikDetay
```
```
# Google Drive'ın bir disk olarak görülmesi
from google.colab import drive
drive.mount('gdrive') # bağlanan diskin 'gdrive' adı ile tanımlanması.
import sys # bağlanan diskin fiziksel yolunun tespit edilmesi ve bağlantı yoluna eklenmesi
sys.path.append('/content/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/')
from bisiklet import Bisiklet # bisiklet.py içerisindeki 'bisiklet' sınıfının içe aktarılması
bisiklet1 = Bisiklet() # bisiklet ön tanımlamasına gerek kalmadı
print("Bisiklet 1: " + bisiklet1.ozellikler())
```
#Bölüm 10: Dosya İşlemleri
##Dosya Okuma
Python, bilgisayarınızdaki bir dosyadan bilgi okumak ve yazmak için bir dizi yerleşik fonksiyona sahiptir. **open** fonksiyonu bir dosyayı açmak için kullanılır. Dosya, okuma modunda (ikinci argüman olarak "r" kullanılarak) veya yazma modunda (ikinci argüman olarak "w" kullanılarak) açılabilir. **open** fonksiyonu dosya nesnesini döndürür. Dosyanın saklanması için kapatılması gerekir.
**Örnek Uygulama**
```
#Google Drive Bağlantısı
from google.colab import drive
drive.mount('/gdrive')
dosya = "/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/metin.txt"
f = open(dosya, "r")
for line in f.readlines():
print(line)
f.close()
```
Dosyanın sağlıklı şekilde okunabilmesi için Google Drive ile bağlantının kurulmuş olması ve okunacak dosyanın yolunun tam olarak belirtilmesi gerekmektedir.

```
#Google Drive Bağlantısı
from google.colab import drive
drive.mount('/gdrive')
dosya = "/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/metin.txt"
f = open(dosya, "r")
for line in f.readlines():
print(line)
f.close()
from google.colab import drive
drive.mount('/content/drive')
```
##Dosya Yazma
Bir dosyayı ikinci argüman olarak "w" (yazma) kullanarak açarsanız, yeni bir boş dosya oluşturulur. Aynı ada sahip başka bir dosya varsa silineceğini unutmayın. Mevcut bir dosyaya içerik eklemek istiyorsanız "a" (ekleme) değiştiricisini kullanmalısınız.
**Örnek Uygulama**
Aşağıdaki örnekte dosya 'w' parametresi ile açıldığı için var olan dosyanın içindekiler silinir ve yeni veriler dosyaya yazılır. Dosyanın içindeki verilerin kalması ve yeni verilerin eklenmesi isteniyorsa dosya 'a' parametresi ile açılmalıdır.
```
#Google Drive Bağlantısı
from google.colab import drive
drive.mount('/gdrive')
dosya = "/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/cikti.txt"
f = open(dosya, 'w') # Mevcut veriye ek veri yazılması için parametre: 'a'
f.write("test") # Her yeni verinin bir alt satıra yazdırılması "test\n"
f.close()
```
Kod çalıştırıldıktan sonra eğer *cikti.txt* adında bir dosya yoksa otomatik olarak oluşturulur ve istenilen içerik yazılır.

```
#Google Drive Bağlantısı
from google.colab import drive
drive.mount('/gdrive')
dosya = "/gdrive/My Drive/Colab Notebooks/BBY162 - Programlama ve Algoritmalar/cikti.txt"
f = open(dosya, 'w') # Mevcut veriye ek veri yazılması için parametre: 'a'
f.write("test") # Her yeni verinin bir alt satıra yazdırılması "test\n"
f.close()
```
| github_jupyter |
## AI for Medicine Course 1 Week 1 lecture exercises
<a name="densenet"></a>
# Densenet
In this week's assignment, you'll be using a pre-trained Densenet model for image classification.
Densenet is a convolutional network where each layer is connected to all other layers that are deeper in the network
- The first layer is connected to the 2nd, 3rd, 4th etc.
- The second layer is connected to the 3rd, 4th, 5th etc.
Like this:
<img src="densenet.png" alt="U-net Image" width="400" align="middle"/>
For a detailed explanation of Densenet, check out the source of the image above, a paper by Gao Huang et al. 2018 called [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993.pdf).
The cells below are set up to provide an exploration of the Keras densenet implementation that you'll be using in the assignment. Run these cells to gain some insight into the network architecture.
```
# Import Densenet from Keras
from keras.applications.densenet import DenseNet121
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
from keras import backend as K
```
For your work in the assignment, you'll be loading a set of pre-trained weights to reduce training time.
```
# Create the base pre-trained model
base_model = DenseNet121(weights='./nih/densenet.hdf5', include_top=False);
```
View a summary of the model
```
# Print the model summary
base_model.summary()
# Print out the first five layers
layers_l = base_model.layers
print("First 5 layers")
layers_l[0:5]
# Print out the last five layers
print("Last 5 layers")
layers_l[-6:-1]
# Get the convolutional layers and print the first 5
conv2D_layers = [layer for layer in base_model.layers
if str(type(layer)).find('Conv2D') > -1]
print("The first five conv2D layers")
conv2D_layers[0:5]
# Print out the total number of convolutional layers
print(f"There are {len(conv2D_layers)} convolutional layers")
# Print the number of channels in the input
print("The input has 3 channels")
base_model.input
# Print the number of output channels
print("The output has 1024 channels")
x = base_model.output
x
# Add a global spatial average pooling layer
x_pool = GlobalAveragePooling2D()(x)
x_pool
# Define a set of five class labels to use as an example
labels = ['Emphysema',
'Hernia',
'Mass',
'Pneumonia',
'Edema']
n_classes = len(labels)
print(f"In this example, you want your model to identify {n_classes} classes")
# Add a logistic layer the same size as the number of classes you're trying to predict
predictions = Dense(n_classes, activation="sigmoid")(x_pool)
print(f"Predictions have {n_classes} units, one for each class")
predictions
# Create an updated model that maps the Densenet input to the new
# 5-unit sigmoid prediction head defined in the cells above.
model = Model(inputs=base_model.input, outputs=predictions)
# Compile the model with a placeholder loss; the assignment replaces it.
model.compile(optimizer='adam',
              loss='categorical_crossentropy')
# (You'll customize the loss function in the assignment!)
```
#### This has been a brief exploration of the Densenet architecture you'll use in this week's graded assignment!
| github_jupyter |
# Set Up
The first 5 lines are importing libraries that will be needed later in the notebook. The next lines are setting up the connection to the google service account.
# Getting a Google Service Account
Here is another great tutorial on using Google Sheets, and in the beginning it shows the steps to create a google service account to use: https://www.twilio.com/blog/2017/02/an-easy-way-to-read-and-write-to-a-google-spreadsheet-in-python.html.
After setting up the service account you have to share the google sheet with the service account so that it has permission to access it. Then all you have to do is add you client_secret.json file so that the service account can be authorized.
# Drive Folder
The drive folder were the sheets discussed here can be found at: https://drive.google.com/drive/folders/1FoTM8DRPcfbevmKnmUQN1-LPvE4oE9hJ?usp=sharing.
The Google Sheets that end with 'Orig' is how the Google sheet looked before I ran this notebook and the Google Sheets that end with 'Calculations' is what it looks like after I have ran this notebook.
```
import pandas as pd
import numpy as np
import csv
import gspread
from oauth2client.service_account import ServiceAccountCredentials
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(creds)
```
# Create Pandas Dataframes
In the next cell I will create two pandas dataframes each containing one of the two google sheets that I will connect to.
The first thing to do is to open the Google Sheets so that they can be manipulated. After the sheets are opened, I used the 'get_all_values()' function to get all of the data from each Google sheet. Note that 'get_all_values()' returns a list of lists, which is not my preferred data structure for doing math operations on.
I decided to create a dataframe out of each of those list of lists. I set the columns of the dataframe to the first list in the list, and then all the other lists were set as the data.
The last thing I do in this cell is print out one of the finished dataframes.
```
# open the google sheets
pendulum_1 = client.open('pendulum1GoodMeasurementsCalculations').sheet1
pendulum_2 = client.open('pendulum2GoodMeasurementsCalculations').sheet1
#read in the data from the spreadsheet
pendulum_1_data = pendulum_1.get_all_values()
pendulum_2_data = pendulum_2.get_all_values()
# make a pandas dataframe out of the data
pendulum_1_df = pd.DataFrame(pendulum_1_data[1:], columns = pendulum_1_data[0])
pendulum_2_df = pd.DataFrame(pendulum_2_data[1:], columns = pendulum_2_data[0])
# print out the data from one of the sheets as an example
pendulum_2_df
```
# Convert Strings to Numeric Values
For some reason the default data type of values read in from Google Sheets are strings. I can not do math operations on strings so the next cell converts the columns that I need to work with to numeric values.
```
# Convert the Time and Counts columns to numeric values.
# gspread's get_all_values() returns every cell as a string, so both
# columns must be coerced before any arithmetic is done on them.
pendulum_2_df['Time'] = pd.to_numeric(pendulum_2_df['Time'])
pendulum_2_df['Counts'] = pd.to_numeric(pendulum_2_df['Counts'])
pendulum_1_df['Time'] = pd.to_numeric(pendulum_1_df['Time'])
pendulum_1_df['Counts'] = pd.to_numeric(pendulum_1_df['Counts'])
```
# Do My Calculations
This data was originally for a lab I did in my last year of university, and the following cell is just copied from the notebook I used for it.
The lab was Kater's Pendulum and for that lab my lab partners and I had to count the number of times a pendulum passed in front of a sensor while timing how long that took. The first calculation is the period of each of the trials that were done.
After getting the period for each trial I calculated the standard deviation and the mean of the those values.
Finally I printed out those values.
```
# Calculate the period of each trial for each pendulum.
# Counts/2 is presumably the number of full oscillations (the pendulum
# passes the sensor twice per period) — confirm against the lab setup.
pendulum_1_df['Period'] = pendulum_1_df['Time'] / (pendulum_1_df['Counts'] / 2)
pendulum_2_df['Period'] = pendulum_2_df['Time'] / (pendulum_2_df['Counts'] / 2)
# calculate the standard deviation of each pendulum's period distribution
std_period1 = pendulum_1_df.loc[:,"Period"].std()
std_period2 = pendulum_2_df.loc[:,"Period"].std()
# Calculate the mean period of each pendulum
mean_period1 = pendulum_1_df.loc[:,"Period"].mean()
mean_period2 = pendulum_2_df.loc[:,"Period"].mean()
# print out the mean and the standard error of the mean for each period.
# NOTE(review): 50 is presumably the trial count per pendulum — consider
# len(pendulum_1_df) instead of the hard-coded constant.
print("Period1: " + str(mean_period1))
print("Period2: " + str(mean_period2))
print("Period1 error: " + str(std_period1/np.sqrt(50)))
print("Period2 error: " + str(std_period2/np.sqrt(50)))
```
# Get a List of New Values
In the following cell I simply took the column that I want to add to Google sheets and made it into a list.
```
# convert the Period columns to a list
period_1 = pendulum_1_df['Period'].tolist()
period_2 = pendulum_2_df['Period'].tolist()
print(period_1)
```
# Updating Google Sheets
In the next two cells I update the google sheets with the new 'Period' column. I used the 'update_cell()' function to accomplish this.
```
# add the period column to the pendulum 1 Google Sheet
# (row 1, column 7 — i.e. cell G1 — holds the column header)
pendulum_1.update_cell(1, 7, 'Period')
for row_index, curr_period in enumerate(period_1):
    # enumerate is 0-based and row 1 is the header, hence the +2 offset
    pendulum_1.update_cell(row_index + 2, 7, curr_period)
# add the period column to the pendulum 2 Google Sheet
pendulum_2.update_cell(1, 7, 'Period')
for row_index, curr_period in enumerate(period_2):
    pendulum_2.update_cell(row_index + 2, 7, curr_period)
```
# Adding Mean and Error
To finish off I added the mean and the error of the period distributions to the end of their respective google sheets.
```
# Add the mean and error in mean calculations to the google sheets.
pendulum_1.update_cell(52, 1, 'Period Mean')
pendulum_1.update_cell(52, 7, mean_period1)
pendulum_1.update_cell(53, 1, 'Error in Mean')
pendulum_1.update_cell(53, 7, std_period1/np.sqrt(50))
pendulum_2.update_cell(52, 1, 'Period Mean')
pendulum_2.update_cell(52, 7, mean_period2)
pendulum_2.update_cell(53, 1, 'Error in Mean')
pendulum_2.update_cell(53, 7, std_period2/np.sqrt(50))
```
| github_jupyter |
Simple testing of FBT in Warp. Just transform beam in a drift. No solenoid included and no inverse transform.
```
%matplotlib notebook
import sys
del sys.argv[1:]
from warp import *
from warp.data_dumping.openpmd_diag import particle_diag
import numpy as np
import os
from copy import deepcopy
import matplotlib.pyplot as plt
diagDir = 'diags/test/hdf5'

def cleanupPrevious(outputDirectory = diagDir):
    """Delete every .h5 file inside `outputDirectory`, if the directory exists."""
    if not os.path.exists(outputDirectory):
        # Nothing to clean on a fresh run.
        return
    for entry in os.listdir(outputDirectory):
        if entry.endswith('.h5'):
            os.remove(os.path.join(outputDirectory, entry))

cleanupPrevious()
def setup():
    # Intentionally empty. NOTE(review): appears to stub out Warp's
    # plot-window setup() so the notebook can run headless — confirm.
    pass
##########################################
### Create Beam and Set its Parameters ###
##########################################
top.lrelativ = True
top.relativity = 1
beam = Species(type=Electron, name='Electron')
beam.ekin = 55e6 #KE = 2.5 MeV
derivqty() #Sets addition derived parameters (such as beam.vbeam)
top.emitx = 4.0*800e-6 / top.gammabar # geometric emittance: emit_full = 4 * emit_rms
top.emity = 4.0*1e-6 / top.gammabar
beam.a0 = sqrt(top.emitx * 5.0)
beam.b0 = sqrt(top.emity * 5.0)
beam.ap0 = -1 * top.emitx * 0.0 / beam.a0
beam.bp0 = -1 * top.emity * 0.0 / beam.b0
beam.vthz = 0 #Sets the longitudinal thermal velocity (see iop_lin_002)
beam.ibeam = 0 # beam.ibeam/(top.gammabar**2) #Set correct current for relativity (see iop_lin_002)
top.npmax = 10000
w3d.distrbtn = "Gaussian0"
w3d.cylinder = True #Set True if running without envelope solver
#####################
### Setup Lattice ###
#####################
turnLength = 2.0e-3 #39.9682297148
steps = 2 #8000.
top.zlatstrt = 0#0. # z of lattice start (added to element z's on generate).
top.zlatperi = 10.0#turnLength # Lattice periodicity
top.dt = turnLength / steps / beam.vbeam
start = Marker()
drift1 = Drft(l=1e-3)
transform = Marker()
drift2 = Drft(l=1e-3)
end = Marker()
transformLine = start + drift1 + transform + drift2 + end
madtowarp(transformLine)
def FRBT(beta=5.0, alpha=0.0):
    """
    Transforms a matched flat beam to a round 'magnetized' beam.

    Builds a 6x6 linear map R from the Twiss parameters and applies it
    in place to the particle coordinates held in top.pgroup
    (xp, uxp, yp, uyp, zp, uzp), coupling the two transverse planes.

    Args:
        beta (float): Twiss beta of the matched flat beam.
        alpha (float): Twiss alpha of the matched flat beam.
    """
    # NOTE(review): the usual Twiss identity is gamma = (1 + alpha**2)/beta;
    # this uses a minus sign. Both agree for the default alpha = 0 —
    # confirm the sign before running with a nonzero alpha.
    gamma = (1. - alpha**2) / beta

    R = np.zeros([6, 6], dtype='float64')
    R[0, 0] = 1. + alpha
    R[0, 1] = beta
    R[0, 2] = 1. - alpha
    R[0, 3] = -beta
    R[1, 0] = -gamma
    R[1, 1] = 1. - alpha
    R[1, 2] = gamma
    R[1, 3] = 1. + alpha
    R[2, 0] = 1. - alpha
    R[2, 1] = -beta
    R[2, 2] = 1. + alpha
    R[2, 3] = beta
    R[3, 0] = gamma
    R[3, 1] = 1. + alpha
    R[3, 2] = -gamma
    R[3, 3] = 1. - alpha
    R[4, 4] = 2.
    R[5, 5] = 2.
    R = 0.5 * R

    # Mixed position/angle terms are rescaled elementwise by uzp because
    # the angle-like coordinates are stored as momenta uxp/uyp.
    norm = {}
    for i in range(6):
        for j in range(6):
            norm[i, j] = 1.0
    norm[0, 1] = norm[0, 3] = norm[2, 1] = norm[2, 3] = 1. / top.pgroup.uzp
    norm[1, 0] = norm[1, 2] = top.pgroup.uzp
    norm[3, 0] = norm[3, 2] = top.pgroup.uzp

    # Snapshot the coordinates first so every output component is computed
    # from the same pre-transform state.
    x = {}
    x[0] = np.copy(top.pgroup.xp)
    x[1] = np.copy(top.pgroup.uxp)
    x[2] = np.copy(top.pgroup.yp)
    x[3] = np.copy(top.pgroup.uyp)
    x[4] = np.copy(top.pgroup.zp)
    x[5] = np.copy(top.pgroup.uzp)
    print(x[0].shape)  # parenthesized so the line also runs under Python 3

    holding = []
    for i in range(6):
        val = 0
        for j in range(6):
            val += R[i, j] * x[j] * norm[i, j]
        holding.append(val)

    top.pgroup.xp = holding[0]
    top.pgroup.uxp = holding[1]
    top.pgroup.yp = holding[2]
    top.pgroup.uyp = holding[3]
    top.pgroup.zp = holding[4]
    top.pgroup.uzp = holding[5]
################################
### 3D Simulation Parameters ###
################################
top.prwall = pr1 = 0.14  # wall radius for particle scraping (m)
#Set cells
w3d.nx = 128
w3d.ny = 128
w3d.nz = 1
#Set boundaries
w3d.xmmin = -0.10
w3d.xmmax = 0.10
w3d.ymmin = -0.10
w3d.ymmax = 0.10
w3d.zmmin = -2e-3
w3d.zmmax = 2e-3
top.pboundxy = 0 # Absorbing Boundary for particles
top.ibpush = 2 # set type of pusher to vXB push without tan corrections
## 0:off, 1:fast, 2:accurate
top.fstype = -1  # field solve disabled (no self-field kick)
############################
### Particle Diagnostics ###
############################
# Two openPMD particle dumps each step: diagP0 before the step (initial
# state) and diagP after it.
diagP0 = particle_diag.ParticleDiagnostic( period=1, top=top, w3d=w3d,
    species= { species.name : species for species in listofallspecies },
    comm_world=comm_world, lparallel_output=False, write_dir = diagDir[:-4] )
diagP = particle_diag.ParticleDiagnostic( period=1, top=top, w3d=w3d,
    species= { species.name : species for species in listofallspecies },
    comm_world=comm_world, lparallel_output=False, write_dir = diagDir[:-4] )
installbeforestep( diagP0.write )
installafterstep( diagP.write )
#################################
### Generate and Run PIC Code ###
#################################
package("wxy")  # select Warp's slice (x-y) package
generate()
fieldsolve()
#installafterstep(thin_lens_lattice)
#Execute First Step
# The flat-to-round transform is applied right before the single step.
installbeforestep(FRBT)
step(1)
def readparticles(filename):
    """
    Reads in openPMD compliant particle file generated by Warp's ParticleDiagnostic class.

    Parameters:
        filename (str): Path to a ParticleDiagnostic output file.

    Returns:
        particle_arrays (dict): Dictionary with entry for each species in the file that contains an array
        of the 6D particle coordinates.
    """
    # position/x seeds the array below; the remaining five coordinates are
    # column-stacked onto it in this order.
    dims = ['momentum/x', 'position/y', 'momentum/y', 'position/z', 'momentum/z']
    particle_arrays = {}
    f = h5.File(filename, 'r')
    if f.attrs.get('openPMD') is None:
        print "Warning!: Not an openPMD file. This may not work."  # Python 2 print
    # NOTE(review): ``.keys()[0]`` only works on Python 2 / old h5py;
    # Python 3 key views are not indexable.  Grabs the first (only) step.
    step = f['data'].keys()[0]
    species_list = f['data/%s/particles' % step].keys()
    for species in species_list:
        parray = f['data/%s/particles/%s/position/x' % (step, species)]
        for dim in dims:
            parray = np.column_stack((parray, f['data/%s/particles/%s/' % (step, species) + dim]))
        particle_arrays[species] = parray
    return particle_arrays
def convertunits(particlearray):
    """Return a copy of a 6D particle array in accelerator units.

    Output columns:
        0: x (m)    1: x' = ux/uz
        2: y (m)    3: y' = uy/uz
        4: z (m)    5: p (MeV/c)
    """
    converted = deepcopy(particlearray)  # never mutate the caller's array
    uz = converted[:, 5]
    converted[:, 1] /= uz                # x' = ux / uz
    converted[:, 3] /= uz                # y' = uy / uz
    # 5.344286e-22 kg*m/s is 1 MeV/c, so this rescales uz to MeV/c.
    converted[:, 5] = uz / 5.344286E-22
    return converted
def svecplot(array):
    """Quiver plot of the transverse plane: arrows (x', y') at (x, y).

    Expects the accelerator-unit columns produced by ``convertunits``.
    Calls ``plt.show()``, so later title/label calls act on a new figure.
    """
    fig = plt.figure(figsize = (8,8))
    Q = plt.quiver(array[:,0],array[:,2],array[:,1],array[:,3])
    # NOTE(review): the key label r'$2' has an unbalanced '$' and looks
    # like a truncated LaTeX string -- confirm the intended label.
    plt.quiverkey(Q,0.0, 0.92, 0.002, r'$2', labelpos='W')
    xmax = np.max(array[:,0])
    xmin = np.min(array[:,0])
    # Pad axes 50% beyond the x extent (y axis reuses the x limits).
    plt.xlim(1.5*xmin,1.5*xmax)
    plt.ylim(1.5*xmin,1.5*xmax)
    plt.show()
# Load the pre- and post-transform electron dumps and plot both
# transverse vector fields.
init = convertunits(readparticles('diags/test/hdf5/data00000000.h5')['Electron'])
fin = convertunits(readparticles('diags/test/hdf5/data00000001.h5')['Electron'])
svecplot(init)
# NOTE(review): svecplot already called plt.show(), so these labels apply
# to the current (possibly new) figure rather than the quiver plot.
plt.title("Initial Flat Beam")
plt.xlabel("x (m)")
plt.ylabel("y (m)")
svecplot(fin)
plt.title("Magnetized Beam after FRBT")
plt.xlabel("x (m)")
plt.ylabel("y (m)")
def vortex_check(init):
    """Spot-check the FRBT map on the first particle.

    Recomputes the 4x4 transverse transform by hand for particle 0 of
    ``init`` and prints it next to the simulated result.

    NOTE(review): reads the global ``fin`` instead of a parameter, and
    hard-codes beta/alpha to FRBT's defaults.
    """
    beta = 5.0
    alpha = 0
    gamma = (1 - alpha**2) / beta
    x1 = ((1+alpha) * init[0,0] + (beta) * init[0,1] + (1-alpha) * init[0,2] + (-beta) * init[0,3]) * 0.5
    x2 = ((-gamma) * init[0,0] + (1-alpha) * init[0,1] + (gamma) * init[0,2] + (1+alpha) * init[0,3]) * 0.5
    y1 = ((1-alpha) * init[0,0] + (-beta) * init[0,1] + (1+alpha) * init[0,2] + (beta) * init[0,3]) * 0.5
    y2 = ((gamma) * init[0,0] + (1+alpha) * init[0,1] + (-gamma) * init[0,2] + (1-alpha) * init[0,3]) * 0.5
    # Python 2 prints: hand-computed value vs. simulated value.
    print x1, fin[0,0]
    print x2, fin[0,1]
    print y1, fin[0,2]
    print y2, fin[0,3]
def calc_emittance(array):
    """Return (x, y) statistical rms emittances.

    For each plane: sqrt(<u^2> * <u'^2> - <u u'>^2), with columns
    (x, x', y, y') in positions 0-3 of ``array``.
    """
    def _plane_emit(u, uprime):
        # rms emittance of a single (u, u') phase plane
        return np.sqrt(np.average(u ** 2) * np.average(uprime ** 2)
                       - np.average(u * uprime) ** 2)

    return _plane_emit(array[:, 0], array[:, 1]), _plane_emit(array[:, 2], array[:, 3])
# Emittance before/after the transform (Python 2 print statements).
epsx0,epsy0 = calc_emittance(init)
epsxf,epsyf = calc_emittance(fin)
print "Initial:\n x-emit: %s Initial y-emit: %s" % (epsx0,epsy0)
print "After Transform:\n x-emit: %s y-emit: %s" % (epsxf,epsyf)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sns
import os
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from matplotlib.mlab import PCA as mlabPCA
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
%matplotlib inline
pd.options.display.float_format = '{:.3f}'.format
# Suppress annoying harmless error.
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
# FBI UCR table 8: offenses known to law enforcement, New York by city,
# 2013. Skip the 4-row header block; 348 data rows.
base = pd.read_excel('table_8_offenses_known_to_law_enforcement_new_york_by_city_2013.xls'
                     ,encoding="latin1"
                     ,skiprows=4
                     ,nrows=348)
# NOTE(review): rape_1 turns out to be entirely N/A (checked below).
base.columns = ['city', 'population', 'violent_crime','murder','rape_1', 'rape_2',
                'robbery', 'aggravated', 'property', 'burglary',
                'theft', 'motor', 'arson']
```
# Understanding the data
## Correlations
First let's look at the initial correlations to understand what we have
```
# Shared diverging palette and font size for all correlation heatmaps.
cmap = sns.diverging_palette(128, 240,as_cmap=True)
plt.rcParams.update({'font.size': 12})
def show_corr(df):
    """Draw a lower-triangle heatmap of ``df``'s pairwise correlations.

    Args:
        df (pandas.DataFrame): frame whose numeric columns are correlated
            via ``df.corr()``.
    """
    corr = df.corr()
    # Mask the redundant upper triangle.  Use the builtin ``bool``:
    # ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24, so
    # the original ``dtype=np.bool`` raises AttributeError on current NumPy.
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    f, ax = plt.subplots(figsize=(11, 9))
    sns.heatmap(corr, mask=mask,cmap=cmap, center=0,annot=True,
                square=True, linewidths=.5, cbar_kws={"shrink": .5},fmt='.1f'
                );
# Raw-count correlations, first rows, and the (all-NaN) rape_1 column.
show_corr(base)
display(base.head(3))
print(base.rape_1.unique())
```
We notice that all the variables are highly correlated!
The assumption is that all variable are dependent on the population (the number are total number of crime per category, so there is a link between the population and all the crime numbers).
We also notice that the variable rape_1 is actually only N/A values
```
# Convert raw counts to per-capita rates; drop city name and empty rape_1.
per_pop = base.copy()
per_pop = per_pop.drop(['city','rape_1'],axis=1)
for col in ['violent_crime','murder', 'rape_2','robbery', 'aggravated', 'property', 'burglary','theft', 'motor']:
    per_pop[col] = per_pop[col]/per_pop.population
show_corr(per_pop)
```
That is much better !
Having the crime rates allows us to notice that there is a very high correlation between *property* crimes and *theft*; we could make a first model based on that!
Also, apart from *arson*, we can also see that there is very little correlation between the population of the city and the different crime rate, especially for the type of crime we are looking into : *property* and *theft*.
```
# Visual check of the theft vs. property-crime relationship (rates).
plt.scatter(per_pop.theft,per_pop.property,s=3)
plt.title("Theft and Property crime (per population)")
```
Indeed they seem very correlated graphically!
We notice there is an outlier : it seems to be aligned with the rest of the group, but we will need to make sure it does not have a disproportionate influence on the regression.
```
# First OLS: predict property-crime rate from the theft rate alone.
x = per_pop[['theft']]
y = per_pop.property
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# Instantiate our model.
regr = linear_model.LinearRegression()
# Fit our model to our data.
regr.fit(X_train,y_train)
# Display the attributes we calculated.
print('Coefficients: \n', regr.coef_)
print('Intercept: \n', regr.intercept_)
print("Train score:",regr.score(X_train,y_train))
print("Test score:",regr.score(X_test,y_test))
# Plot outputs
plt.scatter(X_test,y_test, color='black',s=2,label="Test values")
plt.scatter(X_test, regr.predict(X_test), color='red',s=1,label="Predicted values")
plt.legend()
plt.show()
# Residual diagnostics over the full data set.
predicted = regr.predict(x)
residual = y - predicted
plt.hist(residual,bins=30);
plt.title("Residual histogram")
plt.scatter(predicted, residual)
plt.xlabel('Predicted')
plt.ylabel('Residual')
plt.axhline(y=0)
plt.title('Residual vs. Predicted')
plt.show()
# 10-fold cross-validation of the same one-feature model.
from sklearn.model_selection import cross_val_score
cross_val_score(regr, x, y, cv=10)
```
# First regression discussion
Indeed we are able to explain around 95% of the value of the Property crime per population.
The residuals are almost normally distributed, but there are a couple of errors farther from 0 that are higher than expected.
Also the plot of the residual and expected show some heteroscedasticity.
As this is only our first regression with one variable, we will try to improve it with the other variables.
We also notice the outlier is visible on the graph and presents a higher-than-expected error: this seems related to the population. Looking at the distribution of population, there is only one city with more than 1 million inhabitants; this outlier might have a disproportionate influence on the regression, so we will take it out.
Also, **Arson** behaves in a strange manner, with N/A values linked to the way the crimes are reported. The median value is 0, so we can safely replace N/A with 0, but we will record which values were N/A with a categorical variable.
# Second regression
As there are not many variable, and we already have a very good prediction with theft, let's look iteratively at the features, using the minimum of the cross validation test score.
```
# Greedy feature scan: start from theft + burglary, try each extra column
# and score by the worst fold of 15-fold cross-validation.
for_reg = per_pop.sort_values("population").reset_index().fillna(0)
y = for_reg[["property"]]
for col in ['population', 'violent_crime', 'murder', 'rape_2', 'robbery',
            'aggravated', 'motor', 'arson',]:
    x = for_reg[['theft', 'burglary', col]]
    print(col,min(cross_val_score(regr, x, y, cv=15)))
x = for_reg[['theft', 'burglary', 'motor']]
cross_val_score(regr, x, y, cv=35)
# Refit on the winning trio (theft, burglary, motor) and inspect the fit.
y = for_reg[["property"]]
x = for_reg[['theft', 'burglary',"motor"]]
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.10)
# Instantiate our model.
regr = linear_model.LinearRegression()
# Fit our model to our data.
regr.fit(X_train,y_train)
# Display the attributes we calculated.
print('Coefficients: \n', regr.coef_)
print('Intercept: \n', regr.intercept_)
print("Train score:",regr.score(X_train,y_train))
print("Test score:",regr.score(X_test,y_test))
# Plot outputs
plt.scatter(X_test.theft,y_test, color='black',s=2,label="Test values")
plt.scatter(X_test.theft, regr.predict(X_test), color='red',s=1,label="Predicted values")
plt.legend()
plt.show()
```
# Getting the R2 score for the **Property Crime**
The regression was on the property crime per population, let's check the R2 for the actual R2 value.
```
from sklearn.metrics import r2_score
# Recover absolute counts (rate * population) for the held-out rows.
for_r2 = X_test.merge(for_reg[["population"]],left_index=True, right_index=True)
#r2_score((for_r2.population*y_test.T).T, for_r2.population*[x[0] for x in regr.predict(X_test)], multioutput='variance_weighted')
```
# Second regression conclusion
This result is very surprising, it really looks like a data leak ! It seems that Property = Theft + Burglary + Motor
To make it a little more interesting, let's look at the data without those 3 values and try to find a good prediction.
# Doing some PCA...
```
# Rebuild per-capita rates, then PCA the five violent-crime columns.
per_pop = base.copy()
per_pop = per_pop.drop(['city','rape_1'],axis=1)
for col in ['violent_crime','murder', 'rape_2','robbery', 'aggravated', 'property', 'burglary','theft', 'motor']:
    per_pop[col] = per_pop[col]/per_pop.population
crime_pca = per_pop[['violent_crime','murder', 'rape_2','robbery', 'aggravated']].copy()
sklearn_pca = PCA(n_components=5)
X = StandardScaler().fit_transform(crime_pca)  # PCA needs centered/scaled data
Y_sklearn = sklearn_pca.fit_transform(X)
display(sklearn_pca.components_)
display(sklearn_pca.explained_variance_)
sum(sklearn_pca.explained_variance_ratio_[:3])  # variance kept by 3 components
# Attach component scores plus an arson-missingness flag (f-string: py>=3.6).
for i in range(5):
    per_pop[f"pca_{i}"] = Y_sklearn[:,i]
per_pop["is_arson"] = per_pop.arson.isna()
per_pop = per_pop.fillna(0)
#per_pop = per_pop.drop(['theft', 'burglary',"motor"],axis=1)
show_corr(per_pop)
```
# Third regression
## Very bad fit !
```
# Third regression: drop the >1M-population outlier city, then predict
# the property rate from PCA components plus the arson columns.
for_reg = per_pop[per_pop.population<1000000].sort_values("population").reset_index()
#for_reg = per_pop[~per_pop.arson.isna()].sort_values("population").reset_index()
y = for_reg[["property"]]
x = for_reg[['pca_0', 'pca_1', 'pca_2','is_arson','arson']]
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.40)
# Instantiate our model.
regr = linear_model.LinearRegression()
# Fit our model to our data.
regr.fit(X_train,y_train)
# Display the attributes we calculated.
print('Coefficients: \n', regr.coef_)
print('Intercept: \n', regr.intercept_)
print("Train score:",regr.score(X_train,y_train))
print("Test score:",regr.score(X_test,y_test))
# Plot outputs
#plt.scatter(X_test.pca_0,y_test, color='black',s=2,label="Test values")
#plt.scatter(X_test.pca_0, regr.predict(X_test), color='red',s=1,label="Predicted values")
#plt.legend()
#plt.show()
# Overlay train/test residual distributions to spot overfitting.
predicted_test = regr.predict(X_test)
predicted_train = regr.predict(X_train)
residual_test = y_test - predicted_test
residual_train = y_train - predicted_train
_,bins,_ = plt.hist(residual_test.property,color="red",bins=15,alpha=0.6,density=True,label="Test residual");
plt.hist(residual_train.property,color="green",bins=bins,alpha=0.3,density=True,label="Train residual");
plt.legend()
plt.title("Residual histogram");
plt.scatter(predicted_test, residual_test.property,s=10,label="Test")
plt.scatter(predicted_train, residual_train.property,s=15,alpha=0.5,label="Train")
plt.xlabel('Predicted')
plt.ylabel('Residual')
plt.axhline(y=0,color="red")
plt.title('Residual vs. Predicted')
plt.legend()
plt.show()
# R^2 on absolute counts (rate * population).
from sklearn.metrics import r2_score
for_r2 = X_test.merge(for_reg[["population"]],left_index=True, right_index=True)
r2_score((for_r2.population*y_test.T).T, for_r2.population*[x[0] for x in regr.predict(X_test)], multioutput='variance_weighted')
```
Even though we get a very bad score for the property crime rate, we still end up with a very high score for the number of property crimes, as it depends mostly on the population.
But looking at the test and train, we realize that the train has no 'is_arson', when the rest of the values are comparable.
```
# Compare arson / PCA-score distributions between train and test, and
# between cities that do and don't report arson.
sum(X_train.is_arson)
_,bins,_ = plt.hist(X_train.arson,density=True,alpha=0.5)
_,bins,_ = plt.hist(X_test.arson,bins=bins,density=True,alpha=0.5)
per_pop.is_arson.unique()
_,bins,_ = plt.hist(per_pop[per_pop.is_arson==True].pca_0,bins=20,density=True,alpha=0.5,color="blue");
plt.hist(per_pop[per_pop.is_arson==False].pca_0,bins=bins,density=True,alpha=0.5,color="red");
_,bins,_ = plt.hist(per_pop[per_pop.is_arson==True].pca_1,bins=20,density=True,alpha=0.5,color="blue");
plt.hist(per_pop[per_pop.is_arson==False].pca_1,bins=bins,density=True,alpha=0.5,color="red");
```
## Controling the train and test are comparable
There is an overal difference between cities that report arson correctly and the others, we need to make sure the number of 'is_arson' is almost the same in train and test.
```
# Final regression on PCA components only; verify the arson-reporting
# mix is comparable in train and test before trusting the scores.
for_reg = per_pop[per_pop.population<1000000].sort_values("population").reset_index()
#for_reg = per_pop[~per_pop.arson.isna()].sort_values("population").reset_index()
y = for_reg[["property"]]
x = for_reg[['pca_0', 'pca_1',"pca_2"]]
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.10)
# Instantiate our model.
regr = linear_model.LinearRegression()
# Fit our model to our data.
regr.fit(X_train,y_train)
# Display the attributes we calculated.
print('Coefficients: \n', regr.coef_)
print('Intercept: \n', regr.intercept_)
print("Train score:",regr.score(X_train,y_train))
print("Test score:",regr.score(X_test,y_test))
# Plot outputs
#plt.scatter(X_test.pca_0,y_test, color='black',s=2,label="Test values")
#plt.scatter(X_test.pca_0, regr.predict(X_test), color='red',s=1,label="Predicted values")
#plt.legend()
#plt.show()
# Share of arson-missing cities should be similar in each split.
test_arson = X_test.merge(for_reg[["is_arson"]],left_index=True, right_index=True)
train_arson = X_train.merge(for_reg[["is_arson"]],left_index=True, right_index=True)
print("Check Arson train",sum(train_arson.is_arson)/train_arson.shape[0])
print("Check Arson test",sum(test_arson.is_arson)/test_arson.shape[0])
predicted_test = regr.predict(X_test)
predicted_train = regr.predict(X_train)
residual_test = y_test - predicted_test
residual_train = y_train - predicted_train
_,bins,_ = plt.hist(residual_test.property,color="red",bins=15,alpha=0.6,density=True,label="Test residual");
plt.hist(residual_train.property,color="green",bins=bins,alpha=0.3,density=True,label="Train residual");
plt.legend()
plt.title("Residual histogram");
plt.scatter(predicted_test, residual_test.property,s=10,label="Test")
plt.scatter(predicted_train, residual_train.property,s=15,alpha=0.5,label="Train")
plt.xlabel('Predicted')
plt.ylabel('Residual')
plt.axhline(y=0,color="red")
plt.title('Residual vs. Predicted')
plt.legend()
plt.show()
# Component-vs-target scatters and cross-validation of the final model.
plt.scatter(x.pca_0,y.property)
plt.scatter(x.pca_1,y.property)
plt.scatter(x.pca_2,y.property)
cross_val_score(regr, x, y, cv=5)
# R^2 on absolute counts (rate * population).
from sklearn.metrics import r2_score
for_r2 = X_test.merge(for_reg[["population"]],left_index=True, right_index=True)
r2_score((for_r2.population*y_test.T).T, for_r2.population*[x[0] for x in regr.predict(X_test)], multioutput='variance_weighted')
```
In this final regression, we still have some outliers.
```
# Statsmodels OLS for parameter estimates, p-values and R^2.
import statsmodels.formula.api as smf
from statsmodels.sandbox.regression.predstd import wls_prediction_std
per_pop.columns
#per_pop = base.copy()
#per_pop = per_pop.drop(['city','rape_1'],axis=1)
# Add *_per_pop rate columns directly onto ``base``.
for col in ['violent_crime','murder', 'rape_2','robbery', 'aggravated', 'property', 'burglary','theft', 'motor']:
    base[col+"_per_pop"] = base[col]/base.population
base.columns
linear_formula = 'property_per_pop ~ violent_crime_per_pop+rape_2_per_pop'
# Fit the model to our data using the formula.
lm = smf.ols(formula=linear_formula, data=base).fit()
lm.params
lm.pvalues
lm.rsquared
```
| github_jupyter |
### Deep learning for identifying the orientation of scanned images
First we will load the train and test data and create a CTF file
```
import os
from PIL import Image
import numpy as np
import itertools
import random
import time
import matplotlib.pyplot as plt
import cntk as C
def split_line(line):
    """Parse one ``key,label`` CSV record into a (key, int label) tuple.

    Any fields beyond the first two are ignored.
    """
    fields = line.strip().split(',')
    key, raw_label = fields[0], fields[1]
    return (key, int(raw_label))
def load_labels_dict(labels_file):
    """Read the labels CSV into a {key: int label} dict, skipping the header."""
    with open(labels_file) as handle:
        rows = handle.readlines()[1:]  # first row is the header
    return {key: label for key, label in map(split_line, rows)}
def load_data(data_dir, labels_dict):
    """Yield one flat int16-ish array per image: pixels then the label.

    The label is looked up by file name with its 4-character extension
    stripped (e.g. ``abc.png`` -> key ``abc``).
    """
    for fname in os.listdir(data_dir):
        label = labels_dict[fname[:-4]]  # drop '.png'-style suffix
        pixels = np.array(Image.open(os.path.join(data_dir, fname)),
                          dtype = np.int16).flatten()
        yield np.hstack([pixels, int(label)])
def write_to_ctf_file(generator, test_file_name, train_file_name, pct_train = 0.9, rng_seed = 0):
    """Split entries from ``generator`` into CNTK CTF train/test files.

    Each entry is a flat array whose last element is the class label
    (0-3) and whose remaining elements are pixel features.  Every entry
    goes to the train file with probability ``pct_train``, otherwise to
    the test file; the split is reproducible via ``rng_seed``.
    """
    random.seed(rng_seed)  # deterministic split
    one_hot = [' '.join(row) for row in np.eye(4, dtype = np.int16).astype(str)]
    processed = 0
    with open(test_file_name, 'w') as testf, open(train_file_name, 'w') as trainf:
        for entry in generator:
            draw = random.random()
            formatted_line = '|labels {} |features {}\n'.format(one_hot[int(entry[-1])], ' '.join(entry[:-1].astype(str)))
            destination = trainf if draw <= pct_train else testf
            destination.write(formatted_line)
            processed += 1
            if processed % 1000 == 0:
                print('Processed {} entries'.format(str(processed)))
# Build the CTF files once; skip the (slow) rebuild if they already exist.
train_data_dir = os.path.join('data', 'train')
labels_file = os.path.join('data', 'train_labels.csv')
train_file = 'train_data.ctf'
test_file = 'test_data.ctf'
all_data_file = 'all_data.ctf'
labels_dict = load_labels_dict(labels_file)
if os.path.exists(train_file) and os.path.exists(test_file):
    print("Test and training CTF Files exists, not recreating them again")
else:
    generator = load_data(train_data_dir, labels_dict)
    write_to_ctf_file(generator, test_file, train_file)
#Created only to enable testing on entire test data to hoping to improve the submission score
if os.path.exists(all_data_file):
    print("All data CTF Files exists, not recreating it again")
else:
    # Same CTF line format as write_to_ctf_file, but with no split.
    generator = load_data(train_data_dir, labels_dict)
    labels = [l for l in map(' '.join, np.eye(4, dtype = np.int16).astype(str))]
    with open(all_data_file, 'w') as f:
        lines = 0
        for entry in generator:
            formatted_line = '|labels {} |features {}\n'.format(labels[int(entry[-1])], ' '.join(entry[:-1].astype(str)))
            f.write(formatted_line)
            lines += 1
            if lines % 1000 == 0:
                print('Processed {} entries'.format(str(lines)))
# Fix seeds for reproducibility; 4 orientation classes, 1x64x64 inputs.
np.random.seed(0)
C.cntk_py.set_fixed_random_seed(1)
C.cntk_py.force_deterministic_algorithms()
num_output_classes = 4
input_dim_model = (1, 64, 64)
def create_reader(file_path, is_training):
    """Build a CNTK MinibatchSource over a CTF file.

    Training readers shuffle and repeat indefinitely; evaluation readers
    make a single, ordered sweep.
    """
    print('Creating reader from file ' + file_path)
    stream_defs = C.io.StreamDefs(
        labels = C.io.StreamDef(field='labels', shape = 4, is_sparse=False),
        features = C.io.StreamDef(field='features', shape = 64 * 64, is_sparse=False),
    )
    deserializer = C.io.CTFDeserializer(file_path, stream_defs)
    sweeps = C.io.INFINITELY_REPEAT if is_training else 1
    return C.io.MinibatchSource(deserializer, randomize = is_training, max_sweeps = sweeps)
# Symbolic CNTK inputs: image tensor and one-hot label.
x = C.input_variable(input_dim_model)
y = C.input_variable(num_output_classes)
def create_model(features):
    """Two conv+maxpool blocks followed by a dense classifier head.

    Args:
        features: CNTK input tensor shaped (1, 64, 64).

    Returns:
        Unnormalized logits over the 4 orientation classes.
    """
    with C.layers.default_options(init = C.glorot_uniform(), activation = C.relu):
        h = features
        h = C.layers.Convolution2D(filter_shape=(5, 5),
                                   num_filters = 32,
                                   strides=(2, 2),
                                   pad=True, name='first_conv')(h)
        h = C.layers.MaxPooling(filter_shape = (5, 5), strides = (2, 2), name = 'pool1')(h)
        h = C.layers.Convolution2D(filter_shape=(5, 5),
                                   num_filters = 64,
                                   strides=(2, 2),
                                   pad=True, name='second_conv')(h)
        h = C.layers.MaxPooling(filter_shape = (3, 3), strides = (2, 2), name = 'pool2')(h)
        # No softmax here: cross_entropy_with_softmax applies it in the loss.
        r = C.layers.Dense(num_output_classes, activation = None, name='classify')(h)
        return r
def print_training_progress(trainer, mb, frequency, verbose=1):
    """Print loss and error for minibatch ``mb`` every ``frequency`` steps.

    On non-multiples of ``frequency`` nothing is printed; the "NA"
    placeholders are never formatted (the float format specs below would
    fail on them).
    """
    training_loss = "NA"
    eval_error = "NA"
    if mb % frequency == 0:
        training_loss = trainer.previous_minibatch_loss_average
        eval_error = trainer.previous_minibatch_evaluation_average
        if verbose:
            print ("Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}%".format(mb, training_loss, eval_error*100))
def train_test(train_reader, test_reader, model_func, num_sweeps_to_train_with=10):
    """Train the model with SGD and print average test error.

    Args:
        train_reader / test_reader: MinibatchSources from ``create_reader``.
        model_func: callable producing the network from the input variable.
        num_sweeps_to_train_with (int): nominal full passes over the data.
    """
    # Scale pixels from [0, 255] to [0, 1] at the graph input.
    model = model_func(x/255)
    # Instantiate the loss and error function
    loss = C.cross_entropy_with_softmax(model, y)
    label_error = C.classification_error(model, y)
    # Initialize the parameters for the trainer
    minibatch_size = 64
    # NOTE(review): hard-coded sweep size -- confirm it matches the actual
    # number of rows in the training CTF file.
    num_samples_per_sweep = 60000
    num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size
    learning_rate = 0.1
    lr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.minibatch)
    learner = C.sgd(model.parameters, lr_schedule)
    trainer = C.Trainer(model, (loss, label_error), [learner])
    # Map the CTF streams onto the symbolic network inputs.
    input_map={
        y : train_reader.streams.labels,
        x : train_reader.streams.features
    }
    training_progress_output_freq = 500
    start = time.time()
    for i in range(0, int(num_minibatches_to_train)):
        data=train_reader.next_minibatch(minibatch_size, input_map = input_map)
        trainer.train_minibatch(data)
        print_training_progress(trainer, i, training_progress_output_freq, verbose=1)
    print("Training took {:.1f} sec".format(time.time() - start))
    # Evaluate on the non-randomized, single-sweep test reader.
    test_input_map = {
        y : test_reader.streams.labels,
        x : test_reader.streams.features
    }
    test_minibatch_size = 64
    # NOTE(review): assumes roughly 2000 held-out rows -- confirm.
    num_samples = 2000
    num_minibatches_to_test = num_samples // test_minibatch_size
    test_result = 0.0
    for i in range(num_minibatches_to_test):
        data = test_reader.next_minibatch(test_minibatch_size, input_map=test_input_map)
        eval_error = trainer.test_minibatch(data)
        test_result = test_result + eval_error
    # Average of evaluation errors of all test minibatches
    print("Average test error: {0:.2f}%".format(test_result*100 / num_minibatches_to_test))
def do_train_test(model, train_on_all_data = False):
    """Wire up readers and run ``train_test`` on ``model``.

    Args:
        model: the CNTK network function to train.
        train_on_all_data (bool): train on the combined CTF file instead
            of the train split (the test set is then not out-of-sample).
    """
    training_source = all_data_file if train_on_all_data else train_file
    reader_train = create_reader(training_source, True)
    reader_test = create_reader(test_file, False)
    train_test(reader_train, reader_test, model)
# Train the model, then collect misclassified validation examples.
C.cntk_py.set_fixed_random_seed(1)
C.cntk_py.force_deterministic_algorithms()
model = create_model(x)
print('pool2 shape is ' + str(model.pool2.shape))
C.logging.log_number_of_parameters(model)
do_train_test(model, train_on_all_data = False)
#Test data not relevant here in case we use all data, the tests won't be out of sample
#Just done as an attempt improve the submission score using all possible test data after we find the best model
#that gave minimum error on validation set
#Surprisingly, it didn't improve the score but reduced the score by a fraction.
#do_train_test(model, train_on_all_data = True)
#Accumulate and display the misclassified
#TODO: FIX this
test_reader = create_reader(test_file, False)
labels = []
predictions = []
all_images = []
# Pull the test set in chunks of 500 and compare argmax predictions.
# NOTE(review): features are not divided by 255 here, unlike the
# training and submission paths -- possibly the reason for the TODO.
for i in range(0, 2000, 500):
    validation_data = test_reader.next_minibatch(500)
    features = validation_data[test_reader.streams.features].as_sequences()
    all_images += features
    l = validation_data[test_reader.streams.labels].as_sequences()
    labels += [np.argmax(i.flatten()) for i in l]
    images = [i.reshape(1, 64, 64) for i in features]
    preds = model(images)
    predictions += [np.argmax(i.flatten()) for i in preds]
predictions = np.array(predictions)
labels = np.array(labels)
mask = predictions != labels
mismatch = np.array(all_images)[mask]
expected_label = labels[mask]
mismatch_pred = predictions[mask]
mismatch_images = np.array(all_images)[mask]
%matplotlib inline
# Show each misclassified image with expected vs. predicted label.
for i in range(len(expected_label)):
    fig = plt.figure(figsize = (8, 6))
    ax = fig.gca()
    ax.set_title('Expected label ' + str(expected_label[i]) + ', got label ' + str(mismatch_pred[i]))
    image = mismatch_images[i]
    plt.imshow(image.reshape(64, 64), cmap = 'gray')
    plt.axis('off')
submission_data_dir = os.path.join('data', 'test')
submission_file = 'submission_data.ctf'
def file_to_ndarray(file_root, imfile):
    """Return (image id, pixel array shaped (1, 64, 64)) for one file."""
    image = Image.open(os.path.join(file_root, imfile))
    return (imfile[:-4], np.array(image).reshape((-1, 64, 64)))
# Score the 20000 submission images in fixed-size batches, then write
# the ``id,orientation`` CSV (ids sorted lexicographically).
submission_images = [file_to_ndarray(submission_data_dir, f) for f in os.listdir(submission_data_dir)]
submission_images = sorted(submission_images, key = lambda x: x[0])
input_images = [x[1].astype(np.float32) / 255 for x in submission_images]
all_predictions = []
submission_mini_batch_size = 50
for i in range(0, 20000, submission_mini_batch_size):
    predictions = model(input_images[i:(i + submission_mini_batch_size)])
    all_predictions.append(np.argmax(predictions, axis = 1))
# Flatten the per-batch arrays into one list of class ids.
all_predictions = [item for sl in all_predictions for item in sl]
with open('submission.csv', 'w') as f:
    f.write('id,orientation\n')
    for i in range(20000):
        f.write(submission_images[i][0] + "," + str(all_predictions[i]) + "\n")
```
| github_jupyter |
# Adversarial Attacks Example in PyTorch
## Import Dependencies
This section imports all necessary libraries, such as PyTorch.
```
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
import math
import torch.backends.cudnn as cudnn
import os
import argparse
```
### GPU Check
```
# Select CUDA when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.cuda.is_available():
    print("Using GPU.")
else:
    print("Using CPU.")
```
## Data Preparation
```
# MNIST dataloader declaration
print('==> Preparing data..')
# The standard output of the torchvision MNIST data set is [0,1] range, which
# is what we want for later processing. All we need for a transform, is to
# translate it to tensors.
# We first download the train and test datasets if necessary and then load them into pytorch dataloaders.
mnist_train_dataset = datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)
mnist_test_dataset = datasets.MNIST(root='./data', train=False, transform=transforms.ToTensor(), download=True)
mnist_dataset_sizes = {'train' : mnist_train_dataset.__len__(), 'test' : mnist_test_dataset.__len__()} # a dictionary to keep both train and test datasets
mnist_train_loader = torch.utils.data.DataLoader(
    dataset=mnist_train_dataset,
    batch_size=256,
    shuffle=True)
# batch_size=1 -- presumably so adversarial examples can be crafted per sample.
mnist_test_loader = torch.utils.data.DataLoader(
    dataset=mnist_test_dataset,
    batch_size=1,
    shuffle=True)
mnist_dataloaders = {'train' : mnist_train_loader ,'test' : mnist_test_loader} # a dictionary to keep both train and test loaders
# CIFAR10 dataloader declaration
print('==> Preparing data..')
# The standard output of the torchvision CIFAR data set is [0,1] range, which
# is what we want for later processing. All we need for a transform, is to
# translate it to tensors.
# we first download the train and test datasets if necessary and then load them into pytorch dataloaders
cifar_train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transforms.ToTensor())
cifar_train_loader = torch.utils.data.DataLoader(cifar_train_dataset, batch_size=128, shuffle=True, num_workers=2)
cifar_test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transforms.ToTensor())
cifar_test_loader = torch.utils.data.DataLoader(cifar_test_dataset, batch_size=100, shuffle=False, num_workers=2)
# these are the output categories from the CIFAR dataset
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
```
## Model Definition
We used the LeNet model to train against the MNIST dataset because the dataset is not very complex and LeNet can easily reach a high accuracy, which we then demonstrate an attack on. For the CIFAR10 dataset, however, we used the more complex DenseNet model to reach an accuracy of 90% before attacking it.
### LeNet
```
# LeNet Model definition
class LeNet(nn.Module):
    """Small LeNet-style CNN for 1x28x28 MNIST digits (10 classes)."""

    def __init__(self):
        super(LeNet, self).__init__()
        # two conv blocks followed by two fully connected layers
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # block 1: conv -> 2x2 max pool -> relu
        out = F.max_pool2d(self.conv1(x), 2)
        out = F.relu(out)
        # block 2: conv -> spatial dropout -> 2x2 max pool -> relu
        out = self.conv2_drop(self.conv2(out))
        out = F.relu(F.max_pool2d(out, 2))
        out = out.view(-1, 320)                  # flatten to (N, 320)
        out = F.relu(self.fc1(out))
        out = F.dropout(out, training=self.training)
        out = self.fc2(out)
        return F.log_softmax(out, dim=1)         # log-probabilities
```
This is the standard implementation of the DenseNet proposed in the following paper.
[DenseNet paper](https://arxiv.org/abs/1608.06993)
The idea of Densely Connected Networks is that every layer is connected to all its previous layers and its succeeding ones, thus forming a Dense Block.

The implementation is broken to smaller parts, called a Dense Block with 5 layers. Each time there is a convolution operation of the previous layer, it is followed by concatenation of the tensors. This is allowed as the channel dimensions, height and width of the input stay the same after convolution with a kernel size 3×3 and padding 1.
In this way the feature maps produced are more diversified and tend to have richer patterns. Also, another advantage is better information flow during training.
### DenseNet
```
# This is a basic densenet model definition.
class Bottleneck(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-1x1 conv then BN-ReLU-3x3 conv,
    concatenating ``growth_rate`` new feature maps onto the input."""

    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(4*growth_rate)
        self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        new_maps = self.conv1(F.relu(self.bn1(x)))
        new_maps = self.conv2(F.relu(self.bn2(new_maps)))
        # dense connectivity: stack the new maps with the incoming ones
        return torch.cat([new_maps, x], 1)
class Transition(nn.Module):
    """DenseNet transition: BN-ReLU-1x1 conv to shrink channels, then
    2x2 average pooling to halve the spatial size."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        compressed = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(compressed, 2)
class DenseNet(nn.Module):
    """DenseNet-BC: four dense blocks separated by compressing transitions.

    Args:
        block: dense-layer class (``Bottleneck``).
        nblocks (list[int]): number of layers in each dense block.
        growth_rate (int): feature maps added per dense layer.
        reduction (float): channel compression factor at each transition.
        num_classes (int): size of the final classifier output.
    """
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        num_planes = 2*growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
        # Each dense block grows channels by nblocks[i]*growth_rate, then
        # the following transition compresses them by ``reduction``.
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes
        # No transition after the final dense block.
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate
        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        # Each layer consumes all maps produced so far, so in_planes grows
        # by growth_rate per layer.
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        # Final BN-ReLU, 4x4 average pool, flatten, classify.
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
# This creates a densenet model with basic settings for cifar.
def densenet_cifar():
    """Return a DenseNet with the [6, 12, 24, 16] block layout for CIFAR-10."""
    stage_sizes = [6, 12, 24, 16]  # bottleneck layers per dense block
    return DenseNet(Bottleneck, stage_sizes, growth_rate=12)
#building model for MNIST data
# LeNet and device are defined in an earlier cell of the notebook.
print('==> Building the model for MNIST dataset..')
mnist_model = LeNet().to(device)
mnist_criterion = nn.CrossEntropyLoss()
mnist_optimizer = optim.Adam(mnist_model.parameters(), lr=0.001)
mnist_num_epochs= 20
#building model for CIFAR10
# Model
print('==> Building the model for CIFAR10 dataset..')
# initialize our datamodel
cifar_model = densenet_cifar()
cifar_model = cifar_model.to(device)
# use cross entropy as our objective function, since we are building a classifier
cifar_criterion = nn.CrossEntropyLoss()
# use adam as an optimizer, because it is a popular default nowadays
# (following the crowd, I know)
# NOTE(review): lr=0.1 is unusually high for Adam (its common default is 1e-3,
# as used for MNIST above) — confirm this is intentional.
cifar_optimizer = optim.Adam(cifar_model.parameters(), lr=0.1)
best_acc = 0 # save the best test accuracy
start_epoch = 0 # start from epoch 0
cifar_num_epochs =20
```
##Model Training
```
#Training for MNIST dataset
def train_mnist_model(model, data_loaders, dataset_sizes, criterion, optimizer, num_epochs, device):
    """Train ``model`` for ``num_epochs`` epochs, then evaluate on the test split.

    Args:
        model: network to train (moved to ``device`` here).
        data_loaders: dict with 'train' and 'test' loaders of (inputs, labels).
        dataset_sizes: dict with 'train' and 'test' example counts.
        criterion: loss function (e.g. CrossEntropyLoss).
        optimizer: optimizer over ``model.parameters()``.
        num_epochs: number of training epochs.
        device: torch device to run on.
    """
    model = model.to(device)
    model.train()  # training mode (dropout active, BN stats updating)
    # for each epoch
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch+1, num_epochs))
        running_loss, running_corrects = 0.0, 0
        # for each batch
        for inputs, labels in data_loaders['train']:
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()  # reset gradients from the previous step
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item() * inputs.size(0)  # sum of per-example losses
            running_corrects += torch.sum(preds == labels.data)
        # epoch-level loss and accuracy
        epoch_loss = running_loss / dataset_sizes['train']
        epoch_acc = running_corrects.double() / dataset_sizes['train']
        print('Train Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
        print('-' * 10)
    # Evaluation phase. The original attached this to the for-loop's `else`
    # clause (which always runs when the loop isn't broken); a plain
    # sequential block is clearer and behaves identically.
    model.eval()
    running_loss, running_corrects = 0.0, 0
    # Fix: wrap evaluation in no_grad so no autograd graphs are built
    # (saves memory/time; the computed metrics are unchanged).
    with torch.no_grad():
        for inputs, labels in data_loaders['test']:
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
    test_loss = running_loss / dataset_sizes['test']
    test_acc = (running_corrects.double() / dataset_sizes['test']).item()
    print('<Test Loss: {:.4f} Acc: {:.4f}>'.format(test_loss, test_acc))
train_mnist_model(mnist_model, mnist_dataloaders, mnist_dataset_sizes, mnist_criterion, mnist_optimizer, mnist_num_epochs, device)
# Training for CIFAR10 dataset
def train_cifar_model(model, train_loader, criterion, optimizer, num_epochs, device):
    """Run one full training pass over ``train_loader``.

    Note: despite its name, ``num_epochs`` is the epoch *number* currently
    being run — the caller invokes this once per epoch — and is only used
    for the progress printout.
    """
    print('\nEpoch: %d' % num_epochs)
    model.train()  # training-mode behaviour (dropout, BN updates)
    loss_sum = 0
    n_correct = 0
    n_seen = 0
    for step, (inputs, targets) in enumerate(train_loader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()  # clear gradients left over from the last step
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        loss_sum += loss.item()
        _, predicted = outputs.max(1)
        n_seen += targets.size(0)
        n_correct += predicted.eq(targets).sum().item()
        # periodic progress report (average loss and running accuracy)
        if step % 100 == 0:
            print('Loss: %.3f | Acc: %.3f%% (%d/%d)' % (loss_sum/(step+1), 100.*n_correct/n_seen, n_correct, n_seen))
#testing for CIFAR10 dataset
def test_cifar_model(model, test_loader, criterion, device, save=True):
    """Evaluate ``model`` on ``test_loader`` and track the best accuracy.

    When ``save`` is True and the achieved accuracy beats the module-level
    ``best_acc``, ``best_acc`` is updated. Prints running loss/accuracy
    every 100 batches.
    """
    global best_acc
    model.eval()  # inference mode (dropout off, BN running stats)
    loss_sum = 0
    n_correct = 0
    n_seen = 0
    with torch.no_grad():
        # mirror of the training loop, without backprop
        for step, (inputs, targets) in enumerate(test_loader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            batch_loss = criterion(outputs, targets)
            loss_sum += batch_loss.item()
            _, predicted = outputs.max(1)
            n_seen += targets.size(0)
            n_correct += predicted.eq(targets).sum().item()
            if step % 100 == 0:
                print('Loss: %.3f | Acc: %.3f%% (%d/%d) TEST' % (loss_sum/(step+1), 100.*n_correct/n_seen, n_correct, n_seen))
    # overall accuracy; remember the best value seen so far
    acc = 100.*n_correct/n_seen
    if acc > best_acc and save:
        best_acc = acc
# Full CIFAR10 run: one training pass per epoch, then an evaluation pass
# (test_cifar_model updates the module-level best_acc).
for epoch in range(start_epoch, start_epoch+cifar_num_epochs):
    train_cifar_model(cifar_model, cifar_train_loader, cifar_criterion, cifar_optimizer, epoch, device)
    test_cifar_model(cifar_model, cifar_test_loader, cifar_criterion, device)
```
## Save and Reload the Model
```
# Mounting Google Drive
from google.colab import auth
auth.authenticate_user()
from google.colab import drive
drive.mount('/content/gdrive')
gdrive_dir = 'gdrive/My Drive/ml/' # update with your own path
# Save and reload the mnist_model
print('==> Saving model for MNIST..')
torch.save(mnist_model.state_dict(), gdrive_dir+'lenet_mnist_model.pth')
#change the directory to load your own pretrained model
print('==> Loading saved model for MNIST..')
mnist_model = LeNet().to(device)
mnist_model.load_state_dict(torch.load(gdrive_dir+'lenet_mnist_model.pth'))
mnist_model.eval()
# Save and reload the cifar_model
print('==> Saving model for CIFAR..')
# Bug fix: save to the same Google Drive path the load below reads from.
# Previously this saved to './densenet_cifar_model.pth' (local runtime disk),
# so reloading from gdrive_dir failed in a fresh session.
torch.save(cifar_model.state_dict(), gdrive_dir+'densenet_cifar_model.pth')
#change the directory to load your own pretrained model
print('==> Loading saved model for CIFAR..')
cifar_model = densenet_cifar().to(device)
cifar_model.load_state_dict(torch.load(gdrive_dir+'densenet_cifar_model.pth'))
cifar_model.eval()
```
## Attack Definition
We used these two attack methods:
* Fast Gradient Signed Method (FGSM)
* Iterative Least Likely method (Iter.L.L.)
```
# Fast Gradient Singed Method attack (FGSM)
#Model is the trained model for the target dataset
#target is the ground truth label of the image
#epsilon is the hyper parameter which shows the degree of perturbation
def fgsm_attack(model, image, target, epsilon):
    """Single-step FGSM attack.

    Args:
        model: trained classifier for the target dataset.
        image: input tensor (batch size 1 is assumed by the ``[0]`` checks).
        target: ground-truth label tensor.
        epsilon: perturbation magnitude.

    Returns:
        The perturbed image clamped to [0, 1], or the unmodified input when
        the model already misclassifies it.
    """
    # Gradients w.r.t. the input are needed for the attack.
    image.requires_grad = True
    output = model(image)
    # Model's prediction on the clean image.
    init_pred = output.max(1, keepdim=True)[1]
    # Already misclassified: nothing to attack.
    if init_pred[0].item() != target[0].item():
        return image
    loss = F.nll_loss(output, target)
    model.zero_grad()
    loss.backward()
    # Step each pixel by epsilon along the sign of the input gradient,
    # then clip back into the valid [0, 1] data range.
    gradient_sign = image.grad.data.sign()
    perturbed_image = torch.clamp(image + epsilon*gradient_sign, 0, 1)
    return perturbed_image
# Iterative least likely method
# Model is the trained model for the target dataset
# target is the ground truth label of the image
# alpha is the hyper parameter which shows the degree of perturbation in each iteration, the value is borrowed from the refrenced paper [4] according to the report file
# iters is the no. of iterations
# no. of iterations can be set manually, otherwise (if iters==0) this function will take care of it
def ill_attack(model, image, target, epsilon, alpha, iters):
    """Iterative Least-Likely (Iter.L.L.) attack.

    Repeatedly nudges ``image`` towards the model's least-likely class by an
    ``alpha``-sized step against the gradient sign, keeping the result inside
    an epsilon-ball and inside the valid [0, 1] data range.

    Args:
        model: trained classifier for the target dataset.
        image: input tensor (batch size 1 is assumed — ``.item()`` is used below).
        target: ground-truth label tensor.
        epsilon: perturbation budget on a [0, 255] scale (rescaled below).
        alpha: per-iteration step size.
        iters: number of iterations; 0 means "derive from epsilon" (paper heuristic).
    """
    # Forward pass once to find the least-likely label for each input.
    output = model(image)
    ll_label = torch.min(output, 1)[1] # get the index of the min log-probability
    if iters == 0 :
        # In paper [4], min(epsilon + 4, 1.25*epsilon) is used as number of iterations
        iters = int(min(epsilon + 4, 1.25*epsilon))
    # In the referenced paper the images were in [0,255] range but here our data is
    # in [0,1], so scale epsilon down by 255 to match.
    epsilon = epsilon/255
    for i in range(iters) :
        # Gradients w.r.t. the input are needed for the attack.
        image.requires_grad = True
        output = model(image)
        init_pred = output.max(1, keepdim=True)[1] # current model prediction
        # Already misclassified: stop early and return the current image.
        if init_pred.item() != target.item():
            return image
        # NOTE(review): nll_loss is applied to the raw model outputs (no
        # log_softmax), matching fgsm_attack above — confirm intended.
        loss = F.nll_loss(output, ll_label)
        # Zero all existing gradients
        model.zero_grad()
        # Calculate gradients of model in backward pass
        loss.backward()
        # Collect datagrad
        data_grad = image.grad.data
        # Collect the element-wise sign of the data gradient
        sign_data_grad = data_grad.sign()
        # Step *towards* the least-likely class (hence the minus sign).
        perturbed_image = image - alpha*sign_data_grad
        # Project back into the epsilon-ball, clipped to the [0, 1] data range:
        #   a = lower bound (image - epsilon, floored at 0)
        #   b = element-wise max(perturbed_image, a)
        #   c = element-wise min(b, image + epsilon)
        # NOTE(review): the ball is taken around the *current* ``image``, which
        # is reassigned each iteration — the paper projects around the original
        # input; confirm this drift is intended.
        a = torch.clamp(image - epsilon, min=0)
        b = (perturbed_image>=a).float()*perturbed_image + (a>perturbed_image).float()*a
        c = (b > image+epsilon).float()*(image+epsilon) + (image+epsilon >= b).float()*b
        # detach so the next iteration treats the result as a fresh leaf tensor
        image = torch.clamp(c, max=1).detach_()
    return image
```
## Model Attack Design
```
# We used the same values as described in the reference paper [4] in the report.
fgsm_epsilons = [0, .05, .1, .15, .2, .25, .3] # epsilon values for the FGSM attack (data already in [0,1])
ill_epsilons = [0, 2, 4, 8, 16] # epsilon values for the Iter.L.L attack (on a [0,255] scale; rescaled inside ill_attack)
#This is where we test the effect of the attack on the trained model
#model is the pretrained model on your dataset
#test_loader contains the test dataset
#other parameters are set based on the type of the attack
def attack_test(model, device, test_loader, epsilon, iters, attack='fgsm', alpha=1 ):
    """Attack every test example and measure the model's remaining accuracy.

    Args:
        model: pretrained classifier for the dataset.
        device: torch device for inputs/labels.
        test_loader: loader over the test set (batch size 1 is assumed).
        epsilon: attack budget, forwarded to the attack function.
        iters: iteration count for the iterative attack (0 = auto).
        attack: 'fgsm' for FGSM; anything else runs the iterative
            least-likely attack.
        alpha: per-step size for the iterative attack.

    Returns:
        (final_acc, adv_examples, orig_examples): post-attack accuracy, plus
        up to five (pred_before, pred_after, image) adversarial samples and
        their matching (target, pred, image) originals for visualisation.
    """
    correct = 0        # examples the attack failed to flip
    adv_examples = []  # up to five adversarial samples for plotting
    orig_examples = [] # the corresponding original images
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        # Prediction on the clean input.
        output = model(data)
        init_pred = output.max(1, keepdim=True)[1]
        # Run the requested attack.
        if attack == 'fgsm':
            perturbed_data = fgsm_attack(model, data, target, epsilon=epsilon )
        else:
            perturbed_data = ill_attack(model, data, target, epsilon, alpha, iters)
        # Prediction on the perturbed input.
        output = model(perturbed_data)
        final_pred = output.max(1, keepdim=True)[1]
        survived = final_pred[0].item() == target[0].item()
        if survived:
            correct += 1
        # Keep up to five example pairs: successful attacks always qualify;
        # surviving examples are only kept at epsilon == 0, which is the
        # no-attack baseline.
        if len(adv_examples) < 5 and (not survived or epsilon == 0):
            adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
            orig_ex = data.squeeze().detach().cpu().numpy()
            adv_examples.append( (init_pred[0].item(), final_pred[0].item(), adv_ex) )
            orig_examples.append( (target[0].item(), init_pred[0].item(), orig_ex) )
    # NOTE(review): len(test_loader) counts batches, so this accuracy is only
    # exact for batch size 1 — confirm the loaders used here.
    final_acc = correct/float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))
    return final_acc, adv_examples, orig_examples
```
##Running the Attack for MNIST dataset
```
#FGSM attack
mnist_fgsm_accuracies = [] #list to keep the model accuracy after attack for each epsilon value
mnist_fgsm_examples = [] # list to collect adversarial examples returned from the attack_test function for every epsilon value
mnist_fgsm_orig_examples = [] #list to collect original images corresponding to the collected adversarial examples
# Run test for each epsilon
for eps in fgsm_epsilons:
    acc, ex, orig = attack_test(mnist_model, device, mnist_test_loader, eps, attack='fgsm', alpha=1, iters=0)
    mnist_fgsm_accuracies.append(acc)
    mnist_fgsm_examples.append(ex)
    mnist_fgsm_orig_examples.append(orig)
#Iterative_LL attack
mnist_ill_accuracies = [] #list to keep the model accuracy after attack for each epsilon value
mnist_ill_examples = [] # list to collect adversarial examples returned from the attack_test function for every epsilon value
mnist_ill_orig_examples = [] #list to collect original images corresponding to the collected adversarial examples
# Run test for each epsilon (iters=0 lets ill_attack derive its iteration count)
for eps in ill_epsilons:
    acc, ex, orig = attack_test(mnist_model, device, mnist_test_loader, eps, attack='ill', alpha=1, iters=0)
    mnist_ill_accuracies.append(acc)
    mnist_ill_examples.append(ex)
    mnist_ill_orig_examples.append(orig)
```
##Visualizing the results for MNIST dataset
```
#Accuracy after attack vs epsilon
plt.figure(figsize=(5,5))
plt.plot(fgsm_epsilons, mnist_fgsm_accuracies, "*-")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, .35, step=0.05))
# Typo fix: the attack is FGSM (Fast Gradient Sign Method), not "FSGM".
plt.title("FGSM Attack vs MNIST Model Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()
# Plot several examples vs their adversarial samples at each epsilon for fgsm attack
cnt = 0
plt.figure(figsize=(8,20))
for i in range(len(fgsm_epsilons)):
    for j in range(2):
        cnt += 1
        plt.subplot(len(fgsm_epsilons),2,cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j==0:
            # Left column: original image, titled "target -> model prediction".
            plt.ylabel("Eps: {}".format(fgsm_epsilons[i]), fontsize=14)
            orig,adv,ex = mnist_fgsm_orig_examples[i][0]
            plt.title("target "+"{} -> {}".format(orig, adv)+ " predicted")
            plt.imshow(ex, cmap="gray")
        else:
            # Right column: adversarial image, titled "prediction before -> after".
            orig,adv,ex = mnist_fgsm_examples[i][0]
            plt.title("predicted "+"{} -> {}".format(orig, adv)+ " attacked")
            plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()
#Accuracy after attack vs epsilon
plt.figure(figsize=(5,5))
# Bug fix: matplotlib's single-letter color codes are lowercase;
# color='R' is not a valid color specification and raises ValueError.
plt.plot(ill_epsilons, mnist_ill_accuracies, "*-", color='r')
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, 17, step=2))
plt.title("Iterative Least Likely vs MNIST Model / Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()
# Plot several examples vs their adversarial samples at each epsilon for ill attack
cnt = 0
plt.figure(figsize=(8,20))
for i in range(len(ill_epsilons)):
    for j in range(2):
        cnt += 1
        plt.subplot(len(ill_epsilons),2,cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j==0:
            # Left column: original image, titled "target -> model prediction".
            plt.ylabel("Eps: {}".format(ill_epsilons[i]), fontsize=14)
            orig,adv,ex = mnist_ill_orig_examples[i][0]
            plt.title("target "+"{} -> {}".format(orig, adv)+ " predicted")
            plt.imshow(ex, cmap="gray")
        else:
            # Right column: adversarial image, titled "prediction before -> after".
            orig,adv,ex = mnist_ill_examples[i][0]
            plt.title("predicted "+"{} -> {}".format(orig, adv)+ " attacked")
            plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()
```
##Running the Attack for CIFAR10 dataset
```
#FGSM attack
cifar_fgsm_accuracies = [] #list to keep the model accuracy after attack for each epsilon value
cifar_fgsm_examples = [] # list to collect adversarial examples returned from the attack_test function for every epsilon value
cifar_fgsm_orig_examples = [] #list to collect original images corresponding to the collected adversarial examples
# Run test for each epsilon
for eps in fgsm_epsilons:
    acc, ex, orig = attack_test(cifar_model, device, cifar_test_loader, eps, attack='fgsm', alpha=1, iters=0)
    cifar_fgsm_accuracies.append(acc)
    cifar_fgsm_examples.append(ex)
    cifar_fgsm_orig_examples.append(orig)
#Iterative_LL attack
cifar_ill_accuracies = [] #list to keep the model accuracy after attack for each epsilon value
cifar_ill_examples = [] # list to collect adversarial examples returned from the attack_test function for every epsilon value
cifar_ill_orig_examples = [] #list to collect original images corresponding to the collected adversarial examples
# Run test for each epsilon (iters=0 lets ill_attack derive its iteration count)
for eps in ill_epsilons:
    acc, ex, orig = attack_test(cifar_model, device, cifar_test_loader, eps, attack='ill', alpha=1, iters=0)
    cifar_ill_accuracies.append(acc)
    cifar_ill_examples.append(ex)
    cifar_ill_orig_examples.append(orig)
```
##Visualizing the results for CIFAR10 dataset
```
#Accuracy after attack vs epsilon
plt.figure(figsize=(5,5))
plt.plot(fgsm_epsilons, cifar_fgsm_accuracies, "*-")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, .35, step=0.05))
# Typo fix: the attack is FGSM (Fast Gradient Sign Method), not "FSGM".
plt.title("FGSM Attack vs CIFAR Model Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()
# Plot several examples vs their adversarial samples at each epsilon for fgsm attack
cnt = 0
# 8 is the width of the figure
# 20 is the height of the figure
plt.figure(figsize=(8,20))
for i in range(len(fgsm_epsilons)):
    for j in range(2):
        cnt += 1
        plt.subplot(len(fgsm_epsilons),2,cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j==0:
            # Left column: original image, titled "target -> model prediction".
            plt.ylabel("Eps: {}".format(fgsm_epsilons[i]), fontsize=14)
            orig,adv,ex = cifar_fgsm_orig_examples[i][0]
            plt.title("target "+"{} -> {}".format(classes[orig], classes[adv])+ " predicted")
            # CHW -> HWC for imshow (CIFAR images are 3-channel)
            plt.imshow(ex[0].transpose(1,2,0), cmap="gray")
        else:
            # Right column: adversarial image, titled "prediction before -> after".
            orig,adv,ex = cifar_fgsm_examples[i][0]
            plt.title("predicted "+"{} -> {}".format(classes[orig], classes[adv])+ " attacked")
            plt.imshow(ex[0].transpose(1,2,0), cmap="gray")
plt.tight_layout()
plt.show()
#Accuracy after attack vs epsilon
plt.figure(figsize=(5,5))
# Bug fix: matplotlib's single-letter color codes are lowercase;
# color='R' is not a valid color specification and raises ValueError.
plt.plot(ill_epsilons, cifar_ill_accuracies, "*-", color='r')
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, 17, step=2))
plt.title("Iterative Least Likely vs CIFAR Model / Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()
# Plot several examples vs their adversarial samples at each epsilon for iterative
# least likely attack.
cnt = 0
# 8 is the width of the figure
# 20 is the height of the figure
plt.figure(figsize=(8,20))
for i in range(len(ill_epsilons)):
    for j in range(2):
        cnt += 1
        plt.subplot(len(ill_epsilons),2,cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j==0:
            # Left column: original image, titled "target -> model prediction".
            plt.ylabel("Eps: {}".format(ill_epsilons[i]), fontsize=14)
            orig,adv,ex = cifar_ill_orig_examples[i][0]
            plt.title("target "+"{} -> {}".format(classes[orig], classes[adv])+ " predicted")
            # CHW -> HWC for imshow (CIFAR images are 3-channel)
            plt.imshow(ex[0].transpose(1,2,0), cmap="gray")
        else:
            # Right column: adversarial image, titled "prediction before -> after".
            orig,adv,ex = cifar_ill_examples[i][0]
            plt.title("predicted "+"{} -> {}".format(classes[orig], classes[adv])+ " attacked")
            plt.imshow(ex[0].transpose(1,2,0), cmap="gray")
plt.tight_layout()
plt.show()
```
| github_jupyter |
[**Blueprints for Text Analysis Using Python**](https://github.com/blueprints-for-text-analytics-python/blueprints-text)
Jens Albrecht, Sidharth Ramachandran, Christian Winkler
**If you like the book or the code examples here, please leave a friendly comment on [Amazon.com](https://www.amazon.com/Blueprints-Text-Analytics-Using-Python/dp/149207408X)!**
<img src="../rating.png" width="100"/>
# Chapter 5:<div class='tocSkip'/>
# Feature Engineering and Syntactic Similarity
## Remark<div class='tocSkip'/>
The code in this notebook differs slightly from the printed book.
Several layout and formatting commands, like `figsize` to control figure size or subplot commands are removed in the book.
All of this is done to simplify the code in the book and put the focus on the important parts instead of formatting.
## Setup<div class='tocSkip'/>
Set directory locations. If working on Google Colab: copy files and install required libraries.
```
# Detect Google Colab; when running there, fetch the chapter's setup script
# from the book repository and execute it (note: %run is an IPython magic).
import sys, os
ON_COLAB = 'google.colab' in sys.modules
if ON_COLAB:
    GIT_ROOT = 'https://github.com/blueprints-for-text-analytics-python/blueprints-text/raw/master'
    os.system(f'wget {GIT_ROOT}/ch05/setup.py')
    %run -i setup.py
```
## Load Python Settings<div class="tocSkip"/>
Common imports, defaults for formatting in Matplotlib, Pandas etc.
```
%run "$BASE_DIR/settings.py"
%reload_ext autoreload
%autoreload 2
%config InlineBackend.figure_format = 'png'
```
# Data preparation
```
sentences = ["It was the best of times",
             "it was the worst of times",
             "it was the age of wisdom",
             "it was the age of foolishness"]
# whitespace tokenization of every sentence
tokenized_sentences = [sentence.split() for sentence in sentences]
# vocabulary = set of unique tokens (case-sensitive: "It" and "it" are distinct)
vocabulary = {token for sentence in tokenized_sentences for token in sentence}
import pandas as pd
# word/index pairs (displayed as the cell output)
[[word, index] for index, word in enumerate(vocabulary)]
```
# One-hot by hand
```
def onehot_encode(tokenized_sentence):
    """Encode a tokenized sentence as a 0/1 presence vector over ``vocabulary``."""
    present = set(tokenized_sentence)
    return [1 if word in present else 0 for word in vocabulary]
onehot = [onehot_encode(tokens) for tokens in tokenized_sentences]
# show each sentence next to its encoding
for sentence, encoding in zip(sentences, onehot):
    print("%s: %s" % (encoding, sentence))
pd.DataFrame(onehot, columns=vocabulary)
# word overlap between the first two sentences via element-wise AND
sim = [x & y for x, y in zip(onehot[0], onehot[1])]
sum(sim)
import numpy as np
# the same overlap count, expressed as a dot product
np.dot(onehot[0], onehot[1])
# similarity of every sentence to sentence 1 in one shot
np.dot(onehot, onehot[1])
```
## Out of vocabulary
```
# Encoding unseen sentences: out-of-vocabulary words are simply dropped.
onehot_encode("the age of wisdom is the best of times".split())
onehot_encode("John likes to watch movies. Mary likes movies too.".split())
```
## document term matrix
```
onehot
```
## similarities
```
import numpy as np
# pairwise sentence similarities = document-term matrix times its transpose
np.dot(onehot, np.transpose(onehot))
```
# scikit learn one-hot vectorization
```
# Same encoding via scikit-learn's MultiLabelBinarizer
from sklearn.preprocessing import MultiLabelBinarizer
lb = MultiLabelBinarizer()
lb.fit([vocabulary])
lb.transform(tokenized_sentences)
```
# CountVectorizer
```
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
more_sentences = sentences + ["John likes to watch movies. Mary likes movies too.",
                              "Mary also likes to watch football games."]
pd.DataFrame(more_sentences)
# learn the vocabulary, then build the (sparse) document-term matrix
cv.fit(more_sentences)
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2 in
# favour of get_feature_names_out() — confirm the pinned sklearn version.
print(cv.get_feature_names())
dt = cv.transform(more_sentences)
dt
pd.DataFrame(dt.toarray(), columns=cv.get_feature_names())
from sklearn.metrics.pairwise import cosine_similarity
cosine_similarity(dt[0], dt[1])
len(more_sentences)
# full pairwise similarity matrix over all sentences
pd.DataFrame(cosine_similarity(dt, dt))
```
# TF/IDF
```
# Re-weight the raw counts with TF-IDF
from sklearn.feature_extraction.text import TfidfTransformer
tfidf = TfidfTransformer()
tfidf_dt = tfidf.fit_transform(dt)
pd.DataFrame(tfidf_dt.toarray(), columns=cv.get_feature_names())
pd.DataFrame(cosine_similarity(tfidf_dt, tfidf_dt))
# ABCNEWS_FILE is defined by the settings script loaded earlier
headlines = pd.read_csv(ABCNEWS_FILE, parse_dates=["publish_date"])
headlines.head()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer()
dt = tfidf.fit_transform(headlines["headline_text"])
dt
dt.data.nbytes
%%time
cosine_similarity(dt[0:10000], dt[0:10000])
```
## Stopwords
```
# Use spaCy's English stop-word list with the vectorizer.
# NOTE(review): recent scikit-learn warns when stop_words is a set rather
# than a list — confirm the pinned versions tolerate this.
from spacy.lang.en.stop_words import STOP_WORDS as stopwords
print(len(stopwords))
tfidf = TfidfVectorizer(stop_words=stopwords)
dt = tfidf.fit_transform(headlines["headline_text"])
dt
```
## min_df
```
# min_df as an absolute count: keep terms appearing in at least 2 documents
tfidf = TfidfVectorizer(stop_words=stopwords, min_df=2)
dt = tfidf.fit_transform(headlines["headline_text"])
dt
# min_df as a fraction of the corpus
tfidf = TfidfVectorizer(stop_words=stopwords, min_df=.0001)
dt = tfidf.fit_transform(headlines["headline_text"])
dt
```
## max_df
```
# drop terms appearing in more than 10% of documents
tfidf = TfidfVectorizer(stop_words=stopwords, max_df=0.1)
dt = tfidf.fit_transform(headlines["headline_text"])
dt
# same, but without the stop-word list for comparison
tfidf = TfidfVectorizer(max_df=0.1)
dt = tfidf.fit_transform(headlines["headline_text"])
dt
```
## n-grams
```
# unigrams + bigrams: compare matrix shape and memory footprint
tfidf = TfidfVectorizer(stop_words=stopwords, ngram_range=(1,2), min_df=2)
dt = tfidf.fit_transform(headlines["headline_text"])
print(dt.shape)
print(dt.data.nbytes)
# up to trigrams
tfidf = TfidfVectorizer(stop_words=stopwords, ngram_range=(1,3), min_df=2)
dt = tfidf.fit_transform(headlines["headline_text"])
print(dt.shape)
print(dt.data.nbytes)
```
## Lemmas
```
from tqdm.auto import tqdm
import spacy
# NOTE(review): the "en" shorthand is deprecated in spaCy 3; newer versions
# require spacy.load("en_core_web_sm") — confirm the pinned spaCy version.
nlp = spacy.load("en")
nouns_adjectives_verbs = ["NOUN", "PROPN", "ADJ", "ADV", "VERB"]
# Add two derived columns per headline: all lemmas, and lemmas restricted
# to content-word POS tags (nouns/adjectives/verbs etc.).
for i, row in tqdm(headlines.iterrows(), total=len(headlines)):
    doc = nlp(str(row["headline_text"]))
    headlines.at[i, "lemmas"] = " ".join([token.lemma_ for token in doc])
    headlines.at[i, "nav"] = " ".join([token.lemma_ for token in doc if token.pos_ in nouns_adjectives_verbs])
headlines.head()
# vectorize the lemmatized text
tfidf = TfidfVectorizer(stop_words=stopwords)
dt = tfidf.fit_transform(headlines["lemmas"].map(str))
dt
# vectorize only the content-word lemmas
tfidf = TfidfVectorizer(stop_words=stopwords)
dt = tfidf.fit_transform(headlines["nav"].map(str))
dt
```
## remove top 10,000
```
# Use the 10,000 most common English words as an aggressive stop-word list.
top_10000 = pd.read_csv("https://raw.githubusercontent.com/first20hours/google-10000-english/master/google-10000-english.txt", header=None)
tfidf = TfidfVectorizer(stop_words=set(top_10000.iloc[:,0].values))
dt = tfidf.fit_transform(headlines["nav"].map(str))
dt
tfidf = TfidfVectorizer(ngram_range=(1,2), stop_words=set(top_10000.iloc[:,0].values), min_df=2)
dt = tfidf.fit_transform(headlines["nav"].map(str))
dt
```
## Finding document most similar to made-up document
```
tfidf = TfidfVectorizer(stop_words=stopwords, min_df=2)
dt = tfidf.fit_transform(headlines["lemmas"].map(str))
dt
# project a made-up query into the fitted TF-IDF space
made_up = tfidf.transform(["australia and new zealand discuss optimal apple size"])
sim = cosine_similarity(made_up, dt)
sim[0]
# top-5 most similar headlines (descending similarity)
headlines.iloc[np.argsort(sim[0])[::-1][0:5]][["publish_date", "lemmas"]]
```
# Finding the most similar documents
```
# there are "test" headlines in the corpus
stopwords.add("test")
# l2-normalized TF-IDF vectors: dot product == cosine similarity
tfidf = TfidfVectorizer(stop_words=stopwords, ngram_range=(1,2), min_df=2, norm='l2')
dt = tfidf.fit_transform(headlines["headline_text"])
```
### Timing Cosine Similarity
```
%%time
cosine_similarity(dt[0:10000], dt[0:10000], dense_output=False)
%%time
r = cosine_similarity(dt[0:10000], dt[0:10000])
# zero out (near-)identical pairs so argmax finds genuinely different documents
r[r > 0.9999] = 0
print(np.argmax(r))
%%time
r = cosine_similarity(dt[0:10000], dt[0:10000], dense_output=False)
r[r > 0.9999] = 0
print(np.argmax(r))
```
### Timing Dot-Product
```
%%time
# with l2-normalized vectors this equals the cosine similarity above
r = np.dot(dt[0:10000], np.transpose(dt[0:10000]))
r[r > 0.9999] = 0
print(np.argmax(r))
```
## Batch
```
%%time
# Find the most similar (non-identical) pair of headlines by scanning the
# similarity matrix in batch x batch tiles to bound memory usage.
batch = 10000
max_sim = 0.0
max_a = None
max_b = None
for a in range(0, dt.shape[0], batch):
    # only the lower triangle (b <= a) — similarity is symmetric
    for b in range(0, a+batch, batch):
        print(a, b)
        #r = np.dot(dt[a:a+batch], np.transpose(dt[b:b+batch]))
        r = cosine_similarity(dt[a:a+batch], dt[b:b+batch], dense_output=False)
        # eliminate (near-)identical vectors by zeroing their similarity
        # so they cannot win the argmax below
        r[r > 0.9999] = 0
        sim = r.max()
        if sim > max_sim:
            # argmax returns a single flat index which we have to
            # map back to the two dimensions
            (max_a, max_b) = np.unravel_index(np.argmax(r), r.shape)
            # adjust offsets in corpus (this is a submatrix)
            max_a += a
            max_b += b
            max_sim = sim
print(max_a, max_b)
print(max_sim)
# NOTE(review): pd.set_option('max_colwidth', -1) is deprecated in newer
# pandas ('display.max_colwidth', None) — confirm the pinned version.
pd.set_option('max_colwidth', -1)
headlines.iloc[[max_a, max_b]][["publish_date", "headline_text"]]
```
# Finding most related words
```
# Word-word similarity: vectorize terms occurring in >= 1000 headlines,
# then compare *columns* (transpose) of the document-term matrix.
tfidf_word = TfidfVectorizer(stop_words=stopwords, min_df=1000)
dt_word = tfidf_word.fit_transform(headlines["headline_text"])
r = cosine_similarity(dt_word.T, dt_word.T)
# remove self-similarity so it never tops the ranking
np.fill_diagonal(r, 0)
voc = tfidf_word.get_feature_names()
size = r.shape[0] # quadratic
# top 40 entries of the flattened matrix, highest similarity first
for index in np.argsort(r.flatten())[::-1][0:40]:
    a = int(index/size)   # row (first word)
    b = index%size        # column (second word)
    if a > b: # avoid repetitions (matrix is symmetric)
        print('"%s" related to "%s"' % (voc[a], voc[b]))
```
| github_jupyter |
```
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import quandl
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import datetime
# NOTE(review): this rebinds the name `datetime` from the module (imported
# above) to the class — confirm nothing later needs the module itself.
from datetime import datetime
#selected = ['WALMEX', 'GRUMAB', 'PE&OLES']
# get adjusted closing prices of 5 selected companies with Quandl
# SECURITY NOTE(review): hard-coded API key committed to source — rotate it
# and load it from an environment variable or config file instead.
quandl.ApiConfig.api_key = 'Qa3CCQjeQQM2EZtv-rvh'
selected = ['CNP', 'F', 'WMT', 'GE', 'TSLA']
data = quandl.get_table('WIKI/PRICES', ticker = selected,
                        qopts = { 'columns': ['date', 'ticker', 'adj_close'] },
                        date = { 'gte': '2014-1-1', 'lte': '2016-12-31' }, paginate=True)
# reorganise data pulled by setting date as index with
# columns of tickers and their corresponding adjusted prices
clean = data.set_index('date')
table = clean.pivot(columns='ticker')
table
# Replace the Quandl data with SPY/QQQ closing prices from Yahoo Finance
# (this overwrites `table` from the cell above).
import yfinance as yf
msft = yf.Tickers("spy qqq")
table = msft.history()['Close']
table
selected = ["SPY","QQQ"]
# calculate daily and annual returns of the stocks (250 trading days/year)
returns_daily = table.pct_change()
returns_annual = returns_daily.mean() * 250
# get daily and annualized covariance of returns of the stocks
cov_daily = returns_daily.cov()
cov_annual = cov_daily * 250
# empty lists to store returns, volatility and weights of imaginary portfolios
port_returns = []
port_volatility = []
stock_weights = []
# set the number of combinations for imaginary portfolios
num_assets = len(selected)
num_portfolios = 50000
# Monte Carlo: sample random weight vectors (normalized to sum to 1) and
# record each portfolio's annualized return and volatility.
for single_portfolio in range(num_portfolios):
    weights = np.random.random(num_assets)
    weights /= np.sum(weights)
    returns = np.dot(weights, returns_annual)
    volatility = np.sqrt(np.dot(weights.T, np.dot(cov_annual, weights)))
    port_returns.append(returns)
    port_volatility.append(volatility)
    stock_weights.append(weights)
# a dictionary for Returns and Risk values of each portfolio
portfolio = {'Returns': port_returns,
             'Volatility': port_volatility}
# extend original dictionary to accommodate each ticker and weight in the portfolio
for counter,symbol in enumerate(selected):
    portfolio[symbol+' Weight'] = [Weight[counter] for Weight in stock_weights]
# make a nice dataframe of the extended dictionary
df = pd.DataFrame(portfolio)
# get better labels for desired arrangement of columns
column_order = ['Returns', 'Volatility'] + [stock+' Weight' for stock in selected]
# reorder dataframe columns
df = df[column_order]
# plot the efficient frontier with a scatter plot
# NOTE(review): the 'seaborn' style alias was removed in matplotlib >= 3.6
# (use 'seaborn-v0_8' there) — confirm the pinned matplotlib version.
plt.style.use('seaborn')
df.plot.scatter(x='Volatility', y='Returns', figsize=(10, 8), grid=True)
plt.xlabel('Volatility (Std. Deviation)')
plt.ylabel('Expected Returns')
plt.title('Efficient Frontier')
plt.show()
# }
# # get adjusted closing prices of 5 selected companies with Quandl
# quandl.ApiConfig.api_key = 'Qa3CCQjeQQM2EZtv-rvh'
# selected = ['CNP', 'F', 'WMT', 'GE', 'TSLA']
# data = quandl.get_table('WIKI/PRICES', ticker = selected,
# qopts = { 'columns': ['date', 'ticker', 'adj_close'] },
# date = { 'gte': '2014-1-1', 'lte': '2016-12-31' }, paginate=True)
# # reorganise data pulled by setting date as index with
# # columns of tickers and their corresponding adjusted prices
# clean = data.set_index('date')
# table = clean.pivot(columns='ticker')
# Second simulation: same Monte Carlo as above, but also tracking each
# portfolio's Sharpe ratio (return/volatility; risk-free rate assumed 0).
# calculate daily and annual returns of the stocks
returns_daily = table.pct_change()
returns_annual = returns_daily.mean() * 250
# get daily and annualized covariance of returns of the stocks
cov_daily = returns_daily.cov()
cov_annual = cov_daily * 250
# empty lists to store returns, volatility and weights of imaginary portfolios
port_returns = []
port_volatility = []
sharpe_ratio = []
stock_weights = []
# set the number of combinations for imaginary portfolios
num_assets = len(selected)
num_portfolios = 50000
#set random seed for reproduction's sake
np.random.seed(101)
# populate the empty lists with each portfolio's returns, risk and weights
for single_portfolio in range(num_portfolios):
    weights = np.random.random(num_assets)
    weights /= np.sum(weights)
    returns = np.dot(weights, returns_annual)
    volatility = np.sqrt(np.dot(weights.T, np.dot(cov_annual, weights)))
    sharpe = returns / volatility
    sharpe_ratio.append(sharpe)
    port_returns.append(returns)
    port_volatility.append(volatility)
    stock_weights.append(weights)
# a dictionary for Returns and Risk values of each portfolio
portfolio = {'Returns': port_returns,
             'Volatility': port_volatility,
             'Sharpe Ratio': sharpe_ratio}
# extend original dictionary to accommodate each ticker and weight in the portfolio
for counter,symbol in enumerate(selected):
    portfolio[symbol+' Weight'] = [Weight[counter] for Weight in stock_weights]
# make a nice dataframe of the extended dictionary
df = pd.DataFrame(portfolio)
# get better labels for desired arrangement of columns
column_order = ['Returns', 'Volatility', 'Sharpe Ratio'] + [stock+' Weight' for stock in selected]
# reorder dataframe columns
df = df[column_order]
# plot frontier colored by Sharpe ratio with a scatterplot
# NOTE(review): 'seaborn-dark' was removed in matplotlib >= 3.6
# (use 'seaborn-v0_8-dark' there) — confirm the pinned matplotlib version.
plt.style.use('seaborn-dark')
df.plot.scatter(x='Volatility', y='Returns', c='Sharpe Ratio',
                cmap='RdYlGn', edgecolors='black', figsize=(10, 8), grid=True)
plt.xlabel('Volatility (Std. Deviation)')
plt.ylabel('Expected Returns')
plt.title('Efficient Frontier')
plt.show()
# Locate the two portfolios of interest: the one with the highest Sharpe
# ratio and the one with the lowest volatility.
best_sharpe = df['Sharpe Ratio'].max()
lowest_vol = df['Volatility'].min()
sharpe_portfolio = df.loc[df['Sharpe Ratio'] == best_sharpe]
min_variance_port = df.loc[df['Volatility'] == lowest_vol]
# Re-plot the frontier coloured by Sharpe ratio and overlay the two
# special portfolios as large diamond markers.
plt.style.use('seaborn-dark')
df.plot.scatter(x='Volatility', y='Returns', c='Sharpe Ratio',
                cmap='RdYlGn', edgecolors='black', figsize=(10, 8), grid=True)
plt.scatter(x=sharpe_portfolio['Volatility'], y=sharpe_portfolio['Returns'],
            c='red', marker='D', s=200)
plt.scatter(x=min_variance_port['Volatility'], y=min_variance_port['Returns'],
            c='blue', marker='D', s=200)
plt.xlabel('Volatility (Std. Deviation)')
plt.ylabel('Expected Returns')
plt.title('Efficient Frontier')
plt.show()
# Show each special portfolio's full row (transposed for readability).
print(min_variance_port.T)
print(sharpe_portfolio.T)
from pandas_datareader import data
import pandas as pd
from yahoo_finance import Share
# Define the instruments to download (BMV tickers).
tickers = ['WALMEX','GMEXICOB','PE&OLES']
# Define which online source to use.
# NOTE(review): the 'google' source has been discontinued in
# pandas-datareader -- this cell will fail on current versions; verify.
data_source = 'google'
# Date range to request.
start_date = '2015-01-16'
end_date = '2018-01-16'
# Use pandas_datareader's DataReader to load the desired data.
panel_data = data.DataReader(tickers, data_source, start_date, end_date)
# Get just the closing prices; the index used here is the major index of
# the (legacy pandas Panel) panel_data.
# NOTE(review): `.ix` was removed in pandas >= 1.0; on modern pandas this
# selection needs rewriting (e.g. panel_data['Close']) -- confirm version.
close = panel_data.ix['Close']
# All weekdays between start_date and end_date.
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# Align the existing prices with the new set of dates by reindexing
# `close` using all_weekdays as the new index (missing days become NaN).
close= close.reindex(all_weekdays)
selected = ['WALMEX', 'GMEXICOB', 'PE&OLES']
# get adjusted closing prices of the selected companies with Quandl
# NOTE(review): `quandl` is not imported anywhere in this dump, the key is
# hard-coded in the notebook (rotate/remove it), and WIKI/PRICES covers US
# equities only -- this query likely returns nothing for these BMV tickers.
quandl.ApiConfig.api_key = 'Qa3CCQjeQQM2EZtv-rvh'
# NOTE(review): this rebinds `data`, shadowing the pandas_datareader
# `data` module imported above.
data = quandl.get_table('WIKI/PRICES', ticker = selected,
                        qopts = { 'columns': ['date', 'ticker', 'adj_close'] },
                        date = { 'gte': '2015-01-16', 'lte': '2018-01-16' }, paginate=True)
# reorganise data pulled by setting date as index
clean = data.set_index('date')
# the Quandl result (`clean`) is discarded; the working price table is the
# reindexed DataReader download from the previous cell
table = close
table.head()
# calculate daily and annualised (x250 trading days) returns of the stocks
returns_daily = table.pct_change()
returns_annual = returns_daily.mean() * 250
# daily covariance of returns, annualised the same way
cov_daily = returns_daily.cov()
cov_annual = cov_daily * 250
# empty lists to store returns, volatility and weights of the simulated
# (imaginary) portfolios
port_returns = []
port_volatility = []
stock_weights = []
# number of assets and of random portfolios to simulate
num_assets = len(selected)
num_portfolios = 50000
# Monte Carlo loop (no seed here, so results differ between runs):
# record each random portfolio's annualised return, risk and weights
for single_portfolio in range(num_portfolios):
    weights = np.random.random(num_assets)
    weights /= np.sum(weights)  # normalise so the weights sum to 1
    returns = np.dot(weights, returns_annual)
    volatility = np.sqrt(np.dot(weights.T, np.dot(cov_annual, weights)))
    port_returns.append(returns)
    port_volatility.append(volatility)
    stock_weights.append(weights)
# a dictionary of the Returns and Risk values of each portfolio
portfolio = {'Returns': port_returns,
             'Volatility': port_volatility}
# extend the dictionary to accommodate each ticker's weight column
for counter,symbol in enumerate(selected):
    portfolio[symbol+' Weight'] = [Weight[counter] for Weight in stock_weights]
# make a dataframe of the extended dictionary
df = pd.DataFrame(portfolio)
# desired arrangement of columns
column_order = ['Returns', 'Volatility'] + [stock+' Weight' for stock in selected]
# reorder dataframe columns
df = df[column_order]
# plot the efficient frontier with a scatter plot
plt.style.use('seaborn')
df.plot.scatter(x='Volatility', y='Returns', figsize=(10, 8), grid=True)
plt.xlabel('Volatility (Std. Deviation)')
plt.ylabel('Expected Returns')
plt.title('Efficient Frontier')
plt.show()
# Re-run the simulation on the reindexed price table, this time also
# tracking each portfolio's Sharpe ratio.
table = close
# calculate daily and annualised (x250 trading days) returns of the stocks
returns_daily = table.pct_change()
returns_annual = returns_daily.mean() * 250
# daily covariance of returns, annualised the same way
cov_daily = returns_daily.cov()
cov_annual = cov_daily * 250
# empty lists to store returns, volatility, Sharpe ratio and weights of
# the simulated (imaginary) portfolios
port_returns = []
port_volatility = []
sharpe_ratio = []
stock_weights = []
# number of assets and of random portfolios to simulate
num_assets = len(selected)
num_portfolios = 50000
# set random seed for reproducibility
np.random.seed(101)
# populate the lists with each portfolio's returns, risk and weights
for single_portfolio in range(num_portfolios):
    weights = np.random.random(num_assets)
    weights /= np.sum(weights)  # normalise so the weights sum to 1
    returns = np.dot(weights, returns_annual)
    volatility = np.sqrt(np.dot(weights.T, np.dot(cov_annual, weights)))
    sharpe = returns / volatility  # risk-free rate implicitly assumed to be 0
    sharpe_ratio.append(sharpe)
    port_returns.append(returns)
    port_volatility.append(volatility)
    stock_weights.append(weights)
# a dictionary of the Returns, Risk and Sharpe values of each portfolio
portfolio = {'Returns': port_returns,
             'Volatility': port_volatility,
             'Sharpe Ratio': sharpe_ratio}
# extend the dictionary to accommodate each ticker's weight column
for counter,symbol in enumerate(selected):
    portfolio[symbol+' Weight'] = [Weight[counter] for Weight in stock_weights]
# make a dataframe of the extended dictionary
df = pd.DataFrame(portfolio)
# desired arrangement of columns
column_order = ['Returns', 'Volatility', 'Sharpe Ratio'] + [stock+' Weight' for stock in selected]
# reorder dataframe columns
df = df[column_order]
# plot the frontier coloured by Sharpe ratio
plt.style.use('seaborn-dark')
df.plot.scatter(x='Volatility', y='Returns', c='Sharpe Ratio',
                cmap='RdYlGn', edgecolors='black', figsize=(10, 8), grid=True)
plt.xlabel('Volatility (Std. Deviation)')
plt.ylabel('Expected Returns')
plt.title('Efficient Frontier')
plt.show()
# find min Volatility & max Sharpe values in the dataframe (df)
min_volatility = df['Volatility'].min()
max_sharpe = df['Sharpe Ratio'].max()
# use the min/max values to locate the two special portfolios; these rows
# (sharpe_portfolio, min_variance_port) are reused by later cells to size
# money allocations
sharpe_portfolio = df.loc[df['Sharpe Ratio'] == max_sharpe]
min_variance_port = df.loc[df['Volatility'] == min_volatility]
# plot frontier, max-Sharpe & min-volatility portfolios with a scatterplot
plt.style.use('seaborn-dark')
df.plot.scatter(x='Volatility', y='Returns', c='Sharpe Ratio',
                cmap='RdYlGn', edgecolors='black', figsize=(10, 8), grid=True)
plt.scatter(x=sharpe_portfolio['Volatility'], y=sharpe_portfolio['Returns'], c='red', marker='D', s=200)
plt.scatter(x=min_variance_port['Volatility'], y=min_variance_port['Returns'], c='blue', marker='D', s=200 )
plt.xlabel('Volatility (Std. Deviation)')
plt.ylabel('Expected Returns')
plt.title('Efficient Frontier')
plt.show()
# show the full row of each special portfolio (transposed for readability)
print(min_variance_port.T)
print(sharpe_portfolio.T)
df.head()
# Keep an untouched copy of the price table before further mutation.
close_original = close.copy()
# Quick look at each ticker's price series.
close[selected[2]].plot()
plt.title(selected[2])
close[selected[1]].plot()
plt.title(selected[1])
close[selected[0]].plot()
plt.title(selected[0])
close.GMEXICOB.plot()
# BUG FIX: the original titled this plot selected[2] ('PE&OLES'), but the
# series plotted is GMEXICOB, which is selected[1].
plt.title(selected[1])
# Print summary statistics for each selected ticker.
# Converted from Python-2 print statements to print() calls (valid in
# both Python 2 and 3) and generalised over the length of `selected`.
for i in range(len(selected)):
    print(selected[i])
    print(close[selected[i]].describe())
# Per-ticker report: first/last price, mean, variance, volatility and total
# return over the sample (%). Converted from Python-2 print statements to
# print() calls so the cell also runs under Python 3.
for i in range(len(selected)):
    print(selected[i])
    print("precio del inicio")
    print(close[selected[i]][0])
    print("precio actual")
    print(close[selected[i]][len(close) - 1])
    print("La media es: ")
    print(close[selected[i]].mean())
    print("La varianza es: ")
    print((close[selected[i]].std()) ** 2)
    print("La volatilidad es: ")
    print(close[selected[i]].std())
    # BUG FIX: the original used len(close1)-1, but `close1` is not defined
    # until a later cell (NameError); the series indexed here is `close`.
    print("El rendimiento del portafolio es: " + str(int((close[selected[i]][len(close) - 1] / close[selected[i]][0]) * 100) - 100) + " %")
# Covariance and correlation of daily closing prices across tickers.
close.cov()
close.corr()
# Money (MXN) allocated to each ticker for a 2,000,000 budget, using the
# max-Sharpe portfolio weights. Converted to print() calls for Python 3.
for i in range(len(selected)):
    print(selected[i] + " : " + str(float(sharpe_portfolio[selected[i] + " Weight"] * 2000000)))
# BUG FIX: `DataFrame` was referenced unqualified, but only `pandas as pd`
# is imported in this dump; qualify it explicitly.
close1 = pd.DataFrame(close.copy())
close1.head()
# Combined portfolio series: each ticker's price scaled by its max-Sharpe
# weight times the 2,000,000 budget.
# NOTE(review): weight*budget times *price* is not a share count; verify
# the intended portfolio-value formula.
close1['PORT'] = float(sharpe_portfolio[selected[0]+" Weight"]*2000000)*close1[selected[0]] + float(sharpe_portfolio[selected[1]+" Weight"]*2000000)*close1[selected[1]] + float(sharpe_portfolio[selected[2]+" Weight"]*2000000)*close1[selected[2]]
close1.head()
# Summary statistics for the combined portfolio series. Converted from
# Python-2 print statements to print() calls for Python 3 compatibility.
print(close1.PORT.describe())
print("PORT")
print("La media es: ")
print(close1.PORT.mean())
print("La varianza es: ")
print((close1.PORT.std()) ** 2)
print("La volatilidad es: ")
print(close1.PORT.std())
print("El rendimiento del portafolio es: " + str(int((close1.PORT[len(close1) - 1] / close1.PORT[0]) * 100) - 100) + " %")
close1.PORT[len(close1) - 1]
close1.PORT[0]
close1.cov()
close1.corr()
close_original.head()
# BUG FIX: the original line called the DatetimeIndex and wrapped it in
# datetime.date(), which raises a TypeError. Select rows after 2017-03-16
# with a plain index comparison instead.
close_anual = close_original[close_original.index > pd.Timestamp(2017, 3, 16)]
close_original['Fecha'] = close_original.index
datetime.date(1943, 3, 13)
# NOTE(review): indexing the frame with the Fecha column's timestamp values
# raises a KeyError -- probably meant close_original['Fecha']; verify intent.
close_original[close_original.Fecha]
now = datetime.datetime.now()
```
| github_jupyter |
<img src='../img/dust_banner.png' alt='Training school and workshop on dust' align='center' width='100%'></img>
<br>
# Day 2 - Assignment
### About
> So far, we analysed Aerosol Optical Depth from different types of data (satellite, model-based and ground-based observations) for a single dust event. Let us now broaden our view and analyse the annual cycle in 2020 of Aerosol Optical Depth from AERONET and compare it with the CAMS global reanalysis data.
### Tasks
#### 1. Download and plot time-series of AERONET data for Santa Cruz, Tenerife in 2020
* **Hint**
* [AERONET - Example notebook](../../dust_workshop_part1/02_ground-based_observations/21_AERONET.ipynb)
* you can select daily aggregates of the station observations by setting the `AVG` key to `AVG=20`
* **Interpret the results:**
* Have there been other times in 2020 with increased AOD values?
* If yes, how could you find out if the increase in AOD is caused by dust? Try to find out by visualizing the AOD time-series together with another parameter from the AERONET data.
* [MSG SEVIRI Dust RGB](https://sds-was.aemet.es/forecast-products/dust-observations/msg-2013-eumetsat) and [MODIS RGB](https://worldview.earthdata.nasa.gov/) quick looks might be helpful to get a more complete picture of other events that might have happened in 2020
#### 2. Download CAMS global reanalysis (EAC4) and select 2020 time-series for *Santa Cruz, Tenerife*
* **Hint**
* [CAMS global forecast - Example notebook](../../dust_workshop_part1/03_model-based_data/32_CAMS_global_forecast_duaod_load_browse.ipynb) (**Note:** the notebook works with CAMS forecast data, but they have a similar data structure to the CAMS global reanalysis data)
* [Data access](https://ads.atmosphere.copernicus.eu/cdsapp#!/dataset/cams-global-reanalysis-eac4?tab=form) with the following specifications:
> Variable on single levels: `Dust aerosol optical depth at 550 nm` <br>
> Date: `Start=2020-01-01`, `End=2020-12-31` <br>
> Time: `[00:00, 03:00, 06:00, 09:00, 12:00, 15:00, 18:00, 21:00]` <br>
> Restricted area: `N: 30., W: -20, E: 14, S: 20.` <br>
>Format: `netCDF` <br>
* With the xarray function `sel()` and keyword argument `method='nearest'` you can select data based on coordinate information
* We also recommend transforming your xarray.DataArray into a pandas.DataFrame with the function `to_dataframe()`
#### 3. Visualize both time-series of CAMS reanalysis and AERONET daily aggregates in one plot
* **Interpret the results:** What can you say about the annual cycle in 2020 of AOD in Santa Cruz, Tenerife?
### Module outline
* [1 - Select latitude / longitude values for Santa Cruz, Tenerife](#select_lat_lon)
* [2 - Download and plot time-series of AERONET data](#aeronet)
* [3 - Download CAMS global reanalysis (EAC4) and select 2020 time-series for Santa Cruz, Tenerife](#cams_reanalysis)
* [4 - Combine both annual time-series and visualize both in one plot](#visualize_annual_ts)
<hr>
##### Load required libraries
```
%matplotlib inline
import os
import xarray as xr
import numpy as np
import netCDF4 as nc
import pandas as pd
from IPython.display import HTML
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib.cm import get_cmap
from matplotlib import animation
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import cartopy.feature as cfeature
from matplotlib.axes import Axes
from cartopy.mpl.geoaxes import GeoAxes
GeoAxes._pcolormesh_patched = Axes.pcolormesh
import wget
import warnings
warnings.simplefilter(action = "ignore", category = RuntimeWarning)
```
##### Load helper functions
```
%run ../functions.ipynb
```
<hr>
### <a id='select_lat_lon'></a>1. Select latitude / longitude values for Santa Cruz, Tenerife
You can see an overview of all available AERONET Site Names [here](https://aeronet.gsfc.nasa.gov/cgi-bin/draw_map_display_aod_v3?long1=-180&long2=180&lat1=-90&lat2=90&multiplier=2&what_map=4&nachal=1&formatter=0&level=3&place_code=10&place_limit=0).
<br>
### <a id='aeronet'></a>2. Download and plot time-series of AERONET data
<br>
### <a id='cams_reanalysis'></a> 3. Download CAMS global reanalysis (EAC4) and select 2020 time-series for Santa Cruz, Tenerife
<br>
### <a id='visualize_annual_ts'></a>4. Combine both annual time-series and visualize both in one plot
<br>
<hr>
<img src='../img/copernicus_logo.png' alt='Logo EU Copernicus' align='left' width='20%'><br><br><br><br>
<p style="text-align:right;">This project is licensed under the <a href="./LICENSE">MIT License</a> and is developed under a Copernicus contract.
| github_jupyter |
# PixelCNN
**Author:** [ADMoreau](https://github.com/ADMoreau)<br>
**Date created:** 2020/05/17<br>
**Last modified:** 2020/05/23<br>
**Description:** PixelCNN implemented in Keras.
## Introduction
PixelCNN is a generative model proposed in 2016 by van den Oord et al.
(reference: [Conditional Image Generation with PixelCNN Decoders](https://arxiv.org/abs/1606.05328)).
It is designed to generate images (or other data types) iteratively,
from an input vector where the probability distribution of prior elements dictates the
probability distribution of later elements. In the following example, images are generated
in this fashion, pixel-by-pixel, via a masked convolution kernel that only looks at data
from previously generated pixels (origin at the top left) to generate later pixels.
During inference, the output of the network is used as a probability distribution
from which new pixel values are sampled to generate a new image
(here, with MNIST, the pixels values are either black or white).
```
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tqdm import tqdm
```
## Getting the Data
```
# Model / data parameters
num_classes = 10  # not used below; kept from the classification template
input_shape = (28, 28, 1)
n_residual_blocks = 5
# The data, split between train and test sets
(x, _), (y, _) = keras.datasets.mnist.load_data()
# Concatenate train and test images; labels are not needed for a
# generative model
data = np.concatenate((x, y), axis=0)
# Binarise the images: pixels below 33% of the 256 maximum become 0,
# everything else becomes 1
data = np.where(data < (0.33 * 256), 0, 1)
data = data.astype(np.float32)
```
## Create two classes for the requisite Layers for the model
```
# The first layer is the PixelCNN layer. This layer simply
# builds on the 2D convolutional layer, but includes masking.
# The first layer is the PixelCNN layer: a 2D convolution whose kernel is
# masked so each pixel only sees previously generated pixels.
class PixelConvLayer(layers.Layer):
    """Masked 2D convolution for autoregressive pixel generation.

    mask_type "A" hides the centre kernel position as well (first layer);
    mask_type "B" allows the centre position (all later layers). Remaining
    keyword arguments are forwarded to the wrapped ``layers.Conv2D``.
    """

    def __init__(self, mask_type, **kwargs):
        super(PixelConvLayer, self).__init__()
        self.mask_type = mask_type
        self.conv = layers.Conv2D(**kwargs)

    def build(self, input_shape):
        # Let the wrapped convolution create its kernel first, then derive
        # the causal mask from that kernel's shape.
        self.conv.build(input_shape)
        shape = self.conv.kernel.get_shape()
        centre_row, centre_col = shape[0] // 2, shape[1] // 2
        mask = np.zeros(shape=shape)
        mask[:centre_row, ...] = 1.0               # all rows above the centre
        mask[centre_row, :centre_col, ...] = 1.0   # centre row, left of centre
        if self.mask_type == "B":
            mask[centre_row, centre_col, ...] = 1.0  # the centre pixel itself
        self.mask = mask

    def call(self, inputs):
        # Re-apply the mask on every call so weight updates cannot leak
        # information into the masked-out kernel positions.
        self.conv.kernel.assign(self.conv.kernel * self.mask)
        return self.conv(inputs)
# Next, we build our residual block layer.
# This is just a normal residual block, but based on the PixelConvLayer.
# Next, the residual block layer: a standard residual block whose middle
# convolution is a type-"B" masked PixelConvLayer.
class ResidualBlock(keras.layers.Layer):
    """Residual block: 1x1 bottleneck, masked 3x3, 1x1 expand, plus skip.

    Input and output both have ``filters`` channels, so the skip
    connection can be added element-wise.
    """

    def __init__(self, filters, **kwargs):
        super(ResidualBlock, self).__init__(**kwargs)
        self.conv1 = keras.layers.Conv2D(
            filters=filters, kernel_size=1, activation="relu"
        )
        # Masked middle convolution operates on half the channels.
        self.pixel_conv = PixelConvLayer(
            mask_type="B",
            filters=filters // 2,
            kernel_size=3,
            activation="relu",
            padding="same",
        )
        self.conv2 = keras.layers.Conv2D(
            filters=filters, kernel_size=1, activation="relu"
        )

    def call(self, inputs):
        hidden = self.conv1(inputs)
        hidden = self.pixel_conv(hidden)
        hidden = self.conv2(hidden)
        # Residual connection: add the block input back onto its output.
        return keras.layers.add([inputs, hidden])
```
## Build the model based on the original paper
```
# Type-"A" masked convolution first, so the model never sees the pixel it
# is predicting
inputs = keras.Input(shape=input_shape)
x = PixelConvLayer(
    mask_type="A", filters=128, kernel_size=7, activation="relu", padding="same"
)(inputs)
# stack of residual blocks built on type-"B" masked convolutions
for _ in range(n_residual_blocks):
    x = ResidualBlock(filters=128)(x)
# two final 1x1 masked convolutions before the output layer
for _ in range(2):
    x = PixelConvLayer(
        mask_type="B",
        filters=128,
        kernel_size=1,
        strides=1,
        activation="relu",
        padding="valid",
    )(x)
# per-pixel Bernoulli parameter (the images were binarised to 0/1)
out = keras.layers.Conv2D(
    filters=1, kernel_size=1, strides=1, activation="sigmoid", padding="valid"
)(x)
pixel_cnn = keras.Model(inputs, out)
adam = keras.optimizers.Adam(learning_rate=0.0005)
pixel_cnn.compile(optimizer=adam, loss="binary_crossentropy")
pixel_cnn.summary()
# input and target are the same images: the kernel masking guarantees each
# pixel is predicted only from earlier pixels
pixel_cnn.fit(
    x=data, y=data, batch_size=128, epochs=50, validation_split=0.1, verbose=2
)
```
## Demonstration
The PixelCNN cannot generate the full image at once, and must instead generate each pixel in
order, append the last generated pixel to the current image, and feed the image back into the
model to repeat the process.
```
from IPython.display import Image, display

# Create an empty batch of images to fill pixel by pixel.
batch = 4
pixels = np.zeros(shape=(batch,) + (pixel_cnn.input_shape)[1:])
batch, rows, cols, channels = pixels.shape
# Generation must be sequential: each pixel is sampled conditioned on all
# previously generated pixels.
for row in tqdm(range(rows)):
    for col in range(cols):
        for channel in range(channels):
            # Feed the whole array and retrieve the probability that the
            # next pixel is 1.
            probs = pixel_cnn.predict(pixels)[:, row, col, channel]
            # Sample a Bernoulli value from that probability and write it
            # back into the image (ceil(p - U[0,1)) is 1 with probability p).
            pixels[:, row, col, channel] = tf.math.ceil(
                probs - tf.random.uniform(probs.shape)
            )
def deprocess_image(x):
    """Convert a single-channel float image in [0, 1] to an RGB uint8 array.

    The grayscale values are replicated across three channels, scaled to
    [0, 255], clipped into the valid byte range, and cast to uint8.
    """
    # Replicate the single channel into R, G and B along a new last axis.
    rgb = np.stack((x, x, x), 2)
    # Undo the [0, 1] preprocessing scale (in place on the stacked copy).
    rgb *= 255.0
    # Clip to the valid range and cast for image output.
    return np.clip(rgb, 0, 255).astype("uint8")
# Save each generated image to disk and display the results inline.
for i, pic in enumerate(pixels):
    keras.preprocessing.image.save_img(
        "generated_image_{}.png".format(i), deprocess_image(np.squeeze(pic, -1))
    )

display(Image("generated_image_0.png"))
display(Image("generated_image_1.png"))
display(Image("generated_image_2.png"))
display(Image("generated_image_3.png"))
```
| github_jupyter |
# 批量规范化
:label:`sec_batch_norm`
训练深层神经网络是十分困难的,特别是在较短的时间内使他们收敛更加棘手。
在本节中,我们将介绍*批量规范化*(batch normalization) :cite:`Ioffe.Szegedy.2015`,这是一种流行且有效的技术,可持续加速深层网络的收敛速度。
再结合在 :numref:`sec_resnet`中将介绍的残差块,批量规范化使得研究人员能够训练100层以上的网络。
## 训练深层网络
为什么需要批量规范化层呢?让我们来回顾一下训练神经网络时出现的一些实际挑战。
首先,数据预处理的方式通常会对最终结果产生巨大影响。
回想一下我们应用多层感知机来预测房价的例子( :numref:`sec_kaggle_house`)。
使用真实数据时,我们的第一步是标准化输入特征,使其平均值为0,方差为1。
直观地说,这种标准化可以很好地与我们的优化器配合使用,因为它可以将参数的量级进行统一。
第二,对于典型的多层感知机或卷积神经网络。当我们训练时,中间层中的变量(例如,多层感知机中的仿射变换输出)可能具有更广的变化范围:不论是沿着从输入到输出的层,跨同一层中的单元,或是随着时间的推移,模型参数的随着训练更新变幻莫测。
批量规范化的发明者非正式地假设,这些变量分布中的这种偏移可能会阻碍网络的收敛。
直观地说,我们可能会猜想,如果一个层的可变值是另一层的100倍,这可能需要对学习率进行补偿调整。
第三,更深层的网络很复杂,容易过拟合。
这意味着正则化变得更加重要。
批量规范化应用于单个可选层(也可以应用到所有层),其原理如下:在每次训练迭代中,我们首先规范化输入,即通过减去其均值并除以其标准差,其中两者均基于当前小批量处理。
接下来,我们应用比例系数和比例偏移。
正是由于这个基于*批量*统计的*标准化*,才有了*批量规范化*的名称。
请注意,如果我们尝试使用大小为1的小批量应用批量规范化,我们将无法学到任何东西。
这是因为在减去均值之后,每个隐藏单元将为0。
所以,只有使用足够大的小批量,批量规范化这种方法才是有效且稳定的。
请注意,在应用批量规范化时,批量大小的选择可能比没有批量规范化时更重要。
从形式上来说,用$\mathbf{x} \in \mathcal{B}$表示一个来自小批量$\mathcal{B}$的输入,批量规范化$\mathrm{BN}$根据以下表达式转换$\mathbf{x}$:
$$\mathrm{BN}(\mathbf{x}) = \boldsymbol{\gamma} \odot \frac{\mathbf{x} - \hat{\boldsymbol{\mu}}_\mathcal{B}}{\hat{\boldsymbol{\sigma}}_\mathcal{B}} + \boldsymbol{\beta}.$$
:eqlabel:`eq_batchnorm`
在 :eqref:`eq_batchnorm`中,$\hat{\boldsymbol{\mu}}_\mathcal{B}$是小批量$\mathcal{B}$的样本均值,$\hat{\boldsymbol{\sigma}}_\mathcal{B}$是小批量$\mathcal{B}$的样本标准差。
应用标准化后,生成的小批量的平均值为0和单位方差为1。
由于单位方差(与其他一些魔法数)是一个主观的选择,因此我们通常包含
*拉伸参数*(scale)$\boldsymbol{\gamma}$和*偏移参数*(shift)$\boldsymbol{\beta}$,它们的形状与$\mathbf{x}$相同。
请注意,$\boldsymbol{\gamma}$和$\boldsymbol{\beta}$是需要与其他模型参数一起学习的参数。
由于在训练过程中,中间层的变化幅度不能过于剧烈,而批量规范化将每一层主动居中,并将它们重新调整为给定的平均值和大小(通过$\hat{\boldsymbol{\mu}}_\mathcal{B}$和${\hat{\boldsymbol{\sigma}}_\mathcal{B}}$)。
从形式上来看,我们计算出 :eqref:`eq_batchnorm`中的$\hat{\boldsymbol{\mu}}_\mathcal{B}$和${\hat{\boldsymbol{\sigma}}_\mathcal{B}}$,如下所示:
$$\begin{aligned} \hat{\boldsymbol{\mu}}_\mathcal{B} &= \frac{1}{|\mathcal{B}|} \sum_{\mathbf{x} \in \mathcal{B}} \mathbf{x},\\
\hat{\boldsymbol{\sigma}}_\mathcal{B}^2 &= \frac{1}{|\mathcal{B}|} \sum_{\mathbf{x} \in \mathcal{B}} (\mathbf{x} - \hat{\boldsymbol{\mu}}_{\mathcal{B}})^2 + \epsilon.\end{aligned}$$
请注意,我们在方差估计值中添加一个小的常量$\epsilon > 0$,以确保我们永远不会尝试除以零,即使在经验方差估计值可能消失的情况下也是如此。估计值$\hat{\boldsymbol{\mu}}_\mathcal{B}$和${\hat{\boldsymbol{\sigma}}_\mathcal{B}}$通过使用平均值和方差的噪声(noise)估计来抵消缩放问题。
你可能会认为这种噪声是一个问题,而事实上它是有益的。
事实证明,这是深度学习中一个反复出现的主题。
由于尚未在理论上明确的原因,优化中的各种噪声源通常会导致更快的训练和较少的过拟合:这种变化似乎是正则化的一种形式。
在一些初步研究中, :cite:`Teye.Azizpour.Smith.2018`和 :cite:`Luo.Wang.Shao.ea.2018`分别将批量规范化的性质与贝叶斯先验相关联。
这些理论揭示了为什么批量规范化最适应$50 \sim 100$范围中的中等批量大小的难题。
另外,批量规范化层在”训练模式“(通过小批量统计数据规范化)和“预测模式”(通过数据集统计规范化)中的功能不同。
在训练过程中,我们无法得知使用整个数据集来估计平均值和方差,所以只能根据每个小批次的平均值和方差不断训练模型。
而在预测模式下,可以根据整个数据集精确计算批量规范化所需的平均值和方差。
现在,我们了解一下批量规范化在实践中是如何工作的。
## 批量规范化层
回想一下,批量规范化和其他层之间的一个关键区别是,由于批量规范化在完整的小批量上运行,因此我们不能像以前在引入其他层时那样忽略批量大小。
我们在下面讨论这两种情况:全连接层和卷积层,他们的批量规范化实现略有不同。
### 全连接层
通常,我们将批量规范化层置于全连接层中的仿射变换和激活函数之间。
设全连接层的输入为u,权重参数和偏置参数分别为$\mathbf{W}$和$\mathbf{b}$,激活函数为$\phi$,批量规范化的运算符为$\mathrm{BN}$。
那么,使用批量规范化的全连接层的输出的计算详情如下:
$$\mathbf{h} = \phi(\mathrm{BN}(\mathbf{W}\mathbf{x} + \mathbf{b}) ).$$
回想一下,均值和方差是在应用变换的"相同"小批量上计算的。
### 卷积层
同样,对于卷积层,我们可以在卷积层之后和非线性激活函数之前应用批量规范化。
当卷积有多个输出通道时,我们需要对这些通道的“每个”输出执行批量规范化,每个通道都有自己的拉伸(scale)和偏移(shift)参数,这两个参数都是标量。
假设我们的小批量包含$m$个样本,并且对于每个通道,卷积的输出具有高度$p$和宽度$q$。
那么对于卷积层,我们在每个输出通道的$m \cdot p \cdot q$个元素上同时执行每个批量规范化。
因此,在计算平均值和方差时,我们会收集所有空间位置的值,然后在给定通道内应用相同的均值和方差,以便在每个空间位置对值进行规范化。
### 预测过程中的批量规范化
正如我们前面提到的,批量规范化在训练模式和预测模式下的行为通常不同。
首先,将训练好的模型用于预测时,我们不再需要样本均值中的噪声以及在微批次上估计每个小批次产生的样本方差了。
其次,例如,我们可能需要使用我们的模型对逐个样本进行预测。
一种常用的方法是通过移动平均估算整个训练数据集的样本均值和方差,并在预测时使用它们得到确定的输出。
可见,和暂退法一样,批量规范化层在训练模式和预测模式下的计算结果也是不一样的。
## (**从零实现**)
下面,我们从头开始实现一个具有张量的批量规范化层。
```
from mxnet import autograd, init, np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l
npx.set_np()
def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):
    """Batch-normalization forward pass (from-scratch MXNet version).

    Args:
        X: input of shape (N, D) for a dense layer or (N, C, H, W) for a
            convolutional layer.
        gamma, beta: learnable scale and shift parameters.
        moving_mean, moving_var: running statistics used at prediction time.
        eps: small constant added to the variance for numerical stability.
        momentum: decay factor for updating the running statistics.

    Returns:
        (Y, moving_mean, moving_var): normalised output plus the (possibly
        updated) running statistics.
    """
    # Use autograd to tell training mode apart from prediction mode
    if not autograd.is_training():
        # Prediction mode: normalise with the moving-average statistics
        X_hat = (X - moving_mean) / np.sqrt(moving_var + eps)
    else:
        assert len(X.shape) in (2, 4)
        if len(X.shape) == 2:
            # Fully connected layer: mean/variance over the feature axis
            mean = X.mean(axis=0)
            var = ((X - mean) ** 2).mean(axis=0)
        else:
            # 2-D convolutional layer: mean/variance per channel (axis=1);
            # keepdims so the statistics broadcast against X below
            mean = X.mean(axis=(0, 2, 3), keepdims=True)
            var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)
        # Training mode: standardise with the current mini-batch statistics
        X_hat = (X - mean) / np.sqrt(var + eps)
        # Update the moving averages of the mean and variance
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var
    Y = gamma * X_hat + beta  # scale and shift
    return Y, moving_mean, moving_var
```
我们现在可以[**创建一个正确的`BatchNorm`层**]。
这个层将保持适当的参数:拉伸`gamma`和偏移`beta`,这两个参数将在训练过程中更新。
此外,我们的层将保存均值和方差的移动平均值,以便在模型预测期间随后使用。
撇开算法细节,注意我们实现层的基础设计模式。
通常情况下,我们用一个单独的函数定义其数学原理,比如说`batch_norm`。
然后,我们将此功能集成到一个自定义层中,其代码主要处理数据移动到训练设备(如GPU)、分配和初始化任何必需的变量、跟踪移动平均线(此处为均值和方差)等问题。
为了方便起见,我们并不担心在这里自动推断输入形状,因此我们需要指定整个特征的数量。
不用担心,深度学习框架中的批量规范化API将为我们解决上述问题,我们稍后将展示这一点。
```
class BatchNorm(nn.Block):
    """Batch-normalization layer implemented from scratch (MXNet Gluon).

    Args:
        num_features: number of outputs of a fully connected layer, or the
            number of output channels of a convolutional layer.
        num_dims: 2 for a fully connected layer, 4 for a convolutional layer.
    """
    def __init__(self, num_features, num_dims, **kwargs):
        super().__init__(**kwargs)
        if num_dims == 2:
            shape = (1, num_features)
        else:
            shape = (1, num_features, 1, 1)
        # Scale and shift parameters take part in gradient computation;
        # initialised to 1 and 0 respectively
        self.gamma = self.params.get('gamma', shape=shape, init=init.One())
        self.beta = self.params.get('beta', shape=shape, init=init.Zero())
        # The moving statistics are not model parameters; start at 0 and 1
        self.moving_mean = np.zeros(shape)
        self.moving_var = np.ones(shape)

    def forward(self, X):
        # If X is not on the same device, copy moving_mean and moving_var
        # over to X's device (e.g. GPU memory) first
        if self.moving_mean.ctx != X.ctx:
            self.moving_mean = self.moving_mean.copyto(X.ctx)
            self.moving_var = self.moving_var.copyto(X.ctx)
        # Save the updated moving_mean and moving_var.
        # NOTE(review): eps=1e-12 is far smaller than the 1e-5 typically
        # used by framework BatchNorm layers -- confirm this is intended.
        Y, self.moving_mean, self.moving_var = batch_norm(
            X, self.gamma.data(), self.beta.data(), self.moving_mean,
            self.moving_var, eps=1e-12, momentum=0.9)
        return Y
```
## 使用批量规范化层的 LeNet
为了更好理解如何[**应用`BatchNorm`**],下面我们将其应用(**于LeNet模型**)( :numref:`sec_lenet`)。
回想一下,批量规范化是在卷积层或全连接层之后、相应的激活函数之前应用的。
```
# LeNet with the from-scratch BatchNorm inserted between each conv/dense
# layer and its sigmoid activation
net = nn.Sequential()
net.add(nn.Conv2D(6, kernel_size=5),
        BatchNorm(6, num_dims=4),
        nn.Activation('sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Conv2D(16, kernel_size=5),
        BatchNorm(16, num_dims=4),
        nn.Activation('sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Dense(120),
        BatchNorm(120, num_dims=2),
        nn.Activation('sigmoid'),
        nn.Dense(84),
        BatchNorm(84, num_dims=2),
        nn.Activation('sigmoid'),
        nn.Dense(10))
```
和以前一样,我们将[**在Fashion-MNIST数据集上训练网络**]。
这个代码与我们第一次训练LeNet( :numref:`sec_lenet`)时几乎完全相同,主要区别在于学习率大得多。
```
# Train on Fashion-MNIST; note the unusually large learning rate that
# batch normalization makes feasible
lr, num_epochs, batch_size = 1.0, 10, 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
```
让我们来看看从第一个批量规范化层中学到的[**拉伸参数`gamma`和偏移参数`beta`**]。
```
net[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,)
```
## [**简明实现**]
除了使用我们刚刚定义的`BatchNorm`,我们也可以直接使用深度学习框架中定义的`BatchNorm`。
该代码看起来几乎与我们上面的代码相同。
```
# Same LeNet architecture, but using the framework's built-in nn.BatchNorm
# (channel/feature counts are inferred automatically)
net = nn.Sequential()
net.add(nn.Conv2D(6, kernel_size=5),
        nn.BatchNorm(),
        nn.Activation('sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Conv2D(16, kernel_size=5),
        nn.BatchNorm(),
        nn.Activation('sigmoid'),
        nn.AvgPool2D(pool_size=2, strides=2),
        nn.Dense(120),
        nn.BatchNorm(),
        nn.Activation('sigmoid'),
        nn.Dense(84),
        nn.BatchNorm(),
        nn.Activation('sigmoid'),
        nn.Dense(10))
```
下面,我们[**使用相同超参数来训练模型**]。
请注意,通常高级API变体运行速度快得多,因为它的代码已编译为C++或CUDA,而我们的自定义代码由Python实现。
```
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
```
## 争议
直观地说,批量规范化被认为可以使优化更加平滑。
然而,我们必须小心区分直觉和对我们观察到的现象的真实解释。
回想一下,我们甚至不知道简单的神经网络(多层感知机和传统的卷积神经网络)为什么如此有效。
即使在暂退法和权重衰减的情况下,它们仍然非常灵活,因此无法通过常规的学习理论泛化保证来解释它们是否能够泛化到看不见的数据。
在提出批量规范化的论文中,作者除了介绍了其应用,还解释了其原理:通过减少*内部协变量偏移*(internal covariate shift)。
据推测,作者所说的“内部协变量转移”类似于上述的投机直觉,即变量值的分布在训练过程中会发生变化。
然而,这种解释有两个问题:
1. 这种偏移与严格定义的*协变量偏移*(covariate shift)非常不同,所以这个名字用词不当。
2. 这种解释只提供了一种不明确的直觉,但留下了一个有待后续挖掘的问题:为什么这项技术如此有效?
本书旨在传达实践者用来发展深层神经网络的直觉。
然而,重要的是将这些指导性直觉与既定的科学事实区分开来。
最终,当你掌握了这些方法,并开始撰写自己的研究论文时,你会希望清楚地区分技术和直觉。
随着批量规范化的普及,“内部协变量偏移”的解释反复出现在技术文献的辩论,特别是关于“如何展示机器学习研究”的更广泛的讨论中。
Ali Rahimi在接受2017年NeurIPS大会的“接受时间考验奖”(Test of Time Award)时发表了一篇令人难忘的演讲。他将“内部协变量转移”作为焦点,将现代深度学习的实践比作炼金术。
他对该示例进行了详细回顾 :cite:`Lipton.Steinhardt.2018`,概述了机器学习中令人不安的趋势。
此外,一些作者对批量规范化的成功提出了另一种解释:在某些方面,批量规范化的表现出与原始论文 :cite:`Santurkar.Tsipras.Ilyas.ea.2018`中声称的行为是相反的。
然而,与机器学习文献中成千上万类似模糊的说法相比,内部协变量偏移没有更值得批评。
很可能,它作为这些辩论的焦点而产生共鸣,要归功于目标受众对它的广泛认可。
批量规范化已经被证明是一种不可或缺的方法。它适用于几乎所有图像分类器,并在学术界获得了数万引用。
## 小结
* 在模型训练过程中,批量规范化利用小批量的均值和标准差,不断调整神经网络的中间输出,使整个神经网络各层的中间输出值更加稳定。
* 批量规范化在全连接层和卷积层的使用略有不同。
* 批量规范化层和暂退层一样,在训练模式和预测模式下计算不同。
* 批量规范化有许多有益的副作用,主要是正则化。另一方面,”减少内部协变量偏移“的原始动机似乎不是一个有效的解释。
## 练习
1. 在使用批量规范化之前,我们是否可以从全连接层或卷积层中删除偏置参数?为什么?
1. 比较LeNet在使用和不使用批量规范化情况下的学习率。
1. 绘制训练和测试准确度的提高。
1. 你的学习率有多高?
1. 我们是否需要在每个层中进行批量规范化?尝试一下?
1. 你可以通过批量规范化来替换暂退法吗?行为会如何改变?
1. 确定参数`beta`和`gamma`,并观察和分析结果。
1. 查看高级API中有关`BatchNorm`的在线文档,以查看其他批量规范化的应用。
1. 研究思路:想想你可以应用的其他“规范化”转换?你可以应用概率积分变换吗?全秩协方差估计可以么?
[Discussions](https://discuss.d2l.ai/t/1876)
| github_jupyter |
# "Poleval 2021 through wav2vec2"
> "Trying for pronunciation recovery"
- toc: false
- branch: master
- comments: true
- hidden: true
- categories: [wav2vec2, poleval, colab]
```
%%capture
!pip install gdown
!gdown https://drive.google.com/uc?id=1b6MyyqgA9D1U7DX3Vtgda7f9ppkxjCXJ
%%capture
!tar zxvf poleval_wav.train.tar.gz && rm poleval_wav.train.tar.gz
%%capture
!pip install librosa webrtcvad
#collapse-hide
# VAD wrapper is taken from PyTorch Speaker Verification:
# https://github.com/HarryVolek/PyTorch_Speaker_Verification
# Copyright (c) 2019, HarryVolek
# License: BSD-3-Clause
# based on https://github.com/wiseman/py-webrtcvad/blob/master/example.py
# Copyright (c) 2016 John Wiseman
# License: MIT
import collections
import contextlib
import numpy as np
import sys
import librosa
import wave
import webrtcvad
#from hparam import hparam as hp
sr = 16000
def read_wave(path, sr):
    """Read a .wav file.

    Args:
        path: path to a mono, 16-bit PCM wav file whose native sample rate
            is one of 8/16/32/48 kHz (enforced by the asserts below).
        sr: target sample rate passed to ``librosa.load`` for the float data.

    Returns:
        (data, pcm_data): ``data`` is the 1-D floating-point signal loaded
        by librosa at rate ``sr``; ``pcm_data`` is the raw 16-bit PCM byte
        string at the file's original rate (the form webrtcvad consumes).
        (The original docstring claimed it returned the sample rate, which
        did not match the code.)
    """
    with contextlib.closing(wave.open(path, 'rb')) as wf:
        num_channels = wf.getnchannels()
        assert num_channels == 1  # mono only
        sample_width = wf.getsampwidth()
        assert sample_width == 2  # 16-bit samples
        sample_rate = wf.getframerate()
        assert sample_rate in (8000, 16000, 32000, 48000)
        pcm_data = wf.readframes(wf.getnframes())
    # NOTE(review): newer librosa requires sr as a keyword argument
    # (librosa.load(path, sr=sr)) -- confirm the installed version.
    data, _ = librosa.load(path, sr)
    assert len(data.shape) == 1
    assert sr in (8000, 16000, 32000, 48000)
    return data, pcm_data
class Frame(object):
    """A single chunk of PCM audio together with its position in time.

    Attributes:
        bytes: raw PCM payload of the frame.
        timestamp: start time of the frame, in seconds.
        duration: length of the frame, in seconds.
    """

    def __init__(self, bytes, timestamp, duration):
        # Plain value object: store the payload and its timing as given.
        self.bytes, self.timestamp, self.duration = bytes, timestamp, duration
def frame_generator(frame_duration_ms, audio, sample_rate):
    """Yield successive ``Frame`` objects cut from PCM audio data.

    Args:
        frame_duration_ms: desired frame length in milliseconds.
        audio: raw PCM byte string (16-bit samples, i.e. 2 bytes each).
        sample_rate: sampling rate of ``audio`` in Hz.

    Yields:
        Frames of the requested duration. A trailing chunk shorter than
        (or exactly equal to) one frame is discarded, matching the
        reference implementation's strict ``<`` bound.
    """
    step = int(sample_rate * (frame_duration_ms / 1000.0) * 2)  # bytes per frame
    seconds_per_frame = (float(step) / sample_rate) / 2.0
    start = 0
    clock = 0.0
    while start + step < len(audio):
        yield Frame(audio[start:start + step], clock, seconds_per_frame)
        clock += seconds_per_frame
        start += step
def vad_collector(sample_rate, frame_duration_ms,
                  padding_duration_ms, vad, frames):
    """Filters out non-voiced audio frames.

    Given a webrtcvad.Vad and a source of audio frames, yields only
    the voiced audio.

    Uses a padded, sliding window algorithm over the audio frames.
    When more than 90% of the frames in the window are voiced (as
    reported by the VAD), the collector triggers and begins yielding
    audio frames. Then the collector waits until 90% of the frames in
    the window are unvoiced to detrigger.

    The window is padded at the front and back to provide a small
    amount of silence or the beginnings/endings of speech around the
    voiced frames.

    Arguments:
        sample_rate - The audio sample rate, in Hz.
        frame_duration_ms - The frame duration in milliseconds.
        padding_duration_ms - The amount to pad the window, in milliseconds.
        vad - An instance of webrtcvad.Vad.
        frames - a source of audio frames (sequence or generator).

    Returns: A generator that yields (start, end) time tuples, in seconds.
    """
    num_padding_frames = int(padding_duration_ms / frame_duration_ms)
    # We use a deque for our sliding window/ring buffer.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # We have two states: TRIGGERED and NOTTRIGGERED. We start in the
    # NOTTRIGGERED state.
    triggered = False
    voiced_frames = []
    for frame in frames:
        is_speech = vad.is_speech(frame.bytes, sample_rate)
        if not triggered:
            ring_buffer.append((frame, is_speech))
            num_voiced = len([f for f, speech in ring_buffer if speech])
            # If we're NOTTRIGGERED and more than 90% of the frames in
            # the ring buffer are voiced frames, then enter the
            # TRIGGERED state.
            if num_voiced > 0.9 * ring_buffer.maxlen:
                triggered = True
                # Segment start is the timestamp of the oldest buffered frame.
                start = ring_buffer[0][0].timestamp
                # We want to yield all the audio we see from now until
                # we are NOTTRIGGERED, but we have to start with the
                # audio that's already in the ring buffer.
                for f, s in ring_buffer:
                    voiced_frames.append(f)
                ring_buffer.clear()
        else:
            # We're in the TRIGGERED state, so collect the audio data
            # and add it to the ring buffer.
            voiced_frames.append(frame)
            ring_buffer.append((frame, is_speech))
            num_unvoiced = len([f for f, speech in ring_buffer if not speech])
            # If more than 90% of the frames in the ring buffer are
            # unvoiced, then enter NOTTRIGGERED and yield whatever
            # audio we've collected.
            if num_unvoiced > 0.9 * ring_buffer.maxlen:
                triggered = False
                yield (start, frame.timestamp + frame.duration)
                ring_buffer.clear()
                voiced_frames = []
    # If we have any leftover voiced audio when we run out of input,
    # yield it.
    # NOTE(review): `start` and `frame` here are the values bound during the
    # last trigger / last frame of the loop; voiced_frames being non-empty
    # implies a trigger occurred, so both names are defined.
    if voiced_frames:
        yield (start, frame.timestamp + frame.duration)
def VAD_chunk(aggressiveness, path):
    """Run webrtcvad over a wav file and return (times, segments).

    Each voiced region found by the VAD is cut into sub-segments of at
    most 0.4 s; the remainder of a region is emitted as a final,
    possibly shorter, segment. Uses the module-level sample rate `sr`.
    """
    audio, byte_audio = read_wave(path, sr)
    detector = webrtcvad.Vad(int(aggressiveness))
    frame_list = list(frame_generator(20, byte_audio, sr))
    voiced_times = vad_collector(sr, 20, 200, detector, frame_list)
    out_times = []
    out_segs = []
    for begin, finish in voiced_times:
        begin = np.round(begin, decimals=2)
        finish = np.round(finish, decimals=2)
        cursor = begin
        # Chop the voiced span into 0.4 s pieces.
        while cursor + .4 < finish:
            piece_end = np.round(cursor + .4, decimals=2)
            out_times.append((cursor, piece_end))
            out_segs.append(audio[int(cursor * sr):int(piece_end * sr)])
            cursor = piece_end
        # The original's `while ... else` always ran (no break): emit the tail.
        out_times.append((cursor, finish))
        out_segs.append(audio[int(cursor * sr):int(finish * sr)])
    return out_times, out_segs
#collapse-hide
# Based on code from PyTorch Speaker Verification:
# https://github.com/HarryVolek/PyTorch_Speaker_Verification
# Copyright (c) 2019, HarryVolek
# Additions Copyright (c) 2021, Jim O'Regan
# License: MIT
import numpy as np
# wav2vec2's max duration is 40 seconds, using 39 by default
# to be a little safer
def vad_concat(times, segs, max_duration=39.0):
    """
    Concatenate continuous times and their segments, where the end time
    of a segment is the same as the start time of the next.

    Parameters:
        times: list of tuple (start, end)
        segs: list of segments (audio frames)
        max_duration: maximum duration of the resulting concatenated
            segments; the kernel size of wav2vec2 is 40 seconds, so
            the default max_duration is 39, to ensure the resulting
            list of segments will fit. 0.0 means "don't concatenate".

    Returns:
        concat_times: list of tuple (start, end)
        concat_seg: list of segments (audio frames)

    Raises:
        ValueError: if max_duration exceeds the 40-second kernel size.
    """
    absolute_maximum = 40.0
    if max_duration > absolute_maximum:
        raise ValueError(
            '`max_duration` {:.2f} larger than kernel size (40 seconds)'.format(max_duration))
    # Guard: the original indexed segs[0]/times[0] unconditionally and
    # crashed with IndexError on empty input.
    if not times:
        return [], []
    # we take 0.0 to mean "don't concatenate"
    do_concat = (max_duration != 0.0)
    concat_seg = []
    concat_times = []
    seg_concat = segs[0]
    time_concat = times[0]
    for i in range(len(times) - 1):
        nxt_start, nxt_end = times[i + 1]
        can_concat = (nxt_end - time_concat[0]) < max_duration
        if time_concat[1] == nxt_start and do_concat and can_concat:
            # Adjacent and still under the limit: merge into the running segment.
            seg_concat = np.concatenate((seg_concat, segs[i + 1]))
            time_concat = (time_concat[0], nxt_end)
        else:
            # Flush the running segment and start a new one.
            concat_seg.append(seg_concat)
            concat_times.append(time_concat)
            seg_concat = segs[i + 1]
            time_concat = times[i + 1]
    # Flush the final pending segment (the original used a redundant
    # for/else for this; else always ran because there is no break).
    concat_seg.append(seg_concat)
    concat_times.append(time_concat)
    return concat_times, concat_seg
def make_dataset(concat_times, concat_segs):
    """Arrange (start, end) tuples and their audio into parallel columns.

    Returns a dict with 'start', 'end' and 'speech' keys suitable for
    datasets.Dataset.from_dict.
    """
    starts = []
    ends = []
    for begin, finish in concat_times:
        starts.append(begin)
        ends.append(finish)
    return {'start': starts,
            'end': ends,
            'speech': concat_segs}
%%capture
!pip install datasets
from datasets import Dataset
def vad_to_dataset(path, max_duration):
    """VAD-segment a wav file and wrap the result in a datasets.Dataset.

    A max_duration <= 0.0 skips the segment-concatenation step entirely;
    otherwise adjacent voiced segments are merged up to max_duration.
    """
    times, segs = VAD_chunk(3, path)
    if max_duration > 0.0:
        times, segs = vad_concat(times, segs, max_duration)
    return Dataset.from_dict(make_dataset(times, segs))
%%capture
!pip install -q transformers
%%capture
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
# load model and tokenizer
processor = Wav2Vec2Processor.from_pretrained("mbien/wav2vec2-large-xlsr-polish")
model = Wav2Vec2ForCTC.from_pretrained("mbien/wav2vec2-large-xlsr-polish")
model.to("cuda")
def speech_file_to_array_fn(batch):
    """Load the wav at batch['path'] into the batch dict, in place.

    Adds 'speech' (mono float array), 'sampling_rate' and 'target_text'
    keys; returns the same batch for datasets.map compatibility.
    """
    import torchaudio
    waveform, rate = torchaudio.load(batch["path"])
    batch["speech"] = waveform[0].numpy()
    batch["sampling_rate"] = rate
    batch["target_text"] = batch["sentence"]
    return batch
def evaluate(batch):
    """Transcribe batch['speech'] with the global wav2vec2 model/processor.

    Runs greedy (argmax) CTC decoding on GPU and stores the decoded
    strings in batch['pred_strings']; returns the same batch.
    """
    import torch
    features = processor(batch["speech"], sampling_rate=16_000,
                         return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(features.input_values.to("cuda"),
                       attention_mask=features.attention_mask.to("cuda")).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(predicted_ids)
    return batch
import json
def process_wave(filename, duration):
    """Transcribe `filename` via VAD segmentation and write a .tlog file.

    The .tlog (written next to the input, as '<filename>.tlog') is a
    JSON list of {start, end, transcript} dicts, one per voiced segment.
    """
    import json
    dataset = vad_to_dataset(filename, duration)
    result = dataset.map(evaluate, batched=True, batch_size=16)
    speechless = result.remove_columns(['speech'])
    d = speechless.to_dict()
    tlog = list()
    # BUG FIX: the original looped over range(0, len(d['end']) - 1),
    # silently dropping the final segment's transcript.
    for i in range(len(d['end'])):
        out = dict()
        out['start'] = d['start'][i]
        out['end'] = d['end'][i]
        out['transcript'] = d['pred_strings'][i]
        tlog.append(out)
    with open('{}.tlog'.format(filename), 'w') as outfile:
        json.dump(tlog, outfile)
import glob
for f in glob.glob('/content/poleval_final_dataset_wav/train/*.wav'):
print(f)
process_wave(f, 10.0)
!find . -name '*tlog'|zip poleval-train.zip -@
```
| github_jupyter |
<a href="https://colab.research.google.com/github/wel51x/DS-Unit-4-Sprint-4-Deep-Learning/blob/master/My_LS_DS_441_RNN_and_LSTM_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Lambda School Data Science - Recurrent Neural Networks and LSTM
> "Yesterday's just a memory - tomorrow is never what it's supposed to be." -- Bob Dylan
####We have to downgrade numpy to get "RNN/LSTM Sentiment Classification with Keras" to work in Colab
```
!pip install numpy==1.16.2
import numpy as np
```
### Forecasting
Forecasting - at it's simplest, it just means "predict the future":
# Assignment

It is said that [infinite monkeys typing for an infinite amount of time](https://en.wikipedia.org/wiki/Infinite_monkey_theorem) will eventually type, among other things, the complete works of William Shakespeare. Let's see if we can get there a bit faster, with the power of Recurrent Neural Networks and LSTM.
This text file contains the complete works of Shakespeare: https://www.gutenberg.org/files/100/100-0.txt
Use it as training data for an RNN - you can keep it simple and train character level, and that is suggested as an initial approach.
Then, use that trained RNN to generate Shakespearean-ish text. Your goal - a function that can take, as an argument, the size of text (e.g. number of characters or lines) to generate, and returns generated text of that size.
Note - Shakespeare wrote an awful lot. It's OK, especially initially, to sample/use smaller data and parameters, so you can have a tighter feedback loop when you're trying to get things running. Then, once you've got a proof of concept - start pushing it more!
```
# Imports
from random import random
import numpy as np
import requests
# TODO - Words, words, mere words, no matter from the heart.
# Grab first ten
r = requests.get('http://www.gutenberg.org/files/100/100-0.txt', verify=True)
x = r.text.find('From')
y = r.text.find('thine or thee.')
article_text = r.text[x : y+14]
chars = list(set(article_text)) # split and remove duplicate characters. convert to list.
num_chars = len(chars) # the number of unique characters
txt_data_size = len(article_text)
print("unique characters : ", num_chars)
print("txt_data_size : ", txt_data_size)
```
#### one hot encode
```
# Build the char <-> int lookup tables for one-hot encoding.
char_to_int = dict((c, i) for i, c in enumerate(chars)) # "enumerate" returns index and value; convert to dictionary
int_to_char = dict((i, c) for i, c in enumerate(chars))
print(char_to_int)
print("----------------------------------------------------")
print(int_to_char)
print("----------------------------------------------------")
# integer encode input data
integer_encoded = [char_to_int[i] for i in article_text] # each character of the text mapped to its integer code
print(integer_encoded)
print("----------------------------------------------------")
print("data length : ", len(integer_encoded))
```
#### hyperparameters
```
# Training hyperparameters for the character-level vanilla RNN.
iteration = 500            # number of passes over the whole text
sequence_length = 40       # characters per truncated-BPTT sub-sequence
batch_size = round((txt_data_size /sequence_length)+0.5) # = math.ceil
hidden_size = 128  # size of hidden layer of neurons.
learning_rate = 1e-1
# model parameters (weights scaled by 0.01 at init to keep tanh unsaturated)
W_xh = np.random.randn(hidden_size, num_chars)*0.01 # weight input -> hidden.
W_hh = np.random.randn(hidden_size, hidden_size)*0.01 # weight hidden -> hidden
W_hy = np.random.randn(num_chars, hidden_size)*0.01 # weight hidden -> output
b_h = np.zeros((hidden_size, 1)) # hidden bias
b_y = np.zeros((num_chars, 1)) # output bias
h_prev = np.zeros((hidden_size,1)) # h_(t-1)
```
#### Forward propagation
```
def forwardprop(inputs, targets, h_prev):
    """One forward pass of the vanilla RNN over a whole sub-sequence.

    Args:
        inputs: list of int-encoded characters (indices into the vocab).
        targets: list of int-encoded next-characters (inputs shifted by one).
        h_prev: hidden state carried over from the previous batch,
            shape (hidden_size, 1).

    Returns:
        loss: summed cross-entropy loss over the sequence.
        ps: dict t -> softmax probability vector for step t.
        hs: dict t -> hidden state vector (hs[-1] holds the incoming state).
        xs: dict t -> one-hot input vector.

    NOTE(review): reads the module-level weights W_xh, W_hh, W_hy, b_h,
    b_y and num_chars; it does not update them.
    """
    # Since the RNN receives the sequence, the weights are not updated during one sequence.
    xs, hs, ys, ps = {}, {}, {}, {} # dictionary keyed by time step
    hs[-1] = np.copy(h_prev) # Copy previous hidden state vector to -1 key value.
    loss = 0 # loss initialization
    for t in range(len(inputs)): # t is a "time step" and is used as a key(dic).
        xs[t] = np.zeros((num_chars,1))
        xs[t][inputs[t]] = 1
        hs[t] = np.tanh(np.dot(W_xh, xs[t]) + np.dot(W_hh, hs[t-1]) + b_h) # hidden state.
        ys[t] = np.dot(W_hy, hs[t]) + b_y # unnormalized log probabilities for next chars
        ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars.
        # Softmax. -> The sum of probabilities is 1 even without the exp() function, but all of the elements are positive through the exp() function.
        loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss). Efficient and simple code
        # Equivalent explicit one-hot formulation, kept for reference:
        # y_class = np.zeros((num_chars, 1))
        # y_class[targets[t]] =1
        # loss += np.sum(y_class*(-np.log(ps[t]))) # softmax (cross-entropy loss)
    return loss, ps, hs, xs
```
#### Backward propagation
```
def backprop(ps, inputs, hs, xs):
    """Backward pass (BPTT) over one sub-sequence; returns gradients.

    Args:
        ps, hs, xs: the per-step softmax, hidden-state and one-hot input
            dicts produced by forwardprop for the same sequence.
        inputs: the int-encoded input sequence (only its length is used).

    Returns:
        dWxh, dWhh, dWhy, dbh, dby: gradients for the module-level
        weights/biases, each clipped to [-5, 5].

    NOTE(review): this reads `targets` from module scope rather than
    taking it as a parameter — it only works when called immediately
    after forwardprop while the matching global `targets` is in place.
    """
    dWxh, dWhh, dWhy = np.zeros_like(W_xh), np.zeros_like(W_hh), np.zeros_like(W_hy) # make all zero matrices.
    dbh, dby = np.zeros_like(b_h), np.zeros_like(b_y)
    dhnext = np.zeros_like(hs[0]) # (hidden_size,1)
    # walk the time steps in reverse for backprop-through-time
    for t in reversed(range(len(inputs))):
        dy = np.copy(ps[t]) # shape (num_chars,1). "dy" means "dloss/dy"
        dy[targets[t]] -= 1 # backprop into y. After taking the soft max in the input vector, subtract 1 from the value of the element corresponding to the correct label.
        dWhy += np.dot(dy, hs[t].T)
        dby += dy
        dh = np.dot(W_hy.T, dy) + dhnext # backprop into h.
        dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity #tanh'(x) = 1-tanh^2(x)
        dbh += dhraw
        dWxh += np.dot(dhraw, xs[t].T)
        dWhh += np.dot(dhraw, hs[t-1].T)
        dhnext = np.dot(W_hh.T, dhraw)
    for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
        np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients.
    return dWxh, dWhh, dWhy, dbh, dby
```
#### Training
```
%%time
# Adagrad training loop over the whole text, `iteration`+1 epochs.
data_pointer = 0
# memory variables for Adagrad (accumulated squared gradients)
mWxh, mWhh, mWhy = np.zeros_like(W_xh), np.zeros_like(W_hh), np.zeros_like(W_hy)
mbh, mby = np.zeros_like(b_h), np.zeros_like(b_y)
for i in range(iteration+1):
    h_prev = np.zeros((hidden_size,1)) # reset RNN memory at the start of each epoch
    data_pointer = 0 # go from start of data
    for b in range(batch_size):
        inputs = [char_to_int[ch] for ch in article_text[data_pointer:data_pointer+sequence_length]]
        targets = [char_to_int[ch] for ch in article_text[data_pointer+1:data_pointer+sequence_length+1]] # t+1
        if (data_pointer+sequence_length+1 >= len(article_text) and b == batch_size-1): # processing of the last part of the input data.
            # targets.append(char_to_int[txt_data[0]]) # When the data doesn't fit, add the first char to the back.
            targets.append(char_to_int[" "]) # When the data doesn't fit, add space(" ") to the back.
        # forward
        loss, ps, hs, xs = forwardprop(inputs, targets, h_prev)
        # backward (reads the global `targets` set just above)
        dWxh, dWhh, dWhy, dbh, dby = backprop(ps, inputs, hs, xs)
        # perform parameter update with Adagrad
        for param, dparam, mem in zip([W_xh, W_hh, W_hy, b_h, b_y],
                                      [dWxh, dWhh, dWhy, dbh, dby],
                                      [mWxh, mWhh, mWhy, mbh, mby]):
            mem += dparam * dparam # elementwise
            param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
        data_pointer += sequence_length # move data pointer
    if i % 25 == 0:
        print ('iter %d, loss: %f' % (i, loss)) # print progress
#### Prediction
```
def predict(test_char, length):
    """Sample `length` characters from the trained RNN, seeded by `test_char`.

    Each step draws the next character from the softmax distribution, so
    the output is stochastic. Prints the generated text; returns None.
    Reads the trained module-level weights and the char/int lookups.
    """
    one_hot = np.zeros((num_chars, 1))
    one_hot[char_to_int[test_char]] = 1
    hidden = np.zeros((hidden_size, 1))
    sampled = []
    for _ in range(length):
        hidden = np.tanh(np.dot(W_xh, one_hot) + np.dot(W_hh, hidden) + b_h)
        logits = np.dot(W_hy, hidden) + b_y
        probs = np.exp(logits) / np.sum(np.exp(logits))
        # Draw the next character index according to the softmax probabilities.
        choice = np.random.choice(range(num_chars), p=probs.ravel())
        sampled.append(choice)
        one_hot = np.zeros((num_chars, 1))
        one_hot[choice] = 1
    txt = test_char + ''.join(int_to_char[i] for i in sampled)
    print ('----\n %s \n----' % (txt, ))
predict('S', 500)
predict('C', 750)
```
# Resources and Stretch Goals
## Stretch goals:
- Refine the training and generation of text to be able to ask for different genres/styles of Shakespearean text (e.g. plays versus sonnets)
- Train a classification model that takes text and returns which work of Shakespeare it is most likely to be from
- Make it more performant! Many possible routes here - lean on Keras, optimize the code, and/or use more resources (AWS, etc.)
- Revisit the news example from class, and improve it - use categories or tags to refine the model/generation, or train a news classifier
- Run on bigger, better data
## Resources:
- [The Unreasonable Effectiveness of Recurrent Neural Networks](https://karpathy.github.io/2015/05/21/rnn-effectiveness/) - a seminal writeup demonstrating a simple but effective character-level NLP RNN
- [Simple NumPy implementation of RNN](https://github.com/JY-Yoon/RNN-Implementation-using-NumPy/blob/master/RNN%20Implementation%20using%20NumPy.ipynb) - Python 3 version of the code from "Unreasonable Effectiveness"
- [TensorFlow RNN Tutorial](https://github.com/tensorflow/models/tree/master/tutorials/rnn) - code for training a RNN on the Penn Tree Bank language dataset
- [4 part tutorial on RNN](http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/) - relates RNN to the vanishing gradient problem, and provides example implementation
- [RNN training tips and tricks](https://github.com/karpathy/char-rnn#tips-and-tricks) - some rules of thumb for parameterizing and training your RNN
| github_jupyter |
# Coase and Property
> Coase, R. H. 1960. “The Problem of Social Cost.” *The Journal of Law and Economics* 3:1–44.
> Coase, Ronald H. 1937. “The Nature of the Firm.” *Economica* 4 (16):386–405.
**Slideshow mode**: this notebook can be viewed as a slideshow by pressing Alt-R if run on a server.
## Coase (1960) The Problem of Social Cost
### A rancher and wheat farmer.
Both are utilizing adjacent plots of land. No fence separates the lands.
**The Wheat Farmer:** chooses a production method that delivers a maximum profit of $\Pi_W =8$.
- to keep this simple suppose this is the farmer's only production choice.
**The Rancher:** chooses herd size $x$ to maximize profits $\Pi_C(x) = P \cdot F(x) - c \cdot x^2$
- $P$ is cattle price and $c$ is the cost of feeding each animal.
- The herd size $x^*$ that maximizes profits given by:
$$P \cdot F'(x^*) = c$$
**Example:** If $F(x) = x$, $c=\frac{1}{2}$.
The FOC are $x^{*} = P_c$
With $P_c=4$ and $c=\frac{1}{2}$, the rancher's privately optimal herd size of $x^* = 4$
#### Missing Property Rights impose external costs
With no effective barrier separating the fields cattle sometimes strays into the wheat farmer's fields, damaging crops and reducing wheat farmer's profits.
Assume that if the rancher keeps a herd size $x$ net profits in wheat are reduced from $\Pi_W$ to:
$$\Pi_W(x) = \Pi_W - d \cdot x^2$$
**The external cost**
Suppose $d=\frac{1}{2}$
At the rancher's private optimum herd size of $x^*=4$, the farmer's profit is reduced from 8 to zero:
$$\begin{align}
\Pi_W(x) &= \Pi_W - d \cdot x^2 \\
& = 8 - \frac{1}{2} \cdot 4^2 = 0
\end{align}$$
```
from coase import *
from ipywidgets import interact, fixed
```
At private optimum Rancher earns \$8 but imposes external costs that drive the farmer's earnings to zero.
```
coaseplot1()
```
Private and social marginal benefits and costs can be plotted to see deadweight loss (DWL) differently:
```
coaseplot2()
```
## The assignment of property rights (liability)
**Scenario 1:** Farmer is given the right to enjoin (i.e. limit or prohibit) cattle herding.
If the farmer enforces a prohibition on all cattle herding:
- Rancher now earns \$0.
- Farmer earns \$8.
- But this is not efficient! Total output is smaller than it could be.
- If transactions costs are low the two parties can bargain to a more efficient outcome.
**Scenario 1:** Farmer is given the right to enjoin (i.e. limit or prohibit) cattle herding.
Rancher reasons that if she were permitted to herd 2 cattle she'd earn $\$6$ while imposing \$2 in damage.
- She could offer $\$2$ in full compensation for damage, pocketing remaining \$4
- or they could bargain over how to divide the gains to trade of \$4 in other ways.
**Scenario 2:** Rancher is granted right to graze with impunity.
Farmer reasons that if herd size could be reduced from 4 to 2
- farm profits would rise from $\$0$ to $\$6$
- rancher's profits would fall from $\$8$ to $\$6$
- So farmer could offer to fully compensate rancher for $\$2$ loss and pocket remaining $\$4$
- or they could bargain to divide those gains to trade of $\$4$ in other ways.
### Who causes the externality?
- The rancher, because his cows trample the crops?
- The farmer, for placing his field too close to the rancher?
- Ronald Coase point is that there is no clear answer to this question.
- Hence Pigouvian tax/subsidy 'solutions' are not obvious. Should we tax the rancher, or subsidize them to keep their herd size down?
- 'Externality' problem is due to the non-assignment of property rights.
## The 'Coase Theorem'
### With zero/low transactions costs
- **The initial assignment of property rights does not matter for efficiency:** The parties traded to an efficient solution no matter who first got the rights.
- **The 'emergence' of property rights**: Even with no initial third-party assignment of property rights, it should be in the interests of the parties to create such rights and negotiate/trade to an efficient outcome.
- **The initial allocation does matter for the distribution of benefits between parties.** Legally tradable entitlements are valuable, generate income to those who can then sell.
### Coase Theorem: True, False or Tautology?
> "Costless bargaining is efficient tautologically; if I assume people can agree on socially efficient bargains, then of course they will... In the absence of property rights, a bargain *establishes* a contract between parties with novel rights that needn’t exist ex-ante."
Cooter (1990)
In the Farmer and Rancher example there was a missing market for legal entitlements.
Once the market is made complete (by an assumed third party) then the First Welfare Theorem applies: complete competitive markets will lead to efficient allocations, regardless of initial allocation of property rights.
The "Coase Theorem" makes legal entitlements tradable.
In this view insuring efficiency is matter or removing impediments to free exchange of legal entitlements. However,
>"The interesting case is when transaction costs make bargaining difficult. What you should take from Coase is that social efficiency can be enhanced by institutions (including the firm!) which allow socially efficient bargains to be reached by removing restrictive transaction costs, and particularly that the assignment of property rights to different parties can either help or hinder those institutions."
Good further discussions from [D. Mcloskey](http://www.deirdremccloskey.com/docs/pdf/Article_306.pdf) and [here](https://afinetheorem.wordpress.com/2013/09/03/on-coases-two-famous-theorems/):
## When initial rights allocations matters for efficiency
- 'Coase Theorem' (Stigler) interpretation sweeps under the rug the complicated political question of who gets initial rights.
- Parties may engage in costly conflict, expend real resources to try to establish control over initial allocation of rights.
- The [Myerson Satterthaite theorem](https://en.wikipedia.org/wiki/Myerson%E2%80%93Satterthwaite_theorem) establishes that when parties are asymmetrically informed about each other's valuations (e.g. here about the value of damages or benefits) then efficient exchange may become difficult/impossible. Each party may try to extract rents by trying to "hold-up" the other.
- Suppose we had many farmers and ranchers. It might be costly/difficult to bring all relevant ranchers and farmers together and to agree on bargain terms.
- Coase himself thought transactions costs mattered and hence initial allocation mechanisms had to be thought through carefully (e.g. spectrum auctions).
## A Coasian view of land market development
Suppose there is an open field. In the absence of a land market whoever gets to the land first (possibly the more powerful in the the village) will prepare/clear land until the marginal value product of the last unit of land is equal to the clearing cost. We contrast two situations:
(1) Open frontier: where land is still abundant
(2) Land Scarcity.
There will be a misallocation in (2) shown by DWL in the diagram... but also an incentive for the parties to bargain to a more efficient outcome. A well functionining land market would also deliver that outcome.
#### Abundant land environment
$\bar T$ units of land and $N$=2 households.
Land clearing cost $c$. Frontier land not yet exhausted.
Maximize profits at $P \cdot F_T(T) = c$
Land demand for each farmer is given by $P\cdot F_T(T_i) = r$. So for this production function $P \frac{1}{\sqrt{T_i}} = r$ (or $P \frac{1}{\sqrt{T_i}} = c_l$ on the open frontier), so we can write
$$T^*_i(r) = (P/r)^2$$
If there is an open frontier the sum or demands falls short of total land supply and the marginal cost of land is the cost of clearing $r=c_l$.
'Land scarcity' results on the other hand when there is an equilibrium price of land $r>c_l$ where $r$ is found from
$$\sum T^*_i(r) = \bar T$$
Now land rent $r-c$ can be charged on the right to access and use land. Trade in these legal entitlements can raise output and efficiency. But there may be conflict and a 'scramble' to establish those rights of first access.
#### 'Customary' land rights
- Suppose norm is that all in the village can use as much land as they can farm
- Higher status individuals get allocation first
- As long as land is abundant everyone gets the land they want
- No "land rent" -- cannot charge rent above $c$ since villagers are free to clear at cost $c$
```
landmarket(P=5, cl = 3, title = 'Open Frontier')
```
### The closing of the frontier
- Rising population or improving price or technology increases demand for land.
- Suppose price at which product can be sold increases
- demand for land increases.
- Suppose total demand at clearing cost $c$ exceeds available land supply.
- High-status individuals (who have first-access) leave less land available than is needed to satisfy remaining villagers demand.
- Inefficient allocation of land
- marginal products of land not equalized across households.
- output would increase if we establish a market for trading land
```
landmarket(P=8, cl = 3, title = 'Land Scarcity')
```
We can solve for the equilibrium rental rate $r$ given environmental parameters including the price $P$, land endowment $\bar T$, population size $N$ and technology parameter $A$.
To do:
(things to still do in this notebook)
- indicate DWL on landmarket diagrams
- create widget to see how diagram shifts with changing parameters
```
interact(landmarket, P=(4,10,0.2), cl = (0,5,0.5),
title = fixed('Land'), A=fixed(1));
```
| github_jupyter |
# Tune a CNN on MNIST
This tutorial walks through using Ax to tune two hyperparameters (learning rate and momentum) for a PyTorch CNN on the MNIST dataset trained using SGD with momentum.
```
import torch
import numpy as np
from ax.plot.contour import plot_contour
from ax.plot.trace import optimization_trace_single_method
from ax.service.managed_loop import optimize
from ax.utils.notebook.plotting import render, init_notebook_plotting
from ax.utils.tutorials.cnn_utils import load_mnist, train, evaluate, CNN
init_notebook_plotting()
torch.manual_seed(12345)
dtype = torch.float
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
```
## 1. Load MNIST data
First, we need to load the MNIST data and partition it into training, validation, and test sets.
Note: this will download the dataset if necessary.
```
BATCH_SIZE = 512
train_loader, valid_loader, test_loader = load_mnist(batch_size=BATCH_SIZE)
```
## 2. Define function to optimize
In this tutorial, we want to optimize classification accuracy on the validation set as a function of the learning rate and momentum. The function takes in a parameterization (set of parameter values), computes the classification accuracy, and returns a dictionary of metric name ('accuracy') to a tuple with the mean and standard error.
```
def train_evaluate(parameterization):
    """Train a CNN with the given hyperparameters and score it.

    Returns the metric dict produced by `evaluate` on the validation
    set (accuracy mean and SEM), as required by ax's optimize() loop.
    """
    trained_net = train(net=CNN(),
                        train_loader=train_loader,
                        parameters=parameterization,
                        dtype=dtype,
                        device=device)
    return evaluate(net=trained_net,
                    data_loader=valid_loader,
                    dtype=dtype,
                    device=device)
```
## 3. Run the optimization loop
Here, we set the bounds on the learning rate and momentum and set the parameter space for the learning rate to be on a log scale.
```
best_parameters, values, experiment, model = optimize(
parameters=[
{"name": "lr", "type": "range", "bounds": [1e-6, 0.4], "log_scale": True},
{"name": "momentum", "type": "range", "bounds": [0.0, 1.0]},
],
evaluation_function=train_evaluate,
objective_name='accuracy',
)
```
We can introspect the optimal parameters and their outcomes:
```
best_parameters
means, covariances = values
means, covariances
```
## 4. Plot response surface
Contour plot showing classification accuracy as a function of the two hyperparameters.
The black squares show points that we have actually run, notice how they are clustered in the optimal region.
```
render(plot_contour(model=model, param_x='lr', param_y='momentum', metric_name='accuracy'))
```
## 5. Plot best objective as function of the iteration
Show the model accuracy improving as we identify better hyperparameters.
```
# `plot_single_method` expects a 2-d array of means, because it expects to average means from multiple
# optimization runs, so we wrap out best objectives array in another array.
best_objectives = np.array([[trial.objective_mean*100 for trial in experiment.trials.values()]])
best_objective_plot = optimization_trace_single_method(
y=np.maximum.accumulate(best_objectives, axis=1),
title="Model performance vs. # of iterations",
ylabel="Classification Accuracy, %",
)
render(best_objective_plot)
```
## 6. Train CNN with best hyperparameters and evaluate on test set
Note that the resulting accuracy on the test set might not be exactly the same as the maximum accuracy achieved on the evaluation set throughout optimization.
```
data = experiment.fetch_data()
df = data.df
best_arm_name = df.arm_name[df['mean'] == df['mean'].max()].values[0]
best_arm = experiment.arms_by_name[best_arm_name]
best_arm
combined_train_valid_set = torch.utils.data.ConcatDataset([
train_loader.dataset.dataset,
valid_loader.dataset.dataset,
])
combined_train_valid_loader = torch.utils.data.DataLoader(
combined_train_valid_set,
batch_size=BATCH_SIZE,
shuffle=True,
)
net = train(
net=CNN(),
train_loader=combined_train_valid_loader,
parameters=best_arm.parameters,
dtype=dtype,
device=device,
)
test_accuracy = evaluate(
net=net,
data_loader=test_loader,
dtype=dtype,
device=device,
)
print(f"Classification Accuracy (test set): {round(test_accuracy*100, 2)}%")
```
| github_jupyter |
```
#import sys
#!{sys.executable} -m pip install --user alerce
```
# light_transient_matching
## Matches DESI observations to ALERCE and DECAM ledger objects
This code predominantly takes in data from the ALERCE and DECAM ledger brokers and identifies DESI observations within 2 arcseconds of those objects, suspected to be transients. It then prepares those matches to be fed into our [CNN code](https://github.com/MatthewPortman/timedomain/blob/master/cronjobs/transient_matching/modified_cnn_classify_data_gradCAM.ipynb) which attempts to identify the class of these transients.
The main matching algorithm uses astropy's **match_coordinate_sky** to match 1-to-1 targets with the objects from the two ledgers. Wrapping functions handle data retrieval from both the ledgers as well as from DESI and prepare this data to be fed into **match_coordinate_sky**. Since ALERCE returns a small enough (pandas) dataframe, we do not need to precondition the input much. However, DECAM has many more objects to match so we use a two-stage process: an initial 2 degree match to tile RA's/DEC's and a second closer 1 arcsecond match to individual targets.
As the code is a work in progress, please forgive any redundancies. We are attempting to merge all of the above (neatly) into the same two or three matching/handling functions!
```
from astropy.io import fits
from astropy.table import Table
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, match_coordinates_sky, Angle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from glob import glob
import sys
import sqlite3
import os
from desispec.io import read_spectra, write_spectra
from desispec.spectra import Spectra
# Some handy global variables
global db_filename
db_filename = '/global/cfs/cdirs/desi/science/td/daily-search/transients_search.db'
global exposure_path
exposure_path = os.environ["DESI_SPECTRO_REDUX"]
global color_band
color_band = "r"
global minDist
minDist = {}
global today
today = Time.now()
```
## Necessary functions
```
# Grabbing the file names
def all_candidate_filenames(transient_dir: str):
    """Return the full paths of all FITS files in ``transient_dir``.

    Args:
        transient_dir (str): Directory holding the transient-candidate spectra.

    Returns:
        list: Full paths of every ``*.fits`` file found.

    Raises:
        SystemExit: If no FITS files are found. ``glob`` returns an empty
            list rather than raising for a missing directory/pattern, so we
            test the result directly (the original try/except was dead code).
    """
    filenames_read = glob(transient_dir + "/*.fits")  # Hardcoded pattern is hopefully a temporary measure.
    if not filenames_read:
        print("Could not grab/find any fits in the transient spectra directory:")
        print(transient_dir)
        raise SystemExit("Exiting.")
    return filenames_read
#path_to_transient = "/global/cfs/cdirs/desi/science/td/daily-search/desitrip/out"
#print(all_candidate_filenames(path_to_transient)[1])
# From ALeRCE_ledgermaker https://github.com/alercebroker/alerce_client
# I have had trouble importing this before so I copy, paste it, and modify it here.
# I also leave these imports here because why not?
import requests
from alerce.core import Alerce
from alerce.exceptions import APIError
alerce_client = Alerce()
# Choose cone_radius of diameter of tile so that, whatever coord I choose for ra_in, dec_in, we cover the whole tile
def access_alerts(lastmjd_in=None, ra_in=None, dec_in=None, cone_radius=3600 * 4.01,
                  classifier='stamp_classifier', class_names=None):
    """Query the ALeRCE broker for objects of the given classes.

    Args:
        lastmjd_in (list, optional): Two-element ``[start_mjd, end_mjd]``
            last-detection range. Defaults to the 60 days preceding now.
        ra_in (float, optional): Cone-search center RA in degrees.
        dec_in (float, optional): Cone-search center DEC in degrees.
        cone_radius (float): Cone-search radius in arcseconds; default covers
            a full tile diameter.
        classifier (str): ALeRCE classifier name.
        class_names (list, optional): Object classes to query; defaults to
            ``['SN', 'AGN']``.

    Returns:
        pandas.DataFrame: Concatenated query results sorted by ``lastmjd``.

    Raises:
        TypeError: If ``class_names`` is not a list.
    """
    # ``None`` defaults avoid the shared mutable-default-argument pitfall of
    # the original ``lastmjd_in=[]`` / ``class_names=['SN', 'AGN']`` signature.
    if class_names is None:
        class_names = ['SN', 'AGN']
    if not isinstance(class_names, list):
        raise TypeError('Argument `class_names` must be a list.')
    dataframes = []
    if not lastmjd_in:
        date_range = 60
        lastmjd_in = [Time.now().mjd - date_range, Time.now().mjd]
        print('Defaulting to a lastmjd range of', str(date_range), 'days before today.')
    for class_name in class_names:
        data = alerce_client.query_objects(classifier=classifier,
                                           class_name=class_name,
                                           lastmjd=lastmjd_in,
                                           ra=ra_in,
                                           dec=dec_in,
                                           radius=cone_radius,  # in arcseconds
                                           page_size=5000,
                                           order_by='oid',
                                           order_mode='DESC',
                                           format='pandas')
        dataframes.append(data)
    return pd.concat(dataframes).sort_values(by='lastmjd')
# From https://github.com/desihub/timedomain/blob/master/too_ledgers/decam_TAMU_ledgermaker.ipynb
# Function to grab decam data
from bs4 import BeautifulSoup
import json
import requests
def access_decam_data(url, overwrite=False):
    """Download reduced DECam transient data from Texas A&M.

    Cache the data to avoid lengthy and expensive downloads.

    Parameters
    ----------
    url : str
        URL for accessing the data.
    overwrite : bool
        Download new data and overwrite the cached data.

    Returns
    -------
    decam_transients : pandas.DataFrame
        Table of transient data.
    """
    # Cache file name is the last non-empty path component of the URL, e.g.
    # '.../LCData_Legacy/' -> 'LCData_Legacy.csv' in the working directory.
    folders = url.split('/')
    thedate = folders[-1] if len(folders[-1]) > 0 else folders[-2]
    outfile = '{}.csv'.format(thedate)
    if os.path.exists(outfile) and not overwrite:
        # Access cached data.
        decam_transients = pd.read_csv(outfile)
    else:
        # Download the DECam data index.
        # A try/except is needed because the datahub SSL certificate isn't playing well with URL requests.
        try:
            decam_dets = requests.get(url, auth=('decam','tamudecam')).text
        except:
            # SECURITY NOTE(review): verify=False disables TLS certificate
            # checks; tolerated only because the host certificate is broken.
            requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
            decam_dets = requests.get(url, verify=False, auth=('decam','tamudecam')).text
        # Convert transient index page into scrapable data using BeautifulSoup.
        # NOTE(review): no parser is given, so bs4 picks the "best available"
        # one; results can vary between environments -- consider 'html.parser'.
        soup = BeautifulSoup(decam_dets)
        # Loop through transient object summary JSON files indexed in the main transient page.
        # Download the JSONs and dump the info into a Pandas table.
        decam_transients = None
        j = 0
        for a in soup.find_all('a', href=True):
            # NOTE(review): membership on the Tag ``a`` tests its *contents*
            # (the link text), not the href attribute -- confirm this is the
            # intended way to select the object-summary links.
            if 'object-summary.json' in a:
                link = a['href'].replace('./', '')
                summary_url = url + link
                summary_text = requests.get(summary_url, verify=False, auth=('decam','tamudecam')).text
                summary_data = json.loads(summary_text)
                j += 1
                #print('Accessing {:3d} {}'.format(j, summary_url)) # Modified by Matt
                # One single-row frame per object, concatenated as we go.
                if decam_transients is None:
                    decam_transients = pd.DataFrame(summary_data, index=[0])
                else:
                    decam_transients = pd.concat([decam_transients, pd.DataFrame(summary_data, index=[0])])
        # Cache the data for future access.
        print('Saving output to {}'.format(outfile))
        decam_transients.to_csv(outfile, index=False)
    return decam_transients
# Function to read in fits table info, RA, DEC, MJD and targetid if so desired
# Uses control parameter tile to determine if opening tile exposure file or not since headers are different
import logging
def read_fits_info(filepath: str, transient_candidate = True):
    """Read target info (RA, DEC, observation MJD, TARGETID) from a spectra file.

    Args:
        filepath (str): Full path to the spectra FITS file.
        transient_candidate (bool): If True and the file has no usable
            ``LAST_MJD`` column, fall back to parsing the observation date out
            of the filename (``*_<mjd>_*.fits``).

    Returns:
        tuple: ``(target_ra, target_dec, obs_mjd, target_id)``; the RA/DEC and
        id entries are numpy arrays and ``obs_mjd`` is an astropy ``Time``.
        On failure returns ``(np.array([]), np.array([]), 0, 0)``.
    """
    # Defined up front: the original only set ``filename`` in the except
    # branch, so the "unable to determine mjd" branch raised NameError.
    filename = filepath.split("/")[-1]
    # Disabling INFO logging temporarily to suppress INFO level output/print from read_spectra
    logging.disable(logging.INFO)
    try:
        spec_info = read_spectra(filepath).fibermap
    except Exception:
        print("Could not open or use:", filename)
        logging.disable(logging.NOTSET)  # Re-enable logging on this early exit too (the original left it disabled).
        return np.array([]), np.array([]), 0, 0
    headers = ['TARGETID', 'TARGET_RA', 'TARGET_DEC', 'LAST_MJD']
    targ_info = {}
    for head in headers:
        try:
            targ_info[head] = spec_info[head].data
        except Exception:
            # LAST_MJD is frequently absent, so only warn about the others.
            if not head == 'LAST_MJD': print("Failed to read in", head, "data. Continuing...")
            targ_info[head] = False
    if np.any(targ_info['LAST_MJD']):
        targ_mjd = Time(targ_info['LAST_MJD'][0], format = 'mjd')
    elif transient_candidate:
        # Fall back to the date embedded in the filename.
        targ_mjd = filepath.split("/")[-1].split("_")[-2]
        targ_mjd = Time(targ_mjd, format = 'mjd')
    else:
        print("Unable to determine observation mjd for", filename)
        print("This target will not be considered.")
        logging.disable(logging.NOTSET)
        return np.array([]), np.array([]), 0, 0
    # Re-enabling logging for future calls if necessary
    logging.disable(logging.NOTSET)
    return targ_info["TARGET_RA"], targ_info["TARGET_DEC"], targ_mjd, targ_info["TARGETID"]
```
## Matching function
More or less the prototype to the later rendition used for DECAM. Will not be around in later versions of this notebook as I will be able to repurpose the DECAM code to do both. Planned obsolescence?
It may not be even worth it at this point... ah well!
```
# Prototype for the later, heftier matching function
# Will be deprecated, please reference commentary in inner_matching later for operation notes
def matching(path_in: str, max_sep: float, tile = False, date_dict = None):
    """Match DESI targets against ALeRCE SN alerts, date by date.

    Args:
        path_in (str): Unused here; kept for interface compatibility.
        max_sep (float): Maximum match separation in arcseconds.
        tile (bool): Unused here; kept for interface compatibility.
        date_dict (dict, optional): Maps observation MJD -> array whose
            columns are (RA, DEC, TARGETID) for the targets of that date.

    Returns:
        tuple: ``(all_trans_matches, all_alerts_matches, targetid_matches)``:
        matched DESI SkyCoords, matched alert SkyCoords, and the matched DESI
        target ids, each accumulated over every date. Three empty arrays when
        ``date_dict`` is empty/missing (the original returned only two there,
        breaking 3-way unpacking at the call site).
    """
    # ``None`` default avoids the shared mutable-default pitfall of ``{}``.
    if not date_dict:
        print("No RA's/DEC's fed in. Quitting.")
        return np.array([]), np.array([]), np.array([])
    max_sep *= u.arcsec
    # Look back 60 days from the DESI observations.
    days_back = 60
    all_trans_matches = []
    all_alerts_matches = []
    targetid_matches = []
    for obs_mjd, ra_dec in date_dict.items():
        # Grab RAs and DECs from input.
        target_ras = ra_dec[:, 0]
        target_decs = ra_dec[:, 1]
        target_ids = np.int64(ra_dec[:, 2])
        # Remove NaNs; they don't play nice with match_coordinates_sky.
        nan_ra = np.isnan(target_ras)
        nan_dec = np.isnan(target_decs)
        if np.any(nan_ra) or np.any(nan_dec):
            print("NaNs found, removing them from array (not FITS) before match.")
            # Drop a row when EITHER coordinate is NaN (the original used
            # logical_and, which kept rows with a single NaN coordinate).
            keep = np.logical_not(np.logical_or(nan_ra, nan_dec))
            target_ras = target_ras[keep]
            target_decs = target_decs[keep]
            # Keep ids aligned with the surviving coordinates (the original
            # left them unfiltered, desynchronizing the mask below).
            target_ids = target_ids[keep]
        try:
            # Query ALeRCE for SN alerts in the 60 days before this observation.
            alerts = access_alerts(lastmjd_in = [obs_mjd - days_back, obs_mjd],
                                   ra_in = target_ras[0],
                                   dec_in = target_decs[0],
                                   class_names = ['SN'])
        except Exception:
            # No SN alerts available for this date range; move on.
            continue
        alerts_ra = alerts['meanra'].to_numpy()
        alerts_dec = alerts['meandec'].to_numpy()
        # Converting to SkyCoord type arrays (really quite handy)
        coo_trans_search = SkyCoord(target_ras*u.deg, target_decs*u.deg)
        coo_alerts = SkyCoord(alerts_ra*u.deg, alerts_dec*u.deg)
        idx_alerts, d2d_trans, d3d_trans = match_coordinates_sky(coo_trans_search, coo_alerts)
        # Filtering by maximum separation and closest match
        sep_constraint = d2d_trans < max_sep
        trans_matches = coo_trans_search[sep_constraint]
        alerts_matches = coo_alerts[idx_alerts[sep_constraint]]
        # Record the closest separation seen for this date (module global).
        minDist[obs_mjd] = np.min(d2d_trans)
        # Adding everything to lists and outputting
        if trans_matches.size:
            all_trans_matches.append(trans_matches)
            all_alerts_matches.append(alerts_matches)
            # Accumulate ids across dates; the original overwrote this list
            # each iteration and so returned only the final date's ids.
            targetid_matches.extend(target_ids[sep_constraint])
    return all_trans_matches, all_alerts_matches, targetid_matches
```
## Matching to ALERCE
Runs a 5 arcsecond match of DESI to Alerce objects. Since everything is handled in functions, this part is quite clean.
From back when I was going to use *if __name__ == "__main__":*... those were the days
```
# Transient dir
path_to_transient = "/global/cfs/cdirs/desi/science/td/daily-search/desitrip/out"
# Grab paths
paths_to_fits = all_candidate_filenames(path_to_transient)
# Collect (RA, DEC, TARGETID) per observation MJD for every candidate file.
desi_info_dict = {}
target_ras, target_decs, obs_mjd, targ_ids = read_fits_info(paths_to_fits[0], transient_candidate = True)
desi_info_dict[obs_mjd] = np.column_stack((target_ras, target_decs, targ_ids))
# To be used when functions are properly combined:
#   initial_check(ledger_df = None, ledger_type = '')
#   closer_check(matches_dict = {}, ledger_df = None, ledger_type = '', exclusion_list = [])
fail_count = 0
# Iterate through every fits file and grab all necessary info and plop it all together
for path in paths_to_fits[1:]:
    target_ras, target_decs, obs_mjd, targ_ids = read_fits_info(path, transient_candidate = True)
    if not obs_mjd:
        fail_count += 1
        continue
    if obs_mjd in desi_info_dict:
        # np.append returns a new array -- assign it back. The original
        # discarded the result, silently dropping every repeat-MJD target.
        desi_info_dict[obs_mjd] = np.append(desi_info_dict[obs_mjd],
                                            np.array([target_ras, target_decs, targ_ids]).T, axis = 0)
    else:
        desi_info_dict[obs_mjd] = np.column_stack((target_ras, target_decs, targ_ids))
print(len(paths_to_fits))
print(len(desi_info_dict))
#print(fail_count)
```
```
# I was going to prepare everything by removing duplicate target ids but it's more trouble than it's worth and match_coordinates_sky can handle it
# Takes quite a bit of time... not much more I can do to speed things up though since querying Alerce for every individual date is the hang-up.
# Run the 2-arcsecond DESI-to-ALeRCE match over every observation date
# collected above, then report the matched coordinates, the matched target
# ids, and the five smallest per-date match distances (from the minDist global).
#print(len(paths_to_fits) - ledesi_info_dictfo_dict))
#print(fail_count)
#trans_matches, _, target_id_matches = matching("", 2.0, date_dict = temp_dict)
trans_matches, _, target_id_matches = matching("", 2.0, date_dict = desi_info_dict)
print(trans_matches)
print(target_id_matches)
print(sorted(minDist.values())[:5])
#for i in minDist.values():
#    print(i)
```
## Matching to DECAM functions
Overwrite *read_fits_info* with older version to accommodate *read_spectra* error
```
# Read useful data from fits file, RA, DEC, target ID, and mjd as a leftover from previous use
def read_fits_info(filepath: str, transient_candidate = False):
    """Pull TARGETID, TARGET_RA and TARGET_DEC out of a DESI FITS file.

    Candidate spectra keep their fibermap in HDU 1; frame files keep it in
    HDU 5, hence the ``transient_candidate`` switch. On any read failure the
    offending file is reported and three empty arrays are returned.
    """
    hdu_num = 1 if transient_candidate else 5
    try:
        with fits.open(filepath) as hdu1:
            data_table = Table(hdu1[hdu_num].data)
            targ_ID = data_table['TARGETID']
            targ_ra = data_table['TARGET_RA'].data   # numpy array
            targ_dec = data_table['TARGET_DEC'].data
    except:
        print("Could not open or use:", filepath.split("/")[-1])
        return np.array([]), np.array([]), np.array([])
    return targ_ra, targ_dec, targ_ID
# Grabbing the frame fits files
def glob_frames(exp_d: str):
    """Return the frame FITS files for one exposure directory (one color band).

    Prefers flux-calibrated ``cframe-<band>*.fits`` files and falls back to
    plain ``frame-<band>*.fits`` when none exist. ``glob`` returns an empty
    list (it never raises) for a missing directory/pattern, so the original
    try/except fallback was dead code; the result is tested directly instead.

    Args:
        exp_d (str): Path of the exposure directory.

    Returns:
        list: Matching file paths for the band selected by ``color_band``.

    Raises:
        SystemExit: If neither cframe nor frame files are found.
    """
    filenames_read = glob(exp_d + "/cframe-" + color_band + "*.fits")  # Only need one of b, r, z
    if not filenames_read:
        # sframes are not flux calibrated; fall back to uncalibrated frames.
        filenames_read = glob(exp_d + "/frame-" + color_band + "*.fits")
    if not filenames_read:
        print("Could not grab/find any fits in the exposure directory:")
        print(exp_d)
        raise SystemExit("Exitting.")
    return filenames_read
#path_to_transient = "/global/cfs/cdirs/desi/science/td/daily-search/desitrip/out"
#print(all_candidate_filenames(path_to_transient)[1])
```
## Match handling routines
The two functions below perform data handling/calling for the final match step.
The first, **initial_check**, grabs all the tile RAs and DECs from the exposures and tiles SQL tables, does some filtering, and sends the necessary information to the matching function. It is currently designed to handle ALeRCE as well, but work remains to make sure it operates correctly there.
```
def initial_check(ledger_df = None, ledger_type = ''):
    """First-stage (tile-level) match of DESI exposures against an alert ledger.

    Queries the exposures/tiles SQL tables for all tiles observed since
    2021-03-01, matches each night's tile centers against the ledger within a
    generous 1.8 degree radius, and returns the surviving SQL rows keyed by
    observation date. ``closer_check`` then refines these to target level.

    Args:
        ledger_df (pandas.DataFrame, optional): Pre-fetched ledger; downloaded
            on demand when missing or empty.
        ledger_type (str): 'DECAM_TAMU' or 'ALERCE' (case-insensitive).

    Returns:
        dict: date -> list of sqlite3.Row tile/exposure records that matched;
        ``{}`` when the ledger type is unrecognized.
    """
    query_date_start = "20210301"
    smushed_YMD = today.iso.split(" ")[0].replace("-","")  # e.g. '20230115'
    query_date_end = smushed_YMD
    # Crossmatch across tiles and exposures to grab obsdate via tileid
    query_match = "SELECT distinct tilera, tiledec, obsdate, obsmjd, expid, exposures.tileid from exposures INNER JOIN tiles ON exposures.tileid = tiles.tileid where obsdate BETWEEN " + \
        query_date_start + " AND " + query_date_end + ";"
    # Querying sql; sqlite3.Row behaves like a namedtuple/dictionary.
    conn = sqlite3.connect(db_filename)
    conn.row_factory = sqlite3.Row # https://docs.python.org/3/library/sqlite3.html#sqlite3.Row
    cur = conn.cursor()
    cur.execute(query_match)
    matches_list = cur.fetchall()
    cur.close()
    # Group the rows by observation date.
    # https://stackoverflow.com/questions/11276473/append-to-a-dict-of-lists-with-a-dict-comprehension
    date_dict = {k['obsdate'] : list(filter(lambda x:x['obsdate'] == k['obsdate'], matches_list)) for k in matches_list}
    alert_matches_dict = {}
    all_trans_matches = []
    all_alerts_matches = []
    # Grabbing DECAM ledger if not already fed in.
    # ``is None`` is checked first: the original called ``.empty`` on the
    # possible None default, which raised AttributeError.
    if ledger_type.upper() == 'DECAM_TAMU':
        if ledger_df is None or ledger_df.empty:
            ledger_df = access_decam_data('https://datahub.geos.tamu.edu:8000/decam/LCData_Legacy/')
    # Iterating through the dates and checking each tile observed on each date.
    # It is done in this way to cut down on calls to ALERCE since we go day by day.
    for date, row in date_dict.items():
        date_str = str(date)
        date_str = date_str[:4]+"-"+date_str[4:6]+"-"+date_str[6:] # Adding dashes for Time
        obs_mjd = Time(date_str).mjd
        # Dedupe tiles by tileid; slower than a set comprehension but clearer.
        unique_tileid = {i['tileid']: (i['tilera'], i['tiledec']) for i in row}
        exposure_ras, exposure_decs = zip(*unique_tileid.values())
        # Grabbing alerce ledger if not done already
        if ledger_type.upper() == 'ALERCE':
            if ledger_df is None or ledger_df.empty:
                # NOTE: keyword fixed to ``lastmjd_in`` (a [start, end] range);
                # the original passed a nonexistent ``lastmjd`` kwarg -> TypeError.
                ledger_df = access_alerts(lastmjd_in = [obs_mjd - 28, obs_mjd])
        elif ledger_type.upper() == 'DECAM_TAMU':
            pass
        else:
            print("Cannot use alerts broker/ledger provided. Stopping before match.")
            return {}
        # Retain tileid for the match output.
        tileid_arr = np.array(list(unique_tileid.keys()))
        # Where the magic/matching happens
        trans_matches, alert_matches, trans_ids, alerts_ids, _ = \
            inner_matching(target_ids_in = tileid_arr, target_ras_in = exposure_ras, target_decs_in = exposure_decs, obs_mjd_in = obs_mjd,
                           path_in = '', max_sep = 1.8, sep_units = 'deg', ledger_df_in = ledger_df, ledger_type_in = ledger_type)
        # Add everything into one giant list for both
        if trans_matches.size:
            all_trans_matches.append(trans_matches)
            all_alerts_matches.append(alert_matches)
        else:
            continue
        # Populate the output dictionary by date (a common theme); each element
        # carries the full sqlite3 row for the matching tile(s).
        alert_matches_dict[date] = []
        for tup in trans_matches:
            ra = tup.ra.deg
            dec = tup.dec.deg
            match_rows = [i for i in row if (i['tilera'], i['tiledec']) == (ra, dec)] # Just rebuilding for populating, this shouldn't change/exclude anything
            alert_matches_dict[date].extend(match_rows)
    return alert_matches_dict
```
## closer_check
**closer_check** is also a handling function but operates differently in that now it is checking individual targets. This *must* be run after **initial_check** because it takes as input the dictionary **initial_check** spits out. It then grabs all the targets from the DESI files and pipes that into the matching function but this time with a much more strict matching radius (in this case 2 arcseconds).
It then preps the data for output and writing.
```
def closer_check(matches_dict = None, ledger_df = None, ledger_type = '', exclusion_list = None):
    """Second-stage (target-level) match of DESI fibers against an alert ledger.

    For every date/tile surviving ``initial_check``, reads the per-fiber
    RA/DEC/TARGETID out of the exposure frame files and matches them against
    the ledger objects to within 2 arcseconds.

    Args:
        matches_dict (dict, optional): Output of ``initial_check``
            (date -> list of sqlite3 rows).
        ledger_df (pandas.DataFrame, optional): Pre-fetched ledger; downloaded
            on demand when missing or empty.
        ledger_type (str): 'DECAM_TAMU' or 'ALERCE' (case-insensitive).
        exclusion_list (list, optional): Dates to skip.

    Returns:
        numpy.ndarray: One row per match with columns (date, tileid, petal,
        targetid, target_ra, target_dec, alert_id, alert_ra, alert_dec);
        ``{}`` when no ``matches_dict`` was fed in.
    """
    # ``None`` defaults avoid shared mutable default arguments.
    if exclusion_list is None:
        exclusion_list = []
    all_exp_matches = {}
    if not matches_dict:
        print("No far matches fed in for nearby matching. Returning none.")
        return {}
    # Again just in case the dataframe isn't fed in
    if ledger_type.upper() == 'DECAM_TAMU':
        id_head = 'ObjectID'
        ra_head = 'RA-OBJECT'
        dec_head = 'DEC-OBJECT'
        if ledger_df is None or ledger_df.empty:
            ledger_df = access_decam_data('https://datahub.geos.tamu.edu:8000/decam/LCData_Legacy/')
    count_flag = 0
    # Returned unchanged when no date yields matches (the original could hit
    # UnboundLocalError in that case).
    all_exp_matches_arr = np.array([])
    # Iterating through date and all tile information for that date
    for date, row in matches_dict.items():
        print("\n", date)
        if date in exclusion_list:
            continue
        # Per-date accumulators.
        all_exp_matches[date] = []
        file_indices = {}
        all_targ_ras = np.array([])
        all_targ_decs = np.array([])
        all_targ_ids = np.array([])
        all_tileids = np.array([])
        all_petals = np.array([])
        # Iterating through each initial match tile for every date
        for i in row:
            # Grabbing the paths and iterating through them to grab the RA's/DEC's
            exp_paths = '/'.join((exposure_path, "daily/exposures", str(i['obsdate']), "000"+str(i['expid'])))
            for path in glob_frames(exp_paths):
                targ_ras, targ_decs, targ_ids = read_fits_info(path, transient_candidate = False)
                # Context manager closes the FITS handle (the original leaked it).
                with fits.open(path) as h:
                    tileid = h[0].header['TILEID']
                tileids = np.full(len(targ_ras), tileid).tolist()
                petal = path.split("/")[-1].split("-")[1][-1]  # e.g. 'cframe-r7-...' -> petal '7'
                petals = np.full(len(targ_ras), petal).tolist()
                # Record (start, end) indices per file so a match can be traced
                # back to its original FITS file for debugging.
                all_len = len(all_targ_ras)
                new_len = len(targ_ras)
                if all_len:
                    all_len -= 1
                    file_indices[path] = (all_len, all_len + new_len) # The start and end index, modulo number
                else:
                    file_indices[path] = (0, new_len) # The start and end index, modulo number
                if len(targ_ras) != len(targ_decs):
                    print("Length of all ras vs. all decs do not match.")
                    print("Something went wrong!")
                    print("Continuing but not adding those to match...")
                    continue
                # All the ras/decs together!
                all_targ_ras = np.append(all_targ_ras, targ_ras)
                all_targ_decs = np.append(all_targ_decs, targ_decs)
                all_targ_ids = np.append(all_targ_ids, targ_ids)
                all_tileids = np.append(all_tileids, tileids)
                all_petals = np.append(all_petals, petals)
        date_mjd = str(date)[:4]+"-"+str(date)[4:6]+"-"+str(date)[6:] # Adding dashes for Time
        date_mjd = Time(date_mjd).mjd
        # Grabbing ALERCE just in case (slow).
        if ledger_type.upper() == 'ALERCE':
            id_head = 'oid'
            ra_head = 'meanra'
            dec_head = 'meandec'
            if ledger_df is None or ledger_df.empty:
                # ``date_mjd`` here -- the original referenced an undefined
                # ``obs_mjd`` and would have raised NameError.
                ledger_df = access_alerts(lastmjd_in = [date_mjd - 45, date_mjd])
        # Checking for NaNs, again doesn't play nice with match_coordinates_sky
        nan_ra = np.isnan(all_targ_ras)
        nan_dec = np.isnan(all_targ_decs)
        if np.any(nan_ra) or np.any(nan_dec):
            print("NaNs found, removing them from array before match.")
            # Drop a row when EITHER coordinate is NaN (the original's
            # logical_and kept rows with a single NaN coordinate).
            keep = np.logical_not(np.logical_or(nan_ra, nan_dec))
            all_targ_ras = all_targ_ras[keep] # Logic masking, probably more efficient
            all_targ_decs = all_targ_decs[keep]
            all_targ_ids = all_targ_ids[keep]
            all_tileids = all_tileids[keep]
            all_petals = all_petals[keep]
        # Where the magic matching happens. This time with separation 2 arcseconds.
        alert_exp_matches, alerts_matches, targetid_exp_matches, id_alerts_matches, exp_idx = \
            inner_matching(target_ids_in = all_targ_ids, target_ras_in = all_targ_ras, target_decs_in = all_targ_decs,
                           obs_mjd_in = date_mjd, path_in = '', max_sep = 2, sep_units = 'arcsec',
                           ledger_df_in = ledger_df, ledger_type_in = ledger_type)
        date_arr = np.full(alerts_matches.shape[0], date)
        info_arr_date = np.column_stack((date_arr, all_tileids[exp_idx], all_petals[exp_idx], targetid_exp_matches,
                                         alert_exp_matches.ra.deg, alert_exp_matches.dec.deg,
                                         id_alerts_matches, alerts_matches.ra.deg, alerts_matches.dec.deg))
        all_exp_matches[date].append(info_arr_date)
        if count_flag == 0:
            all_exp_matches_arr = info_arr_date
            count_flag = 1
        else:
            all_exp_matches_arr = np.concatenate((all_exp_matches_arr, info_arr_date))
    # Custom array output: multiple results per date make a per-date CSV awkward.
    return all_exp_matches_arr
```
## inner_matching
#### aka the bread & butter
**inner_matching** is what ultimately does the final match and calls **match_coordinates_sky** with everything fed in. So really it doesn't do much other than take in all the goodies and make everyone happy.
It may still be difficult to co-opt for alerce matching but that may be a project for another time.
```
def inner_matching(target_ids_in = np.array([]), target_ras_in = np.array([]), target_decs_in = np.array([]), obs_mjd_in = '', path_in = '', max_sep = 2, sep_units = 'arcsec', ledger_df_in = None, ledger_type_in = ''): # to be combined with the other matching thing in due time
    """Match DESI coordinates against an alert ledger via ``match_coordinates_sky``.

    Args:
        target_ids_in (numpy.ndarray): IDs parallel to the RA/DEC arrays.
        target_ras_in, target_decs_in (array-like): Coordinates in degrees.
        obs_mjd_in: Observation MJD; used only to name the cached kd-tree.
        path_in (str): Unused; kept for interface compatibility.
        max_sep (float): Maximum separation in ``sep_units``.
        sep_units (str): 'arcsec', 'arcmin' or 'deg' (defaults to arcsec on
            anything else, with a warning).
        ledger_df_in (pandas.DataFrame): Ledger holding ID/RA/DEC columns.
        ledger_type_in (str): 'DECAM_TAMU' or 'ALERCE'; selects column names.

    Returns:
        tuple: (matched DESI SkyCoords, matched ledger SkyCoords, matched
        DESI ids, matched ledger ids, boolean separation mask). Five empty
        arrays on any validation failure -- the original returned only two
        there, which broke every caller's 5-way unpacking.
    """
    # Single empty 5-tuple for every failure path so callers can always unpack.
    empty = (np.array([]), np.array([]), np.array([]), np.array([]), np.array([]))
    if not np.array(target_ras_in).size:
        return empty
    # Selecting header strings to use with the different alert brokers/ledgers.
    if ledger_type_in.upper() == 'DECAM_TAMU':
        id_head = 'ObjectID'
        ra_head = 'RA-OBJECT'
        dec_head = 'DEC-OBJECT'
    elif ledger_type_in.upper() == 'ALERCE':
        id_head = 'oid'
        ra_head = 'meanra'
        dec_head = 'meandec'
    else:
        print("No ledger type specified. Quitting.")
        return empty
    # Figuring out the units (validated before any expensive work).
    if sep_units == 'arcsec':
        max_sep *= u.arcsec
    elif sep_units == 'arcmin':
        max_sep *= u.arcmin
    elif sep_units == 'deg':
        max_sep *= u.deg
    else:
        print("Separation unit specified is invalid for matching. Defaulting to arcsecond.")
        max_sep *= u.arcsec
    # Ensure real arrays: callers sometimes pass tuples (e.g. from zip), which
    # the boolean masking below would reject.
    target_ras_in = np.asarray(target_ras_in)
    target_decs_in = np.asarray(target_decs_in)
    target_ids_in = np.asarray(target_ids_in)
    # Checking for NaNs, which don't play nice with match_coordinates_sky.
    nan_ra = np.isnan(target_ras_in)
    nan_dec = np.isnan(target_decs_in)
    if np.any(nan_ra) or np.any(nan_dec):
        print("NaNs found, removing them from array before match.")
        # Drop a row when EITHER coordinate is NaN (the original's logical_and
        # kept rows with a single NaN coordinate).
        keep = np.logical_not(np.logical_or(nan_ra, nan_dec))
        target_ras_in = target_ras_in[keep]
        target_decs_in = target_decs_in[keep]
        target_ids_in = target_ids_in[keep]
    # Name for the cached kd-tree; supposed to speed up subsequent runs.
    tree_name = "_".join(("kdtree", ledger_type_in, str(obs_mjd_in)))
    # Convert df RA/DEC to numpy arrays
    alerts_id = ledger_df_in[id_head].to_numpy()
    alerts_ra = ledger_df_in[ra_head].to_numpy()
    alerts_dec = ledger_df_in[dec_head].to_numpy()
    # Convert everything to SkyCoord
    coo_trans_search = SkyCoord(target_ras_in*u.deg, target_decs_in*u.deg)
    coo_alerts = SkyCoord(alerts_ra*u.deg, alerts_dec*u.deg)
    # Do the matching!
    idx_alerts, d2d_trans, d3d_trans = match_coordinates_sky(coo_trans_search, coo_alerts, storekdtree = tree_name) # store tree to speed up subsequent results
    # Keep only pairs closer than max_sep.
    sep_constraint = d2d_trans < max_sep
    trans_matches = coo_trans_search[sep_constraint]
    trans_matches_ids = target_ids_in[sep_constraint]
    alerts_matches = coo_alerts[idx_alerts[sep_constraint]]
    alerts_matches_ids = alerts_id[idx_alerts[sep_constraint]]
    if trans_matches.size:
        print(len(trans_matches), "matches with separation -", max_sep)
    return trans_matches, alerts_matches, trans_matches_ids, alerts_matches_ids, sep_constraint
```
## Grab DECAM ledger as pandas dataframe
```
# Fetch the DECam SN and AGN ledgers from TAMU; overwrite=True forces a fresh
# download instead of reusing the local CSV cache.
decam_transients = access_decam_data('https://datahub.geos.tamu.edu:8000/decam/LCData_Legacy/', overwrite = True) # If True, grabs a fresh batch
decam_transients_agn = access_decam_data('https://datahub.geos.tamu.edu:8000/decam/LCData_Legacy_AGN/', overwrite = True) # If True, grabs a fresh batch
decam_transients
```
## Run initial check (on tiles) and closer check (on targets)
```
# SN candidates: tile-level match, then target-level match, then save.
init_matches_by_date = initial_check(ledger_df = decam_transients, ledger_type = 'DECAM_TAMU')
close_matches = closer_check(init_matches_by_date, ledger_df = decam_transients, ledger_type = 'DECAM_TAMU', exclusion_list = [])
np.save('matches_DECam', close_matches, allow_pickle=True)
# AGN candidates: same pipeline against the AGN ledger.
init_matches_agn_by_date = initial_check(ledger_df = decam_transients_agn, ledger_type = 'DECAM_TAMU')
close_matches_agn = closer_check(init_matches_agn_by_date, ledger_df = decam_transients_agn, ledger_type = 'DECAM_TAMU', exclusion_list = [])
# The original saved matches_DECam_agn twice; the duplicate call was removed.
np.save('matches_DECam_agn', close_matches_agn, allow_pickle=True)
```
## A quick plot to see the distribution of target matches
```
# Scatter the DESI coordinates of every match; columns 4 and 5 of the
# closer_check output are the matched target RA and DEC in degrees.
plt.scatter(close_matches[:,4], close_matches[:,5],label='SN')
plt.scatter(close_matches_agn[:,4], close_matches_agn[:,5],label='AGN')
plt.legend()
```
## End notes:
Double matches are to be expected; it could be worthwhile to compare the spectra of both.
| github_jupyter |
```
%matplotlib inline
```
# Cascade decomposition
This example script shows how to compute and plot the cascade decompositon of
a single radar precipitation field in pysteps.
```
from matplotlib import cm, pyplot as plt
import numpy as np
import os
from pprint import pprint
from pysteps.cascade.bandpass_filters import filter_gaussian
from pysteps import io, rcparams
from pysteps.cascade.decomposition import decomposition_fft
from pysteps.utils import conversion, transformation
from pysteps.visualization import plot_precip_field
```
## Read precipitation field
First thing, the radar composite is imported and transformed in units
of dB.
```
# Import the example radar composite (gzip-compressed FMI PGM file).
root_path = rcparams.data_sources["fmi"]["root_path"]
filename = os.path.join(
    root_path, "20160928", "201609281600_fmi.radar.composite.lowest_FIN_SUOMI1.pgm.gz"
)
R, _, metadata = io.import_fmi_pgm(filename, gzipped=True)

# Convert to rain rate; metadata (units, thresholds) is updated alongside.
R, metadata = conversion.to_rainrate(R, metadata)

# Nicely print the metadata
pprint(metadata)

# Plot the rainfall field
plot_precip_field(R, geodata=metadata)
plt.show()

# Log-transform the data: rain rates below 0.1 mm/h are mapped to -15 dBR.
R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0)
```
## 2D Fourier spectrum
Compute and plot the 2D Fourier power spectrum of the precipitation field.
```
# Set NaNs to the no-rain fill value so the FFT sees a finite field.
R[~np.isfinite(R)] = metadata["zerovalue"]

# Compute the (fft-shifted) 2D Fourier amplitude spectrum of the input field.
F = abs(np.fft.fftshift(np.fft.fft2(R)))

# Plot the log-power spectrum, centered on the zero wavenumber.
M, N = F.shape
fig, ax = plt.subplots()
im = ax.imshow(
    np.log(F ** 2), vmin=4, vmax=24, cmap=cm.jet, extent=(-N / 2, N / 2, -M / 2, M / 2)
)
cb = fig.colorbar(im)
ax.set_xlabel("Wavenumber $k_x$")
ax.set_ylabel("Wavenumber $k_y$")
ax.set_title("Log-power spectrum of R")
plt.show()
```
## Cascade decomposition
First, construct a set of Gaussian bandpass filters and plot the corresponding
1D filters.
```
num_cascade_levels = 7

# Construct the Gaussian bandpass filters.
# NOTE: `filter` shadows the builtin, but the name is kept unchanged because
# the decomposition cell below references it.
filter = filter_gaussian(R.shape, num_cascade_levels)

# Plot the 1D radial profiles of the bandpass filter weights.
L = max(N, M)
fig, ax = plt.subplots()
for k in range(num_cascade_levels):
    ax.semilogx(
        np.linspace(0, L / 2, len(filter["weights_1d"][k, :])),
        filter["weights_1d"][k, :],
        "k-",
        base=pow(0.5 * L / 3, 1.0 / (num_cascade_levels - 2)),
    )

ax.set_xlim(1, L / 2)
ax.set_ylim(0, 1)
# Tick at the central wavenumber of each filter (plus the origin).
xt = np.hstack([[1.0], filter["central_wavenumbers"][1:]])
ax.set_xticks(xt)
ax.set_xticklabels(["%.2f" % cf for cf in filter["central_wavenumbers"]])
# Fix: raw string avoids the invalid "\m" escape-sequence warning that the
# non-raw "$|\mathbf{k}|$" literal triggers on modern Python.
ax.set_xlabel(r"Radial wavenumber $|\mathbf{k}|$")
ax.set_ylabel("Normalized weight")
ax.set_title("Bandpass filter weights")
plt.show()
```
Finally, apply the 2D Gaussian filters to decompose the radar rainfall field
into a set of cascade levels of decreasing spatial scale and plot them.
```
decomp = decomposition_fft(R, filter, compute_stats=True)

# Normalize each cascade level to zero mean and unit standard deviation.
for lvl in range(num_cascade_levels):
    decomp["cascade_levels"][lvl] = (
        decomp["cascade_levels"][lvl] - decomp["means"][lvl]
    ) / decomp["stds"][lvl]

fig, ax = plt.subplots(nrows=2, ncols=4)

# First panel: the observed (dB-transformed) field itself.
ax[0, 0].imshow(R, cmap=cm.RdBu_r, vmin=-5, vmax=5)
ax[0, 0].set_title("Observed")

# Remaining seven panels: the normalized cascade levels, coarse to fine.
for lvl in range(num_cascade_levels):
    panel = ax.flat[lvl + 1]
    panel.imshow(decomp["cascade_levels"][lvl], cmap=cm.RdBu_r, vmin=-3, vmax=3)
    panel.set_title("Level %d" % (lvl + 1))

# Strip the axis ticks from every panel.
for panel in ax.flat:
    panel.set_xticks([])
    panel.set_yticks([])

plt.tight_layout()
plt.show()

# sphinx_gallery_thumbnail_number = 4
```
| github_jupyter |
##### Copyright 2021 The TF-Agents Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# CheckpointerとPolicySaver
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/agents/tutorials/10_checkpointer_policysaver_tutorial"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png"> TensorFlow.org で表示</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/agents/tutorials/10_checkpointer_policysaver_tutorial.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Google Colab で実行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/agents/tutorials/10_checkpointer_policysaver_tutorial.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png"> GitHub でソースを表示</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/agents/tutorials/10_checkpointer_policysaver_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a></td>
</table>
## はじめに
`tf_agents.utils.common.Checkpointer`は、ローカルストレージとの間でトレーニングの状態、ポリシーの状態、およびreplay_bufferの状態を保存/読み込むユーティリティです。
`tf_agents.policies.policy_saver.PolicySaver`は、ポリシーのみを保存/読み込むツールであり、`Checkpointer`よりも軽量です。`PolicySaver`を使用すると、ポリシーを作成したコードに関する知識がなくてもモデルをデプロイできます。
このチュートリアルでは、DQNを使用してモデルをトレーニングし、次に`Checkpointer`と`PolicySaver`を使用して、状態とモデルをインタラクティブな方法で保存および読み込む方法を紹介します。`PolicySaver`では、TF2.0の新しいsaved_modelツールとフォーマットを使用することに注意してください。
## セットアップ
以下の依存関係をインストールしていない場合は、実行します。
```
#@test {"skip": true}
!sudo apt-get update
!sudo apt-get install -y xvfb ffmpeg python-opengl
!pip install pyglet
!pip install 'imageio==2.4.0'
!pip install 'xvfbwrapper==0.2.9'
!pip install tf-agents
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import imageio
import io
import matplotlib
import matplotlib.pyplot as plt
import os
import shutil
import tempfile
import tensorflow as tf
import zipfile
import IPython
try:
from google.colab import files
except ImportError:
files = None
from tf_agents.agents.dqn import dqn_agent
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.networks import q_network
from tf_agents.policies import policy_saver
from tf_agents.policies import py_tf_eager_policy
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
# Writable scratch directory for checkpoints/policies (TEST_TMPDIR overrides).
tempdir = os.getenv("TEST_TMPDIR", tempfile.gettempdir())
#@test {"skip": true}
# Set up a virtual display for rendering OpenAI gym environments.
import xvfbwrapper
xvfbwrapper.Xvfb(1400, 900, 24).start()
```
## DQNエージェント
前のColabと同じように、DQNエージェントを設定します。 このColabでは、詳細は主な部分ではないので、デフォルトでは非表示になっていますが、「コードを表示」をクリックすると詳細を表示できます。
### ハイパーパラメーター
```
env_name = "CartPole-v1"  # Gym environment id
collect_steps_per_iteration = 100  # env steps gathered per training iteration
replay_buffer_capacity = 100000  # max transitions kept in the replay buffer
fc_layer_params = (100,)  # hidden layer sizes of the Q-network
batch_size = 64  # minibatch size sampled from the buffer
learning_rate = 1e-3  # Adam step size
log_interval = 5  # iterations between loss logs
num_eval_episodes = 10  # episodes per evaluation
eval_interval = 1000  # iterations between evaluations
```
### 環境
```
# Separate train/eval environments, each wrapped as a TensorFlow environment.
train_py_env = suite_gym.load(env_name)
eval_py_env = suite_gym.load(env_name)
train_env = tf_py_environment.TFPyEnvironment(train_py_env)
eval_env = tf_py_environment.TFPyEnvironment(eval_py_env)
```
### エージェント
```
#@title
# Q-network mapping observations to per-action Q-values.
q_net = q_network.QNetwork(
    train_env.observation_spec(),
    train_env.action_spec(),
    fc_layer_params=fc_layer_params)

optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)

# Shared training-step counter; also checkpointed/restored later.
global_step = tf.compat.v1.train.get_or_create_global_step()

# DQN agent tying the Q-network, optimizer and loss together.
agent = dqn_agent.DqnAgent(
    train_env.time_step_spec(),
    train_env.action_spec(),
    q_network=q_net,
    optimizer=optimizer,
    td_errors_loss_fn=common.element_wise_squared_loss,
    train_step_counter=global_step)
agent.initialize()
```
### データ収集
```
#@title
# Replay buffer holding collected trajectories; capacity bounds memory use.
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
    data_spec=agent.collect_data_spec,
    batch_size=train_env.batch_size,
    max_length=replay_buffer_capacity)

# Driver that steps the env with the exploratory collect policy and writes
# each transition into the replay buffer.
collect_driver = dynamic_step_driver.DynamicStepDriver(
    train_env,
    agent.collect_policy,
    observers=[replay_buffer.add_batch],
    num_steps=collect_steps_per_iteration)

# Initial data collection
collect_driver.run()

# Dataset generates trajectories with shape [BxTx...] where
# T = n_step_update + 1.
dataset = replay_buffer.as_dataset(
    num_parallel_calls=3, sample_batch_size=batch_size,
    num_steps=2).prefetch(3)

iterator = iter(dataset)
```
### エージェントのトレーニング
```
#@title
# (Optional) Optimize by wrapping some of the code in a graph using TF function.
agent.train = common.function(agent.train)
def train_one_iteration():
    """Run one collect-and-train step and print the resulting loss.

    Uses the notebook-global driver, iterator and agent; no return value.
    """
    # Gather a few environment steps with the collect policy into the buffer.
    collect_driver.run()

    # Draw a minibatch from the buffer and apply one gradient update.
    experience, _ = next(iterator)
    loss_info = agent.train(experience)

    step = agent.train_step_counter.numpy()
    print(f'iteration: {step} loss: {loss_info.loss}')
```
### ビデオ生成
```
#@title
def embed_gif(gif_buffer):
    """Embeds a gif file in the notebook.

    Args:
        gif_buffer: Raw GIF bytes to display.

    Returns:
        An ``IPython.display.HTML`` object rendering the GIF inline.
    """
    encoded = base64.b64encode(gif_buffer).decode()
    return IPython.display.HTML(f'<img src="data:image/gif;base64,{encoded}"/>')
def run_episodes_and_create_video(policy, eval_tf_env, eval_py_env):
    """Roll out three episodes with `policy` and display them as an inline GIF.

    Args:
        policy: Policy whose ``action(time_step)`` drives the environment.
        eval_tf_env: TF environment that is stepped/reset.
        eval_py_env: Matching Python environment used only for rendering.
    """
    episode_count = 3
    rendered_frames = []
    for _ in range(episode_count):
        step = eval_tf_env.reset()
        rendered_frames.append(eval_py_env.render())
        # Step until the episode terminates, capturing a frame per step.
        while not step.is_last():
            step = eval_tf_env.step(policy.action(step).action)
            rendered_frames.append(eval_py_env.render())
    gif_buffer = io.BytesIO()
    imageio.mimsave(gif_buffer, rendered_frames, format='gif', fps=60)
    IPython.display.display(embed_gif(gif_buffer.getvalue()))
```
### ビデオ生成
ビデオを生成して、ポリシーのパフォーマンスを確認します。
```
# Show the current training step, then preview the greedy policy's behavior.
print ('global_step:')
print (global_step)
run_episodes_and_create_video(agent.policy, eval_env, eval_py_env)
```
## チェックポインタとPolicySaverのセットアップ
CheckpointerとPolicySaverを使用する準備ができました。
### Checkpointer
```
checkpoint_dir = os.path.join(tempdir, 'checkpoint')
# Checkpointer snapshots everything needed to resume training: the agent
# (networks/optimizer), the policy, the replay buffer and the step counter.
train_checkpointer = common.Checkpointer(
    ckpt_dir=checkpoint_dir,
    max_to_keep=1,  # keep only the most recent checkpoint
    agent=agent,
    policy=agent.policy,
    replay_buffer=replay_buffer,
    global_step=global_step
)
```
### Policy Saver
```
policy_dir = os.path.join(tempdir, 'policy')
# PolicySaver exports only the policy (no training state) as a SavedModel.
tf_policy_saver = policy_saver.PolicySaver(agent.policy)
```
### 1回のイテレーションのトレーニング
```
#@test {"skip": true}
# One collect+train step so there is fresh state to checkpoint below.
print('Training one iteration....')
train_one_iteration()
```
### チェックポイントに保存
```
# Persist agent, policy, replay buffer and step counter to checkpoint_dir.
train_checkpointer.save(global_step)
```
### チェックポイントに復元
チェックポイントに復元するためには、チェックポイントが作成されたときと同じ方法でオブジェクト全体を再作成する必要があります。
```
# Restore from the latest checkpoint (the managed objects were recreated
# above exactly as when the checkpoint was written).
train_checkpointer.initialize_or_restore()
global_step = tf.compat.v1.train.get_global_step()
```
また、ポリシーを保存して指定する場所にエクスポートします。
```
# Export the policy as a SavedModel; loadable without the agent code.
tf_policy_saver.save(policy_dir)
```
ポリシーの作成に使用されたエージェントまたはネットワークについての知識がなくても、ポリシーを読み込めるので、ポリシーのデプロイが非常に簡単になります。
保存されたポリシーを読み込み、それがどのように機能するかを確認します。
```
# Load the exported SavedModel policy and sanity-check it with a rollout video.
saved_policy = tf.saved_model.load(policy_dir)
run_episodes_and_create_video(saved_policy, eval_env, eval_py_env)
```
## エクスポートとインポート
以下は、後でトレーニングを続行し、再度トレーニングすることなくモデルをデプロイできるように、Checkpointer とポリシーディレクトリをエクスポート/インポートするのに役立ちます。
「1回のイテレーションのトレーニング」に戻り、後で違いを理解できるように、さらに数回トレーニングします。 結果が少し改善し始めたら、以下に進みます。
```
#@title Create zip file and upload zip file (double-click to see the code)
def create_zip_file(dirname, base_filename):
    """Zip the contents of `dirname` and return the archive's path.

    The archive is written as ``<base_filename>.zip``.
    """
    archive_format = 'zip'
    return shutil.make_archive(base_filename, archive_format, dirname)
def upload_and_unzip_file_to(dirname):
    """Prompt for a zip upload (Colab only) and extract it into `dirname`.

    No-op outside Colab (when the `files` module is unavailable). Existing
    contents of `dirname` are removed before extraction.
    """
    if files is None:
        return
    uploaded = files.upload()
    for name, content in uploaded.items():
        print('User uploaded file "{name}" with length {length} bytes'.format(
            name=name, length=len(content)))
        # ignore_errors tolerates a missing directory on the first upload.
        shutil.rmtree(dirname, ignore_errors=True)
        # Context manager guarantees the archive is closed even if
        # extraction raises (original leaked it on error).
        with zipfile.ZipFile(io.BytesIO(content)) as zip_files:
            zip_files.extractall(dirname)
```
チェックポイントディレクトリからzipファイルを作成します。
```
# Save a fresh checkpoint, then bundle the checkpoint directory into a zip.
train_checkpointer.save(global_step)
checkpoint_zip_filename = create_zip_file(checkpoint_dir, os.path.join(tempdir, 'exported_cp'))
```
zipファイルをダウンロードします。
```
#@test {"skip": true}
# Download only works in Colab (`files` is None elsewhere).
if files is not None:
    files.download(checkpoint_zip_filename) # try again if this fails: https://github.com/googlecolab/colabtools/issues/469
```
10〜15回ほどトレーニングした後、チェックポイントのzipファイルをダウンロードし、[ランタイム]> [再起動してすべて実行]に移動してトレーニングをリセットし、このセルに戻ります。ダウンロードしたzipファイルをアップロードして、トレーニングを続けます。
```
#@test {"skip": true}
# Upload a previously exported checkpoint zip and restore training state.
upload_and_unzip_file_to(checkpoint_dir)
train_checkpointer.initialize_or_restore()
global_step = tf.compat.v1.train.get_global_step()
```
チェックポイントディレクトリをアップロードしたら、「1回のイテレーションのトレーニング」に戻ってトレーニングを続けるか、「ビデオ生成」に戻って読み込まれたポリシーのパフォーマンスを確認します。
または、ポリシー(モデル)を保存して復元することもできます。Checkpointerとは異なり、トレーニングを続けることはできませんが、モデルをデプロイすることはできます。ダウンロードしたファイルはCheckpointerのファイルよりも大幅に小さいことに注意してください。
```
# Export the policy and zip it (much smaller than a checkpoint; deploy-only).
tf_policy_saver.save(policy_dir)
policy_zip_filename = create_zip_file(policy_dir, os.path.join(tempdir, 'exported_policy'))

#@test {"skip": true}
if files is not None:
    files.download(policy_zip_filename) # try again if this fails: https://github.com/googlecolab/colabtools/issues/469
```
ダウンロードしたポリシーディレクトリ(exported_policy.zip)をアップロードし、保存したポリシーの動作を確認します。
```
#@test {"skip": true}
# Upload an exported policy zip and verify the restored policy with a video.
upload_and_unzip_file_to(policy_dir)
saved_policy = tf.saved_model.load(policy_dir)
run_episodes_and_create_video(saved_policy, eval_env, eval_py_env)
```
## SavedModelPyTFEagerPolicy
TFポリシーを使用しない場合は、`py_tf_eager_policy.SavedModelPyTFEagerPolicy`を使用して、Python envでsaved_modelを直接使用することもできます。
これは、eagerモードが有効になっている場合にのみ機能することに注意してください。
```
# Eager-mode wrapper that runs the SavedModel directly in a Python env
# (works only with eager execution enabled).
eager_py_policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(
    policy_dir, eval_py_env.time_step_spec(), eval_py_env.action_spec())

# Note that we're passing eval_py_env not eval_env.
run_episodes_and_create_video(eager_py_policy, eval_py_env, eval_py_env)
```
## ポリシーを TFLite に変換する
詳細については、「[TensorFlow Lite 推論](https://tensorflow.org/lite/guide/inference)」をご覧ください。
```
# Convert the saved policy to TFLite, exposing only the "action" signature.
converter = tf.lite.TFLiteConverter.from_saved_model(policy_dir, signature_keys=["action"])
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,  # enable TensorFlow Lite ops.
    tf.lite.OpsSet.SELECT_TF_OPS  # enable TensorFlow ops.
]
tflite_policy = converter.convert()
# Write the flatbuffer next to the checkpoints/policies in tempdir.
with open(os.path.join(tempdir, 'policy.tflite'), 'wb') as f:
    f.write(tflite_policy)
```
### TFLite モデルで推論を実行する
```
import numpy as np

interpreter = tf.lite.Interpreter(os.path.join(tempdir, 'policy.tflite'))

policy_runner = interpreter.get_signature_runner()
# NOTE(review): `_inputs` is a private attribute; printed here only to
# inspect the expected input tensor names.
print(policy_runner._inputs)

# Run one inference with a zeroed CartPole time step (batch of 1, 4 features).
policy_runner(**{
    '0/discount':tf.constant(0.0),
    '0/observation':tf.zeros([1,4]),
    '0/reward':tf.constant(0.0),
    '0/step_type':tf.constant(0)})
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.