code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
import random
import gym
#import math
import numpy as np
from collections import deque
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam
EPOCHS = 1000
THRESHOLD = 10
MONITOR = True
class DQN():
    """Deep Q-Network agent for pixel-based Gym environments.

    Raw RGB frames are cropped, converted to grayscale, resized to
    IM_SIZE x IM_SIZE, and the last `m` frames are stacked along the
    channel axis to form the network input of shape (1, IM_SIZE, IM_SIZE, m).
    """

    def __init__(self, env_string, batch_size=64, IM_SIZE=84, m=4):
        # Replay buffer of (state, action, reward, next_state, done) tuples.
        self.memory = deque(maxlen=5000)
        self.env = gym.make(env_string)
        input_size = self.env.observation_space.shape[0]  # kept for reference; unused below
        action_size = self.env.action_space.n
        self.batch_size = batch_size
        self.gamma = 1.0            # discount factor
        self.epsilon = 1.0          # exploration rate, annealed during training
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.IM_SIZE = IM_SIZE
        self.m = m                  # number of stacked frames per state
        alpha = 0.01                # learning rate
        alpha_decay = 0.01
        if MONITOR:
            self.env = gym.wrappers.Monitor(self.env, '../data/' + env_string, force=True)

        # Q-network: three conv layers then two dense layers, one linear
        # output per action (Q-values).
        self.model = Sequential()
        self.model.add(Conv2D(32, 8, (4, 4), activation='relu', padding='valid',
                              input_shape=(IM_SIZE, IM_SIZE, m)))
        self.model.add(Conv2D(64, 4, (2, 2), activation='relu', padding='valid'))
        self.model.add(MaxPooling2D())
        self.model.add(Conv2D(64, 3, (1, 1), activation='relu', padding='valid'))
        self.model.add(MaxPooling2D())
        self.model.add(Flatten())
        self.model.add(Dense(256, activation='elu'))
        self.model.add(Dense(action_size, activation='linear'))
        # NOTE(review): `lr`/`decay` are the legacy Keras optimizer kwargs;
        # recent TF releases expect `learning_rate` — confirm against the
        # installed TensorFlow version.
        self.model.compile(loss='mse', optimizer=Adam(lr=alpha, decay=alpha_decay))
        # Target network, periodically synced with the online network in train().
        self.model_target = tf.keras.models.clone_model(self.model)

    def remember(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def choose_action(self, state, epsilon):
        """Epsilon-greedy action selection: random with prob. epsilon, else argmax Q."""
        if np.random.random() <= epsilon:
            return self.env.action_space.sample()
        return np.argmax(self.model.predict(state))

    def preprocess_state(self, img):
        """Crop, grayscale, and resize one raw RGB frame to (IM_SIZE, IM_SIZE) float32."""
        img_temp = img[31:195]  # crop the playing field out of the Atari frame
        img_temp = tf.image.rgb_to_grayscale(img_temp)
        img_temp = tf.image.resize(img_temp, [self.IM_SIZE, self.IM_SIZE],
                                   method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        img_temp = tf.cast(img_temp, tf.float32)
        return img_temp[:, :, 0]  # drop the singleton grayscale channel

    def combine_images(self, img1, img2):
        """Append the newest frame to the frame stack.

        img1: newest preprocessed frame, shape (IM_SIZE, IM_SIZE).
        img2: previous stacked state of shape (1, IM_SIZE, IM_SIZE, m),
              or a bare frame right after a reset.
        Returns the updated stack of shape (1, IM_SIZE, IM_SIZE, m).

        BUGFIX: the original tested img1 (the new frame) for the stacked
        shape and sliced axis 0 while appending on axis 2, so the branch
        never matched and the history was rebuilt from the newest frame
        every step instead of being shifted.
        """
        if len(img2.shape) == 4 and img2.shape[3] == self.m:
            # Drop the oldest channel, append the new frame as the last one.
            im = np.append(img2[0, :, :, 1:], np.expand_dims(img1, 2), axis=2)
        else:
            # First frame of an episode: replicate it m times.
            im = np.stack([img1] * self.m, axis=2)
        return tf.expand_dims(im, 0)

    def replay(self, batch_size):
        """Fit the online network on one random minibatch from the buffer."""
        x_batch, y_batch = [], []
        minibatch = random.sample(self.memory, min(len(self.memory), batch_size))
        for state, action, reward, next_state, done in minibatch:
            y_target = self.model_target.predict(state)
            # NOTE(review): the bootstrap value below uses the online model;
            # a classic target-network setup would evaluate next_state with
            # model_target — confirm which variant is intended.
            y_target[0][action] = reward if done else reward + self.gamma * np.max(self.model.predict(next_state)[0])
            x_batch.append(state[0])
            y_batch.append(y_target[0])
        self.model.fit(np.array(x_batch), np.array(y_batch), batch_size=len(x_batch), verbose=0)

    def train(self):
        """Run up to EPOCHS episodes; return the 100-episode running-average history."""
        scores = deque(maxlen=100)
        avg_scores = []
        for e in range(EPOCHS):
            state = self.env.reset()
            state = self.preprocess_state(state)
            state = self.combine_images(state, state)
            done = False
            i = 0  # accumulated episode return
            while not done:
                action = self.choose_action(state, self.epsilon)
                next_state, reward, done, _ = self.env.step(action)
                next_state = self.preprocess_state(next_state)
                next_state = self.combine_images(next_state, state)
                self.remember(state, action, reward, next_state, done)
                state = next_state
                # Anneal exploration once per environment step.
                self.epsilon = max(self.epsilon_min, self.epsilon_decay * self.epsilon)
                i += reward

            scores.append(i)
            mean_score = np.mean(scores)
            avg_scores.append(mean_score)
            if mean_score >= THRESHOLD:
                print('Solved after {} trials ✔'.format(e))
                return avg_scores
            if e % 10 == 0:
                print('[Episode {}] - Average Score: {}.'.format(e, mean_score))
                # Sync the target network every 10 episodes.
                self.model_target.set_weights(self.model.get_weights())
            self.replay(self.batch_size)

        print('Did not solve after {} episodes 😞'.format(e))
        return avg_scores
# Train a DQN agent on the deterministic (frame-skipping) Breakout variant.
env_string = 'BreakoutDeterministic-v4'
agent = DQN(env_string)
# Print both network architectures as a sanity check.
agent.model.summary()
agent.model_target.summary()
# Run training; returns the running-average score per episode.
scores = agent.train()
import matplotlib.pyplot as plt
# Plot the learning curve.
plt.plot(scores)
plt.show()
# Close the environment (also finalizes the Monitor recording, if enabled).
agent.env.close()
```
| github_jupyter |
# Lesson 5: Tidy Data
*Learn to prepare data for visualization and analytics.*
## Instructions
This tutorial provides step-by-step training divided into numbered sections. The sections often contain embedded executable code for demonstration. This tutorial is accompanied by a practice notebook: [L05-Tidy_Data-Practice.ipynb](./L05-Tidy_Data-Practice.ipynb).
Throughout this tutorial sections labeled as "Tasks" are interspersed and indicated with the icon: . You should follow the instructions provided in these sections by performing them in the practice notebook. When the tutorial is completed you can turn in the final practice notebook.
## Introduction
The purpose of this assignment is to learn and practice with preparing tidy datasets. Often data we are asked to analyze is provided to us in formats that are not easy to visualize or analyze. Many visualization tools such as Seaborn or analytical tools such as supervised machine learning libraries expect data to be tidied. It is important to know what "tidy" data is, how to reformat a data into a tidy format, and to organize our own scientific data to help ourselves and others analyze it.
**What are "tidy" datasets?**
> Tidy datasets are easy to manipulate, model and visualize, and have a specific structure: each variable is a column, each observation is a row, and each type of observational unit is a table.
\- Wickham, Hadley. [Tidy Data](https://www.jstatsoft.org/article/view/v059i10). *Journal of Statistical Software*, 59.10 (2014): 1 - 23.
Before proceeding, fully read the [Tidy Data paper](https://www.jstatsoft.org/article/view/v059i10) (quoted above) by Hadley Wickham. Once finished, return here to reinforce the techniques introduced by that paper.
---
## 1. Getting Started
As before, we import any needed packages at the top of our notebook. Let's import Numpy and Pandas:
```
import numpy as np
import pandas as pd
```
#### Task 1a: Setup
<span style="float:right; margin-left:10px; clear:both;"></span>
Import the following packages:
+ `pandas` as `pd`
+ `numpy` as `np`
## 2. Tidy Rules
### 2.1 Recognizing data components
To understand the rules for tidy data, we should define a few terms: 'variable', 'observation' and 'observational unit'.
+ **variable**:
> A variable is a characteristic of a unit being observed... to which a numerical measure or a category... can be assigned (e.g. income, age, weight, etc., and “occupation”, “industry”, “disease”, etc.).
\- [OECD Glossary of Statistical terms -- Variable](https://stats.oecd.org/glossary/detail.asp?ID=2857)
+ **observation**:
> An observation is the value, at a particular period, of a particular variable
\- [OECD Glossary of Statistical terms -- Observation](https://stats.oecd.org/glossary/detail.asp?ID=6132)
+ **observational unit**:
> Observation units are those entities on which information is received and statistics are compiled.
\- [OECD Glossary of Statistical terms -- Observation Unit](https://stats.oecd.org/glossary/detail.asp?ID=1873)
With those definitions for reference, remember from the text that in order for a dataset to be considered "tidy" it must be organized into a table (i.e. Pandas DataFrame) and follow these rules:
+ Each variable forms a unique column in the data frame.
+ Each observation forms a row in the data frame.
+ Each **type** of observational unit needs its own table.
To demonstrate the meaning of these rules, let's first examine a dataset described in the Tidy Data paper. Execute the following lines of code that manually creates a Pandas data frame containing the example table:
```
# Rows of the example table from the Tidy Data paper: one row per person,
# one column per treatment result.
data = [['John Smith', None, 2],
        ['Jane Doe', 16, 11],
        ['Mary Johnson', 3, 1]]
# Column labels; the person-name column is intentionally left unnamed.
headers = ['', 'Treatment_A', 'Treatment_B']
# Build and display the example data frame.
pd.DataFrame(data, columns=headers)
```
This data is not in tidy format. Can you see why?
#### Task 2a: Understand the data
<span style="float:right; margin-left:10px; clear:both;">
</span>
Using the table above, answer the following:
- What are the variables?
- What are the observations?
- What is the observable unit?
- Are the variables columns?
- Are the observations rows?
### 2.2 Spotting messy data
The author provides a few useful indicators that help us spot untidied data:
1. Column headers are values, not variable names.
2. Multiple variables are stored in one column.
3. Variables are stored in both rows and columns.
4. Multiple types of observational units are stored in the same table.
5. A single observational unit is stored in multiple tables.
As an example, let's look at a data set that the author borrowed from the Pew Research Center that provides religious affiliation and yearly income ranges for individuals surveyed. Execute the following code, which manually puts that data into a Pandas data frame:
```
# Pew religion-by-income survey counts (first ten rows of the paper's table).
data = [['Agnostic', 27, 34, 60, 81, 76, 137],
        ['Atheist', 12, 27, 37, 52, 35, 70],
        ['Buddhist', 27, 21, 30, 34, 33, 58],
        ['Catholic', 418, 617, 732, 670, 638, 1116],
        ["Don't know/refused", 15, 14, 15, 11, 10, 35],
        ['Evangelical Prot', 575, 869, 1064, 982, 881, 1486],
        ['Hindu', 1, 9, 7, 9, 11, 34],
        ['Historically Black Prot', 228, 244, 236, 238, 197, 223],
        ["Jehovah's Witness", 20, 27, 24, 24, 21, 30],
        ['Jewish', 19, 19, 25, 25, 30, 95]]
# Income ranges appear as column headers — values, not variable names (untidy).
headers = ['religion', '<$10k', '$10-20k', '$20-30k', '$30-40k', '$40-50k', '$50-75k']
religion = pd.DataFrame(data, columns=headers)
religion
```
#### Task 2b: Explain causes of untidyness
<span style="float:right; margin-left:10px; clear:both;">
</span>
Using the data set above:
- Explain why the data above is untidy?
- What are the variables?
- What are the observations?
As another example, consider the data frame also provided by the author. For this data, the demographic groups are broken down by sex (m, f) and age (0–14, 15–25, 25–34, 35–44, 45–54, 55–64, 65+, or unknown). Execute the following:
```
# Tuberculosis case counts by country/year; sex and age range are fused into
# each column label (e.g. 'm1524' = male, 15-24), which is untidy.
data = [['AD', 2000, 0, 0, 1, 0, 0, 0, 0, None, None],
        ['AE', 2000, 2, 4, 4, 6, 5, 12, 10, None, 3],
        ['AF', 2000, 52, 228, 183, 149, 129, 94, 80, None, 93],
        ['AG', 2000, 0, 0, 0, 0, 0, 0, 1, None, 1],
        ['AL', 2000, 2, 19, 21, 14, 24, 19, 16, None, 3],
        ['AM', 2000, 2, 152, 130, 131, 63, 26, 21, None, 1],
        ['AN', 2000, 0, 0, 1, 2, 0, 0, 0, None, 0],
        ['AO', 2000, 186, 999, 1003, 912, 482, 312, 194, None, 247],
        ['AR', 2000, 97, 278, 594, 402, 419, 368, 330, None, 121],
        ['AS', 2000, None, None, None, None, 1, 1, None, None, None]]
headers = ['country', 'year', 'm014', 'm1524', 'm2534', 'm3544', 'm4554', 'm5564',
           'm65', 'mu', 'f014']
demographics = pd.DataFrame(data, columns=headers)
demographics
```
#### Task 2c: Explain causes of untidyness
<span style="float:right; margin-left:10px; clear:both;">
</span>
Using the data set above:
- Explain why the data above is untidy?
- What are the variables?
- What are the observations?
---
## 3. Melting Data
In the Tidy paper, the author indicated that many times a data set can be corrected, or tidied, by first "melting" the data. Fortunately, Pandas provides the `pd.melt` function! See the [online documenation for pd.melt](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.melt.html) for full usage instructions. The author provides five different use cases where melting (and other transformations) can be performed:
1. Column headers are values, not variable names.
2. Multiple variables are stored in one column.
3. Variables are stored in both rows and columns.
4. Multiple types of observational units are stored in the same table.
5. A single observational unit is stored in multiple tables.
We will explore only a few of these use cases. However, the techniques provided by these examples will help with melting for all of them.
### 3.1 Use Case #1: column headers are values
To demonstrate melting, let's create a sample dataframe that provides the progress level of different groups of individuals in a process that has two stages:
```
# Progress of three groups at each of two stages. Untidy: the stage is
# encoded in the column header instead of living in its own column.
df = pd.DataFrame({'Group': ['A', 'B', 'C'],
                   'Stage1': [1, 3, 5],
                   'Stage2': [2, 4, 6]})
df
```
It's clear that this dataset does not follow tidy rules. This is because information about the stage is housed in the header (i.e. two different stages: stage1 and stage2). To tidy this up, we should have a separate column that indicates the stage and a corresponding column that indicates the observation for each stage.
The first step to correct this is to melt the data. To melt a dataset using Pandas, you must indicate which columns in the current data frame should be kept as columns and which columns should be melted (also called **unpivoted**) to rows. This is indicated using two arguments provided to `pd.melt`:
- `id_vars`: indicates the columns to use as identifier variables. These columns remain as columns in the dataframe after melting.
- `value_vars`: indicates the columns to melt (unpivot). If not specified, then all columns that are not set as `id_vars` are used.
- The column header becomes a value in a new column
- The value within the original column is matched with the header value in an adjacent column.
As an example, let's melt the example dataframe:
```
# Melt: 'Group' stays as the identifier column; the two stage columns are
# unpivoted into (variable, value) rows.
df2 = df.melt(id_vars=['Group'], value_vars=['Stage1', 'Stage2'])
df2
```
Observe that the new column labels named 'variable' and 'value' do not indicate what data the column contains. We can either set these manually using:
```python
df2.columns = ['Group', 'Stage', 'Level']
```
Or, we can provide the new labels when we melt the data using the `var_name` and `value_name` arguments:
```
# Same melt, but name the new columns up front via var_name/value_name
# instead of renaming them afterwards.
df2 = df.melt(id_vars=['Group'], value_vars=['Stage1', 'Stage2'],
              var_name='Stage', value_name='Level')
df2
```
#### Task 3a: Melt data, use case #1
<span style="float:right; margin-left:10px; clear:both;">
</span>
Using the `pd.melt` function, melt the demographics data introduced in section 2. Be sure to:
- Set the column headers correctly.
- Order by country
- Print the first 10 lines of the resulting melted dataset.
### 3.2 Use Case #2: multiple variables stored in one column
Sometimes, melting the data is not enough. Consider the demographics example where the sex and the age range are combined into a single column label. In Task 3a we melted that dataset:
<table>
<tr><th>country</th><th>year</th><th>age</th><th>freq</th></tr>
<tr><td>AD</td><td>2000</td><td>m014</td><td>0</td></tr>
<tr><td>AD</td><td>2000</td><td>m5564</td><td>0</td></tr>
<tr><td>AD</td><td>2000</td><td>m3544</td><td>0</td></tr>
<tr><td>AD</td><td>2000</td><td>m65</td><td>0</td></tr>
<tr><td>AD</td><td>2000</td><td>m2534</td><td>1</td></tr>
<tr><td>AD</td><td>2000</td><td>mu</td><td>None</td></tr>
<tr><td>AD</td><td>2000</td><td>m1524</td><td>0</td></tr>
<tr><td>AD</td><td>2000</td><td>f014</td><td>NaN</td></tr>
<tr><td>AD</td><td>2000</td><td>m4554</td><td>0</td></tr>
<tr><td>AE</td><td>2000</td><td>m5564</td><td>12</td></tr>
</table>
We need to split that `age` column into three different columns corresponding to the sex, minimum age and maximum age. To do this, we can use the following line of code:
```Python
temp_df = melted_df["age"].str.extract("(\D)(\d+)(\d{2})")
```
Remember, that Pandas provides a [pandas.Series.str.extract](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.extract.html) function for manipulating the string values of a Series, and each column in a Pandas dataframe is a series. We can use this function to break apart the value into three separate columns.
Observe the argument provided to the `.str.extract` function: `(\D)(\d+)(\d{2})`. This type of string is called a regular expression (RE). We will not cover regular expressions in detail, but they are a powerful method for parsing strings to either match elements of the string or to split them. An [introduction to REs](https://docs.python.org/3.4/howto/regex.html#regex-howto) for Python and [a full syntax description](https://docs.python.org/3.4/library/re.html#regular-expression-syntax) is available online. But here is a short explanation for the elements of the RE above:
+ `(\D)`: Matches any single character which is not a digit. This corresponds to the sex: 'f' or 'm'.
+ `(\d+)`: Matches one or more digits. This corresponds to the minimum age, which may be one or more digits.
+ `(\d{2})`: Matches exactly two digits. This requires that the last two digits are the max age.
Let's try it and see how it works:
```
# Melt the demographics table, keeping country/year as identifiers, and
# order the resulting rows by country.
melted_df = demographics.melt(id_vars=["country", "year"],
                              var_name="age", value_name="freq")
melted_df = melted_df.sort_values(by=["country"])

# Break the composite 'age' label (e.g. 'm1524') into its three parts:
# sex (one non-digit), minimum age (one or more digits), maximum age (two digits).
temp_df = melted_df["age"].str.extract(r"(\D)(\d+)(\d{2})")
temp_df.columns = ['sex', 'min_age', 'max_age']
temp_df.head(10)
```
### 3.3 Use Case #3: variables are in both rows and columns
Consider the following dataset which contains the daily weather records for five months in 2010 for the MX17004 weather station in Mexico. Each day of the month has its own column (e.g. d1, d2, d3, etc.). The example data only provides the first 8 days:
```
# Daily min/max temperatures for station MX17004 (days d1..d8 only).
# Untidy twice over: days are column headers, and 'element' stores the
# variable names tmax/tmin as values.
data = [['MX17004', 2010, 1, 'tmax', None, None, None, None, None, None, None, None],
        ['MX17004', 2010, 1, 'tmin', None, None, None, None, None, None, None, None],
        ['MX17004', 2010, 2, 'tmax', None, 27.3, 24.1, None, None, None, None, None],
        ['MX17004', 2010, 2, 'tmin', None, 14.4, 14.4, None, None, None, None, None],
        ['MX17004', 2010, 3, 'tmax', None, None, None, None, 32.1, None, None, None],
        ['MX17004', 2010, 3, 'tmin', None, None, None, None, 14.2, None, None, None],
        ['MX17004', 2010, 4, 'tmax', None, None, None, None, None, None, None, None],
        ['MX17004', 2010, 4, 'tmin', None, None, None, None, None, None, None, None],
        ['MX17004', 2010, 5, 'tmax', None, None, None, None, None, None, None, None],
        ['MX17004', 2010, 5, 'tmin', None, None, None, None, None, None, None, None]]
headers = ['id', 'year', 'month', 'element', 'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8']
weather = pd.DataFrame(data, columns=headers)
weather
```
In this dataset there are two problems. First, we have a violation of use case #1 where observations are stored in the column labels for the days (e.g. d1, d2, d3, etc.). Second, we have a violation of use case #3. Observe that the 'element' column contains values that should be variables. We want the min and max temperatures for each day as columns.
First, let's deal with the first problem by including `id`, `year`, `month` and `element` as `id_vars`. Observe that we will currently not try to tidy the `element` column. We want to remove the 'd' from the day so let's name the column `temp_day`:
```
# Unpivot the day columns; the raw day label (d1, d2, ...) is kept in
# 'temp_day' for now and cleaned up in the next step.
melted_weather = weather.melt(id_vars=['id', 'year', 'month', 'element'],
                              var_name='temp_day', value_name='temperature')
melted_weather.head(10)
```
Now, let's create an actual date for the measurement rather than storing year, month and day separately. Let's add a new column to the dataframe named 'day' that uses a regular expression to remove the letter 'd' from the beginning of the day.
```
# Strip the leading 'd' from the day label (e.g. 'd3' -> '3'); expand=False
# keeps the result as a Series so it can be assigned as a single column.
melted_weather["day"] = melted_weather["temp_day"].str.extract("d(\d+)", expand=False)
melted_weather.head(10)
```
We can now combine the year, month and day to form a proper date using the Pandas `apply` function. Execute the code below and observe the in-line comments for the meaning of each line of code:
```
# Import the datetime library.
import datetime

# The year, month, and day columns are currently strings; coerce them to
# numbers so they can feed the datetime constructor.
melted_weather[["year", "month", "day"]] = melted_weather[["year", "month", "day"]].apply(pd.to_numeric)

# Convert temperature to numeric as well.
melted_weather[["temperature"]] = melted_weather[["temperature"]].apply(pd.to_numeric)

def create_date(row):
    # Combine one row's year/month/day into a proper datetime object.
    return datetime.datetime(year=row["year"], month=int(row["month"]), day=row["day"])

# Apply create_date row-wise to populate the new "date" column.
melted_weather["date"] = melted_weather.apply(lambda row: create_date(row), axis=1)

# Now take a look!
melted_weather.head(10)
```
Now that we have our date corrected, and properly melted, we can address the second problem: the `element` column containing variable names. To fix this we need to do the opposite of melting and we need to **pivot**. To do this we can use the [pd.pivot](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.pivot.html) function. This function takes the following arguments:
- `index`: indicates the columns to use to make the new frame’s index. If None, uses existing index
- `columns`: indicates the column to use whose values will become the new frame’s columns.
- `values`: indicates the columns to use for populating new frame’s values.
Let's use the `pivot_table` function, which is a generalization of the `pivot` function that handles duplicate values for one index/column pair. This will move the `element` column values to be new columns in our data frame. But first, we will also want to drop unwanted columns:
```
# Drop the columns that are now folded into 'date' (plus the raw day label).
weather_min = melted_weather.drop(['year', 'month', 'day', 'temp_day'], axis=1)
weather_min.head(10)

# Pivot: the tmax/tmin values stored in 'element' become their own columns.
# pivot_table also drops rows whose value is null.
weather_tidy = weather_min.pivot_table(index=["id", "date"], columns="element", values="temperature")
weather_tidy.reset_index(drop=False, inplace=True)
weather_tidy
```
The weather data is now tidy (although rather small).
Observe, that in the code above, we called the function `reset_index` on the Tidy'ed weather data. If we do not do this, then the row indexes are not incremental within the data frame.
#### Task 3b: Practice with a new dataset
<span style="float:right; margin-left:10px; clear:both;">
</span>
Download the [PI_DataSet.txt](https://hivdb.stanford.edu/download/GenoPhenoDatasets/PI_DataSet.txt) file from [HIV Drug Resistance Database](https://hivdb.stanford.edu/pages/genopheno.dataset.html). Store the file in the same directory as the practice notebook for this assignment.
Here is the meaning of data columns:
- SeqID: a numeric identifier for a unique HIV isolate protease sequence. Note: disruption of the protease inhibits HIV’s ability to reproduce.
- The Next 8 columns are identifiers for unique protease inhibitor class drugs.
- The values in these columns are the fold resistance over wild type (the HIV strain susceptible to all drugs).
- Fold change is the ratio of the drug concentration needed to inhibit the isolate.
- The latter columns, with P as a prefix, are the positions of the amino acids in the protease.
- '-' indicates consensus.
- '.' indicates no sequence.
- '#' indicates an insertion.
- '~' indicates a deletion.
- '*' indicates a stop codon
- a letter indicates one letter Amino Acid substitution.
- two and more amino acid codes indicates a mixture.
Import this dataset into your notebook, view the top few rows of the data and respond to these questions:
- What are the variables?
- What are the observations?
- What are the values?
#### Task 3c: Practice with a new dataset Part 2
<span style="float:right; margin-left:10px; clear:both;">
</span>
Using the data retrieved from Task 3b, generate a data frame containing a tidied set of values for drug concentration fold change. Be sure to:
- Set the column names as ‘SeqID’, ‘Drug’ and ‘Fold_change’.
- Order the data frame first by sequence ID and then by Drug name
- Reset the row indexes
- Display the first 10 elements.
| github_jupyter |
```
import os
import pandas as pd
import time
import statsmodels.api as sm
import sklearn.utils as utils
import matplotlib.pyplot as plt
%matplotlib inline
start_time = time.time()

# Massachusetts locations of interest; cities belonging to the Greater
# Boston Area are collapsed into one "Greater_Boston_Area" bucket below.
MA_location = {"Greater_Boston_Area": 0,
               "Salem": 0,
               "Plymouth": 0,
               "Waltham": 0,
               "Framingham": 0,
               "Worcester": 0,
               "Lexington": 0,
               "Danvers": 0,
               }
Greater_Boston_Area = {"Boston": 0,
                       "Providence": 0,
                       "Lowell": 0,
                       "Cambridge": 0,
                       "Quincy": 0,
                       "Newton": 0
                       }

# Destination frame for the rows we keep.
data = pd.DataFrame(columns=["Company", "Title", "Location", "Rating",
                             "Work/Life Balance", "Benefit", "Security", "Culture"])
# BUGFIX: the original printed the undefined name MA_cities (NameError);
# the dict was renamed MA_location.
print(MA_location)

path = os.getcwd()
print("Path is :", path)
# pd.read_csv accepts a path directly; this also avoids leaking the open
# file handle the original never closed.
df = pd.read_csv("Merged_Company_Information.csv")

# Copy matching rows, renaming Greater-Boston cities to the bucket name.
index = 0
for i in range(len(df.index)):
    if df["Location"][i] in MA_location.keys():
        data.loc[index] = df.loc[i]
        index = index + 1
    elif df["Location"][i] in Greater_Boston_Area.keys():
        data.loc[index] = df.loc[i]
        # BUGFIX: DataFrame.set_value was deprecated and removed in pandas
        # 1.0; .at is the supported scalar setter.
        data.at[index, "Location"] = "Greater_Boston_Area"
        index = index + 1

print("Done!!!-------- %s seconds--------" % (time.time() - start_time))
data
data.to_csv("collected_data.csv")
def findMean(lst):
    """Return the mean of the numeric entries in lst, skipping "none" markers.

    Values are coerced with int() (matching the original behavior on the
    string-valued rating columns). Returns 0.0 when no numeric entry exists
    — the original raised ZeroDivisionError in that case.
    """
    total = 0
    num = 0
    for i in lst:
        if i != "none":
            total = total + int(i)
            num = num + 1
    # Guard against an all-"none" column.
    return float(total) / float(num) if num else 0.0
# Normalize the collected data into numeric feature rows.
start_time = time.time()
data_list = []

# Column means, used to impute missing ("none") entries.
avg_work = findMean(data["Work/Life Balance"])
avg_benefit = findMean(data["Benefit"])
avg_security = findMean(data["Security"])
avg_culture = findMean(data["Culture"])

# Ordered keyword -> 1-based category code; the first match wins, and a
# title matching no keyword gets category 0 (same semantics as the
# original if/elif chain).
title_keywords = ["Engineer", "Analyst", "Consultant", "Writer", "Account",
                  "Marketing", "Research", "Clinic", "Develop"]
# Hoisted out of the row loop (the original rebuilt this list per row):
# location name -> 1-based code; unknown locations map to 0.
location_codes = {name: k + 1 for k, name in enumerate(MA_location.keys())}

def _impute(value, fallback):
    # Replace the "none" placeholder with the column average.
    return value if value != "none" else fallback

for i in range(len(data.index)):
    title = data["Title"][i]
    category = 0
    for code, keyword in enumerate(title_keywords, start=1):
        if title.find(keyword) != -1:
            category = code
            break
    temp = [
        category,
        location_codes.get(data["Location"][i], 0),
        data["Rating"][i],
        _impute(data["Work/Life Balance"][i], avg_work),
        _impute(data["Benefit"][i], avg_benefit),
        _impute(data["Security"][i], avg_security),
        _impute(data["Culture"][i], avg_culture),
    ]
    data_list.append(temp)

print("Done!!!-------- %s seconds--------" % (time.time()-start_time))
df2 = pd.DataFrame(data_list, columns=["Category", "Location", "Rating", "Work/Life Balance", "Benefit", "Security", "Culture"])
df2
# Plot the raw ratings, then fit an ordinary-least-squares model of rating
# on the normalized features.
feature_cols = ["Category", "Location", "Work/Life Balance", "Benefit", "Security", "Culture"]
X = df2[feature_cols]
Y = df2["Rating"]
plt.scatter(range(len(Y)), Y, c="slategray", alpha=0.3, linewidths=0.2)
# Shuffle rows with a fixed seed before fitting.
X, Y = utils.shuffle(X, Y, random_state=1)
model = sm.OLS(Y.astype(float), X.astype(float))
result1 = model.fit()
print(result1.summary())
# Binary label: 1 when the rating is above 3 ("good" company), else 0.
# (Idiomatic comprehension replaces the original index-based append loop.)
good = [1 if rating > 3 else 0 for rating in df2["Rating"]]

cols = ["Category", "Location", "Work/Life Balance", "Benefit", "Security", "Culture"]
logit = sm.Logit(good, df2[cols].astype(float))
# Fit the logistic-regression model.
result2 = logit.fit()
result2.summary()
# According to the results provided by both Linear Regression and Logistic
# Regression, Benefit always has a larger influence on rating among all
# attributes.
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import sys
import argparse as argp
# Load the cleaned Aagos run statistics, indexed by generation ("update").
change_50_dat = pd.read_csv('/Users/leg2015/workspace/Aagos/Data/Mut_Treat_Change_50_CleanedDataStatFit.csv', index_col="update", float_precision="high")
change_0_dat = pd.read_csv('/Users/leg2015/workspace/Aagos/Data/Mut_Treat_Change_0_CleanedDataStatFit.csv', index_col="update", float_precision="high")
mut_dat = pd.read_csv('/Users/leg2015/workspace/Aagos/Data/Change_Treat_f_.003_CleanedDataStatFit.csv', index_col="update", float_precision="high")
# max_gen_data = all_dat.loc[49000]
# early_gen_data = all_dat.loc[10000]
# Snapshot each treatment at the final recorded generation (50,000).
change_50_max = change_50_dat.loc[50000]
change_0_max = change_0_dat.loc[50000]
mut_max = mut_dat.loc[50000]
# NOTE(review): max_gen_change_data is never defined in this file — this cell
# presumably ran against a kernel where it existed interactively. Confirm the
# intended frame (change_50_max?) before rerunning.
change_50_plot = sns.boxplot(y=max_gen_change_data["mean_coding_sites"], x="change", data=max_gen_change_data)
plt.suptitle("gen 49000 mean coding sites ")
plt.savefig("Change_m_.003_c_.01_f_.001_mean_coding_2.pdf")
# NOTE(review): max_gen_data is only defined in the commented-out lines above;
# the projections below fail on a fresh kernel — verify before rerunning.
max_gen_fitness = max_gen_data.loc[:,[ 'max_fitness', 'c', 'm', 'f', 'replicate']]
max_gen_gene_len = max_gen_data.loc[:, ['max_gene_length', 'c', 'm', 'f', 'replicate']]
max_gen_overlap = max_gen_data.loc[:,[ 'max_overlap', 'c', 'm', 'f', 'replicate']]
max_gen_coding = max_gen_data.loc[:,[ 'max_coding_sites', 'c', 'm', 'f', 'replicate']]
max_gen_neutral = max_gen_data.loc[:,[ 'max_neutral_sites', 'c', 'm', 'f', 'replicate']]
max_gen_neighbor = max_gen_data.loc[:,[ 'max_neighbor_genes', 'c', 'm', 'f', 'replicate']]
# One boxplot grid per (c, m) pair, with f on the x-axis.
facet = sns.FacetGrid(max_gen_gene_len, col="c", row="m",)
facet.map_dataframe(sns.boxplot, x="f", y="max_gene_length")
for axis in facet.axes.flat:
    axis.set_xlabel("f")
plt.subplots_adjust(top=0.95)
facet.fig.suptitle('Gen 49,000 max gene length') # can also get
facet.savefig("Mut_Rate_Low_max_genlen.pdf")
# TODO: figure early_gen_neighbor how to save pdfs to figure directory
# also way so don't have to boilerplate would be nice
# NOTE(review): early_gen_rep_gene_len, early_gen_overlap and
# early_gen_fitness (used below) are also undefined in this file.
facet = sns.FacetGrid(early_gen_rep_gene_len, col="c", row="m",)
facet.map_dataframe(sns.boxplot, x="f", y="genome_size")
for axis in facet.axes.flat:
    axis.set_xlabel("f")
plt.subplots_adjust(top=0.95)
facet.fig.suptitle('Gen 10,000 genome size of representative org') # can also get
facet.savefig("early_gen_rep_gene_len.pdf")
# NOTE(review): max_gen_overlap selects 'max_overlap' but the plot uses
# y="mean_Overlap" — confirm which column was intended.
facet = sns.FacetGrid(max_gen_overlap, col="c", row="m",)
facet.map_dataframe(sns.boxplot, x="f", y="mean_Overlap")
for axis in facet.axes.flat:
    axis.set_xlabel("f")
plt.subplots_adjust(top=0.95)
facet.fig.suptitle('Gen 49,000 mean overlap') # can also get
facet.savefig("max_gen_mean_overlap.pdf")
facet = sns.FacetGrid(early_gen_overlap, col="c", row="m",)
facet.map_dataframe(sns.boxplot, x="f", y="mean_Overlap")
for axis in facet.axes.flat:
    axis.set_xlabel("f")
plt.subplots_adjust(top=0.95)
facet.fig.suptitle('Gen 10,000 mean overlap') # can also get
facet.savefig("early_gen_mean_overlap.pdf")
facet = sns.FacetGrid(early_gen_fitness, col="c", row="m",)
facet.map_dataframe(sns.boxplot, x="f", y="mean_fitness")
for axis in facet.axes.flat:
    axis.set_xlabel("f")
plt.subplots_adjust(top=0.95)
facet.fig.suptitle('Gen 10,000 mean fitness') # can also get
facet.savefig("early_gen_mean_fitness.pdf")
# NOTE(review): max_gen_fitness selects 'max_fitness' but the plot uses
# y="mean_fitness" — confirm which column was intended.
facet = sns.FacetGrid(max_gen_fitness, col="c", row="m",)
facet.map_dataframe(sns.boxplot, x="f", y="mean_fitness")
# for plot in plt.subplots():
#     print(plot.AxesSubplot)
# print(facet.axes[4,0].get_yticklabels())
# print(facet.axes[4,0].get_xticklabels())
# print("\n")
# labels = ["meme1", "meme2"]
for axis in facet.axes.flat:
    # locs, labels = axis.xticks()
    # axis.set_yticklabels(labels)
    axis.set_xlabel("f")
    # _ = plt.setp(axis.get_yticklabels(), visible=True)
    # _ = plt.setp(axis.get_xticklabels(), visible=True)
    # axis.set_yticklabels(axis.get_yticklabels(), visible=True)
    # print(axis.get_xticklabels())
    # axis.set_xticklabels(axis.get_xticklabels(), visible=True)
    # axis.set_yticklabels(facet.axes[4,0].get_yticklabels(), visible=True)
    # axis.set_xticklabels(facet.axes[4,0].get_xticklabels(), visible=True)
plt.subplots_adjust(hspace=0.3)
# yticklabels = facet.axes[4,0].get_yticklabels()
# xticklabels = facet.axes[4,0].get_xticklabels()
# meme = []
# mema = []
# for ax in facet.axes[-1,:]:
#     xlabel = ax.get_xticklabels()
#     print("x lab ", xlabel)
#     meme.append(xlabel)
# for ax in facet.axes[:,0]:
#     ylabel = ax.get_xticklabels()
#     print("y lab", ylabel)
#     mema.append(ylabel)
# for i in range(len(meme)):
#     for j in range(len(mema)):
#         facet.axes[j,i].set_xticklabels("meme")
#         facet.axes[j,i].set_yticklabels("mema")
# for ax in facet.axes:
#     _ = plt.setp(ax.get_yticklabels(), visible=True)
#     _ = plt.setp(ax.get_xticklabels(), visible=True)
# for ax in facet.axes:
plt.subplots_adjust(top=0.95)
facet.fig.suptitle('Gen 49,000 mean fitness') # can also get
facet.savefig("max_gen_mean_fitness.pdf")
# NOTE(review): the block below duplicates the one above almost verbatim
# (only the plt.show() and the output filename differ) — candidate for a
# helper function.
facet = sns.FacetGrid(max_gen_fitness, col="c", row="m",)
facet.map_dataframe(sns.boxplot, x="f", y="mean_fitness")
# for plot in plt.subplots():
#     print(plot.AxesSubplot)
# print(facet.axes[4,0].get_yticklabels())
# print(facet.axes[4,0].get_xticklabels())
# print("\n")
# labels = ["meme1", "meme2"]
for axis in facet.axes.flat:
    # locs, labels = axis.xticks()
    # axis.set_yticklabels(labels)
    axis.set_xlabel("f")
    # _ = plt.setp(axis.get_yticklabels(), visible=True)
    # _ = plt.setp(axis.get_xticklabels(), visible=True)
    # axis.set_yticklabels(axis.get_yticklabels(), visible=True)
    # print(axis.get_xticklabels())
    # axis.set_xticklabels(axis.get_xticklabels(), visible=True)
    # axis.set_yticklabels(facet.axes[4,0].get_yticklabels(), visible=True)
    # axis.set_xticklabels(facet.axes[4,0].get_xticklabels(), visible=True)
plt.subplots_adjust(hspace=0.3)
# yticklabels = facet.axes[4,0].get_yticklabels()
# xticklabels = facet.axes[4,0].get_xticklabels()
# meme = []
# mema = []
# for ax in facet.axes[-1,:]:
#     xlabel = ax.get_xticklabels()
#     print("x lab ", xlabel)
#     meme.append(xlabel)
# for ax in facet.axes[:,0]:
#     ylabel = ax.get_xticklabels()
#     print("y lab", ylabel)
#     mema.append(ylabel)
# for i in range(len(meme)):
#     for j in range(len(mema)):
#         facet.axes[j,i].set_xticklabels("meme")
#         facet.axes[j,i].set_yticklabels("mema")
# for ax in facet.axes:
#     _ = plt.setp(ax.get_yticklabels(), visible=True)
#     _ = plt.setp(ax.get_xticklabels(), visible=True)
# for ax in facet.axes:
plt.show()
facet.savefig("max_gen__mean_fitness.pdf")
# NOTE(review): `group` is undefined in this file — presumably a
# DataFrame.groupby iterator from an interactive session; verify.
for curr in group:
    plt.scatter((curr[1].m + curr[1].f + curr[1].c), curr[1].mean_fitness)
plt.show()
# Small slices used for experimenting with boxplot inputs.
playData = max_gen_fitness.iloc[0:5]
playData
memes = max_gen_fitness.iloc[20:25]
memes
plt.boxplot(playData.mean_fitness)
plt.show()
# plt.boxplot(playData.mean_fitness)
# plt.boxplot(memes.mean_fitness)
superData = [playData.mean_fitness, memes.mean_fitness]
plt.boxplot(superData)
plt.show()
```
| github_jupyter |
# TV Script Generation
In this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern).
## Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
```
## Explore the Data
Play around with `view_sentence_range` to view different parts of the data.
```
# Window of raw-text lines to display at the end of this cell.
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
# Scenes are separated by blank lines; sentences by single newlines.
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
```
## Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
### Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call `vocab_to_int`
- Dictionary to go from the id to word, we'll call `int_to_vocab`
Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
```
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Rank words by frequency so the most common word gets id 0.
    counts = Counter(text)
    ranked_vocab = sorted(counts, key=counts.get, reverse=True)
    vocab_to_int = {}
    int_to_vocab = {}
    for word_id, word in enumerate(ranked_vocab):
        vocab_to_int[word] = word_id
        int_to_vocab[word_id] = word
    return (vocab_to_int, int_to_vocab)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
```
### Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around it. This separates each symbol as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
```
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Pair each punctuation symbol with its delimiter-safe token.
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']
    tokens = ['||Period||', '||Comma||', '||Quotation_Mark||', '||Semicolon||',
              '||Exclamation_mark||', '||Question_mark||', '||Left_Parentheses||',
              '||Right_Parentheses||', '||Dash||', '||Return||']
    return dict(zip(symbols, tokens))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
```
## Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
```
# Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
```
## Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
### Check the Version of TensorFlow and Access to GPU
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
```
### Input
Implement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple `(Input, Targets, LearningRate)`
```
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # The input placeholder is explicitly named "input" so it can be fetched
    # later via get_tensor_by_name('input:0'). The local variable is named
    # input_ to avoid shadowing the builtin input().
    input_ = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input')
    targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='targets')
    learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')
    return (input_, targets, learning_rate)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
```
### Build RNN Cell and Initialize
Stack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).
- The Rnn size should be set using `rnn_size`
- Initialize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell#zero_state) function
- Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)
Return the cell and initial state in the following tuple `(Cell, InitialState)`
```
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    layers_count = 2
    # FIX: build a distinct BasicLSTMCell per layer. The original used
    # [cell] * layers_count, which stacks the SAME cell object twice —
    # in TF1 that makes the layers share weights and can raise shape
    # errors when the stack is unrolled.
    cells = [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(layers_count)]
    multi = tf.contrib.rnn.MultiRNNCell(cells)
    # Name the zero state so it can be fetched later as 'initial_state:0'.
    initial_state = tf.identity(
        input=multi.zero_state(batch_size, tf.float32), name='initial_state')
    return (multi, initial_state)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
```
### Word Embedding
Apply embedding to `input_data` using TensorFlow. Return the embedded sequence.
```
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Embedding matrix, one embed_dim-wide row per vocabulary word,
    # initialized uniformly in [-1, 1).
    initializer = tf.random_uniform(
        shape=(vocab_size, embed_dim), minval=-1.0, maxval=1.0, dtype=tf.float32)
    embedding = tf.Variable(initial_value=initializer)
    # Replace each word id in input_data with its embedding vector.
    return tf.nn.embedding_lookup(embedding, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
```
### Build RNN
You created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.
- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)
- Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)
Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
```
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Unroll the cell dynamically over the time dimension of `inputs`.
    rnn_outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so it can be fetched as 'final_state:0' after restore.
    named_state = tf.identity(last_state, name='final_state')
    return (rnn_outputs, named_state)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
```
### Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.
- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.
- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
```
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # embedding -> RNN -> linear projection onto the vocabulary.
    embeddings = get_embed(input_data, vocab_size, embed_dim)
    rnn_outputs, final_state = build_rnn(cell, embeddings)
    # Linear activation (activation_fn=None): raw logits for sequence_loss.
    logits = tf.contrib.layers.fully_connected(
        rnn_outputs,
        vocab_size,
        activation_fn=None,
        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
        biases_initializer=tf.zeros_initializer())
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
```
### Batches
Implement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:
- The first element is a single batch of **input** with the shape `[batch size, sequence length]`
- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`
If you can't fill the last batch with enough data, drop the last batch.
For example, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2], [ 7 8], [13 14]]
# Batch of targets
[[ 2 3], [ 8 9], [14 15]]
]
# Second Batch
[
# Batch of Input
[[ 3 4], [ 9 10], [15 16]]
# Batch of targets
[[ 4 5], [10 11], [16 17]]
]
# Third Batch
[
# Batch of Input
[[ 5 6], [11 12], [17 18]]
# Batch of targets
[[ 6 7], [12 13], [18 1]]
]
]
```
Notice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.
```
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (number of batches, 2, batch size, sequence length)
    """
    words_per_batch = batch_size * seq_length
    number_of_batches = len(int_text) // words_per_batch
    # Keep only the words that fill complete batches; drop the tail.
    inputs = np.array(int_text[:number_of_batches * words_per_batch])
    # Targets are the inputs shifted one step left; the final target wraps
    # around to the first word of the corpus. FIX: the original built this
    # from the slice int_text[1 : N+1], which is one element short (and
    # crashes on reshape) whenever len(int_text) is an exact multiple of
    # batch_size * seq_length; np.roll handles the wraparound uniformly.
    targets = np.roll(inputs, -1)
    # Lay each array out as (batch, sequence) rows, then slice the columns
    # into number_of_batches chunks of seq_length each.
    inputs = inputs.reshape(batch_size, number_of_batches, seq_length).swapaxes(0, 1)
    targets = targets.reshape(batch_size, number_of_batches, seq_length).swapaxes(0, 1)
    return np.stack((inputs, targets), axis=1)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
```
## Neural Network Training
### Hyperparameters
Tune the following parameters:
- Set `num_epochs` to the number of epochs.
- Set `batch_size` to the batch size.
- Set `rnn_size` to the size of the RNNs.
- Set `embed_dim` to the size of the embedding.
- Set `seq_length` to the length of sequence.
- Set `learning_rate` to the learning rate.
- Set `show_every_n_batches` to the number of batches the neural network should print progress.
```
# Number of Epochs
num_epochs = 256
# Batch Size
batch_size = 64
# RNN Size
rnn_size = 512
# Embedding Dimension Size
embed_dim = 128
# Sequence Length
seq_length = 16
# Learning Rate
learning_rate = 0.001
# Show stats for every n number of batches
show_every_n_batches = 128
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Directory where the trained model checkpoint is written.
save_dir = './save'
```
### Build the Graph
Build the graph using the neural network you implemented.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
```
## Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
```
## Save Parameters
Save `seq_length` and `save_dir` for generating a new TV script.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
```
# Checkpoint
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
```
## Implement Generate Functions
### Get Tensors
Get tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
```
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Look up each named tensor in the order the caller expects.
    tensor_names = ('input:0', 'initial_state:0', 'final_state:0', 'probs:0')
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
```
### Choose Word
Implement the `pick_word()` function to select the next word using `probabilities`.
```
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Greedy decoding: always choose the highest-probability word id.
    best_id = int(np.argmax(probabilities))
    return int_to_vocab[best_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
```
## Generate TV Script
This will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
```
# Number of words to generate.
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)
    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
    # Sentences generation setup
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
    # Generate sentences
    for n in range(gen_length):
        # Dynamic Input: feed at most the last seq_length generated words.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])
        # Get Prediction
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})
        # NOTE(review): probabilities[dyn_seq_length-1] indexes the first
        # axis of the fetched probs tensor — confirm against probs' shape
        # that this selects the last time step as intended.
        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
        gen_sentences.append(pred_word)
    # Remove tokens: turn "||Period||" etc. back into punctuation.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')
    print(tv_script)
```
# The TV Script is Nonsensical
It's ok if the TV script doesn't make any sense. We trained on less than a megabyte of text. In order to get good results, you'll have to use a smaller vocabulary or get more data. Luckily there's more data! As we mentioned in the beginning of this project, this is a subset of [another dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data). We didn't have you train on all the data, because that would take too long. However, you are free to train your neural network on all the data. After you complete the project, of course.
# Submitting This Project
When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_tv_script_generation.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
| github_jupyter |
## Roadsigns Data Collection
# Installing Selenium
```
pip install selenium
```
### Starting the web driver
```
import selenium
from selenium import webdriver
# Put the path for your ChromeDriver here
# NOTE(review): backslashes in a non-raw Windows path risk being read as
# escape sequences — consider a raw string r'D:\...'; left unchanged here.
DRIVER_PATH = 'D:\Vedanth\proxvision\chromedriver'
# Launch Chrome and open Google to confirm the driver works.
wd = webdriver.Chrome(executable_path=DRIVER_PATH)
wd.get('https://google.com')
```
When you run the above two cells a window with google.com should open
Search for Dogs
```
# Locate Google's search box by its CSS class and type a query.
search_box = wd.find_element_by_css_selector('input.gLFyf')
search_box.send_keys('Dogs')
wd.quit()
#Close the driver
```
The function fetch_image_urls expects three input parameters:<br>
query : Search term, like Dog<br>
max_links_to_fetch : Number of links the scraper is supposed to collect<br>
webdriver : instantiated Webdriver
```
def fetch_image_urls(query:str, max_links_to_fetch:int, wd:webdriver, sleep_between_interactions:int=1):
    """Collect up to `max_links_to_fetch` full-size image URLs from a Google
    Images search for `query`, driving the supplied Selenium webdriver.

    :param query: search term
    :param max_links_to_fetch: number of links the scraper should collect
    :param wd: an instantiated webdriver
    :param sleep_between_interactions: seconds to pause after page interactions
    :return: set of image URL strings
    """
    import time  # FIX: `time` is used below but was never imported in this cell

    def scroll_to_end(wd):
        wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(sleep_between_interactions)

    # build the google query
    search_url = "https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img"
    # load the page
    wd.get(search_url.format(q=query))
    image_urls = set()
    image_count = 0
    results_start = 0
    while image_count < max_links_to_fetch:
        scroll_to_end(wd)
        # get all image thumbnail results
        thumbnail_results = wd.find_elements_by_css_selector("img.Q4LuWd")
        number_results = len(thumbnail_results)
        print(f"Found: {number_results} search results. Extracting links from {results_start}:{number_results}")
        for img in thumbnail_results[results_start:number_results]:
            # try to click every thumbnail such that we can get the real image behind it
            try:
                img.click()
                time.sleep(sleep_between_interactions)
            except Exception:
                continue
            # extract image urls
            actual_images = wd.find_elements_by_css_selector('img.n3VNCb')
            for actual_image in actual_images:
                if actual_image.get_attribute('src') and 'http' in actual_image.get_attribute('src'):
                    image_urls.add(actual_image.get_attribute('src'))
            image_count = len(image_urls)
            if len(image_urls) >= max_links_to_fetch:
                print(f"Found: {len(image_urls)} image links, done!")
                break
        else:
            # for-else: the pass over thumbnails finished without a break,
            # i.e. we are still short of max_links_to_fetch.
            print("Found:", len(image_urls), "image links, looking for more ...")
            time.sleep(30)
            # FIX: this was a bare `return` (returning None), which crashed
            # callers that iterate over the result; return the URLs collected
            # so far instead.
            # NOTE(review): returning here leaves the "load more" pagination
            # below unreachable on this path — confirm whether the scraper
            # was meant to keep paging until the cap is reached.
            return image_urls
        load_more_button = wd.find_element_by_css_selector(".mye4qd")
        if load_more_button:
            wd.execute_script("document.querySelector('.mye4qd').click();")
        # move the result startpoint further down
        results_start = len(thumbnail_results)
    return image_urls
```
### Downloading images with Pillow
```
pip install Pillow
```
The persist_image function grabs an image URL url and downloads it into the folder_path. The function will assign the image a random 10-digit id.
```
def persist_image(folder_path:str,url:str):
    """Download `url` and save it into `folder_path` as a JPEG named with the
    first 10 hex digits of the content's SHA-1 hash.

    :param folder_path: existing directory to write into
    :param url: image URL to download
    """
    try:
        image_content = requests.get(url).content
    except Exception as e:
        print(f"ERROR - Could not download {url} - {e}")
        # FIX: bail out on a failed download. Without this return the code
        # below still ran and raised a confusing NameError, because
        # image_content was never assigned.
        return

    try:
        image_file = io.BytesIO(image_content)
        image = Image.open(image_file).convert('RGB')
        file_path = os.path.join(folder_path,hashlib.sha1(image_content).hexdigest()[:10] + '.jpg')
        with open(file_path, 'wb') as f:
            image.save(f, "JPEG", quality=85)
        print(f"SUCCESS - saved {url} - as {file_path}")
    except Exception as e:
        print(f"ERROR - Could not save {url} - {e}")
```
Now let's combine above two functions and make a single short and concise function
```
import os
def search_and_download(search_term:str,driver_path:str,target_path='./images',number_images=5):
    """Scrape `number_images` Google Images results for `search_term` and save
    them under target_path/<search_term lowercased, spaces -> underscores>,
    creating the folder if needed."""
    folder_name = '_'.join(search_term.lower().split(' '))
    target_folder = os.path.join(target_path, folder_name)
    if not os.path.exists(target_folder):
        os.makedirs(target_folder)
    # Keep the browser open only while collecting URLs.
    with webdriver.Chrome(executable_path=driver_path) as wd:
        res = fetch_image_urls(search_term, number_images, wd=wd, sleep_between_interactions=0.5)
    # Download each collected URL into the target folder.
    for image_url in res:
        persist_image(target_folder, image_url)
```
## Entering out search terms and downloading the images
```
search_term = "dogs"
search_and_download(
search_term = search_term,
driver_path = DRIVER_PATH
)
```
| github_jupyter |
```
# -*- coding: utf-8 -*-
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df_train = pd.read_csv("../data/titanic_train.csv")
df_test = pd.read_csv("../data/titanic_test.csv")
df_train.head(5)
print(df_train.info())
print("-----------------")
print(df_test.info())
# Drop the name, ticket, body, cabin and home.dest features from the datasets.
df_train = df_train.drop(['name', 'ticket', 'body', 'cabin', 'home.dest'], axis=1)
df_test = df_test.drop(['name', 'ticket', 'body', 'cabin', 'home.dest'], axis=1)
print(df_train['survived'].value_counts())
df_train['survived'].value_counts().plot.bar()
# Split by the survived feature and inspect the distribution of pclass per group.
print(df_train['pclass'].value_counts())
# countplot(): shows the number of rows per category (x), split by hue.
ax = sns.countplot(x ='pclass', hue = 'survived', data = df_train)
from scipy import stats
# Define a helper that automates comparing a feature between the two groups.
def valid_features(df, col_name, distribution_check=True):
    """Automate exploratory comparison of one feature across the two
    survival groups (survived == 1 vs survived == 0).

    Plots per-group histograms, prints each group's standard deviation,
    runs a two-sample t-test (with and without the equal-variance
    assumption) and, optionally, a Shapiro-Wilk normality check.
    """
    # Histogram of col_name, one facet per survival outcome.
    # FacetGrid lays out a grid of sub-plots keyed on the 'survived' column.
    g = sns.FacetGrid(df, col='survived')
    g.map(plt.hist, col_name, bins=30)
    # Standard deviation of the feature among survivors...
    titanic_survived = df[df['survived']==1]
    titanic_survived_static = np.array(titanic_survived[col_name])
    print("data std is" '%.2f' % np.std(titanic_survived_static))
    # ...and among non-survivors.
    titanic_n_survived = df[df['survived']==0]
    titanic_n_survived_static = np.array(titanic_n_survived[col_name])
    print("data std is" '%.2f' % np.std(titanic_n_survived_static))
    # T-test for a difference in group means (assumes equal variances).
    tTestResult = stats.ttest_ind(titanic_survived[col_name], titanic_n_survived[col_name])
    # Welch's variant: equal_var=False drops the equal-variance assumption.
    tTestResultDiffVar = stats.ttest_ind(titanic_survived[col_name], titanic_n_survived[col_name], equal_var=False)
    print("The t-statistic and p-value assuming equal variances is %.3f and %.3f" % tTestResult)
    print("The t-statistic and p-value not assuming equal variances is %.3f and %.3f" % tTestResultDiffVar)
    if distribution_check:
        # Shapiro-Wilk test: how normally distributed is each group?
        print("The w-statistic and p-value in Survived %.3f and %.3f" %stats.shapiro(titanic_survived[col_name]))
        print("The w-statistic and p-value in Non-Survived %.3f and %.3f" %stats.shapiro(titanic_n_survived[col_name]))
# 앞서 정의한 valid_features 함수를 실행합니다. age 피처와 sibsp 피처를 탐색합니다.
valid_features(df_train[df_train['age'] > 0], 'age', distribution_check=True)
valid_features(df_train, 'sibsp', distribution_check=False)
# 로지스틱 회귀 모델을 사용하기 위해서는 회귀분석을 수행할 때와 동일한 방법으로 데이터를 가공해야한다.
# age의 결측값을 평균값으로 대체합니다.
replace_mean = df_train[df_train['age'] > 0]['age'].mean()
# fillna() : 결측값을 특정 값으로 채운다
df_train['age'] = df_train['age'].fillna(replace_mean)
df_test['age'] = df_test['age'].fillna(replace_mean)
# embark: 2개의 결측값을 최빈값으로 대체합니다.
embarked_mode = df_train['embarked'].value_counts().index[0]
df_train['embarked'] = df_train['embarked'].fillna(embarked_mode)
df_test['embarked'] = df_test['embarked'].fillna(embarked_mode)
# 원-핫 인코딩을 위한 통합 데이터 프레임(whole_df)을 생성합니다.
whole_df = df_train.append(df_test)
train_idx_num = len(df_train)
# pandas 패키지를 이용한 원-핫 인코딩을 수행합니다.
whole_df_encoded = pd.get_dummies(whole_df)
df_train = whole_df_encoded[:train_idx_num]
df_test = whole_df_encoded[train_idx_num:]
df_train.head()
# 데이터를 학습 데이터셋, 테스트 데이터셋으로 분리합니다.
x_train, y_train = df_train.loc[:, df_train.columns != 'survived'].values, df_train['survived'].values
x_test, y_test = df_test.loc[:, df_test.columns != 'survived'].values, df_test['survived'].values
# 로지스틱 회귀 모델을 사용
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# 로지스틱 회귀 모델을 학습합니다.
# max_iter=100 이 부족하여 오류 발생, 고칠 필요가 있음
lr = LogisticRegression(random_state=0,max_iter=500)
lr.fit(x_train, y_train)
# 학습한 모델의 테스트 데이터셋에 대한 예측 결과를 반환합니다.
y_pred = lr.predict(x_test)
# predict_proba의 출력은 각 클래스에 대한 확률
y_pred_probability = lr.predict_proba(x_test)[:,1]
# Print accuracy, precision, recall and F1 for the test-set predictions.
print("accuracy: %.2f" % accuracy_score(y_test, y_pred))
print("Precision : %.3f" % precision_score(y_test, y_pred))
# BUG FIX: this line previously printed f1_score under the "Recall" label.
print("Recall : %.3f" % recall_score(y_test, y_pred))
print("F1 : %.3f" % f1_score(y_test, y_pred))
from sklearn.metrics import confusion_matrix
# Confusion Matrix를 출력합니다.
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
print(confmat)
from sklearn.metrics import roc_curve, roc_auc_score
# AUC(Area Under the Curve)를 계산하여 출력합니다.
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred_probability)
roc_auc = roc_auc_score(y_test, y_pred_probability)
print("AUC : %.3f" % roc_auc)
# ROC curve를 그래프로 출력합니다.
plt.rcParams['figure.figsize'] = [5, 4]
plt.plot(false_positive_rate, true_positive_rate, label='ROC curve (area = %0.3f)' % roc_auc, color='red', linewidth=4.0)
plt.plot([0,1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve of Logistic regression')
plt.legend(loc="lower right")
from sklearn.tree import DecisionTreeClassifier
# 의사결정나무를 학습하고, 학습한 모델로 테스트 데이터셋에 대한 예측값을 반환합니다.
dtc = DecisionTreeClassifier()
dtc.fit(x_train, y_train)
y_pred = dtc.predict(x_test)
y_pred_probability = dtc.predict_proba(x_test)[:,1]
# 학습한 모델의 성능을 계산하여 출력합니다.
print("accuracy: %.2f" % accuracy_score(y_test, y_pred))
print("Precision : %.3f" % precision_score(y_test, y_pred))
print("Recall : %.3f" % recall_score(y_test, y_pred))
print("F1 : %.3f" % f1_score(y_test, y_pred))
# 학습한 모델의 AUC를 계산하여 출력합니다.
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred_probability)
roc_auc = roc_auc_score(y_test, y_pred_probability)
print("AUC : %.3f" % roc_auc)
# ROC curve를 그래프로 출력합니다.
plt.rcParams['figure.figsize'] = [5, 4]
plt.plot(false_positive_rate, true_positive_rate, label='ROC curve (area = %0.3f)' % roc_auc,
color='red', linewidth=4.0)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve of Logistic regression')
plt.legend(loc="lower right")
# 데이터를 다시 불러옵니다.
df_train = pd.read_csv("../data/titanic_train.csv")
df_test = pd.read_csv("../data/titanic_test.csv")
df_train = df_train.drop(['ticket', 'body', 'home.dest'], axis=1)
df_test = df_test.drop(['ticket', 'body', 'home.dest'], axis=1)
# age의 결측값을 평균값으로 대체합니다.
replace_mean = df_train[df_train['age'] > 0]['age'].mean()
df_train['age'] = df_train['age'].fillna(replace_mean)
df_test['age'] = df_test['age'].fillna(replace_mean)
#embark : 2개의 결측값을 최빈값으로 대체합니다.
embarked_mode = df_train['embarked'].value_counts().index[0]
df_train['embarked'] = df_train['embarked'].fillna(embarked_mode)
df_test['embarked'] = df_test['embarked'].fillna(embarked_mode)
# 원-핫 인코딩을 위한 통합 데이터 프레임(whole_df)을 생성합니다.
whole_df = df_train.append(df_test)
train_idx_num = len(df_train)
print(whole_df['cabin'].value_counts()[:10])
# 결측 데이터의 경우는 'X'로 대체합니다.
whole_df['cabin'] = whole_df['cabin'].fillna('X')
# cabin 피처의 첫 번째 알파벳을 추출합니다.
whole_df['cabin'] = whole_df['cabin'].apply(lambda x: x[0])
# 추출한 알파벳 중, G와 T는 수가 너무 작기 때문에 마찬가지로 'X'로 대체합니다.
whole_df['cabin'] = whole_df['cabin'].replace({"G":"X", "T":"X"})
ax = sns.countplot(x='cabin', hue = 'survived', data = whole_df)
plt.show()
# 이름에서 호칭을 추출합니다.
name_grade = whole_df['name'].apply(lambda x : x.split(", ",1)[1].split(".")[0])
name_grade = name_grade.unique().tolist()
print(name_grade)
# 호칭에 따라 사회적 지위(1910년대 기준)를 정의합니다.
# Social rank (by 1910s convention) for each title extracted from a name.
grade_dict = {'A': ['Rev', 'Col', 'Major', 'Dr', 'Capt', 'Sir'],  # honorific titles
              'B': ['Ms', 'Mme', 'Mrs', 'Dona'],                  # adult women
              'C': ['Jonkheer', 'the Countess'],                  # nobility / peerage
              'D': ['Mr', 'Don'],                                 # adult men
              'E': ['Master'],                                    # young men
              'F': ['Miss', 'Mlle', 'Lady']}                      # young women

def give_grade(x):
    """Map a raw passenger name to its A-F title grade ('G' if unknown).

    Names look like "Last, Title. First", so the title is the token
    between the first ", " and the first "." that follows it.
    """
    title = x.split(", ", 1)[1].split(".")[0]
    for grade, titles in grade_dict.items():
        if title in titles:
            return grade
    return 'G'
# 위의 함수를 적용하여 name 피처를 새롭게 정의합니다.
whole_df['name'] = whole_df['name'].apply(lambda x: give_grade(x))
print(whole_df['name'].value_counts())
# pandas 패키지를 이용한 원-핫 인코딩을 수행합니다.
whole_df_encoded = pd.get_dummies(whole_df)
df_train = whole_df_encoded[:train_idx_num]
df_test = whole_df_encoded[train_idx_num:]
df_train.head()
# 데이터를 학습 데이터셋, 테스트 데이터셋으로 분리합니다.
x_train, y_train = df_train.loc[:, df_train.columns != 'survived'].values, df_train['survived'].values
x_test, y_test = df_test.loc[:, df_test.columns != 'survived'].values, df_test['survived'].values
# 로지스틱 회귀 모델을 학습합니다.
lr = LogisticRegression(random_state=0)
lr.fit(x_train, y_train)
# 학습한 모델의 테스트 데이터셋에 대한 예측 결과를 반환합니다.
y_pred = lr.predict(x_test)
y_pred_probability = lr.predict_proba(x_test)[:,1]
# 테스트 데이터셋에 대한 accuracy, precision, recall, f1 평가 지표를 각각 출력합니다.
print("accuracy: %.2f" % accuracy_score(y_test, y_pred))
print("Precision : %.3f" % precision_score(y_test, y_pred))
print("Recall : %.3f" % recall_score(y_test, y_pred))
print("F1 : %.3f" % f1_score(y_test, y_pred)) # AUC (Area Under the Curve) & ROC curve
# AUC (Area Under the Curve)를 계산하여 출력합니다.
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred_probability)
roc_auc = roc_auc_score(y_test, y_pred_probability)
print("AUC : %.3f" % roc_auc)
# ROC curve를 그래프로 출력합니다.
plt.rcParams['figure.figsize'] = [5, 4]
plt.plot(false_positive_rate, true_positive_rate, label='ROC curve (area = %0.3f)' % roc_auc,
color='red', linewidth=4.0)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve of Logistic regression')
plt.legend(loc="lower right")
# 예측 대상인 survived 피처를 제외한 모든 피처를 리스트로 반환합니다. (그래프의 y축)
cols = df_train.columns.to_list()
cols.remove('survived')
y_pos = np.arange(len(cols))
# 각 피처별 회귀 분석 계수를 그래프의 x축으로 하여 피처 영향력 그래프를 출력합니다.
plt.rcParams['figure.figsize'] = [5, 4]
fig, ax = plt.subplots()
ax.barh(y_pos, lr.coef_[0], align='center', color='green', ecolor='black')
ax.set_yticks(y_pos)
ax.set_yticklabels(cols)
ax.invert_yaxis()
ax.set_xlabel('Coef')
ax.set_title("Each Feature's Coef")
plt.show()
from sklearn.model_selection import KFold
# K-fold 교차 검증의 k를 5로 설정합니다.
k = 5
cv = KFold(k, shuffle=True, random_state=0)
auc_history = []
# K-fold를 5번의 분할 학습으로 반복합니다.
# 파이썬 enumerate 는 순서가 있는 자료형의 index번호 와 index값 을 반환하는 함수다.
for i, (train_data_row, test_data_row) in enumerate(cv.split(whole_df_encoded)):
# 5개로 분할된 fold 중 4개를 학습 데이터셋, 1개를 테스트 데이터셋으로 지정합니다.
# 매 반복시마다 테스트 데이터셋은 변경됩니다.
# iloc 행 번호를 기준으로 행 데이터 읽기
df_train = whole_df_encoded.iloc[train_data_row]
df_test = whole_df_encoded.iloc[test_data_row]
# survived 피처를 y, 나머지 피처들을 x 데이터로 지정합니다.
# loc 인덱스 기준으로 행 데이터 읽기
splited_x_train, splited_y_train = df_train.loc[:, df_train.columns != 'survived'].values, df_train['survived'].values
splited_x_test, splited_y_test = df_test.loc[:, df_test.columns != 'survived'].values, df_test['survived'].values
# 주어진 데이터로 로지스틱 회귀 모델을 학습합니다.
lr = LogisticRegression(random_state=0)
lr.fit(splited_x_train, splited_y_train)
y_pred = lr.predict(splited_x_test)
# predict_proba의 출력은 각 클래스에 대한 확률
y_pred_probability = lr.predict_proba(splited_x_test)[:,1]
# 테스트 데이터셋의 AUC를 계산하여 auc_history에 저장합니다.
false_positive_rate, true_positive_rate, thresholds = roc_curve(splited_y_test, y_pred_probability)
roc_auc = roc_auc_score(splited_y_test, y_pred_probability)
auc_history.append(roc_auc)
# auc_history에 저장된 다섯 번의 학습 결과(AUC)를 그래프로 출력합니다.
plt.xlabel("Each K-fold")
plt.ylabel("AUC of splited test data")
plt.plot(range(1, k+1), auc_history) # baseline
!pip install scikit-plot
import scikitplot as skplt
skplt.estimators.plot_learning_curve(lr, x_train, y_train)
plt.show()
```
| github_jupyter |
# Noise2Void - 2D Example for SEM data
```
# We import all our dependencies.
from n2v.models import N2VConfig, N2V
import numpy as np
from csbdeep.utils import plot_history
from n2v.utils.n2v_utils import manipulate_val_data
from n2v.internals.N2V_DataGenerator import N2V_DataGenerator
from matplotlib import pyplot as plt
import urllib
import os
import zipfile
```
# Download Example Data
Data by Reza Shahidi and Gaspar Jekely, Living Systems Institute, Exeter<br>
Thanks!
# Training Data Preparation
For training we load __one__ set of low-SNR images and use the <code>N2V_DataGenerator</code> to extract training <code>X</code> and validation <code>X_val</code> patches.
```
# We create our DataGenerator-object.
# It will help us load data and extract patches for training and validation.
datagen = N2V_DataGenerator()
# We load all the '.tif' files from the 'data' directory.
# If you want to load other types of files see the RGB example.
# The function will return a list of images (numpy arrays).
imgs = datagen.load_imgs_from_directory(directory = "C:/Users/ccx55/OneDrive/Documents/GitHub/Phd/Single-nanoparticle-catalysis/CO_OX_TEM/Data/200420/all_data/")
# Let's look at the shape of the images.
print(imgs[0].shape,imgs[1].shape)
# The function automatically added two extra dimensions to the images:
# One at the beginning, is used to hold a potential stack of images such as a movie.
# One at the end, represents channels.
# Lets' look at the images.
# We have to remove the added extra dimensions to display them as 2D images.
plt.imshow(imgs[0][0,...,0], cmap='magma')
plt.show()
plt.imshow(imgs[1][0,...,0], cmap='magma')
plt.show()
# We will use the first image to extract training patches and store them in 'X'
patch_shape = (96,96)
X = datagen.generate_patches_from_list(imgs[:1], shape=patch_shape)
# We will use the second image to extract validation patches.
X_val = datagen.generate_patches_from_list(imgs[1:], shape=patch_shape)
# Patches are created so they do not overlap.
# (Note: this is not the case if you specify a number of patches. See the docstring for details!)
# Non-overlapping patches would also allow us to split them into a training and validation set
# per image. This might be an interesting alternative to the split we performed above.
# Just in case you don't know how to access the docstring of a method:
datagen.generate_patches_from_list?
# Let's look at one of our training and validation patches.
plt.figure(figsize=(14,7))
plt.subplot(1,2,1)
plt.imshow(X[0,...,0], cmap='magma')
plt.title('Training Patch');
plt.subplot(1,2,2)
plt.imshow(X_val[0,...,0], cmap='magma')
plt.title('Validation Patch');
```
# Configure
Noise2Void comes with a special config-object, where we store network-architecture and training specific parameters. See the docstring of the <code>N2VConfig</code> constructor for a description of all parameters.
When creating the config-object, we provide the training data <code>X</code>. From <code>X</code> we extract <code>mean</code> and <code>std</code> that will be used to normalize all data before it is processed by the network. We also extract the dimensionality and number of channels from <code>X</code>.
Compared to supervised training (i.e. traditional CARE), we recommend to use N2V with an increased <code>train_batch_size</code> and <code>batch_norm</code>.
To keep the network from learning the identity we have to manipulate the input pixels during training. For this we have the parameter <code>n2v_manipulator</code> with default value <code>'uniform_withCP'</code>. Most pixel manipulators will compute the replacement value based on a neighborhood. With <code>n2v_neighborhood_radius</code> we can control its size.
Other pixel manipulators:
* normal_withoutCP: samples the neighborhood according to a normal gaussian distribution, but without the center pixel
* normal_additive: adds a random number to the original pixel value. The random number is sampled from a gaussian distribution with zero-mean and sigma = <code>n2v_neighborhood_radius</code>
* normal_fitted: uses a random value from a gaussian normal distribution with mean equal to the mean of the neighborhood and standard deviation equal to the standard deviation of the neighborhood.
* identity: performs no pixel manipulation
For faster training multiple pixels per input patch can be manipulated. In our experiments we manipulated about 0.198% of the input pixels per patch. For a patch size of 64 by 64 pixels this corresponds to about 8 pixels. This fraction can be tuned via <code>n2v_perc_pix</code>.
For Noise2Void training it is possible to pass arbitrarily large patches to the training method. From these patches random subpatches of size <code>n2v_patch_shape</code> are extracted during training. Default patch shape is set to (64, 64).
In the past we experienced bleedthrough artifacts between channels if training was terminated to early. To counter bleedthrough we added the `single_net_per_channel` option, which is turned on by default. In the back a single U-Net for each channel is created and trained independently, thereby removing the possiblity of bleedthrough. <br/>
__Note:__ Essentially the network gets multiplied by the number of channels, which increases the memory requirements. If your GPU gets too small, you can always split the channels manually and train a network for each channel one after another.
<font color='red'>Warning:</font> to make this example notebook execute faster, we have set <code>train_epochs</code> to only 10. <br>For better results we suggest 100 to 200 <code>train_epochs</code>.
```
# train_steps_per_epoch is set to (number of training patches)/(batch size), like this each training patch
# is shown once per epoch.
config = N2VConfig(X, unet_kern_size=3,
train_steps_per_epoch=int(X.shape[0]/128), train_epochs=10, train_loss='mse', batch_norm=True,
train_batch_size=128, n2v_perc_pix=0.198, n2v_patch_shape=(64, 64),
n2v_manipulator='uniform_withCP', n2v_neighborhood_radius=5)
# Let's look at the parameters stored in the config-object.
vars(config)
# a name used to identify the model
model_name = 'n2v_2D'
# the base directory in which our model will live
basedir = 'models'
# We are now creating our network model.
model = N2V(config, model_name, basedir=basedir)
```
# Training
Training the model will likely take some time. We recommend to monitor the progress with TensorBoard, which allows you to inspect the losses during training. Furthermore, you can look at the predictions for some of the validation images, which can be helpful to recognize problems early on.
You can start TensorBoard in a terminal from the current working directory with tensorboard --logdir=. Then connect to http://localhost:6006/ with your browser.
```
# We are ready to start training now.
history = model.train(X, X_val)
```
### After training, lets plot training and validation loss.
```
print(sorted(list(history.history.keys())))
plt.figure(figsize=(16,5))
plot_history(history,['loss','val_loss']);
```
## Export Model in BioImage ModelZoo Format
See https://imagej.net/N2V#Prediction for details.
```
model.export_TF(name='Noise2Void - 2D SEM Example',
description='This is the 2D Noise2Void example trained on SEM data in python.',
authors=["Tim-Oliver Buchholz", "Alexander Krull", "Florian Jug"],
test_img=X_val[0,...,0], axes='YX',
patch_shape=patch_shape)
```
| github_jupyter |
# Deploy model
**Important**: Change the kernel to *PROJECT_NAME local*. You can do this from the *Kernel* menu under *Change kernel*. You cannot deploy the model using the *PROJECT_NAME docker* kernel.
```
from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime.services import generate_schema
import pandas as pd
import numpy as np
import imp
import pickle
import os
import sys
import json
from azureml.logging import get_azureml_logger
run_logger = get_azureml_logger()
run_logger.log('amlrealworld.timeseries.deploy-model','true')
```
Enter the name of the model to deploy.
```
model_name = "linear_regression"
```
Load the test dataset and retain just one row. This record will be used to create and input schema for the web service. It will also allow us to simulate invoking the web service with features for one hour period and generating a demand forecast for this hour.
```
aml_dir = os.environ['AZUREML_NATIVE_SHARE_DIRECTORY']
test_df = pd.read_csv(os.path.join(aml_dir, 'nyc_demand_test.csv'), parse_dates=['timeStamp'])
test_df = test_df.drop(['demand', 'timeStamp'], axis=1).copy().iloc[[0]]
test_df
```
Load model from disk and transfer it to the working directory.
```
with open(os.path.join(aml_dir, model_name + '.pkl'), 'rb') as f:
mod = pickle.load(f)
with open('model_deploy.pkl', 'wb') as f:
pickle.dump(mod, f)
```
Check model object has loaded as expected.
```
mod
```
Apply model to predict test record
```
np.asscalar(mod.predict(test_df))
```
### Author a realtime web service
Create a score.py script which implements the scoring function to run inside the web service. Change model_name variable as required.
```
%%writefile score.py
# The init and run functions will load and score the input using the saved model.
# The score.py file will be included in the web service deployment package.
def init():
    # Executed once when the web service starts: load the trained model
    # (packaged alongside score.py in the deployment bundle) into a
    # module-level global so run() can reuse it on every request.
    import pickle
    import os
    global model
    with open('model_deploy.pkl', 'rb') as f:
        model = pickle.load(f)
def run(input_df):
    """Score a single record with the loaded model.

    Returns the integer demand forecast on success, or a diagnostic
    string on a shape mismatch / any exception (the web-service contract
    is to always return something serialisable).
    """
    # Select/reorder exactly the 17 features the model was trained on.
    input_df = input_df[['precip', 'temp', 'hour', 'month', 'dayofweek',
        'temp_lag1', 'temp_lag2', 'temp_lag3', 'temp_lag4', 'temp_lag5',
        'temp_lag6', 'demand_lag1', 'demand_lag2', 'demand_lag3',
        'demand_lag4', 'demand_lag5', 'demand_lag6']]
    try:
        if (input_df.shape != (1,17)):
            # BUG FIX: message previously read "Bad imput".
            return 'Bad input: Expecting dataframe of shape (1,17)'
        else:
            pred = model.predict(input_df)
            return int(pred)
    except Exception as e:
        return str(e)
```
This script will be written to your current working directory:
```
os.getcwd()
```
#### Test the *init* and *run* functions
```
import score
imp.reload(score)
score.init()
score.run(test_df)
```
#### Create web service schema
The web service schema provides details on the required structure of the input data as well as the data types of each column.
```
inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, test_df)}
generate_schema(run_func=score.run, inputs=inputs, filepath='service_schema.json')
```
#### Deploy the web service
The command below deploys a web service names "demandforecast", with input schema defined by "service_schema.json". The web service runs "score.py" which scores the input data using the model "model_deploy.pkl". This may take a few minutes.
```
!az ml service create realtime -f score.py -m model_deploy.pkl -s service_schema.json -n demandforecast -r python
```
Check web service is running.
```
!az ml service show realtime -i demandforecast
```
Test the web service is working by invoking it with a test record.
```
!az ml service run realtime -i demandforecast -d "{\"input_df\": [{\"hour\": 0, \"month\": 6, \"demand_lag3\": 7576.558, \"temp_lag5\": 77.36, \"temp\": 74.63, \"demand_lag1\": 6912.7, \"demand_lag5\": 7788.292, \"temp_lag6\": 80.92, \"temp_lag3\": 76.72, \"demand_lag6\": 8102.142, \"temp_lag4\": 75.85, \"precip\": 0.0, \"temp_lag2\": 75.72, \"demand_lag2\": 7332.625, \"temp_lag1\": 75.1, \"demand_lag4\": 7603.008, \"dayofweek\": 4}]}"
```
#### Delete the web service
```
!az ml service delete realtime --id=demandforecast
```
| github_jupyter |
# Ejercicios Graphs, Paths & Components
Ejercicios básicos de Grafos.
## Ejercicio - Número de Nodos y Enlaces
_ (resuelva en código propio y usando la librería NetworkX (python) o iGraph (R)) _
Cuente el número de nodos y enlaces con los siguientes links (asumiendo que el grafo puede ser dirigido Y no dirigido):
```
edges = set([(1, 2), (3, 1), (3, 2), (2, 4)])
# BUG FIX: removed a duplicated assignment of `edges` and converted the
# Python 2 `print` statements to Python 3 calls (the rest of the notebook
# already uses print()).
# Own implementation: every endpoint of every edge is a node.
edges_list = [i[0] for i in edges] + [i[1] for i in edges]
nodes = set(edges_list)
edges_number = len(edges)
nodes_number = len(nodes)
print("Número de nodos: " + str(nodes_number))
print("Número de enlaces: " + str(edges_number))

# Now using NetworkX
import networkx as nx
G = nx.Graph()
G.add_edges_from(edges)
print("Número de nodos: " + str(G.number_of_nodes()))
print("Número de aristas: " + str(G.number_of_edges()))
```
## Ejercicio - Matriz de Adyacencia
_ (resuelva en código propio y usando la librería NetworkX (python) o iGraph (R)) _
Cree la matriz de adyacencia del grafo del ejercicio anterior (para dirigido y no-dirigido)
```
"""Código propio"""
import numpy as np
edges = set([(1,2), (3, 1), (3, 2), (2, 4)])
def adj_matrix_dgraph(edges):
    """Adjacency matrix of a directed graph given as (src, dst) pairs.

    Nodes are assumed to be labelled 1..N, so node i maps to
    row/column i-1.
    """
    endpoints = [e[0] for e in edges] + [e[1] for e in edges]
    n = len(set(endpoints))
    mat = np.zeros((n, n))
    for src, dst in edges:
        mat[src - 1, dst - 1] = 1
    return mat
def adj_matrix(edges):
    """Symmetric adjacency matrix of an undirected graph (nodes 1..N)."""
    endpoints = [e[0] for e in edges] + [e[1] for e in edges]
    n = len(set(endpoints))
    mat = np.zeros((n, n))
    # Mirror each edge so the matrix is symmetric.
    for u, v in edges:
        mat[u - 1, v - 1] = 1
        mat[v - 1, u - 1] = 1
    return mat
# BUG FIX: converted Python 2 `print` statements to Python 3 calls,
# consistent with the print() usage elsewhere in the notebook.
print("matriz para grafo dirigido:\n" + str(adj_matrix_dgraph(edges)))
print("\n")
print("matriz para grafo no dirigido:\n" + str(adj_matrix(edges)))

# Solución con NetworkX
import networkx as nx
G = nx.Graph()
G.add_edges_from(edges)
matrix = nx.adjacency_matrix(G)
print(matrix)
DG = nx.DiGraph()
DG.add_edges_from(edges)
print("\n")
print(nx.adjacency_matrix(DG))
```
## Ejercicio - Sparseness
Calcule la proporción entre número de links existentes en 3 redes reales (http://snap.stanford.edu/data/index.html) contra el número de links posibles.
```
import numpy as np
""" The entered datasets correspond to non-directed graphs"""
""" information about the dataset can be found in the following link:
http://snap.stanford.edu/data/egonets-Facebook.html """
edges1 = np.genfromtxt('0.edges', dtype="int", delimiter=" ")
edges2 = np.genfromtxt('348.edges', dtype="int", delimiter=" ")
edges3 = np.genfromtxt('414.edges', dtype="int", delimiter=" ")
def edges_to_nodes(edges):
    """Return the set of node labels appearing in an edge list."""
    endpoints = [e[0] for e in edges] + [e[1] for e in edges]
    return set(endpoints)

def edge_rate(edges):
    """Fraction of realised edges over all possible undirected edges."""
    n = len(edges_to_nodes(edges))
    print ("len(n) = %d" %(n))
    # Undirected simple graph (no self-loops): n*(n-1)/2 possible edges.
    possible_edges = (n*(n-1))/2
    print ("possible_edges=%d" % (possible_edges))
    return float(len(edges)) / possible_edges

def edge_rate_dgraph(edges):
    """Fraction of realised edges over all possible directed edges
    (reflexive edges included, hence n**2)."""
    n = len(edges_to_nodes(edges))
    return float(len(edges)) / (n ** 2)
print (edge_rate(edges1))
print (edge_rate(edges2))
print (edge_rate(edges3))
""" With networkx """
import networkx as nx
G1 = nx.read_edgelist('0.edges', delimiter=" ")
G2 = nx.read_edgelist('348.edges', delimiter=" ")
G3 = nx.read_edgelist('414.edges', delimiter=" ")
def possible_edges(graph):
    """Number of possible edges in a simple undirected graph."""
    n = graph.number_of_nodes()
    return (n * (n - 1)) / 2
print ("possible_edges(G1)=%d" % (possible_edges(G1)))
def edge_rate_nx(graph):
    """Edge density of a NetworkX graph: existing edges / possible edges."""
    existing = float(graph.number_of_edges())
    return existing / float(possible_edges(graph))
print ("\n")
print (edge_rate_nx(G1))
print (edge_rate_nx(G2))
print (edge_rate_nx(G3))
```
En la matriz de adyacencia de cada uno de las redes elegidas, cuantos ceros hay?
```
""" Without NetworkX """
import numpy as np
def edges_to_nodes(edges):
    """Collect every node mentioned in *edges*; also report the count."""
    nodes = set()
    for edge in edges:
        nodes.add(edge[0])
        nodes.add(edge[1])
    print ("len(nodes)=%d" %(len(nodes)))
    return nodes
""" The entered datasets correspond to non-directed graphs"""
""" information about the dataset can be found in the following link:
http://snap.stanford.edu/data/egonets-Facebook.html """
edges1 = np.genfromtxt('0.edges', dtype="int", delimiter=" ")
print (len(edges1))
edges2 = np.genfromtxt('348.edges', dtype="int", delimiter=" ")
print (len(edges2))
edges3 = np.genfromtxt('414.edges', dtype="int", delimiter=" ")
print (len(edges3))
""" Asuming there aren't repeated elements in the dataset """
def number_of_zeroes(edges):
    """Zeros in the adjacency matrix of an UNdirected edge list.

    BUG FIX: the symmetric adjacency matrix stores every undirected edge
    twice (u->v and v->u), so 2*len(edges) ones must be subtracted —
    matching the NetworkX-based zeroes() computation later in the notebook.
    """
    n = len(edges_to_nodes(edges))
    zeroes = n**2 - 2*len(edges)
    return zeroes
def number_of_zeroes_dgraph(edges):
    """Zeros in the adjacency matrix of a directed edge list
    (one matrix entry per edge)."""
    n = len(edges_to_nodes(edges))
    zeroes = n**2 - len(edges)
    return zeroes
print ("number_of_zeroes(edges1)=%d" %(number_of_zeroes(edges1)))
print ("number_of_zeroes(edges2)=%d" %(number_of_zeroes(edges2)))
print ("number_of_zeroes(edges3)=%d" %(number_of_zeroes(edges3)))
""" With NetworkX """
import networkx as nx
""" The selected datasets are non-directed graphs. Therefore their adjacency matrix is simetrical """
""" For undirected graphs NetworkX stores only the edges of one of the matrix's triangles (upper or lower)"""
G1 = nx.read_edgelist('0.edges', delimiter=" ")
print (len(G1.edges()))
G2 = nx.read_edgelist('348.edges', delimiter=" ")
print (len(G2.edges()))
G3 = nx.read_edgelist('414.edges', delimiter=" ")
print (len(G3.edges()))
N1 = len(G1.nodes())
N2 = len(G2.nodes())
N3 = len(G3.nodes())
def zeroes(graph):
    """Count zero entries in the symmetric adjacency matrix of *graph*."""
    num_nodes = len(graph.nodes())
    num_edges = len(graph.edges())
    # Each undirected edge occupies two symmetric matrix cells.
    count = num_nodes ** 2 - 2 * num_edges
    print ("zeroes=%d" %(count))
    return count
zeroes(G1)
zeroes(G2)
zeroes(G3)
```
## Ejercicio - Redes Bipartitas
Defina una red bipartita y genere ambas proyecciones, explique qué son los nodos y links tanto de la red original como de las proyeccciones
```
import numpy as np
network1 = set([(1,'a'),(3,'b'), (4,'d'),(5,'b'),(1,'b'), (2,'d'), (1,'d'), (3,'c')])
def projection_u(edges):
    """Project a bipartite edge set onto its first (U) node set.

    Two U-nodes become linked whenever they share at least one
    V-neighbour; pair orientation follows input order.
    """
    pairs = list(edges)
    links = set()
    for idx, (u_a, v_a) in enumerate(pairs):
        for u_b, v_b in pairs[idx + 1:]:
            if v_a == v_b:
                links.add((u_a, u_b))
    return links
print (projection_u(network1))
def projection_v(edges):
    """Project a bipartite edge set onto its second (V) node set.

    Two V-nodes become linked whenever they share at least one
    U-neighbour; pair orientation follows input order.
    """
    pairs = list(edges)
    links = set()
    for idx, (u_a, v_a) in enumerate(pairs):
        for u_b, v_b in pairs[idx + 1:]:
            if u_a == u_b:
                links.add((v_a, v_b))
    return links
print (projection_v(network1))
```
## Ejercicio - Paths
Cree un grafo de 5 nodos con 5 enlaces. Elija dos nodos cualquiera e imprima:
+ 5 Paths diferentes entre los nodos
+ El camino mas corto entre los nodos
+ El diámetro de la red
+ Un self-avoiding path
# Ejercicio - Componentes
Baje una red real (http://snap.stanford.edu/data/index.html) y lea el archivo
Utilizando NetworkX o iGraph descubra el número de componentes
Implemente el algorithmo Breadth First para encontrar el número de componentes (revise que el resultado es el mismo que utilizando la librería)
## Ejercicio - Degree distribution
_ (resuelva en código propio y usando la librería NetworkX (python) o iGraph (R)) _
Haga un plot con la distribución de grados de la red real
Calcule el grado promedio
## Ejercicio - Diámetro
```
N = 5
```
Cree un grafo de N nodos con el máximo diámetro posible
Cree un grafo de N nodos con el mínimo diámetro posible
Cree un grafo de N nodos que sea un ciclo simple
## Ejercicio - Pregunta "real"
Una aerolínea tiene las siguientes rutas desde las ciudades a las que sirve (cada par tiene servicio en ambas direcciones).
```
routemap = [('St. Louis', 'Miami'),
('St. Louis', 'San Diego'),
('St. Louis', 'Chicago'),
('San Diego', 'Chicago'),
('San Diego', 'San Francisco'),
('San Diego', 'Minneapolis'),
('San Diego', 'Boston'),
('San Diego', 'Portland'),
('San Diego', 'Seattle'),
('Tulsa', 'New York'),
('Tulsa', 'Dallas'),
('Phoenix', 'Cleveland'),
('Phoenix', 'Denver'),
('Phoenix', 'Dallas'),
('Chicago', 'New York'),
('Chicago', 'Los Angeles'),
('Miami', 'New York'),
('Miami', 'Philadelphia'),
('Miami', 'Denver'),
('Boston', 'Atlanta'),
('Dallas', 'Cleveland'),
('Dallas', 'Albuquerque'),
('Philadelphia', 'Atlanta'),
('Denver', 'Minneapolis'),
('Denver', 'Cleveland'),
('Albuquerque', 'Atlanta'),
('Minneapolis', 'Portland'),
('Los Angeles', 'Seattle'),
('San Francisco', 'Portland'),
('San Francisco', 'Seattle'),
('San Francisco', 'Cleveland'),
('Seattle', 'Portland')]
```
Cuál es el máximo número de intercambios que tendría que hacer un pasajero en un solo viaje entre dos ciudades servidas? (suponiendo rutas óptimas)
Si usted necesitara viajar mucho en esta aerolínea, cual sería el lugar óptimo para vivir? (i.e. minimizar el número de intercambios para llegar a cualquier ciudad)
Visualize la red
| github_jupyter |
<a id="title_ID"></a>
# JWST Pipeline Validation Testing Notebook: Calwebb_Image3, Resample step
<span style="color:red"> **Instruments Affected**</span>: FGS, MIRI, NIRCam, NIRISS, NIRSpec
Tested on MIRI Simulated data
### Table of Contents
<div style="text-align: left">
<br> [Introduction](#intro_ID) <br> [Run JWST Pipelines](#pipeline_ID) <br> [Imports](#imports_ID) <br> [Create an association table for your cal files and run them through calwebb_image3](#runpipeline_ID) <br> [Find Stars in Image and Determine their Coordinates](#runscript_ID) <br> [Compare RA and Dec to expected Values](#residual_ID) <br> [About This Notebook](#about_ID) <br>
</div>
<a id="intro_ID"></a>
# Introduction
This test is designed to test the resample step in the calwebb_image3 pipeline. At the end of the calwebb_image3 pipeline, the set of files defined in an association table will be distortion corrected and combined. Resample is the step that applies the distortion correction using the drizzling algorithm (as defined in the DrizzlePac handbook) and combines the listed files. For more information on the pipeline step visit the links below.
Step description: https://jwst-pipeline.readthedocs.io/en/latest/jwst/resample/main.html
Pipeline code: https://github.com/spacetelescope/jwst/tree/master/jwst/resample
The data for this test were created with the MIRI Data Simulator, and the documentation for that code can be found here: http://miri.ster.kuleuven.be/bin/view/Public/MIRISim_Public
### Calibration WG Requested Algorithm:
A short description and link to the page: https://outerspace.stsci.edu/display/JWSTCC/Vanilla+Image+Combination
### Defining Terms
Definitions of terms and acronyms used in this notebook.
JWST: James Webb Space Telescope
MIRI: Mid-Infrared Instrument
MIRISim: MIRI Data Simulator
### Description of test
This test is performed by creating a set of simulated data with multiple point sources located at specified coordinates. The simulator puts in the expected distortion, so the initial output data comes out of the simulator in distorted coordinates. When this data is then run through calwebb_detector1, calwebb_image2 and calwebbb_image3, the combined, undistorted image should have the point sources registered at the expected locations. In flight, this test can be repeated with known stars that should be found at their expected coordinates.
### Create the data for testing
The set of data used in this particular test were created with the MIRI Data Simulator (MIRISim). Referring to the MIRISim link, you can see how to set up and run the simulator to re-create the input files if you wish. The data was run with a scene.ini file that specified what the scene should look like, with coordinates for the stars given in units of arcsecond offsets from the center of the field of view. The scene.ini file as well as the setup files simulation.ini and simulator.ini are needed to run the simulation.
Once in the mirisim conda environment, the simulation is run with the command line:
> mirisim simulation.ini
The simulator created four files, two exposures each at two different dither positions, using the specified filter. Make sure the WCSAXES header keyword in the SCI extension is set to 2 and not 4. If it is set to 4, change it to 2.
[Top of Page](#title_ID)
<a id="pipeline_ID"></a>
## Run JWST Pipelines
The four files were then run individually through the calwebb_detector1 and calwebb_image2 pipelines. When running the calwebb_detector1 pipeline, increase the threshold for a detection in the jump step from 4 sigma to 10 sigma to avoid a current issue where the jump detection step flags a large percentage of pixels as jumps. This can be done on the command line. (commands to be typed start with $)
The pipelines can be run on the command line with the following commands or put into a script while using the pipeline conda environment.
$ strun calwebb_detector1.cfg filename --steps.jump.rejection_threshold 10.0
The output of the calwebb_detector1 pipeline is a set of four *rate.fits files which will then be run through the calwebb_image2 pipeline.
$ strun calwebb_image2.cfg filename
The output of the calwebb_image2 pipeline was then a set of four *cal.fits files. An association table was created that included these four files as input, and then the files and the association table were run through the calwebb_image3 pipeline.
The cal files are stored in artifactory, and this notebook is meant to pull those files for the test of resample. Step through the cells of this notebook to run calwebb_image3 and then check the alignment.
[Top of Page](#title_ID)
<a id="imports_ID"></a>
# Imports
The following packages will need to be imported for the scripts to work.
* astropy.io for opening files
* astropy.stats for sigma clipping routine
* astropy.visualization for image plotting
* ci_watson.artifactory_helpers to read in data from artifactory
* jwst.datamodels for opening files as a JWST Datamodel
* jwst.pipeline to run the pipeline step/module
* jwst.associations to create association table
* numpy for calculations
* matplotlib.pyplot.plt to generate plot
* os for path information
* photutils for star finding and aperture photometry
* regtest to retrieve data from artifactory needed to run notebook
[Top of Page](#title_ID)
```
from astropy.io import ascii, fits
from astropy.stats import sigma_clipped_stats
from astropy.table import Column
from astropy.visualization import SqrtStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from ci_watson.artifactory_helpers import get_bigdata
from jwst.datamodels import DrizProductModel, ImageModel
from jwst.pipeline import Image3Pipeline
from jwst import associations
from jwst.associations.lib.rules_level3_base import DMS_Level3_Base
from jwst.associations import asn_from_list
import matplotlib.pyplot as plt
import numpy as np
import os
from photutils import CircularAperture, DAOStarFinder, CircularAnnulus, aperture_photometry
from jwst.regtest.regtestdata import RegtestData
```
<a id="runpipeline_ID"></a>
# Open an association table for your cal files and run them through calwebb_image3
Load the association table to use the .cal files that were output from calwebb_image2. That will be the input for calwebb_image3 that uses the resample step to combine each of the individual images.
[Top of Page](#title_ID)
```
# Use regtest infrastructure to access all input files associated with the
# association file (fetches the asn json plus its member *_cal.fits files).
rtdata = RegtestData(inputs_root="jwst_validation_notebooks", env="validation_data")
rtdata.get_asn("resample/resample_miri_test/starfield_74_asnfile.json")
rtdata.input #this should be the list of files associated with the asn

# Run Calwebb_image3 on the association table.
# tweakreg parameters overridden from their defaults so the simulated star
# field registers cleanly:
fwhm=2.5 # Gaussian kernel FWHM of objects expected, default=2.5
minobj=5 # minimum number of objects needed to match positions for a good fit, default=15
snr= 250 # signal to noise threshold, default=5
sigma= 3 # clipping limit, in sigma units, used when performing fit, default=3
fit_geom='shift' # type of affine transformation to be considered when fitting catalogs, default='general'
use2dhist=False # boolean indicating whether to use 2D histogram to find initial offset, default=True
pipe3=Image3Pipeline()
pipe3.tweakreg.kernel_fwhm = fwhm
pipe3.tweakreg.snr_threshold = snr
pipe3.tweakreg.minobj = minobj
pipe3.tweakreg.sigma = sigma
pipe3.tweakreg.fitgeometry = fit_geom
pipe3.tweakreg.use2dhist = use2dhist
#pipe3.skymatch.skip = True # test to see if this affects the final output
pipe3.source_catalog.save_results = True # write the source catalog to disk
pipe3.save_results = True # write the resampled *_i2d.fits product to disk
# run Image3 on the association fetched above
im = pipe3.run(rtdata.input)
```
<a id="runscript_ID"></a>
# Find stars in image and determine their coordinates
The output of the pipeline command in the previous step (given our association table) is an i2d.fits file. This file is in the format of a JWST Data model type of DrizProductModel and should be opened as such. It is this file that we will use for source finding and to determine whether the stars are found in the expected locations. The i2d file and the associated text file containing the input coordinates of the stars can be found in artifactory.
[Top of Page](#title_ID)
#### Read in combined i2d data file and list of coordinates
```
# Read in the combined data file and list of coordinates.
# Opening with ImageModel validates that the resampled product really is a
# JWST image datamodel (raises if the file is not the correct model).
# NOTE(review): `im` is used after the `with` block exits — confirm the
# datamodel's data array remains accessible once the context manager closes.
with ImageModel('starfield_74_combined_i2d.fits') as im:
    # raises exception if file is not the correct model
    pass
# fetch the text file of input (simulated) source coordinates from artifactory
coords = get_bigdata('jwst_validation_notebooks',
                     'validation_data',
                     'resample',
                     'resample_miri_test',
                     'radec_coords.txt')
# read in text file with RA and Dec input coordinates
RA_in, Dec_in = np.loadtxt( coords, dtype=str, unpack=True)
# put RA and Dec into floats
RA_sim = RA_in.astype(float)
Dec_sim = Dec_in.astype(float)
# pull out data portion of input file
data = im.data
# print stats on input image; sigma=200 effectively disables the clipping
# (default sigma=3) — presumably intentional to keep the bright simulated
# stars in the statistics; confirm.
mean, median, std = sigma_clipped_stats(data, sigma=200.0, maxiters=5) # default sigma=3
print(mean, median, std)
```
#### Run DAOStar finder to find sources in the image and examine the image and positions marked.
The block of code below will find the sources in the image, create apertures for each source found, and output the table of x, y coordinates along with the peak pixel value. It will also show a scaled version of the image and mark in blue the positions of sources found.
```
# Run DAOStarFinder to find sources in the combined image.
ap_radius = 4. # radius (pixels) of the aperture used for centroiding and photometry
# threshold raised to 10*std (default 5*std) to reject noise peaks
daofind = DAOStarFinder(fwhm=3.0, threshold=10.*std) # default threshold=5*std, fwhm=3
sources = daofind(data)
print(sources['xcentroid','ycentroid','peak'])
# create apertures for sources
# NOTE(review): passing an (x_array, y_array) tuple is the legacy photutils
# positions format; newer photutils expects a list of (x, y) pairs —
# confirm against the pinned photutils version.
positions = (sources['xcentroid'], sources['ycentroid'])
apertures = CircularAperture(positions, r=ap_radius)
# mark sources on image frame to see if the correct sources were found
norm = ImageNormalize(stretch=SqrtStretch())
# keep image stretch in mind for plotting. sky subtracted range ~ (-15, 10), single sample ~ (0, 20)
plt.imshow(data, cmap='Greys', origin='lower', vmin=-15,vmax=10, norm=norm)
apertures.plot(color='blue', lw=1.5, alpha=0.5)
plt.show()
```
#### Run photometry on apertures (with a specified annulus for background subtraction)
Set a specified annulus (inner and outer radii for the annulus).
Run photometry on aperture and annuli.
Subtract background values in annulus from aperture photometry.
Output should be a table of photometry values printed to the screen (full table has columns id, xcenter, ycenter, aperture_sum and the added columns annulus_median, aperture_bkg and aperture_sum_bkgsub). You can choose which columns you wish to see printed.
```
# Radii (pixels) of the sky annulus used to estimate the local background.
inner_annulus = 10.
outer_annulus = 15.

# Build one background annulus per detected source.
background_aper = CircularAnnulus(positions, r_in=inner_annulus, r_out=outer_annulus)

# Aperture photometry on the science apertures
# (columns: id, xcenter, ycenter, aperture_sum).
phot_table = aperture_photometry(im.data, apertures)

# Sigma-clipped median background inside each source's own annulus.
# BUG FIX: the original loop took bmask = bkg_mask[0] and then called
# bmask.multiply(data) on every iteration, so every source's background
# was measured from the FIRST source's annulus pixels (while indexing with
# the current mask's footprint). Each iteration must use its own mask.
bkg_median = []
bkg_mask = background_aper.to_mask(method='center')
for mask in bkg_mask:
    aper_data = mask.multiply(data)
    aper_data = aper_data[mask.data > 0]
    # sigma-clipped median rejects stars/bad pixels that fall in the annulus
    _, median_sigclip, _ = sigma_clipped_stats(aper_data)
    bkg_median.append(median_sigclip)
bkg_median = np.array(bkg_median)

# Per-pixel background level in each annulus
phot_table['annulus_median'] = bkg_median
# Total background in the science aperture (per pixel * aperture area)
phot_table['aperture_bkg'] = bkg_median * apertures.area
# Background-subtracted source flux
phot_table['aperture_sum_bkgsub'] = phot_table['aperture_sum'] - phot_table['aperture_bkg']
print(phot_table['aperture_sum','annulus_median','aperture_bkg','aperture_sum_bkgsub'])
```
#### Put x, y coordinates into RA and Dec using the wcs information from the files.
The output of the next block of code should be a table showing the x and y centroid positions as well as the associated RA and Dec values.
```
# using wcs info from the combined image, convert pixel centroids to RA, Dec
# (the datamodel's wcs maps x, y -> world coordinates in degrees)
ra, dec = im.meta.wcs(sources['xcentroid'], sources['ycentroid'])
# add RA, Dec columns to the sources table
ra_col = Column(name='RA', data=ra)
dec_col = Column(name='Dec', data=dec)
sources.add_column(ra_col)
sources.add_column(dec_col)
# print RA, Dec for each x, y position found
print(sources['xcentroid', 'ycentroid', 'RA', 'Dec'])
# add option to print out list of sources with flux values
# TODO(review): `outtable` is assigned but the table is never written out —
# presumably an ascii.write(sources, outtable) call was intended; confirm.
outtable = 'sourcelist_phot_rate.txt'
sources.add_column(phot_table['aperture_sum'])
sources.add_column(phot_table['aperture_sum_bkgsub'])
```
#### Compare the RA and Dec positions used to create the simulated data to the values found in the output image.
Difference each set of RA and Dec coordinates in both the input list and the found coordinates, taking into account any angles close to 360/0 degrees. If the difference for both the RA and Dec are below a set tolerance, then the positions match. Take the matched positions and convert the differences from degrees to milli arcseconds, and output the RA and Dec positions as well as the differences.
```
# Compare input (simulated) RA, Dec to the RA, Dec found in the image.
# For every (input, detected) pair, 180 - |(|a - b|) - 180| gives the
# angular separation per axis while folding wrap-around near 0/360 deg.
print(' RA found Dec found RA_Diff (mas) Dec_diff (mas) Bkg sub flux pass/fail')
for i in np.arange(0,len(RA_sim)):
    for j in np.arange(0,len(ra)):
        ra_diff = 180 - abs(abs(RA_sim[i] - ra[j])-180)
        dec_diff = 180 - abs(abs(Dec_sim[i] - dec[j])-180)
        # coarse match: both axes within 1e-5 deg (36 mas) means these are
        # the same source
        if ra_diff < 1e-5 and dec_diff < 1e-5:
            # put differences in milliarcseconds (1 deg = 3.6e6 mas)
            ra_diff = ra_diff * 3600000
            dec_diff = dec_diff * 3600000
            # fine check: the test passes when both residuals are < 30 mas
            if ra_diff < 30 and dec_diff < 30:
                test = 'pass'
            else:
                test = 'fail'
            print('{:15.6f} {:15.6f} {:15.6f} {:15.6f} {:15.6f} {}'.format(ra[j], dec[j], ra_diff, dec_diff,
                  phot_table['aperture_sum_bkgsub'][j], test))
```
<a id="residual_ID"></a>
# Compare output RA and Dec to expected values
The output RA and Dec coordinates should match the input RA and Dec coordinates to within 1/10 of a PSF FWHM (~0.03 arcsec for F770W).
Output RA_Diff and Dec_diff above should be on the order of 30 or fewer milliarcseconds.
Check to see if your input flux is roughly what you expected based on the input data.
[Top of Page](#title_ID)
<a id="about_ID"></a>
## About this Notebook
**Author:** M. Cracraft, Research and Instrument Scientist II, INS/MIRI
<br>**Updated On:** 08/09/2019 to add in aperture photometry
An extra optional test that can be done is to plot the flux values against x or y values. Previous testing has shown a spatial dependence of the flux with y values, so a quick plot can show whether this problem is fixed or not. Prior to the resample step, there is no pattern, after the step, a pattern is clear. Just do this as a last check. If the scatter is not random, there may be a problem that needs to be checked. (Of course, this only works if you give an equivalent if not equal input count level to each input star.)
```
# Flux vs. y position: a residual spatial pattern here would indicate a
# resample/flux-conservation problem (see the note above); scatter should
# look random when all input stars have the same input count level.
plt.title('Surface brightness vs. y position on detector')
plt.ylim(35500,37500) # help weed out sources that were erroneously 'hits' (bad pixels, cosmic rays, etc)
plt.xlabel('y centroid position')
plt.ylabel('Surface brightness')
plt.plot(sources['ycentroid'], phot_table['aperture_sum_bkgsub'], marker='o',linestyle='') #ylim=(30000,40000))
plt.show()
```
[Top of Page](#title_ID)
<img style="float: right;" src="./stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="stsci_pri_combo_mark_horizonal_white_bkgd" width="200px"/>
| github_jupyter |
```
import random, time, ffmpeg
import numpy as np
from math import ceil
import threading
import cv2
from datetime import datetime
import PIL.Image as Image
import tensorflow as tf
import os
import matplotlib.pyplot as plt
# Global experiment configuration. set_model_settings() below adds derived
# entries (input_shape, np_mean, total_batch, ...) to this dict at run time.
model_settings = {
    # Training settings
    'current_epoch': 1,
    'max_steps': 1000,
    'moving_decay': 0.9999, 'weight_decay': 0.00005, 'dropout': 0.5,
    'learning_rate': 1e-4,  # 1e-4 from previous code
    'checkpoints': 200,  # Number of steps between checkpoint saves
    'batch_sizes': [30],  # Batch per device
    'read_pretrained_model': True,
    'load_fc_layers': True,
    'train_conv': False,  # freeze conv layers; fine-tune FC layers only
    'train_fc': True,
    'save_graph': True,
    'is_testing': False,

    # Neural-Network settings
    'frames_per_batch': 16,  # Number of frames in a clip fed to the network
    'video_fps': 12,  # FPS at which frames are extracted
    'crop_size': 112,  # Input frame height/width in pixels
    'channels': 3,
    'trans_max': 10,  # Max random translation (pixels) for pre-processing

    # System settings
    'devices_to_run': ['/gpu:0'],  # Multiple devices are not supported yet :(
    'num_thread': 4,  # Number of threads to read video files
    'queue_size': 3000,  # Queue size for reading input

    # Directory settings
    'read_from_frames': True,  # read pre-extracted frames instead of videos
    'model_name': 'UCF_finetune',
    'checkpoint_dir': './checkpoints/',
    'model_save_dir': './models/',
    # 'model_read_loc' : '../ViolanceDetection-Jupyter/models/s1m-ucf101.model',
    'model_read_loc': './models/UCF_finetuneFC_last.model',
    'data_home': '../datasets/UCF-101-Frames/',
    'train_test_loc': '../datasets/UCF-ActionRecognitionSplits',
    'train_file_name': '/trainlist01.txt',
    # 'train_file_name': '/train_small.txt',
    'test_file_name': '/testlist01.txt',
    'mean_clip_loc': '../datasets/PreprocessData/crop_mean.npy'
}
def set_model_settings(model_settings):
    """Fill in the derived configuration fields of *model_settings* in place.

    Adds: variable_storage, total_batch, input_shape, np_mean (loaded from
    the mean-clip .npy file), the queue/placeholder flags, and start_time.
    Testing mode disables dropout and translation augmentation.
    """
    # Variables live on the first compute device (use '/cpu:0' to keep them in RAM).
    model_settings['variable_storage'] = model_settings['devices_to_run'][0]

    # Total batch size across all devices.
    model_settings['total_batch'] = np.sum(model_settings['batch_sizes'])

    # Shape of one input clip for the placeholders.
    clip_shape = (model_settings['frames_per_batch'],
                  model_settings['crop_size'],
                  model_settings['crop_size'],
                  model_settings['channels'])
    model_settings['input_shape'] = clip_shape

    # Dataset mean clip, reshaped to the network input shape.
    model_settings['np_mean'] = np.load(model_settings['mean_clip_loc']).reshape(clip_shape)

    testing = model_settings['is_testing']
    model_settings['input_from_placeholders'] = False
    # During testing, dequeue immediately and disable augmentation/dropout.
    model_settings['dequeue_immediately'] = testing
    if testing:
        model_settings['dropout'] = 1.0
        model_settings['trans_max'] = 0

    model_settings['start_time'] = datetime.now()
set_model_settings(model_settings)
# Reads train/test filenames from the provided split files.
def get_data_dir(filename, from_frames=False):
    """Parse a UCF-style split file into parallel (paths, labels) lists.

    Each useful line has the form ``<relative/video/path.avi> <label>`` where
    ``<label>`` is 1-based; returned labels are shifted to 0-based ints.

    Args:
        filename: path to the split file (e.g. trainlist01.txt).
        from_frames: if True, strip the file extension from each path so the
            entry names a directory of pre-extracted frames instead of a video.

    Returns:
        (dir_videos, label_videos): lists of paths and int labels.
    """
    dir_videos, label_videos = [], []
    with open(filename, 'r') as input_file:
        for line in input_file:
            line = line.strip()
            if not line:
                # Robustness fix: skip blank/trailing lines instead of
                # crashing on tuple unpacking.
                continue
            file_name, label = line.split(' ')
            if from_frames:
                # Drop the extension; os.path.splitext also behaves sanely
                # for names without a dot, unlike the original
                # "'.'.join(name.split('.')[:-1])" which returned ''.
                file_name = os.path.splitext(file_name)[0]
            dir_videos.append(file_name)
            label_videos.append(int(label) - 1)
    return dir_videos, label_videos
# Shuffles video directions along with labels, keeping the pairs aligned.
def shuffle_list(dir_videos, label_videos, seed=None):
    """Return shuffled copies of (dir_videos, label_videos), same permutation.

    BUG FIX: the original signature was ``seed=time.time()``. A default is
    evaluated once at function definition, so every call without an explicit
    seed reused the very same seed and produced the identical "shuffle".
    ``seed=None`` now draws a fresh time-based seed on each call; passing an
    explicit seed still gives a reproducible permutation.
    """
    print('Shuffling the dataset...')
    if seed is None:
        seed = time.time()  # fresh seed per call (see bug note above)
    video_indices = list(range(len(dir_videos)))
    random.seed(seed)
    random.shuffle(video_indices)
    shuffled_video_dirs = [dir_videos[i] for i in video_indices]
    shuffled_labels = [label_videos[i] for i in video_indices]
    return shuffled_video_dirs, shuffled_labels
# Given a video file, decode it with ffmpeg, extract the frames and apply
# the training-time pre-processing.
def read_clips_from_video(dirname, model_settings):
    """Decode one video into a pre-processed clip array.

    Pipeline: probe the file, pick a random temporal window long enough for
    ``frames_per_batch`` frames at ``video_fps``, square-crop with a random
    translation, resize to ``crop_size`` x ``crop_size``, randomly flip
    horizontally, pad by repeating the last frame if the clip came up short,
    and subtract the dataset mean clip.

    Returns an array of shape (frames_per_batch, crop_size, crop_size, 3).

    NOTE(review): assumes ffprobe reports "duration" and "nb_frames" for
    stream 0 and that duration > 0 — containers missing those fields would
    raise KeyError/ZeroDivisionError here; confirm for the dataset in use.
    """
    # Input size for the network
    frames_per_batch = model_settings['frames_per_batch']
    video_fps = model_settings['video_fps']
    crop_size = model_settings['crop_size']
    np_mean = model_settings['np_mean']
    trans_max = model_settings['trans_max']
    # Data augmentation randoms: one coin flip for mirroring and one pixel
    # offset shared by the crop window below.
    horizontal_flip = random.random()
    trans_factor = random.randint(-trans_max, trans_max)
    # Video information from ffprobe
    probe = ffmpeg.probe(dirname)
    video_info = probe["streams"][0]
    video_width = video_info["width"]
    video_height = video_info["height"]
    video_duration = float(video_info["duration"])
    num_frame = int(video_info["nb_frames"])
    # Select which portion of the video will be input: the latest start
    # frame that still leaves frames_per_batch frames at the target fps.
    rand_max = int(num_frame - ((num_frame / video_duration) * (frames_per_batch / video_fps)))
    start_frame = random.randint(0, max(rand_max - 1, 0))
    # end_frame = ceil(start_frame + (num_frame / video_duration) * frames_per_batch / video_fps + 1)
    video_start = (video_duration / num_frame) * start_frame
    video_end = min(video_duration, video_start + ((frames_per_batch + 1) / video_fps))
    # Cropping factor: centered square of side min(w, h), shifted along the
    # longer axis by the random translation.
    x_pos = max(video_width - video_height + 2 * trans_factor, 0) // 2
    y_pos = max(video_height - video_width + 2 * trans_factor, 0) // 2
    crop_size1 = min(video_height, video_width)
    # Read only the specified time span of the video
    ff = ffmpeg.input(dirname, ss=video_start, t=video_end - video_start)
    # Trim video -> did not work :(
    # ff = ff.trim(end_frame='50')
    # Resample to the target frame rate
    ff = ffmpeg.filter(ff, 'fps', video_fps)
    # Crop to a square window
    ff = ffmpeg.crop(ff, x_pos, y_pos, crop_size1, crop_size1)
    # Subsample to the network input resolution
    ff = ffmpeg.filter(ff, 'scale', crop_size, crop_size)
    # Horizontal flip with probability 0.5
    if horizontal_flip > 0.5:
        ff = ffmpeg.hflip(ff)
    # Output raw RGB bytes to a pipe
    ff = ffmpeg.output(ff, 'pipe:',
                       format='rawvideo',
                       pix_fmt='rgb24')
    # Run process in quiet mode
    out, _ = ffmpeg.run(ff, capture_stdout=True, quiet=True)
    # Extract to numpy array
    video = np.frombuffer(out, np.uint8). \
        reshape([-1, crop_size, crop_size, 3])
    # Copies the last frame if # of frames < frames_per_batch, then
    # subtracts the mean (which also promotes the dtype to float).
    num_frames = video.shape[0]
    if num_frames < frames_per_batch:
        last_frame = video[-1]
        num_frame_repeat = frames_per_batch - num_frames
        # print('Frames repeated: ', num_frame_repeat)
        last_repeat = np.repeat(last_frame[np.newaxis],
                                num_frame_repeat,
                                axis=0)
        video = np.concatenate((video, last_repeat), axis=0) - np_mean
    else:
        video = video[:frames_per_batch] - np_mean
    return video
def get_frames_data(filename, frames_per_batch=16):
    """Load up to ``frames_per_batch`` consecutive frames from a directory
    of pre-extracted frame images, starting at a random temporal offset.

    Returns a list of HxWxC uint8 arrays (may be shorter than
    ``frames_per_batch`` if the directory holds fewer frames).

    NOTE(review): os.walk recurses into subdirectories, yet the image path
    below is always joined against the top-level ``filename`` — presumably
    each clip directory is flat; confirm before relying on nested layouts.
    """
    ret_arr = []
    for parent, dirnames, filenames in os.walk(filename):
        num_frames = len(filenames)
        # random temporal crop: pick a start so the window fits if possible
        start_max = max(0, num_frames - frames_per_batch)
        start_index = random.randint(0, start_max)
        end_index = min(start_index + frames_per_batch, num_frames)
        # frame ordering comes from the lexicographic sort of the filenames
        filenames = sorted(filenames)
        for i in range(start_index, end_index):
            image_name = str(filename) + '/' + str(filenames[i])
            img = Image.open(image_name)
            img_data = np.array(img)
            ret_arr.append(img_data)
    return ret_arr
def set_placeholders(model_settings):
    """Create the TF1 input placeholders and store them in *model_settings*.

    Adds 'images_placeholder' (float32 clip input — fully dynamic spatial
    shape when reading from frame images, fixed input_shape otherwise),
    'labels_placeholder' (scalar int64) and 'dropout_placeholder' (scalar
    with the configured dropout as its default value).
    """
    if model_settings['read_from_frames']:
        # frames come in at their native resolution; only channels are fixed
        clip_shape = (None, None, None, 3)
    else:
        clip_shape = model_settings['input_shape']
    model_settings['images_placeholder'] = tf.placeholder(
        tf.float32, shape=clip_shape, name="input_clip")
    model_settings['labels_placeholder'] = tf.placeholder(
        tf.int64, shape=(), name="labels")
    model_settings['dropout_placeholder'] = tf.placeholder_with_default(
        model_settings['dropout'], shape=())
    print('Finished setting placeholders..')
def process_frames(model_settings):
    """Build the TF1 pre-processing sub-graph for a stack of raw frames.

    Consumes the variable-shape images placeholder created by
    set_placeholders() and returns a (frames_per_batch, crop_size,
    crop_size, 3) tensor: randomly-translated square crop, bicubic resize,
    pad-by-repeating the last frame, random horizontal flip, and mean
    subtraction. Runs on the CPU so the GPU is free for the network.
    """
    with tf.name_scope('Frame_Process'), tf.device('/cpu:0'):
        images_placeholder = model_settings['images_placeholder']
        trans_max = model_settings['trans_max']
        crop_size = model_settings['crop_size']
        frames_per_batch = model_settings['frames_per_batch']
        np_mean = tf.convert_to_tensor(model_settings['np_mean'])
        clips_shape = tf.shape(images_placeholder)
        # NOTE(review): for a (frames, H, W, C) tensor, dim 1 is the height
        # and dim 2 the width — the names below look swapped. The crop is
        # square either way, but verify the translation axis is intended.
        video_width = clips_shape[1]
        video_height = clips_shape[2]
        # frames missing to reach frames_per_batch
        # (assumes the input has at most frames_per_batch frames; a longer
        # clip makes this negative and breaks tf.tile below — confirm)
        rem_frame = frames_per_batch - clips_shape[0]
        # random crop offset; tf.random.uniform's maxval is exclusive, so
        # the draw is in [-trans_max, trans_max)
        trans_factor = tf.random.uniform([1], -trans_max, trans_max, dtype=tf.int32)
        crop_size1 = tf.math.minimum(video_height, video_width)
        x_pos = tf.math.maximum(video_width - video_height + 2 * trans_factor, 0) // 2
        x_start, x_end = x_pos[0], x_pos[0] + crop_size1
        y_pos = tf.math.maximum(video_height - video_width + 2 * trans_factor, 0) // 2
        y_start, y_end = y_pos[0], y_pos[0] + crop_size1
        clips_cropped = images_placeholder[:, x_start:x_end, y_start:y_end]
        clips_interp = tf.image.resize_bicubic(clips_cropped, (crop_size, crop_size))
        # bicubic interpolation can overshoot; clamp back to valid pixels
        clips_interp = tf.clip_by_value(clips_interp, 0, 255)
        # pad short clips by repeating the final frame
        last_frame = clips_interp[-1]
        rem_frames = tf.tile(tf.expand_dims(last_frame, 0), [rem_frame, 1, 1, 1])
        final_clips = tf.concat([clips_interp, rem_frames], 0)
        # NOTE(review): random_flip_left_right on a 4-D tensor — confirm all
        # frames receive the same flip decision in the pinned TF version;
        # per-frame independent flips would corrupt the clip.
        final_clips = tf.image.random_flip_left_right(final_clips)
        final_clips -= np_mean
        return final_clips
def read_clips(dirnames, model_settings):
    """Thread worker: sequentially decode every clip in *dirnames*.

    BUG FIX: the original body called ``read_clip``, which is not defined
    anywhere in this notebook; the decoding routine defined above is
    ``read_clips_from_video``. Results are discarded — this worker exists
    only to exercise/benchmark the decoding path.
    """
    for dirname in dirnames:
        read_clips_from_video(dirname, model_settings)
# ---- Notebook experiment: visualize one pre-processed clip ----
train_dir_locations = model_settings['train_test_loc'] + model_settings['train_file_name']
dir_frames, labels = get_data_dir(train_dir_locations, True)
set_placeholders(model_settings)
frames = process_frames(model_settings)
# pick a random clip directory and push its frames through the graph
i = random.randint(0, 5000)
file_loc = model_settings['data_home'] + dir_frames[i]
imgs = get_frames_data(file_loc)
images_placeholder = model_settings['images_placeholder']
with tf.Session() as sess:
    # NOTE: rebinds `frames` from the graph tensor to the resulting ndarray
    frames = sess.run(frames, {images_placeholder: imgs})
plt.imshow(frames[0].astype(np.uint8))
plt.show()

# ---- Notebook experiment: time multi-threaded video decoding ----
# NOTE(review): `dir_videos` and `label_clips` are not defined in this
# notebook (only `dir_frames`/`labels` above) — presumably left over from an
# earlier session; confirm before running this cell.
# Split 40 videos into 8 groups of 5, one worker thread per group.
dirnames_threads = []
threads = []
for i in range(8):
    cur = []
    for j in range(5):
        read_index = 5 * i + j
        video_dir, label = dir_videos[read_index], label_clips[read_index]
        video_dir = model_settings['data_home'] + video_dir
        cur.append(video_dir)
    dirnames_threads.append(cur)
for i in range(8):
    dirnames = dirnames_threads[i]
    threads.append(threading.Thread(target=read_clips, args=(dirnames, model_settings)))
# wall-clock the parallel decode
time0 = time.time()
for i in range(8):
    threads[i].start()
for i in range(8):
    threads[i].join()
print('Time diff:', time.time() - time0)
# ---- Notebook experiment #1: manual ffmpeg decode of a single clip ----
# Replays the body of read_clips_from_video() step by step (without the
# translation augmentation) so intermediate values can be inspected.
# NOTE(review): `dir_videos` is not defined in this notebook (only
# `dir_frames` above) — presumably defined in an earlier session; confirm.
index = 150
dirname = model_settings['data_home'] + dir_videos[index]
frames_per_batch = model_settings['frames_per_batch']
video_fps = model_settings['video_fps']
crop_size = model_settings['crop_size']
np_mean = model_settings['np_mean']
horizontal_flip = random.random()
# probe the container for geometry and timing
probe = ffmpeg.probe(dirname)
video_info = probe["streams"][0]
video_width = video_info["width"]
video_height = video_info["height"]
video_duration = float(video_info["duration"])
num_frame = int(video_info["nb_frames"])
# random temporal window (note: no max(rand_max - 1, 0) guard here,
# unlike the function version above — short videos would crash randint)
rand_max = int(num_frame - ((num_frame / video_duration) * (frames_per_batch / video_fps)))
start_frame = random.randint(0, rand_max - 1)
end_frame = ceil(start_frame + (num_frame / video_duration) * frames_per_batch / video_fps + 1)
video_start = (video_duration / num_frame) * start_frame
video_end = video_start + ((frames_per_batch+1) / video_fps)
print(end_frame-start_frame, video_start, video_end)
# centered square crop (no random translation in this experiment)
x_pos = max(video_width - video_height, 0) // 2
y_pos = max(video_height - video_width, 0) // 2
crop_size1 = min(video_height, video_width)
# Input video restricted to the selected time span
ff = ffmpeg.input(dirname, ss=video_start, t=video_end-video_start)
# Trim video
#ff = ff.trim(end_frame='50')
# Resample to the target frame rate
ff = ffmpeg.filter(ff, 'fps', video_fps)
# Crop to a square window
ff = ffmpeg.crop(ff, x_pos, y_pos, crop_size1, crop_size1)
# Subsample to the network input resolution
ff = ffmpeg.filter(ff, 'scale', crop_size, crop_size)
# Horizontal flip with probability 0.5
if horizontal_flip > 0.5:
    ff = ffmpeg.hflip(ff)
# Output raw RGB bytes to a pipe
ff = ffmpeg.output(ff, 'pipe:',
                   format='rawvideo',
                   pix_fmt='rgb24')
# Run process in quiet mode
out, _ = ffmpeg.run(ff, capture_stdout=True, quiet=True)
# Extract to numpy array
video = np.frombuffer(out, np.uint8). \
    reshape([-1, crop_size, crop_size, 3])
# Subtracts the mean (promoting the dtype to float). Note this hard-codes
# 16 instead of frames_per_batch and does not pad short clips.
video = video[:16] - np_mean
#print(video.shape)

# ---- Notebook experiment #2: same setup, stops after computing the
# temporal window and crop geometry (no decode) ----
index = 150
dirname = model_settings['data_home'] + dir_videos[index]
frames_per_batch = model_settings['frames_per_batch']
video_fps = model_settings['video_fps']
crop_size = model_settings['crop_size']
np_mean = model_settings['np_mean']
horizontal_flip = random.random()
probe = ffmpeg.probe(dirname)
video_info = probe["streams"][0]
video_width = video_info["width"]
video_height = video_info["height"]
video_duration = float(video_info["duration"])
num_frame = int(video_info["nb_frames"])
rand_max = int(num_frame - ((num_frame / video_duration) * (frames_per_batch / video_fps)))
start_frame = random.randint(0, rand_max - 1)
end_frame = ceil(start_frame + (num_frame / video_duration) * frames_per_batch / video_fps + 1)
#end_frame = min(end_frame, num_frame)
x_pos = max(video_width - video_height, 0) // 2
y_pos = max(video_height - video_width, 0) // 2
crop_size1 = min(video_height, video_width)
```
| github_jupyter |
# DiFuMo (Dictionaries of Functional Modes)
<div class="alert alert-block alert-danger">
<b>NEW:</b> New in release 0.7.1
</div>
## Outline
- <a href="#descr">Description</a>
- <a href="#howto">How to use it?</a>
- <a href="#closer">Closer look on the object</a>
- <a href="#visualize">Visualize</a>
<span id="descr"></span>
## Description
- New atlas fetcher :func:`nilearn.datasets.fetch_atlas_difumo`
- Download statistical maps which can serve as atlases to extract functional signals with different dimensionalities (64, 128, 256, 512, and 1024)
- These modes are optimized to represent well raw BOLD time series, over a wide range of experimental conditions.
<span id="howto"></span>
## How to use it?
First of all, make sure you have nilearn >= 0.7.1 installed:
```
import nilearn
print(nilearn.__version__)
```
If this is verified, we should be able to export the difumo fetcher from the `datasets` module:
```
from nilearn.datasets import fetch_atlas_difumo
```
The documentation for this function can be seen on the website [here](http://nilearn.github.io/modules/generated/nilearn.datasets.fetch_atlas_difumo.html#nilearn.datasets.fetch_atlas_difumo) or thanks to the Jupyter magic command:
```
?fetch_atlas_difumo
```
Looking at the docstring, it looks like there are mainly two parameters to control the data we wish to download:
- dimension: this will be the number of functional maps of the atlas. It must be 64, 128, 256, 512, or 1024
- resolution: this enables to download atlas sampled either at 2mm or 3mm resolution
Let's try it:
```
difumo_64 = fetch_atlas_difumo(dimension=64, # Feel free to change these parameters!
resolution_mm=2)
```
This should have either downloaded the 64-component atlas sampled at 2mm from OSF, or simply grabbed the data from the nilearn cache if you have downloaded it already.
<span id="closer"></span>
## Closer look on the object
Like for any dataset in nilearn, the resulting object is a scikit-learn Bunch object with the following keys:
- description: string describing the dataset
- maps: the actual data
- labels: label information for the maps
```
type(difumo_64)
difumo_64.keys()
```
Reading the description before usage is always recommended:
```
print(difumo_64.description.decode()) # Note that description strings will soon be shipped as Python
                                      # strings, avoiding the annoying call to decode()...
```
Label information is directly available:
```
assert len(difumo_64.labels) == 64 # We have one label information tuple per component
difumo_64.labels[:6] # Print the first 6 label information
```
We can see that each component has:
- a label index going from 1 to 64
- a name
- a network (todo: explain)
- coordinates (todo: explain)
Finally, the actual data is a simple path to a nifti image on disk, which is the usual way to represent niimg in Nilearn:
```
difumo_64.maps
```
If you want to have a look at the actual data, you can open this image using the usual nilearn loading utilities:
```
from nilearn.image import get_data
raw_maps = get_data(difumo_64.maps) # raw_maps is a 4D numpy array holding the
raw_maps.shape # coefficients of the functional modes
```
<span id="visualize"></span>
## Visualize it
**Method 1**
Looking at probabilitic atlases can be done with the function `plot_prob_atlas` of the `plotting` module:
```
from nilearn.plotting import plot_prob_atlas
plot_prob_atlas(difumo_64.maps, title='DiFuMo 64')
```
**Method 2**
Another way to visualize the atlas is through the report of the `NiftiMapsMasker` object.
<div class="alert alert-block alert-danger">
<b>Danger:</b> This feature is under development and still not available in 0.7.1. I might remove this section if I don't submit my PR in time.
</div>
```
from nilearn.input_data import NiftiMapsMasker
masker = NiftiMapsMasker(difumo_64.maps)
masker
```
| github_jupyter |
# Name
Data processing by creating a cluster in Cloud Dataproc
# Label
Cloud Dataproc, cluster, GCP, Cloud Storage, KubeFlow, Pipeline
# Summary
A Kubeflow Pipeline component to create a cluster in Cloud Dataproc.
# Details
## Intended use
Use this component at the start of a Kubeflow Pipeline to create a temporary Cloud Dataproc cluster to run Cloud Dataproc jobs as steps in the pipeline.
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|----------|-------------|----------|-----------|-----------------|---------|
| project_id | The Google Cloud Platform (GCP) project ID that the cluster belongs to. | No | GCPProjectID | | |
| region | The Cloud Dataproc region to create the cluster in. | No | GCPRegion | | |
| name | The name of the cluster. Cluster names within a project must be unique. You can reuse the names of deleted clusters. | Yes | String | | None |
| name_prefix | The prefix of the cluster name. | Yes | String | | None |
| initialization_actions | A list of Cloud Storage URIs identifying executables to execute on each node after the configuration is completed. By default, executables are run on the master and all the worker nodes. | Yes | List | | None |
| config_bucket | The Cloud Storage bucket to use to stage the job dependencies, the configuration files, and the job driver console’s output. | Yes | GCSPath | | None |
| image_version | The version of the software inside the cluster. | Yes | String | | None |
| cluster | The full [cluster configuration](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster). | Yes | Dict | | None |
| wait_interval | The number of seconds to pause before polling the operation. | Yes | Integer | | 30 |
## Output
Name | Description | Type
:--- | :---------- | :---
cluster_name | The name of the cluster. | String
Note: You can recycle the cluster by using the [Dataproc delete cluster component](https://github.com/kubeflow/pipelines/tree/master/components/gcp/dataproc/delete_cluster).
## Cautions & requirements
To use the component, you must:
* Set up the GCP project by following these [steps](https://cloud.google.com/dataproc/docs/guides/setup-project).
* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the following types of access to the Kubeflow user service account:
* Read access to the Cloud Storage buckets which contains initialization action files.
* The role, `roles/dataproc.editor` on the project.
## Detailed description
This component creates a new Dataproc cluster by using the [Dataproc create cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/create).
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipeline SDK:
```
%%capture --no-stderr
!pip3 install kfp --upgrade
```
2. Load the component using KFP SDK
```
import kfp.components as comp
dataproc_create_cluster_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.4.0-rc.1/components/gcp/dataproc/create_cluster/component.yaml')
help(dataproc_create_cluster_op)
```
### Sample
Note: The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.
#### Set sample parameters
```
# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
# Optional Parameters
EXPERIMENT_NAME = 'Dataproc - Create Cluster'
```
#### Example pipeline that uses the component
```
import kfp.dsl as dsl
import json
# Pipeline definition: provisions a Dataproc cluster through the
# dataproc_create_cluster_op component loaded in the previous cell.
@dsl.pipeline(
    name='Dataproc create cluster pipeline',
    description='Dataproc create cluster pipeline'
)
def dataproc_create_cluster_pipeline(
    project_id = PROJECT_ID,
    region = 'us-central1',
    name='',
    name_prefix='',
    initialization_actions='',
    config_bucket='',
    image_version='',
    cluster='',
    wait_interval='30'
):
    """Single-step pipeline that creates a Cloud Dataproc cluster.

    The parameters mirror the component's runtime arguments documented
    above; empty-string defaults appear to mean "not set" for the
    component (TODO confirm against the component.yaml contract).
    """
    # Forward every pipeline parameter straight to the component op.
    dataproc_create_cluster_op(
        project_id=project_id,
        region=region,
        name=name,
        name_prefix=name_prefix,
        initialization_actions=initialization_actions,
        config_bucket=config_bucket,
        image_version=image_version,
        cluster=cluster,
        wait_interval=wait_interval)
```
#### Compile the pipeline
```
pipeline_func = dataproc_create_cluster_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
## References
* [Kubernetes Engine for Kubeflow](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts)
* [Component Python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py)
* [Component Docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/create_cluster/sample.ipynb)
* [Dataproc create cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/create)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| github_jupyter |
```
def string_adder(a = "", b = ""):
    """Return *a* and *b* joined by a single space.

    The original wrapped an already-string expression in a redundant
    ``str()`` call; an f-string expresses the same result directly and
    also tolerates non-string arguments.
    """
    return f"{a} {b}"
string_adder(a = "Michael", b = "Akinola")
#string_adder()
#string_adder("$", "1000")
# Define a long_word function that accepts a string.
# The function should return a Boolean that reflects whether the string has more than 7 characters.
#
def long_word(a_string):
    """Return True when *a_string* has more than 7 characters."""
    return len(a_string) > 7
# EXAMPLES:
long_word("Python") #=> False
# long_word("magnificent") => True
# Define a first_longer_than_second function that accepts two string arguments.
# The function should return a True if the first string is longer than the second
# and False otherwise (including if they are equal in length).
#
def first_longer_than_second(first, second):
    """Return True when *first* is strictly longer than *second*."""
    return len(first) > len(second)
# EXAMPLES:
#first_longer_than_second("Python", "Ruby") => True
first_longer_than_second("cat", "mouse") #=> False
# first_longer_than_second("Steven", "Seagal") => False
#Define a same_first_and_last_letter function that accepts a string as an argument.
# The function should return a True if the first and last character are equal, and False otherwise
# Assume the string will always have 1 or more characters.
#
def same_first_and_last_letter(word):
    """Return True when the first and last characters of *word* match.

    Assumes *word* has at least one character.
    """
    return word[0] == word[-1]
# EXAMPLES:
# same_first_and_last_letter("runner") => True
# same_first_and_last_letter("clock") => False
same_first_and_last_letter("q") #=> True
# same_first_and_last_letter("Runner") #=> False
# Define a three_number_sum function that accepts a 3-character string as an argument.
# The function should add up the sum of the digits of the string.
# HINT: You’ll have to figure out a way to convert the string-ified numbers to integers.
#
def three_number_sum(x):
    """Return the sum of the digit characters in string *x*.

    Generalized from the hard-coded three-index version to any number
    of digits; the original 3-character contract is unchanged.
    """
    return sum(int(digit) for digit in x)
# EXAMPLES:
# three_number_sum("123") => 6
three_number_sum("567") #=> 18
# three_number_sum("444") => 12
# three_number_sum("000") => 0
# Define a first_three_characters function that accepts a string argument.
# The function should return the first three characters of the string.
#
def first_three_characters(string):
    """Return the first three characters of *string*."""
    prefix = string[:3]
    return prefix
# EXAMPLES:
first_three_characters("dynasty") #=> "dyn"
# first_three_characters("empire") => "emp"
# Define a last_five_characters function that accepts a string argument.
# The function should return the last 5 characters of the string.
#
def last_five_characters(text):
    """Return the final five characters of *text* (the whole string
    when it is shorter than five characters)."""
    tail = text[-5:]
    return tail
# EXAMPLES:
last_five_characters("dynasty") #=> "nasty"
# last_fiee_characters("empire") => "mpire"
# Define a is_palindrome function that accepts a string argument.
# The function should return True if the string is spelled the same backwards as it is forwards.
# Return False otherwise.
#
def is_palindrome(word):
    """Return True when *word* reads the same forwards and backwards."""
    return word == word[::-1]
# EXAMPLES:
#is_palindrome("racecar") #=> True
is_palindrome("yummy") #=> False
# Define a vowel_count function that accepts a string argument.
# The function should return the count of vowels in the string.
# The 5 vowels are "a", "e", "i", "o", and "u".
# You can assume the string will be in all lowercase.
def vowel_count(a_string):
    """Count the lowercase vowels ("aeiou") appearing in *a_string*."""
    return sum(1 for character in a_string if character in "aeiou")
vowel_count("love")
def vowel_cou(text):
    """Print and return the number of vowels (either case) in *text*.

    Fixes the original parameter name ``str``, which shadowed the
    builtin, and returns the count in addition to printing it so the
    value is usable by callers.
    """
    vowels = set("aeiouAEIOU")
    count = sum(1 for character in text if character in vowels)
    print("No. of vowels:", count)
    return count
vowel_cou("accelerate")
# Define a find_my_letter function that accepts two arguments: a string and a character
# The function should return the first index position of the character in the string
# The function should return a -1 if the character does not exist in the string
def find_my_letter(a_string, a_char):
    """Return the first index of *a_char* within *a_string*, or -1
    when it does not occur."""
    if a_char in a_string:
        return a_string.index(a_char)
    return -1
find_my_letter("Michelle", "e")
# find_my_letter(a_string = "television", a_char = "e")
# Define a fancy_cleanup function that accepts a single string argument
# The function should clean up the whitespace on both sides of the
# argument. It should also replace every occurence of the letter "g" with the
# letter "z" and every occurence of a space with an exclamation point (!).
def fancy_cleanup(a_string):
    """Strip surrounding whitespace, then replace every "g" with "z"
    and every space with "!".

    Bug fix: the original swapped the replacement pairs, replacing
    " " with "e" and "!" with "g", contradicting the spec in the
    comments above this cell.
    """
    return a_string.strip().replace("g", "z").replace(" ", "!")
fancy_cleanup(" int lli!ent ")
# Define a even_or_odd function that accepts a single integer.
# If the integer is even, the function should return the string “even”.
# If the integer is odd, the function should return the string “odd”.
def even_or_odd(an_integer):
    """Return "even" for an even integer and "odd" otherwise."""
    return "even" if an_integer % 2 == 0 else "odd"
even_or_odd(23)
def truthy_or_falsy(an_argument):
    """Describe whether *an_argument* evaluates truthy or falsy."""
    verdict = "truthy" if an_argument else "falsy"
    return f"The value {an_argument} is {verdict}"
truthy_or_falsy("celebrate")
# truthy_or_falsy(an_argument = "")
# truthy_or_falsy(an_argument = "shaba")
# Define an up_and_down function that accepts a string argument
# If the string consists of all uppercase letters, return a new string
# consisting of all lowercase letters. If the string consists of all
# lowercase letters, return a new string consisting of all uppercase
# characters. If the string has a mix of uppercase and lowercase
# characters, return a new string where the casing of each letter is swapped.
def up_and_down(string):
    """Lowercase an all-uppercase string, uppercase an all-lowercase
    one, and swap the case of each letter in a mixed-case string."""
    upper_form = string.upper()
    lower_form = string.lower()
    if string == upper_form:
        return lower_form
    if string == lower_form:
        return upper_form
    return string.swapcase()
up_and_down("chocolate")
#up_and_down("CHOCOLATE")
#up_and_down("Chocolate")
# Declare a negative_energy function that accepts a numeric argument and returns its absolute value.
# The absolute value is the number's distance from zero.
def negative_energy(number):
    """Return the absolute value of *number* — its distance from zero."""
    magnitude = abs(number)
    return magnitude
negative_energy(12)
#negative_energy(-5)
# Define a divisible_by_three_and_four function that accepts a number as its argument.
# It should return True if the number is evenly divisible by both 3 and 4 . It should return False otherwise.
def divisible_by_three_and_four(number):
    """Return True when *number* divides evenly by both 3 and 4."""
    return number % 3 == 0 and number % 4 == 0
#divisible_by_three_and_four(12)
divisible_by_three_and_four(6)
# Declare a string_theory function that accepts a string as an argument.
# It should return True if the string has more than 3 characters and starts with a capital “S”. It should return False otherwise.
def string_theory(string):
    """Return True when *string* has more than 3 characters and
    begins with a capital "S"."""
    return len(string) > 3 and string.startswith("S")
string_theory(string = "Shade")
#string_theory(string = "Max")
#string_theory(string = "Sam")
#count down traditional
def count_down(final_number):
    """Print final_number, final_number - 1, ..., 1, one per line."""
    for value in range(final_number, 0, -1):
        print(value)
count_down(3)
#count down recurssion
def count_down(final_number):
    """Recursively print final_number down to 1, one per line."""
    if final_number > 0:
        print(final_number)
        count_down(final_number - 1)
count_down(3)
#traditional method
def reverse(str):
    """Return *str* reversed, built one character at a time.

    (Parameter name kept for interface compatibility; note it shadows
    the ``str`` builtin inside this function.)
    """
    reversed_string = ""
    for character in str:
        # prepending each character reverses the order
        reversed_string = character + reversed_string
    return reversed_string
reverse("Iron")
#recursion method
def reverse_d(str):
    """Recursively reverse *str*: last character first, then the
    reversal of the rest."""
    if len(str) > 1:
        return str[-1] + reverse_d(str[:-1])
    return str
#e.g
reverse_d("Einstein")
#factorial
def factorial(number_n):
    """Compute number_n! iteratively and print the result."""
    result = 1
    for factor in range(2, number_n + 1):
        result *= factor
    print("The factorial of", number_n, "is", result)
factorial(10)
# Factorial of a number using recursion
def factorial(n):
    """Return n! for a non-negative integer *n*, computed recursively.

    Bug fixes: the original only printed (and returned None) for
    n == 0 instead of returning 1, and likewise returned None after
    printing for negative inputs; negatives now raise ValueError.
    """
    if n < 0:
        raise ValueError("factorial does not exist for negative numbers")
    if n <= 1:
        # 0! and 1! are both 1 — the recursion's base case
        return 1
    return n * factorial(n - 1)
factorial(5)
# Define a function product_of_even_indices that accepts a list of numbers.
# The list will always have 6 total elements.
# The function should return the product (multiplied total) of all numbers at an even index (0, 2, 4).
def product_of_even_indices(num_list):
    """Return the product of the elements at even indices (0, 2, 4, ...).

    Generalized from the hard-coded ``[0] * [2] * [4]`` version to any
    sequence length; for the documented 6-element input the result is
    unchanged.
    """
    result = 1
    for value in list(num_list)[::2]:
        result *= value
    return result
product_of_even_indices(num_list = (1,2,3,4,5,6,))
# Define a function first_letter_of_last_string that accepts a list of strings.
# It should return one character — the first letter of the last string in the list.
# Assume the list will always have at least one string.
def first_letter_of_last_string(string):
    """Return the first character of the last string in the given
    collection of strings (assumed non-empty)."""
    strings = list(string)
    last_entry = strings[-1]
    return last_entry[0]
first_letter_of_last_string(string = ('ade', 'bambo', 'taiwo'))
# Define a split_in_two function that accepts a list and a number.
# If the number is even, return the list elements from the third element to the end of the list.
# If the number is odd, return the list elements from index 0 (inclusive) to 2 (exclusive)
def split_into_two(a_list, num):
    """Return a_list[2:] when *num* is even, else a_list[:2].

    Bug fix: the original returned ``a_list[-4:]`` for even *num*,
    which matches "from the third element to the end" only for
    6-element lists — the sample 8-element call returned the wrong
    slice.
    """
    items = list(a_list)
    if num % 2 == 0:
        return items[2:]
    return items[:2]
split_into_two((1,2,3,4,5,6,7,8), 8)
# split_into_two(a_list = (1,2,3,4,5,6,7,8), num = 7)
# Declare a nested_extraction function that accepts a list of lists and an index position.
# The function should use the index as the basis of finding both the nested list
# and the element from that list with the given index position
# You can assume the number of lists will always be equal to
# the number of elements within each of them.
def nested_extraction(lists, index):
    """Return element *index* from the nested list at position *index*
    in *lists* (assumes a square list-of-lists)."""
    inner = list(lists)[index]
    return inner[index]
nested_extraction(([1,2,3],[4,5,6],[7,8,9]), 2)
# nested_extraction(([1,2,3],[4,5,6],[7,8,9]), 1)
# nested_extraction(([1,2,3],[4,5,6],[7,8,9]), 0)
# Declare a beginning_and_end function that accepts a list of elements.
# It should return True if the first and last elements in the list are equal and False if they are unequal.
# Assume the list will always have at least 1 element.
def beginning_and_end(list_s):
    """Return True when the first and last elements of *list_s* are
    equal (assumes at least one element)."""
    return list_s[0] == list_s[-1]
beginning_and_end([1,2,3,4,5,1])
# beginning_and_end([1,2,3,4,5])
# Declare a long_word_in_collection function that accepts a list and a string.
# The function should return True if
# - the word exists in the list AND
# - the word has more than 4 characters.
#
# words = ["cat", "dog", "rhino"]
# long_word_in_collection(words, "rhino") => True
# long_word_in_collection(words, "cat") => False
# long_word_in_collection(words, "monkey") => False
def long_word_in_collection(a_list, string):
    """Return True when *string* is in *a_list* AND has more than 4
    characters.

    Bug fix: the original used ``>= 4``, wrongly accepting 4-letter
    words despite the "more than 4 characters" requirement stated in
    the comments above this cell.
    """
    return string in a_list and len(string) > 4
long_word_in_collection(['Mike', 'Sam', 'Ade', 'Wale'], 'Mike')
# long_word_in_collection(['Mike', 'Sam', 'Ade', 'Wale'], 'Sam')
# Declare a count_of_a function that accepts a list of strings.
# It should return a list with counts of how many “a” characters appear per string.
# Do NOT use list comprehension.
#
# count_of_a(["alligator", "aardvark", "albatross"] => [2, 3, 2]
# count_of_a(["plywood"]) => [0]
# count_of_a([]) => []
def count_of_a(letters):
    """Return how many "a" characters appear in the string *letters*."""
    return sum(1 for character in letters if character == "a")
letters = ["ade", "mike", "sam", "alligator"]
print(list(map(count_of_a, letters)))
def only_odds(odds):
    """Return True when *odds* is an odd number."""
    return bool(odds % 2)
odds = [1,2,3,4,5]
print(list(map(only_odds, odds)))
def count_of_a(letters):
    """Return the number of "a" characters in the string *letters*."""
    total = 0
    for character in letters:
        if character == "a":
            total += 1
    return total
letters = ["alligator", "aardvark", "albatross"]
print(list(map(count_of_a, letters)))
#lambda function
metals = ["gold", "silver", "platinum", "palladium"]
print(list(filter(lambda metal: len(metal) > 5, metals)))
print(list(filter(lambda element: len(element) < 4, metals)))
print(list(filter(lambda word: "p" in word, metals)))
print(list(map(lambda word: word.count("l"), metals)))
print(list(map(lambda val: val.replace("s", "$"), metals)))
#list comprehension
animals = ["elephant", "horse", "cat", "giraffe", "cheetah", "dog"]
long_words = [animal for animal in animals if len(animal) > 5]
print(long_words)
#filter function
def is_long_animal(animal):
    """Return True when the animal name is more than 5 characters."""
    return len(animal) >= 6
print(list(filter(is_long_animal, animals)))
#list comprehension
numbers = [4, 8, 15, 16, 23, 42]
cubes = [number ** 3 for number in numbers]
print(cubes)
#map function
def cube(number):
    """Return *number* raised to the third power."""
    result = number ** 3
    return result
print(list(map(cube, numbers)))
animals = ["cat", "bear", "zebra", "donkey", "cheetah"]
print(list(map(len, animals)))
# Declare a greater_sum function that accepts two lists of numbers.
# It should return the list with the greatest sum.
# You can assume the lists will always have different sums.
#
# greater_sum([1, 2, 3], [1, 2, 4]) => [1, 2, 4]
# greater_sum([4, 5], [2, 3, 6]) => [2, 3, 6]
# greater_sum([1], []) => [1]
def greater_sum(list_1, list_2):
    """Return whichever list has the greater sum (the sums are
    assumed never to be equal)."""
    total_1 = sum(list_1)
    total_2 = sum(list_2)
    return list_1 if total_1 > total_2 else list_2
greater_sum([1,2,3,4], [1,2,3,5])
# Declare a sum_difference function that accepts two lists of numbers.
# It should return the difference between the sum of values in the first and the second one
#
# sum_difference([1, 2, 3], [1, 2, 4]) => 6 - 7 => -1
# sum_difference([4, 5], [2, 3, 6]) => 9 - 11 => -2
# sum_difference([1], []) => 1
def sum_difference(list_a, list_b):
    """Return sum(list_a) minus sum(list_b)."""
    difference = sum(list_a) - sum(list_b)
    return difference
sum_difference([1,2,3,4], [1,2,3,5])
def product(numbers):
    """Return the product of all values in *numbers* (1 when empty)."""
    result = 1
    for value in list(numbers):
        result = result * value
    return result
product([1, 2, 3]) #=> 6
# product([4, 5, 6, 7]) #=> 840
# product([10]) #=> 10
# Define a smallest_number function that accepts a list of numbers.
# It should return the smallest value in the list.
def smallest_number(numbers):
    """Return the minimum value in the non-empty list *numbers*."""
    smallest = numbers[0]
    for candidate in numbers[1:]:
        if candidate < smallest:
            smallest = candidate
    return smallest
smallest_number([1, 2, 3]) #=> 1
# smallest_number([3, 2, 1]) #=> 1
# smallest_number([4, 5, 4]) #=> 4
# smallest_number([-3, -2, -1]) #=> -3
```
| github_jupyter |
```
# Code borrowed from sklean
# https://scikit-learn.org/stable/auto_examples/applications/plot_topics_extraction_with_nmf_lda.html
# Author: Luke Kumar
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
import pyLDAvis
import pyLDAvis.sklearn
pyLDAvis.enable_notebook()
import pickle
```
## Params
```
n_samples = None # 2000
n_features = 10000
n_components = 25
n_top_words = 20
```
# Data Loading
```
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
if n_samples is not None:
data_samples = dataset.data[:n_samples]
else:
data_samples = dataset.data
data_samples[0]
len(data_samples)
```
# Encode Text
```
# max_df : float in range [0.0, 1.0] or int, default=1.0
# When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold
# (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None.
# min_df : float in range [0.0, 1.0] or int, default=1
# When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold.
# This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents,
# integer absolute counts. This parameter is ignored if vocabulary is not None.
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
```
# LDA Model
```
lda = LatentDirichletAllocation(n_components=n_components, max_iter=100,
learning_method = 'batch', #'online',
random_state=0, verbose=0, n_jobs=-1,
mean_change_tol=0.001)
lda.fit(tf)
# save model
pickle.dump(lda, open('lda.pkl', 'wb'))
def print_top_words(model, feature_names, n_top_words):
    """Print the *n_top_words* highest-weighted terms of each topic.

    *model* is a fitted decomposition model exposing ``components_``
    (one weight row per topic); *feature_names* maps column index to
    vocabulary term.
    """
    for topic_idx, topic in enumerate(model.components_):
        # argsort is ascending; walk the last n_top_words entries backwards
        top_indices = topic.argsort()[:-n_top_words - 1:-1]
        top_terms = " ".join(feature_names[i] for i in top_indices)
        print(f"Topic #{topic_idx}: {top_terms}")
    print()
print_top_words(lda, tf_vectorizer.get_feature_names(), n_top_words)
pyLDAvis.sklearn.prepare(lda, tf, tf_vectorizer)
```
| github_jupyter |
# Tutorial 06: Networks from OpenStreetMap
In this tutorial, we discuss how networks that have been imported from OpenStreetMap can be integrated and run in Flow. This will all be presented via the Bay Bridge network, seen in the figure below. Networks from OpenStreetMap are commonly used in many traffic simulators for the purposes of replicating traffic in realistic traffic geometries. This is true in both SUMO and Aimsun (which are both supported in Flow), with each supporting several techniques for importing such network files. This process is further simplified and abstracted in Flow, with users simply required to specify the path to the osm file in order to simulate traffic in the network.
<img src="img/bay_bridge_osm.png" width=750>
<center> **Figure 1**: Snapshot of the Bay Bridge from OpenStreetMap </center>
Before we begin, let us import all relevant Flow parameters as we have done for previous tutorials. If you are unfamiliar with these parameters, you are encouraged to review tutorial 1.
```
# the TestEnv environment is used to simply simulate the network
from flow.envs import TestEnv
# the Experiment class is used for running simulations
from flow.core.experiment import Experiment
# all other imports are standard
from flow.core.params import VehicleParams
from flow.core.params import NetParams
from flow.core.params import InitialConfig
from flow.core.params import EnvParams
from flow.core.params import SumoParams
```
## 1. Running a Default Simulation
In order to create a network object in Flow with network features depicted from OpenStreetMap, we will use the base `Network` class. This class can sufficiently support the generation of any .osm file.
```
from flow.networks import Network
```
In order to recreate the network features of a specific osm file, the path to the osm file must be specified in `NetParams`. For this example, we will use an osm file extracted from the section of the Bay Bridge as depicted in Figure 1.
In order to specify the path to the osm file, simply fill in the `osm_path` attribute with the path to the .osm file as follows:
```
net_params = NetParams(
osm_path='networks/bay_bridge.osm'
)
```
Next, we create all other parameters as we have in tutorials 1 and 2. For this example, we will assume a total of 1000 vehicles are uniformly spread across the Bay Bridge. Once again, if the choice of parameters is unclear, you are encouraged to review Tutorial 1.
```
# create the remainding parameters
env_params = EnvParams()
sim_params = SumoParams(render=True)
initial_config = InitialConfig()
vehicles = VehicleParams()
vehicles.add('human', num_vehicles=100)
# create the network
network = Network(
name='bay_bridge',
net_params=net_params,
initial_config=initial_config,
vehicles=vehicles
)
```
We are finally ready to test our network in simulation. In order to do so, we create an `Experiment` object and run the simulation for a number of steps. This is done in the cell below.
```
# create the environment
env = TestEnv(
env_params=env_params,
sim_params=sim_params,
network=network
)
# run the simulation for 1000 steps
exp = Experiment(env=env)
exp.run(1, 1000)
```
## 2. Customizing the Network
While the above example does allow you to view the network within Flow, the simulation is limited for two reasons. For one, vehicles are placed on all edges within the network; if we wished to simulate traffic solely on the bridge and do not care about the arterials, for instance, this would result in unnecessary computational burdens. Next, as you may have noticed if you ran the above example to completion, routes in the base network class are defaulted to consist of the vehicles' current edges only, meaning that vehicles exit the network as soon as they reach the end of the edge they originated on. In the next subsections, we discuss how the network can be modified to resolve these issues.
### 2.1 Specifying Traversable Edges
In order to limit the edges vehicles are placed on to the road sections edges corresponding to the westbound Bay Bridge, we define an `EDGES_DISTRIBUTION` variable. This variable specifies the names of the edges within the network that vehicles are permitted to originated in, and is assigned to the network via the `edges_distribution` component of the `InitialConfig` input parameter, as seen in the code snippet below. Note that the names of the edges can be identified from the .osm file or by right clicking on specific edges from the SUMO gui (see the figure below).
<img src="img/osm_edge_name.png" width=600>
<center> **Figure 2**: Name of an edge from SUMO </center>
```
# we define an EDGES_DISTRIBUTION variable with the edges within
# the westbound Bay Bridge
EDGES_DISTRIBUTION = [
"11197898",
"123741311",
"123741303",
"90077193#0",
"90077193#1",
"340686922",
"236348366",
"340686911#0",
"340686911#1",
"340686911#2",
"340686911#3",
"236348361",
"236348360#0",
"236348360#1"
]
# the above variable is added to initial_config
new_initial_config = InitialConfig(
edges_distribution=EDGES_DISTRIBUTION
)
```
### 2.2 Creating Custom Routes
Next, we choose to specify the routes of vehicles so that they can traverse the entire Bay Bridge, instead of only the edge they are currently on. In order to do this, we create a new network class that inherits all its properties from `Network` and simply redefine the routes by modifying the `specify_routes` variable. This method was originally introduced in Tutorial 07: Creating Custom Network. The new network class looks as follows:
```
# we create a new network class to specify the expected routes
class BayBridgeOSMNetwork(Network):
    """Bay Bridge network whose routes carry each vehicle from its
    starting edge through to the end of the westbound bridge."""

    # Westbound edges in driving order. Every route in the original
    # hand-written table is exactly the suffix of this list beginning
    # at the vehicle's current edge.
    WESTBOUND_EDGES = [
        "11197898",
        "123741311",
        "123741303",
        "90077193#0",
        "90077193#1",
        "340686922",
        "236348366",
        "340686911#0",
        "340686911#1",
        "340686911#2",
        "340686911#3",
        "236348361",
        "236348360#0",
        "236348360#1",
    ]

    def specify_routes(self, net_params):
        """Map each edge to the remainder of the westbound edge
        sequence, starting at that edge."""
        edges = self.WESTBOUND_EDGES
        return {edge: edges[start:] for start, edge in enumerate(edges)}
```
### 2.3 Rerunning the Simulation
We are now ready to rerun the simulation with fully defined vehicle routes and a limited number of traversable edges. If we run the cell below, we can see the new simulation in action.
```
# create the network
new_network = BayBridgeOSMNetwork(
name='bay_bridge',
net_params=net_params,
initial_config=new_initial_config,
vehicles=vehicles,
)
# create the environment
env = TestEnv(
env_params=env_params,
sim_params=sim_params,
network=new_network
)
# run the simulation for 1000 steps
exp = Experiment(env=env)
exp.run(1, 10000)
```
## 3. Other Tips
This tutorial introduces how to incorporate OpenStreetMap files in Flow. This feature, however, does not negate other features that are introduced in other tutorials and documentation. For example, if you would like to not have vehicles be originated side-by-side within a network, this can still be done by specifying a "random" spacing for vehicles as follows:
initial_config = InitialConfig(
spacing="random",
edges_distribution=EDGES_DISTRIBUTION
)
In addition, inflows of vehicles can be added to networks imported from OpenStreetMap as they are for any other network (see the tutorial on adding inflows for more on this).
| github_jupyter |
<table width="100%"> <tr>
<td style="background-color:#ffffff;">
<a href="http://qworld.lu.lv" target="_blank"><img src="../images/qworld.jpg" width="35%" align="left"> </a></td>
<td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
prepared by Abuzer Yakaryilmaz (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
<br>
Özlem Salehi | July 6, 2019 (updated)
</td>
</tr></table>
<table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
<h2> <font color="blue"> Solutions for </font>Rotation Automata</h2>
<a id="task1"></a>
<h3> Task 1 </h3>
Do the same task given above by using different angles.
Test at least three different angles.
Please modify the code above.
<h3>Solution</h3>
Any odd multiple of $ \frac{\pi}{16} $ works: $ i \frac{\pi}{16} $, where $ i \in \{1,3,5,7,\ldots\} $
<a id="task2"></a>
<h3> Task 2 </h3>
Let $ \mathsf{p} = 11 $.
Determine an angle of rotation such that when the length of stream is a multiple of $ \sf p $, then we observe only state $ 0 $, and we can also observe state $ 1 $, otherwise.
Test your rotation by using a quantum circuit. Execute the circuit for all streams of lengths from 1 to 11.
<h3>Solution</h3>
We can pick any angle $ k\frac{2\pi}{11} $ for $ k \in \{1,\ldots,10\} $.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
from random import randrange

# Pick a random rotation angle of the form k*(2*pi/11), k in {1,...,10}.
r = randrange(1,11)
print("the picked angle is",r,"times of 2pi/11")
print()
# BUG FIX: the angle must be r*2*pi/11 to match both the printed message and
# the solution text above; the original code used r*pi/11 (half the claimed angle).
theta = r*2*pi/11

# we read streams of length from 1 to 11
for i in range(1,12):
    # quantum circuit with one qubit and one bit
    qreg = QuantumRegister(1)
    creg = ClassicalRegister(1)
    mycircuit = QuantumCircuit(qreg,creg)
    # the stream of length i: apply one rotation for each symbol
    for j in range(i):
        mycircuit.ry(2*theta,qreg[0])
    # no measurement here: we inspect the overall unitary instead
    #mycircuit.measure(qreg[0],creg[0])
    # run on the unitary simulator to read off the circuit's matrix
    job = execute(mycircuit,Aer.get_backend('unitary_simulator'))
    u = job.result().get_unitary(mycircuit,decimals=3)
    # print the unitary matrix in a right-aligned format
    # (use row/col so the outer loop variables i and j are not shadowed)
    for row in range(len(u)):
        s = ""
        for col in range(len(u)):
            val = str(u[row][col].real)
            while len(val) < 8:
                val = " " + val
            s = s + val
        print(s)
```
<a id="task3"></a>
<h3> Task 3 </h3>
List down 10 possible different angles for Task 2, where each angle should be between 0 and $2\pi$.
<h3>Solution</h3>
Any angle $ k\frac{2\pi}{11} $ for $ k \in \{1,\ldots,10\} $.
<h3>Task 4</h3>
For each stream of length from 1 to 10, experimentally determine the best angle of rotation (the one for which we observe state $\ket{1}$ the most) by using your circuit.
<h3>Solution</h3>
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
from random import randrange

# For each stream length 1..10, experimentally find which angle k*(2*pi/11),
# k = 1..10, makes the outcome '1' most likely.
for i in range(1,11):
    # track the best k seen so far for this stream length
    number_of_one_state = 0
    best_k = 1
    all_outcomes_for_i = "length "+str(i)+"-> "
    for k in range(1,11):
        theta = k*2*pi/31 if False else k*2*pi/11  # angle k*(2*pi/11)
        # quantum circuit with one qubit and one bit
        qreg = QuantumRegister(1)
        creg = ClassicalRegister(1)
        mycircuit = QuantumCircuit(qreg,creg)
        # the stream of length i: apply one rotation for each symbol
        for j in range(i):
            mycircuit.ry(2*theta,qreg[0])
        # we measure after reading the whole stream
        mycircuit.measure(qreg[0],creg[0])
        # execute the circuit 10000 times
        job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=10000)
        counts = job.result().get_counts(mycircuit)
        # ROBUSTNESS: '1' may be absent from counts if never observed,
        # so use .get with a default instead of counts['1'] (KeyError)
        ones = counts.get('1', 0)
        all_outcomes_for_i = all_outcomes_for_i + str(k) + ":" + str(ones) + " "
        if ones > number_of_one_state:
            number_of_one_state = ones
            best_k = k
    print(all_outcomes_for_i)
    print("for length",i,", the best k is",best_k)
    print()
<a id="task5"></a>
<h3> Task 5 </h3>
Let $ \mathsf{p} = 31 $.
Create a circuit with three quantum bits and three classical bits.
Rotate the qubits with angles $ 3\frac{2\pi}{31} $, $ 7\frac{2\pi}{31} $, and $ 11\frac{2\pi}{31} $, respectively.
Execute your circuit for all streams of lengths from 1 to 30. Check whether the number of state $ \ket{000} $ is less than half or not.
<i>Note that whether a key is in dictionary or not can be checked as follows:</i>
```python
if '000' in counts.keys():
c = counts['000']
else:
c = 0
```
<h3>Solution</h3>
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
from random import randrange

# the fixed angles of rotations: 3, 7 and 11 times (2*pi/31)
theta1 = 3*2*pi/31
theta2 = 7*2*pi/31
theta3 = 11*2*pi/31

# we read streams of length from 1 to 31
# (length 31 is a multiple of p, so '000' should then be observed with certainty)
for i in range(1,32):
    # quantum circuit with three qubits and three bits
    qreg = QuantumRegister(3)
    creg = ClassicalRegister(3)
    mycircuit = QuantumCircuit(qreg,creg)
    # the stream of length i
    for j in range(i):
        # apply rotations for each symbol
        mycircuit.ry(2*theta1,qreg[0])
        mycircuit.ry(2*theta2,qreg[1])
        mycircuit.ry(2*theta3,qreg[2])
    # we measure after reading the whole stream
    mycircuit.measure(qreg,creg)
    # execute the circuit N times
    N = 1000
    job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=N)
    counts = job.result().get_counts(mycircuit)
    print(counts)
    # '000' may be absent from counts if it is never observed
    c = counts.get('000', 0)
    print('000 is observed',c,'times out of',N)
    percentage = round(c/N*100,1)
    print("the ratio of 000 is ",percentage,"%")
    print()
<a id="task6"></a>
<h3> Task 6 </h3>
Let $ \mathsf{p} = 31 $.
Create a circuit with three quantum bits and three classical bits.
Rotate the qubits with random angles of the form $ k\frac{2\pi}{31}, $ where $ k
\in \{1,\ldots,30\}.$
Execute your circuit for all streams of lengths from 1 to 30.
Calculate the maximum percentage of observing the state $ \ket{000} $.
Repeat this task a few times.
<i>Note that whether a key is in dictionary or not can be checked as follows:</i>
```python
if '000' in counts.keys():
c = counts['000']
else:
c = 0
```
<h3>Solution</h3>
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
from random import randrange

# Pick the three random angles ONCE, so that the k1,k2,k3 printed below really
# are the angles used for every stream length.
k1 = randrange(1,31)
theta1 = k1*2*pi/31
k2 = randrange(1,31)
theta2 = k2*2*pi/31
k3 = randrange(1,31)
theta3 = k3*2*pi/31
print("k1 =",k1,"k2 =",k2,"k3 =",k3)
print()

max_percentage = 0
# we read streams of length from 1 to 30
for i in range(1,31):
    # BUG FIX: the original re-randomized k1,k2,k3 here on every iteration,
    # so the angles printed above were never the angles actually used.
    # quantum circuit with three qubits and three bits
    qreg = QuantumRegister(3)
    creg = ClassicalRegister(3)
    mycircuit = QuantumCircuit(qreg,creg)
    # the stream of length i
    for j in range(i):
        # apply rotations for each symbol
        mycircuit.ry(2*theta1,qreg[0])
        mycircuit.ry(2*theta2,qreg[1])
        mycircuit.ry(2*theta3,qreg[2])
    # we measure after reading the whole stream
    mycircuit.measure(qreg,creg)
    # execute the circuit N times
    N = 1000
    job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=N)
    counts = job.result().get_counts(mycircuit)
    # '000' may be absent from counts if it is never observed
    c = counts.get('000', 0)
    percentage = round(c/N*100,1)
    if max_percentage < percentage: max_percentage = percentage
print("max percentage is",max_percentage)
<a id="task7"></a>
<h3> Task 7 </h3>
Repeat Task 6 by using four and five qubits.
<h3>Solution</h3>
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
from random import randrange

# number of qubits to use (Task 7 asks for four and five)
number_of_qubits = 4
#number_of_qubits = 5

# randomly picked angles of rotations, one per qubit
theta = []
for i in range(number_of_qubits):
    k = randrange(1,31)
    print("k",str(i),"=",k)
    theta += [k*2*pi/31]
# print(theta)

# the all-zeros outcome label, e.g. '0000' for four qubits
zeros = '0' * number_of_qubits
print("zeros = ",zeros)
print()

max_percentage = 0
# we read streams of length from 1 to 30
for i in range(1,31):
    # quantum circuit with qubits and bits
    qreg = QuantumRegister(number_of_qubits)
    creg = ClassicalRegister(number_of_qubits)
    mycircuit = QuantumCircuit(qreg,creg)
    # the stream of length i
    for j in range(i):
        # apply rotations for each symbol
        for k in range(number_of_qubits):
            mycircuit.ry(2*theta[k],qreg[k])
    # we measure after reading the whole stream
    mycircuit.measure(qreg,creg)
    # execute the circuit N times
    N = 1000
    job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=N)
    counts = job.result().get_counts(mycircuit)
    # the all-zeros outcome may be absent from counts; default to 0
    c = counts.get(zeros, 0)
    percentage = round(c/N*100,1)
    if max_percentage < percentage: max_percentage = percentage
print("max percentage is",max_percentage)
| github_jupyter |
# Walk through the NIRCam imaging WCS pipeline: a roundtrip of values through the coordinate frame transforms
```
import jwst
# Show which version of the jwst pipeline is in use (displayed as cell output)
jwst.__version__
from astropy.io import fits
from jwst import assign_wcs
from jwst.datamodels import image
# add in the columns for ra and dec min/max points, translated from the wcs object for now
direct_data='test_disperse_f335m_rate_updated.fits' # original image provided for testing
# We will open the direct image as an Image datamodel
direct_image = image.ImageModel(direct_data)
```
### Some basics about this image
```
# Key instrument metadata for this exposure: pupil, filter, module, detector, channel
direct_image.meta.instrument.pupil, direct_image.meta.instrument.filter, direct_image.meta.instrument.module, direct_image.meta.instrument.detector, direct_image.meta.instrument.channel
```
### This is the FITS WCS information
```
# Display the FITS WCS attached to the image (for comparison with the GWCS below)
direct_image.get_fits_wcs()
# Load up the assign_wcs step that will assign all the transforms from world->detector
assign_wcs_step=assign_wcs.AssignWcsStep()
# All WCS reference-file types the step knows about; only some apply to NIRCam imaging
reference_file_types = ['distortion', 'filteroffset', 'specwcs', 'regions',
                        'wavelengthrange', 'camera', 'collimator',
                        'disperser', 'fore', 'fpa', 'msa', 'ote', 'ifupost',
                        'ifufore', 'ifuslicer']
reference_file_names = {}
# Ask CRDS for the reference files that apply to the image we are working with
for name in reference_file_types:
    reffile = assign_wcs_step.get_reference_file(direct_image, name)
    reference_file_names[name] = reffile if reffile else ""
reference_file_names
# Run the step; the returned model carries the GWCS object at meta.wcs
direct_gwcs = assign_wcs_step(direct_image)
```
### Some information about where the transforms are centered
```
# Reference pixel (CRPIX1/2) and its sky position (CRVAL1/2): where the transforms are anchored
direct_gwcs.meta.wcsinfo.crpix1, direct_gwcs.meta.wcsinfo.crpix2, direct_gwcs.meta.wcsinfo.crval1, direct_gwcs.meta.wcsinfo.crval2
```
### The GWCS object that contains all the transforms is now attached to the image model
```
# Display the GWCS object: the pipeline of coordinate frames and transforms
direct_gwcs.meta.wcs
```
### Check the transform from detector pixels to sky coordinates in decimal degrees of RA and DEC
The default transform goes from detector pixels to sky coordinate (ra,dec)
```
# Calling the WCS object applies the default detector -> world transform
direct_gwcs.meta.wcs(110,110)
# The same transform fetched explicitly by frame names
detector_to_world = direct_gwcs.meta.wcs.get_transform('detector','world')
detector_to_world(110,110)
```
### Now get the inverse transform from RA,DEC to detector pixels, using the RA,DEC we just calculated
This should return the pixel (110,110)
```
# Inverse transform; if the roundtrip is exact this returns the original pixel (110,110)
world_to_detector = direct_gwcs.meta.wcs.get_transform('world','detector')
world_to_detector(0.00804448203007923, -0.007899731808577077)
```
### Let's check the other transforms to make sure it's just the distortion reference file that is off
```
# List the intermediate coordinate frames in the WCS pipeline
direct_gwcs.meta.wcs.available_frames
# world <-> v2v3 roundtrip: this leg does NOT go through the distortion reference file
world_to_v2v3 = direct_gwcs.meta.wcs.get_transform('world','v2v3')
world_to_v2v3(0.00804448203007923, -0.007899731808577077) # degrees
v2v3_to_world = direct_gwcs.meta.wcs.get_transform('v2v3','world')
v2v3_to_world(149.63161618088085, -555.8266943126895) # arcseconds
```
### The following transforms only goes through the distortion reference file, it can't seem to return the original detector coordinates
```
# detector <-> v2v3 goes only through the distortion reference file;
# a failed roundtrip here isolates the problem to that file's model
detector_to_v2v3 = direct_gwcs.meta.wcs.get_transform('detector','v2v3')
detector_to_v2v3(110, 110)
v2v3_to_detector = direct_gwcs.meta.wcs.get_transform('v2v3','detector')
v2v3_to_detector(149.63161618088085, -555.8266943126896)
```
### The transform across the distortion image is not able to reproduce values roundtripping
Let's check if we can reproduce the anchor point of the distortion, the value at CRPIX1, CRPIX2
```
# Unpack the anchor values for the roundtrip checks below
crpix1, crpix2, crval1, crval2=direct_gwcs.meta.wcsinfo.crpix1, direct_gwcs.meta.wcsinfo.crpix2, direct_gwcs.meta.wcsinfo.crval1, direct_gwcs.meta.wcsinfo.crval2
```
#### Check the roundtrip of crpix1,crpix2 from detector <-> v2v3
This also uses the distortion reference file
```
# Roundtrip the reference pixel itself through the distortion model
crpix1, crpix2
detector_to_v2v3(crpix1, crpix2)
v2v3_to_detector(120.63991845281103, -527.3565915096161)
```
#### Check the roundtrip of crval1,crval2 from world <-> v2v3
```
# Roundtrip the reference sky position through the (distortion-free) world<->v2v3 leg
crval1, crval2
world_to_v2v3(crval1, crval2)
v2v3_to_world(120.67137599999998, -527.387665)
```
## The above examples convince me that the distortion reference image, specifically `jwst_nircam_distortion_0061.asdf` has 2D variations that make it impossible to compute the correct detector pixel coordinates given a position on the sky.
### It's possible that the *incorrect* distortion reference image is being returned from CRDS for the image, let's have a look at the RMAP that is being used
## This is the current rmap in use
https://jwst-crds.stsci.edu/browse/jwst_nircam_distortion_0018.rmap
It's checking these values:
'parkey' : (('META.EXPOSURE.TYPE', 'META.INSTRUMENT.DETECTOR', 'META.INSTRUMENT.CHANNEL', 'META.INSTRUMENT.PUPIL', 'META.INSTRUMENT.FILTER'),
('META.OBSERVATION.DATE', 'META.OBSERVATION.TIME')),
'reference_to_dataset' : {
'CHANNEL' : 'META.INSTRUMENT.CHANNEL',
'DETECTOR' : 'META.INSTRUMENT.DETECTOR',
'EXP_TYPE' : 'META.EXPOSURE.TYPE',
```
# The metadata values CRDS matches against the rmap parkeys listed above
direct_image.meta.exposure.type, direct_image.meta.instrument.detector, direct_image.meta.instrument.channel, direct_image.meta.instrument.pupil, direct_image.meta.instrument.filter
```
### For the values specified above, the RMAP shows matching as:
('NRC_IMAGE|NRC_TSIMAGE|NRC_FLAT|NRC_LED|NRC_WFSC|NRC_GRISM|NRC_TSGRISM|NRC_FOCUS',
'NRCA1',
'SHORT',
'CLEAR|F162M|F164N|GDHS0|GDHS60|WLM8|WLP8|PINHOLES|MASKIPR',
'N/A') : UseAfter({'2014-10-01 00:00:00' : 'jwst_nircam_distortion_0061.asdf',
}),
So the reference file matches regardless of which FILTER is specified (the rmap lists N/A for it); it only cares about the pupil and the detector specification
## Let's try the same thing with a different image, this one is taken from the latest NIRCAM simulations
It has a differently populated FITS WCS information, and specifies a different filter and detector
```
direct_data='V54321001002P000000000110d_A5_F444W_rate.fits' # most recent simulation for testing
# We will open the direct image as an Image datamodel
direct_image = image.ImageModel(direct_data)
# Key instrument metadata: this image uses a different filter and detector
direct_image.meta.instrument.pupil, direct_image.meta.instrument.filter,direct_image.meta.instrument.module, direct_image.meta.instrument.detector, direct_image.meta.instrument.channel
reference_file_names = {}
# Ask CRDS for the reference files that apply to the image we are working with
for name in reference_file_types:
    reffile = assign_wcs_step.get_reference_file(direct_image, name)
    reference_file_names[name] = reffile if reffile else ""
reference_file_names
# FITS WCS of the new image, then run assign_wcs and show the GWCS
direct_image.get_fits_wcs()
direct_gwcs = assign_wcs_step(direct_image)
direct_gwcs.meta.wcs
```
### Check the transform from detector pixels to sky coordinates in decimal degrees of RA and DEC
The default transform goes from detector pixels to sky coordinate (ra,dec)
```
# Default detector -> world transform for the second image
direct_gwcs.meta.wcs(110,110)
detector_to_world = direct_gwcs.meta.wcs.get_transform('detector','world')
detector_to_world(110,110)
```
### Now get the inverse transform from RA,DEC to detector pixels, using the RA,DEC we just calculated
This should return the pixel (110,110)
```
# Inverse transform; should return the original pixel (110,110) if exact
world_to_detector = direct_gwcs.meta.wcs.get_transform('world','detector')
world_to_detector(53.149149027123194, -27.839618613331695)
```
### Let's check the other transforms to make sure it's just the distortion reference file that is off
```
# world <-> v2v3 roundtrip for the second image (no distortion file on this leg)
direct_gwcs.meta.wcs.available_frames
world_to_v2v3 = direct_gwcs.meta.wcs.get_transform('world','v2v3')
world_to_v2v3(53.149149027123194, -27.839618613331695) # degrees
v2v3_to_world = direct_gwcs.meta.wcs.get_transform('v2v3','world')
v2v3_to_world(144.31111617155733, -550.8134158050235) # arcseconds
```
### The following transforms only goes through the distortion reference file, it can't seem to return the original detector coordinates
```
# detector <-> v2v3 roundtrip: only the distortion reference file is exercised here
detector_to_v2v3 = direct_gwcs.meta.wcs.get_transform('detector','v2v3')
detector_to_v2v3(110, 110)
v2v3_to_detector = direct_gwcs.meta.wcs.get_transform('v2v3','detector')
v2v3_to_detector(144.31111617152274, -550.8134158049928)
```
### The transform across the distortion image is not able to reproduce values roundtripping
Let's check if we can reproduce the anchor point of the distortion, the value at CRPIX1, CRPIX2
```
# Unpack the anchor values for the second image; also show the roll angle
crpix1, crpix2, crval1, crval2=direct_gwcs.meta.wcsinfo.crpix1, direct_gwcs.meta.wcsinfo.crpix2, direct_gwcs.meta.wcsinfo.crval1, direct_gwcs.meta.wcsinfo.crval2
crval1,crval2, direct_gwcs.meta.wcsinfo.roll_ref
```
#### Check the roundtrip of crpix1,crpix2 from detector <-> v2v3
This also uses the distortion reference file
```
# Roundtrip the reference pixel through the distortion model (second image)
crpix1, crpix2
detector_to_v2v3(crpix1, crpix2)
v2v3_to_detector(86.04055467237623, -493.16454761867965)
```
#### Check the roundtrip of crval1,crval2 from world <-> v2v3
```
# Roundtrip the reference sky position through world <-> v2v3 (second image)
crval1, crval2
world_to_v2v3(crval1, crval2)
v2v3_to_world(86.10345800001141, -493.2275120000079)
```
## Using a different distortion reference file we still are seeing the same offsets with the reverse transform.
We can do a little more detective work and chart the roundtrip offsets that are present in all the distortion reference files.
First we need to get a local copy of all the distortion reference files in CRDS for the NRC_IMAGE mode.
I'm going to do this by asking CRDS. Make sure you have these environment variables set:
CRDS_SERVER_URL=https://jwst-crds.stsci.edu
CRDS_PATH=/Users/sosey/crds_cache --> wherever you want the files stored locally
# get all the nircam distortion files currently in use
crds sync --contexts jwst-nircam-distortion-operational --fetch-references
```
import glob
dist_files=glob.glob('/Users/sosey/crds_cache/references/jwst/nircam/*distortion*')
```
### Let's make a WCS object for each of the distortion files that will take us through the transform.
We use the most recent image as the starting point and direct the distortion file to use.
```
from jwst.assign_wcs import nircam
from jwst.datamodels.wcs_ref_models import DistortionModel
from gwcs.wcs import WCS
```
### Next we'll cut the list down to just the distortion files used with imaging mode
```
# Keep only the distortion files that apply to NIRCam imaging (NRC_IMAGE)
image_dist=[]
for dist in dist_files:
    print(dist)
    data=DistortionModel(dist)
    # The exposure-type metadata location has varied across reference-file
    # versions, so try each known spelling/location in turn.
    try:
        if (data['exp_type'] == 'NRC_IMAGE'):
            image_dist.append(dist)
    except KeyError:
        try:
            if (data['EXP_TYPE'] == 'NRC_IMAGE'):
                image_dist.append(dist)
        except KeyError:
            if "NRC_IMAGE" in data.meta.exposure.p_exptype:
                image_dist.append(dist)
    data.close()
direct_data='V54321001002P000000000110d_A5_F444W_rate.fits' # latest simulated image for testing
# We will open the direct image as an Image datamodel
direct_image = image.ImageModel(direct_data)
direct_image.meta.instrument.pupil, direct_image.meta.instrument.filter,direct_image.meta.instrument.module, direct_image.meta.instrument.detector, direct_image.meta.instrument.channel
# Load up the assign_wcs step to populate our structure, we should see only the distortion file being used
assign_wcs_step=assign_wcs.AssignWcsStep()
reference_file_types = ['distortion', 'filteroffset', 'specwcs', 'regions',
                        'wavelengthrange', 'camera', 'collimator',
                        'disperser', 'fore', 'fpa', 'msa', 'ote', 'ifupost',
                        'ifufore', 'ifuslicer']
reference_file_names = {}
# Ask CRDS for the reference files that apply to the image we are working with
for name in reference_file_types:
    reffile = assign_wcs_step.get_reference_file(direct_image, name)
    reference_file_names[name] = reffile if reffile else ""
reference_file_names
```
### I'm going to call a part of the pipeline that already assumes the correct file has been matched by CRDS, so I should just need to give it the reference file to use and it will return the pipeline that includes that file
```
# Roundtrip pixel (110,110) through the WCS pipeline built with each
# imaging distortion file, and tabulate the residual offsets.
results=[]
for dist in image_dist:
    reference_file_names['distortion'] = dist
    # Build the imaging pipeline with this distortion file swapped in
    pipeline = nircam.imaging(direct_image, reference_file_names)
    test_wcs = WCS(pipeline)
    ra,dec = test_wcs(110,110)
    try:
        w2d = test_wcs.get_transform('world','detector')
        x,y = w2d(ra,dec)
        results.append({'dfile':dist, 'ra':ra, 'dec':dec, 'x':x, 'y':y, 'start_x': 110, 'start_y': 110})
    except NotImplementedError:
        # skip files whose model has no usable inverse
        pass
# Print a fixed-width table of input pixel, returned pixel, and deltas
line = '{:>79} {:>8} {:>8} {:>19} {:>19} {:>19} {:>19}'.format("FILE", "START_X", "START_Y","RETURNED X", "RETURNED Y", "DELTA X", "DELTA Y")
print(line)
for res in results:
    print("{:>79} {:>8} {:>8} {:>19} {:>19} {:>19} {:>19}".format(res['dfile'], res['start_x'], res['start_y'],res['x'], res['y'], res['start_x'] - res['x'], res['start_y']-res['y']))
```
NOTE: according to the RMAP, distortion files numbered prior to `*0011.asf` are old and replaced by the useafter date with updated files, so the results for `jwst_nircam_distortion_0004.asdf` above can be ignored
## We can do the same number crunching just using the the model inside the distortion reference file itself, and we should get the same answer as with the GWCS model
```
# Same roundtrip, but using the model stored in each distortion reference file
# directly (bypassing the GWCS pipeline); results should match the previous cell.
results=[]
for dist in image_dist:
    model = DistortionModel(dist).model
    try:
        v2v3x, v2v3y = model(110, 110)
        x, y = model.inverse(v2v3x, v2v3y)
        results.append({'dfile':dist, 'v2v3x':v2v3x, 'v2v3y':v2v3y, 'x':x, 'y':y, 'start_x': 110, 'start_y': 110})
    except NotImplementedError:
        # skip models without an inverse
        pass
line = '{:>79} {:>8} {:>8} {:>19} {:>19} {:>19} {:>19}'.format("FILE", "START_X", "START_Y","RETURNED X", "RETURNED Y", "DELTA X", "DELTA Y")
print(line)
for res in results:
    print("{:>79} {:>8} {:>8} {:>19} {:>19} {:>19} {:>19}".format(res['dfile'], res['start_x'], res['start_y'],res['x'], res['y'], res['start_x'] - res['x'], res['start_y']-res['y']))
```
## The NIRCAM team used scripts that translate through the SIAF file as part of their data simulator
Below, I'll go through the same process and see what happens (communicated via Hilbert):
The SIAF file contains all of the relevant coordinate system definitions and parameters, as well as the polynomial coefficients which describe the translation between the V2,V3 system and pixel space for each NIRCam aperture on each detector. Colin Cox's report from 2009 which defines the terms in the SIAF file and shows how to use the information in the SIAF file to generate functions for translating between coordinate systems.
(Note that the translation functions and definitions are built around pixel coordinates that are indexed to 1. So if you are going to run translations in python, where things are indexed to 0, be sure to add 1 when creating inputs for the translation models, and subtract 1 from the outputs.)
If you are interested in quickly being able to translate between pixel space and RA/Dec or V2/V3, I have already used the data in the SIAF file to construct distortion reference files. These are the reference files that will be used in the DMS pipeline. There's a separate file for each aperture. I've attached the file for full-frame A1 observations as an example. Let me know if you would like others.
I've attached some python code in coord_translate.py that contains everything you need to go from x,y to RA,Dec and back. The script depends on the other attached python scripts.
Inside the scripts, there are instructions for translating between x,y <-> ra,dec through the SIAF file
```
from asdf import AsdfFile
from astropy.io import ascii
import numpy as np
from SIAFDistortions import rotations
from SIAFDistortions import read_siaf_table
from SIAFDistortions import polynomial
# Read the NIRCam SIAF table (CSV export); header_start=1 skips the title row
distortionTable = ascii.read('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv', header_start=1)
```
## Method 1, (Ra, Dec)--> (x, y) just using the distortion reference file
There are two methods for translating from RA,Dec to x,y. The first makes use of only
the distortion reference file (the asdf file). This method is faster (and is
therefore used within the DMS pipeline), but loses accuracy the farther from the
reference location that you get. For full frame observations, errors can approach
~20 pixels at the corners of the detector (the reference location is the center
of the detector).
```
def RADecToXY_approx(ra,dec,attitude_matrix,coord_transform,refpix_v2,refpix_v3):
    """Approximate RA,Dec -> pixel translation using only the distortion reference file.

    Returns (deltapixelx, deltapixely): distorted-pixel offsets from the
    aperture reference pixel (caller adds refpix_x/refpix_y for absolute x,y).
    """
    #If the full set of distortion coefficients are not provided,
    #(i.e. you don't have the SIAF file)
    #then we fall back to the coordinate transform provided by the
    #distortion reference file. These results are not exact, and
    #become less accurate the farther the source is from the center
    #of the detector. Results can be incorrect by ~20 pixels in the
    #corners of the detector.
    #RA,Dec to V2,V3
    pixelv2,pixelv3 = rotations.getv2v3(attitude_matrix,ra,dec)
    #V2,V3 to distorted pixels (the inverse model works on offsets from the reference point)
    deltapixelx,deltapixely = coord_transform.inverse(pixelv2-refpix_v2,pixelv3-refpix_v3)
    return deltapixelx,deltapixely
#distortion reference file to use
dist_reffile = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'
#RA and Dec you wish to convert to x,y
ra = 53.1490299775 # decimal degrees. RA you wish to convert to x,y
dec = -27.8168745624 # decimal degrees. Dec you wish to convert to x,y
#telescope pointing information
tel_ra = 53.1490299775 # decimal degrees. RA at the reference location on the detector
tel_dec = -27.8168745624 # decimal degrees. Dec at the reference location on the detector
tel_rot = 45.04234416817661 #telescope rotation, degrees.
#in this case, if you don't have the SIAF file, you'll need to get the reference
#location v2,v3 coordinates for the desired aperture from some other source.
refpix_v2 = 120.6714 # arcsec. reference location is usually center of aperture
refpix_v3 = -527.3877 # arcsec.
refpix_x = 1024.5 # pixels. reference location x for desired aperture
refpix_y = 1024.5 # pixels. reference location y for desired aperture
#Read in the CRDS-format distortion reference file
with AsdfFile.open(dist_reffile) as dist_file:
    coord_transform = dist_file.tree['model']
#Create attitude_matrix
attitude_matrix = rotations.attitude(refpix_v2,refpix_v3,tel_ra,tel_dec,tel_rot)
#Translate (offsets from the reference pixel)
dx,dy = RADecToXY_approx(ra,dec,attitude_matrix,coord_transform,refpix_v2,refpix_v3)
#Final x,y value
pixelx = dx + refpix_x
pixely = dy + refpix_y
print("Approx {},{}".format(pixelx,pixely))
```
#### I'm going to turn the above into a function that accepts, ra, dec, distortion file so I can compare the differences with the above results
```
def method_1_sky_to_pix(ra=0., dec=0., distortion_file=None, ap_name=None):
    """RA,Dec -> pixel x,y using only the distortion reference file (method 1).

    Approximate: errors grow away from the reference location (see the note
    above: up to ~20 px in full-frame corners). Pointing and reference-pixel
    values are hard-coded for the NRCA1_FULL test case.
    """
    if distortion_file is None:
        distortion_file = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'
    # NOTE(review): coord_transform is used after the `with` block closes;
    # this appears to work with asdf's in-memory tree, but confirm the model
    # does not lazy-load from the closed file.
    with AsdfFile.open(distortion_file) as dist_file:
        coord_transform = dist_file.tree['model']
    #telescope pointing information
    tel_ra = 53.1490299775 # decimal degrees. RA at the reference location on the detector
    tel_dec = -27.8168745624 # decimal degrees. Dec at the reference location on the detector
    tel_rot = 45.04234416817661 #telescope rotation, degrees.
    #in this case, if you don't have the SIAF file, you'll need to get the reference
    #location v2,v3 coordinates for the desired aperture from some other source.
    refpix_v2 = 120.6714 # arcsec. reference location is usually center of aperture
    refpix_v3 = -527.3877 # arcsec.
    refpix_x = 1024.5 # pixels. reference location x for desired aperture
    refpix_y = 1024.5 # pixels. reference location y for desired aperture
    #Create attitude_matrix
    attitude_matrix = rotations.attitude(refpix_v2,refpix_v3,tel_ra,tel_dec,tel_rot)
    #Translate (offsets from the reference pixel)
    dx,dy = RADecToXY_approx(ra,dec,attitude_matrix,coord_transform,refpix_v2,refpix_v3)
    #Final x,y value
    pixelx = dx + refpix_x
    pixely = dy + refpix_y
    return (pixelx,pixely)
```
## Method 2, (Ra, Dec) --> (x, y) using the distortion reference file and the SIAF file
The second method of translating from RA,Dec to pixel x,y uses extra information in
the SIAF file that is not present in the distortion coefficient file. It is
computationally slower than the other method, but has minimal errors.
```
def getDistortionCoefficients(table,from_sys,to_sys,aperture):
    '''From the SIAF table of distortion coefficients, get the coeffs that
    correspond to the requested transformation and return as a list
    for x and another for y.

    Parameters: the SIAF table, the source/target systems ('science' or
    'ideal'), and the aperture name (e.g. 'NRCA1_FULL').

    Returns (x_coeffs, y_coeffs, v2ref, v3ref, xref, yref, parity, yang).

    Raises ValueError when the aperture is missing or the from/to systems are
    not a supported pair. (The original called sys.exit() here, but `sys` was
    never imported in this notebook, so those paths raised NameError instead.)
    '''
    match = table['AperName'] == aperture
    if not np.any(match):
        raise ValueError("Aperture name {} not found in input CSV file.".format(aperture))
    row = table[match]
    # Only science <-> ideal transformations are tabulated
    if ((from_sys == 'science') & (to_sys == 'ideal')):
        label = 'Sci2Idl'
    elif ((from_sys == 'ideal') & (to_sys == 'science')):
        label = 'Idl2Sci'
    else:
        raise ValueError("from_sys of {} and to_sys of {} not a valid transformation.".format(from_sys,to_sys))
    #get the coefficients, return as list
    X_cols = [c for c in row.colnames if label+'X' in c]
    Y_cols = [c for c in row.colnames if label+'Y' in c]
    x_coeffs = [row[c].data[0] for c in X_cols]
    y_coeffs = [row[c].data[0] for c in Y_cols]
    #Also get the V2,V3 and x,y values of the reference pixel
    v2ref = row['V2Ref'].data[0]
    v3ref = row['V3Ref'].data[0]
    xref = row['XSciRef'].data[0]
    yref = row['YSciRef'].data[0]
    #Get parity and V3 Y angle info as well
    parity = row['VIdlParity'].data[0]
    yang = row['V3IdlYAngle'].data[0]
    return x_coeffs,y_coeffs,v2ref,v3ref,xref,yref,parity,yang
def RADecToXY_exact(ra,dec,attitude_matrix,v2v32idlx,v2v32idly,v2_ref,v3_ref,x_sci2idl,y_sci2idl):
    """Exact RA,Dec -> distorted pixel offsets via the SIAF 'ideal' system.

    Returns (deltapixelx, deltapixely): offsets from the aperture reference
    pixel (caller adds the reference-pixel x,y for absolute coordinates).
    """
    #RA,Dec to V2,V3
    pixelv2,pixelv3 = rotations.getv2v3(attitude_matrix,ra,dec)
    #Now V2,V3 to undistorted angular distance from the reference pixel
    xidl = v2v32idlx(pixelv2-v2_ref,pixelv3-v3_ref)
    yidl = v2v32idly(pixelv2-v2_ref,pixelv3-v3_ref)
    #Finally, undistorted distances to distorted pixel values.
    #(renamed `iter` -> `n_iter` so the builtin iter() is not shadowed;
    # err and n_iter come back from polynomial.invert but are unused here)
    deltapixelx, deltapixely, err, n_iter = polynomial.invert(x_sci2idl,y_sci2idl,xidl,yidl,5)
    return deltapixelx,deltapixely
#distortion reference file to use
dist_reffile = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'
#aperture_name
ap_name = 'NRCA1_FULL'
#RA and Dec you wish to convert to x,y
ra = 53.1490299775 # decimal degrees. RA you wish to convert to x,y
dec = -27.8168745624 # decimal degrees. Dec you wish to convert to x,y
#telescope pointing information
tel_ra = 53.1490299775 # decimal degrees. RA at the reference location on the detector
tel_dec = -27.8168745624 # decimal degrees. Dec at the reference location on the detector
tel_rot = 45.04234416817661 #telescope rotation, degrees.
#read in the SIAF file
distortionTable = ascii.read('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv',header_start=1)
#get the extra parameters needed from the SIAF file
x_sci2idl,y_sci2idl,refpix_v2,refpix_v3,refpix_x,refpix_y,parity,v3yang = getDistortionCoefficients(distortionTable,'science','ideal',ap_name)
#generate the function which will translate from V2,V3 to undistorted coordinates
v2v32idlx, v2v32idly = read_siaf_table.get_siaf_v2v3_transform('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv',ap_name,to_system='ideal')
#Create attitude_matrix
attitude_matrix = rotations.attitude(refpix_v2,refpix_v3,tel_ra,tel_dec,tel_rot)
#Translate (offsets from the reference pixel)
dx,dy = RADecToXY_exact(ra,dec,attitude_matrix,v2v32idlx,v2v32idly,refpix_v2,refpix_v3,x_sci2idl,y_sci2idl)
#Final x,y value
pixelx = dx + refpix_x
pixely = dy + refpix_y
print("Exact {},{}".format(pixelx,pixely))
```
### The function below does the example shown above, accepting x, y, distortion_file
```
def method_2_sky_to_pix(ra=0., dec=0., distortion_file=None, ap_name=None):
    """RA,Dec -> pixel x,y using the SIAF file (exact method 2).

    Slower than method 1 but with minimal errors. Pointing values are
    hard-coded for the NRCA1_FULL test case. `distortion_file` is kept for
    interface compatibility / default-aperture selection only.
    Raises ValueError if ap_name cannot be determined.
    """
    if distortion_file is None:
        distortion_file = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'
        ap_name = 'NRCA1_FULL'
    #aperture_name is required for the SIAF lookup
    if ap_name is None:
        raise ValueError("Expected ap_name for distortion file")
    # NOTE: unlike method 1, this method uses only the SIAF file; the original
    # opened the ASDF distortion file and read its model without ever using
    # it, so that dead I/O has been removed.
    #telescope pointing information
    tel_ra = 53.1490299775 # decimal degrees. RA at the reference location on the detector
    tel_dec = -27.8168745624 # decimal degrees. Dec at the reference location on the detector
    tel_rot = 45.04234416817661 #telescope rotation, degrees.
    #read in the SIAF file
    distortionTable = ascii.read('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv',header_start=1)
    #get the extra parameters needed from the SIAF file
    x_sci2idl,y_sci2idl,refpix_v2,refpix_v3,refpix_x,refpix_y,parity,v3yang = getDistortionCoefficients(distortionTable,'science','ideal',ap_name)
    #generate the function which will translate from V2,V3 to undistorted coordinates
    v2v32idlx, v2v32idly = read_siaf_table.get_siaf_v2v3_transform('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv',ap_name,to_system='ideal')
    #Create attitude_matrix
    attitude_matrix = rotations.attitude(refpix_v2,refpix_v3,tel_ra,tel_dec,tel_rot)
    #Translate (offsets from the reference pixel)
    dx,dy = RADecToXY_exact(ra,dec,attitude_matrix,v2v32idlx,v2v32idly,refpix_v2,refpix_v3,x_sci2idl,y_sci2idl)
    #Final x,y value
    pixelx = dx + refpix_x
    pixely = dy + refpix_y
    return (pixelx,pixely)
```
## Method, (x, y) --> (Ra, Dec)
Translating from x,y to RA,Dec is simpler, with only one method, which
gives exact answers
```
def XYToRADec(pixelx,pixely,attitude_matrix,coord_transform,refpix_x,refpix_y,refpix_v2,refpix_v3):
    """Translate a detector x,y location to RA,Dec (exact; single method)."""
    # Offsets from the reference pixel, pushed through the distortion model
    # to V2,V3 offsets (arcsec)
    dv2, dv3 = coord_transform(pixelx - refpix_x, pixely - refpix_y)
    # Absolute V2,V3, then on to RA,Dec via the telescope attitude matrix
    ra, dec = rotations.pointing(attitude_matrix, dv2 + refpix_v2, dv3 + refpix_v3)
    return ra, dec
# Example: translate one pixel position to RA,Dec.
# (indentation of the `with` block restored; the notebook dump had lost it)
# pixel coords to translate
pixelx = 1024.5
pixely = 1024.5
# telescope pointing information
tel_ra = 53.1490299775  # decimal degrees. RA at the reference location on the detector
tel_dec = -27.8168745624  # decimal degrees. Dec at the reference location on the detector
tel_rot = 45.04234416817661  # telescope rotation, degrees.
# distortion reference file to use
dist_reffile = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'
ap_name = 'NRCA1_FULL'
# Read in the CRDS-format distortion reference file
with AsdfFile.open(dist_reffile) as dist_file:
    coord_transform = dist_file.tree['model']
# read in the SIAF file
distortionTable = ascii.read('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv', header_start=1)
# get parameters needed from the SIAF file
x_sci2idl, y_sci2idl, refpix_v2, refpix_v3, refpix_x, refpix_y, parity, v3yang = getDistortionCoefficients(distortionTable, 'science', 'ideal', ap_name)
# Create attitude_matrix
attitude_matrix = rotations.attitude(refpix_v2, refpix_v3, tel_ra, tel_dec, tel_rot)
# Translate
ra, dec = XYToRADec(pixelx, pixely, attitude_matrix, coord_transform, refpix_x, refpix_y, refpix_v2, refpix_v3)
print('RA,Dec is {},{}'.format(ra, dec))
def pix_to_sky(x=0, y=0, distortion_file=None, ap_name=None):
    """Translate a detector pixel position (x, y) to sky coordinates (RA, Dec).

    Uses the CRDS distortion reference file for pixel->V2,V3 and the hard-coded
    telescope pointing (from V54321001002P000000000110d_A5_F444W_rate.fits)
    for V2,V3->sky.  Returns (ra, dec) in decimal degrees.
    Raises ValueError if a distortion file is given without its aperture name.

    NOTE: indentation was restored here; the notebook dump had lost it.
    """
    # telescope pointing information
    tel_ra = 53.1490299775  # decimal degrees. RA at the reference location on the detector
    tel_dec = -27.8168745624  # decimal degrees. Dec at the reference location on the detector
    tel_rot = 45.04234416817661  # telescope rotation, degrees.
    # distortion reference file to use
    if distortion_file is None:
        distortion_file = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'
        ap_name = 'NRCA1_FULL'
    if ap_name is None:
        raise ValueError("Need to specify ap_name appropriate for the distortion file")
    # Read in the CRDS-format distortion reference file
    with AsdfFile.open(distortion_file) as dist_file:
        coord_transform = dist_file.tree['model']
    # read in the SIAF file
    distortionTable = ascii.read('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv', header_start=1)
    # get parameters needed from the SIAF file
    x_sci2idl, y_sci2idl, refpix_v2, refpix_v3, refpix_x, refpix_y, parity, v3yang = getDistortionCoefficients(distortionTable, 'science', 'ideal', ap_name)
    # Create attitude_matrix
    attitude_matrix = rotations.attitude(refpix_v2, refpix_v3, tel_ra, tel_dec, tel_rot)
    # Translate
    ra, dec = XYToRADec(x, y, attitude_matrix, coord_transform, refpix_x, refpix_y, refpix_v2, refpix_v3)
    return (ra, dec)
```
### The examples above were pulled from `coord_translate.py` and the telescope pointing information was taken from `V54321001002P000000000110d_A5_F444W_rate.fits` (used in the examples above)
Now let's look at the (110,110) pixel that is far away from where the distortion is defined at crpix1, crpix2
```
ra, dec = pix_to_sky(110, 110)
ra, dec
```
#### We'll take that calculated ra,dec and feed it back to find the original pixel using the two methods outlined above
```
method_1_sky_to_pix(ra, dec)
method_2_sky_to_pix(ra, dec)
```
*Note that the reference files mention an updated SIAF file I don't have access to: NIRCam_SIAF_2017-03-28.csv*
## method_2 returns much closer values than method_1 and the distortion reference file alone, as expected by the team. We should figure out how to add the extra calculations to the distortion reference file so that we can get proper translations for use in the WFSS and Resample pipelines
| github_jupyter |
[](https://pythonista.mx)
## La DB API de Python para bases de datos relacionales.
Debido a que existen muy diversos gestores de bases de datos, tanto SQL como no-SQL, la comunidad de Python publicó la [PEP-249](https://www.python.org/dev/peps/pep-0249/), la cual define un modelo genérico de API para la gestión de bases de datos, de tal modo que independientemente de las particularidades del gestor, existan interfaces (clases, funciones y métodos) unificadas para acceder a los datos.
En la siguiente liga se puede consultar las diversas bases de datos soportadas por Python:
https://wiki.python.org/moin/DatabaseInterfaces
## Conexión a MySQL.
Para ilustrar una conexión y operación simple de una base de datos relacional se utilizará la base de datos MariaDB conectada mediante el driver *pymysql*.
Para conocer más sobre *pymysql*, consultar la siguiente liga.
https://pymysql.readthedocs.io/en/latest/
```
!pip install pymysql
import pymysql
```
### El objeto *pymysql.connect*.
El objeto *pymysql.connect* es un objeto instanciado de la clase *pymysql.connections.Connection*, el cual permite abrir una conexión a la base de datos con la siguiente sintaxis:
``` python
pymysql.connect(user=<objeto tipo str>, password=<objeto tipo str>,
host='<URL>', port='<puerto>',
database=<objeto tipo str>)
```
Existen algunos otros parámetros, pero los que se indican son los más comunes.
Por defecto la URL del host es *localhost*, el puerto es el *3306* y la base datos es la principal.
**Ejemplo:**
```
conexion = pymysql.connect(user='root', password='0p3n5t4ck')
```
### El método *pymysql.connect.query()*.
Este método permite ingresar consultas SQL a la base de datos ingresándola como parámetro.
**Ejemplo:**
```
conexion.query("CREATE DATABASE pythonista;")
```
### El método *pymysql.connect.commit()*
Este método permite realizar un commit a la base de datos.
```
conexion.commit()
```
### El método *pymysql.connect.close()*
Este método permite cerrar la conexión con la base de datos.
```
conexion.close()
```
### El objeto *pymysql.cursor*.
Aún cuando es posible realizar operaciones de consulta con los objetos *pymysql.connect*, estos objetos se utilizan primordialmente para operaciones de conexión a las bases de datos.
El objeto cursor es una instancia de la clase *pymysql.cursors.Cursor*, el cual contiene los métodos:
* *execute()*, con el que se pueden enviar instrucciones SQL a la base de datos.
* *fetchone()*, con el que se obtiene el primer resultado de una búsqueda.
* *fetchall()* con el que se obtienen todos los resultados de una búsqueda dentro de un objeto de tipo *tuple*.
### La declaración *with* para conexiones de bases de datos.
Las conexiones de bases de datos también pueden ser utilizadas dentro de una declaración *with*.
De esta forma se abre una conexión y se crea un objeto de tipo *pymysql.cursor* que puede ser utilizado dentro del bloque de código inscrito a *with*. Tan pronto como el bloque es ejecutado, se realiza un commit de las acciones realizadas y se cierra la conexión.
**Ejemplo:**
```
# Using connections inside `with` blocks (indentation restored; the notebook
# dump had lost it).
# NOTE(review): this assumes pymysql's Connection.__enter__ returns a cursor,
# which was true in pymysql versions contemporary with this notebook (2017);
# newer pymysql returns the connection itself — confirm the installed version.
sql = 'SHOW DATABASES;'
with pymysql.connect(user='root', password='0p3n5t4ck') as cursor:
    print(type(cursor))
    cursor.execute(sql)
    print(cursor.fetchall())
with pymysql.connect(user='root', password='0p3n5t4ck') as conexion:
    conexion.execute("DROP DATABASE pythonista;")
    print(cursor.fetchall())
sql = 'SHOW DATABASES;'
with pymysql.connect(user='root', password='0p3n5t4ck') as cursor:
    cursor.execute(sql)
    print(cursor.fetchall())
```
## Conexión a MongoDB.
[MongoDB](https://www.mongodb.com/) es una base de datos muy popular que se basa en "colecciones" y "documentos" en formato JSON en vez de registros.
La API de MongoDB para Python fue desarrollada por el mismo equipo de MongoDB.
Para conocer más sobre el uso de la API de MongoDB consultar:
http://api.mongodb.com/python/current/tutorial.html
**Ejemplo:**
Se realizará una conexión al servidor de Mongodb corriendo en el sistema local (*localhost*) y se consultará el estado del gestor.
```
!pip install pymongo
from pymongo import MongoClient
client = MongoClient("localhost")
db=client.admin
serverStatusResult=db.command("serverStatus")
claves = [keys for keys in serverStatusResult]
claves
serverStatusResult['uptime']
serverStatusResult['host']
serverStatusResult['version']
```
<p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Licencia Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Esta obra está bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p>
<p style="text-align: center">© José Luis Chiquete Valdivieso. 2017.</p>
| github_jupyter |
```
# Execute this code block to install dependencies when running on colab
try:
import torch
except:
from os.path import exists
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/'
accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'
!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-1.0.0-{platform}-linux_x86_64.whl torchvision
! pip install --upgrade git+https://github.com/sovrasov/flops-counter.pytorch.git
try:
import torchtext
except:
!pip install torchtext
try:
import spacy
except:
!pip install spacy
try:
spacy.load('en')
except:
!python -m spacy download en
```
# Data loading and preprocessing
```
import torch
from torchtext import data
import torch.nn.functional as F
from torch import nn
from torch import optim
from torch.distributions import Categorical
from torch.distributions import Binomial
from torchtext import datasets
import os.path
import random
import numpy as np
# Fields: spaCy tokenization, lowercased text, lengths returned for packing.
TEXT = data.Field(tokenize='spacy', lower=True, include_lengths=True)
LABEL = data.LabelField(dtype=torch.float)
# IMDB sentiment dataset; 80/20 split of the training portion for validation.
_train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
train_data, valid_data = _train_data.split(0.8)
print(f'Number of training examples: {len(train_data)}')
print(f'Number of validation examples: {len(valid_data)}')
print(f'Number of testing examples: {len(test_data)}')
# Vocabulary capped at 100k tokens, initialized with GloVe 100-d vectors.
TEXT.build_vocab(train_data, max_size=100000, vectors="glove.6B.100d")
LABEL.build_vocab(train_data)
print(f"Unique tokens in TEXT vocabulary: {len(TEXT.vocab)}")
print(f"Unique tokens in LABEL vocabulary: {len(LABEL.vocab)}")
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# NOTE(review): the line below unconditionally overrides the CUDA device
# selected above and forces CPU execution — confirm this is intentional.
device = 'cpu'
# Assume that we are on a CUDA machine, then this should print a CUDA device:
print(device)
```
# Model and training
```
BATCH_SIZE = 50
# Bucketed iterators group examples of similar length into the same batch.
# NOTE(review): shuffle=False is unusual for training — confirm intentional.
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=BATCH_SIZE,
device=device,
shuffle = False,
sort_key=lambda x: len(x.text),
sort_within_batch=True)
# eps: small constant (declared but not used in the visible cells).
eps = torch.tensor(1e-9)
# temp: scratch list used for ad-hoc debugging elsewhere in the notebook.
temp = []
R = 20 # chunk of words read or skipped
def reward_function(prob, true_label):
    """Return +1.0 if the prediction agrees with the true label, else -1.0.

    Both values are thresholded at 0.5: agreement means both above or both
    below.  A borderline value of exactly 0.5 counts as disagreement (-1).
    Returned as a grad-enabled scalar tensor because callers fold it into the
    policy loss.

    (Indentation restored; the notebook dump had lost it.)
    """
    if prob > 0.5 and true_label > 0.5:
        return torch.tensor(1.0, requires_grad=True)
    if prob < 0.5 and true_label < 0.5:
        return torch.tensor(1.0, requires_grad=True)
    return torch.tensor(-1.0, requires_grad=True)
def sample_binary(prob):
    """Bernoulli sample: tensor(1) with probability `prob`, else tensor(0).

    Bug fix: the original compared `prob > random.random`, i.e. against the
    function object itself instead of a freshly drawn value in [0, 1); the
    call parentheses were missing.  (Indentation also restored.)
    """
    if prob > random.random():
        return torch.tensor(1)
    return torch.tensor(0)
class SkipReadingModel(nn.Module):
    """LSTM sentiment classifier that learns (REINFORCE-style) to skip text.

    Per episode (one review) the model repeatedly reads a chunk of up to R
    tokens through an LSTMCell, then samples a binary "stop" action; if it
    continues, it samples a jump of n in {0..K-1} chunks to skip ahead.  On
    stopping (or exhausting the text) it classifies from the cell state.

    In train_mode the model records log-probabilities, rewards, baselines and
    state values per episode for `update_policy` to consume.

    NOTE: indentation was restored from a whitespace-stripped notebook dump;
    the nesting follows the only reading consistent with `update_policy`.
    """

    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, gamma=0.99, train_mode=True, K=4):
        super().__init__()
        # store dimensions and constants
        self.input_dim = input_dim
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.gamma = torch.tensor(gamma)  # discount factor for per-step FLOP penalty
        self.train_mode = train_mode
        self.K = K  # number of jump sizes the policy can choose from
        # create layers
        self.embedding = nn.Embedding(input_dim, embedding_dim)
        self.lstm_cell = nn.LSTMCell(input_size = embedding_dim, hidden_size = hidden_dim, bias = True)
        # stop head: 3-layer MLP -> single sigmoid probability of stopping
        self.stop_linear_1 = nn.Linear(hidden_dim, hidden_dim)
        self.stop_linear_2 = nn.Linear(hidden_dim, hidden_dim)
        self.stop_linear_3 = nn.Linear(hidden_dim, 1)
        # jump head: 3-layer MLP -> softmax over K jump sizes
        self.jumping_linear_1 = nn.Linear(hidden_dim, hidden_dim)
        self.jumping_linear_2 = nn.Linear(hidden_dim, hidden_dim)
        self.jumping_linear_3 = nn.Linear(hidden_dim, K)
        # output head: 2-layer MLP -> classification logit
        self.output_linear_1 = nn.Linear(hidden_dim, hidden_dim)
        self.output_linear_2 = nn.Linear(hidden_dim, output_dim)
        # value head for advantage estimation
        self.value_head = nn.Linear(hidden_dim, 1)
        # Baseline weight (linear baseline on the detached cell state)
        self.wb = nn.Parameter(data=torch.zeros(self.hidden_dim), requires_grad=True)
        self.cb = nn.Parameter(data=torch.tensor((0.0)), requires_grad=True)
        # Initialize lstm_cell states
        self.initialize_lstm_cell_states()
        # Initialize episode number and time number
        self.initialize_for_new_batch()
        self.initialize_time_number()
        # Overall reward and loss history
        self.reward_history = []
        self.loss_history = []
        self.training_accuracies = []
        self.validation_accuracies = []

    def initialize_lstm_cell_states(self):
        # Fresh zero (c, h) state at the start of every episode.
        self.c = torch.zeros(1, self.hidden_dim, requires_grad=True)
        self.h = torch.zeros(1, self.hidden_dim, requires_grad=True)

    def initialize_episode_number(self):
        self.ep = 0

    def initialize_time_number(self):
        self.t = 0

    def clear_batch_lists(self):
        # Drop all per-batch bookkeeping after a policy update.
        del self.saved_log_probs_s[:]
        del self.saved_log_probs_n[:]
        del self.saved_log_probs_o[:]
        del self.reward_baselines[:]
        del self.rewards[:]
        del self.label_targets[:]
        del self.label_predictions[:]
        del self.state_values[:]
        self.initialize_episode_number()
        self.training_accuracy = 0.0

    def initialize_for_new_batch(self):
        """
        Cleans history of log probabilities, rewards, targets etc for the last
        batch
        """
        self.initialize_episode_number()
        # Episode policy and reward history
        self.saved_log_probs_s = []  # log probabilities for each time step t in each episode in batch
        self.saved_log_probs_n = []  # log probs for jump
        self.saved_log_probs_o = []  # log_prob for class
        self.rewards = []            # reward at final time step of each episode in batch
        self.reward_baselines = []   # reward baselines for each time step t in each episode in batch
        self.state_values = []
        # Predictions and targets history (for cross entropy loss calculation)
        self.label_predictions = []  # 1 prediction (raw logit) per episode
        self.label_targets = []      # 1 label per episode
        self.training_accuracy = 0.0

    def classify(self):
        # Produce the classification logit from the current cell state and,
        # in train mode, sample a class to score against the target.
        out = self.output_linear_1(self.c[0])
        out = self.output_linear_2(out)
        self.label_predictions.append(out)  # stored pre-sigmoid (raw logit)
        prob_o = torch.sigmoid(out)
        class_categ = Binomial(probs=prob_o)
        _class = class_categ.sample()
        if self.train_mode:
            self.rewards.append(reward_function(_class, self.label_targets[-1]))
            self.saved_log_probs_o.append((class_categ.log_prob(_class), ))

    def get_baseline(self):
        # Linear baseline on the detached state (no gradient through c).
        return torch.dot(self.wb, self.c[0].detach()) + self.cb

    def save_training_accuracy(self):
        # Fraction of episodes in the batch with a positive (correct) reward.
        correct = 0
        for _r in self.rewards:
            if _r > 0:
                correct += 1
        self.training_accuracy = correct/len(self.rewards)
        self.training_accuracies.append(self.training_accuracy)

    def forward(self, pack):
        texts, lengths, labels = pack
        embeddeds = self.embedding(texts)
        self.initialize_for_new_batch()
        for episode_number in range(embeddeds.shape[1]):
            # load episode data
            self.ep = episode_number
            embedded = embeddeds[:, self.ep, :]
            # initialize counters and index
            tokens_read = 0
            jumps_made = 0
            word_index = 0
            words_len = embedded.shape[0]
            self.initialize_lstm_cell_states()
            self.initialize_time_number()
            self.saved_log_probs_s.append([])
            self.saved_log_probs_n.append([])
            self.state_values.append([])
            self.reward_baselines.append([])
            if self.train_mode:
                label = labels[self.ep].reshape(1)
                self.label_targets.append(label)
            # iterate through the sequence, reading chunks and skipping words;
            # hard cap of 400 tokens per episode
            while word_index < words_len and word_index < 400:
                self.t += 1
                # read up to R consecutive tokens through the LSTM cell
                for _r in range(min(R, words_len - word_index)):
                    embedded_word = embedded[word_index]
                    self.h, self.c = self.lstm_cell(torch.reshape(embedded_word, (1, -1)), (self.h, self.c))
                    word_index += 1
                _state_value = self.value_head(self.c[0])
                # NOTE(review): this appends to the OUTER list, unlike
                # saved_log_probs_s[-1] below; `update_policy` zips state_values
                # per episode, so this probably should be
                # self.state_values[-1].append(_state_value) — confirm intent.
                self.state_values.append(_state_value)
                # stop head: probability of stopping now
                _s = self.stop_linear_1(self.c[0])
                _s = F.relu(_s)
                _s = self.stop_linear_2(_s)
                _s = F.relu(_s)
                _s = self.stop_linear_3(_s)
                probs_s = torch.sigmoid(_s)
                try:
                    stop_categ = Binomial(probs=probs_s)
                    stop = stop_categ.sample()
                except:
                    # Binomial raises when probs_s is NaN/out of range; dump state
                    print("_c", self.c)
                    raise ValueError('got the expected error')
                # Add log probability of our chosen action to our history
                self.saved_log_probs_s[-1].append(stop_categ.log_prob(stop))
                self.reward_baselines[-1].append(self.get_baseline())
                if stop > 0.5:
                    self.classify()
                    break
                else:
                    # jump head: sample how many chunks of R tokens to skip
                    _n = self.jumping_linear_1(self.c[0])
                    _n = F.relu(_n)
                    _n = self.jumping_linear_2(_n)
                    _n = F.relu(_n)
                    _n = self.jumping_linear_3(_n)
                    _n = F.softmax(_n)
                    n_categ = Categorical(_n)
                    n = n_categ.sample()
                    self.saved_log_probs_n[-1].append(n_categ.log_prob(n))
                    word_index += n * R
            else:
                # while/else: ran out of words without the policy stopping —
                # classify from whatever state we reached
                self.classify()
        if self.train_mode:
            self.save_training_accuracy()
        return self.label_predictions
# Hyperparameters, seeding, model construction and optimizer.
# (indentation of the `if` body restored; the notebook dump had lost it)
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 128
OUTPUT_DIM = 1
FLOP_COST = 0.0001  # per-step penalty used as negative reward in update_policy
seed = 7
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
bce = nn.BCEWithLogitsLoss(reduction='mean')
policy_model = SkipReadingModel(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM).to(device)
# Initialize the embedding layer with the pre-trained GloVe vectors.
pretrained_embeddings = TEXT.vocab.vectors
policy_model.embedding.weight.data.copy_(pretrained_embeddings)
# Resume from a previous checkpoint if one exists.
if os.path.exists('fast_text_model.weights'):
    policy_model.load_state_dict(torch.load('fast_text_model.weights'))
# define the optimiser
optimizer = optim.Adam(policy_model.parameters(), lr=0.01)
def update_policy():
    """One REINFORCE-with-baseline update over the recorded batch.

    Sums the log-probabilities of every action taken (jumps, stop decisions,
    class choice), accumulates discounted advantages (per-step -FLOP_COST plus
    the terminal classification reward, minus the value-head estimate), and
    optimizes  policy_loss_sum * reward_sum + value_loss_sum.
    Clears the model's per-batch bookkeeping afterwards.

    NOTE(review): `policy_model.state_values` is filled at the outer-list
    level in forward() (see note there), so `svs` here may not be the
    per-episode list this zip assumes — confirm.
    (Indentation restored; the notebook dump had lost it.)
    """
    policy_loss_sum = torch.tensor(0.0, requires_grad=True)
    reward_sum = torch.tensor(0.0, requires_grad=True)
    baseline_loss_sum = torch.tensor(0.0, requires_grad=True)
    value_loss_sum = torch.tensor(0.0, requires_grad=True)
    for reward, prediction, target, log_probs_o, log_probs_n, log_probs_s, baselines, svs in zip(
            policy_model.rewards, policy_model.label_predictions,
            policy_model.label_targets, policy_model.saved_log_probs_o,
            policy_model.saved_log_probs_n, policy_model.saved_log_probs_s,
            policy_model.reward_baselines, policy_model.state_values):
        # log-probs of the jump actions taken in this episode
        for lpn in log_probs_n:
            policy_loss_sum = policy_loss_sum + lpn
        for i, (lps, b, sv) in enumerate(zip(log_probs_s, baselines, svs)):
            policy_loss_sum = policy_loss_sum + lps
            # discounted per-step cost; terminal step also gets the class reward
            r = torch.pow(policy_model.gamma, i) * (-FLOP_COST)
            if i == len(svs) - 1:
                r = r + torch.pow(policy_model.gamma, i) * reward
            adv = r - sv.item()  # advantage w.r.t. the value-head estimate
            reward_sum = reward_sum + adv
            value_loss_sum = value_loss_sum + F.smooth_l1_loss(sv, torch.tensor([r]))
        # log-prob of the sampled class decision
        policy_loss_sum = policy_loss_sum + log_probs_o[0]
    loss = policy_loss_sum * reward_sum + value_loss_sum
    optimizer.zero_grad()
    loss.backward(retain_graph=True)
    optimizer.step()
    policy_model.clear_batch_lists()
def test_model():
    """Run the policy model over the test iterator (up to ~5000 examples)
    and print/return the accuracy.

    NOTE(review): `train_mode = True` here (and at the end) looks like it was
    meant to be False for evaluation — confirm.  Also, `predictions` holds raw
    pre-sigmoid logits (see classify()), so thresholding them at 0.5 via
    reward_function is not the same as prob > 0.5; logits in (0, 0.5) are
    miscounted — confirm intended behavior.
    (Indentation restored; the notebook dump had lost it.)
    """
    policy_model.train_mode = True
    correct = 0
    total = 0
    for _data in test_iterator:
        # get the inputs
        texts, text_lengths, labels = _data.text[0], _data.text[1], _data.label
        predictions = policy_model((texts.to(device), text_lengths.to(device), labels.to(device)))
        for (prediction, label) in zip(predictions, labels):
            # reward_function's agreement test is symmetric in its arguments,
            # so the swapped (label, prediction) order still works here
            if reward_function(label, prediction) > 0:
                correct += 1
            total += 1
            if total % 1000 == 0:
                print(total)
            if total % 5000 == 0:
                break
    print("Test accuracy :", correct/total)
    policy_model.train_mode = True
    return correct/total
def validate_model():
    """Run the policy model over ~1000 validation examples, print the
    accuracy, record it in policy_model.validation_accuracies, and return it.

    NOTE(review): as in test_model, `train_mode = True` likely was meant to be
    False for evaluation, and `predictions` are raw logits — confirm.
    (Indentation restored; the notebook dump had lost it.)
    """
    policy_model.train_mode = True
    correct = 0
    total = 0
    for _data in valid_iterator:
        # get the inputs
        texts, text_lengths, labels = _data.text[0], _data.text[1], _data.label
        predictions = policy_model((texts.to(device), text_lengths.to(device), labels.to(device)))
        for (prediction, label) in zip(predictions, labels):
            if reward_function(label, prediction) > 0:
                correct += 1
            total += 1
            if total % 1000 == 0:
                break
    print("Validation accuracy :", correct/total)
    policy_model.train_mode = True
    policy_model.validation_accuracies.append(correct/total)
    return correct/total
# test_model()
# The epoch loop: run episodes batch by batch, periodically report training
# accuracy, validate every 10 batches, and apply a policy update per batch.
# A hard cap of 1000 batches per epoch is enforced via `break`.
# (indentation restored; the notebook dump had lost it)
with torch.enable_grad():
    validate_model()
    for epoch in range(10):
        running_reward = 10
        t = 0
        for _data in train_iterator:
            # get the inputs
            texts, text_lengths, labels = _data.text[0], _data.text[1], _data.label
            prediction = policy_model((texts.to(device), text_lengths.to(device), labels.to(device)))
            t += 1
            if t % 2 == 0:
                print("batch no. %d, training accuracy %4.2f" % (t, policy_model.training_accuracy))
            if t % 10 == 0:
                validate_model()
            if t % 1000 == 0:
                break
            update_policy()
        print("Epoch %d" % (epoch))
print('**** Finished Training ****')
# test_model()
torch.save(policy_model.state_dict(), 'fast_text_model.weights')
```
# Evaluation
```
import timeit
import spacy
import matplotlib.pyplot as plt
nlp = spacy.load('en')
def predict_sentiment(model, sentence):
    """Tokenize `sentence` with spaCy, run the skip-reading model on CPU, and
    return the sigmoid of its first stored logit as the sentiment probability.

    NOTE(review): the trailing `model.train_mode = False` repeats the value
    set at the top — presumably it was meant to restore True; confirm.
    (Indentation restored; the notebook dump had lost it.)
    """
    model.train_mode = False
    tokenized = [tok.text for tok in nlp.tokenizer(sentence)]
    indexed = [TEXT.vocab.stoi[t] for t in tokenized]
    # tensor = torch.LongTensor(indexed).to(device)
    tensor = torch.LongTensor(indexed).to('cpu')
    tensor = tensor.unsqueeze(1)  # shape (seq_len, batch=1)
    model((tensor, torch.tensor([tensor.shape[0]]), None))
    res = torch.sigmoid(model.label_predictions[0])
    model.train_mode = False
    return res
# Time one prediction every 100th test example (batch_size=1) and plot the
# elapsed time against the sentence length.
# (indentation restored; the notebook dump had lost it)
times = []
lengths = []
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size=1,
    device=device,
    shuffle = True,
    # sort_key=lambda x: len(x.text),
    sort_within_batch=False)
i = 0
for _data in test_iterator:
    if i % 100 == 0:
        # get the inputs
        texts, text_lengths, labels = _data.text[0], _data.text[1], _data.label
        start_time = timeit.default_timer()
        predictions = policy_model((texts.to(device), text_lengths.to(device), labels.to(device)))
        elapsed = timeit.default_timer() - start_time
        lengths.append(texts.shape[0])
        times.append(elapsed)
    if i > 20000:
        break
    i += 1
# Persist the measurements, then scatter-plot length vs. prediction time.
import pickle
pickle_out = open("test_times_1.pickle","wb")
pickle.dump((lengths, times), pickle_out)
pickle_out.close()
plt.scatter(lengths, times, label='skip-model')
plt.xlabel('Lengths of sentences')
plt.ylabel('Time taken for prediction')
plt.show()
predict_sentiment(policy_model, "This film is terrible what can I say")
import pickle
pickle_out = open("training_epochs_1.pickle","wb")
pickle.dump((policy_model.training_accuracies, policy_model.validation_accuracies), pickle_out)
pickle_out.close()
```
| github_jupyter |
# Python Notional Machine
Our goal is to refresh ourselves on basics (and some subtleties) associated with Python's data and computational model. Along the way, we'll also use or refresh ourselves on the <b>environment model</b> as a way to think about and keep track of the effect of executing Python code. Specifically, we'll demonstrate use of *environment diagrams* to explain the outcomes of different code sequences.
## Variables and data types
### Integers
```
a = 307
b = a
print('a:', a, '\nb:', b)
a = a + 310
a += 400
print('a:', a, '\nb:', b)
```
So far so good -- integers, and variables pointing to integers, are straightforward.
### Lists
```
x = ['baz', 302, 303, 304]
print('x:', x)
y = x
print('y:', y)
x = 377
print('x:', x, '\ny:', y)
```
Unlike integers, lists are mutable:
```
x = y
x[0] = 388
print('x:', x)
print('y:', y)
```
As seen above, we have to be careful about sharing (also known as "aliasing") mutable data!
```
a = [301, 302, 303]
b = [a, a, a]
print(b)
b[0][0] = 304
print(b)
print(a)
```
### Tuples
Tuples are a lot like lists, except that they are immutable.
```
x = ('baz', [301, 302], 303, 304)
y = x
print('x:', x, '\ny:', y)
```
Unlike a list, we can't change the top most structure of a tuple. What happens if we try the following?
```
x[0] = 388
```
What will happen in the following (operating on `x`)?
```
x[1][0] = 311
print('x:', x, '\ny:', y)
```
So we still need to be careful! The tuple didn't change at the top level -- but it might have members that are themselves mutable.
### Strings
Strings are also immutable. We can't change them once created.
```
a = 'ya'
b = a + 'rn'
print('a:', a, '\nb:', b)
a[0] = 'Y'
c = 'twine'
d = c
c += ' thread'
print('c:', c, '\nd:', d)
```
That's a little bit tricky. Here the `+=` operator makes a copy of `c` first to use as part of the new string with `' thread'` included at the end.
### Back to lists: append, extend, and the '+' and '+=' operators
```
x = [1, 2, 3]
y = [4, 5]
x.append(y)
y[0] = 99
print('x:', x, '\ny:', y)
```
So again, we have to watch out for aliasing/sharing, whenever we mutate an object.
```
x = [1, 2, 3]
y = [4, 5]
x.extend(y)
y[0] = 88
print('x:', x, '\ny:', y)
```
<pre>
</pre>
What happens when using the `+` operator used on lists?
```
x = [1, 2, 3]
y = x
x = x + [4, 5]
print('x:', x)
```
So the `+` operator on a list looks sort of like extend. But has it changed `x` in place, or made a copy of `x` first for use in the longer list?
And what happens to `y` in the above?
```
print('y:', y)
```
So that clarifies things -- the `+` operator on a list makes a (shallow) copy of the left argument first, then uses that copy in the new larger list.
Another case, this time using the `+=` operator with a list. Note: in the case of integers, `a = a + <val>` and `a += <val>` gave exactly the same result. How about in the case of lists?
```
x = [1, 2, 3]
y = x
x += [4, 5]
y[0] = 77
print('x:', x, '\ny:', y)
```
So `x += <something>` is NOT the same thing as `x = x + <something>` if `x` is a list! Here it actually DOES mutate or change `x` in place, if that is allowed (i.e., if `x` is a mutable object).
Contrast this with the same thing, but for `x` in the case where `x` was a string. Since strings are immutable, python does not change `x` in place. Rather, the `+=` operator is overloaded to do a top-level copy of the target, make that copy part of the new larger object, and assign that new object to the variable.
Let's check your understanding. What will happen in the following, that looks just like the code above for lists, but instead using tuples. What will x and y be after executing this?
```
x = (301, 302, 303)
y = x
x += (304, 305)
print('x:', x, '\ny:', y)
```
## Functions and scoping
```
x = 500
def foo(y):
return x + y
z = foo(307)
print('x:', x, '\nfoo:', foo, '\nz:', z)
def bar(x):
x = 1000
return foo(307)
w = bar('hi')
print('x:', x, '\nw:', w)
```
Importantly, `foo` "remembers" that it was created in the global environment, so looks in the global environment to find a value for `x`. It does **not** look back in its "call chain"; rather, it looks back in its parent environment.
### Optional arguments and default values
```
def foo(x, y = []):
y = y + [x]
return y
a = foo(7)
b = foo(8, [1, 2, 3])
print('a:', a, '\nb:', b)
c = foo(7)
print('a:', a, '\nb:', b, '\nc:', c)
```
Let's try something that looks close to the same thing... but with an important difference!
```
def foo(x, y = []):
y.append(x) # different here
return y
a = foo(7)
b = foo(8, [1, 2, 3])
print('a:', a, '\nb:', b)
```
Okay, so far it looks the same as with the earlier `foo`.
```
c = foo(7)
print('a:', a, '\nb:', b, '\nc:', c)
```
So quite different... all kinds of aliasing going on. Perhaps surprisingly, the default value to an optional argument is only evaluated once, at function *definition* time. The moral here is to be **very** careful (and indeed it may be best to simply avoid) having optional/default arguments that are mutable structures like lists... it's hard to remember or debug such aliasing!
## Reference Counting
This is an advanced feature you don't need to know about, but you might be curious about. Python knows to throw away an object when its "reference counter" reaches zero. You can inspect the current value of an object's reference counter with `sys.getrefcount`.
```
import sys
L1 = [301, 302, 303]
print(sys.getrefcount(L1))
L2 = L1
print(sys.getrefcount(L1))
L3 = [L1, L1, L1]
print(sys.getrefcount(L1))
L3.pop()
print(sys.getrefcount(L1))
L3 = 307
print(sys.getrefcount(L1))
```
## Readings -- if you want/need more refreshers
Check out readings and exercises from <a href=https://hz.mit.edu/catsoop/6.145><b>6.145</b></a>:
<ul>
<li> <a href=https://hz.mit.edu/catsoop/6.145/assignment0.0/readings#_variables_and_assignment>Assignment and aliasing</a>
<li> What is an <a href=https://hz.mit.edu/catsoop/6.145/assignment0.0/readings#_environment_diagrams>environment</a>? What is a frame? How should we draw environment diagrams?
<li> What is a <a href=https://hz.mit.edu/catsoop/6.145/assignment1.0/readings>function</a>?
What happens when one is defined? What happens when one is called?
<li> What happens when a <a href=https://hz.mit.edu/catsoop/6.145/assignment1.1/readings#_function_ception_and_returning_functions>function is defined inside another function</a> (also known as a closure)?
<li> What is a <a href=https://hz.mit.edu/catsoop/6.145/assignment2.0/readings>class</a>? What is an instance? What is self? What is __init__?
<li> How does <a href=https://hz.mit.edu/catsoop/6.145/assignment2.1/readings>inheritance</a> in classes work?
</ul>
Another resource is the <a href=https://greenteapress.com/wp/think-python-2e/>Think Python</a> textbook.
| github_jupyter |

```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from scipy.spatial.distance import cdist, pdist
from sklearn.metrics import silhouette_score
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
df = pd.read_csv("datasets/household_power_consumption.csv",
dtype={'Global_active_power': str, 'Global_reactive_power': str,
'Voltage': str, 'Global_intensity': str,
'Sub_metering_1': str, 'Sub_metering_2': str,
'Sub_metering_3': str},
sep=";")
df.info()
df.head()
df.isnull().sum()
```
# Preprocessing
```
# Fix the date type and merge Date+Time into a single Datetime column.
# (indentation of the for-loop body restored; the notebook dump had lost it)
df["Datetime"] = df["Date"] + " " + df["Time"]
df["Datetime"] = pd.to_datetime(df["Datetime"], format="%d/%m/%Y %H:%M:%S")
df = df.drop(columns=["Date", "Time"])
# Fix column types: the raw file marks missing values with "?"; convert those
# to NaN, cast to float, then impute with the column mode.
# NOTE(review): mode imputation on continuous power readings is a strong
# simplification — confirm it is acceptable for this analysis.
for col in df.select_dtypes(include="object").columns:
    df.loc[(df[col] == "?"), [col]] = np.nan
    df[col] = df[col].astype(float)
    mode = df[col].mode()[0]
    df[col] = df[col].replace(np.nan, mode)
df.isnull().sum()
# Select the model inputs: every column except the Datetime column.
loc_X = df.iloc[0:, :7]
loc_X.head()
# Input values as an np.array
X = loc_X.values
# Sample 1% of the full data for the model
df_sample, sample = train_test_split(X, train_size = .01)
df_sample.shape
# Dimensionality reduction: project the 7 feature columns onto the first
# 2 principal components.
pca = PCA(n_components = 2).fit_transform(df_sample)
# Candidate values for the K-means hyperparameter K.
k_range = range(1, 12)
k_range
# Fit one K-means model per candidate K.
k_means_var = [KMeans(n_clusters = k).fit(pca) for k in k_range]
# Elbow curve
# Cluster centers for each fitted model.
centroids = [X.cluster_centers_ for X in k_means_var]
# Euclidean distance from each data point to each centroid, then keep the
# distance to the nearest centroid.
k_euclid = [cdist(pca, cent, 'euclidean') for cent in centroids]
dist = [np.min(ke, axis = 1) for ke in k_euclid]
# Within-cluster sum of squared distances, one value per K.
soma_quadrados_intra_cluster = [sum(d**2) for d in dist]
# Total sum of squares.
soma_total = sum(pdist(pca)**2)/pca.shape[0]
# Between-cluster sum of squares = total - within.
soma_quadrados_inter_cluster = soma_total - soma_quadrados_intra_cluster
# Plot the elbow curve: explained variance (%) for each K.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(k_range, soma_quadrados_inter_cluster/soma_total*100, "b*-")
ax.set_ylim((0, 100))
plt.grid(True)
plt.xlabel("N° de Clusters")
plt.ylabel("% de Variância Explicada")
plt.title("Variância Explicada para cada valor de K")
# Choose K = 8 and fit the final model for evaluation.
model_v1 = KMeans(n_clusters=8)
model_v1.fit(pca)
# Build the plotting grid from the data extent.
# NOTE(review): the usual pattern is min()-1 / max()+1 on both axes; here
# x uses min()-5, max()-1 and y uses min()+1, max()+1, which may clip the
# plotted region — confirm these offsets are intentional.
x_min, x_max = pca[:, 0].min() - 5, pca[:, 0].max() - 1
y_min, y_max = pca[:, 1].min() + 1, pca[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .02), np.arange(y_min, y_max, .02))
# Predict the cluster for every grid point and reshape for imshow.
Z = model_v1.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the cluster regions.
plt.figure(1)
plt.clf()
plt.imshow(
Z,
interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto',
origin='lower'
);
# Clustering evaluation metric (IPython help):
# the best value is 1 and the worst value is -1.
?silhouette_score
# Silhouette score for the fitted model.
labels = model_v1.labels_
silhouette_score(pca, labels, metric='euclidean')
# Attach the predicted cluster id to each sampled row.
# Column names for the sampled feature matrix.
names = loc_X.columns.tolist()
# Build a labelled frame and add the cluster assignment column.
cluster_map = pd.DataFrame(df_sample, columns=names)
cluster_map["cluster"] = model_v1.labels_
cluster_map.head()
cluster_map.dtypes
cluster_map.groupby("cluster")["Global_active_power"].mean().rename("Mean_GAP").reset_index()
# Conclusão: Grupo 5 tem maior consumo de energia dentre os 8 grupos
# Grupo 0 tem menor consumo, assim, possivel falha na transmissão de energia?
```
| github_jupyter |
```
from crystal_toolkit.helpers.layouts import Columns, Column
from crystal_toolkit.settings import SETTINGS
from jupyter_dash import JupyterDash
from pydefect.analyzer.calc_results import CalcResults
from pydefect.analyzer.dash_components.cpd_energy_dash import CpdEnergy2D3DComponent, CpdEnergyOtherComponent
from pydefect.chem_pot_diag.chem_pot_diag import ChemPotDiag, CpdPlotInfo, \
    CompositionEnergy
from pydefect.corrections.manual_correction import ManualCorrection
from pydefect.input_maker.defect_entry import DefectEntry
from pymatgen import Composition, Structure, Lattice, Element
import dash_html_components as html
import crystal_toolkit.components as ctc
from dash.dependencies import Input, Output, State
import json
# Dash app embedded in Jupyter; assets served from crystal_toolkit's bundle
app = JupyterDash(suppress_callback_exceptions=True,
                  assets_folder=SETTINGS.ASSETS_PATH)
from vise.analyzer.band_edge_properties import BandEdge
# Composition energies: elemental references at 0.0 plus the target
# quaternary compound at -100.0 (label "a" for every entry)
comp_energies = [
    CompositionEnergy(Composition("Mg"), 0.0, "a"),
    CompositionEnergy(Composition("Ca"), 0.0, "a"),
    CompositionEnergy(Composition("Sr"), 0.0, "a"),
    CompositionEnergy(Composition("O"), 0.0, "a"),
    CompositionEnergy(Composition("H"), 0.0, "a"),
    # CompositionEnergy(Composition("MgCaO3"), -100.0, "a"),
    CompositionEnergy(Composition("MgCaSrO3"), -100.0, "a"),
]
#cpd = ChemPotDiag(comp_energies, target=Composition("MgCaO3"))
cpd = ChemPotDiag(comp_energies, target=Composition("MgCaSrO3"))
cpd_plot_info = CpdPlotInfo(cpd)
# Inspect the chemical-potential diagram
print(cpd.target.elements)
print(cpd.dim)
print(cpd.target_vertices)
print(cpd.all_compounds)
print(cpd.impurity_abs_energy(Element.H, label="A"))
# Minimal perfect / defective structures: two O atoms vs. one (an O vacancy)
structure = Structure(Lattice.cubic(1), species=["O"] * 2, coords=[[0]*3]*2)
defect_structure = Structure(Lattice.cubic(1), species=["O"] * 1, coords=[[0]*3])
# Calculation metadata shared by the perfect and defective results
common = dict(site_symmetry="1",
              magnetization=0.0,
              kpoint_coords=[[0]*3],
              kpoint_weights=[1.0],
              potentials=[0.0],
              vbm_info=BandEdge(0.0),
              cbm_info=BandEdge(1.0),
              fermi_level=0.0)
perfect = CalcResults(structure=structure,energy=0, **common)
defects = [CalcResults(structure=defect_structure, energy=1.0, **common),
           CalcResults(structure=defect_structure, energy=0.5, **common)]
de_common = dict(name="Va_O1",
                 structure=defect_structure, site_symmetry="1",
                 perturbed_structure=defect_structure, defect_center=[[0]*3])
# The same vacancy in two charge states, each with a manual 1.0 correction
defect_entries = [DefectEntry(charge=0, **de_common),
                  DefectEntry(charge=1, **de_common)]
corrections = [ManualCorrection(correction_energy=1.0),
               ManualCorrection(correction_energy=1.0)]
cpd_e_component = CpdEnergyOtherComponent(cpd_plot_info,
                                          perfect,
                                          defects,
                                          defect_entries,
                                          corrections)
# Register the component with crystal toolkit and launch the Dash server
my_layout = html.Div([Column(cpd_e_component.layout)])
ctc.register_crystal_toolkit(app=app, layout=my_layout, cache=None)
app.run_server(port=8097)
#app.run_server(mode='inline', port=8094)
```
| github_jupyter |
```
import pandas as pd
```
# Read the CSV and Perform Basic Data Cleaning
```
# Load the Kepler exoplanet dataset
df = pd.read_csv("../data/exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
# Quick inspection of the cleaned frame
df.head()
df.tail()
df.info()
```
# Select your features (columns)
```
# Set features. This will also be used as x values.
# Target: the KOI disposition label; features: every remaining column.
target = df["koi_disposition"]
target_names = ['CANDIDATE','CONFIRMED','FALSE POSITIVE']
features = df.drop("koi_disposition", axis=1)
feature_names=features.columns
```
# Create a Train Test Split
Use `koi_disposition` for the y values
```
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical
# Step 1: Label-encode data set (CANDIDATE/CONFIRMED/FALSE POSITIVE -> 0/1/2)
label_encoder = LabelEncoder()
label_encoder.fit(target)
encoded_y = label_encoder.transform(target)
encoded_y
from sklearn.model_selection import train_test_split
# Reproducible split (default 75/25) keyed by random_state
X_train, X_test, y_train, y_test = train_test_split(features, encoded_y, random_state=42)
X_train.shape, y_train.shape
```
# Pre-processing
Scale the data using the MinMaxScaler and perform some feature selection
```
# Scale the data
from sklearn.preprocessing import MinMaxScaler
# Fit the scaler on the training split only, then apply it to both splits
# so no information leaks from the test set
X_scaler = MinMaxScaler().fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
```
# Train the Model
```
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
# Loop through different k values to see which has the highest accuracy
# Note: We only use odd numbers because we don't want any ties
train_scores = []
test_scores = []
for k in range(1, 20, 2):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train_scaled, y_train)
    train_score = knn.score(X_train_scaled, y_train)
    test_score = knn.score(X_test_scaled, y_test)
    train_scores.append(train_score)
    test_scores.append(test_score)
    print(f"k: {k}, Train/Test Score: {train_score:.3f}/{test_score:.3f}")
# Visualise train vs. test accuracy across k
plt.plot(range(1, 20, 2), train_scores, marker='o')
plt.plot(range(1, 20, 2), test_scores, marker="x")
plt.xlabel("k neighbors")
plt.ylabel("Testing accuracy Score")
plt.show()
# Retrain with the chosen k = 19 and report both scores
knn = KNeighborsClassifier(n_neighbors=19)
knn.fit(X_train_scaled, y_train)
predictions = knn.predict(X_test_scaled)
print(f"k = 19: Training Data Score: {knn.score(X_train_scaled, y_train):.5f}")
print(f"k = 19: Testing Data Score: {knn.score(X_test_scaled, y_test):.5f}")
from sklearn.metrics import classification_report
predictions = knn.predict(X_test_scaled)
# Per-class precision/recall/F1 with human-readable class names
print(classification_report(y_test, predictions,
                            target_names=list(label_encoder.inverse_transform([0,1,2]))))
```
# Hyperparameter Tuning
Use `GridSearchCV` to tune the model's parameters
```
# Create the GridSearchCV model
# Create the GridSearch estimator along with a parameter object containing the values to adjust
from sklearn.model_selection import GridSearchCV
param_grid = {'n_neighbors': [3,5,7,9,11,13,15,17,19],
              'weights':['uniform','distance']}
grid = GridSearchCV(KNeighborsClassifier(), param_grid, verbose=3)
# Train the model with GridSearch (cross-validated over param_grid)
grid.fit(X_train_scaled, y_train)
print(grid.best_params_)
print(grid.best_score_)
from sklearn.metrics import classification_report
# Report per-class metrics for the tuned model on the test split
predictions = grid.predict(X_test_scaled)
print(classification_report(y_test, predictions,
                            target_names=list(label_encoder.inverse_transform([0,1,2]))))
```
# Save the Model
```
# save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
import joblib
# Persist the trained KNN model to disk
filename = 'knn_model.sav'
joblib.dump(knn, filename)
```
| github_jupyter |
# V2: SCF optimization with VAMPyR
## V2.1: Hydrogen atom
In order to solve the one-electron Schrödinger equation in MWs we reformulate it in an integral form [1].
\begin{equation}
\phi = -2\hat{G}_{\mu}\hat{V}\phi
\end{equation}
Where $\hat{V}$ is the potential acting on the system, $\phi$ is the wavefunction, $\hat{G}$ is the Helmholtz integral operator, where its kernel is defined as $G_\mu(r - r') = \frac{\exp(-\mu |r - r'|)}{4\pi |r - r'|}$
and $\mu$ is a parameter defined above through the energy.
The Helmholtz operator is already implemented in vampyr, therefore the only things you need are the integral KS equation and the definition of $\mu$
\begin{equation}
\mu = \sqrt{-2E}
\end{equation}
The way you initialize the Helmholtz operator is as follows
```
H = vp.HelmholtzOperator( mra, exp=mu, prec=eps )
```
where `mu` is the $\mu$ is the parameter defined above, mra you have seen before, and `eps` is the desired threshold precision. This operator is applied the same way you applied the vp.ScalingProjector earlier.
In this exercise you will be solving the KS equation iteratively for a simple system, the Hydrogen atom. This means that you only have the nuclear potential to take into account for the potential term in the KS equation.
$$ V_{nuc}(\mathbf{r}) = -\frac{1}{|\mathbf{r}|}$$
We will also be working with a single orbital, of which the initial guess is
$$ \phi_0(\mathbf{r}) = e^{-|\mathbf{r}|^2} $$
where
$$ |\mathbf{r}| = \sqrt{x^2 + y^2 + z^2}$$
The orbital update is defined as follows
\begin{align}
\Delta\tilde{\phi}^n &= -2\hat{G}[V_{nuc}\phi^n] - \phi^n \\
\Delta\tilde{\phi}^n &= \tilde{\phi}^{n+1} - \phi^n
\end{align}
where we use a tilde (~) to denote a function that is **not** normalized, and $n$ is the iteration index.
#### Implementation exercise:
1. Make a nuclear potential as a python function `f_nuc(r)`
2. Make an initial guess for the orbital as a python function `f_phi(r)` (hint: use `np.exp` to get an exponential function)
3. Create a Helmholtz operator $G_\mu$ with $\mu$ as shown above, use the exact value of $E = -0.5 a.u.$ for a hydrogen atom
4. Project both nuclear potential ($V$) and orbital ($\phi_n$) to the MW basis using a `vp.ScalingProjector` with precision $\epsilon=1.0e-3$
5. Compute new orbital through application of the Helmholtz operator
6. Compute the size of the orbital update $||\tilde{\phi}^{n+1} - \phi^n||$
7. Normalize the orbital $\phi^{n+1} = \tilde{\phi}^{n+1}/||\tilde{\phi}^{n+1}||$
8. Update orbital $\phi^{n+1} \rightarrow \phi^{n}$ for next iteration
9. Repeat steps 5-8 until your wavefunction has converged
The convergence criterion is the norm of $\Delta \phi^n$, but you should start by looping a set amount of times before trying the threshold.
```
from vampyr import vampyr3d as vp
import numpy as np
import matplotlib.pyplot as plt

# Sampling line along the x-axis, used for plotting the orbital below
r_x = np.linspace(-0.99, 0.99, 1000) # create an evenly spaced set of points between -0.99 and 0.99
r_y = np.zeros(1000)
r_z = np.zeros(1000)
r = [r_x, r_y, r_z]

# Analytic nuclear potential
def f_nuc(r):
    # TODO: implement the nuclear potential
    return

# Analytic guess for solution
def f_phi(r):
    # TODO: implement the initial guess for the orbital
    return

# Prepare Helmholtz operator with mu = sqrt(-2E), exact E for hydrogen
E = -0.5
mu = np.sqrt(-2*E)
# NOTE: the three assignments below are intentionally incomplete exercise
# stubs — the cell will not run until the right-hand sides are filled in.
G_mu = # TODO: Construct BSH operator from mu)
V = # TODO: Project nuclear potential V from f_nuc
phi_n = # TODO: Project starting guess phi_n from f_phi
phi_n.normalize()

# Optimization loop
thrs = 1.0e-3
update = 1.0
i = 0
while (i < 3): # switch to (update > thrs) later
    # TODO:
    # Compute product of potential V and wavefunction phi_n
    # Apply Helmholtz operator to obtain phi_np1
    # Compute norm = ||phi^{n+1}||
    # Compute update = ||phi^{n+1} - phi^{n}||

    # this will plot the wavefunction at each iteration
    phi_n_plt = [phi_n([x, 0.0, 0.0]) for x in r_x]
    plt.plot(r_x, phi_n_plt)

    # this will print some info; norm and update must be computed in the loop
    print("iteration: {} Norm: {} Update: {}".format(i, norm, update))
    i += 1
plt.show()
```
## V2.2 Extension to Helium
A few things change when you go from Hydrogen to Helium:
1. The energy is no longer known exactly, and thus will have to be computed from the wavefunction
2. The Helmholtz operator which depends on the energy through $\mu = \sqrt{-2E}$ needs to be updated in every iteration
3. The potential operator $V$ depends on the wavefunction and must be updated in every iteration
In this example we will use the Hartree-Fock model, which for a single-orbital system like Helium, reduces to the following potential operator:
\begin{align}
\hat{V} &= \hat{V}_{nuc} + 2\hat{J} - \hat{K}\\
&= \hat{V}_{nuc} + \hat{J}
\end{align}
since $\hat{K} = \hat{J}$ for a doubly occupied single orbital.
The Coulomb potential $\hat{J}$ can be computed by application of the Poisson operator $P$:
\begin{equation}
\hat{J}(r) = P\left[4\pi\rho\right]
\end{equation}
Where $\rho$ is the square of the orbital
\begin{equation}
\rho = \phi*\phi
\end{equation}
#### Pen and paper exercise:
Use the fact that
\begin{equation}
\tilde{\phi}^{n+1} = -\Big[\hat{T} - E^n\Big]^{-1} V^n\phi^n \end{equation}
to show that
\begin{equation}
E^{n+1} = \frac{\langle\tilde{\phi}^{n+1}|\hat{T} +
\hat{V}^{n+1}|\tilde{\phi}^{n+1}\rangle}
{||\tilde{\phi}^{n+1}||^2}
\end{equation}
can be written as a pure update $dE^n$ involving only the potentials $\hat{V}^{n+1}$, $\hat{V}^n$ as well as the orbitals $\tilde{\phi}^{n+1}$ and $\phi^n$
\begin{equation}
E^{n+1} = E^{n} + dE^n
\end{equation}
#### Implementation exercise:
1. Make a nuclear potential function `f_nuc(r)` for the Helium atom
2. Make an initial guess for the orbital as a python function `f_phi(r)` (hint: use `np.exp` to get an exponential function)
3. Project both nuclear potential ($V$) and orbital ($\phi_n$) to the MW basis using a `vp.ScalingProjector` with precision $\epsilon=1.0e-3$
4. Create a Helmholtz operator $G^n$ with $\mu^n$ using the current energy $E^n$
5. Compute total potential $\hat{V^n} = \hat{V}_{nuc} + \hat{J^n}$, where the Coulomb potential is computed using the `vp.PoissonOperator` on the current squared orbital $\rho^n = ||\phi^n||^2$
6. Compute new orbital through application of the Helmholtz operator on $\phi^{n+1} = -2\hat{G}^n\hat{V}^n\phi^n$
7. Compute the size of the orbital update $||\tilde{\phi}^{n+1} - \phi^n||$
8. Normalize the orbital $\phi^{n+1} = \tilde{\phi}^{n+1}/||\tilde{\phi}^{n+1}||$
9. Update orbital $\phi^{n+1} \rightarrow \phi^{n}$ for next iteration
10. Repeat steps 4-9 until your wavefunction has converged
The convergence criterion is the norm of $\Delta \phi^n$, but you should start by looping a set amount of times before trying the threshold.
```
from vampyr import vampyr3d as vp
import numpy as np
import matplotlib.pyplot as plt

# Sampling line along the x-axis, used for plotting the orbital below
r_x = np.linspace(-0.99, 0.99, 1000) # create an evenly spaced set of points between -0.99 and 0.99
r_y = np.zeros(1000)
r_z = np.zeros(1000)
r = [r_x, r_y, r_z]

# Analytic nuclear potential Helium
def f_nuc(r):
    # implement the nuclear potential
    return

# Analytic guess for solution (same as for Hydrogen)
def f_phi(r):
    # implement the initial guess for the orbital
    return

# TODO:
# Project nuclear potential V_nuc from f_nuc
# Project starting guess phi_n from f_phi
# Set a starting guess E_n for the energy

# Optimization loop: unlike Hydrogen, the Helmholtz operator must be
# rebuilt every iteration because E_n is updated as the SCF converges.
thrs = 1.0e-3
update = 1.0
i = 0
while (i < 3): # switch to (update > thrs) later
    # Prepare Helmholtz operator from current energy
    mu_n = np.sqrt(-2*E_n)
    # NOTE: intentionally incomplete exercise stubs below; the cell will
    # not run until the right-hand sides are filled in.
    G_n = # TODO: Construct BSH operator from mu_n)

    # TODO:
    # Compute rho
    # Initialize vp.PoissonOperator and compute J
    # Compute total potential V = V_nuc + J
    # Iterate Helmholtz operator to get new orbital phi^{n+1}

    dE_n = # TODO: insert energy expression from above

    # Prepare for next iteration
    E_n += dE_n
    phi_n += dPhi_n

    # This will plot the wavefunction at each iteration
    phi_n_plt = [phi_n([x, 0.0, 0.0]) for x in r_x]
    plt.plot(r_x, phi_n_plt)

    # this will print some info, you need to compute in the loop:
    # norm = ||phi^{n+1}||
    # update = ||phi^{n+1} - phi^{n}||
    print("iteration: {} Energy: {} Norm: {} Update: {}".format(i, E_n, norm, update))
    i += 1
plt.show()
```
You should expect the orbital energy to converge towards
$E_n \approx -0.918$.
#### Bonus exercise:
The total energy can be computed after convergence as
$E_{tot} = 2E_n - \langle\rho|J\rangle$, should be around $E_{tot} \approx -2.86$.
## Sources
[1] Stig Rune Jensen, Santanu Saha, José A. Flores-Livas, William Huhn, Volker Blum, Stefan Goedecker, and Luca Frediani The Elephant in the Room of Density Functional Theory Calculations. The Journal of Physical Chemistry Letters 2017 8 (7), 1449-1457
DOI: 10.1021/acs.jpclett.7b00255
| github_jupyter |
# Predição de atraso de voos
https://docs.microsoft.com/en-us/learn/modules/predict-flight-delays-with-python/0-introduction
### Importando o arquivo
```
# Download the flight dataset (notebook shell escape — runs curl)
!curl https://topcs.blob.core.windows.net/public/FlightData.csv -o flightdata.csv
import pandas as pd
df = pd.read_csv('flightdata.csv')
df.head()
# Report the frame's dimensions
observacoes, features = df.shape
print("O dataframe possui {} observacoes e {} features.".format(observacoes, features))
```
* Column Description
* YEAR Year that the flight took place
* QUARTER Quarter that the flight took place (1-4)
* MONTH Month that the flight took place (1-12)
* DAY_OF_MONTH Day of the month that the flight took place (1-31)
* DAY_OF_WEEK Day of the week that the flight took place (1=Monday, 2=Tuesday, etc.)
* UNIQUE_CARRIER Airline carrier code (e.g., DL)
* TAIL_NUM Aircraft tail number
* FL_NUM Flight number
* ORIGIN_AIRPORT_ID ID of the airport of origin
* ORIGIN Origin airport code (ATL, DFW, SEA, etc.)
* DEST_AIRPORT_ID ID of the destination airport
* DEST Destination airport code (ATL, DFW, SEA, etc.)
* CRS_DEP_TIME Scheduled departure time
* DEP_TIME Actual departure time
* DEP_DELAY Number of minutes departure was delayed
* DEP_DEL15 0=Departure delayed less than 15 minutes, 1=Departure delayed 15 minutes or more
* CRS_ARR_TIME Scheduled arrival time
* ARR_TIME Actual arrival time
* ARR_DELAY Number of minutes flight arrived late
* ARR_DEL15 0=Arrived less than 15 minutes late, 1=Arrived 15 minutes or more late
* CANCELLED 0=Flight was not cancelled, 1=Flight was cancelled
* DIVERTED 0=Flight was not diverted, 1=Flight was diverted
* CRS_ELAPSED_TIME Scheduled flight time in minutes
* ACTUAL_ELAPSED_TIME Actual flight time in minutes
* DISTANCE Distance traveled in miles
### Limpeza e preparação dos dados
```
# Are there any null values?
df.isnull().values.any()
# In which columns?
df.isnull().sum()
# The 'Unnamed: 25' column has no values for any observation, so drop it
df = df.drop('Unnamed: 25', axis=1)
# Keep only the columns used for modelling
df = df[["MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "ORIGIN", "DEST", "CRS_DEP_TIME", "ARR_DEL15"]]
df.isnull().sum()
# Inspect a few rows that still contain nulls
df[df.isnull().values.any(axis=1)].head()
# The null ARR_DEL15 rows correspond to delayed flights, so flag them as such
df = df.fillna({'ARR_DEL15': 1})
# CRS_DEP_TIME is an hhmm integer; keep only the hour to reduce its range.
# Vectorized floor-division replaces the original per-row iterrows loop —
# identical result for the non-negative hhmm values (floor(x/100) == x // 100)
# and O(1) pandas-level work instead of a Python-level loop.
df['CRS_DEP_TIME'] = df['CRS_DEP_TIME'] // 100
df.head()
# One-hot encode the categorical ORIGIN and DEST columns
df = pd.get_dummies(df, columns=['ORIGIN', 'DEST'])
df.head()
```
### Realizando a separação dos dados em treino e teste
```
from sklearn.model_selection import train_test_split
# 80/20 split of features vs. the ARR_DEL15 target, fixed seed for reproducibility
train_x, test_x, train_y, test_y = train_test_split(df.drop('ARR_DEL15', axis=1), df['ARR_DEL15'], test_size=0.2, random_state=42)
print("{} observações para treino e {} observações para teste".format(train_x.shape[0],test_x.shape[0]))
```
### Realizando o treinamento com o RandomForest
```
from sklearn.ensemble import RandomForestClassifier
# Fit a random forest with a fixed seed for reproducibility
model = RandomForestClassifier(random_state=13)
model.fit(train_x, train_y)
# Predict on the held-out test set
predicted = model.predict(test_x)
```
### Verificando métricas da predição
```
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
# Accuracy on the held-out test set
pred_accuracy = model.score(test_x, test_y)
print("Acurácia {}".format(pred_accuracy))
# ROC AUC from the positive-class probabilities
probabilities = model.predict_proba(test_x)
pred_roc = roc_auc_score(test_y, probabilities[:, 1])
print("Curva ROC {}".format(pred_roc))
# NOTE(review): precision and recall below are computed on the TRAINING
# predictions, so they will look optimistic — verify whether test_x/test_y
# were intended here.
train_predictions = model.predict(train_x)
pred_precision = precision_score(train_y, train_predictions)
print("Precisão {}".format(pred_precision))
pred_recall = recall_score(train_y, train_predictions)
print("Recall {}".format(pred_recall))
from sklearn.metrics import confusion_matrix
# Confusion matrix on the test set, unpacked into readable counts
cf_matrix = confusion_matrix(test_y, predicted)
print("Flights on time:")
print(" - and predicted on time:", cf_matrix[0][0])
print(" - but predicted delayed:", cf_matrix[0][1])
print("Flights delayed:")
print(" - but predicted on time:", cf_matrix[1][0])
print(" - and predicted delayed:", cf_matrix[1][1])
```
### Visualizando a saída do modelo preditivo
```
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.metrics import roc_curve
# ROC curve for the positive class, with the chance diagonal for reference
fpr, tpr, _ = roc_curve(test_y, probabilities[:, 1])
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], color='grey', lw=1, linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
```
### Expondo o modelo
```
def predict_delay(departure_date_time, origin, destination):
    """Return the probability that a flight arrives on time.

    Parameters
    ----------
    departure_date_time : str
        Scheduled departure in 'day/month/year hour:minute:second' format.
    origin, destination : str
        Airport codes (case-insensitive), e.g. 'JFK', 'ATL'.

    Returns
    -------
    float or str
        Probability of on-time arrival from the trained ``model``, or an
        error-message string if the date fails to parse.
    """
    from datetime import datetime

    try:
        departure_date_time_parsed = datetime.strptime(departure_date_time, '%d/%m/%Y %H:%M:%S')
    except ValueError as e:
        return 'Error parsing date/time - {}'.format(e)

    month = departure_date_time_parsed.month
    day = departure_date_time_parsed.day
    day_of_week = departure_date_time_parsed.isoweekday()
    hour = departure_date_time_parsed.hour

    origin = origin.upper()
    destination = destination.upper()

    # Column names (and order) must match the frame the model was trained on.
    # Fix: the training data's column is 'DAY_OF_MONTH', not 'DAY' — recent
    # scikit-learn versions reject predictions with mismatched feature names.
    # (Also renamed the local from 'input' to avoid shadowing the builtin.)
    row = [{'MONTH': month,
            'DAY_OF_MONTH': day,
            'DAY_OF_WEEK': day_of_week,
            'CRS_DEP_TIME': hour,
            'ORIGIN_ATL': 1 if origin == 'ATL' else 0,
            'ORIGIN_DTW': 1 if origin == 'DTW' else 0,
            'ORIGIN_JFK': 1 if origin == 'JFK' else 0,
            'ORIGIN_MSP': 1 if origin == 'MSP' else 0,
            'ORIGIN_SEA': 1 if origin == 'SEA' else 0,
            'DEST_ATL': 1 if destination == 'ATL' else 0,
            'DEST_DTW': 1 if destination == 'DTW' else 0,
            'DEST_JFK': 1 if destination == 'JFK' else 0,
            'DEST_MSP': 1 if destination == 'MSP' else 0,
            'DEST_SEA': 1 if destination == 'SEA' else 0 }]
    return model.predict_proba(pd.DataFrame(row))[0][0]
```
### Executando testes do modelo
```
# Single prediction: probability of on-time arrival for a JFK -> ATL flight
predict_delay('1/10/2018 21:45:00', 'JFK', 'ATL')
import numpy as np
# Same route across a week of departure dates, plotted as a bar chart
labels = ('Oct 1', 'Oct 2', 'Oct 3', 'Oct 4', 'Oct 5', 'Oct 6', 'Oct 7')
values = (predict_delay('1/10/2018 21:45:00', 'JFK', 'ATL'),
          predict_delay('2/10/2018 21:45:00', 'JFK', 'ATL'),
          predict_delay('3/10/2018 21:45:00', 'JFK', 'ATL'),
          predict_delay('4/10/2018 21:45:00', 'JFK', 'ATL'),
          predict_delay('5/10/2018 21:45:00', 'JFK', 'ATL'),
          predict_delay('6/10/2018 21:45:00', 'JFK', 'ATL'),
          predict_delay('7/10/2018 21:45:00', 'JFK', 'ATL'))
alabels = np.arange(len(labels))
plt.bar(alabels, values, align='center', alpha=0.5)
plt.xticks(alabels, labels)
plt.ylabel('Probability of On-Time Arrival')
plt.ylim((0.0, 1.0))
```
| github_jupyter |
```
import os
import glob
import LatLon
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 10)

# plot
%matplotlib inline
import matplotlib.pyplot as plt
import pylab
import seaborn as sns
sns.set_style("whitegrid")
from pysurvey.plot import setup, legend, icolorbar, density, minmax
import geoplotlib
import geoplotlib.colors

# NOTE(review): this cell is Python 2 code (print statements, reload()),
# and pd.DataFrame.from_csv / DataFrame.sort were removed in modern pandas.
clean = pd.DataFrame.from_csv('/Users/ajmendez/tmp/flight/flight_clean_3.csv')
# NOTE(review): 'flights' is first assigned several lines below — as written
# this line raises a NameError; presumably it was meant to read from 'clean'.
clean['flightid'] = flights['flight']+'.'+flights['flightindex'].apply(str)
clean[clean['flightpoints'] > 100].groupby(['flight'], as_index=False).count().sort('date')
# Keep a couple of specific aircraft for inspection
isgood = ( (clean['flight'] == 'A719AA') |
#            (clean['flight'] == 'A9C737') |
#            (clean['flight'] == 'A313B4') |
           (clean['flight'] == '406696') )
flights = clean[isgood]
print len(flights)
# flights.plot('flightindex', 'flightpoints', kind='scatter')
# Scatter of the flight tracks, coloured by observation timestamp
flights.plot('lon', 'lat', kind='scatter', c='datenum',
             cmap=pylab.cm.Spectral, lw=0, alpha=0.5)
# pylab.axhline(10)
# Draw each flight number on a dark map tile layer.
# NOTE(review): 'flightnum' is not created anywhere in this cell — confirm
# it exists in the CSV.
colors = geoplotlib.colors.create_set_cmap(flights['flightnum'], pylab.cm.jet)
geoplotlib.tiles_provider('darkmatter')
for fi in np.unique(flights['flightnum']):
    geoplotlib.scatter(flights[flights['flightnum'] == fi], color=colors[fi])
bbox = geoplotlib.utils.BoundingBox(40.5,-78.0,38.5,-76)
geoplotlib.set_bbox(bbox)
geoplotlib.inline(800)

import sys
sys.path.append('/Users/ajmendez/tmp/sklearn-expertsys/')
import RuleListClassifier
reload(RuleListClassifier)
from RuleListClassifier import *
# NOTE(review): sklearn.datasets.mldata and sklearn.cross_validation were
# removed in scikit-learn >= 0.20.
from sklearn.datasets.mldata import fetch_mldata
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier

# Sanity check: fit an interpretable rule list on the diabetes dataset
feature_labels = ["#Pregnant","Glucose concentration test","Blood pressure(mmHg)","Triceps skin fold thickness(mm)","2-Hour serum insulin (mu U/ml)","Body mass index","Diabetes pedigree function","Age (years)"]
data = fetch_mldata("diabetes") # get dataset
y = (data.target+1)/2
Xtrain, Xtest, ytrain, ytest = train_test_split(data.data, y) # split
clf = RuleListClassifier(max_iter=100, class1label="diabetes", verbose=False)
# NOTE(review): fit2 here vs. fit below — confirm fit2 is the intended API.
clf.fit2(Xtrain, ytrain, feature_labels=feature_labels)
print "RuleListClassifier Accuracy:", clf.score(Xtest, ytest), "Learned interpretable model:\n", clf
clf = RuleListClassifier(max_iter=10000, class1label="diabetes", verbose=False)
clf.fit(Xtrain, ytrain, feature_labels=feature_labels)
print "RuleListClassifier Accuracy:", clf.score(Xtest, ytest), "Learned interpretable model:\n", clf
print "RandomForestClassifier Accuracy:", RandomForestClassifier().fit(Xtrain, ytrain).score(Xtest, ytest)

# Classify whether a point belongs to flight A719AA from position/time alone
feature_labels = ['lat', 'lon', 'alt', 'datenum']
Xtrain = flights[feature_labels]
ytrain = flights['flight'] == 'A719AA'
clf = RuleListClassifier(max_iter=1000,
                         class1label="flight",
                         verbose=False)
clf.fit(Xtrain, ytrain, feature_labels=feature_labels)
# NOTE(review): the scores below are computed on the training data only.
print "RuleListClassifier Accuracy:", clf.score(Xtrain, ytrain), "Learned interpretable model:\n", clf
print "RandomForestClassifier Accuracy:", RandomForestClassifier().fit(Xtrain, ytrain).score(Xtrain, ytrain)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
## For plotting
import matplotlib.pyplot as plt
from matplotlib import style
import datetime as dt
import seaborn as sns
sns.set_style("whitegrid")
path = '../Data/dff1.csv'
df= pd.read_csv(path, parse_dates=['ds'])
# df = df.rename(columns = {"Date":"ds","Close":"y"})
# Keep the target (y), the Prophet baseline (fbsp), its residual (diff),
# and the macroeconomic features
df = df[['ds', 'y','fbsp', 'diff','tby', 'ffr', 'fta', 'eps', 'div', 'une', 'wti', 'ppi',
         'rfs']]
# df
# Interaction terms between the Prophet baseline / EPS and selected rates
df['fbsp_tby'] = df['fbsp'] * df['tby']
df['fbsp_ffr'] = df['fbsp'] * df['ffr']
df['fbsp_div'] = df['fbsp'] * df['div']
df['eps_tby'] = df['eps'] * df['tby']
df['eps_ffr'] = df['eps'] * df['ffr']
df['eps_div'] = df['eps'] * df['div']
# cutoff between test and train data: the last 252 rows (~one trading year)
cutoff = len(df) - 252
df_train = df[:cutoff].copy()
df_test = df[cutoff:].copy()
print(cutoff)
df_train.columns
# Candidate regressors for best-subset selection below
possible_features = ['tby', 'ffr', 'fta', 'eps', 'div', 'une', 'wti',
                     'ppi', 'rfs', 'fbsp_tby', 'fbsp_ffr', 'fbsp_div', 'eps_tby',
                     'eps_ffr', 'eps_div']
from itertools import chain, combinations
def powerset(iterable):
    """Yield every subset of *iterable* as a tuple, ordered by subset size.

    powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
    """
    items = list(iterable)
    subsets_by_size = (combinations(items, size) for size in range(len(items) + 1))
    return chain.from_iterable(subsets_by_size)
#print(list(powerset(possible_features)))
len(possible_features)
from statsmodels.regression.linear_model import OLS
# Fit the Prophet residual ('diff') on all candidate features (no intercept)
reg_new = OLS((df_train['diff']).copy(),df_train[possible_features].copy()).fit()
print(reg_new.params)
#from the output, we can see it's consistent with sklearn output
new_coef = reg_new.params
# Keep features with non-zero coefficients, then enumerate every non-empty
# subset.  This is exhaustive best-subset selection: with 15 features it
# fits 2**15 - 1 = 32767 OLS models, which can take a while.
new_possible_feats = new_coef[abs(new_coef)>0].index
power_feats = list(powerset(new_possible_feats))
power_feats.remove(())
power_feats = [ list(feats) for feats in power_feats]
len(power_feats)
# Score every subset by AIC and keep the best (lowest-AIC) one
AIC_scores = []
parameters = []
for feats in power_feats:
    tmp_reg = OLS((df_train['diff']).copy(),df_train[feats].copy()).fit()
    AIC_scores.append(tmp_reg.aic)
    parameters.append(tmp_reg.params)
Min_AIC_index = AIC_scores.index(min(AIC_scores))
Min_AIC_feats = power_feats[Min_AIC_index]
Min_AIC_params = parameters[Min_AIC_index]
print(Min_AIC_feats)
print(Min_AIC_params)
len(Min_AIC_feats)
### After selecting the best features, we report the testing error, and make the plot
AIC_df_test = df_test[Min_AIC_feats]
# Model prediction = Prophet baseline + fitted residual correction
AIC_pred_test = AIC_df_test.dot(Min_AIC_params)+df_test.fbsp
AIC_df_train = df_train[Min_AIC_feats]
AIC_pred_train = AIC_df_train.dot(Min_AIC_params)+ df_train.fbsp
from sklearn.metrics import mean_squared_error as MSE
mse_train = MSE(df_train.y, AIC_pred_train)
mse_test = MSE(df_test.y, AIC_pred_test)
#compare with fbprophet()
fb_mse_train = MSE(df_train.y, df_train.fbsp)
fb_mse_test = MSE(df_test.y, df_test.fbsp)
print(mse_train,fb_mse_train)
print(mse_test,fb_mse_test)
df_train.ds
plt.figure(figsize=(18,10))
# plot the training data
plt.plot(df_train.ds,df_train.y,'b',
         label = "Training Data")
plt.plot(df_train.ds, AIC_pred_train,'r-',
         label = "Improved Fitted Values by Best_AIC")
# # plot the fit
plt.plot(df_train.ds, df_train.fbsp,'g-',
         label = "FB Fitted Values")
# # plot the forecast
plt.plot(df_test.ds, df_test.fbsp,'g--',
         label = "FB Forecast")
plt.plot(df_test.ds, AIC_pred_test,'r--',
         label = "Improved Forecast by Best_AIC")
plt.plot(df_test.ds,df_test.y,'b--',
         label = "Test Data")
plt.legend(fontsize=14)
plt.xlabel("Date", fontsize=16)
plt.ylabel("SP&500 Close Price", fontsize=16)
plt.show()
# Zoom on the test period only.
# NOTE(review): the first label below says "Training Data" but plots df_test.y.
plt.figure(figsize=(18,10))
plt.plot(df_test.y,label="Training Data")
plt.plot(df_test.fbsp,label="FB Forecast")
plt.plot(AIC_pred_test,label="Improved Forecast by Best_AIC")
plt.legend(fontsize = 14)
plt.show()
# Candidate feature names (kept for reference; used by the commented lines below)
column = ['tby', 'ffr', 'fta', 'eps', 'div', 'une',
          'wti', 'ppi', 'rfs', 'fbsp_tby', 'fbsp_ffr', 'fbsp_div', 'eps_tby', 'eps_ffr', 'eps_div']
from sklearn import preprocessing
# Standardise target + features so the OLS coefficients are comparable
df1_train = df_train[['diff', 'tby', 'ffr', 'fta', 'eps', 'div', 'une', 'wti', 'ppi', 'rfs', 'fbsp_tby', 'fbsp_ffr', 'fbsp_div', 'eps_tby', 'eps_ffr', 'eps_div']]
X = preprocessing.scale(df1_train)
from statsmodels.regression.linear_model import OLS
# Column 0 is the scaled 'diff' target; the rest are the scaled features
reg_new = OLS((X[:,0]).copy(),X[:,1:].copy()).fit()
print(reg_new.params)
# Before Covid
# pd.Series(reg_new.params, index=['tby', 'ffr', 'fta', 'eps', 'div', 'une',
#                                  'wti', 'ppi', 'rfs', 'fbsp_tby', 'fbsp_ffr', 'fbsp_div', 'eps_tby', 'eps_ffr', 'eps_div'] )
# Hard-coded coefficient snapshots from earlier runs:
# before covid
coef1 = [ 1.50405129,  1.03228322,  0.27409454,  1.17073571,  0.31243092,
         -0.75747342,  0.46988206, -0.39944639,  2.10369448, -0.69112943,
         -2.1804296 , -2.38576385, -1.14196633,  1.41832903, -0.34501927]
# include covid
coef2 = [ 0.65150054,  1.70457239, -0.1573802 , -0.18007979, -0.15221931,
         -0.62326075,  0.45065894, -0.38972706,  2.87210843, -1.17604495,
         -4.92858316, -2.15459111,  0.11418468,  2.74829778,  0.55520382]
# Include Covid
# pd.Series( np.append( ['coefficients (before covid)'], np.round(coef1,3)), index= np.append(['features'], column) )
# Human-readable feature names for the summary table
index = ['10 Year U.S Treasury Bond Yield Rates (tby)', 'Federal Funds Rates (ffr)',
         'Federal Total Assets (fta)', 'Earning-Per-Share of S&P 500 (eps)', 'Dividend Yield of S&P 500 (div)',
         'Unemployment Rates (une) ', 'West Texas Intermediate oil index (wit)', 'Producer Price Index (ppi)',
         'Retail and Food Services Sales (rfs)',
         'fbsp_tby', 'fbsp_ffr', 'fbsp_div', 'eps_tby', 'eps_ffr', 'eps_div'
         ]
len(index)
# Fix: the original referenced an undefined name 'index1' (NameError);
# the labels list defined above is named 'index'.
pd.Series(coef2, index=index)
# Side-by-side comparison of the two coefficient snapshots
df3 = pd.DataFrame(coef1, index=index, columns=['coefficients (before covid)'])
df3['coefficients (include covid)'] = pd.Series(coef2, index=index)
df3
```
| github_jupyter |
```
!pwd
import sys
sys.path.append('/workspace')
from src.core.db.config import DatabaseEnum
from src.core.db.models.pdf_models import Fincen8300Rev4
from src.core.db.models.main_models import EmployeeToDocument
from src.core.db.session import DBContext, DbQuery
```
# Read the pdf data into a data source
```
db = DbQuery(DatabaseEnum.PDF_INGESTION_DB)
result = db.execute("SELECT * from public.fincen8300_rev4")
type(result)
from pandas import DataFrame
df = DataFrame(result.fetchall())
df.columns = result.keys()
df.head(100)
df.columns
from src.sources.data_source import DataSource
fincen = DataSource(df)
```
# Read employee data into data source
```
db = DbQuery(DatabaseEnum.MAIN_INGESTION_DB)
result = db.execute("SELECT * from public.employee")
print(result.keys())
df_employee = DataFrame(result.fetchall())
df_employee.columns = result.keys()
df_employee.head(20)
employee = DataSource(df_employee)
```
# Map the columns
```
from src.sources.structured_data_source import StructuredDataSource
from src.mapping.pdfs.pdf_field_name_classifier import FieldNameClassifier
from src.mapping.pdfs.pdf_field_label_catalog import FieldLabelCatalog
from src.mapping.pdfs.pseudofield_generator import PseudofieldGenerator
pseudofield_generator = PseudofieldGenerator(fincen)
pseudofield_generator.generate()
def _create_column_relations_for(source, target):
    """Link source columns to target columns that share a canonical identifier.

    Both frames are classified into {identifier: id_info} maps; for every
    catalog identifier present in both, a column relation is registered on
    `source` and announced.
    """
    target_ids = FieldNameClassifier.get_id_info_from_df(target.get_data())
    source_ids = FieldNameClassifier.get_id_info_from_df(source.get_data())
    for field in FieldLabelCatalog:
        if field not in target_ids or field not in source_ids:
            continue  # identifier missing on one side: nothing to relate
        source.create_column_relation(
            source_ids[field].field_name, target_ids[field].field_name, target
        )
        print(
            "New relation detected: %s" % str(source.column_relations[-1])
        )
_create_column_relations_for(fincen, employee)
```
# Find Row Mappings
```
from src.mapping.rows.row_mapping_configuration import RowMappingConfiguration
from src.mapping.values.value_matching_configuration import ValueMatchingConfiguration
import json
def load_config(path):
    """Load and return the JSON document stored at `path`."""
    with open(path, "r") as handle:
        return json.load(handle)
value_matching_config_json = load_config('../config/mapping/levenshtein_default.json')
row_mapping_config_json = load_config('../config/mapping/weighted_linear_default.json')
value_matching_config = ValueMatchingConfiguration(**value_matching_config_json)
row_mapping_config = RowMappingConfiguration(**row_mapping_config_json)
x = fincen.get_column_relations()
print(x)
fincen.map_rows_to(
employee, value_matching_config, row_mapping_config
)
import pandas as pd
def _generate_structured_row_matches(source, employee):
"""Generate structured row matches."""
rows = {
'first_name': [], # just for sanity check
'last_name': [], # just for sanity check
'emp_uuid': [],
'doc_uuid': []
}
target_df = employee.get_data()
for relation in source.row_relations:
source_index = relation.source_index
target_index = relation.target_index
source_row = source.get_data().iloc[source_index]
target_row = employee.get_data().iloc[target_index]
rows['emp_uuid'].append(target_row.id)
rows["doc_uuid"].append(source_row.id)
rows["first_name"].append(source_row.first_name)
rows["last_name"].append(source_row.last_name)
return pd.DataFrame(rows)
results_df = _generate_structured_row_matches(fincen, employee)
results_df.head()
fincen.get_data().iloc[2].first_name
```
# Write final dataframe to Main database
```
num_records = results_df.shape[0]
with DBContext(DatabaseEnum.MAIN_INGESTION_DB) as main_db:
for i in range(num_records):
row = results_df.iloc[i]
main_db.add(EmployeeToDocument(employee_id=str(row.emp_uuid),
document_ingestion_id=str(row.doc_uuid)
))
```
| github_jupyter |
```
import pandas as pd
import numpy as np
from datetime import datetime
from sqlalchemy import create_engine
from dateutil.relativedelta import relativedelta
from pricing.service.scoring.lscore import LScoring
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
init_notebook_mode(connected=True)
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
```
- Avaliar meses em que o faturamento foi zero (antes dos ultimos 6 meses)
- Avaliar o periodo em que o faturamento ficou x% abaixo da media
- Avaliar a amplitude do faturamento
#### Qual a probabilidade de se observar faturamento nulo nos ultimos 12 meses?
```
lista_teste = ['26998230000185', '17160880000166', '13.919.916/0001-91']
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/credito-digital")
con = engine.connect()
df = pd.read_sql("select * from tb_Fluxo where cnpj in {}".format(tuple(lista_teste)), con)
con.close()
df = df[['cnpj', 'dataFluxo', 'valorFluxo']]
df.columns = ['cnpj', 'data', 'valor']
dt = df[df['cnpj']=='26998230000185']
dt['data'] = dt.apply(lambda x : x['data'] + relativedelta(months=5), axis=1)
dt
body = {'dados' : dt[['data', 'valor']].to_dict("records"), 'id_produto' : 'tomatico'}
ls = LScoring(body)
ls.calcula()
trace = go.Scatter(
x = dt['data'],
y = dt['valor'],
)
layout = go.Layout(title='faturamento')
fig = go.Figure(data = [trace], layout = layout)
iplot(fig)
dt = df[df['cnpj']=='17160880000166']
body = {'dados' : dt[['data', 'valor']].to_dict("records"), 'id_produto' : 'tomatico'}
ls = LScoring(body)
ls.calcula()
trace = go.Scatter(
x = dt['data'],
y = dt['valor'],
)
layout = go.Layout(title='faturamento')
fig = go.Figure(data = [trace], layout = layout)
iplot(fig)
_df = df[df['cnpj']=='26998230000185']
_df['valor'].mean()
abs( - 0) < 0.1
_df['valor'].min()/_df['valor'].mean()
_df['low'] = _df['valor']/_df['valor'].mean()
_df['low'].max()/_df['low'].min()
_df['valor'].max()/_df['valor'].min()
_df[_df['low']< 1]['low'].mean()
len(_df[_df['low']< 1])/len(_df)
engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/varejo")
con = engine.connect()
df = pd.read_sql("select * from fluxo_pv", con)
con.close()
df['data'].iloc[10]
df.drop(index=0, inplace=True)
df = df[df['data']>datetime(2018,2,1).date()]
df = df[df['cpf_cnpj']!='00.000.000/0001-91']
df.groupby('cpf_cnpj').count().max()
df['zeros'] = df.apply(lambda x : int(x['valor'] == 0), axis=1)
df_zeros = df.groupby('cpf_cnpj').sum().reset_index()[['cpf_cnpj', 'zeros']]
df_zeros[df_zeros['zeros']>0]
dt = df[df['cpf_cnpj']=='13.919.916/0001-91'][['cpf_cnpj', 'data', 'valor']]
dt['cpf_cnpj'] = dt.apply(lambda x : x['cpf_cnpj'].replace(".", "").replace("/", "").replace("-", ""), axis=1)
dt.columns = ['cnpj', 'data', 'valor']
df = pd.concat([df, dt])
df.to_excel("base_zeros.xlsx")
from datetime import datetime
datetime.now().date().replace(day=1)
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Using TFRecords and `tf.Example`
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/load_data/tf-records"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/load_data/tf-records.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/load_data/tf-records.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
To read data efficiently it can be helpful to serialize your data and store it in a set of files (100-200MB each) that can each be read linearly. This is especially true if the data is being streamed over a network. This can also be useful for caching any data-preprocessing.
The TFRecord format is a simple format for storing a sequence of binary records.
[Protocol buffers](https://developers.google.com/protocol-buffers/) are a cross-platform, cross-language library for efficient serialization of structured data.
Protocol messages are defined by `.proto` files, these are often the easiest way to understand a message type.
The `tf.Example` message (or protobuf) is a flexible message type that represents a `{"string": value}` mapping. It is designed for use with TensorFlow and is used throughout the higher-level APIs such as [TFX](https://www.tensorflow.org/tfx/).
This notebook will demonstrate how to create, parse, and use the `tf.Example` message, and then serialize, write, and read `tf.Example` messages to and from `.tfrecord` files.
Note: While useful, these structures are optional. There is no need to convert existing code to use TFRecords, unless you are using [`tf.data`](https://www.tensorflow.org/guide/datasets) and reading data is still the bottleneck to training. See [Data Input Pipeline Performance](https://www.tensorflow.org/guide/performance/datasets) for dataset performance tips.
## Setup
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
tf.enable_eager_execution()
import numpy as np
import IPython.display as display
```
## `tf.Example`
### Data types for `tf.Example`
Fundamentally a `tf.Example` is a `{"string": tf.train.Feature}` mapping.
The `tf.train.Feature` message type can accept one of the following three types (see the [`.proto` file](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto) for reference). Most other generic types can be coerced into one of these.
1. `tf.train.BytesList` (the following types can be coerced)
- `string`
- `byte`
1. `tf.train.FloatList` (the following types can be coerced)
- `float` (`float32`)
- `double` (`float64`)
1. `tf.train.Int64List` (the following types can be coerced)
- `bool`
- `enum`
- `int32`
- `uint32`
- `int64`
- `uint64`
In order to convert a standard TensorFlow type to a `tf.Example`-compatible `tf.train.Feature`, you can use the following shortcut functions:
Each function takes a scalar input value and returns a `tf.train.Feature` containing one of the three `list` types above.
```
# The following helpers convert a scalar value to a tf.train.Feature
# compatible with tf.Example. Each wraps the value in a single-element
# list of the matching proto list type; non-scalar inputs are not
# handled here (serialize tensors to strings first).
def _bytes_feature(value):
  """Returns a bytes_list from a string / byte."""
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
  """Returns a float_list from a float / double."""
  return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
  """Returns an int64_list from a bool / enum / int / uint."""
  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
```
Note: To stay simple, this example only uses scalar inputs. The simplest way to handle non-scalar features is to use `tf.serialize_tensor` to convert tensors to binary-strings. Strings are scalars in tensorflow. Use `tf.parse_tensor` to convert the binary-string back to a tensor.
Below are some examples of how these functions work. Note the varying input types and the standardized output types. If the input type for a function does not match one of the coercible types stated above, the function will raise an exception (e.g. `_int64_feature(1.0)` will error out, since `1.0` is a float, so should be used with the `_float_feature` function instead).
```
print(_bytes_feature(b'test_string'))
print(_bytes_feature(u'test_bytes'.encode('utf-8')))
print(_float_feature(np.exp(1)))
print(_int64_feature(True))
print(_int64_feature(1))
```
All proto messages can be serialized to a binary-string using the `.SerializeToString` method.
```
feature = _float_feature(np.exp(1))
feature.SerializeToString()
```
### Creating a `tf.Example` message
Suppose you want to create a `tf.Example` message from existing data. In practice, the dataset may come from anywhere, but the procedure of creating the `tf.Example` message from a single observation will be the same.
1. Within each observation, each value needs to be converted to a `tf.train.Feature` containing one of the 3 compatible types, using one of the functions above.
1. We create a map (dictionary) from the feature name string to the encoded feature value produced in #1.
1. The map produced in #2 is converted to a [`Features` message](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto#L85).
In this notebook, we will create a dataset using NumPy.
This dataset will have 4 features.
- a boolean feature, `False` or `True` with equal probability
- a random bytes feature, uniform across the entire support
- an integer feature uniformly randomly chosen from `[-10000, 10000)`
- a float feature from a standard normal distribution
Consider a sample consisting of 10,000 independently and identically distributed observations from each of the above distributions.
```
# the number of observations in the dataset
n_observations = int(1e4)
# boolean feature, encoded as False or True
feature0 = np.random.choice([False, True], n_observations)
# integer feature, random between -10000 and 10000
feature1 = np.random.randint(0, 5, n_observations)
# bytes feature
strings = np.array([b'cat', b'dog', b'chicken', b'horse', b'goat'])
feature2 = strings[feature1]
# float feature, from a standard normal distribution
feature3 = np.random.randn(n_observations)
```
Each of these features can be coerced into a `tf.Example`-compatible type using one of `_bytes_feature`, `_float_feature`, `_int64_feature`. We can then create a `tf.Example` message from these encoded features.
```
def serialize_example(feature0, feature1, feature2, feature3):
  """Encode one observation as a serialized tf.Example byte string.

  Each value is wrapped in a tf.train.Feature with the helper matching
  its type, the resulting map becomes a Features message, and the
  Example is serialized with SerializeToString().
  """
  feature_map = {
      'feature0': _int64_feature(feature0),
      'feature1': _int64_feature(feature1),
      'feature2': _bytes_feature(feature2),
      'feature3': _float_feature(feature3),
  }
  return tf.train.Example(
      features=tf.train.Features(feature=feature_map)
  ).SerializeToString()
```
For example, suppose we have a single observation from the dataset, `[False, 4, bytes('goat'), 0.9876]`. We can create and print the `tf.Example` message for this observation using `create_message()`. Each single observation will be written as a `Features` message as per the above. Note that the `tf.Example` [message](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/example.proto#L88) is just a wrapper around the `Features` message.
```
# This is an example observation from the dataset.
example_observation = []
serialized_example = serialize_example(False, 4, b'goat', 0.9876)
serialized_example
```
To decode the message use the `tf.train.Example.FromString` method.
```
example_proto = tf.train.Example.FromString(serialized_example)
example_proto
```
## TFRecord files using `tf.data`
The `tf.data` module also provides tools for reading and writing data in tensorflow.
### Writing a TFRecord file
The easiest way to get the data into a dataset is to use the `from_tensor_slices` method.
Applied to an array, it returns a dataset of scalars.
```
tf.data.Dataset.from_tensor_slices(feature1)
```
Applied to a tuple of arrays, it returns a dataset of tuples:
```
features_dataset = tf.data.Dataset.from_tensor_slices((feature0, feature1, feature2, feature3))
features_dataset
# Use `take(1)` to only pull one example from the dataset.
for f0,f1,f2,f3 in features_dataset.take(1):
print(f0)
print(f1)
print(f2)
print(f3)
```
Use the `tf.data.Dataset.map` method to apply a function to each element of a `Dataset`.
The mapped function must operate in TensorFlow graph mode: It must operate on and return `tf.Tensors`. A non-tensor function, like `create_example`, can be wrapped with `tf.py_func` to make it compatible.
Using `tf.py_func` requires that you specify the shape and type information that is otherwise unavailable:
```
def tf_serialize_example(f0,f1,f2,f3):
  """Graph-mode wrapper: run serialize_example through tf.py_func.

  tf.py_func needs the return dtype spelled out (tf.string), and the
  result is reshaped to () so the dataset element is a scalar string.
  """
  tf_string = tf.py_func(
    serialize_example,
    (f0,f1,f2,f3), # pass these args to the above function.
    tf.string) # the return type is `tf.string`.
  return tf.reshape(tf_string, ()) # The result is a scalar
```
Apply this function to each element in the dataset:
```
serialized_features_dataset = features_dataset.map(tf_serialize_example)
serialized_features_dataset
```
And write them to a TFRecord file:
```
filename = 'test.tfrecord'
writer = tf.data.experimental.TFRecordWriter(filename)
writer.write(serialized_features_dataset)
```
### Reading a TFRecord file
We can also read the TFRecord file using the `tf.data.TFRecordDataset` class.
More information on consuming TFRecord files using `tf.data` can be found [here](https://www.tensorflow.org/guide/datasets#consuming_tfrecord_data).
Using `TFRecordDataset`s can be useful for standardizing input data and optimizing performance.
```
filenames = [filename]
raw_dataset = tf.data.TFRecordDataset(filenames)
raw_dataset
```
At this point the dataset contains serialized `tf.train.Example` messages. When iterated over it returns these as scalar string tensors.
Use the `.take` method to only show the first 10 records.
Note: iterating over a `tf.data.Dataset` only works with eager execution enabled.
```
for raw_record in raw_dataset.take(10):
print(repr(raw_record))
```
These tensors can be parsed using the function below.
Note: The `feature_description` is necessary here because datasets use graph-execution, and need this description to build their shape and type signature.
```
# Create a description of the features.
feature_description = {
'feature0': tf.FixedLenFeature([], tf.int64, default_value=0),
'feature1': tf.FixedLenFeature([], tf.int64, default_value=0),
'feature2': tf.FixedLenFeature([], tf.string, default_value=''),
'feature3': tf.FixedLenFeature([], tf.float32, default_value=0.0),
}
def _parse_function(example_proto):
  """Parse one serialized tf.Example into a dict of fixed-length tensors."""
  # Parse the input tf.Example proto using the dictionary above.
  return tf.parse_single_example(example_proto, feature_description)
```
Or use `tf.parse_example` to parse a whole batch at once.
Apply this function to each item in the dataset using the `tf.data.Dataset.map` method:
```
parsed_dataset = raw_dataset.map(_parse_function)
parsed_dataset
```
Use eager execution to display the observations in the dataset. There are 10,000 observations in this dataset, but we only display the first 10. The data is displayed as a dictionary of features. Each item is a `tf.Tensor`, and the `numpy` element of this tensor displays the value of the feature.
```
# Display the first 10 parsed records; each is a dict of feature tensors.
for parsed_record in parsed_dataset.take(10):
  # BUG FIX: the original printed `raw_record` (a leftover variable from an
  # earlier cell), so the parsed records were never shown.
  print(repr(parsed_record))
```
Here, the `tf.parse_example` function unpacks the `tf.Example` fields into standard tensors.
## TFRecord files using tf.python_io
The `tf.python_io` module also contains pure-Python functions for reading and writing TFRecord files.
### Writing a TFRecord file
Now write the 10,000 observations to the file `test.tfrecords`. Each observation is converted to a `tf.Example` message, then written to file. We can then verify that the file `test.tfrecords` has been created.
```
# Write the `tf.Example` observations to the file.
with tf.python_io.TFRecordWriter(filename) as writer:
for i in range(n_observations):
example = serialize_example(feature0[i], feature1[i], feature2[i], feature3[i])
writer.write(example)
!ls
```
### Reading a TFRecord file
Suppose we now want to read this data back, to be input as data into a model.
The following example imports the data as is, as a `tf.Example` message. This can be useful to verify that the file contains the data that we expect. This can also be useful if the input data is stored as TFRecords but you would prefer to input NumPy data (or some other input data type), for example [here](https://www.tensorflow.org/guide/datasets#consuming_numpy_arrays), since this example allows us to read the values themselves.
We iterate through the TFRecords in the infile, extract the `tf.Example` message, and can read/store the values within.
```
record_iterator = tf.python_io.tf_record_iterator(path=filename)
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
print(example)
# Exit after 1 iteration as this is purely demonstrative.
break
```
The features of the `example` object (created above of type `tf.Example`) can be accessed using its getters (similarly to any protocol buffer message). `example.features` returns a `repeated feature` message, then getting the `feature` message returns a map of feature name to feature value (stored in Python as a dictionary).
```
print(dict(example.features.feature))
```
From this dictionary, you can get any given value as with a dictionary.
```
print(example.features.feature['feature3'])
```
Now, we can access the value using the getters again.
```
print(example.features.feature['feature3'].float_list.value)
```
## Walkthrough: Reading/Writing Image Data
This is an example of how to read and write image data using TFRecords. The purpose is to show, end to end, how to take input data (in this case an image), write it as a TFRecord file, then read the file back and display the image.
This can be useful if, for example, you want to use several models on the same input dataset. Instead of storing the image data raw, it can be preprocessed into the TFRecords format, and that can be used in all further processing and modelling.
First, let's download [this image](https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg) of a cat in the snow and [this photo](https://upload.wikimedia.org/wikipedia/commons/f/fe/New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg) of the Williamsburg Bridge, NYC under construction.
### Fetch the images
```
cat_in_snow = tf.keras.utils.get_file('320px-Felis_catus-cat_on_snow.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg')
williamsburg_bridge = tf.keras.utils.get_file('194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg')
display.display(display.Image(filename=cat_in_snow))
display.display(display.HTML('Image cc-by: <a "href=https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg">Von.grzanka</a>'))
display.display(display.Image(filename=williamsburg_bridge))
display.display(display.HTML('<a "href=https://commons.wikimedia.org/wiki/File:New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg">source</a>'))
```
### Write the TFRecord file
As we did earlier, we can now encode the features as types compatible with `tf.Example`. In this case, we will not only store the raw image string as a feature, but we will store the height, width, depth, and an arbitrary `label` feature, which is used when we write the file to distinguish between the cat image and the bridge image. We will use `0` for the cat image, and `1` for the bridge image.
```
image_labels = {
cat_in_snow : 0,
williamsburg_bridge : 1,
}
# This is an example, just using the cat image.
image_string = open(cat_in_snow, 'rb').read()
label = image_labels[cat_in_snow]
# Create a dictionary with features that may be relevant.
def image_example(image_string, label):
  """Build a tf.Example holding raw JPEG bytes plus image metadata.

  height/width/depth come from decoding the JPEG; `label` distinguishes
  which image the record belongs to (0 = cat, 1 = bridge in this notebook).
  """
  image_shape = tf.image.decode_jpeg(image_string).shape
  feature = {
      'height': _int64_feature(image_shape[0]),
      'width': _int64_feature(image_shape[1]),
      'depth': _int64_feature(image_shape[2]),
      'label': _int64_feature(label),
      'image_raw': _bytes_feature(image_string),
  }
  return tf.train.Example(features=tf.train.Features(feature=feature))
for line in str(image_example(image_string, label)).split('\n')[:15]:
print(line)
print('...')
```
We see that all of the features are now stored in the `tf.Example` message. Now, we functionalize the code above and write the example messages to a file, `images.tfrecords`.
```
# Write the raw image files to images.tfrecords.
# First, process the two images into tf.Example messages.
# Then, write to a .tfrecords file.
with tf.python_io.TFRecordWriter('images.tfrecords') as writer:
for filename, label in image_labels.items():
image_string = open(filename, 'rb').read()
tf_example = image_example(image_string, label)
writer.write(tf_example.SerializeToString())
!ls
```
### Read the TFRecord file
We now have the file `images.tfrecords`. We can now iterate over the records in the file to read back what we wrote. Since, for our use case we will just reproduce the image, the only feature we need is the raw image string. We can extract that using the getters described above, namely `example.features.feature['image_raw'].bytes_list.value[0]`. We also use the labels to determine which record is the cat as opposed to the bridge.
```
raw_image_dataset = tf.data.TFRecordDataset('images.tfrecords')
# Create a dictionary describing the features.
image_feature_description = {
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'depth': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
'image_raw': tf.FixedLenFeature([], tf.string),
}
def _parse_image_function(example_proto):
  """Parse one serialized image tf.Example into a dict of tensors."""
  # Parse the input tf.Example proto using the dictionary above.
  return tf.parse_single_example(example_proto, image_feature_description)
parsed_image_dataset = raw_image_dataset.map(_parse_image_function)
parsed_image_dataset
```
Recover the images from the TFRecord file:
```
for image_features in parsed_image_dataset:
image_raw = image_features['image_raw'].numpy()
display.display(display.Image(data=image_raw))
```
| github_jupyter |
# Módulo 2 - Modelos preditivos e séries temporais
# Desafio do Módulo 2
```
import pandas as pd
import numpy as np
base = pd.read_csv('https://pycourse.s3.amazonaws.com/banknote_authentication.txt', header=None)
base.head()
#labels:
#variance, skewness, curtosis e entropy)
base.columns=['variance', 'skewness', 'curtosis', 'entropy', 'class']
base.head()
#Qual o tamanho desse dataset (número de linhas, número de colunas)?
base.shape
#Qual variável possui o maior range (diferença entre valor máximo e mínimo)?
#Qual a média da coluna skewness?
#Qual a média da coluna entropy?
#Qual a desvio padrão da coluna curtosis?
base.describe()
#Qual a mediana da coluna variance?
base.median()
#Qual a porcentagem de exemplos do dataset que são cédulas falsas (class=1)?
falsas = (base['class'] == 1).sum()
total = base.shape[0]
falsas / total * 100
#Qual o valor da correlação de Pearson entre as variáveis skewness e curtosis?
import scipy as sp
from scipy import stats
sp.stats.pearsonr(base['skewness'], base['curtosis'])
#Qual a acurácia do KNN no conjunto de teste?
from sklearn.model_selection import train_test_split
x = base.drop('class', axis=1)
y = base['class']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30, random_state=1)
import sklearn
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score #Medir acurária abaixo
#a. Algoritmo KNN:
clf_KNN = KNeighborsClassifier(n_neighbors=5)
#b. Algoritmo Árvore de Decisão (Decision Tree):
clf_arvore = DecisionTreeClassifier(random_state=1)
#c. Algoritmo Floresta Aleatória (Random Forest):
clf_floresta = RandomForestClassifier(max_depth=8, random_state=1)
#d. Algoritmo SVM:
clf_svm = SVC(gamma='auto',kernel='rbf', random_state=1)
#e. Algoritmo Rede MLP:
clf_mlp = MLPClassifier(hidden_layer_sizes=(2,), solver='lbfgs', random_state=1)
#Qual a acurácia do KNN no conjunto de teste?
clf_KNN.fit(x_train, y_train)
knn_predict = clf_KNN.predict(x_test)
accuracy_score(y_test, knn_predict)
#Qual a acurácia da Árvore de Decisão no conjunto de teste?
clf_arvore.fit(x_train, y_train)
arvore_predict = clf_arvore.predict(x_test)
accuracy_score(y_test, arvore_predict)
#Qual a acurácia do Random Forest no conjunto de teste?
clf_floresta.fit(x_train, y_train)
floresta_redict = clf_floresta.predict(x_test)
accuracy_score(y_test, floresta_redict)
#Qual a acurácia do SVM no conjunto de teste?
clf_svm.fit(x_train, y_train)
svm_predict = clf_svm.predict(x_test)
accuracy_score(y_test, svm_predict)
#Qual a acurácia da rede MLP no conjunto de teste?
clf_mlp.fit(x_train, y_train)
mlp_predict = clf_mlp.predict(x_test)
accuracy_score(y_test, mlp_predict)
#Analisando o valor da importância relativa das features do Random Forest (atributo feature_importances_),
#qual feature melhor contribuiu para a predição de class?
#Qual o valor da importância relativa da feature skewness?
relativos = clf_floresta.feature_importances_
base.head()
relativos
```
# Fim
# Visite o meu [github](https://github.com/k3ybladewielder) <3
| github_jupyter |
<a href="https://codeimmersives.com"><img src = "https://www.codeimmersives.com/wp-content/uploads/2019/09/CodeImmersives_Logo_RGB_NYC_BW.png" width = 400> </a>
<h1 align=center><font size = 5>Agenda</font></h1>
###
<div class="alert alert-block alert-info" style="margin-top: 20px">
1. [Review](#0)<br>
2. [Pandas continued](#2)<br>
2. [Exercise](#10)<br>
3. [Exercise](#12)<br>
</div>
<hr>
<h2>Review</h2>
<h2>Pandas Series continued</h2>
We can sort a series by using the sort_values method.<br>
<code>
import pandas as pd
x = [27,4,14,23,33,14,22,11]
ps = pd.Series(x).sort_values(ascending = False)
ps
</code><br>
NOTE: We can use the ascending = True to sort the series in ascending order
```
import pandas as pd
x = [27,4,14,23,33,14,22,11]
ps = pd.Series(x).sort_values(ascending = False)
ps
```
We can also sort a series using a lambda expression. Consider the <br>
following list of strings:<br>
<code>
ps = pd.Series(['a', 'B', 'c', 'D', 'e'])
ps.sort_values(key=lambda x: x.str.lower())
ps
</code><br>
NOTE: We can use the str method to change all values in a series to a string
```
ps = pd.Series(['a', 'B', 'c', 'D', 'e'])
ps.sort_values(key=lambda x: x.str.lower())
ps
```
<H2>Exercise</h2>
Take the following list of cities and alphabetize them and put them<br>
In the proper case: ALBANY ---> Albany<br>
<code>
cities = ['PROVIDENCE', 'HARTFORD','BOSTON','ALBANY','TRENTON']
</code>
<h4>Solution</h4>
```
```
<h4>Pandas Series.str methods</h4>
Here is a list of str methods to try:<br>
str.contains<br>
str.count<br>
str.endswith<br>
str.find<br>
str.index<br>
str.join<br>
str.get<br>
str.len<br>
str.split<br>
str.isalnum<br>
<h2> Exercise </h2>
1) use pd.str to get a boolean matrix for which words in the series <code> ['Mouse', 'dog', 'house and parrot', '23', np.NaN] </code> contain 'og' - use pd.str.contains <br>
2) Use pd.str.count to get the count of 'a' in the series <code> ['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat','aardvark'] </code> <br>
3) Use pd.str.count to get the count of dollar signs in the series *hint* special characters need a double backslash before them ('\\') <code>
['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat', 'canna$', 'findme_', 'dollarsign$', 'infinite', 'lo$'] </code>
<br>
4) Use pd.str.endswith() to see how many strings end with 't' in the series <code> ['bat', 'bear', 'caT', np.nan, 'lost', 'later', 'tattletale', 'tops', 'taste', 'tart', 'tango', 'taint', 'tarentino', 'tot'] </code> <br>
5) Use pd.str.join() to join all elements in the series <code> [['lion', 'elephant', 'zebra'],
[1.1, 2.2, 3.3],
['cat', 'gerbil', 'dog'],
['cow', 4.5, 'goat'],
['duck', 'swan', 'fish', 'guppy']] </code> using a hyphen '-' to join.
Solution:
```
import pandas as pd
import numpy as np
s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
print(s1.str.contains('og'))
```
We can split the data in a series and unpack the results into columns<br>
<br>
<code>
cities = ['PROVIDENCE, Rhode Island', 'Harford, Connecticut','Boston, Massachusetts']
ps = pd.Series(cities).str.split(expand = True)
ps
</code><br>
NOTE: We use the expand key word argument to create the multiple columns
```
cities = ['PROVIDENCE, Rhode Island', 'Harford, Connecticut','Boston, Massachusetts']
ps = pd.Series(cities).str.split(expand = True)
ps
```
We can remove a row by dropping its index<br>
<br>
<code>
import pandas as pd
favorites = {'1st place':'Pineapple',
'2nd place': 'Grapes',
'3rd place': 'Granny Smith Apples',
'4th place': 'Strawberries'}
ps = pd.Series(favorites.values(), index = favorites.keys())
new_ps = ps.drop(labels='4th place')
new_ps
</code>
```
import pandas as pd
favorites = {'1st place':'Pineapple',
'2nd place': 'Grapes',
'3rd place': 'Granny Smith Apples',
'4th place': 'Strawberries'}
ps = pd.Series(favorites)
new_ps = ps.drop(labels='4th place')
new_ps
```
<h4>Pandas Series sum</h4>
We can sum up all of our values in the series<br>
<code>
import pandas as pd
data = [90,93,97,88, 89]
ps = pd.Series(data)
print(ps.sum())
</code>
```
import pandas as pd
data = [90,93,97,88, 89]
ps = pd.Series(data)
print(ps.sum())
```
### DataFrame
```
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002, 2003],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9, 3.2]}
frame = pd.DataFrame(data)
frame
frame.head()
pd.DataFrame(data, columns=['year', 'state', 'pop'])
frame.loc[1]
import numpy as np
frame = pd.DataFrame(data, columns = ['year', 'state', 'pop', 'area'])
frame
frame['area'] = np.arange(6)
frame
```
<h2> Exercise </h2>
1) Edit the states dataframe to create a new dataframe frame2 that adds a 'debt' column <br>
2) Change the indices to be string numbers from one to six ('one', ..., 'six') <br>
3) Print all the values in the years column for viewing <br>
4) Print row 'three' with all its values using loc[] <br>
5) Use numpy's arange() function to fill the debt column frame2['debt'] with a range of numbers (Hint: you need to match the range to the number of rows in the frame for broadcasting to work).
Solution
```
# pd.dataFrame(data = [], columns =[], index=[])
```
Now instead of an arange, say we wanted to specify the debt column manually ourselves, we can do this by creating another series and assigning the column to the series (remember how dataframes contain series).
```
val = pd.Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])
frame2['debt'] = val
frame2
frame2['Eastern'] = frame2.state == 'Ohio'
frame2
#del frame2['eastern']
```
<h2> Exercise </h2>
1) Add a western state column that reads True for western states. <br>
2) Print the resulting table. <br>
3) Add an Eastern_Debt column that is populated with a random number from 0 to 100. Print the result.
4) Delete all western and eastern columns using del
Solution
```
import numpy as np
frame2['Western'] = frame2.state == 'Nevada'
frame2['Eastern_Debt'] = np.random.randint(0,101,6)
frame2
del frame2['Eastern'], frame2['Eastern_Debt'], frame2['Western']
frame2
```
<h4> Transposing </h4>
```
pop = {'Nevada': {2001: 2.4, 2002: 2.9},
'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}
frame3 = pd.DataFrame(pop)
frame3
frame4= pd.DataFrame(frame3.T)
frame4
pd.DataFrame(pop, index=[2001, 2002, 2003])
pdata = {'Ohio': frame3['Ohio'][:-1],
'Nevada': frame3['Nevada'][:2]}
pd.DataFrame(pdata)
frame3.index.name = 'year'; frame3.columns.name = 'state'
frame3
#to add header: rows: df.index.attribute, columns: df.columns.attribute
frame3.values
frame2.values
```
### Correlation and Covariance
conda install pandas-datareader
```
price = pd.read_pickle('yahoo_price.pkl')
volume = pd.read_pickle('yahoo_volume.pkl')
```
import pandas_datareader.data as web
all_data = {ticker: web.get_data_yahoo(ticker)
for ticker in ['AAPL', 'IBM', 'MSFT', 'GOOG']}
price = pd.DataFrame({ticker: data['Adj Close']
for ticker, data in all_data.items()})
volume = pd.DataFrame({ticker: data['Volume']
for ticker, data in all_data.items()})
```
returns = price.pct_change()
returns.tail()
#returns['MSFT'].corr(returns['IBM'])
returns['MSFT'].cov(returns['IBM'])
returns.MSFT.corr(returns.IBM)
returns.corr()
#returns.cov()
returns.corrwith(returns.IBM)
returns.corrwith(volume)
```
Exercise!
This notebook is part of a course at www.codeimmersives.com called Data Science. If you accessed this notebook outside the course, you can get more information about this course online by clicking here.
<hr>
Copyright © 2021 Code Immersives
This notebook is part of a course at www.codeimmersives.com called Data Science. If you accessed this notebook outside the course, you can get more information about this course online by clicking here.
<hr>
Copyright © 2021 Code Immersives
| github_jupyter |
## High and Low Pass Filters
Now, you might be wondering, what makes filters high and low-pass; why is a Sobel filter high-pass and a Gaussian filter low-pass?
Well, you can actually visualize the frequencies that these filters block out by taking a look at their fourier transforms. The frequency components of any image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum. So, let's treat our filters as small images, and display them in the frequency domain!
```
import numpy as np
import matplotlib.pyplot as plt
import cv2
%matplotlib inline
# Define gaussian, sobel, and laplacian (edge) filters

# 3x3 averaging kernel — approximates a Gaussian blur (low-pass)
gaussian = (1/9)*np.array([[1, 1, 1],
                           [1, 1, 1],
                           [1, 1, 1]])

# horizontal-gradient Sobel kernel (responds to vertical edges)
sobel_x= np.array([[-1, 0, 1],
                   [-2, 0, 2],
                   [-1, 0, 1]])

# vertical-gradient Sobel kernel (responds to horizontal edges)
sobel_y= np.array([[-1,-2,-1],
                   [0, 0, 0],
                   [1, 2, 1]])

# laplacian, edge filter (second derivative; orientation-independent)
laplacian=np.array([[0, 1, 0],
                    [1,-4, 1],
                    [0, 1, 0]])

filters = [gaussian, sobel_x, sobel_y, laplacian]
filter_name = ['gaussian','sobel_x', \
               'sobel_y', 'laplacian']

# perform a fast fourier transform on each filter
# and create a scaled, frequency transform image
f_filters = [np.fft.fft2(x) for x in filters]
fshift = [np.fft.fftshift(y) for y in f_filters]
# log scale (+1 to avoid log(0)) so the full dynamic range is visible
frequency_tx = [np.log(np.abs(z)+1) for z in fshift]

# display 4 filters in a 2x2 grid of subplots
for i in range(len(filters)):
    plt.subplot(2,2,i+1),plt.imshow(frequency_tx[i],cmap = 'gray')
    plt.title(filter_name[i]), plt.xticks([]), plt.yticks([])
plt.show()
```
Areas of white or light gray, allow that part of the frequency spectrum through! Areas of black mean that part of the spectrum is blocked out of the image.
Recall that the low frequencies in the frequency spectrum are at the center of the frequency transform image, and high frequencies are at the edges. You should see that the Gaussian filter allows only low-pass frequencies through, which is the center of the frequency transformed image. The sobel filters block out frequencies of a certain orientation and a laplace (detects edges regardless of orientation) filter, should block out low-frequencies!
You are encouraged to load in an image, apply a filter to it using `filter2d` then visualize what the fourier transform of that image looks like before and after a filter is applied.
```
## TODO: load in an image, and filter it using a kernel of your choice
## apply a fourier transform to the original *and* filtered images and compare them
image = cv2.imread('images/birds.jpg')
# Change color to RGB (from BGR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# normalize the image to [0, 1]
# NOTE(review): norm_image is not actually used below — the filters are
# applied to `gray`; kept for parity with earlier cells.
norm_image = gray/255.0
# apply each kernel from the previous cell to the grayscale image
filtered_images = [cv2.filter2D(gray, -1, w) for w in filters]
# frequency-domain (FFT) view of each filtered image, log-scaled for display
f_filters = [np.fft.fft2(x) for x in filtered_images]
fshift = [np.fft.fftshift(y) for y in f_filters]
frequency_tx = [np.log(np.abs(z)+1) for z in fshift]
for i in range(len(filters)):
    plt.subplot(2,2,i+1),plt.imshow(frequency_tx[i],cmap = 'gray')
    plt.title(filter_name[i]), plt.xticks([]), plt.yticks([])
plt.show()
# BUG FIX: `filtered_image_y` was never defined (NameError).  Show the
# sobel_y-filtered image, which is filtered_images[2] in the list above.
plt.imshow(filtered_images[2], cmap='gray')
```
| github_jupyter |
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from matplotlib.pyplot import MultipleLocator
complexity = pd.read_csv('C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/complexity_220318.csv')
ticker_industry = pd.read_excel('C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/Ticker-Industry.xlsx')
ticker_industry['Company'] = [x.split(':')[-1] for x in ticker_industry['Exchange:Ticker']]
complexity_ind = pd.merge(complexity,ticker_industry.loc[:,['Company','Primary Industry']], how='left',on='Company')
complexity_ind ['Year'] = [x[:4] for x in complexity_ind['Date']]
complexity_ind ['Quarter'] = [x[4:] for x in complexity_ind['Date']]
complexity_ind = complexity_ind.rename (columns={'Primary Industry':'Industry'})
complexity_ind
```
#### Data re-organization for Yannan's portfolio construction
The following codes in this part is to merge sentiment score with complexity score for further analysis.
```
sentiment = pd.read_csv('C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/sentiment_data.csv')
sentiment['TickerQuarter'] = sentiment['Ticker']+sentiment['Quarter']
complexity_ind['TickerQuarter'] = complexity_ind['Company']+complexity_ind['Date']
complexity_ind.query("Company == 'AAPL'")
# merge two dataframes on Quarter number
sentiment_complexity = pd.merge(sentiment,complexity_ind.loc[:,['TickerQuarter','Flesch score','Polysyllable number per sentence']], how='left',on='TickerQuarter')
senti_complex_combine = sentiment_complexity.drop ('TickerQuarter',axis=1)
senti_complex_combine.to_csv("C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/senti_complex_combine_220318.csv",index=False)
# We got mean score for each quarter.
#We would like to see cumulative return in a time series so we insert the mean score of the quarter for each day.
all_factors = pd.read_csv('C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/all_factors_220318.csv')
all_factors['Mean']= all_factors['Mean'].fillna(method='pad',axis=0)
all_factors['Total_Words']= all_factors['Total_Words'].fillna(method='pad',axis=0)
all_factors['Sum']= all_factors['Sum'].fillna(method='pad',axis=0)
all_factors['Median']= all_factors['Median'].fillna(method='pad',axis=0)
all_factors['Std']= all_factors['Std'].fillna(method='pad',axis=0)
all_factors['%Positive']= all_factors['%Positive'].fillna(method='pad',axis=0)
all_factors['%Negative']= all_factors['%Negative'].fillna(method='pad',axis=0)
all_factors['%Neutral']= all_factors['%Neutral'].fillna(method='pad',axis=0)
all_factors['Flesch score']= all_factors['Flesch score'].fillna(method='pad',axis=0)
all_factors['Polysyllable number per sentence']= all_factors['Polysyllable number per sentence'].fillna(method='pad',axis=0)
all_factors.to_csv("C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/all_factors_value_insert_220318_v2.csv",index=False)
```
### QoQ & YoY Plot (for one company/industry)
```
# slice the frame to get a pivot table to view QoQ & YoY change
industry = 'all industries'
factor = 'Polysyllable number per sentence'
#qoq_sliced_frame = complexity_ind.query("Industry == 'Soft Drinks' ")
qoq_sliced_frame = complexity_ind
pivot_flesch = pd.pivot_table(qoq_sliced_frame, values = factor , index = ['Quarter'], columns = ['Year'], aggfunc=np.mean)
# Plot in one figure
sns.set(rc = {'figure.figsize':(10,10)}) # set figure sizess
plt.ylim(ymin=2.5,ymax=4) # set y axis limitation
x_major_locator=MultipleLocator(1) # Set axis density
ax=plt.gca()
ax.xaxis.set_major_locator(x_major_locator)
g = sns.lineplot(data=pivot_flesch,markers=True)
g.set(title = industry+" - "+factor)
g.legend(loc="upper left", bbox_to_anchor=(1, 1)) # move legend to a proper position
# To compare the year-end transcripts complexity across industries
Q4_complexity_ind = complexity_ind.query ("Quarter == 'Q4'")
quarter ='Q4'
factor = 'Reading time'
sns.set(rc = {'figure.figsize':(10,10)}) # set figure sizess
plt.ylim(ymin=200,ymax=1000) # set y axis limitation
x_major_locator=MultipleLocator(1) # Set axis density
ax=plt.gca()
ax.xaxis.set_major_locator(x_major_locator)
g = sns.lineplot(data=Q4_complexity_ind,x='Year',y=factor, hue='Industry',markers=True, legend='brief',style="Industry",ci=None)
g.set(title = quarter + " - "+ factor)
g.legend(loc="upper left", bbox_to_anchor=(1, 1)) # move legend to a proper position
# To compare complexity across industries
factor = 'Polysyllable number per sentence'
all_ind_pivot_flesch = pd.pivot_table(complexity_ind, values = factor, columns = ['Industry'], aggfunc=np.median)
g=sns.barplot( data=all_ind_pivot_flesch,palette="Blues_d")
plt.ylim(ymin=2,ymax=5) # set y axis limitation
plt.xticks(fontsize=14, rotation=90)
g.set(title = factor + " - "+ "Industry Comparison")
```
| github_jupyter |
# Build a classification decision tree
We will illustrate how a decision tree fits data with a simple classification
problem using the penguins dataset.
<div class="admonition note alert alert-info">
<p class="first admonition-title" style="font-weight: bold;">Note</p>
<p class="last">If you want a deeper overview regarding this dataset, you can refer to the
Appendix - Datasets description section at the end of this MOOC.</p>
</div>
```
import pandas as pd
penguins = pd.read_csv("../datasets/penguins_classification.csv")
culmen_columns = ["Culmen Length (mm)", "Culmen Depth (mm)"]
target_column = "Species"
```
Besides, we split the data into two subsets to investigate how trees will
predict values based on an out-of-samples dataset.
```
from sklearn.model_selection import train_test_split
data, target = penguins[culmen_columns], penguins[target_column]
data_train, data_test, target_train, target_test = train_test_split(
data, target, random_state=0)
range_features = {
feature_name: (data[feature_name].min() - 1, data[feature_name].max() + 1)
for feature_name in data.columns}
```
<div class="admonition caution alert alert-warning">
<p class="first admonition-title" style="font-weight: bold;">Caution!</p>
<p class="last">Here and later, we use the name <tt class="docutils literal">data</tt> and <tt class="docutils literal">target</tt> to be explicit. In
scikit-learn documentation, <tt class="docutils literal">data</tt> is commonly named <tt class="docutils literal">X</tt> and <tt class="docutils literal">target</tt> is
commonly called <tt class="docutils literal">y</tt>.</p>
</div>
In a previous notebook, we learnt that a linear classifier will define a
linear separation to split classes using a linear combination of the input
features. In our 2-dimensional space, it means that a linear classifier will
define some oblique lines that best separate our classes. We define a
function below that, given a set of data points and a classifier, will plot
the decision boundaries learnt by the classifier.
```
import numpy as np
import matplotlib.pyplot as plt
def plot_decision_function(fitted_classifier, range_features, ax=None):
    """Plot the boundary of the decision function of a classifier.

    Parameters
    ----------
    fitted_classifier : object exposing ``.predict`` (already fitted).
    range_features : dict mapping the two feature names to (min, max) tuples;
        insertion order determines which feature goes on the x axis.
    ax : matplotlib Axes, optional — a new one is created if not given.

    Returns
    -------
    The Axes the decision regions were drawn onto.
    """
    from sklearn.preprocessing import LabelEncoder

    feature_names = list(range_features.keys())
    # create a grid to evaluate all possible samples
    plot_step = 0.02
    xx, yy = np.meshgrid(
        np.arange(*range_features[feature_names[0]], plot_step),
        np.arange(*range_features[feature_names[1]], plot_step),
    )
    # compute the associated prediction
    Z = fitted_classifier.predict(np.c_[xx.ravel(), yy.ravel()])
    # encode string class labels as integers so contourf can color regions
    Z = LabelEncoder().fit_transform(Z)
    Z = Z.reshape(xx.shape)
    # make the plot of the boundary and the data samples
    if ax is None:
        _, ax = plt.subplots()
    ax.contourf(xx, yy, Z, alpha=0.4, cmap="RdBu")
    return ax
```
Thus, for a linear classifier, we will obtain the following decision
boundaries. These boundaries lines indicate where the model changes its
prediction from one class to another.
```
from sklearn.linear_model import LogisticRegression
linear_model = LogisticRegression()
linear_model.fit(data_train, target_train)
import seaborn as sns
# create a palette to be used in the scatterplot
palette = ["tab:red", "tab:blue", "black"]
ax = sns.scatterplot(data=penguins, x=culmen_columns[0], y=culmen_columns[1],
hue=target_column, palette=palette)
plot_decision_function(linear_model, range_features, ax=ax)
# put the legend outside the plot
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
_ = plt.title("Decision boundary using a logistic regression")
```
We see that the lines are a combination of the input features since they are
not perpendicular to a specific axis. Indeed, this is due to the model
parametrization that we saw in the previous notebook, controlled by the
model's weights and intercept.
Besides, it seems that the linear model would be a good candidate for
such problem as it gives good accuracy.
```
linear_model.fit(data_train, target_train)
test_score = linear_model.score(data_test, target_test)
print(f"Accuracy of the LogisticRegression: {test_score:.2f}")
```
Unlike linear models, decision trees are non-parametric models: they are not
controlled by a mathematical decision function and do not have weights or
intercept to be optimized.
Indeed, decision trees will partition the space by considering a single
feature at a time. Let's illustrate this behaviour by having a decision
tree make a single split to partition the feature space.
```
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(max_depth=1)
tree.fit(data_train, target_train)
ax = sns.scatterplot(data=penguins, x=culmen_columns[0], y=culmen_columns[1],
hue=target_column, palette=palette)
plot_decision_function(tree, range_features, ax=ax)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
_ = plt.title("Decision boundary using a decision tree")
```
The partitions found by the algorithm separate the data along the axis
"Culmen Depth", discarding the feature "Culmen Length". Thus, it highlights
that a decision tree does not use a combination of features when making a
split. We can look more in depth at the tree structure.
```
from sklearn.tree import plot_tree
_, ax = plt.subplots(figsize=(8, 6))
_ = plot_tree(tree, feature_names=culmen_columns,
class_names=tree.classes_, impurity=False, ax=ax)
```
<div class="admonition tip alert alert-warning">
<p class="first admonition-title" style="font-weight: bold;">Tip</p>
<p class="last">We are using the function <tt class="docutils literal">fig, ax = <span class="pre">plt.subplots(figsize=(8,</span> 6))</tt> to create
a figure and an axis with a specific size. Then, we can pass the axis to the
<tt class="docutils literal">sklearn.tree.plot_tree</tt> function such that the drawing happens in this axis.</p>
</div>
We see that the split was done on the culmen depth feature. The original
dataset was subdivided into 2 sets based on the culmen depth
(inferior or superior to 16.45 mm).
This partition of the dataset minimizes the class diversities in each
sub-partitions. This measure is also known as a **criterion**,
and is a settable parameter.
If we look more closely at the partition, we see that the sample superior to
16.45 belongs mainly to the Adelie class. Looking at the values, we indeed
observe 103 Adelie individuals in this space. We also count 52 Chinstrap
samples and 6 Gentoo samples. We can make similar interpretation for the
partition defined by a threshold inferior to 16.45mm. In this case, the most
represented class is the Gentoo species.
Let's see how our tree would work as a predictor. Let's start to see the
class predicted when the culmen depth is inferior to the threshold.
```
tree.predict([[0, 15]])
```
The class predicted is the Gentoo. We can now check if we pass a culmen
depth superior to the threshold.
```
tree.predict([[0, 17]])
```
In this case, the tree predicts the Adelie species.
Thus, we can conclude that a decision tree classifier will predict the most
represented class within a partition.
During the training, we have a count of samples in each partition, we can
also compute the probability of belonging to a specific class within this
partition.
```
y_pred_proba = tree.predict_proba([[0, 17]])
y_proba_class_0 = pd.Series(y_pred_proba[0], index=tree.classes_)
y_proba_class_0.plot.bar()
plt.ylabel("Probability")
_ = plt.title("Probability to belong to a penguin class")
```
We will manually compute the different probability directly from the tree
structure.
```
adelie_proba = 103 / 161
chinstrap_proba = 52 / 161
gentoo_proba = 6 / 161
print(f"Probabilities for the different classes:\n"
f"Adelie: {adelie_proba:.3f}\n"
f"Chinstrap: {chinstrap_proba:.3f}\n"
f"Gentoo: {gentoo_proba:.3f}\n")
```
It is also important to note that the culmen length has been disregarded for
the moment. It means that whatever the value given, it will not be used
during the prediction.
```
tree.predict_proba([[10000, 17]])
```
Going back to our classification problem, the split found with a maximum
depth of 1 is not powerful enough to separate the three species and the model
accuracy is low when compared to the linear model.
```
tree.fit(data_train, target_train)
test_score = tree.score(data_test, target_test)
print(f"Accuracy of the DecisionTreeClassifier: {test_score:.2f}")
```
Indeed, it is not a surprise. We saw earlier that a single feature will not
be able to separate all three species. However, from the previous analysis we
saw that by using both features we should be able to get fairly good results.
In the next exercise, you will increase the size of the tree depth. You will
get intuitions on how the space partitioning is repeated over time.
| github_jupyter |
<a href="https://colab.research.google.com/github/Andrewzekid/MCExcelAPI/blob/main/ExcelAPI.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import pandas as pd
import math
import datetime
from openpyxl import Workbook
def clean_names(origional_name):
    """Collapse all runs of whitespace in a name to single spaces.

    "  Juri \t Mikoshiba \n" -> "Juri Mikoshiba"

    BUG FIX: the original iterated over the split tokens and rebound the
    loop variable (`i = i.replace("\t","")`), which never modifies the
    list, so tabs and newlines survived.  It also split only on spaces,
    missing tab-separated tokens.  str.split() with no argument splits on
    any whitespace and drops empty tokens, doing the whole job at once.
    """
    return " ".join(origional_name.split())
def clean_df(df):
    """Normalize the Name, Score, and Class columns of a leaderboard frame.

    Mutates *df* in place (new cleaned columns) and also returns it.
    """
    for column, cleaner in (("Name", clean_names),
                            ("Score", clean_scores),
                            ("Class", clean_names)):
        df[column] = df[column].map(cleaner)
    return df
clean_df(df3)
clean_names("Arita Mana")
clean_names("Arita Mana Angelina")
def clean_scores(origional_score):
    """Coerce a string score to an int; pass numeric scores through.

    int() already strips surrounding whitespace, so " 750 " -> 750.
    """
    # isinstance is the idiomatic type check; `type(x) == str` breaks for
    # str subclasses and is flagged by linters.
    if isinstance(origional_score, str):
        origional_score = int(origional_score)
    return origional_score
def clean_grade_string(string):
    """Normalize a grade label: drop spaces and ensure a leading 'G'.

    " 8A " -> "G8A"; an input already containing 'G' is left prefixed as-is.
    """
    compact = "".join(string.split(" "))
    return compact if "G" in compact else "G" + compact
SCORES_LIST = [750,500,250,100,100,100,100,100,100,100,100,100]
#define the global variable "Scores List"
from openpyxl import load_workbook
from openpyxl.utils import get_column_letter
def read_excel_file(path,rowstart,rowend,colstart,colend):
    """
    Print every cell value in a row/column window of a workbook, then load
    the same file into a pandas DataFrame.

    Parameters:
        path (str):
            Path to the .xlsx workbook.
        rowstart, rowend (int):
            1-based row range to print (rowend is exclusive, per range()).
        colstart, colend (int):
            1-based column range to print (colend is exclusive).

    Returns:
        (openpyxl.Workbook, pandas.DataFrame): the raw workbook handle and
        the file parsed as a DataFrame.
    """
    wb = load_workbook(path)
    ws = wb.active  # only the active (first visible) sheet is inspected
    for row in range(rowstart,rowend):
        for column in range(colstart,colend):
            col = get_column_letter(column)  # e.g. 1 -> "A"
            print(ws["{}{}".format(col,row)].value)
        print("=====================================")  # row separator
    df = pd.read_excel(path)  # re-read via pandas for tabular access
    return wb,df
wb,df = read_excel_file("Math Commitee Data (3).xlsx",1,11,1,4)
df
wb1,df1 = read_excel_file("Math Commitee Week 6(1-5).xlsx",1,4,1,20)
df1
df1
def process_results_file(path):
    """Load a weekly results workbook and return one cleaned row per respondent."""
    _, responses = read_excel_file(path, 1, 10, 1, 20)
    # columns from the form export that the scoring step does not need
    unwanted = [
        "Email",
        "Start time",
        "ID",
        "(Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!",
    ]
    cleaned = responses.dropna(axis=1).drop(unwanted, axis=1)
    # keep only each student's most recent submission
    return cleaned.drop_duplicates(subset="Name (Please type your name)", keep="last")
def check_answers_and_assign_scores(df, answer, booster=1):
    """
    Keep only the submissions that answered correctly and award points.

    Parameters:
        df: DataFrame from process_results_file(); must contain the columns
            'Name (Please type your name)', 'Homeroom', 'Your answer' and
            'Completion time'.
        answer: the correct answer for this week's question.
        booster (int): multiplier applied to every award (e.g. 2 for a
            double-points week).  Default 1.

    Returns:
        DataFrame with 'Name', 'Class' and 'Score' columns, fastest correct
        answer first.
    """
    correct_df = df[df["Your answer"] == answer]
    # rename to the column names used by the leaderboard sheet
    correct_df = correct_df.rename(
        {"Name (Please type your name)": "Name", "Homeroom": "Class"}, axis=1)
    # fastest correct submissions get the highest awards
    correct_df["Completion time"] = pd.to_datetime(correct_df["Completion time"])
    correct_df = correct_df.sort_values("Completion time")
    # BUG FIX: the original did `SCORES_LIST[:n] * booster`, which repeats
    # the *list* `booster` times (wrong length -> ValueError on assignment
    # for booster > 1) instead of scaling each individual award.
    correct_df["Score"] = [score * booster for score in SCORES_LIST[:len(correct_df)]]
    # completion time and answer are no longer needed
    correct_df = correct_df.drop(["Completion time", "Your answer"], axis=1)
    correct_df = correct_df.reset_index(drop=True)
    print(correct_df)
    return correct_df
def check_and_remove_descrepancies(df, df2):
    """
    Merge newly earned scores (df2) into the running leaderboard (df).

    Both frames must have 'Name' and 'Score' columns.  Players already on
    the leaderboard get the new score added to their total; new players
    are appended as new rows.

    Returns:
        DataFrame indexed by Name, sorted by Score descending.
    """
    existing = list(df["Name"])
    incoming = list(df2["Name"])
    df = df.set_index("Name")
    df2 = df2.set_index("Name")
    newcomers = []
    for name in incoming:
        if name in existing:
            # repeat scorer: accumulate into their running total
            df.loc[name, "Score"] = df.loc[name, "Score"] + df2.loc[name, "Score"]
        else:
            newcomers.append(name)
    if newcomers:
        # BUG FIX: DataFrame.append was removed in pandas 2.0 — use concat.
        df = pd.concat([df, df2.loc[newcomers]])
    return df.sort_values("Score", ascending=False)
process_results_file("Math Commitee Week 6(1-5).xlsx")
def update_leaderboard(path_to_leaderboard,path_to_new_data,answer,booster=1):
    """
    Merge one week's quiz submissions into the running leaderboard.

    Parameters:
        path_to_leaderboard: Excel file holding the current scores.
        path_to_new_data: Excel file of this week's form submissions.
        answer: the correct answer for this week's question.
        booster: score multiplier for the week (default 1).

    Returns:
        The updated leaderboard DataFrame, sorted by Score descending.
    """
    wb,df1 = read_excel_file(path_to_leaderboard,1,11,1,4)
    df1 = clean_df(df1)
    # load and score this week's submissions
    df3 = process_results_file(path_to_new_data)
    df3 = check_answers_and_assign_scores(df3,answer=answer,booster=booster)
    df3 = clean_df(df3)
    print(df3)
    # fold the new scores into the existing leaderboard
    final_df = check_and_remove_descrepancies(df1,df3)
    return final_df
final_df = update_leaderboard("Math Commitee Data (3).xlsx","Math Commitee Week 6(1-5).xlsx",answer=11)
final_df
import re
clean_grade_string(" 9A ")
#normally the whitespace in " 9A " would ca
wb,df = read_excel_file("Math Commitee Data (3).xlsx",1,11,1,4)
df2 = process_results_file("Math Commitee Week 6(1-5).xlsx")
df2
def check_answers_and_assign_scores(df, answer, booster=1):
    """
    Keep only the submissions that answered correctly and award points.

    Parameters:
        df: DataFrame from process_results_file(); must contain the columns
            'Name (Please type your name)', 'Homeroom', 'Your answer' and
            'Completion time'.
        answer: the correct answer for this week's question.
        booster (int): multiplier applied to every award.  Default 1.

    Returns:
        DataFrame with 'Name', 'Class' and 'Score' columns, fastest correct
        answer first.
    """
    correct_df = df[df["Your answer"] == answer]
    # rename to the column names used by the leaderboard sheet
    correct_df = correct_df.rename(
        {"Name (Please type your name)": "Name", "Homeroom": "Class"}, axis=1)
    # fastest correct submissions get the highest awards
    correct_df["Completion time"] = pd.to_datetime(correct_df["Completion time"])
    correct_df = correct_df.sort_values("Completion time")
    # BUG FIX: `SCORES_LIST[:n] * booster` repeated the *list* `booster`
    # times (wrong length -> ValueError for booster > 1); scale each award.
    correct_df["Score"] = [score * booster for score in SCORES_LIST[:len(correct_df)]]
    # completion time and answer are no longer needed
    correct_df = correct_df.drop(["Completion time", "Your answer"], axis=1)
    correct_df = correct_df.reset_index(drop=True)
    print(correct_df)
    return correct_df
def modify_score(path, value, name, cellstr=None, df=None):
    """
    Modify one score, either directly in the spreadsheet or in a DataFrame.

    Parameters:
        path:
            Path to the Excel file to modify (only read when cellstr is given).
        value:
            New score value.
        name:
            Name (index label) of the person whose score is being modified.
        cellstr:
            Cell reference such as "B3".  If given, the workbook is edited
            and saved; otherwise *df* is edited.
        df:
            DataFrame indexed by Name, required when cellstr is None.

    Returns:
        The (possibly modified) DataFrame, or None in workbook mode.
    """
    if cellstr is not None:
        # Only load the workbook when we actually edit it.
        wb = load_workbook(path)
        # BUG FIX: cells live on a worksheet, not the Workbook object —
        # `wb[cellstr]` looked up a *sheet* named like the cell reference.
        ws = wb.active
        ws[cellstr] = value
        print("Modified Value of Cell {} to: {}".format(cellstr, value))
        wb.save("Math Commitee Data modified.xlsx")
    else:
        # BUG FIX: `df == None` compares elementwise and raises
        # "truth value of a DataFrame is ambiguous" — use identity check.
        if df is None:
            raise ValueError("Please provide a valid dataframe, or provide a cellstring")
        df.loc[name, "Score"] = value
        print("{}'s score has been changed to {}!".format(name, value))
    return df
grade_student_pairs = {"Juri Mikoshiba":"G6A","Manato Tanaka":"G6B","Tyler Glanville":"G6A","Shoukei Hada":"G6A","Ota Horii":"G6B","Divyansh Gupta":"G8A","Yuvraj Jadia":"G6B","Arita Mana Angelina":"G6B","Shi Xintong":"G8B","Zhang Tongxi":"G7A"}
wb,df1 = read_excel_file("Math Commitee Data (3).xlsx",1,4,1,12)
df1
df3
df4 = df3.set_index("Name")
df4.index
df4.append(df4.loc["Aryan Sokhiya "],ignore_index=False)
df = check_and_remove_descrepancies(df1,df3)
df
print(df1,df3)
df1
df3
df1.loc[len(df1) + 1] = {"Andrew":[1,3]}
df3 = pd.DataFrame({"Name":["Andrew Wang"],"Class":["G9B"],"Score":[1500]})
df3.set_index("Name")
pd.concat([df1,df3])
df1
df1
test_1,df2 = read_excel_file("Updated scores (1).xlsx",1,4,1,12)
df2 = clean_df(df2)
df2
df1
df = check_and_remove_descrepancies(df1,df2)
df
df2
df1
df
test_1,df2 = read_excel_file("Math Commitee Data (3).xlsx",1,2,1,12)
df2
df1
test_1
df.index
df.index[2] = "Tyler Glanville"
list(df.index)
indexes = list(df.index)
for i in range(len(indexes)):
print(i)
x = indexes[i].replace("\t","")
x = x.replace("\n","")
indexes[i] = x
df = df.set_index("Score")
df
df["Name"] = indexes
df
df.set_index("Name")
df.Score
df
df1 = df.copy(deep=True)
df1
df1 = clean_df(df1)
df1
indexes
df
df.inde
```
| github_jupyter |
<img src="../../images/banners/python-advanced.png" width="600"/>
# <img src="../../images/logos/python.png" width="23"/> Python's property(): Add Managed Attributes to Your Classes
## <img src="../../images/logos/toc.png" width="20"/> Table of Contents
* [Managing Attributes in Your Classes](#managing_attributes_in_your_classes)
* [The Getter and Setter Approach in Python](#the_getter_and_setter_approach_in_python)
* [The Pythonic Approach](#the_pythonic_approach)
* [Getting Started With Python’s `property()`](#getting_started_with_python’s_`property()`)
* [Creating Attributes With `property()`](#creating_attributes_with_`property()`)
* [Using `property()` as a Decorator](#using_`property()`_as_a_decorator)
* [Providing Read-Only Attributes](#providing_read-only_attributes)
* [Creating Read-Write Attributes](#creating_read-write_attributes)
* [Providing Write-Only Attributes](#providing_write-only_attributes)
* [Putting Python’s `property()` Into Action](#putting_python’s_`property()`_into_action)
* [Validating Input Values](#validating_input_values)
* [Providing Computed Attributes](#providing_computed_attributes)
* [Caching Computed Attributes](#caching_computed_attributes)
* [Logging Attribute Access and Mutation](#logging_attribute_access_and_mutation)
* [Managing Attribute Deletion](#managing_attribute_deletion)
* [Creating Backward-Compatible Class APIs](#creating_backward-compatible_class_apis)
* [Overriding Properties in Subclasses](#overriding_properties_in_subclasses)
* [Conclusion](#conclusion)
---
With Python’s [`property()`](https://docs.python.org/3/library/functions.html#property), you can create **managed attributes** in your classes. You can use managed attributes, also known as **properties**, when you need to modify their internal implementation without changing the public [API](https://en.wikipedia.org/wiki/API) of the class. Providing stable APIs can help you avoid breaking your users’ code when they rely on your classes and objects.
Properties are arguably the most popular way to create managed attributes quickly and in the purest [Pythonic](https://realpython.com/learning-paths/writing-pythonic-code/) style.
**In this tutorial, you’ll learn how to:**
- Create **managed attributes** or **properties** in your classes
- Perform **lazy attribute evaluation** and provide **computed attributes**
- Avoid **setter** and **getter** methods to make your classes more Pythonic
- Create **read-only**, **read-write**, and **write-only** properties
- Create consistent and **backward-compatible APIs** for your classes
You’ll also write a few practical examples that use `property()` for validating input data, computing attribute values dynamically, logging your code, and more. To get the most out of this tutorial, you should know the basics of [object-oriented](https://realpython.com/python3-object-oriented-programming/) programming and [decorators](https://realpython.com/primer-on-python-decorators/) in Python.
<a class="anchor" id="managing_attributes_in_your_classes"></a>
## Managing Attributes in Your Classes
When you define a class in an [object-oriented](https://en.wikipedia.org/wiki/Object-oriented_programming) programming language, you’ll probably end up with some instance and class [attributes](https://realpython.com/python3-object-oriented-programming/#class-and-instance-attributes). In other words, you’ll end up with variables that are accessible through the instance, class, or even both, depending on the language. Attributes represent or hold the internal [state](https://en.wikipedia.org/wiki/State_(computer_science)) of a given object, which you’ll often need to access and mutate.
Typically, you have at least two ways to manage an attribute. Either you can access and mutate the attribute directly or you can use **methods**. Methods are functions attached to a given class. They provide the behaviors and actions that an object can perform with its internal data and attributes.
If you expose your attributes to the user, then they become part of the public [API](https://en.wikipedia.org/wiki/API) of your classes. Your user will access and mutate them directly in their code. The problem comes when you need to change the internal implementation of a given attribute.
Say you’re working on a `Circle` class. The initial implementation has a single attribute called `.radius`. You finish coding the class and make it available to your end users. They start using `Circle` in their code to create a lot of awesome projects and applications. Good job!
Now suppose that you have an important user that comes to you with a new requirement. They don’t want `Circle` to store the radius any longer. They need a public `.diameter` attribute.
At this point, removing `.radius` to start using `.diameter` could break the code of some of your end users. You need to manage this situation in a way other than removing `.radius`.
Programming languages such as [Java](https://realpython.com/oop-in-python-vs-java/) and [C++](https://en.wikipedia.org/wiki/C%2B%2B) encourage you to never expose your attributes to avoid this kind of problem. Instead, you should provide **getter** and **setter** methods, also known as [accessors](https://en.wikipedia.org/wiki/Accessor_method) and [mutators](https://en.wikipedia.org/wiki/Mutator_method), respectively. These methods offer a way to change the internal implementation of your attributes without changing your public API.
In the end, these languages need getter and setter methods because they don’t provide a suitable way to change the internal implementation of an attribute if a given requirement changes. Changing the internal implementation would require an API modification, which can break your end users’ code.
<a class="anchor" id="the_getter_and_setter_approach_in_python"></a>
### The Getter and Setter Approach in Python
Technically, there’s nothing that stops you from using getter and setter [methods](https://realpython.com/python3-object-oriented-programming/#instance-methods) in Python. Here’s how this approach would look:
```
# point.py
class Point:
def __init__(self, x, y):
self._x = x
self._y = y
def get_x(self):
return self._x
def set_x(self, value):
self._x = value
def get_y(self):
return self._y
def set_y(self, value):
self._y = value
```
In this example, you create `Point` with two **non-public attributes** `._x` and `._y` to hold the [Cartesian coordinates](https://en.wikipedia.org/wiki/Cartesian_coordinate_system) of the point at hand.
To access and mutate the value of either `._x` or `._y`, you can use the corresponding getter and setter methods. Go ahead and save the above definition of `Point` in a Python [module](https://realpython.com/python-modules-packages/) and [import](https://realpython.com/python-import/) the class into your [interactive shell](https://realpython.com/interacting-with-python/).
Here’s how you can work with `Point` in your code:
```
point = Point(12, 5)
point.get_x()
point.get_y()
point.set_x(42)
point.get_x()
# Non-public attributes are still accessible
point._x
point._y
```
With `.get_x()` and `.get_y()`, you can access the current values of `._x` and `._y`. You can use the setter method to store a new value in the corresponding managed attribute. From this code, you can confirm that Python doesn’t restrict access to non-public attributes. Whether or not you do so is up to you.
<a class="anchor" id="the_pythonic_approach"></a>
### The Pythonic Approach
Even though the example you just saw uses the Python coding style, it doesn’t look Pythonic. In the example, the getter and setter methods don’t perform any further processing with `._x` and `._y`. You can rewrite `Point` in a more concise and Pythonic way:
```
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
point = Point(12, 5)
point.x
point.y
point.x = 42
point.x
```
This code uncovers a fundamental principle. Exposing attributes to the end user is normal and common in Python. You don’t need to clutter your classes with getter and setter methods all the time, which sounds pretty cool! However, how can you handle requirement changes that would seem to involve API changes?
Unlike Java and C++, Python provides handy tools that allow you to change the underlying implementation of your attributes without changing your public API. The most popular approach is to turn your attributes into **properties**.
[Properties](https://en.wikipedia.org/wiki/Property_(programming)) represent an intermediate functionality between a plain attribute (or field) and a method. In other words, they allow you to create methods that behave like attributes. With properties, you can change how you compute the target attribute whenever you need to do so.
For example, you can turn both `.x` and `.y` into properties. With this change, you can continue accessing them as attributes. You’ll also have an underlying method holding `.x` and `.y` that will allow you to modify their internal implementation and perform actions on them right before your users access and mutate them.
The main advantage of Python properties is that they allow you to expose your attributes as part of your public API. If you ever need to change the underlying implementation, then you can turn the attribute into a property at any time without much pain.
In the following sections, you’ll learn how to create properties in Python.
<a class="anchor" id="getting_started_with_python’s_`property()`"></a>
## Getting Started With Python’s `property()`
Python’s [`property()`](https://docs.python.org/3/library/functions.html#property) is the Pythonic way to avoid formal getter and setter methods in your code. This function allows you to turn [class attributes](https://realpython.com/python3-object-oriented-programming/#class-and-instance-attributes) into **properties** or **managed attributes**. Since `property()` is a built-in function, you can use it without importing anything. Additionally, `property()` was [implemented in C](https://github.com/python/cpython/blob/main/Objects/descrobject.c#L1460) to ensure optimal performance.
With `property()`, you can attach getter and setter methods to given class attributes. This way, you can handle the internal implementation for that attribute without exposing getter and setter methods in your API. You can also specify a way to handle attribute deletion and provide an appropriate [docstring](https://realpython.com/documenting-python-code/) for your properties.
Here’s the full signature of `property()`:
```
property(fget=None, fset=None, fdel=None, doc=None)
```
The first two arguments take function objects that will play the role of getter (`fget`) and setter (`fset`) methods. Here’s a summary of what each argument does:
|Argument|Description|
|:--|:--|
|`fget`|Function that returns the value of the managed attribute|
|`fset`|Function that allows you to set the value of the managed attribute|
|`fdel`|Function to define how the managed attribute handles deletion|
|`doc`|String representing the property’s docstring|
The [return](https://realpython.com/python-return-statement/) value of `property()` is the managed attribute itself. If you access the managed attribute, as in `obj.attr`, then Python automatically calls `fget()`. If you assign a new value to the attribute, as in `obj.attr = value`, then Python calls `fset()` using the input `value` as an argument. Finally, if you run a `del obj.attr` statement, then Python automatically calls `fdel()`.
You can use `doc` to provide an appropriate docstring for your properties. You and your fellow programmers will be able to read that docstring using Python’s [`help()`](https://docs.python.org/3/library/functions.html#help). The `doc` argument is also useful when you’re working with [code editors and IDEs](https://realpython.com/python-ides-code-editors-guide/) that support docstring access.
You can use `property()` either as a [function](https://realpython.com/defining-your-own-python-function/) or a [decorator](https://realpython.com/primer-on-python-decorators/) to build your properties. In the following two sections, you’ll learn how to use both approaches. However, you should know up front that the decorator approach is more popular in the Python community.
<a class="anchor" id="creating_attributes_with_`property()`"></a>
### Creating Attributes With `property()`
You can create a property by calling `property()` with an appropriate set of arguments and assigning its return value to a class attribute. All the arguments to `property()` are optional. However, you typically provide at least a **getter function**.
The following example shows how to create a `Circle` class with a handy property to manage its radius:
```
# circle.py
class Circle:
def __init__(self, radius):
self._radius = radius
def _get_radius(self):
print("Get radius")
return self._radius
def _set_radius(self, value):
print("Set radius")
self._radius = value
def _del_radius(self):
print("Delete radius")
del self._radius
radius = property(
fget=_get_radius,
fset=_set_radius,
fdel=_del_radius,
doc="The radius property."
)
```
In this code snippet, you create `Circle`. The class initializer, `.__init__()`, takes `radius` as an argument and stores it in a non-public attribute called `._radius`. Then you define three non-public methods:
1. **`._get_radius()`** returns the current value of `._radius`
2. **`._set_radius()`** takes `value` as an argument and assigns it to `._radius`
3. **`._del_radius()`** deletes the instance attribute `._radius`
Once you have these three methods in place, you create a class attribute called `.radius` to store the property object. To initialize the property, you pass the three methods as arguments to `property()`. You also pass a suitable docstring for your property.
In this example, you use [keyword arguments](https://realpython.com/defining-your-own-python-function/#keyword-arguments) to improve the code readability and prevent confusion. That way, you know exactly which method goes into each argument.
To give `Circle` a try, run the following code:
```
circle = Circle(42.0)
circle.radius
circle.radius = 100.0
circle.radius
del circle.radius
circle.radius
help(circle)
```
The `.radius` property hides the non-public instance attribute `._radius`, which is now your managed attribute in this example. You can access and assign `.radius` directly. Internally, Python automatically calls `._get_radius()` and `._set_radius()` when needed. When you execute `del circle.radius`, Python calls `._del_radius()`, which deletes the underlying `._radius`.
<div class="alert alert-success" role="alert">
Properties are <strong>class attributes</strong> that manage <strong>instance attributes</strong>.
</div>
You can think of a property as a collection of methods bundled together. If you inspect `.radius` carefully, then you can find the raw methods you provided as the `fget`, `fset`, and `fdel` arguments:
```
Circle.radius.fget
Circle.radius.fset
Circle.radius.fdel
dir(Circle.radius)
```
You can access the getter, setter, and deleter methods in a given property through the corresponding `.fget`, `.fset`, and `.fdel`.
Properties are also **overriding descriptors**. If you use [`dir()`](https://realpython.com/python-scope-legb-rule/#dir) to check the internal members of a given property, then you’ll find `.__set__()` and `.__get__()` in the list. These methods provide a default implementation of the [descriptor protocol](https://docs.python.org/3/howto/descriptor.html#descriptor-protocol).
The default implementation of `.__set__()`, for example, runs when you don’t provide a custom setter method. In this case, you get an `AttributeError` because there’s no way to set the underlying property.
<a class="anchor" id="using_`property()`_as_a_decorator"></a>
### Using `property()` as a Decorator
Decorators are everywhere in Python. They’re functions that take another function as an argument and return a new function with added functionality. With a decorator, you can attach pre- and post-processing operations to an existing function.
When [Python 2.2](https://docs.python.org/3/whatsnew/2.2.html#attribute-access) introduced `property()`, the decorator syntax wasn’t available. The only way to define properties was to pass getter, setter, and deleter methods, as you learned before. The decorator syntax was added in [Python 2.4](https://docs.python.org/3/whatsnew/2.4.html#pep-318-decorators-for-functions-and-methods), and nowadays, using `property()` as a decorator is the most popular practice in the Python community.
The decorator syntax consists of placing the name of the decorator function with a leading `@` symbol right before the definition of the function you want to decorate:
```
@decorator
def func(a):
return a
```
In this code fragment, `@decorator` can be a function or class intended to decorate `func()`. This syntax is equivalent to the following:
```
def func(a):
return a
func = decorator(func)
```
The final line of code reassigns the name `func` to hold the result of calling `decorator(func)`. Note that this is the same syntax you used to create a property in the section above.
Python’s `property()` can also work as a decorator, so you can use the `@property` syntax to create your properties quickly:
```
# circle.py
class Circle:
def __init__(self, radius):
self._radius = radius
@property
def radius(self):
"""The radius property."""
print("Get radius")
return self._radius
@radius.setter
def radius(self, value):
print("Set radius")
self._radius = value
@radius.deleter
def radius(self):
print("Delete radius")
del self._radius
```
This code looks pretty different from the getter and setter methods approach. `Circle` now looks more Pythonic and clean. You don’t need to use method names such as `._get_radius()`, `._set_radius()`, and `._del_radius()` anymore. Now you have three methods with the same clean and descriptive attribute-like name. How is that possible?
The decorator approach for creating properties requires defining a first method using the public name for the underlying managed attribute, which is `.radius` in this case. This method should implement the getter logic.
Then you define the setter method for `.radius`. In this case, the syntax is fairly different. Instead of using `@property` again, you use `@radius.setter`. Why do you need to do that? Take another look at the `dir()` output:
```
dir(Circle.radius)
```
Besides `.fget`, `.fset`, `.fdel`, and a bunch of other special attributes and methods, `property` also provides `.deleter()`, `.getter()`, and `.setter()`. These three methods each return a new property.
When you decorate the second `.radius()` method with `@radius.setter`, you create a new property and reassign the class-level name `.radius` to hold it. This new property contains the same set of methods as the initial property, with the addition of the new setter method. Finally, the decorator syntax reassigns the new property to the `.radius` class-level name.
The mechanism to define the deleter method is similar. This time, you need to use the `@radius.deleter` decorator. At the end of the process, you get a full-fledged property with the getter, setter, and deleter methods.
Finally, how can you provide suitable docstrings for your properties when you use the decorator approach? If you check `Circle` again, you’ll note that you already did so by adding a docstring to the getter method.
The new `Circle` implementation works the same as the example in the section above:
```
circle = Circle(42.0)
circle.radius
circle.radius = 100.0
circle.radius
del circle.radius
# This should raise AttributeError because we deleted circle.radius above
circle.radius
help(circle)
```
You don’t need to use a pair of parentheses for calling `.radius()` as a method. Instead, you can access `.radius` as you would access a regular attribute, which is the primary use of properties. They allow you to treat methods as attributes, and they take care of calling the underlying set of methods automatically.
Here’s a recap of some important points to remember when you’re creating properties with the decorator approach:
- The `@property` decorator must decorate the **getter method**.
- The docstring must go in the **getter method**.
- The **setter and deleter methods** must be decorated with the name of the getter method plus `.setter` and `.deleter`, respectively.
Up to this point, you’ve created managed attributes using `property()` as a function and as a decorator. If you check your `Circle` implementations so far, then you’ll note that their getter and setter methods don’t add any real extra processing on top of your attributes.
In general, you should avoid turning attributes that don’t require extra processing into properties. Using properties in those situations can make your code:
- Unnecessarily verbose
- Confusing to other developers
- Slower than code based on regular attributes
Unless you need something more than bare attribute access, don’t write properties. They’re a waste of [CPU](https://en.wikipedia.org/wiki/Central_processing_unit) time, and more importantly, they’re a waste of *your* time. Finally, you should avoid writing explicit getter and setter methods and then wrapping them in a property. Instead, use the `@property` decorator. That’s currently the most Pythonic way to go.
<a class="anchor" id="providing_read-only_attributes"></a>
## Providing Read-Only Attributes
Probably the most elementary use case of `property()` is to provide **read-only attributes** in your classes. Say you need an [immutable](https://docs.python.org/3/glossary.html#term-immutable) `Point` class that doesn’t allow the user to mutate the original value of its coordinates, `x` and `y`. To achieve this goal, you can create `Point` like in the following example:
```
# point.py
class Point:
def __init__(self, x, y):
self._x = x
self._y = y
@property
def x(self):
return self._x
@property
def y(self):
return self._y
```
Here, you store the input arguments in the attributes `._x` and `._y`. As you already learned, using the leading underscore (`_`) in names tells other developers that they’re non-public attributes and shouldn’t be accessed using dot notation, such as in `point._x`. Finally, you define two getter methods and decorate them with `@property`.
Now you have two read-only properties, `.x` and `.y`, as your coordinates:
```
point = Point(12, 5)
# Read coordinates
point.x
point.y
# Write coordinates
point.x = 42
```
Here, `point.x` and `point.y` are bare-bone examples of read-only properties. Their behavior relies on the underlying descriptor that `property` provides. As you already saw, the default `.__set__()` implementation raises an `AttributeError` when you don’t define a proper setter method.
You can take this implementation of `Point` a little bit further and provide explicit setter methods that raise a custom exception with more elaborate and specific messages:
```
# point.py
class WriteCoordinateError(Exception):
pass
class Point:
def __init__(self, x, y):
self._x = x
self._y = y
@property
def x(self):
return self._x
@x.setter
def x(self, value):
raise WriteCoordinateError("x coordinate is read-only")
@property
def y(self):
return self._y
@y.setter
def y(self, value):
raise WriteCoordinateError("y coordinate is read-only")
```
In this example, you define a custom exception called `WriteCoordinateError`. This exception allows you to customize the way you implement your immutable `Point` class. Now, both setter methods raise your custom exception with a more explicit message. Go ahead and give your improved `Point` a try!
<a class="anchor" id="creating_read-write_attributes"></a>
## Creating Read-Write Attributes
You can also use `property()` to provide managed attributes with **read-write** capabilities. In practice, you just need to provide the appropriate getter method (“read”) and setter method (“write”) to your properties in order to create read-write managed attributes.
Say you want your `Circle` class to have a `.diameter` attribute. However, taking the radius and the diameter in the class initializer seems unnecessary because you can compute the one using the other. Here’s a `Circle` that manages `.radius` and `.diameter` as read-write attributes:
```
# circle.py
import math
class Circle:
def __init__(self, radius):
self.radius = radius
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, value):
self._radius = float(value)
@property
def diameter(self):
return self.radius * 2
@diameter.setter
def diameter(self, value):
self.radius = value / 2
```
Here, you create a `Circle` class with a read-write `.radius`. In this case, the getter method just returns the radius value. The setter method converts the input value for the radius and assigns it to the non-public `._radius`, which is the variable you use to store the final data.
> **Note:** There is a subtle detail to note in this new implementation of `Circle` and its `.radius` attribute. In this case, the class initializer assigns the input value to the `.radius` property directly instead of storing it in a dedicated non-public attribute, such as `._radius`.
Why? Because you need to make sure that every value provided as a radius, including the initialization value, goes through the setter method and gets converted to a floating-point number.
`Circle` also implements a `.diameter` attribute as a property. The getter method computes the diameter using the radius. The setter method does something curious. Instead of storing the input diameter `value` in a dedicated attribute, it calculates the radius and writes the result into `.radius`.
Here’s how your `Circle` works:
```
circle = Circle(42)
circle.radius
circle.diameter
circle.diameter = 100
circle.diameter
circle.radius
```
Both `.radius` and `.diameter` work as normal attributes in these examples, providing a clean and Pythonic public API for your `Circle` class.
<a class="anchor" id="providing_write-only_attributes"></a>
## Providing Write-Only Attributes
You can also create **write-only** attributes by tweaking how you implement the getter method of your properties. For example, you can make your getter method raise an exception every time a user accesses the underlying attribute value.
Here’s an example of handling passwords with a write-only property:
```
# users.py
import hashlib
import os
class User:
def __init__(self, name, password):
self.name = name
self.password = password
@property
def password(self):
raise AttributeError("Password is write-only")
@password.setter
def password(self, plaintext):
salt = os.urandom(32)
self._hashed_password = hashlib.pbkdf2_hmac(
"sha256", plaintext.encode("utf-8"), salt, 100_000
)
```
The initializer of `User` takes a username and a password as arguments and stores them in `.name` and `.password`, respectively. You use a property to manage how your class processes the input password. The getter method raises an `AttributeError` whenever a user tries to retrieve the current password. This turns `.password` into a write-only attribute:
```
john = User("John", "secret")
john._hashed_password
john.password
john.password = "supersecret"
john._hashed_password
```
In this example, you create `john` as a `User` instance with an initial password. The setter method hashes the password and stores it in `._hashed_password`. Note that when you try to access `.password` directly, you get an `AttributeError`. Finally, assigning a new value to `.password` triggers the setter method and creates a new hashed password.
In the setter method of `.password`, you use `os.urandom()` to generate a 32-byte random [string](https://realpython.com/python-strings/) as your hashing function’s [salt](https://en.wikipedia.org/wiki/Salt_(cryptography)). To generate the hashed password, you use [`hashlib.pbkdf2_hmac()`](https://docs.python.org/3/library/hashlib.html#hashlib.pbkdf2_hmac). Then you store the resulting hashed password in the non-public attribute `._hashed_password`. Doing so ensures that you never save the plaintext password in any retrievable attribute.
<a class="anchor" id="putting_python’s_`property()`_into_action"></a>
## Putting Python’s `property()` Into Action
So far, you’ve learned how to use Python’s `property()` built-in function to create managed attributes in your classes. You used `property()` as a function and as a decorator and learned about the differences between these two approaches. You also learned how to create read-only, read-write, and write-only attributes.
In the following sections, you’ll code a few examples that will help you get a better practical understanding of common use cases of `property()`.
<a class="anchor" id="validating_input_values"></a>
### Validating Input Values
One of the most common use cases of `property()` is building managed attributes that validate the input data before storing or even accepting it as a secure input. [Data validation](https://en.wikipedia.org/wiki/Data_validation) is a common requirement in code that takes input from users or other information sources that you consider untrusted.
Python’s `property()` provides a quick and reliable tool for dealing with input data validation. For example, thinking back to the `Point` example, you may require the values of `.x` and `.y` to be valid [numbers](https://realpython.com/python-numbers/). Since your users are free to enter any type of data, you need to make sure that your point only accepts numbers.
Here’s an implementation of `Point` that manages this requirement:
```
# point.py
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
@property
def x(self):
return self._x
@x.setter
def x(self, value):
try:
self._x = float(value)
print("Validated!")
except ValueError:
raise ValueError('"x" must be a number') from None
@property
def y(self):
return self._y
@y.setter
def y(self, value):
try:
self._y = float(value)
print("Validated!")
except ValueError:
raise ValueError('"y" must be a number') from None
```
The setter methods of `.x` and `.y` use [`try` … `except`](https://realpython.com/python-exceptions/#the-try-and-except-block-handling-exceptions) blocks that validate input data using the Python [EAFP](https://docs.python.org/3/glossary.html#term-eafp) style. If the call to `float()` succeeds, then the input data is valid, and you get `Validated!` on your screen. If `float()` raises a `ValueError`, then the user gets a `ValueError` with a more specific message.
It’s important to note that assigning the `.x` and `.y` properties directly in `.__init__()` ensures that the validation also occurs during object initialization. Not doing so is a common mistake when using `property()` for data validation.
Here’s how your `Point` class works now:
```
point = Point(12, 5)
point.x
point.y
point.x = 42
point.x
point.y = 100.0
point.y
point.x = "one"
point.y = "1o"
```
If you assign `.x` and `.y` values that `float()` can turn into floating-point numbers, then the validation is successful, and the value is accepted. Otherwise, you get a `ValueError`.
This implementation of `Point` uncovers a fundamental weakness of `property()`. Did you spot it?
That’s it! You have repetitive code that follows specific patterns. This repetition breaks the [DRY (Don’t Repeat Yourself)](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle, so you would want to [refactor](https://realpython.com/python-refactoring/) this code to avoid it. To do so, you can abstract out the repetitive logic using a descriptor:
```
# point.py
class Coordinate:
def __set_name__(self, owner, name):
self._name = name
def __get__(self, instance, owner):
return instance.__dict__[self._name]
def __set__(self, instance, value):
try:
instance.__dict__[self._name] = float(value)
print("Validated!")
except ValueError:
raise ValueError(f'"{self._name}" must be a number') from None
class Point:
x = Coordinate()
y = Coordinate()
def __init__(self, x, y):
self.x = x
self.y = y
```
Now your code is a bit shorter. You managed to remove repetitive code by defining `Coordinate` as a [descriptor](https://realpython.com/python-descriptors/) that manages your data validation in a single place. The code works just like your earlier implementation. Go ahead and give it a try!
In general, if you find yourself copying and pasting property definitions all around your code or if you spot repetitive code like in the example above, then you should consider using a proper descriptor.
<a class="anchor" id="providing_computed_attributes"></a>
### Providing Computed Attributes
If you need an attribute that builds its value dynamically whenever you access it, then `property()` is the way to go. These kinds of attributes are commonly known as **computed attributes**. They’re handy when you need them to look like [eager](https://en.wikipedia.org/wiki/Eager_evaluation) attributes, but you want them to be [lazy](https://en.wikipedia.org/wiki/Lazy_evaluation).
The main reason for creating eager attributes is to optimize computation costs when you access the attribute often. On the other hand, if you rarely use a given attribute, then a lazy property can postpone its computation until needed, which can make your programs more efficient.
Here’s an example of how to use `property()` to create a computed attribute `.area` in a `Rectangle` class:
```
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
@property
def area(self):
return self.width * self.height
```
In this example, the `Rectangle` initializer takes `width` and `height` as arguments and stores them in regular instance attributes. The read-only property `.area` computes and returns the area of the current rectangle every time you access it.
Another common use case of properties is to provide an auto-formatted value for a given attribute:
```
class Product:
def __init__(self, name, price):
self._name = name
self._price = float(price)
@property
def price(self):
return f"${self._price:,.2f}"
```
In this example, `.price` is a property that formats and returns the price of a particular product. To provide a currency-like format, you use an [f-string](https://realpython.com/python-f-strings/) with appropriate formatting options.
As a final example of computed attributes, say you have a `Point` class that uses `.x` and `.y` as Cartesian coordinates. You want to provide [polar coordinates](https://en.wikipedia.org/wiki/Polar_coordinate_system) for your point so that you can use them in a few computations. The polar coordinate system represents each point using the distance to the origin and the angle with the horizontal coordinate axis.
Here’s a Cartesian coordinates `Point` class that also provides computed polar coordinates:
```
# point.py
import math
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
@property
def distance(self):
return round(math.dist((0, 0), (self.x, self.y)))
@property
def angle(self):
return round(math.degrees(math.atan(self.y / self.x)), 1)
def as_cartesian(self):
return self.x, self.y
def as_polar(self):
return self.distance, self.angle
```
This example shows how to compute the distance and angle of a given `Point` object using its `.x` and `.y` Cartesian coordinates. Here’s how this code works in practice:
```
point = Point(12, 5)
point.x
point.y
point.distance
point.angle
point.as_cartesian()
point.as_polar()
```
When it comes to providing computed or lazy attributes, `property()` is a pretty handy tool. However, if you’re creating an attribute that you use frequently, then computing it every time can be costly and wasteful. A good strategy is to [cache](https://realpython.com/lru-cache-python/) them once the computation is done.
<a class="anchor" id="caching_computed_attributes"></a>
### Caching Computed Attributes
Sometimes you have a given computed attribute that you use frequently. Constantly repeating the same computation may be unnecessary and expensive. To work around this problem, you can cache the computed value and save it in a non-public dedicated attribute for further reuse.
To prevent unexpected behaviors, you need to think of the mutability of the input data. If you have a property that computes its value from constant input values, then the result will never change. In that case, you can compute the value just once:
```
# circle.py
from time import sleep
class Circle:
def __init__(self, radius):
self.radius = radius
self._diameter = None
@property
def diameter(self):
if self._diameter is None:
sleep(0.5) # Simulate a costly computation
self._diameter = self.radius * 2
return self._diameter
```
Even though this implementation of `Circle` properly caches the computed diameter, it has the drawback that if you ever change the value of `.radius`, then `.diameter` won’t return a correct value:
```
circle = Circle(42.0)
circle.radius
circle.diameter # With delay
circle.diameter # Without delay
circle.radius = 100.0
circle.diameter # Wrong diameter
```
In these examples, you create a circle with a radius equal to `42.0`. The `.diameter` property computes its value only the first time you access it. That’s why you see a delay in the first execution and no delay in the second. Note that even though you change the value of the radius, the diameter stays the same.
If the input data for a computed attribute mutates, then you need to recalculate the attribute:
```
# circle.py
from time import sleep
class Circle:
def __init__(self, radius):
self.radius = radius
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, value):
self._diameter = None
self._radius = value
@property
def diameter(self):
if self._diameter is None:
sleep(0.5) # Simulate a costly computation
self._diameter = self._radius * 2
return self._diameter
```
The setter method of the `.radius` property resets `._diameter` to [`None`](https://realpython.com/null-in-python/) every time you change the value of the radius. With this little update, `.diameter` recalculates its value the first time you access it after every mutation of `.radius`:
```
circle = Circle(42.0)
circle.radius
circle.diameter # With delay
circle.diameter # Without delay
circle.radius = 100.0
circle.diameter # With delay
circle.diameter # Without delay
```
Cool! `Circle` works correctly now! It computes the diameter the first time you access it and also every time you change the radius.
Another option to create cached properties is to use [`functools.cached_property()`](https://docs.python.org/3/library/functools.html#functools.cached_property) from the standard library. This function works as a decorator that allows you to transform a method into a cached property. The property computes its value only once and caches it as a normal attribute during the lifetime of the instance:
```
# circle.py
from functools import cached_property
from time import sleep
class Circle:
def __init__(self, radius):
self.radius = radius
@cached_property
def diameter(self):
sleep(0.5) # Simulate a costly computation
return self.radius * 2
```
Here, `.diameter` computes and caches its value the first time you access it. This kind of implementation is suitable for those computations in which the input values don’t mutate. Here’s how it works:
```
circle = Circle(42.0)
circle.diameter # With delay
circle.diameter # Without delay
circle.radius = 100
circle.diameter # Wrong diameter
# Allow direct assignment
circle.diameter = 200
circle.diameter # Cached value
```
When you access `.diameter`, you get its computed value. That value remains the same from this point on. However, unlike `property()`, `cached_property()` doesn’t block attribute mutations unless you provide a proper setter method. That’s why you can update the diameter to `200` in the last couple of lines.
If you want to create a cached property that doesn’t allow modification, then you can use `property()` and [`functools.cache()`](https://docs.python.org/3/library/functools.html#functools.cache) like in the following example:
```
# circle.py
# this import works in python 3.9+
from functools import cache
from time import sleep
class Circle:
def __init__(self, radius):
self.radius = radius
@property
@cache
def diameter(self):
sleep(0.5) # Simulate a costly computation
return self.radius * 2
```
This code stacks `@property` on top of `@cache`. The combination of both decorators builds a cached property that prevents mutations:
```
circle = Circle(42.0)
circle.diameter # With delay
circle.diameter # Without delay
circle.radius = 100
circle.diameter
circle.diameter = 200
```
In these examples, when you try to assign a new value to `.diameter`, you get an `AttributeError` because the setter functionality comes from the internal descriptor of `property`.
<a class="anchor" id="logging_attribute_access_and_mutation"></a>
### Logging Attribute Access and Mutation
Sometimes you need to keep track of what your code does and how your programs flow. A way to do that in Python is to use [`logging`](https://realpython.com/python-logging/). This module provides all the functionality you would require for logging your code. It’ll allow you to constantly watch the code and generate useful information about how it works.
If you ever need to keep track of how and when you access and mutate a given attribute, then you can take advantage of `property()` for that, too:
```
# circle.py
import logging
logging.basicConfig(
format="%(asctime)s: %(message)s",
level=logging.INFO,
datefmt="%H:%M:%S"
)
class Circle:
def __init__(self, radius):
self._msg = '"radius" was {state}. Current value: {value}'
self.radius = radius
@property
def radius(self):
"""The radius property."""
logging.info(self._msg.format(state="accessed", value=str(self._radius)))
return self._radius
@radius.setter
def radius(self, value):
try:
self._radius = float(value)
logging.info(self._msg.format(state="mutated", value=str(self._radius)))
except ValueError:
logging.info('validation error while mutating "radius"')
```
Here, you first import `logging` and define a basic configuration. Then you implement `Circle` with a managed attribute `.radius`. The getter method generates log information every time you access `.radius` in your code. The setter method logs each mutation that you perform on `.radius`. It also logs those situations in which you get an error because of bad input data.
Here’s how you can use `Circle` in your code:
```
circle = Circle(42.0)
circle.radius
circle.radius = 100
circle.radius
circle.radius = "value"
```
Logging useful data from attribute access and mutation can help you debug your code. Logging can also help you identify sources of problematic data input, analyze the performance of your code, spot usage patterns, and more.
<a class="anchor" id="managing_attribute_deletion"></a>
### Managing Attribute Deletion
You can also create properties that implement deleting functionality. This might be a rare use case of `property()`, but having a way to delete an attribute can be handy in some situations.
Say you’re implementing your own [tree](https://en.wikipedia.org/wiki/Tree_(data_structure)) data type. A tree is an [abstract data type](https://en.wikipedia.org/wiki/Abstract_data_type) that stores elements in a hierarchy. The tree components are commonly known as **nodes**. Each node in a tree has a parent node, except for the root node. Nodes can have zero or more children.
Now suppose you need to provide a way to delete or clear the list of children of a given node. Here’s an example that implements a tree node that uses `property()` to provide most of its functionality, including the ability to clear the list of children of the node at hand:
```
# tree.py
class TreeNode:
def __init__(self, data):
self._data = data
self._children = []
@property
def children(self):
return self._children
@children.setter
def children(self, value):
if isinstance(value, list):
self._children = value
else:
del self.children
self._children.append(value)
@children.deleter
def children(self):
self._children.clear()
def __repr__(self):
return f'{self.__class__.__name__}("{self._data}")'
```
In this example, `TreeNode` represents a node in your custom tree data type. Each node stores its children in a Python [list](https://realpython.com/python-lists-tuples/). Then you implement `.children` as a property to manage the underlying list of children. The deleter method calls `.clear()` on the list of children to remove them all:
```
root = TreeNode("root")
child1 = TreeNode("child 1")
child2 = TreeNode("child 2")
root.children = [child1, child2]
root.children
del root.children
root.children
```
Here, you first create a `root` node to start populating the tree. Then you create two new nodes and assign them to `.children` using a list. The [`del`](https://realpython.com/python-keywords/#the-del-keyword) statement triggers the internal deleter method of `.children` and clears the list.
<a class="anchor" id="creating_backward-compatible_class_apis"></a>
### Creating Backward-Compatible Class APIs
As you already know, properties turn method calls into direct attribute lookups. This feature allows you to create clean and Pythonic APIs for your classes. You can expose your attributes publicly without the need for getter and setter methods.
If you ever need to modify how you compute a given public attribute, then you can turn it into a property. Properties make it possible to perform extra processing, such as data validation, without having to modify your public APIs.
Suppose you’re creating an accounting application and you need a base class to manage currencies. To this end, you create a `Currency` class that exposes two attributes, `.units` and `.cents`:
```
class Currency:
def __init__(self, units, cents):
self.units = units
self.cents = cents
# Currency implementation...
```
This class looks clean and Pythonic. Now say that your requirements change, and you decide to store the total number of cents instead of the units and cents. Removing `.units` and `.cents` from your public API to use something like `.total_cents` would break more than one client’s code.
In this situation, `property()` can be an excellent option to keep your current API unchanged. Here’s how you can work around the problem and avoid breaking your clients’ code:
```
# currency.py
CENTS_PER_UNIT = 100
class Currency:
def __init__(self, units, cents):
self._total_cents = units * CENTS_PER_UNIT + cents
@property
def units(self):
return self._total_cents // CENTS_PER_UNIT
@units.setter
def units(self, value):
self._total_cents = self.cents + value * CENTS_PER_UNIT
@property
def cents(self):
return self._total_cents % CENTS_PER_UNIT
@cents.setter
def cents(self, value):
self._total_cents = self.units * CENTS_PER_UNIT + value
# Currency implementation...
```
Now your class stores the total number of cents instead of independent units and cents. However, your users can still access and mutate `.units` and `.cents` in their code and get the same result as before. Go ahead and give it a try!
When you write something upon which many people are going to build, you need to guarantee that modifications to the internal implementation don’t affect how end users work with your classes.
<a class="anchor" id="overriding_properties_in_subclasses"></a>
## Overriding Properties in Subclasses
When you create Python classes that include properties and release them in a package or library, you should expect your users to do a lot of different things with them. One of those things could be **subclassing** them to customize their functionalities. In these cases, your users have to be careful and be aware of a subtle gotcha. If you partially override a property, then you lose the non-overridden functionality.
For example, suppose you’re coding an `Employee` class to manage employee information in your company’s internal accounting system. You already have a class called `Person`, and you think about subclassing it to reuse its functionalities.
`Person` has a `.name` attribute implemented as a property. The current implementation of `.name` doesn’t meet the requirement of returning the name in uppercase letters. Here’s how you end up solving this:
```
# persons.py
class Person:
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
# Person implementation...
class Employee(Person):
@property
def name(self):
return super().name.upper()
# Employee implementation...
```
In `Employee`, you override `.name` to make sure that when you access the attribute, you get the employee name in uppercase:
```
person = Person("John")
person.name
person.name = "John Doe"
person.name
employee = Employee("John")
employee.name
```
Great! `Employee` works as you need! It returns the name using uppercase letters. However, subsequent tests uncover an unexpected behavior:
```
# this should raise AttributeError
employee.name = "John Doe"
```
What happened? Well, when you override an existing property from a parent class, you override the whole functionality of that property. In this example, you reimplemented the getter method only. Because of that, `.name` lost the rest of the functionality from the base class. You don’t have a setter method any longer.
The idea is that if you ever need to override a property in a subclass, then you should provide all the functionality you need in the new version of the property at hand.
<a class="anchor" id="conclusion"></a>
## Conclusion
A **property** is a special type of class member that provides functionality that’s somewhere in between regular attributes and methods. Properties allow you to modify the implementation of instance attributes without changing the public API of the class. Being able to keep your APIs unchanged helps you avoid breaking code your users wrote on top of older versions of your classes.
Properties are the [Pythonic](https://realpython.com/learning-paths/writing-pythonic-code/) way to create **managed attributes** in your classes. They have several use cases in real-world programming, making them a great addition to your skill set as a Python developer.
**In this tutorial, you learned how to:**
- Create **managed attributes** with Python’s `property()`
- Perform **lazy attribute evaluation** and provide **computed attributes**
- Avoid **setter** and **getter** methods with properties
- Create **read-only**, **read-write**, and **write-only** attributes
- Create consistent and **backward-compatible APIs** for your classes
You also wrote several practical examples that walked you through the most common use cases of `property()`. Those examples include input [data validation](#validating-input-values), computed attributes, [logging](https://realpython.com/python-logging/) your code, and more.
| github_jupyter |
---
# Minimizing risks for loan investments - (Keras - Artificial Neural Network)
[by Tomas Mantero](https://www.kaggle.com/tomasmantero)
---
### Table of Contents
1. [Overview](#ch1)
1. [Dataset](#ch2)
1. [Exploratory Data Analysis](#ch3)
1. [Data PreProcessing](#ch4)
1. [Categorical Variables and Dummy Variables](#ch5)
1. [Scaling and Train Test Split](#ch6)
1. [Creating a Model](#ch7)
1. [Training the Model](#ch8)
1. [Evaluation on Test Data](#ch9)
1. [Predicting on a New Customer](#ch10)
<a id="ch1"></a>
## Overview
---
One of the objectives of this notebook is to **show step-by-step how to visualize the dataset and assess whether or not a new customer is likely to pay back the loan.**
LendingClub is a US peer-to-peer lending company, headquartered in San Francisco, California. It was the first peer-to-peer lender to register its offerings as securities with the Securities and Exchange Commission, and to offer loan trading on a secondary market. LendingClub is the world's largest peer-to-peer lending platform.
Given historical data on loans given out with information on whether or not the borrower defaulted (charge-off), can we build a model that can predict whether or not a borrower will pay back their loan? This way in the future when we get a new potential customer **we can assess whether or not they are likely to pay back the loan.**
The following questions will be answered throughout the Kernel:
* ***Which features are available in the dataset?***
* ***What is the distribution of numerical feature values across the samples?***
* ***What is the length of the dataframe?***
* ***What is the total count of missing values per column?***
* ***How many unique employment job titles are there?***
* ***Would you offer this person a loan?***
* ***Did this person actually end up paying back their loan?***
If you have a question or feedback, do not hesitate to write and if you like this kernel,<b><font color='green'> please upvote! </font></b>
<img src="https://images.pexels.com/photos/259165/pexels-photo-259165.jpeg?auto=compress&cs=tinysrgb&h=750&w=1260" title="source: www.pexels.com" width="500" height="500"/>
<br>
<a id="ch2"></a>
## Dataset
---
* We will be using a subset of the LendingClub DataSet obtained from Kaggle: https://www.kaggle.com/wordsforthewise/lending-club
There are many LendingClub data sets on Kaggle. Here is the information on this particular data set:
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>LoanStatNew</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>loan_amnt</td>
<td>The listed amount of the loan applied for by the borrower. If at some point in time, the credit department reduces the loan amount, then it will be reflected in this value.</td>
</tr>
<tr>
<th>1</th>
<td>term</td>
<td>The number of payments on the loan. Values are in months and can be either 36 or 60.</td>
</tr>
<tr>
<th>2</th>
<td>int_rate</td>
<td>Interest Rate on the loan</td>
</tr>
<tr>
<th>3</th>
<td>installment</td>
<td>The monthly payment owed by the borrower if the loan originates.</td>
</tr>
<tr>
<th>4</th>
<td>grade</td>
<td>LC assigned loan grade</td>
</tr>
<tr>
<th>5</th>
<td>sub_grade</td>
<td>LC assigned loan subgrade</td>
</tr>
<tr>
<th>6</th>
<td>emp_title</td>
<td>The job title supplied by the Borrower when applying for the loan.*</td>
</tr>
<tr>
<th>7</th>
<td>emp_length</td>
<td>Employment length in years. Possible values are between 0 and 10 where 0 means less than one year and 10 means ten or more years.</td>
</tr>
<tr>
<th>8</th>
<td>home_ownership</td>
<td>The home ownership status provided by the borrower during registration or obtained from the credit report. Our values are: RENT, OWN, MORTGAGE, OTHER</td>
</tr>
<tr>
<th>9</th>
<td>annual_inc</td>
<td>The self-reported annual income provided by the borrower during registration.</td>
</tr>
<tr>
<th>10</th>
<td>verification_status</td>
<td>Indicates if income was verified by LC, not verified, or if the income source was verified</td>
</tr>
<tr>
<th>11</th>
<td>issue_d</td>
<td>The month which the loan was funded</td>
</tr>
<tr>
<th>12</th>
<td>loan_status</td>
<td>Current status of the loan</td>
</tr>
<tr>
<th>13</th>
<td>purpose</td>
<td>A category provided by the borrower for the loan request.</td>
</tr>
<tr>
<th>14</th>
<td>title</td>
<td>The loan title provided by the borrower</td>
</tr>
<tr>
<th>15</th>
<td>zip_code</td>
<td>The first 3 numbers of the zip code provided by the borrower in the loan application.</td>
</tr>
<tr>
<th>16</th>
<td>addr_state</td>
<td>The state provided by the borrower in the loan application</td>
</tr>
<tr>
<th>17</th>
<td>dti</td>
<td>A ratio calculated using the borrower’s total monthly debt payments on the total debt obligations, excluding mortgage and the requested LC loan, divided by the borrower’s self-reported monthly income.</td>
</tr>
<tr>
<th>18</th>
<td>earliest_cr_line</td>
<td>The month the borrower's earliest reported credit line was opened</td>
</tr>
<tr>
<th>19</th>
<td>open_acc</td>
<td>The number of open credit lines in the borrower's credit file.</td>
</tr>
<tr>
<th>20</th>
<td>pub_rec</td>
<td>Number of derogatory public records</td>
</tr>
<tr>
<th>21</th>
<td>revol_bal</td>
<td>Total credit revolving balance</td>
</tr>
<tr>
<th>22</th>
<td>revol_util</td>
<td>Revolving line utilization rate, or the amount of credit the borrower is using relative to all available revolving credit.</td>
</tr>
<tr>
<th>23</th>
<td>total_acc</td>
<td>The total number of credit lines currently in the borrower's credit file</td>
</tr>
<tr>
<th>24</th>
<td>initial_list_status</td>
<td>The initial listing status of the loan. Possible values are – W, F</td>
</tr>
<tr>
<th>25</th>
<td>application_type</td>
<td>Indicates whether the loan is an individual application or a joint application with two co-borrowers</td>
</tr>
<tr>
<th>26</th>
<td>mort_acc</td>
<td>Number of mortgage accounts.</td>
</tr>
<tr>
<th>27</th>
<td>pub_rec_bankruptcies</td>
<td>Number of public record bankruptcies</td>
</tr>
</tbody>
</table>
---
### Imports
```
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# scaling and train test split
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# creating a model
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.constraints import max_norm
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import load_model
# evaluation on test data
from sklearn.metrics import classification_report,confusion_matrix
```
Before starting, let us make a function to get feature information on the data as a .csv file for easy lookup throughout the notebook.
```
data_info = pd.read_csv('C:/Users/Tomas/Desktop/Carpetas/Programación/Python/Kaggle/Lending Club Loan Data/lending_club_info.csv',index_col='LoanStatNew')
def feat_info(col_name):
print(data_info.loc[col_name]['Description'])
# example
feat_info('mort_acc')
```
### Loading the data
```
df = pd.read_csv('C:/Users/Tomas/Desktop/Carpetas/Programación/Python/Kaggle/Lending Club Loan Data/lending_club_loan_two.csv')
```
**Which features are available in the dataset?**
```
print(df.info())
```
**Preview the data**
```
df.head()
```
**What is the distribution of numerical feature values across the samples?**
```
df.describe().transpose()
```
<a id="ch3"></a>
## Exploratory Data Analysis
---
### Analyze by visualizing data
Get an understanding for which variables are important, view summary statistics, and visualize the data.
### Pearson correlation matrix
We use the Pearson correlation coefficient to examine the strength and direction of the linear relationship between two continuous variables.
The correlation coefficient can range in value from −1 to +1. The larger the absolute value of the coefficient, the stronger the relationship between the variables. For the Pearson correlation, an absolute value of 1 indicates a perfect linear relationship. A correlation close to 0 indicates no linear relationship between the variables.
The sign of the coefficient indicates the direction of the relationship. If both variables tend to increase or decrease together, the coefficient is positive, and the line that represents the correlation slopes upward. If one variable tends to increase as the other decreases, the coefficient is negative, and the line that represents the correlation slopes downward.
* We can see a strong correlation between loan_amnt and installment. (The monthly payment owed by the borrower if the loan originates)
```
sns.set(style="whitegrid", font_scale=1)
plt.figure(figsize=(12,12))
plt.title('Pearson Correlation Matrix',fontsize=25)
sns.heatmap(df.corr(),linewidths=0.25,vmax=0.7,square=True,cmap="GnBu",linecolor='w',
annot=True, annot_kws={"size":10}, cbar_kws={"shrink": .7})
```
### Loan status and loan amount distribution
* This is an imbalanced-classes problem, because we have many more entries of people that fully paid their loans than people that did not pay back.
* We can expect to probably do very well in terms of accuracy but our precision and recall are going to be the true metrics that we will have to evaluate our model based off of.
* In the loan amount distribution we can see spikes at round figures such as even ten-thousand-dollar amounts, indicating that certain amounts are essentially standard loan sizes.
```
f, axes = plt.subplots(1, 2, figsize=(15,5))
sns.countplot(x='loan_status', data=df, ax=axes[0])
sns.distplot(df['loan_amnt'], kde=False, bins=40, ax=axes[1])
sns.despine()
axes[0].set(xlabel='Status', ylabel='')
axes[0].set_title('Count of Loan Status', size=20)
axes[1].set(xlabel='Loan Amount', ylabel='')
axes[1].set_title('Loan Amount Distribution', size=20)
```
### Relationship between loan_amnt, loan_status and installment
```
f, axes = plt.subplots(1, 2, figsize=(15,5))
sns.scatterplot(x='installment', y='loan_amnt', data=df, ax=axes[0])
sns.boxplot(x='loan_status', y='loan_amnt', data=df, ax=axes[1])
sns.despine()
axes[0].set(xlabel='Installment', ylabel='Loan Amount')
axes[0].set_title('Scatterplot between Loan Amount and Installment', size=15)
axes[1].set(xlabel='Loan Status', ylabel='Loan Amount')
axes[1].set_title('Boxplot between Loan Amount and Loan Status', size=15)
```
In case the boxplot is a little hard to read, you can always compare the summary statistics here:
* You can see that the average loan amount for charged-off loans is a little higher than for fully paid loans.
```
df.groupby('loan_status')['loan_amnt'].describe()
```
### Countplot per grade and subgrade
* Essentially this is showing the percentage of charged off loans.
* Looks like it is increasing as the letter grade gets higher.
* Better grades are bluer and the worse grades are redder.
```
f, axes = plt.subplots(1, 2, figsize=(15,5), gridspec_kw={'width_ratios': [1, 2]})
sns.countplot(x='grade', hue='loan_status', data=df, order=sorted(df['grade'].unique()), palette='seismic', ax=axes[0])
sns.countplot(x='sub_grade', data=df, palette='seismic', order=sorted(df['sub_grade'].unique()), ax=axes[1])
sns.despine()
axes[0].set(xlabel='Grade', ylabel='Count')
axes[0].set_title('Count of Loan Status per Grade', size=20)
axes[1].set(xlabel='Sub Grade', ylabel='Count')
axes[1].set_title('Count of Loan Status per Sub Grade', size=20)
plt.tight_layout()
```
Now we are going to create a new column called 'loan_repaid' which will contain a 1 if the loan status was "Fully Paid" and a 0 if it was "Charged Off".
```
df['loan_repaid'] = df['loan_status'].map({'Fully Paid':1,'Charged Off':0})
df[['loan_repaid','loan_status']].head()
```
* The interest rate has essentially the strongest negative correlation with whether or not someone is going to repay their loan.
* If you have an extremely high interest rate, you are going to find it harder to pay off that loan.
```
df.corr()['loan_repaid'].sort_values(ascending=True).drop('loan_repaid').plot.bar(color='green')
```
<a id="ch4"></a>
## Data preprocessing
---
Remove or fill any missing data. Remove unnecessary or repetitive features. Convert categorical string features to dummy variables.
### Missing data
***What is the length of the dataframe?***
```
print(len(df))
```
***What is the total count of missing values per column?***
We have missing values in emp_title, emp_length, title, revol_util, mort_acc and pub_rec_bankruptcies.
```
df.isnull().sum()
feat_info('emp_title')
print('\n')
feat_info('emp_length')
print('\n')
feat_info('title')
print('\n')
feat_info('revol_util')
print('\n')
feat_info('mort_acc')
print('\n')
feat_info('pub_rec_bankruptcies')
```
### Percentage of missing values per column
* In the plot we can see how much data is missing as a percentage of the total data.
* Notice that almost 10% of the mort_acc (mortgage account) values are missing, so we cannot simply drop all those rows.
* On the other hand, we could safely drop the rows with missing values in revol_util or pub_rec_bankruptcies.
```
plt.figure(figsize=(10,5))
((df.isnull().sum())/len(df)*100).plot.bar(title='Percentage of missing values per column', color='green')
```
Let's examine emp_title and emp_length to see whether it will be okay to drop them.
***How many unique employment job titles are there?***
Realistically there are too many unique job titles to try to convert this to a dummy variable feature.
```
print(df['emp_title'].nunique())
df['emp_title'].value_counts()
```
Let's drop emp_title:
```
df = df.drop('emp_title',axis=1)
```
Now we compute, for each employment length category, the ratio of charged-off loans to fully paid loans. Essentially this tells us, per employment category, how charge-offs compare to repayments.
* We can see that across the extremes it looks to be extremely similar.
* Looks like this particular feature of employment length doesn't actually have some extreme differences on the charge off rates.
* Looks like regardless of what actual employment length you have if you were to pick someone, about 20% of them are going to have not paid back their loans.
```
per_charge_off = df[df['loan_repaid'] == 0]['emp_length'].value_counts() / df[df['loan_repaid'] == 1]['emp_length'].value_counts()
per_charge_off.plot.bar(color='green')
```
Let's drop emp_length:
```
df = df.drop('emp_length', axis=1)
```
If we review the title column vs the purpose column, it looks like there is repeated information:
```
df[['title', 'purpose']].head(10)
```
The title column is simply a string subcategory/description of the purpose column. Let's drop the column.
```
df = df.drop('title', axis=1)
```
Now we are going to deal with the missing data of mort_acc. Since mort_acc has a strong correlation with total_acc we will group the dataframe by the total_acc and calculate the mean value for the mort_acc per total_acc entry. To get the result below:
```
print("Mean of mort_acc column per total_acc")
total_acc_avg = df.groupby('total_acc').mean()['mort_acc']
print(total_acc_avg)
```
Let's fill in the missing mort_acc values based on their total_acc value. If the mort_acc is missing, then we will fill in that missing value with the mean value corresponding to its total_acc value from the Series we created above. This involves using an .apply() method with two columns.
```
total_acc_avg = df.groupby('total_acc').mean()['mort_acc']
def fill_mort_acc(total_acc, mort_acc):
    """
    Fill a missing mort_acc value using the group average for its total_acc.

    `total_acc_avg` (built above via a groupby) maps each total_acc value to
    the mean mort_acc of that group. It is consulted only when the row's
    mort_acc is NaN; otherwise the original mort_acc is returned untouched.
    """
    if not np.isnan(mort_acc):
        return mort_acc
    # Missing: substitute the group mean for this row's total_acc.
    return total_acc_avg[total_acc]
df['mort_acc'] = df.apply(lambda x: fill_mort_acc(x['total_acc'], x['mort_acc']), axis=1)
```
revol_util and the pub_rec_bankruptcies have missing data points, but they account for less than 0.5% of the total data. Let's remove the rows that are missing those values in those columns with dropna().
```
df = df.dropna()
# check for missing values
df.isnull().sum()
```
<a id="ch5"></a>
## Categorical variables and dummy variables
---
We're done working with the missing data! Now we just need to deal with the string values due to the categorical columns.
### Term feature
Convert the term feature into either a 36 or 60 integer numeric data type using .apply() or .map().
```
print(df['term'].value_counts())
print('\n')
print('\n')
df['term'] = df['term'].apply(lambda term: int(term[:3]))
print(df['term'].value_counts())
```
### Grade feature
We already know grade is part of sub_grade, so just drop the grade feature.
```
df = df.drop('grade', axis=1)
```
Let's convert the subgrade into dummy variables. Then concatenate these new columns to the original dataframe. Remember to drop the original subgrade column and to add drop_first=True to your get_dummies call.
```
subgrade_dummies = pd.get_dummies(df['sub_grade'],drop_first=True)
df = pd.concat([df.drop('sub_grade',axis=1),subgrade_dummies],axis=1)
```
### Verification_status, application_type, initial_list_status, purpose features
Let's convert these columns into dummy variables and concatenate them with the original dataframe.
```
dummies = pd.get_dummies(df[['verification_status', 'application_type','initial_list_status','purpose']], drop_first=True)
df = df.drop(['verification_status', 'application_type','initial_list_status','purpose'],axis=1)
df = pd.concat([df,dummies],axis=1)
```
### Home_ownership feature
Convert these to dummy variables, but replace NONE and ANY with OTHER, so that we end up with just 4 categories, MORTGAGE, RENT, OWN, OTHER. Then concatenate them with the original dataframe.
```
df['home_ownership'] = df['home_ownership'].replace(['NONE', 'ANY'], 'OTHER')
dummies = pd.get_dummies(df['home_ownership'],drop_first=True)
df = df.drop('home_ownership',axis=1)
df = pd.concat([df,dummies],axis=1)
```
### Address feature
Let's feature engineer a zip code column from the address in the data set. Create a column called 'zip_code' that extracts the zip code from the address column.
```
df['zip_code'] = df['address'].apply(lambda address:address[-5:])
dummies = pd.get_dummies(df['zip_code'],drop_first=True)
df = df.drop(['zip_code','address'],axis=1)
df = pd.concat([df,dummies],axis=1)
```
### Issue_d feature
This would be data leakage, we wouldn't know beforehand whether or not a loan would be issued when using our model, so in theory we wouldn't have an issue_date, drop this feature.
```
df = df.drop('issue_d', axis=1)
```
### Earliest_cr_line feature
This appears to be a historical time stamp feature. Extract the year from this feature using a .apply function, then convert it to a numeric feature. Set this new data to a feature column called 'earliest_cr_year'. Then drop the earliest_cr_line feature.
```
df['earliest_cr_year'] = df['earliest_cr_line'].apply(lambda date:int(date[-4:]))
df = df.drop('earliest_cr_line', axis=1)
df.select_dtypes(['object']).columns
df = df.drop('loan_status',axis=1)
```
<a id="ch6"></a>
## Scaling and train test split
---
```
# Features
X = df.drop('loan_repaid',axis=1).values
# Label
y = df['loan_repaid'].values
# Split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
```
### Normalizing / scaling the data
We scale the feature data. To prevent data leakage from the test set, we only fit our scaler to the training set.
```
scaler = MinMaxScaler()
# fit and transform (fit on the training set only, to avoid data leakage)
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# everything has been scaled between 0 and 1
print('Max: ',X_train.max())
print('Min: ', X_train.min())
```
<a id="ch7"></a>
## Creating a model
---
**Dropout Layers**
* Dropout is a technique where randomly selected neurons are ignored during training. They are “dropped-out” randomly.
* Simply put, dropout refers to ignoring units (i.e. neurons) during the training phase of certain set of neurons which is chosen at random.
* Helps prevent overfitting.
```
model = Sequential()
# input layer
model.add(Dense(78,activation='relu'))
model.add(Dropout(0.2))
# hidden layer
model.add(Dense(39,activation='relu'))
model.add(Dropout(0.2))
# hidden layer
model.add(Dense(19,activation='relu'))
model.add(Dropout(0.2))
# output layer
model.add(Dense(1, activation='sigmoid'))
# compile model
model.compile(optimizer="adam", loss='binary_crossentropy')
```
### Early stopping
This callback allows you to specify the performance measure to monitor, the trigger, and once triggered, it will stop the training process. Basically, it stops training when a monitored quantity has stopped improving.
```
early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)
```
<a id="ch8"></a>
## Training the model
---
Now that the model is ready, we can fit the model into the data.
```
model.fit(x=X_train,
y=y_train,
epochs=400,
batch_size=256,
validation_data=(X_test, y_test),
callbacks=[early_stop])
```
### Training loss per epoch
* This plot shows the training loss per epoch.
* This plot helps us to see if there is overfitting in the model. In this case there is no overfitting because both lines go down at the same time.
```
losses = pd.DataFrame(model.history.history)
plt.figure(figsize=(15,5))
sns.lineplot(data=losses,lw=3)
plt.xlabel('Epochs')
plt.ylabel('')
plt.title('Training Loss per Epoch')
sns.despine()
```
<a id="ch9"></a>
## Evaluation on test data
---
***Classification Report***
* **Accuracy** is just the actual percent that we got right, in this case it was 89%.
* Note that since the data is imbalanced, if we were to make a model that approves all the loans, it would have an 80% accuracy.
* For example: (317696/len(df)) = 0.80 where 317696 is `df[loan_repaid].value_counts()`
* So do not be fooled by a model that returns 80% accuracy: a model that always predicts the loan will be repaid would itself be 80% accurate on this test data set.
* The **recall** means "how many of this class you find over the whole number of element of this class"
* The **precision** will be "how many are correctly classified among that class"
* The **f1-score** is the harmonic mean between precision & recall
* The **support** is the number of occurrences of the given class in your dataset.
* Precision is 0.97, which is really good. On the other hand, recall is not good.
* We should focus in improving the f1-score in the 0 class. We should improve the 0.61.
***Confusion Matrix***
* A confusion matrix is a technique for summarizing the performance of a classification algorithm.
* Classification accuracy alone can be misleading if you have an unequal number of observations in each class, which is our case.
* We have 230 Type I errors (False Positive) and 8698 Type II errors (False Negative).
* 6960 True Positive and 63156 True Negative.
```
# Evaluate the trained classifier on the held-out test set.
# NOTE(review): Sequential.predict_classes was removed in TF 2.6+; with a
# newer TensorFlow use (model.predict(X_test) > 0.5).astype("int32") instead.
predictions = model.predict_classes(X_test)
print('Classification Report:')
print(classification_report(y_test, predictions))
print('\n')
# Fixed typo in the printed heading ("Matirx" -> "Matrix").
print('Confusion Matrix:')
print(confusion_matrix(y_test, predictions))
```
<a id="ch10"></a>
## Predicting on a new customer
---
***Would you offer this person a loan?***
```
rnd.seed(101)
random_ind = rnd.randint(0,len(df))
new_customer = df.drop('loan_repaid',axis=1).iloc[random_ind]
new_customer
# we need to reshape this to be in the same shape of the training data that the model was trained on
model.predict_classes(new_customer.values.reshape(1,78))
```
***Did this person actually end up paying back their loan?***
```
# the prediction was right
df.iloc[random_ind]['loan_repaid']
```
## References
* [An Introduction to Statistical Learning with Applications in R](http://faculty.marshall.usc.edu/gareth-james/ISL/) - This book provides an introduction to statistical learning methods.
* [Python for Data Science and Machine Learning Bootcamp](https://www.udemy.com/course/python-for-data-science-and-machine-learning-bootcamp/) - Use Python for Data Science and Machine Learning.
**Thank you** for taking the time to read through my exploration of a Kaggle dataset. I look forward to doing more!
If you have a question or feedback, do not hesitate to comment and if you like this kernel,<b><font color='green'> please upvote! </font></b>
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Project: **Finding Lane Lines on the Road**
***
### GOAL :
Make a pipeline that finds lane lines on the road for basic understanding of concepts
Reflect on your work in a written report
---
## Pipeline :
To achieve the first goal to find lane lines I am using canny edge detection technique.
**The main steps involved in canny edge detection are as follow:**
1. Grayscale Conversion
2. Gaussian Blur
3. Determine the Intensity Gradients
4. Non Maximum Suppression - thins the edges
5. Double Thresholding - reduces the noise (takes out some edges that were detected which were actually not present)
6. Edge Tracking by Hysteresis - to connect weak edges to strong ones
Steps followed for canny edge detection in this project :
1. Grayscale conversion using cv2
2. Gaussian Blur using cv2
3. Canny edge using cv2
- Noise Reduction
- Finding Intensity Gradient of the Image
- Non-maximum Suppression
- Hysteresis Thresholding
4. Masking area of interest
5. Hough Lines - to identify the lane lines
6. addWeighted function - to show lines on the original image
---
Here it is what to expect after completion of pipeline :
<figure>
<img src="test_images/solidWhiteRight.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Before pipeline process </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> After pipeline process</p>
</figcaption>
</figure>
## Import Packages
```
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
```
## Read in an Image
```
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
```
## Ideas for Lane Detection Pipeline
**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
`cv2.inRange()` for color selection
`cv2.fillPoly()` for regions selection
`cv2.line()` to draw lines on an image given endpoints
`cv2.addWeighted()` to coadd / overlay two images
`cv2.cvtColor()` to grayscale or change color
`cv2.imwrite()` to output images to file
`cv2.bitwise_and()` to apply a mask to an image
**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
## Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
```
import math
def grayscale(img):
    """Applies the Grayscale transform
    This will return an image with only one color channel
    but NOTE: to see the returned image as grayscale
    (assuming your grayscaled image is called 'gray')
    you should call plt.imshow(gray, cmap='gray')"""
    # Frames here come from matplotlib/moviepy, which load images as RGB.
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform
    Gradients above high_threshold become edges; gradients between the two
    thresholds are kept only when connected to a strong edge (hysteresis).
    """
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel
    kernel_size is used for both dimensions of the square kernel and must be
    a positive odd integer (an OpenCV requirement for GaussianBlur).
    """
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """
    Keep only the polygonal region of `img` defined by `vertices`.

    Pixels inside the polygon are preserved; everything outside is set to
    black. Works for both single-channel and multi-channel images.
    """
    # Fill value: full intensity on every channel the image actually has.
    fill = (255,) * img.shape[2] if len(img.shape) > 2 else 255
    # Paint the polygon onto a black mask, then AND the mask with the image.
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, fill)
    return cv2.bitwise_and(img, mask)
# Last-known endpoints of the two fitted lane lines, cached across video
# frames so that a frame with no usable Hough segments can redraw the lines
# from the previous good frame (539 is the bottom row of a 960x540 frame).
ymax_left_g=539
ymin_left_g=0
xmax_left_g=0
xmin_left_g=0
ymax_right_g=539
ymin_right_g=0
xmax_right_g=0
xmin_right_g=0
# Flag set to 1 once both lanes have been fitted (reset to 0 before each
# video). NOTE(review): it is written but never read in this notebook.
counter=0
def draw_lines(img, lines, color=[255, 0, 0], thickness=10):
    """
    NOTE: this is the function you might want to use as a starting point once you want to
    average/extrapolate the line segments you detect to map out the full
    extent of the lane (going from the result shown in raw-lines-example.mp4
    to that shown in P1_example.mp4).
    Think about things like separating line segments by their
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line. Then, you can average the position of each of
    the lines and extrapolate to the top and bottom of the lane.
    This function draws `lines` with `color` and `thickness`.
    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below

    Implementation notes (review):
    - Segments whose |slope| falls outside (0.4, 0.7) are discarded as noise.
    - With image coordinates (y grows downward), a POSITIVE slope belongs to
      the right lane line, so the `left_*` variables below actually collect
      the right lane's points (and `right_*` the left lane's); the inline
      '#right line'/'#left line' comments label the lane really being drawn.
    - When one or both lanes yield no segments, the endpoints cached in the
      *_g module globals from the last good frame are reused.
    - The mutable default `color` list is never mutated here, so it is safe.
    """
    # Per-side slope sums/counts: computed but never read afterwards
    # (dead code kept as in the original).
    slope1=0
    slope2=0
    count1=0
    count2=0
    # Segment endpoints grouped by slope sign (see docstring for which
    # physical lane each group corresponds to).
    left_x=[]
    left_y=[]
    right_x=[]
    right_y=[]
    global counter
    global ymax_left_g
    global ymin_left_g
    global xmax_left_g
    global xmin_left_g
    global ymax_right_g
    global ymin_right_g
    global xmax_right_g
    global xmin_right_g
    for vertices in lines :
        # Each Hough segment has the form [[x1, y1, x2, y2]].
        slope=(vertices[0][3]-vertices[0][1])/(vertices[0][2]-vertices[0][0])
        #print('Slope : ', slope )
        #print('Slope : ', (lines[3]-vertices[1])/(vertices[2]-vertices[0]) )
        #print('vertices : ' , vertices) #==========================
        if abs(slope)<0.4 or abs(slope)>0.7:
            # Ignore invalid lines
            #cv2.line(img, (xmin_left_g, ymin_left_g), (xmax_left_g, ymax_left_g), color, thickness)
            #cv2.line(img, (xmin_right_g,ymin_right_g), (xmax_right_g, ymax_right_g), color, thickness)
            continue
        elif slope>0 :
            slope1=slope1+round(slope, 4)
            count1=count1+1
            left_x.append(vertices[0][0])
            left_x.append(vertices[0][2])
            left_y.append(vertices[0][1])
            left_y.append(vertices[0][3])
        else :
            slope2=slope2+round(slope,4)
            count2=count2+1
            right_x.append(vertices[0][0])
            right_x.append(vertices[0][2])
            right_y.append(vertices[0][1])
            right_y.append(vertices[0][3])
    # No usable segments at all: redraw both lanes from the cached endpoints.
    if len(left_x)==0 and len(right_x)==0 :
        #right line
        cv2.line(img, (xmin_left_g, ymin_left_g), (xmax_left_g, ymax_left_g), color, thickness)
        #left line
        cv2.line(img, (xmin_right_g,ymin_right_g), (xmax_right_g, ymax_right_g), color, thickness)
    # No positive-slope segments: fit a line through the negative-slope
    # points and fall back to the cache for the other lane.
    elif len(left_x)==0 :
        # np.polyfit(..., 1) returns [slope, intercept]; x is recovered
        # from y via x = (y - intercept) / slope below.
        slope_c_right=np.polyfit(right_x,right_y,1)
        ymax_right=539
        ymin_right=np.min(right_y)
        xmax_right= int((ymax_right-slope_c_right[1])/slope_c_right[0])
        xmin_right= int((ymin_right-slope_c_right[1])/slope_c_right[0])
        #right line
        cv2.line(img, (xmin_left_g, ymin_left_g), (xmax_left_g, ymax_left_g), color, thickness)
        #left line
        cv2.line(img, (xmin_right,ymin_right), (xmax_right, ymax_right), color, thickness)
    # No negative-slope segments: mirror case of the branch above.
    elif len(right_x)==0:
        slope_c_left=np.polyfit(left_x,left_y,1)
        ymax_left=539
        ymin_left=np.min(left_y)
        xmax_left= int((ymax_left-slope_c_left[1])/slope_c_left[0])
        xmin_left= int((ymin_left-slope_c_left[1])/slope_c_left[0])
        #right line
        cv2.line(img, (xmin_left, ymin_left), (xmax_left, ymax_left), color, thickness)
        #left line
        cv2.line(img, (xmin_right_g,ymin_right_g), (xmax_right_g, ymax_right_g), color, thickness)
    # Both sides found: fit each lane, refresh the cache, then draw.
    else:
        slope_c_left=np.polyfit(left_x,left_y,1)
        ymax_left=539
        ymin_left=325 #np.min(left_y)
        xmax_left= int((ymax_left-slope_c_left[1])/slope_c_left[0])
        xmin_left= int((ymin_left-slope_c_left[1])/slope_c_left[0])
        slope_c_right=np.polyfit(right_x,right_y,1)
        ymax_right=539
        ymin_right=325 #np.min(right_y)
        xmax_right= int((ymax_right-slope_c_right[1])/slope_c_right[0])
        xmin_right= int((ymin_right-slope_c_right[1])/slope_c_right[0])
        # Cache this frame's endpoints for frames with missing detections.
        ymax_left_g=ymax_left
        ymin_left_g=ymin_left
        xmax_left_g=xmax_left
        xmin_left_g=xmin_left
        ymax_right_g=ymax_right
        ymin_right_g=ymin_right
        xmax_right_g=xmax_right
        xmin_right_g=xmin_right
        counter=1
        #right line
        cv2.line(img, (xmin_left, ymin_left), (xmax_left, ymax_left), color, thickness)
        #left line
        cv2.line(img, (xmin_right,ymin_right), (xmax_right, ymax_right), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    Run the probabilistic Hough transform on a Canny edge image and render
    the resulting lane lines onto a black RGB canvas of the same size.
    """
    segments = cv2.HoughLinesP(
        img, rho, theta, threshold, np.array([]),
        minLineLength=min_line_len, maxLineGap=max_line_gap)
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(canvas, segments)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """
    `img` is the output of the hough_lines(), An image with lines drawn on it.
    Should be a blank image (all black) with lines drawn on it.
    `initial_img` should be the image before any processing.
    The result image is computed as follows:
    initial_img * α + img * β + γ
    NOTE: initial_img and img must be the same shape!
    """
    # β=1 keeps the drawn lines fully opaque; α<1 slightly dims the frame.
    return cv2.addWeighted(initial_img, α, img, β, γ)
```
## Test Images
Build your pipeline to work on the images in the directory "test_images"
**You should make sure your pipeline works well on these images before you try the videos.**
```
import os
os.listdir("test_images/")
```
## Build a Lane Finding Pipeline
Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
```
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
plt.imshow(image)
plt.figure() #to print multiple images
#image_working= np.copy(image) #to copy an image
gray = grayscale(image) #grayscale conversion
plt.imshow(gray, cmap='gray')
plt.figure() #multiple images in same output
kernel_size = 7
blur_gray = gaussian_blur(gray,kernel_size)
#setting threshold values
highThreshold = 180*0.7;
lowThreshold = highThreshold*0.3;
edges=cv2.Canny(blur_gray,lowThreshold,highThreshold)
plt.imshow(edges, cmap='gray')
## perform the region_of_interest, here or earlier
#taking region_of_interest as traiangle
upper_left=[400,320]
upper_right=[550,320]
left_bottom=[150,539]
right_bottom=[900,539]
triangle = np.array([ upper_left, left_bottom , right_bottom,upper_right])
masked_image=region_of_interest(edges,[triangle])
plt.imshow(masked_image,cmap='gray')
print (image.shape[1])
#hough_transform, hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap)
image_hough=hough_lines(masked_image,2,np.pi/180,25,25,10)
plt.imshow(image_hough,cmap='gray')
#weighted_img
w_img=weighted_img(image_hough,image)
plt.imshow(w_img,cmap='gray')
mpimg.imsave("test-after.png", w_img)
```
## Test on Videos
You know what's cooler than drawing lanes over images? Drawing lanes over video!
We can test our solution on two provided videos:
`solidWhiteRight.mp4`
`solidYellowLeft.mp4`
**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
**If you get an error that looks like this:**
```
NeedDownloadError: Need ffmpeg exe.
You can download it by calling:
imageio.plugins.ffmpeg.download()
```
**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
```
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
```
## Reading in an image from video clip
```
#reading in an image from video clip
clipTest = VideoFileClip('test_videos/solidWhiteRight.mp4')
#clipTest = VideoFileClip('test_videos/solidYellowLeft.mp4')
n_frames = sum(1 for x in clipTest.iter_frames())
print(n_frames)
count=0
for frame in clipTest.iter_frames():
count=count+1
if count==174:
image=frame
break
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
def process_image(image):
    """
    Full lane-detection pipeline for one RGB video frame.

    Steps: grayscale -> Gaussian blur -> Canny edges -> polygonal region
    mask -> Hough line fitting -> blend the drawn lines onto the frame.
    Returns a color image of the same shape with the lane lines overlaid.
    """
    # Edge detection on a smoothed grayscale copy of the frame.
    blurred = gaussian_blur(grayscale(image), 7)
    high_t = 180 * 0.7
    low_t = high_t * 0.3
    edges = cv2.Canny(blurred, low_t, high_t)
    # Trapezoidal region of interest covering the road ahead
    # (upper-left, bottom-left, bottom-right, upper-right).
    roi = np.array([[450, 320], [130, 539], [900, 539], [550, 320]])
    masked = region_of_interest(edges, [roi])
    # Fit/draw the lane lines, then overlay them on the original frame.
    lanes = hough_lines(masked, 2, np.pi / 180, 25, 25, 5)
    return weighted_img(lanes, image)
```
Let's try the one with the solid white lane on the right first ...
```
global counter
counter=0
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
```
Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
```
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
```
## Improve the draw_lines() function
**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
Now for the one with the solid yellow lane on the left. This one's more tricky!
```
global counter
counter=0
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
```
## Reflections and Thoughts
There are number of ways in which the current implementation of pipelines can be improved. A running average of the lane lines can be maintained in case an erroneous frame appears. Pipeline process here does not take into account the dark and light patches on the road due to shadows, rains etc. An approach to this can be included in the pipeline process.
Running average can be implemented by storing the values as global variables. To take care of light and dark patches we can work on hsv colorspace.
## Optional Challenge
Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
```
'''challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)'''
'''HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))'''
```
| github_jupyter |
# Plotting the cheapest and the most expensive houses for sale in Mexico City
[elnortescrapper](https://github.com/rafrodriguez/elnortescrapper) is a custom-made Python web scrapper for advertisements of houses for sale in Mexico that are listed in [Avisos de ocasión](http://www.avisosdeocasion.com).
It was used to retrieve the advertisements of houses for sale in Mexico City into a CSV file.
This notebook shows how to use that information to plot the 300 cheapest and the 300 most expensive houses for sale in Mexico City
```
import pandas
from pandas import DataFrame
df = pandas.read_csv('mexico_city_houses.csv')
df.count()
df.columns
df.head()
# Prepare the column "precio" (price)
# NOTE(review): DataFrame.ix was deprecated in pandas 0.20 and removed in
# pandas 1.0; with a modern pandas these assignments should use .loc.
# Remove the word 'pesos'
df_with_pesos_removed = df[ df['precio'].str.contains(' pesos') ]['precio'].str.replace(' pesos', '')
df.ix[df['precio'].str.contains(' pesos'), 'precio'] = df_with_pesos_removed
# Remove the commas
df.ix[:,'precio'] = df['precio'].str.replace(',','')
# Convert the prices in USD ("dólares") to MXN using the live exchange rate
from forex_python.converter import CurrencyRates
exchange_rates = CurrencyRates()
USD_to_MXN = exchange_rates.get_rate('USD','MXN')
df.ix[df['precio'].str.contains('dólares'),'precio'] = df[ df['precio'].str.contains('dólares')]['precio'].str.replace(' dólares','').str.replace(',','').astype('int')*USD_to_MXN
# Convert the 'precio' column to numeric
df.ix[:,'precio'] = pandas.to_numeric(df['precio'])
print("(Converted USD prices with 1 USD =",USD_to_MXN," MXN)")
# We are going to plot the n most expensive and n cheapest houses for sale
n = 300
from string import Template
# Template for the final html with the embedded map
# It has two placeholders: one for the javascript code of the map and one for the google maps api key
html_template = Template("""
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="initial-scale=1.0, user-scalable=no">
<meta charset="utf-8">
<title>Map</title>
<style>
#map {
height: 500px;
width: 1000px;
}
html, body {
height: 100%;
margin: 0;
padding: 0;
}
</style>
</head>
<body>
<div id="map"></div>
<script>
$map_js
</script>
$google_maps_api_key
</body>
</html>
""")
# Template for the javascript code for the map.
# FIX(review): the second marker loop previously read its `title` and `url`
# fields from markers_set_1, so the cheapest-house markers showed the wrong
# labels/links (and undefined values when the lists differed in length).
map_js_template = Template("""
function initMap() {
var map = new google.maps.Map(document.getElementById('map'));
var bounds = new google.maps.LatLngBounds();
// Markers of most expensive houses
var markers_set_1 = [$markers_1];
for( i = 0; i < markers_set_1.length; i++ ) {
marker_position = new google.maps.LatLng(markers_set_1[i][1], markers_set_1[i][2]);
var marker = new google.maps.Marker({
position: marker_position,
map: map,
title: markers_set_1[i][0],
url: markers_set_1[i][3]
});
marker.setIcon('http://maps.google.com/mapfiles/ms/icons/green-dot.png')
// Update the bounds
bounds.extend(marker_position)
// Add the InfoWindow as a property of each marker in order
// to ensure that it is displayed next to it
marker.info = new google.maps.InfoWindow({
content: markers_set_1[i][0] + '<br> <a target="_blank" href="'+markers_set_1[i][3]+'">Open ad</a>'
});
// Listener to open the InfoWindow
google.maps.event.addListener(marker, 'click', function() {
this.info.open(map, this);
});
// If closing an opened InfoWindow when another part of the map is clicked,
// add a listener to the map here, and keep track of the last opened InfoWindow
}
// Markers of cheapest houses
var markers_set_2 = [$markers_2];
for( i = 0; i < markers_set_2.length; i++ ) {
marker_position = new google.maps.LatLng(markers_set_2[i][1], markers_set_2[i][2]);
var marker = new google.maps.Marker({
position: marker_position,
map: map,
title: markers_set_2[i][0],
url: markers_set_2[i][3]
});
marker.setIcon('http://maps.google.com/mapfiles/ms/icons/purple-dot.png')
// Update the bounds
bounds.extend(marker_position)
// Add the InfoWindow as a property of each marker in order
// to ensure that it is displayed next to the marker
// and not next to the last marker
marker.info = new google.maps.InfoWindow({
content: markers_set_2[i][0] + '<br> <a target="_blank" href="'+markers_set_2[i][3]+'">Open ad</a>'
});
google.maps.event.addListener(marker, 'click', function() {
this.info.open(map, this);
});
// Adjust the bounds of the map
map.fitBounds(bounds);
map.setCenter(bounds.getCenter());
}
}"""
)
# Choose the entries that have location and sort the DataFrame by price
df_by_price_asc = df[ pandas.notnull(df['latitude'])].sort_values(['precio'], ascending=True)
# Markers for the most expensive houses, in the form of a javascript list
markers_1 = ""
for index, element in df_by_price_asc.ix[:,['precio', 'colonia', 'latitude', 'longitude', 'url']].tail(n).iterrows():
precio = "$"+"{:,}".format(int(element['precio']))
colonia = str(element['colonia'])
latitude = str(element['latitude'])
longitude = str(element['longitude'])
url = element['url']
markers_1 += "['"+precio+" ("+colonia+")',"+latitude+","+longitude+",'"+url+"'],\n"
# Markers for the cheapest houses, in the form of a javascript list
markers_2 = ""
for index, element in df_by_price_asc.ix[:,['precio', 'colonia', 'latitude', 'longitude', 'url']].head(n).iterrows():
precio = "$"+"{:,}".format(int(element['precio']))
colonia = str(element['colonia'])
latitude = str(element['latitude'])
longitude = str(element['longitude'])
url = element['url']
markers_2 += "['"+precio+" ("+colonia+")',"+latitude+","+longitude+",'"+url+"'],\n"
# Replace in the template of the map
map_js = map_js_template.safe_substitute({'markers_1':markers_1,'markers_2':markers_2})
# Replace the key and the javascript of the map in the final html template
google_maps_api_key = '''<script async defer
src="https://maps.googleapis.com/maps/api/js?key=AIzaSyByfLrvUSff1YaEZq1r1vDT9xhW8-6nZOc&callback=initMap">
</script>'''
final_html = html_template.safe_substitute({'map_js':map_js, 'google_maps_api_key':google_maps_api_key})
```
<b style="color:purple;">Purple:</b> The cheapest houses for sale
<b style="color:green;">Green:</b> The most expensive houses for sale
```
from IPython.display import HTML
HTML(final_html)
```
| github_jupyter |
```
import pandas as pd
%pwd
node_features_file = "../../generate_node_features/corpus_2020_audience_overlap_level_0_and_1_node_features.csv"
edge_file = "../../generate_node_features/combined_data_corpus_2020_level_0_1_df_edges.csv"
node_features_df = pd.read_csv(node_features_file, index_col=0)
node_features_df.head()
node_features_df.info()
node_features_df.alexa_ranks = node_features_df.alexa_ranks.fillna(0)
node_features_df.total_sites_linking_ins = node_features_df.total_sites_linking_ins.fillna(0)
node_features_df.info()
```
# Normalizing features
```
node_features_df['normalized_alexa_rank'] = node_features_df['alexa_ranks'].apply(lambda x: 1/x if x else 0)
import math
node_features_df['normalized_total_sites_linked_in'] = node_features_df['total_sites_linking_ins'].apply(lambda x: math.log2(x) if x else 0)
```
---
```
edge_df = pd.read_csv(edge_file)
edge_df.head()
edge_df.info()
import stellargraph as sg
G = sg.StellarGraph(node_features_df[['normalized_alexa_rank', 'normalized_total_sites_linked_in']], edge_df)
print(G.info())
```
# Unsupervised Deep Graph Infomax
```
from stellargraph.mapper import (
CorruptedGenerator,
FullBatchNodeGenerator,
GraphSAGENodeGenerator,
HinSAGENodeGenerator,
ClusterNodeGenerator,
)
from stellargraph import StellarGraph
from stellargraph.layer import GCN, DeepGraphInfomax, GraphSAGE, GAT, APPNP, HinSAGE
from tensorflow import keras
```
1. Specify the other optional parameter values: root nodes, the number of walks to take per node, the length of each walk, and random seed.
```
nodes = list(G.nodes())
number_of_walks = 1
length = 5
```
2. Create the UnsupervisedSampler instance with the relevant parameters passed to it.
```
fullbatch_generator = FullBatchNodeGenerator(G, sparse=False)
gcn_model = GCN(layer_sizes=[128], activations=["relu"], generator=fullbatch_generator)
corrupted_generator = CorruptedGenerator(fullbatch_generator)
gen = corrupted_generator.flow(G.nodes())
from tensorflow.keras import Model
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
```
3. Create a node pair generator:
```
infomax = DeepGraphInfomax(gcn_model, corrupted_generator)
x_in, x_out = infomax.in_out_tensors()
deep_graph_infomax_model = Model(inputs=x_in, outputs=x_out)
deep_graph_infomax_model.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, optimizer=Adam(lr=1e-3))
from stellargraph.utils import plot_history
epochs = 100
es = EarlyStopping(monitor="loss", min_delta=0, patience=20)
history = deep_graph_infomax_model.fit(gen, epochs=epochs, verbose=0, callbacks=[es])
plot_history(history)
x_emb_in, x_emb_out = gcn_model.in_out_tensors()
# for full batch models, squeeze out the batch dim (which is 1)
x_out = tf.squeeze(x_emb_out, axis=0)
emb_model = Model(inputs=x_emb_in, outputs=x_out)
node_features_fullbactch_generator = fullbatch_generator.flow(node_features_df.index)
node_embeddings = emb_model.predict(node_features_fullbactch_generator)
embeddings_wv = dict(zip(node_features_df.index.tolist(), node_embeddings))
embeddings_wv['crooked.com']
class ModelWrapper:
    """Minimal adapter around precomputed node embeddings.

    Downstream helpers (e.g. ``train_model``) appear to expect a
    gensim/node2vec-style object exposing a ``wv`` mapping of
    node id -> embedding vector and a human-readable name via ``str()``;
    this wrapper supplies exactly that for the Deep Graph Infomax output.
    """

    def __init__(self, embeddings_wv):
        # Store the node-id -> embedding lookup under the conventional name.
        self.wv = embeddings_wv

    def __str__(self):
        return 'Unsupervised Deep Graph Infomax'
import sys,os
sys.path.append("/home/panayot/Documents/site_similarity")
from utils.notebook_utils import train_model
data_year = '2020'
node2vec_model = ModelWrapper(embeddings_wv)
%pwd
%run ../../utils/notebook_utils.py
from sklearn import svm
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegressionCV
result_report = []
clf = LogisticRegressionCV(Cs=10, cv=5, scoring="accuracy", multi_class="ovr", max_iter=300, random_state=42)
result_report.append([
str(node2vec_model),
'LogisticRegression CV = 5',
*list(train_model(clf, node2vec_model=node2vec_model, data_year=data_year).values())
]);
clf2 = LogisticRegressionCV(Cs=10, cv=10, scoring="accuracy", multi_class="ovr", max_iter=300, random_state=42)
result_report.append([
str(node2vec_model),
'LogisticRegression CV = 10',
*list(train_model(clf2, node2vec_model=node2vec_model, data_year=data_year).values())
]);
tree_clf = GradientBoostingClassifier(random_state=42)
result_report.append([
str(node2vec_model),
'GradientBoostingClassifier',
*list(train_model(tree_clf, node2vec_model=node2vec_model, data_year=data_year).values())
]);
svm_clf = svm.SVC(decision_function_shape='ovo', probability=True, random_state=42)
result_report.append([
str(node2vec_model),
'SVC ovo',
*list(train_model(svm_clf, node2vec_model=node2vec_model, data_year=data_year).values())
]);
model_res = pd.DataFrame(result_report,
columns=["Feature", "Classifier", "Accuracy", "Balanced Accuracy score",
"F1 micro score", "F1 macro score", "F1 weighted score", "MAE", "Confusion matrix"])
model_res.head()
```
| github_jupyter |
# iMCSpec (iSpec+emcee)
iMCSpec is a tool which combines iSpec(https://www.blancocuaresma.com/s/iSpec) and emcee(https://emcee.readthedocs.io/en/stable/) into a single unit to perform Bayesian analysis of spectroscopic data and estimate stellar parameters. For more details on the individual codes, please refer to the links above. This code has been tested on synthetic datasets as well as GAIA benchmark stars (https://www.blancocuaresma.com/s/benchmarkstars). The example shown here uses the grid generated from MARCS.GES_atom_hfs. If you want to use any other grid, just download it from https://www.cfa.harvard.edu/~sblancoc/iSpec/grid/ and make the necessary changes in the line_regions.
Let us import all the necessary packages that are required for this analysis.
```
import os
import sys
import numpy as np
import pandas as pd
import emcee
from multiprocessing import Pool
import matplotlib.pyplot as plt
os.environ["OMP_NUM_THREADS"] = "1"
os.environ['QT_QPA_PLATFORM']='offscreen'
os.environ["NUMEXPR_MAX_THREADS"] = "8" #CHECK NUMBER OF CORES ON YOUR MACHINE AND CHOOSE APPROPRIATELY
ispec_dir = '/home/swastik/iSpec' #MENTION YOUR DIRECTORY WHERE iSPEC is present
sys.path.insert(0, os.path.abspath(ispec_dir))
import ispec
#np.seterr(all="ignore") #FOR MCMC THE WARNING COMES FOR RED BLUE MOVES WHEN ANY PARTICULAR WALKER VALUE DONOT LIE IN THE PARAMETER SPACE
```
Let us read the input spectra. Here I have the input spectrum in .txt format for reading the spectra. You can use the .fits format also for reading the spectra using Astropy (https://docs.astropy.org/en/stable/io/fits/). Please note that my input spectra is normalized and radial velocity (RV) corrected. For normalization and RV correction you can used iSpec or iraf.
```
df = pd.read_csv('/home/swastik/Downloads/test/HPArcturus.txt', sep ='\s+') #ENTER YOUR INPUT SPECTRA
df = df[df.flux != 0] #FOR SOME SPECTROGRAPH PARTS OF SPECTRA ARE MISSING AND THE CORRESPONDING FLUX VALUES ARE LABELLED AS ZEROS. WE WANT TO IGNORE SUCH POINTS
x = df['waveobs'].values
y = df['flux'].values
yerr = df['err'].values
df = np.array(df,dtype=[('waveobs', '<f8'), ('flux', '<f8'), ('err', '<f8')])
```
You can perform the analysis on the entire spectrum or choose specific regions/segments for which you want to perform the analysis for.
```
#--- Read lines with atomic data ------------------------------------------------
# line_regions = ispec.read_line_regions(ispec_dir + "/input/regions/47000_GES/grid_synth_good_for_params_all.txt") #CHANGE THIS ACCORDINGLY FOR THE INPUT GRID
# line_regions = ispec.adjust_linemasks(df, line_regions, max_margin=0.5)
# segments = ispec.create_segments_around_lines(line_regions, margin=0.5)
# ### Add also regions from the wings of strong lines:
# ## H beta
# hbeta_segments = ispec.read_segment_regions(ispec_dir + "/input/regions/wings_Hbeta_segments.txt")
# #segments = hbeta_segments
# segments = np.hstack((segments, hbeta_segments))
# ## H ALPHA
# halpha_segments = ispec.read_segment_regions(ispec_dir + "/input/regions/wings_Halpha_segments.txt")
# segments = np.hstack((segments, halpha_segments))
# ## MG TRIPLET
# mgtriplet_segments = ispec.read_segment_regions(ispec_dir + "/input/regions/wings_MgTriplet_segments.txt")
# segments = np.hstack((segments, mgtriplet_segments))
##IRON
# fe_segments = ispec.read_segment_regions(ispec_dir + "/input/regions/fe_lines_segments.txt")
# segments = np.hstack((segments, fe_segments))
##CALCIUM TRIPLET
# catriplet_segments = ispec.read_segment_regions(ispec_dir + "/input/regions/Calcium_Triplet_segments.txt")
# segments = np.hstack((segments, catriplet_segments))
##Na doublet
# NaDoublet_segments = ispec.read_segment_regions(ispec_dir + "/input/regions/Calcium_Triplet_segments.txt")
# segments = np.hstack((segments, NaDoublet_segments_segments))
# for j in range(len(segments)):
# segments[j][0] = segments[j][0]+0.05
# segments[j][1] = segments[j][1]-0.05
#YOU CAN CHANGE THE STARTING AND ENDING POINTS OF THE SEGEMENT
```
I will create a mask all false values with the same dimension as my original spectra in 1D. I will keep only those values of wavelength and flux for which the value falls in the segments (i.e, Mask is True).
```
# mask =np.zeros(x.shape,dtype =bool)
# for i in range(len(segments)):
# mask|= (x>segments[i][0])&(x<segments[i][1])
# x = x[mask] #SELECTING THOSE VALUES ONLY FOR WHICH MASK VALUE IS TRUE
# y = y[mask]
# #yerr = yerr[mask]
yerr = y*0.0015 #IF ERROR IS NOT SPECIFIED YOU CAN CHOOSE ACCORDINGLY
```
Now let us interpolate the spectrum using iSpec. Here, for simplicity, I have considered only Teff, log g and [M/H] as free parameters. Vmic and Vmac are obtained from empirical relations by Jofre et al. 2013 and Maria Bergemann.
```
def synthesize_spectrum(theta):
    """Interpolate a synthetic spectrum from the precomputed iSpec grid.

    Parameters
    ----------
    theta : sequence of 3 floats
        (teff, logg, MH): effective temperature [K], surface gravity
        (log g) and metallicity [M/H].

    Returns
    -------
    iSpec spectrum structure with 'waveobs'/'flux' fields sampled on the
    global observed wavelength array ``x``.
    """
    teff ,logg ,MH = theta
    # alpha = ispec.determine_abundance_enchancements(MH)
    alpha =0.0  # alpha enhancement fixed at solar (see commented call above)
    # Vmic/Vmac from the empirical relations implemented in iSpec
    microturbulence_vel = ispec.estimate_vmic(teff, logg, MH)
    macroturbulence = ispec.estimate_vmac(teff, logg, MH)
    limb_darkening_coeff = 0.6
    resolution = 47000  # spectrograph resolving power
    vsini = 1.6 #CHANGE HERE
    code = "grid"  # interpolate from the precomputed grid (no radiative transfer)
    precomputed_grid_dir = ispec_dir + "/input/grid/SPECTRUM_MARCS.GES_GESv6_atom_hfs_iso.480_680nm/"
    # precomputed_grid_dir = ispec_dir + "/input/grid/SPECTRUM_MARCS.GES_GESv6_atom_hfs_iso.480_680nm_light/"
    # The light grid comes bundled with iSpec. It is just for testing purposes. Do not use it for scientific purposes.
    grid = ispec.load_spectral_grid(precomputed_grid_dir)
    # None placeholders: unused in "grid" mode but required by the iSpec API.
    atomic_linelist = None
    isotopes = None
    modeled_layers_pack = None
    solar_abundances = None
    fixed_abundances = None
    abundances = None
    atmosphere_layers = None
    regions = None
    if not ispec.valid_interpolated_spectrum_target(grid, {'teff':teff, 'logg':logg, 'MH':MH, 'alpha':alpha, 'vmic': microturbulence_vel}):
        msg = "The specified effective temperature, gravity (log g) and metallicity [M/H] \
fall out of the spectral grid limits."
        print(msg)
        # NOTE(review): execution continues after this warning, so the call
        # below may extrapolate outside the grid -- confirm this is intended.
    # Interpolation
    synth_spectrum = ispec.create_spectrum_structure(x)
    synth_spectrum['flux'] = ispec.generate_spectrum(synth_spectrum['waveobs'], \
        atmosphere_layers, teff, logg, MH, alpha, atomic_linelist, isotopes, abundances, \
        fixed_abundances, microturbulence_vel = microturbulence_vel, \
        macroturbulence=macroturbulence, vsini=vsini, limb_darkening_coeff=limb_darkening_coeff, \
        R=resolution, regions=regions, verbose=1,
        code=code, grid=grid)
    return synth_spectrum
```
You can also synthesize the spectrum directly from various atmospheric models. A skeleton of the code taken from iSpec is shown below. For more details check example.py in iSpec.
```
# def synthesize_spectrum(theta,code="spectrum"):
# teff ,logg ,MH = theta
# resolution = 47000
# alpha = ispec.determine_abundance_enchancements(MH)
# microturbulence_vel = ispec.estimate_vmic(teff, logg, MH)
# macroturbulence = ispec.estimate_vmac(teff, logg, MH)
# limb_darkening_coeff = 0.6
# regions = None
# # Selected model amtosphere, linelist and solar abundances
# #model = ispec_dir + "/input/atmospheres/MARCS/"
# #model = ispec_dir + "/input/atmospheres/MARCS.GES/"
# #model = ispec_dir + "/input/atmospheres/MARCS.APOGEE/"
# #model = ispec_dir + "/input/atmospheres/ATLAS9.APOGEE/"
# model = ispec_dir + "/input/atmospheres/ATLAS9.Castelli/"
# #model = ispec_dir + "/input/atmospheres/ATLAS9.Kurucz/"
# #model = ispec_dir + "/input/atmospheres/ATLAS9.Kirby/"
# #atomic_linelist_file = ispec_dir + "/input/linelists/transitions/VALD.300_1100nm/atomic_lines.tsv"
# #atomic_linelist_file = ispec_dir + "/input/linelists/transitions/VALD.1100_2400nm/atomic_lines.tsv"
# atomic_linelist_file = ispec_dir + "/input/linelists/transitions/GESv6_atom_hfs_iso.420_920nm/atomic_lines.tsv"
# #atomic_linelist_file = ispec_dir + "/input/linelists/transitions/GESv6_atom_nohfs_noiso.420_920nm/atomic_lines.tsv"
# isotope_file = ispec_dir + "/input/isotopes/SPECTRUM.lst"
# atomic_linelist = ispec.read_atomic_linelist(atomic_linelist_file, wave_base=wave_base, wave_top=wave_top)
# atomic_linelist = atomic_linelist[atomic_linelist['theoretical_depth'] >= 0.01]
# isotopes = ispec.read_isotope_data(isotope_file)
# if "ATLAS" in model:
# solar_abundances_file = ispec_dir + "/input/abundances/Grevesse.1998/stdatom.dat"
# else:
# # MARCS
# solar_abundances_file = ispec_dir + "/input/abundances/Grevesse.2007/stdatom.dat"
# #solar_abundances_file = ispec_dir + "/input/abundances/Asplund.2005/stdatom.dat"
# #solar_abundances_file = ispec_dir + "/input/abundances/Asplund.2009/stdatom.dat"
# #solar_abundances_file = ispec_dir + "/input/abundances/Anders.1989/stdatom.dat"
# modeled_layers_pack = ispec.load_modeled_layers_pack(model)
# solar_abundances = ispec.read_solar_abundances(solar_abundances_file)
# ## Custom fixed abundances
# #fixed_abundances = ispec.create_free_abundances_structure(["C", "N", "O"], chemical_elements, solar_abundances)
# #fixed_abundances['Abund'] = [-3.49, -3.71, -3.54] # Abundances in SPECTRUM scale (i.e., x - 12.0 - 0.036) and in the same order ["C", "N", "O"]
# ## No fixed abundances
# fixed_abundances = None
# atmosphere_layers = ispec.interpolate_atmosphere_layers(modeled_layers_pack, {'teff':teff, 'logg':logg, 'MH':MH, 'alpha':alpha}, code=code)
# synth_spectrum = ispec.create_spectrum_structure(x)
# synth_spectrum['flux'] = ispec.generate_spectrum(synth_spectrum['waveobs'],
# atmosphere_layers, teff, logg, MH, alpha, atomic_linelist, isotopes, solar_abundances,
# fixed_abundances, microturbulence_vel = microturbulence_vel,
# macroturbulence=macroturbulence, vsini=vsini, limb_darkening_coeff=limb_darkening_coeff,
# R=resolution, regions=regions, verbose=0,
# code=code)
# return synth_spectrum
```
So far we have discussed reading the input observed spectrum and interpolating a synthetic spectrum from iSpec. Now the important part is to compare the observed and interpolated spectra. For this we use the Markov chain Monte Carlo (MCMC) method, as implemented in the emcee package by
Dan Foreman-Mackey.
```
# int() safely parses the numeric reply; eval() on raw input would execute
# arbitrary code typed at the prompt (a well-known injection hazard).
walkers = int(input("Enter Walkers: ")) #WALKER IMPLIES THE INDEPENDENT RANDOMLY SELECTED PARAMETER SETS. NOTE IT SHOULD HAVE ATLEAST TWICE THE VALUE OF AVAILABLE FREE PARAMETERS
Iter = int(input("Enter Iterations: ")) #ITERATION IMPLIES NUMBER OF RUNS THE PARAMETERS WILL BE CHECKED FOR CONVERGENCE. FOR MOST CASES 250-300 SHOULD DO.
```
We will be creating four functions for this MCMC run. The first is straightforward, and is known as the model. The model function should take as an argument a list representing our θ vector, and return the model evaluated at that θ. For completion, your model function should also have your parameter array as an input. The form of this function comes from the Gaussian probability distribution P(x)dx.
```
def log_likelihood(theta):
    """Gaussian log-likelihood of the observed flux given parameters theta.

    Generates a synthetic spectrum for theta and returns -chi^2 / 2 against
    the global observed flux ``y`` with uncertainties ``yerr``.
    """
    synth = synthesize_spectrum(theta)      # model spectrum for this theta
    residuals = y - synth['flux']
    variance = yerr ** 2
    return -0.5 * np.sum(residuals ** 2 / variance)
```
There is no unique way to set up your prior function. For this simple case, the log-prior function returns zero if the randomly generated input values lie within the specified ranges and -infinity if they do not (every parameter must satisfy its criterion). You can choose your own prior function as well.
```
def log_prior(theta):
    """Flat (uniform) log-prior over the spectral grid limits.

    Returns 0.0 when every parameter lies inside its allowed range and
    -inf otherwise, which makes emcee reject out-of-grid proposals.
    """
    teff, logg, metallicity = theta
    teff_ok = 3200 < teff < 6900            #CHANGE HERE
    logg_ok = 1.1 < logg < 4.8
    mh_ok = -2.49 < metallicity <= 0.49
    return 0.0 if (teff_ok and logg_ok and mh_ok) else -np.inf
```
The last function we need to define is lnprob(). This function combines the steps above by running the lnprior function, and if the function returned -np.inf, passing that through as a return, and if not (if all priors are good), returning the lnlike for that model (by convention we say it’s the lnprior output + lnlike output, since lnprior’s output should be zero if the priors are good). lnprob needs to take as arguments theta,x,y,and yerr, since these get passed through to lnlike.
```
def log_probability(theta):
    """Log-posterior: log-prior plus log-likelihood.

    Short-circuits with -inf when the prior already rejects theta, so the
    (expensive) spectrum synthesis is skipped for out-of-range proposals.
    """
    prior = log_prior(theta)
    if np.isfinite(prior):
        return prior + log_likelihood(theta)
    return -np.inf
```
Select input guess values and randomly create an initial set of stellar parameters.
```
initial = np.array([4650,1.8,-0.7]) #INPUT GUESS VALUES
pos = initial + np.array([100,0.1,0.1])*np.random.randn(walkers, 3) # YOU CAN CHOOSE UNIFORM RANDOM FUNCTION OR GAUSSIAUN RANDOM NUMBER GENARATOR
nwalkers, ndim = pos.shape
```
Now we will run the emcee sampler. This will take some time depending on your system. But don't worry :)
```
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability)
sampler.run_mcmc(pos,Iter, progress=True)
```
Let us plot the Walkers and Iterations. Check out for convergence in this plot. If you see the convergence you are good to go.
```
fig, axes = plt.subplots(3, figsize=(10, 7), sharex=True)
samples = sampler.get_chain()
accepted = sampler.backend.accepted.astype(bool) #Here accepted indicated that the lines for each parameter below have converged/moved at least one time.
labels = ["teff","logg","MH"]
for i in range(ndim):
ax = axes[i]
ax.plot(samples[:, :, i], "k", alpha=0.3)
ax.set_ylabel(labels[i])
ax.yaxis.set_label_coords(-0.1, 0.5)
axes[-1].set_xlabel("step number");
```
Let us check how good is the fitting.....
```
fig, ax = plt.subplots(1, figsize=(10, 7), sharex=True)
samples = sampler.flatchain
theta_max = samples[np.argmax(sampler.flatlnprobability)]
best_fit_model = synthesize_spectrum(theta_max)
ax.plot(x,y,alpha=0.3)
ax.plot(x,best_fit_model['flux'],alpha =0.3)
ax.plot(x,y-best_fit_model['flux'],alpha =0.3)
plt.savefig('t2.pdf') #CHANGE HERE
print(('Theta max: ',theta_max)) # Genarating the spectrum for the Maximum likelyhood function.
#NOTE THE SPIKES IN THE PLOT BELOW. THESE ARE DUE TO THE FACT THAT END POINTS OF THE SPECTRUMS ARE EXTRAPOLATED
```
During the first few runs the walkers are exploring the parameter space and convergence has not yet been achieved. We will ignore such runs; this is also known as "burn-in".
```
new_samples = sampler.get_chain(discard=100, thin=1, flat=False)
new_samples = new_samples[:,accepted,:] # WE ARE ONLY CHOOSING THE VALUES FOR WHICH THE WALKER HAVE MOVED ATLEAST ONCE DURING THE ENTIRE ITERATION. Stagnent walkers indicates that the prior function might have returned -inf.
```
Checking the convergence after the burn-in... If it seems to have converged, then it is done.
```
fig, axes = plt.subplots(3, figsize=(10, 7), sharex=True)
for i in range(ndim):
ax = axes[i]
ax.plot(new_samples[:, :, i], "k", alpha=0.3)
ax.set_ylabel(labels[i])
ax.yaxis.set_label_coords(-0.1, 0.5)
axes[-1].set_xlabel("step number")
plt.savefig('t3.pdf') #CHANGE HERE
flat_samples = new_samples.reshape(-1,new_samples.shape[2])
np.savetxt("RNtesto.txt",flat_samples,delimiter='\t') #CHANGE HERE
```
# DATA VISUALIZATION
Now, after obtaining the final list of stellar parameters, it is important to visualise the stellar parameter distribution. It is also important to check for any correlations among the stellar parameters. Here I have shown two methods by which you can do this. Note: I have taken very few points for this analysis; for a proper plot you actually need a much larger dataset (40x300 with 150 burn-in steps at minimum).
```
import corner
from pandas.plotting import scatter_matrix
df = pd.read_csv('/home/swastik/RNtesto.txt',delimiter='\t',header = None)
df.columns = ["$T_{eff}$", "logg", "[M/H]"]
df.hist() #Plotting Histogram for each individual stellar parameters. THIS NEED NOT BE A GAUSSIAN ONE
#df = df[df.logg < 4.451 ] #REMOVE ANY OUTLIER DISTRIBUTION
scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde') #PLOTTING THE SCATTER MATRIX. I HAVE USED A VERY LIGHT DATASET FOR TEST PURPOSE> YOU CAN USE A MORE WALKER X ITERATION FOR A BETTER RESULT
samples = np.vstack([df]) #IT IS NECESSARY TO STACK THE DATA VERTICALLY TO OBTAIN THE DISTRIBUTION FROM THE DATA FRAME
value2 = np.mean(samples, axis=0)
plt.rcParams["font.size"] = "10" #THIS CHANGES THE FONT SIZE OF THE LABELS(NOT LEGEND)
#FINALLY... MAKING THE CORNER PLOT>>>>
#fig = corner.corner(df,show_titles=True,plot_datapoints=True,quantiles=[0.16, 0.5, 0.84],color ='black',levels=(1-np.exp(-0.5),),label_kwargs=dict(fontsize=20,color = 'black'),hist_kwargs=dict(fill = True,color = 'dodgerblue'),alpha =0.2)
fig = corner.corner(df,show_titles=True,plot_datapoints=True,quantiles=[0.16, 0.5, 0.84],color ='black',label_kwargs=dict(fontsize=20,color = 'black'),hist_kwargs=dict(fill = True,color = 'dodgerblue'),alpha =0.2)
axes = np.array(fig.axes).reshape((3, 3))
for i in range(3):
ax = axes[i, i]
ax.axvline(value2[i], color="r",alpha =0.8)
for yi in range(3):
for xi in range(yi):
ax = axes[yi, xi]
ax.axvline(value2[xi], color="r",alpha =0.8,linestyle = 'dashed')
ax.axhline(value2[yi], color="r",alpha =0.8,linestyle = 'dashed')
ax.plot(value2[xi], value2[yi], "r")
# plt.tight_layout()
#THE CORNER PLOT DONOT LOOK GREAT>> THE REASON IS FEW NUMBER OF DATA POINTS AND SHARP CONVERGENCE
```
I would like to thank Sergi Blanco-Cuaresma for his valuable suggestions and feedback regarding the iSpec code and its integration with emcee. I would also like to thank
Dan Foreman-Mackey for his insightful comments on using emcee, and Aritra Chakraborty and Dr. Ravinder Banyal for their comments and suggestions on improving the code, without whose help this would not have been possible.
| github_jupyter |
<a href="https://colab.research.google.com/github/ryanleeallred/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/module2-loadingdata/LS_DS_112_Loading_Data_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Practice Loading Datasets
This assignment is purposely semi-open-ended you will be asked to load datasets both from github and also from CSV files from the [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php).
Remember that the UCI datasets may not have a file type of `.csv` so it's important that you learn as much as you can about the dataset before you try and load it. See if you can look at the raw text of the file either locally, on github, using the `!curl` shell command, or in some other way before you try and read it in as a dataframe, this will help you catch what would otherwise be unforseen problems.
## 1) Load a dataset from Github (via its *RAW* URL)
Pick a dataset from the following repository and load it into Google Colab. Make sure that the headers are what you would expect and check to see if missing values have been encoded as NaN values:
<https://github.com/ryanleeallred/datasets>
```
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
```
## 2) Load a dataset from your local machine
Download a dataset from the [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php) and then upload the file to Google Colab either using the files tab in the left-hand sidebar or by importing `files` from `google.colab` The following link will be a useful resource if you can't remember the syntax: <https://towardsdatascience.com/3-ways-to-load-csv-files-into-colab-7c14fcbdcb92>
While you are free to try and load any dataset from the UCI repository, I strongly suggest starting with one of the most popular datasets like those that are featured on the right-hand side of the home page.
Some datasets on UCI will have challenges associated with importing them far beyond what we have exposed you to in class today, so if you run into a dataset that you don't know how to deal with, struggle with it for a little bit, but ultimately feel free to simply choose a different one.
- Make sure that your file has correct headers, and the same number of rows and columns as is specified on the UCI page. If your dataset doesn't have headers use the parameters of the `read_csv` function to add them. Likewise make sure that missing values are encoded as `NaN`.
```
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
```
## 3) Load a dataset from UCI using `!wget`
"Shell Out" and try loading a file directly into your google colab's memory using the `!wget` command and then read it in with `read_csv`.
With this file we'll do a bit more to it.
- Read it in, fix any problems with the header as make sure missing values are encoded as `NaN`.
- Use the `.fillna()` method to fill any missing values.
- [.fillna() documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.fillna.html)
- Create one of each of the following plots using the Pandas plotting functionality:
- Scatterplot
- Histogram
- Density Plot
## Stretch Goals - Other types and sources of data
Not all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.
If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.
Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.
How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.
One last major source of data is APIs: https://github.com/toddmotto/public-apis
API stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.
*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
```
```
| github_jupyter |
```
###################################
# Test cell, pyechonest - IO HAVOC
###################################
import os
import sys
sys.path.append(os.environ["HOME"] + "/github/pyechonest")
import pyechonest.track as track
import pyechonest.artist as artist
import pyechonest.util as util
import pyechonest.song as song
import sys, pprint
# pprint.pprint(sys.modules)
def get_tempo(artist):
    """Return the tempo (BPM) of the top Echo Nest search hit for *artist*.

    Searches songs by artist name (the parameter is a name string, not an
    Artist object) and returns the first result's audio-summary tempo, or
    None when the search yields no results.
    """
    # NOTE: the parameter shadows the module-level `pyechonest.artist`
    # import in this cell; renaming would change the keyword interface,
    # so it is kept as-is.
    results = song.search(artist=artist, results=1, buckets=['audio_summary'])
    if results:  # idiomatic truthiness test instead of len(...) > 0
        return results[0].audio_summary['tempo']
    return None
# Print the single hotttest artist returned by the Echo Nest charts endpoint.
for hottt_artist in artist.top_hottt(results=1):
    print(hottt_artist.name, hottt_artist.id, hottt_artist.hotttnesss)
# print(hottt_artist.name + " " + str(get_tempo(hottt_artist.name)))
# a = artist.Artist(hottt_artist.name)
# Look up a fixed artist and fetch two of its images via the Echo Nest API.
a = artist.Artist("Red Foo")
images = a.get_images(results=2)
print(images[0]['url'])
# print(images[1]['url'])
# test spotify (spotifpy)
# Cross-check: image lookup through the Spotify bucket of the same artist.
images_b = a.get_spotify_images()
print(str(len(images_b)) + " images found on Spotify")
print(images_b[0]['url'])
###################################
# Generate new pyechonest cache
###################################
import time
import json
from pyechonest import config
from pyechonest import artist
# NOTE(review): API key hard-coded in source — move to an environment
# variable / secrets store and rotate the key.
config.ECHO_NEST_API_KEY="2VN1LKJEQUBPUKXEC"
# Accumulator filled by addArtist() and serialized by writeArtistsCaches().
hotttArtistsCache = []
# Curated favorites: 4/4 house / techno / dub-techno artists to cache
# alongside the "hottt" chart artists.
fave4x4ArtistsList = [
    'Juan Atkins','Faithless', 'Ruoho Ruotsi', 'Maurice Fulton',
    'Leftfield', 'Frivolous', 'Basement Jaxx','Glitch Mob',
    'Hollis P. Monroe', 'Frankie Knuckles', 'Francois K',
    'Trentemøller', 'Chelonis R. Jones', 'Steve Bug',
    'Jimpster', 'Jeff Samuel', 'Ian Pooley',
    'Luomo', 'Kerri Chandler', 'Charles Webster',
    'Roy Davis Jr.', 'Robert Owens',
    'Black Science Orchestra', 'Mr. Fingers', 'Saint Etienne',
    'Masters at Work', 'Theo Parrish', 'Moodymann',
    'Basic Channel', 'Rhythm & Sound', 'Roman Flügel',
    'Joe Lewis', 'DJ Said', 'Recloose', 'Kate Simko', 'Aschka',
    'Maya Jane Coles', 'Gys', 'Deadbeat', 'Soultek',
    'DeepChord', 'Vladislav Delay', 'Andy Stott', 'Intrusion',
    'Rod Modell', 'Kassem Mosse', 'Murcof', 'Marc Leclair',
    'Fax', 'Monolake', 'Kit Clayton', 'Bvdub', 'Swayzak',
    'Wookie', 'Artful Dodger', 'MJ Cole',
    'Les Rythmes Digitales', 'Fischerspooner', 'Cassius',
    'Miguel Migs', 'Osunlade', 'Metro Area', 'Dennis Ferrer',
    'Ron Trent', 'Larry Heard', 'Alton Miller', 'King Britt',
    'Bougie Soliterre', 'Todd Terry', 'Black Coffee',
    'Richie Hawtin', 'Speedy J', 'Kenny Larkin', 'Laurent Garnier',
    'Carl Craig', 'Robert Hood', 'John Tejada', 'Thomas P. Heckmann',
    'Aril Brikha', 'Tiefschwarz', 'Funk D\'Void', 'A Guy Called Gerald',
    'Jeff Mills', 'Aaron Carl', 'Josh Wink', 'Derrick May',
    'Frankie Bones', 'DJ Assault', 'AUX 88', 'Fumiya Tanaka',
    'The Lady Blacktronika', 'Junior Lopez', 'Someone Else', 'Noah Pred',
    'Danny Tenaglia', 'Pete Tong', 'Booka Shade', 'Paul Kalkbrenner',
    'Dapayk & Padberg', 'Igor O. Vlasov', 'Dreem Teem', 'Todd Edwards',
    '187 Lockdown', 'Serious Danger', 'Deep Dish', 'Ellen Allien',
    'Matias Aguayo', 'Alex Smoke', 'Modeselektor', 'Mike Shannon',
    'Radio Slave', 'Jonas Bering', 'Glitterbug', 'Justus Köhncke',
    'Wolfgang Voigt', 'Ripperton', 'Isolée', 'Alex Under',
    'Phonique', 'James Holden', 'Minilogue', 'Michael Mayer',
    'Pantha Du Prince', 'Håkan Lidbo', 'Lusine', 'Kalabrese',
    'Matthew Herbert', 'Jan Jelinek', 'Lucien-N-Luciano', 'Closer Musik',
    'Apparat', 'Guillaume & The Coutu Dumonts', 'Thomas Brinkmann',
    'The Soft Pink Truth', 'Ada', 'Wighnomy Brothers', 'Ricardo Villalobos',
    'Jesse Somfay','Falko Brocksieper', 'Damian Lazarus', 'Superpitcher',
    'Catz N\' Dogz', 'Pan/Tone', 'Broker/Dealer', 'Dinky', 'T.Raumschmiere',
    'Stephen Beaupré', 'Konrad Black', 'Claude VonStroke', 'DJ Koze',
    'Cobblestone Jazz', 'Robag Wruhme', 'Seth Troxler', 'Stewart Walker',
    'Farben', 'Pier Bucci', 'Mathew Jonson', 'LoSoul', 'Safety Scissors',
    'Anja Schneider', 'Markus Guentner', 'Fuckpony', 'Onur Özer', 'Mossa',
    'Kenneth James Gibson', 'Butane', 'Mikael Stavöstrand', 'Franklin de Costa',
    'Quantec', 'Jin Choi', 'The Mountain People', 'Château Flight', 'Havantepe',
    'Tomas Jirku', 'Limaçon', 'Redshape', 'Mike Huckaby', 'Taylor Deupree',
    'Substance & Vainqueur'
]
# Curated favorites: bass / dubstep / UK-garage artists.
# NOTE(review): ' Hijak' carries a stray leading space — probably unintended,
# but left untouched here since the strings are used as API lookup keys.
faveBassArtistsList = [
    'Photek', 'Zomby', 'Kode9', 'Vex\'d', 'Plastician', 'Joy Orbison',
    'Eskmo', 'Tes La Rok', 'DFRNT', 'Africa HiTech', 'King Midas Sound',
    'Skream', 'Djunya', '2562', 'Fantastic Mr. Fox', 'Ikonika',
    'Timeblind', 'Mark Pritchard', 'Appleblim', 'Ramadanman', 'D1',
    'Matty G', 'Peverelist', 'Untold', 'Roska', 'El-B', 'Mala',
    'Coki',' Hijak', 'Mount Kimbie', 'Chrissy Murderbot', 'Scuba',
    'Kush Arora', 'Meesha', 'Martyn'
]
# Curated favorites: "classic" electronica / trip-hop / IDM artists.
# Currently no image resources on EN, lastfm or Spotify for 'Terre Thaemlitz'
faveClassicArtistsList = [
    'Björk', 'Kraftwerk', 'DJ Shadow', 'Radiohead', 'The Orb',
    'Jean-Michel Jarre', 'Aphex Twin', 'Tangerine Dream',
    'Boards of Canada', 'Amon Tobin', 'Ratatat', 'Massive Attack',
    'Röyksopp', 'LCD Soundsystem', 'Gotan Project',
    'Gus-Gus', 'Everything but the Girl', 'Ursula 1000', 'Llorca',
    'UNKLE', 'The Future Sound of London', 'The Avalanches',
    'Laika', 'Thievery Corporation', 'Groove Armada', 'Bonobo',
    'DJ Food','Tricky', 'Dirty Vegas', 'Télépopmusik', 'Hooverphonic',
    'dZihan & Kamien', 'Talvin Singh', 'DJ Vadim', 'Cibo Matto',
    'Esthero', 'Martina Topley-Bird', 'Dimitri From Paris',
    'Coldcut', 'Death in Vegas', 'Róisín Murphy', 'Nitin Sawhney',
    'José Padilla', 'Jimi Tenor', 'Mr. Scruff', 'Dub Pistols',
    'Morcheeba', 'Supreme Beings of Leisure', 'Air', 'DJ Krush', 'RJD2',
    'Underworld', 'jenn mierau', 'Einstürzende Neubauten',
    'Nurse with Wound', 'The Legendary Pink Dots', 'Skinny Puppy',
    'Atari Teenage Riot', 'Venetian Snares', 'µ-Ziq', 'Richard Devine',
    'Squarepusher', 'Autechre', 'Le Tigre', 'Queens of the Stone Age',
    'Xiu Xiu', 'Baby Dee', 'Alastair Galbraith', '不失者', 'I Am Robot and Proud',
    'Meg Baird'
]
# Curated favorites: electroacoustic / modern-composition artists.
# NOTE(review): 'James Tenney' appears twice in this list.
faveElectroacousticArtistsList = [
    'Arthur Russell', 'Jon Appleton', 'Charles Dodge', 'Morton Subotnick',
    'James Tenney', 'David Tudor', 'Vladimir Ussachevsky',
    'Pauline Oliveros', 'Robert Ashley', 'Nam June Paik', 'La Monte Young',
    'Phill Niblock', 'François Bayle', 'James Tenney', 'Tim Hecker', 'Pamela Z',
    'Christian Wolff', 'Jean-Claude Risset', 'Paul Lansky', 'Laurie Spiegel',
    'Antye Greie', 'Ryoji Ikeda', 'alva noto', 'Ryuichi Sakamoto', 'Lawrence English',
    'Tujiko Noriko', 'Arvo Pärt', 'Fennesz', 'Christopher Willits', 'Colleen',
    'Ben Frost', 'Jóhann Jóhannsson', 'Sylvain Chauveau'
]
# Curated favorites: dub / roots-reggae artists.
favedubArtistsList = [
    'King Tubby', 'Scientist', 'Lee "Scratch" Perry', 'Augustus Pablo',
    'Prince Jammy', 'Mad Professor', 'Roots Radics', 'The Upsetters',
    'Sly Dunbar', 'Robbie Shakespeare', 'Keith Hudson', 'Tappa Zukie', 'Big Youth',
    'The Aggrovators', 'U-Roy', 'Prince Far I',
    'Black Uhuru', 'Horace Andy', 'I-Roy', 'The Abyssinians',
    'Pablo Moses', 'Max Romeo', 'The Heptones', 'Burning Spear',
    'Dennis Brown', 'Jacob Miller', 'Barrington Levy', 'Sugar Minnot',
    'Yellowman', 'Gregory Isaacs', 'John Holt', 'Alton Ellis',
    'Ken Boothe', 'The Ethiopians', 'Joe Higgs', 'Tommy McCook',
    'The Melodians', 'Delroy Wilson', 'Isaac Haile Selassie', 'Polycubist'
]
# Curated favorites: African (afrobeat / highlife / juju / mbira ...) artists.
faveAfricanArtistsList = [
    'Manu Dibango', 'Baaba Maal',
    'Antibalas Afrobeat Orchestra', 'Orlando Julius', 'William Onyeabor',
    'Orchestre Poly-Rythmo', 'Sir Victor Uwaifo',
    'Tony Allen & His Afro Messengers', 'Sahara All Stars Band Jos',
    'Lijadu Sisters', 'King Sunny Ade', 'Ebo Taylor',
    'Gasper Lawal', 'Tunji Oyelana and the Benders', '2 Face', 'P Square',
    'Shina Williams & His African Percussionists', 'Weird MC', 'Plantashun Boiz',
    'Paul I.K. Dairo', 'D\'banj', 'Ruggedman', 'Eedris Abdulkareem',
    'Styl-Plus', 'Tony Tetuila', 'Olamide', 'Ebenezer Obey',
    'Haruna Ishola', 'Lágbájá', 'Prince Nico Mbarga', 'West African Highlife Band',
    'Modenine', 'Terry tha Rapman', 'Olu Maintain', 'Majek Fashek', 'Konono N°1',
    'Koffi Olomidé', 'Les Bantous de la Capitale', 'Thomas Mapfumo', 'Oliver Mtukudzi',
    'Chiwoniso Maraire', 'Thomas Mapfumo & The Blacks Unlimited', 'Angélique Kidjo',
    'Oumou Sangare', 'Ismaël Lô', 'Geoffrey Oryema', 'Salif Keita', 'Amadou & Mariam',
    'Orchestra Baobab', 'Bembeya Jazz National', 'Tiwa Savage'
]
def addArtist(artist):
    """Append *artist*'s metadata (images, URLs, terms, twitter id) to the
    module-level hotttArtistsCache, then sleep to respect API rate limits.

    NOTE(review): the parameter shadows the module-level `pyechonest.artist`
    import inside this function body.
    """
    print(artist.name, artist.id)
    artistToAdd = { "name": artist.name,
                    "id": artist.id,
                    "images": artist.get_images(results=25),
                    "URLs": artist.urls,
                    "genres": artist.terms,
                    "twitter_id": artist.get_twitter_id()
                  }
    hotttArtistsCache.append(artistToAdd)
    time.sleep(10)  # throttle: delays for 10 seconds between artists
def addFaveArtistList(artistList):
    """Fetch full metadata (images, urls, terms) for each artist name in
    *artistList* and append it to the module-level cache via addArtist()."""
    for artist_name in artistList:
        fetched = artist.Artist(artist_name, buckets=['images', 'urls', 'terms'])
        addArtist(fetched)
def writeArtistsCaches():
    """Populate hotttArtistsCache with the top 300 "hottt" artists plus every
    curated favorites list, then dump the cache to artistMetaData.js as a
    JavaScript assignment (``var hotttArtistsCache = [...];``) so a web page
    can include it directly.
    """
    # HOTTT artists (chart-based)
    for hottt_artist in artist.top_hottt(results=300):
        a = artist.Artist(hottt_artist.id, buckets=['images', 'urls', 'terms'])
        addArtist(a)
    # Curated favorites, one genre list at a time
    addFaveArtistList(fave4x4ArtistsList)
    addFaveArtistList(faveBassArtistsList)
    addFaveArtistList(faveClassicArtistsList)
    addFaveArtistList(faveElectroacousticArtistsList)
    addFaveArtistList(favedubArtistsList)
    addFaveArtistList(faveAfricanArtistsList)
    # Serialize the cache as a JS variable assignment.
    # Fix: the explicit outfile.close() the original called inside this
    # with-block was redundant (the context manager closes the file).
    with open('artistMetaData.js', 'w') as outfile:
        outfile.write("var hotttArtistsCache = ")
        json.dump(hotttArtistsCache, outfile)
        outfile.write(";")
    print("\n" + "Fini - writeArtistsCaches")
######################
# Sanity-check the size of each curated list, then build and write the cache.
print('fave4x4ArtistsList: ' + str(len(fave4x4ArtistsList)))
print('faveBassArtistsList: ' + str(len(faveBassArtistsList)))
print('faveClassicArtistsList: ' + str(len(faveClassicArtistsList)))
print('faveElectroacousticArtistsList: ' + str(len(faveElectroacousticArtistsList)))
print('favedubArtistsList: ' + str(len(favedubArtistsList)))
print('faveAfricanArtistsList: ' + str(len(faveAfricanArtistsList)))
print('\n\n')
writeArtistsCaches()
```
| github_jupyter |
# Amazon Augmented AI (Amazon A2I) integration with Amazon Fraud Detector
# Visit https://github.com/aws-samples/amazon-a2i-sample-jupyter-notebooks for all A2I Sample Notebooks
1. [Introduction](#Introduction)
2. [Prerequisites](#Setup)
1. [Workteam](#Workteam)
2. [Notebook Permission](#Notebook-Permission)
3. [Client Setup](#Client-Setup)
4. [Create Control Plane Resources](#Create-Control-Plane-Resources)
1. [Create Human Task UI](#Create-Human-Task-UI)
2. [Create Flow Definition](#Create-Flow-Definition)
5. Scenario: When Activation Conditions are met, and a Human Loop is created
1. [Check Status of Human Loop](#Check-Status-of-Human-Loop)
2. [Wait For Workers to Complete Task](#Wait-For-Workers-to-Complete-Task)
3. [Check Status of Human Loop](#Check-Status-of-Human-Loop)
4. [View Task Results](#View-Task-Results)
# Introduction
Amazon Augmented AI (Amazon A2I) makes it easy to build the workflows required for human review of ML predictions. Amazon A2I brings human review to all developers, removing the undifferentiated heavy lifting associated with building human review systems or managing large numbers of human reviewers.
Amazon A2I provides built-in human review workflows for common machine learning use cases, such as content moderation and text extraction from documents, which allows predictions from Amazon Rekognition and Amazon Textract to be reviewed easily. You can also create your own workflows for ML models built on Amazon SageMaker or any other tools. Using Amazon A2I, you can allow human reviewers to step in when a model is unable to make a high confidence prediction or to audit its predictions on an on-going basis. Learn more here: https://aws.amazon.com/augmented-ai/
In this tutorial, we will show how you can use Amazon A2I directly with Amazon Fraud Detector to check for high confidence fraud predictions
For more in depth instructions, visit https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-getting-started.html
To incorporate Amazon A2I into your human review workflows, you need three resources:
* A **worker task template** to create a worker UI. The worker UI displays your input data, such as documents or images, and instructions to workers. It also provides interactive tools that the worker uses to complete your tasks. For more information, see https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-instructions-overview.html
* A **human review workflow**, also referred to as a flow definition. You use the flow definition to configure your human workforce and provide information about how to accomplish the human review task. For built-in task types, you also use the flow definition to identify the conditions under which a review human loop is triggered. You can use the flow definition to specify that a model prediction will be sent to a human for review based on the threshold defined by you for Fraud detection. You can create a flow definition in the Amazon Augmented AI console or with the Amazon A2I APIs. To learn more about both of these options, see https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html
* A **human loop** to start your human review workflow. When you use one of the built-in task types, the corresponding AWS service creates and starts a human loop on your behalf when the conditions specified in your flow definition are met or for each object if no conditions were specified. When a human loop is triggered, human review tasks are sent to the workers as specified in the flow definition.
When using a custom task type, you start a human loop using the Amazon Augmented AI Runtime API. When you call StartHumanLoop in your custom application, a task is sent to human reviewers.
### Install Latest SDK
```
# First, let's get the latest installations of our dependencies
!pip install --upgrade pip
!pip install botocore --upgrade
!pip install boto3 --upgrade
!pip install -U botocore
```
## Setup
We need to set up the following data:
* `region` - Region to call A2I
* `bucket` - A S3 bucket accessible by the given role
* Used to store the sample images & output results
* Must be within the same region A2I is called from
* `role` - The IAM role used as part of StartHumanLoop. By default, this notebook will use the execution role
* `workteam` - Group of people to send the work to
```
import boto3
import botocore
# Region this notebook runs in; every AWS client below is created against it.
REGION = boto3.session.Session().region_name
```
#### Create and Setup S3 Bucket and Paths
Create your own S3 bucket and replace the following with that bucket name
```
# Replace the following with your bucket name
# (it must be in the same region as REGION; verified in the next cell).
BUCKET = 'your Amazon S3 bucket name'
```
Your bucket, `BUCKET` must be located in the same AWS Region that you are using to run this notebook. This cell checks if they are located in the same Region.
```
# Amazon S3 (S3) client
s3 = boto3.client('s3', REGION)
# head_bucket exposes the bucket's region via a response header.
bucket_region = s3.head_bucket(Bucket=BUCKET)['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region']
assert bucket_region == REGION, "Your S3 bucket {} and this notebook need to be in the same region.".format(BUCKET)
```
### Notebook Permission
The AWS IAM Role used to execute the notebook needs to have the following permissions:
* FraudDetectorFullAccess
* SagemakerFullAccess
* AmazonSageMakerMechanicalTurkAccess (if using MechanicalTurk as your Workforce)
* S3 Read and Write Access to the bucket you specified in `BUCKET`.
```
from sagemaker import get_execution_role

# Setting Role to the default SageMaker Execution Role; it is passed to
# create_flow_definition below and must carry the permissions listed above.
ROLE = get_execution_role()
display(ROLE)
```
Visit: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-permissions-security.html to add the necessary permissions to your role
### Workteam or Workforce
A workforce is the group of workers that you have selected to label your dataset. You can choose either the Amazon Mechanical Turk workforce, a vendor-managed workforce, or you can create your own private workforce for human reviews. Whichever workforce type you choose, Amazon Augmented AI takes care of sending tasks to workers.
When you use a private workforce, you also create work teams, a group of workers from your workforce that are assigned to Amazon Augmented AI human review tasks. You can have multiple work teams and can assign one or more work teams to each job.
# To create your Workteam, visit the instructions here: https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management.html
NOTE: After you have created your workteam, replace WORKTEAM_ARN below with your own Workteam ARN
```
# ARN of the (private) work team that will receive the review tasks.
WORKTEAM_ARN = "your workteam ARN"
```
## Client Setup
Here we are going to setup the clients.
```
from IPython.core.display import display, HTML
from IPython.display import clear_output

# Widen the notebook cells for easier reading.
display(HTML("<style>.container { width:90% }</style>"))
# ------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Show wide/long pandas frames without truncation.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import os
import sys
import time
import json
import uuid
from datetime import datetime
import io
# -- Client setup --
import boto3
import sagemaker
# -- sklearn --
# NOTE(review): roc_auc_score is imported twice here; the imported `auc`
# name is later shadowed by the scalar AUC variable in the model cell.
from sklearn.metrics import roc_curve, roc_auc_score, auc, roc_auc_score
%matplotlib inline
import pprint
# Pretty print setup
pp = pprint.PrettyPrinter(indent=2)
# Helper to pretty-print AWS SDK responses without their metadata noise.
def print_response(response):
    """Pretty-print an AWS SDK response dict, first removing its
    'ResponseMetadata' entry (in place) if present."""
    response.pop('ResponseMetadata', None)
    pp.pprint(response)
# Amazon SageMaker client
# NOTE(review): this rebinding shadows the `sagemaker` *module* imported
# above; from here on the name refers to the boto3 client.
sagemaker = boto3.client('sagemaker', REGION)
# Amazon Augmented AI (A2I) Runtime client
a2i_runtime_client = boto3.client('sagemaker-a2i-runtime', REGION)
# -- initialize the Amazon Fraud Detector client
client = boto3.client('frauddetector')
```
# Amazon Fraud Detector Set up
To generate fraud predictions, Amazon Fraud Detector uses machine learning models that are trained
with your historical fraud data. Each model is trained using a model type, which is a specialized recipe to
build a fraud detection model for a specific fraud use case. Deployed models are imported to detectors,
where you can configure decision logic (for example, rules) to interpret the model’s score and assign
outcomes such as pass or send transaction to a human investigator for review.
You can use the AWS Console to create and manage models and detector versions. Alternatively, you can
use the AWS Command Line Interface (AWS CLI) or one of the Amazon Fraud Detector SDKs.
Amazon Fraud Detector components include events, entities, labels, models, rules, variables, outcomes,
and detectors. Using these components, you can build an evaluation that contains your fraud detection
logic.
### To Create a fraud detector model using the console, please refer to the link below
https://docs.aws.amazon.com/frauddetector/latest/ug/frauddetector.pdf
### To Create a fraud detector model using an SDK / Python notebook, please refer to the link below
https://github.com/aws-samples/aws-fraud-detector-samples
#### NOTE:
The following model is created using the default dataset provided by Amazon Fraud Detector (at https://docs.aws.amazon.com/frauddetector/latest/ug/samples/training_data.zip)
After you create your own Fraud Detector Model, replace the MODEL_NAME, DETECTOR_NAME, EVENT_TYPE and ENTITY_TYPE with your fraud detector model values
```
# Names of the pre-built Fraud Detector resources this notebook queries;
# replace with your own model / detector / event / entity identifiers.
MODEL_NAME = 'sample_fraud_detection'
DETECTOR_NAME = 'fraud_detector'
EVENT_TYPE = 'registration'
ENTITY_TYPE = 'customer'

# -- model performance summary --
# Fix: the original issued the identical DescribeModelVersions request twice
# (once for the scalar AUC, once for the ROC points); fetch the training
# metrics once and reuse them.
training_metrics = client.describe_model_versions(
    modelId=MODEL_NAME,
    modelVersionNumber='1.0',
    modelType='ONLINE_FRAUD_INSIGHTS',
    maxResults=10
)['modelVersionDetails'][0]['trainingResult']['trainingMetrics']

# NOTE(review): `auc` shadows the sklearn function imported earlier.
auc = training_metrics['auc']
df_model = pd.DataFrame(training_metrics['metricDataPoints'])

# ROC curve with reference lines at FPR = 0.02 and TPR = 0.73.
plt.figure(figsize=(10,10))
plt.plot(df_model["fpr"], df_model["tpr"], color='darkorange',
         lw=2, label='ROC curve (area = %0.3f)' % auc)
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title( MODEL_NAME + ' ROC Chart')
plt.legend(loc="lower right",fontsize=12)
plt.axvline(x = 0.02 ,linewidth=2, color='r')
plt.axhline(y = 0.73 ,linewidth=2, color='r')
plt.show()
```
### Test the fraud detector with a sample data record
Using the fraud detector client, invoke the model endpoint with a sample record and examine the results including fraud detection score
```
# Unique event id plus a fixed event timestamp for the sample prediction.
eventId = uuid.uuid1()
timestampStr = '2013-07-16T19:00:00Z'

# Construct a sample data record.
# Fix: the original dict literal listed 'user_agent' twice; Python silently
# keeps only the last occurrence, so the duplicate entry is removed here.
rec = {
    'ip_address': '36.72.99.64',
    'email_address': 'fake_bakermichael@example.net',
    'billing_state' : 'NJ',
    'user_agent' : 'Mozilla',
    'billing_postal' : '32067',
    'phone_number' :'703-989-7890',
    'billing_address' :'12351 Amanda Knolls Fake St'
}

# Score the sample event with version 1 of the deployed detector.
pred = client.get_event_prediction(detectorId=DETECTOR_NAME,
                                   detectorVersionId='1',
                                   eventId = str(eventId),
                                   eventTypeName = EVENT_TYPE,
                                   eventTimestamp = timestampStr,
                                   entities = [{'entityType': ENTITY_TYPE, 'entityId':str(eventId.int)}],
                                   eventVariables=rec)
pred
# Extract/print the model score
pred['modelScores'][0]['scores']['sample_fraud_detection_insightscore']
```
# Create Control Plane Resources
Create Control Plane Resources
Create Human Task UI
Create a human task UI resource, giving a UI template in liquid html. This template will be rendered to the human workers whenever human loop is required.
For over 70 pre built UIs, check: https://github.com/aws-samples/amazon-a2i-sample-task-uis.
Here we'll be constructing the following control plane resources: Human Task UI and Flow Definition, using the SageMaker CreateTaskUI and CreateFlowDefinition APIs, respectively.
These resources can be created once and used to drive any subsequent A2I human loops.
NOTE: The following template models a "Claim" - i.e. mark if a given claim is fraudulent, valid claim or needs further investigation
```
template="""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
<crowd-form>
<crowd-classifier
name="category"
categories="['Fradulent Claim', 'Valid Claim', 'Needs furthur Investigation']"
header="Select the most relevant category"
>
<classification-target>
<h3><strong>Risk Score (out of 1000): </strong><span style="color: #ff9900;">{{ task.input.score.sample_fraud_detection_insightscore }}</span></h3>
<hr>
<h3> Claim Details </h3>
<p style="padding-left: 50px;"><strong>Email Address : </strong>{{ task.input.taskObject.email_address }}</p>
<p style="padding-left: 50px;"><strong>Billing Address : </strong>{{ task.input.taskObject.billing_address }}</p>
<p style="padding-left: 50px;"><strong>Billing State : </strong>{{ task.input.taskObject.billing_state }}</p>
<p style="padding-left: 50px;"><strong>Billing Zip : </strong>{{ task.input.taskObject.billing_postal }}</p>
<p style="padding-left: 50px;"><strong>Originating IP : </strong>{{ task.input.taskObject.ip_address }}</p>
<p style="padding-left: 50px;"><strong>Phone Number : </strong>{{ task.input.taskObject.phone_number }}</p>
<p style="padding-left: 50px;"><strong>User Agent : </strong>{{ task.input.taskObject.user_agent }}</p>
</classification-target>
<full-instructions header="Claim Verification instructions">
<ol>
<li><strong>Review</strong> the claim application and documents carefully.</li>
<li>Mark the claim as valid or fraudulent</li>
</ol>
</full-instructions>
<short-instructions>
Choose the most relevant category that is expressed by the text.
</short-instructions>
</crowd-classifier>
</crowd-form>
"""
def create_task_ui(task_ui_name, template):
    """Create a Human Task UI resource from a liquid-HTML template.

    Returns:
        struct: HumanTaskUiArn
    """
    ui_template = {'Content': template}
    return sagemaker.create_human_task_ui(
        HumanTaskUiName=task_ui_name,
        UiTemplate=ui_template)
```
### Create an Augmented AI task UI
```
# Task UI name - this value is unique per account and region. You can also provide your own value here.
# A uuid suffix keeps repeated notebook runs from colliding on the name.
taskUIName = 'fraud'+ str(uuid.uuid1())

# Create task UI
humanTaskUiResponse = create_task_ui(taskUIName, template)
humanTaskUiArn = humanTaskUiResponse['HumanTaskUiArn']
print(humanTaskUiArn)
```
# Create the Flow Definition
In this section, we're going to create a flow definition. Flow Definitions allow us to specify:
The workforce that your tasks will be sent to.
The instructions that your workforce will receive. This is called a worker task template.
The configuration of your worker tasks, including the number of workers that receive a task and time limits to complete tasks.
Where your output data will be stored.
This demo is going to use the API, but you can optionally create this workflow definition in the console as well.
For more details and instructions, see: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html.
```
# All A2I human-review results land under this S3 prefix.
OUTPUT_PATH = f's3://{BUCKET}/a2i-results'
def create_flow_definition(flow_definition_name):
    """Create an A2I Flow Definition (human review workflow) that routes one
    task per loop to WORKTEAM_ARN using the task UI created above, writing
    results to OUTPUT_PATH.

    Returns:
        struct: FlowDefinitionArn
    """
    human_loop_config = {
        "WorkteamArn": WORKTEAM_ARN,
        "HumanTaskUiArn": humanTaskUiArn,
        "TaskCount": 1,
        "TaskDescription": "Please review the claim data and flag for potential fraud",
        "TaskTitle": "Review and Approve / Reject claim."
    }
    output_config = {"S3OutputPath": OUTPUT_PATH}
    response = sagemaker.create_flow_definition(
        FlowDefinitionName=flow_definition_name,
        RoleArn=ROLE,
        HumanLoopConfig=human_loop_config,
        OutputConfig=output_config)
    return response['FlowDefinitionArn']
# Flow definition name - this value is unique per account and region. You can also provide your own value here.
#uniqueId = str(uuid.uuid4())
# Millisecond timestamp keeps repeated runs from colliding on the name.
uniqueId = str(int(round(time.time() * 1000)))
flowDefinitionName = f'fraud-detector-a2i-{uniqueId}'
#flowDefinitionName = 'fraud-detector-a2i'

flowDefinitionArn = create_flow_definition(flowDefinitionName)
print(flowDefinitionArn)
```
# Starting Human Loops
Now that we have setup our Flow Definition, we are ready to call our Amazon Fraud detector and start our human loops.
In this tutorial, we are interested in starting a HumanLoop only if the fraud risk score returned by our model is greater than the risk threshold of 900.
When that condition is met, we will kick off a HumanLoop to engage our workforce for a human review.
# Start a human loop if the model risk score exceeds a certain threshold
```
OUTPUT_PATH = f's3://{BUCKET}/a2i-results'

# Model risk score (0-1000) produced for the sample record earlier.
FraudScore= pred['modelScores'][0]['scores']['sample_fraud_detection_insightscore']
print(FraudScore)

## SET YOUR OWN THRESHOLD HERE
SCORE_THRESHOLD = 900

# Only scores above the threshold trigger a human review loop.
if FraudScore > SCORE_THRESHOLD :
    # Create the human loop input JSON object
    # (shape must match the template: task.input.score / task.input.taskObject)
    humanLoopInput = {
        'score' : pred['modelScores'][0]['scores'],
        'taskObject': rec
    }
    print(json.dumps(humanLoopInput))
    humanLoopName = 'Fraud-detector-' + str(int(round(time.time() * 1000)))
    print('Starting human loop - ' + humanLoopName)
    response = a2i_runtime_client.start_human_loop(
        HumanLoopName=humanLoopName,
        FlowDefinitionArn= flowDefinitionArn,
        HumanLoopInput={
            'InputContent': json.dumps(humanLoopInput)
        }
    )
```
### Check Status of Human Loop
```
# List every human loop spawned by this flow definition and show its status.
all_human_loops_in_workflow = a2i_runtime_client.list_human_loops(FlowDefinitionArn=flowDefinitionArn)['HumanLoopSummaries']

for human_loop in all_human_loops_in_workflow:
    print(f'\nHuman Loop Name: {human_loop["HumanLoopName"]}')
    print(f'Human Loop Status: {human_loop["HumanLoopStatus"]} \n')
    print('\n')
```
# Wait For Workers to Complete Task
Since we are using a private workteam, we should go to the labeling UI to perform the inspection ourselves.
```
# The work team name is the last path segment of its ARN.
workteamName = WORKTEAM_ARN[WORKTEAM_ARN.rfind('/') + 1:]
print("Navigate to the private worker portal and do the tasks. Make sure you've invited yourself to your workteam!")
# `sagemaker` here is the boto3 SageMaker client created earlier.
print('https://' + sagemaker.describe_workteam(WorkteamName=workteamName)['Workteam']['SubDomain'])
```
### Check Status of Human Loop
```
# Re-poll loop statuses, collecting the names of loops workers have finished.
all_human_loops_in_workflow = a2i_runtime_client.list_human_loops(FlowDefinitionArn=flowDefinitionArn)['HumanLoopSummaries']

completed_loops = []
for human_loop in all_human_loops_in_workflow:
    print(f'\nHuman Loop Name: {human_loop["HumanLoopName"]}')
    print(f'Human Loop Status: {human_loop["HumanLoopStatus"]} \n')
    print('\n')
    if human_loop['HumanLoopStatus'] == 'Completed':
        completed_loops.append(human_loop['HumanLoopName'])
print(completed_loops)
```
### View Task Results
Once work is completed, Amazon A2I stores results in your S3 bucket and sends a Cloudwatch event. Your results should be available in the S3 OUTPUT_PATH when all work is completed.
```
import re
import pprint

# Pretty-printer used to display the worker answers below.
pp = pprint.PrettyPrinter(indent=2)
def retrieve_a2i_results_from_output_s3_uri(bucket, a2i_s3_output_uri):
    '''
    Gets the json file published by A2I and returns a deserialized object.

    Fix: the original stripped the "s3://<bucket>/" prefix with re.split,
    which interprets the bucket name as a regular expression (a '.' in the
    name would match any character, and metacharacters like '+' would
    raise). Exact string slicing is correct and simpler.
    '''
    prefix = 's3://' + bucket + '/'
    if not a2i_s3_output_uri.startswith(prefix):
        raise ValueError(f'output URI {a2i_s3_output_uri!r} is not in bucket {bucket!r}')
    output_bucket_key = a2i_s3_output_uri[len(prefix):]
    response = s3.get_object(Bucket=bucket, Key=output_bucket_key)
    content = response["Body"].read()
    return json.loads(content)
# For each completed loop, show its status and fetch the worker answers JSON
# that A2I wrote under OUTPUT_PATH.
for human_loop_name in completed_loops:
    describe_human_loop_response = a2i_runtime_client.describe_human_loop(
        HumanLoopName=human_loop_name
    )
    print(f'\nHuman Loop Name: {describe_human_loop_response["HumanLoopName"]}')
    print(f'Human Loop Status: {describe_human_loop_response["HumanLoopStatus"]}')
    print(f'Human Loop Output Location: : {describe_human_loop_response["HumanLoopOutput"]["OutputS3Uri"]} \n')
    # Uncomment below line to print out a2i human answers
    pp.pprint(retrieve_a2i_results_from_output_s3_uri(BUCKET, describe_human_loop_response['HumanLoopOutput']['OutputS3Uri']))
```
# Cleanup
To avoid incurring unnecessary charges, delete the resources used in this
walkthrough when not in use. For instructions, see the following:
How do I delete an S3 Bucket? https://docs.aws.amazon.com/AmazonS3/latest/user-guide/delete-bucket.html
Delete a Flow Definition https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-delete-flow-definition.html
Cleanup: SageMaker Resources https://sagemaker-workshop.com/cleanup/sagemaker.html
Delete Amazon Fraud detector resources https://docs.aws.amazon.com/frauddetector/latest/ug/delete-detector.html
| github_jupyter |
# analyzing dense output
Timothy Tyree<br>
3.26.2021
```
# darkmode=True
from lib.my_initialization import *
# For darkmode plots
from jupyterthemes import jtplot
jtplot.style(theme='monokai', context='notebook', ticks=True, grid=False)
```
## plot the collision times
```
#load the example data
# os.chdir(nb_dir)
# data_dir='data/out.csv'
# assert ( os.path.exists(data_dir) )
# df=pd.read_csv(data_dir,lineterminator=None)

#load a log file
input_folder=f"{nb_dir}/data"
input_fn='example.log'
os.chdir(input_folder)
df=parse_output_log(input_fn, include_inputs=False,printing=False)
Ntrials=df.index.values.shape[0]
# Drop the first Nmin columns (early/warm-up entries, named '0'..'10').
Nmin=11
df.drop(columns=[f'{i}' for i in range(Nmin)], inplace=True)
#any duds still there? (values below -9000 appear to be sentinel/failed runs
# — TODO confirm the sentinel convention against the simulation code)
assert ( not (df.values<-9000).any() )
df.head()

# Per-column summary statistics (quartiles, mean, std) across trials.
dfd=df.describe().T.drop(columns=['count'])
dfd.head()

#extract the values
# column labels are numeric strings; eval() turns them into numbers
# (assumes a trusted, locally generated log)
x_values=np.array([eval(x) for x in dfd.index.values])
y_values=np.array(dfd['50%'].values)
yerr1_values=np.array(dfd['25%'].values)
yerr2_values=np.array(dfd['75%'].values)
y2_values=np.array(dfd['mean'].values)
y2err1_values=y2_values-np.array(dfd['std'].values)
y2err2_values=y2_values+np.array(dfd['std'].values)

#plot the values: median+IQR (right) and mean±std (left) collision times
figsize=(12,6)
fontsize=18
saving=True
savefig_folder=f'{nb_dir}/../fig'
savefig_fn='colltimes.png'
fig, axs = plt.subplots(ncols=2,figsize=figsize)
ax=axs[1]
ax.fill_between(x_values,yerr1_values,yerr2_values,alpha=0.3)
ax.plot(x_values,y_values,lw=2)
ax.set_xlabel('n', fontsize=fontsize)
ax.set_ylabel(r'time to first collision (sec)', fontsize=fontsize)
ax.tick_params(axis='both', which='major', labelsize=fontsize)
ax.tick_params(axis='both', which='minor', labelsize=0)
ax.set_title('median', fontsize=fontsize)
ax=axs[0]
ax.fill_between(x_values,y2err1_values,y2err2_values,alpha=0.3)
ax.plot(x_values,y2_values,lw=2)
ax.set_xlabel('n', fontsize=fontsize)
ax.set_ylabel(r'time to first collision (sec)', fontsize=fontsize)
ax.tick_params(axis='both', which='major', labelsize=fontsize)
ax.tick_params(axis='both', which='minor', labelsize=0)
ax.set_title('mean', fontsize=fontsize)
fig.tight_layout()
if not saving:
    plt.show()
else:
    plt.tight_layout()
    os.chdir(savefig_folder)
    plt.savefig(savefig_fn, dpi=300)
    print(f"saved figure in \n\t{savefig_fn}")
# plt.close()
```
## plot the collision rates
```
#plot the values: collision *rates* as the reciprocal of the times above.
# Note the quartile bounds swap when reciprocating (1/q75 <= 1/q50 <= 1/q25).
figsize=(12,6)
fontsize=18
saving=True
savefig_folder=f'{nb_dir}/../fig'
savefig_fn='collrates.png'
fig, axs = plt.subplots(ncols=2,figsize=figsize)
ax=axs[1]
ax.fill_between(x_values,1/yerr2_values,1/yerr1_values,alpha=0.3)
ax.plot(x_values,1/y_values,lw=2)
ax.set_xlabel('n', fontsize=fontsize)
ax.set_ylabel(r'$W_{{-2}}$ (1/sec)', fontsize=fontsize)
ax.tick_params(axis='both', which='major', labelsize=fontsize)
ax.tick_params(axis='both', which='minor', labelsize=0)
ax.set_title('median', fontsize=fontsize)
ax=axs[0]
# Reciprocal of mean±std bands is not a meaningful error band, hence disabled:
# ax.fill_between(x_values,1/y2err2_values,1/y2err1_values,alpha=0.3)
ax.plot(x_values,1/y2_values,lw=2)
ax.set_xlabel('n', fontsize=fontsize)
ax.set_ylabel(r'$W_{{-2}}$ (1/sec)', fontsize=fontsize)
ax.tick_params(axis='both', which='major', labelsize=fontsize)
ax.tick_params(axis='both', which='minor', labelsize=0)
# ax.set_ylim([0,1])
ax.set_title('mean', fontsize=fontsize)
fig.tight_layout()
if not saving:
    plt.show()
else:
    plt.tight_layout()
    os.chdir(savefig_folder)
    plt.savefig(savefig_fn, dpi=300)
    print(f"saved figure in \n\t{savefig_fn}")
# plt.close()
```
## Results
collision rates/times are being generated with uncertainty that cannot be reliably quantified in terms of mean and standard deviation. Therefore, output logs must include dense output
- DONE: dev parsing output logs
- DONE: generate run_1.dat and run_test.dat
```
def parse_output_log(input_fn,include_inputs=True,printing=False):
    """Parse a simulation output log into a ``pandas.DataFrame``.

    The log is expected to contain a ``Printing Inputs...`` section of
    ``name=value,`` lines followed by a ``Printing Outputs...`` section
    holding CSV rows (each row ends with a trailing comma, which makes
    ``read_csv`` emit a spurious ``Unnamed: ...`` final column).

    Parameters
    ----------
    input_fn : str
        Path to the log file.
    include_inputs : bool
        If True, broadcast each parsed input parameter into a constant
        column of the returned frame.
    printing : bool
        If True, report progress to stdout.

    Returns
    -------
    pandas.DataFrame
        The output table, with the spurious last column dropped and
        (optionally) the input parameters appended as columns.

    Raises
    ------
    ValueError
        If either section marker is missing (previously this surfaced
        as a confusing ``NameError``).
    """
    import ast
    trgt1='Printing Inputs...\n'
    trgt2='Printing Outputs...\n'
    # single pass over the file; remember where each section starts
    with open(input_fn) as f:
        lines=f.readlines()
    n_input=None
    n_output=None
    for n,line in enumerate(lines):
        if trgt1 == line:
            if printing:
                print(f'found inputs starting after line {n}')
            n_input=n
        if trgt2 == line:
            if printing:
                print(f'found outputs starting after line {n}')
            n_output=n
    if n_input is None or n_output is None:
        raise ValueError(f"could not locate input/output section markers in {input_fn}")
    # NOTE: the slice stops at n_output-1, i.e. it assumes one separator
    # line sits between the last input line and the outputs marker
    inputs=lines[n_input+1:n_output-1]
    col_name_lst=[]
    col_value_lst=[]
    for line in inputs:
        string=line.split(' ')[-1]
        eid=string.find('=')
        if eid!=-1:
            col_name=string[:eid]
            # the value text excludes the trailing ',\n'; parse plain
            # literals safely, only falling back to eval() for any
            # non-literal expressions the legacy logs might contain
            value_str=string[eid+1:-2]
            try:
                col_value=ast.literal_eval(value_str)
            except (ValueError, SyntaxError):
                col_value=eval(value_str)  # legacy fallback -- trusted logs only
            col_name_lst.append(col_name)
            col_value_lst.append(col_value)
    # header=n_output-2 relies on read_csv skipping blank lines when
    # counting header rows (the logs contain blank separator lines)
    df=pd.read_csv(input_fn,header=n_output-2)
    #drop that 'Unnamed: {Nmax}' column caused by the trailing comma
    df.drop(columns=[df.columns[-1]], inplace=True)
    if include_inputs:
        if printing:
            print("input parameters were:")
            print(col_name_lst)
            print(col_value_lst)
            print("returning outputs as pandas.DataFrame instance")
        for name,value in zip ( col_name_lst, col_value_lst):
            df[name]=value
    return df
# smoke-test the parser on the bundled example log
input_folder=f"{nb_dir}/data"
input_fn='example.log'
os.chdir(input_folder)
df=parse_output_log(input_fn, include_inputs=True,printing=True)
df.head()
```
## generate run_1.dat
```
# Count the total number of jobs in the run_1.dat parameter sweep.
# NOTE(review): A_values/L_values are defined twice; the first pair is
# shadowed by the (identical) second pair below.
A_values=np.array([20.25,25,39,50,56.25,100,156.25,189])[::-1]
L_values=np.sqrt(A_values)
L_values
niter=250 #trials per worker
r_values=np.array([0.1,0.2,0.3,0.4,0.5,1.0])#cm
D_values=np.array([0.25,0.5,0.75,1.0,1.25,1.5,1.75,2.0])#cm^2/s
A_values=np.array([20.25,25,39,50,56.25,100,156.25,189])[::-1]#cm^2
L_values=np.sqrt(A_values)#cm
kappa_values=np.array([1,10,100])#1/s
num_trials_per_setting=6
#iterate over settings, scheduling the longest jobs first
count=0
for r in r_values:
    for D in D_values:
        for L in L_values:
            for kappa in kappa_values:
                num_trials=0
                while num_trials<num_trials_per_setting:
                    num_trials+=1
                    count=count+1
# total number of scheduled jobs
print(count)
```
## debugging log parsing
```
# Scratch cell: the body of parse_output_log unrolled at top level so the
# intermediate variables can be inspected interactively.
input_fn='/home/timothytyree/Documents/GitHub/bgmc/python/data/osg_output/Log/job.out.8623404.5130'
# def parse_output_log(input_fn,
include_inputs=True,
printing=True
# ):
with open(input_fn) as f:
    trgt1='Printing Inputs...\n'
    trgt2='Printing Outputs...\n'
    for n,line in enumerate(f):
        if trgt1 == line:
            if printing:
                print(f'found inputs starting after line {n}')
            n_input=n
        if trgt2 == line:
            if printing:
                print(f'found outputs starting after line {n}')
            n_output=n
with open(input_fn) as f:
    inputs=f.readlines()[n_input+1:n_output-1]
col_name_lst=[]
col_value_lst=[]
for line in inputs:
    string=line.split(' ')[-1]
    # NOTE(review): this debug copy slices [eid+1:-1] and uses
    # header=n_output-1, while parse_output_log uses [eid+1:-2] and
    # header=n_output-2 -- confirm which offset matches the log format
    eid=string.find('=')
    if eid!=-1:
        col_name=string[:eid]
        col_value=eval(string[eid+1:-1])
        col_name_lst.append(col_name)
        col_value_lst.append(col_value)
df=pd.read_csv(input_fn,header=n_output-1)
#drop that 'Unnamed: {Nmax}' column
df.drop(columns=[df.columns[-1]], inplace=True)
if include_inputs:
    if printing:
        print("input parameters were:")
        print(col_name_lst)
        print(col_value_lst)
        print("returning outputs as pandas.DataFrame instance")
    for name,value in zip ( col_name_lst, col_value_lst):
        df[name]=value
# return df
# inspect the last parsed input line with both slice offsets
eid=string.find('=')
if eid!=-1:
    col_name=string[:eid]
    col_value=eval(string[eid+1:-2])
    col_name_lst.append(col_name)
    col_value_lst.append(col_value)
string[eid+1:-1]
```
# (not meant for this ipynb) analyze manual output
```
#preliminary?
# Manually pasted, descending tails of first-collision times (sec) for two
# runs -- presumably without/with forces; TODO confirm provenance.
arr1=np.array([6.62549e-05,6.329412e-05,6.154902e-05,5.390196e-05,4.927451e-05,4.609804e-05,4.245098e-05,4.035294e-05,3.492157e-05,2.927451e-05,2.776471e-05,2.539216e-05,2.288235e-05,1.94902e-05,1.717647e-05,1.590196e-05,1.413725e-05,1.362745e-05,1.109804e-05,1.011765e-05,9.235294e-06,8.941176e-06,7.941176e-06,7.039216e-06,6.313725e-06,6.176471e-06,6.078431e-06,6e-06,5.823529e-06,5.45098e-06,4.529412e-06,4.509804e-06,4.352941e-06,4.313725e-06,4e-06,3.921569e-06,3.862745e-06,3.745098e-06,3.588235e-06,3.411765e-06,3.333333e-06,2.627451e-06,2.529412e-06,2.490196e-06,2.313725e-06,2.313725e-06,2.294118e-06,2.27451e-06,2.254902e-06,2.078431e-06,1.960784e-06,1.803922e-06,1.72549e-06,1.705882e-06,1.647059e-06,1.627451e-06,1.588235e-06,1.568627e-06,1.568627e-06,1.529412e-06,1.45098e-06,1.45098e-06,1.45098e-06,1.45098e-06,1.45098e-06,1.431373e-06,1.431373e-06,1.431373e-06,1.431373e-06,1.431373e-06,1.411765e-06,1.392157e-06,1.392157e-06,1.352941e-06,1.333333e-06,1.333333e-06,1.313725e-06,1.313725e-06,1.27451e-06,1.215686e-06,1.215686e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.137255e-06,1.137255e-06,1.137255e-06,1.137255e-06,1.137255e-06,1.137255e-06,1.117647e-06,1.098039e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.058824e-06,1.058824e-06])
arr2=np.array([4.143137e-05,4.027451e-05,4.027451e-05,4.015686e-05,3.792157e-05,3.439216e-05,2.94902e-05,2.560784e-05,2.513725e-05,2.264706e-05,2.058824e-05,1.770588e-05,1.670588e-05,1.55098e-05,1.523529e-05,1.347059e-05,1.168627e-05,1.009804e-05,9.529412e-06,9.058824e-06,8.431373e-06,8e-06,7.666667e-06,7.666667e-06,7.215686e-06,6.509804e-06,5.901961e-06,5.666667e-06,5e-06,4.627451e-06,4.156863e-06,3.960784e-06,3.784314e-06,3.72549e-06,3.607843e-06,3.529412e-06,3.529412e-06,3.333333e-06,3.313725e-06,3.294118e-06,3.039216e-06,2.980392e-06,2.745098e-06,2.490196e-06,2.352941e-06,2.333333e-06,2.333333e-06,2.333333e-06,2.235294e-06,2.215686e-06,2.137255e-06,2.039216e-06,1.960784e-06,1.921569e-06,1.882353e-06,1.882353e-06,1.862745e-06,1.803922e-06,1.72549e-06,1.705882e-06,1.666667e-06,1.490196e-06,1.333333e-06,1.333333e-06,1.313725e-06,1.294118e-06,1.27451e-06,1.27451e-06,1.254902e-06,1.254902e-06,1.254902e-06,1.254902e-06,1.254902e-06,1.254902e-06,1.235294e-06,1.235294e-06,1.235294e-06,1.215686e-06,1.215686e-06,1.215686e-06,1.196078e-06,1.176471e-06,1.156863e-06,1.156863e-06,1.117647e-06,1.117647e-06,1.117647e-06,1.117647e-06,1.117647e-06,1.117647e-06,1.117647e-06,1.098039e-06,1.098039e-06,1.058824e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.019608e-06,1.019608e-06,1.019608e-06,1.019608e-06,1.019608e-06,1.019608e-06,1.019608e-06])
# test-3-7.input all equal :( niter=30
# First-collision times (sec) for the niter=30 run of test-3-7.input.
# FIX(review): in the original cell this identical literal was pasted four
# times (arr1, arr2, arr1, arr2) and one number had been corrupted by a
# line wrap ("1.06666" / "7e-05" -> 1.066667e-05), which made the cell a
# SyntaxError. Build the array once and copy it, preserving the final
# state (arr1 == arr2, distinct objects).
arr1=np.array([0.00263,0.001522,0.002293333,0.002127857,0.00177,0.001630476,0.001752917,0.001701538,0.001588077,0.001218889,0.001012857,0.0008446429,0.0007289655,0.0006366667,0.0005946667,0.000502,0.0004183333,0.000389,0.000378,0.0003736667,0.000344,0.00033,0.0003193333,0.0003073333,0.000291,0.0002713333,0.000257,0.0002026667,0.000162,0.0001593333,0.0001453333,0.0001386667,0.0001386667,0.0001386667,0.00012,0.0001093333,0.000109,0.0001063333,0.0001036667,9.266667e-05,8.633333e-05,8.633333e-05,8.566667e-05,8.1e-05,5.766667e-05,4.733333e-05,3.833333e-05,3.833333e-05,3.833333e-05,3.833333e-05,3.533333e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.266667e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.766667e-05,2.766667e-05,2.7e-05,2.633333e-05,2.633333e-05,2.566667e-05,2.533333e-05,2.5e-05,2.5e-05,2.5e-05,2.5e-05,2.5e-05,2.4e-05,2.4e-05,2.366667e-05,2.333333e-05,2.333333e-05,2.166667e-05,2.1e-05,2.1e-05,2.1e-05,2.1e-05,2.033333e-05,2e-05,1.966667e-05,1.966667e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.733333e-05,1.733333e-05,1.666667e-05,1.666667e-05,1.633333e-05,1.633333e-05,1.6e-05,1.6e-05,1.6e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.533333e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.4e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.233333e-05,1.2e-05,1.133333e-05,1.133333e-05,1.133333e-05,1.133333e-05,1.1e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05])
arr2=arr1.copy()
# (each leading with -9999,-9999,-9999,-9999,-9999,-9999,-9999,-9999,)
# test-3-7.input all equal :( niter=3
# NOTE(review): arr1 and arr2 are identical pastes here, so the two curves
# plotted below will coincide.
arr1=np.array([0.0013,0.00164,0.00164,0.00164,0.001596667,0.00137,0.00068,0.00068,0.00068,0.00068,0.00068,0.00068,0.00068,0.0006433333,0.0006433333,0.0006433333,0.0004233333,0.0004233333,0.0004233333,0.0004233333,0.0004233333,0.0002966667,0.0002966667,0.0002966667,0.0002333333,0.0002333333,0.0001666667,0.0001666667,0.0001666667,0.0001666667,0.0001666667,0.0001166667,0.0001166667,0.0001166667,0.0001166667,4.666667e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05])
arr2=np.array([0.0013,0.00164,0.00164,0.00164,0.001596667,0.00137,0.00068,0.00068,0.00068,0.00068,0.00068,0.00068,0.00068,0.0006433333,0.0006433333,0.0006433333,0.0004233333,0.0004233333,0.0004233333,0.0004233333,0.0004233333,0.0002966667,0.0002966667,0.0002966667,0.0002333333,0.0002333333,0.0001666667,0.0001666667,0.0001666667,0.0001666667,0.0001666667,0.0001166667,0.0001166667,0.0001166667,0.0001166667,4.666667e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05])
navg=1
x1_values=np.arange(11,11+arr1.size-navg)
x2_values=np.arange(11,11+arr2.size-navg)
#take a moving average
# NOTE(review): with navg=1 this is the reciprocal of the sum of adjacent
# pairs, i.e. 1/(2 * two-point moving average) -- confirm intent
y1_values=1./(arr1[navg:]+arr1[:-navg])
y2_values=1./(arr2[navg:]+arr2[:-navg])
```
I turned off forces, but the effect on collision times was zero to machine precision...
^this means either (i) I'm not in a parameter regime where collision times are significantly affected by forces or (ii) there's a bug in my code
```
# Compare W_{-2} vs N for the two runs on log-log axes.
fontsize=18
plt.plot(x1_values, y1_values,lw=2,label='unbiased random walk')
plt.plot(x2_values, y2_values,lw=2,label='attractive forces')
plt.xscale('log')
plt.yscale('log')
# plt.xlim([1e-0,1e3])
# plt.ylim([1e4,6e6])
# plt.title('attractive forces increased exponent\n',fontsize=fontsize)
# plt.title(u'comparison to simulation\nwith two hybrid modes',fontsize=fontsize)
plt.xlabel(r'N',fontsize=fontsize)
plt.ylabel(r'$W_{-2}$ (Hz)', fontsize=fontsize)
# plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
# plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-5,ncol=1,loc='upper left')
plt.show()
# Commented-out scratch work (odeint experiment), kept for reference.
# from numpy import linspace
# from scipy.integrate import odeint
# #you can choose a better guess using fsolve instead of 0
# def integral(y, _, F_l, M, cache=[0]):
#     v, preva = y[1], cache[0]
#     #use value for 'a' from the previous timestep
#     F_r = (((1 - preva) / 3) ** 2 + (2 * (1 + preva) / 3) ** 2) * v
#     #calculate the new value
#     a = (F_l - F_r) / M
#     cache[0] = a
#     return [v, a]
# y0 = [0, 5]
# time = linspace(0., 10., 1000)
# F_lon = 100.
# mass = 1000.
# dydt = odeint(integral, y0, time, args=(F_lon, mass))
# plt.scatter(x=time,y=time*(10-time),c=time,cmap='jet')
# # plt.scatter(x=time,y=dydt[:,0],c=time,cmap='jet')
# # plt.scatter(x=time,y=dydt[:,1])
# # plt.xscale('log')
# # plt.yscale('log')
# # plt.xlim([1e-1,1e1])
# # plt.ylim([1e4,6e4])
# # plt.title('accelerated attraction/annihilation trials only')
# plt.show()
# bare literal below is a notebook no-op (echoes 1 as the cell output)
1
#load the example data
# os.chdir(nb_dir)
# data_dir='data/out.csv'
# assert ( os.path.exists(data_dir) )
# df=pd.read_csv(data_dir,lineterminator=None)
#load a log file
input_folder=f"{nb_dir}/data"
input_fn='example.log'
os.chdir(input_folder)
df=parse_output_log(input_fn, include_inputs=False,printing=False)
Ntrials=df.index.values.shape[0]
Nmin=11
# drop the columns for N < Nmin (column labels are stringified integers)
df.drop(columns=[f'{i}' for i in range(Nmin)], inplace=True)
#any duds still there? (-9999 is the sentinel for failed trials)
assert ( not (df.values<-9000).any() )
df.head()
```
# notes/eqns for ou process
Suppose we have two particles that have been nearest neighbors since time, $t=0$.
Let $X_t=$ the distance between two particles at time $t$. We may then model $X_t$ reverting to (from) some preferred distance, $x_0\ge0$ by setting $\varkappa>0$ ($\varkappa<0$) in
$$
dX_t=\varkappa(x_0-X_t)dt+\sqrt{2D}dW_t.
$$
For $\varkappa>0$, the expected distance is
$$
\mathbb{E}[X_t]=X_0e^{-\varkappa t} + x_0(1-e^{-\varkappa t})
$$
and the covariance is
$$
\text{cov}(x_s,x_t)=\frac{D}{\varkappa}\Big(e^{-\varkappa |t-s|}-e^{-\varkappa (t+s)}\Big).
$$
# DONT: dev add-result.pl
# TODO: dev routine that uses dask to do a sweep over N
HINT: descend from Nmax to Nmin
```
#######################################################
### TODO: implement in PYTHON (and later) with DASK ###
#######################################################
#TODO: open a file in append mode
#TODO: print N, from Nmax descending to $Nmin
#TODO: write params.input for this Ntips
#TODO: system(./xrun < params.input)
#TODO: change params.input to params/params_N_{N}.input
#TODO: do all ^this in dask, but not with a daskbag... this should update a preallocated vector as tasks are finished
#HINT: preallocation means save in current increasing N format
# #DONT(do perl stuff in python): initialize line
# i=37
# string.split(',')[i]
#TODO(goal): get item_lst from a daskbag for item = (n,Tavg)
# sweep N from Nmax down to Nmin (longest jobs first)
Nmax=60;Nmin=6
n_values=np.arange(Nmax,Nmin,-1)
n_values
#template arguments for simulation
x=np.array([0.1, 2, 5, 500, 0., 0., 1e-5, 1e-5, 8, 500, 1234, 0, 0, 0, 0])
log_dir="/home/timothytyree/Documents/GitHub/bgmc/c/ou"
os.chdir(log_dir)
def routine(n):
fn_out=f"Log/out_n_{n}.output"
#TODO: integrate return_CollTime with routine
os.system(f"/return_CollTime.x < 1-control.input | grep 'Tavg=' | grep -Eo '[+-]?[0-9]+([.][0-9]+)?' > Log/1-control.output")
# os.system(f"./xrun.sh ${x[0]} ${x[1]} ${x[2]} ${x[3]} ${x[4]} ${x[5]} ${x[6]} ${x[7]} {n} ${x[9]} ${x[10]} ${x[11]} > {fn_out}")
#TODO: parse fn_out for Tavg
retval=os.system(f'grep "Tavg=" {fn_out}')#' #| grep -Eo "[+-]?[0-9]+([.][0-9]+)?"')
# retval=os.system(f'grep "Tavg=" {fn_out} #| grep -Eo "[+-]?[0-9]+([.][0-9]+)?"')
#TODO: return Tavg
return retval
# smoke-test the routine for a single n, then enumerate the second
# (OU-process) parameter sweep, printing one argument line per trial
n=32
# os.system(f"./xrun.sh ${x[0]} ${x[1]} ${x[2]} ${x[3]} ${x[4]} ${x[5]} ${x[6]} ${x[7]} {n} ${x[9]} ${x[10]} ${x[11]} > a.out")
routine(n)
!ls
import numpy as np
niter=1500 #trials per worker
r_values=np.array([0.1,1.0])#,0.2,0.3,0.4,0.5,.6,.7,.8,.9,1.,2.])#cm
D_values=np.array([2.,20.])#0.2,1.0,1.5,2.0,3.,4.,5.])#cm^2/s
A_values=np.array([25.])#20.25,25,39,50,56.25,100,156.25,189,250])[::-1]#cm^2
L_values=np.sqrt(A_values)#cm
kappa_values=np.array([500,1500])#5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,250,500])#1/s
varkappa_values=np.array([-5,-1.5,-0.2,0,0.2,1.5,5])#1/s
x0_values=np.array([0.,0.1,0.2,0.3,0.4,0.5,1.0])#cm
Dt_values=np.array([1e-5,1e-4])#10**-i for i in range(6)])
dt=1e-5
Nmax=100
num_trials_per_setting=1
reflect_values=np.array([0])
set_second_values=np.array([0])
no_repulsion_values=np.array([0,1])
no_attraction_values=np.array([0,1])
#iterate over settings, scheduling the longest jobs first
count=0
for set_second in set_second_values:
    for reflect in reflect_values:
        for r in r_values:
            for D in D_values:
                for L in L_values:
                    for kappa in kappa_values:
                        for varkappa in varkappa_values:
                            for x0 in x0_values:
                                for Dt in Dt_values:
                                    for no_repulsion in no_repulsion_values:
                                        for no_attraction in no_attraction_values:
                                            num_trials=0
                                            while num_trials<num_trials_per_setting:
                                                num_trials+=1
                                                count=count+1
                                                # one argument line per trial, in xrun's expected order
                                                print(f"{r} {D} {L} {kappa} {varkappa} {x0} {Dt} {dt} {Nmax} {niter} {reflect} {set_second} {no_repulsion} {no_attraction}")
# print(count)
```
| github_jupyter |
# Approximating Steel Cased Wells - DC
[Lindsey Heagy](http://github.com/lheagy)
In this example, we examine the impact of upscaling the well using
- the assumption that the well is a solid rod of steel
- averaging conductivity such that the $\sigma A$ is the same in both cases
These experiments are conducted at DC. The initial model we consider is based on that shown in Um, 2015, *Finite element modelling of transient electromagnetic fields near steel-cased wells* https://doi.org/10.1093/gji/ggv193
### Reproducing these results
To run these examples, you need to have installed
- [discretize](http://discretize.simpeg.xyz) (branch: `feat/3Dcylmesh`)
```
pip install git+https://github.com/simpeg/discretize.git@feat/3Dcylmesh
```
- [SimPEG](http://docs.simpeg.xyz) (branch: `em/feat/galvanic-hj`)
```
pip install git+https://github.com/simpeg/simpeg.git@em/feat/galvanic-hj
```
- [casingSimulations](https://github.com/lheagy/casingSimulations) (branch: `master`)
```
pip install git+https://github.com/lheagy/casingSimulations.git
```
- [pymatsolver](https://github.com/rowanc1/pymatsolver)
```
pip install pymatsolver
```
SimPEG and the implementation for electrical and electromagnetic methods is described in ([Cockett et al., 2015](https://doi.org/10.1016/j.cageo.2015.09.015) and [Heagy et al., 2017](https://arxiv.org/abs/1610.00804)).
```
import discretize
from discretize import utils
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from scipy.constants import mu_0, inch, foot
import ipywidgets
from SimPEG.EM import TDEM
from SimPEG import Utils, Maps
import casingSimulations as casingSim
from pymatsolver import Pardiso
%matplotlib inline
```
## Parameters
To examine the impact of replacing the steel cased well with a solid well, I have run a few simulations using SimPEG and simulating the DC resistivity equations using a finite volume approach on a 3D cylindrical mesh.
The model setup follows Um (2015), cited above. The electrical conductivities used are:
- Casing $1\times 10^6$ S/m
- Air $3\times10^{-7}$ S/m
- Background $3\times10^{-2}$ S/m
- Inside the casing - same as background
In the first example, the well is 200m long. The thickness of the casing is 12mm. Here, we are interested in examining a top casing source, where one electrode is connected to the top of the casing and a return electrode is some radial distance away on the surface. In the following examples, we will put the return electrode a distance of 2x the length of the well (for the first example, it is 400 m away). For data, we measure the radial electric field along a line $90^\circ$ from the source wire, starting from the well and measuring out to a distance of 400m away.
### Experiments
From the base example, there are 3 that are setup, you can select one by changing the `experiment` variable or setting your own values for `casing_l`, `sigma_back` and `sigma_air`.
```
simDir = 'DC_approximations' # create a simulation directory where results can be saved.
# casing parameters by experiment
experiments = {
1: {'casing_l': 200, 'sigma_back': 3e-2},
2: {'casing_l': 200, 'sigma_back': 3e-1},
3: {'casing_l': 1000, 'sigma_back': 3e-2},
}
```
### select an experiment
```
# Select one of the predefined experiments and build the casing model.
experiment = 3
casing_l = experiments[experiment]['casing_l']
sigma_back = experiments[experiment]['sigma_back']
print(
    'Setting up experiment {}. \nThe casing is {}m long, and the '
    'conductivity of the background is {} S/m'.format(
        experiment, casing_l, sigma_back
    )
)
# NOTE(review): the text above this cell says the casing is 12mm thick,
# but the value used here is 10mm -- confirm which is intended
casing_t = 10e-3 # 10mm thick casing
casing_d = 100e-3 # 10cm diameter
model = casingSim.model.CasingInHalfspace(
    directory = simDir,
    sigma_casing = 1e6, # conductivity of the casing (S/m)
    sigma_back = sigma_back, # conductivity of the background (S/m)
    sigma_inside = sigma_back, # fluid inside the well has same conductivity as the background
    sigma_air = 3e-7, # conductivity of the air
    casing_d = casing_d-casing_t, # 135mm is outer casing diameter
    casing_l = casing_l,
    casing_t = casing_t,
    src_a = np.r_[0., np.pi, -1.25], # put the A electrode just below the surface
    src_b = np.r_[2*casing_l, np.pi, -1.25] # put the return electrode at a distance of 2 x length of well away
)
# adjust the src_a location so it is connected to well
model.src_a = np.r_[model.casing_r, np.pi, -1.25]
# Here we print the parameters being used to set up the simulation
model.serialize()
```
## Mesh
Here we set up a 3D cylindrical mesh, discretizing in $x$, $\theta$ and $z$.
To discretize in x, we start by defining the finest region of the mesh, ensuring that we have 4 cells across the thickness of the casing. From there, we expand the cell sizes until we reach the second uniform cell size we want to model at (`csx2`). We then use a constant cell spacing of `csx2` until we have meshed out to the end of the domain in which we want to examine data (`domainx2`). Beyond that, we add padding cells to *"infinity"*
```
# parameters defining the core region of the mesh
# note that the finest region (csx1 below) is set by the casing thickness
csx2 = 25. # cell size in the x-direction in the second uniform region of the mesh (where we measure data)
csz = 2.5 # cell size in the z-direction
domainx2 = 500 # go out 500m from the well
# padding parameters
npadx, npadz = 15, 22 # number of padding cells
pfx2 = 1.4 # expansion factor for the padding to infinity in the x-direction
pfz = 1.4 # expansion factor for the padding to infinity in the z-direction
# discretization in theta
ncy = 1
nstretchy = 5
stretchfact = 1.5
hy = utils.meshTensor([(1, nstretchy, -stretchfact), (1, ncy), (1, nstretchy, stretchfact)])
hy = hy * 2*np.pi/hy.sum() # normalize so the theta cells span a full 2*pi
# set up a mesh generator which will build a mesh based on the provided parameters
# and casing geometry
cylMeshGen = casingSim.CasingMeshGenerator(
    directory=simDir, # directory where we can save things
    modelParameters=model, # casing parameters
    npadx=npadx, # number of padding cells in the x-direction
    npadz=npadz, # number of padding cells in the z-direction
    domain_x=domainx2, # extent of the second uniform region of the mesh
    hy=hy, # cell spacings in the theta direction
    csx1=model.casing_t/4., # use at least 4 cells per across the thickness of the casing
    csx2=csx2, # second core cell size
    csz=csz, # cell size in the z-direction
    pfx2=pfx2, # padding factor to "infinity"
    pfz=pfz # padding factor to "infinity" for the z-direction
)
cylMeshGen.mesh.plotGrid()
# Plot the source location on a polar (z-slice) view of the grid
ax = plt.subplot(111, projection='polar')
cylMeshGen.mesh.plotGrid(ax=ax, slice='z')
ax.plot(model.src_a[1], model.src_a[0], 'ro')
ax.plot(model.src_b[1], model.src_b[0], 'rs')
ax.set_ylim([0., 3*model.casing_l])
```
## Create models to compare to
Here, we create two more models which we want to simulate on
- `solid` fills in the center of the casing with a conductivity equal to that of steel
- `sigma_A` approximates the casing as a solid rod with conductivity found by preserving the conductivity - cross sectional area product.
```
# Build the two approximations of the cased well described above.
# Solid steel
model_solid = model.copy()
model_solid.sigma_inside = model_solid.sigma_casing # fill in the center of the well with steel
# average so that we preserve the conductivity * cross sectional area
# (annulus area over full disc area; the pi factors cancel)
sigmaA = model.sigma_casing * (model.casing_b**2 - model.casing_a**2) / (model.casing_b**2) # times pi / pi
model_sigma_A = model.copy()
model_sigma_A.sigma_inside = sigmaA
model_sigma_A.sigma_casing = sigmaA
print("Preserving sigma * A gives a conductivity of {} S/m".format(sigmaA))
# put the models in a dictionary for convenience
model_names = ['baseline', 'solid', 'sigma_A']
modelDict = dict(zip(model_names, [model, model_solid, model_sigma_A]))
# Assign physical properties on the mesh
physpropsDict = {
    'baseline': casingSim.model.PhysicalProperties(cylMeshGen, model),
    'solid': casingSim.model.PhysicalProperties(cylMeshGen, model_solid),
    'sigma_A': casingSim.model.PhysicalProperties(cylMeshGen, model_sigma_A)
}
# Plot the models
xlim = np.r_[-1, 1] # x-limits in meters
zlim = np.r_[-1.5*model.casing_l, 10.] # z-limits in meters. (z-positive up)
fig, ax = plt.subplots(1, 3, figsize=(18, 5), dpi=350)
for a, title in zip(ax, model_names):
    pp = physpropsDict[title]
    pp.plot_sigma(
        ax=a,
        pcolorOpts={'norm':LogNorm()} # plot on a log-scale
    )
    a.set_title('{} \n\n $\sigma$ = {:1.2e}S/m'.format(title, pp.modelParameters.sigma_casing), fontsize=13)
    # cylMeshGen.mesh.plotGrid(ax=a, slice='theta') # uncomment to plot the mesh on top of this
    a.set_xlim(xlim)
    a.set_ylim(zlim)
model.src_a
# Set up 4 sources, top casing, top & not coupled, downhole, downhole not coupled
# NOTE(review): the comment says 4 but 5 sources are defined (a "below hole"
# electrode is included as well)
src_a = np.vstack([
    [model.casing_r, np.pi, -1.25],
    [model.casing_r + 1., np.pi, -1.25],
    [model.casing_r, np.pi, -casing_l + 5.],
    [0., np.pi, -casing_l + 5.],
    [0., np.pi, -casing_l - 5.]
])
# every source shares the same return electrode location
src_b = np.ones((src_a.shape[0],1)) * model.src_b
src_names = [
    "top casing",
    "surface, disconnected",
    "downhole",
    "downhole, disconnected",
    "below hole"
]
# Plot the source location
symbols = ['ro', 'ws', 'k>', 'mo', 'c*', 'yx']
fig, ax = plt.subplots(1, 1, figsize=(4, 5))
# cylMeshGen.mesh.plotGrid(ax=ax, slice='theta')
physpropsDict['baseline'].plot_sigma(
    ax=ax,
    pcolorOpts={'norm':LogNorm()} # plot on a log-scale
)
for i in range(src_a.shape[0]):
    ax.plot(src_a[i, 0], src_a[i, 2], symbols[i])
    ax.plot(src_b[i, 0], src_b[i, 2], symbols[i])
ax.set_xlim([-0.1, 1.25]) #src_b[:, 0].max()])
ax.set_ylim([ -1.1*model.casing_l, 1])
```
## set up a DC simulation
```
# Build one DC simulation per model variant over the shared mesh/sources.
simDict = {}
for title in model_names:
    simDict[title] = casingSim.run.SimulationDC(
        modelParameters=modelDict[title], directory=simDir,
        meshGenerator=cylMeshGen,
        src_a=src_a, src_b=src_b
    )
# the %%time magic below marks the start of a second notebook cell that
# was merged into this block during export; it times the solves
%%time
fieldsDict = {}
for title in model_names:
    print('--- Running {} ---'.format(title))
    fieldsDict[title] = simDict[title].run()
    print('\n')
```
## Plot Results
Here we plot the radial electric field along a line $90^{\circ}$ from the source.
```
# plot e-field on surface
epsilon = 1e-16 # guards against divide-by-zero in the percent-difference panel
def plot_ex_field(theta_ind=1, src_ind=0, xmin=10, xmax=500, zloc=0):
    """Plot the radial electric field for all three models (left panel)
    and each approximation's percent difference from the baseline
    (right panel) along one theta line of the mesh.

    Relies on the notebook globals simDict, fieldsDict, model_names,
    src_names and cylMeshGen defined in earlier cells.
    """
    xlim = [xmin, xmax]
    src_baseline = simDict['baseline'].survey.srcList[src_ind]
    fig, ax = plt.subplots(1, 2, figsize=(10, 4), dpi=400)
    for i, key in enumerate(model_names):
        f = fieldsDict[key]
        src = simDict[key].survey.srcList[src_ind]
        casingSim.view.plotLinesFx(
            cylMeshGen.mesh, field=f[src,'e'],
            pltType='semilogy', ax=ax[0],
            theta_ind=theta_ind, xlim=xlim,
            color_ind=i, label=key,
            zloc=zloc
        )
        # percent difference is only meaningful for the approximations
        if key != 'baseline':
            f = fieldsDict[key]
            casingSim.view.plotLinesFx(
                cylMeshGen.mesh,
                field=100*(
                    np.absolute(f[src,'e'] - fieldsDict['baseline'][src_baseline,'e'])/
                    (np.absolute(fieldsDict['baseline'][src_baseline,'e']) + epsilon)
                ),
                pltType='plot', ax=ax[1],
                theta_ind=theta_ind, xlim=xlim,
                color_ind=i, label=key, zloc=zloc
            )
    # print('src_a = {}'.format(src_a[src_ind, :]))
    ax[0].legend()
    ax[0].set_ylabel('Electric Field (V/m)')
    ax[0].set_title(src_names[src_ind])
    ax[1].legend()
    ax[1].set_ylabel('percent difference')
    plt.tight_layout()
    plt.show()
ipywidgets.interact(
plot_ex_field,
theta_ind = ipywidgets.IntSlider(min=0, max=len(cylMeshGen.hy)-1, value=1),
src_ind = ipywidgets.IntSlider(min=0, max=src_a.shape[0]-1, value=0),
xmin = ipywidgets.FloatText(value=10),
xmax = ipywidgets.FloatText(value=500),
zloc = ipywidgets.FloatText(value=0)
)
```
## Plot the casing currents
```
# plot currents in casing
epsilon = 1e-16  # guards against divide-by-zero in the percent-difference ratio
def plot_casing_currents(src_ind=0):
    """Plot the vertical current carried by the casing for each model (left)
    and the percent difference from the baseline model (right).

    Relies on notebook globals: simDict, fieldsDict, modelDict, model_names,
    cylMeshGen, casingSim, src_names, casing_l.
    """
    fig, ax = plt.subplots(1, 2, figsize=(10, 4), dpi=400)
    ax = discretize.utils.mkvc(ax)
    baseline_src = simDict['baseline'].survey.srcList[src_ind]
    # Horizontal (ix) and vertical (iz) currents integrated over the casing
    # cross-section; inner radius 0 so the hole interior is included.
    ix_baseline, iz_baseline = casingSim.physics.CasingCurrents(
        fieldsDict['baseline'][baseline_src, 'j'], cylMeshGen.mesh,
        # modelDict['baseline'].casing_a,
        0,
        modelDict['baseline'].casing_b, modelDict['baseline'].casing_z,
    )
    for i, key in enumerate(model_names):
        f = fieldsDict[key]
        src = simDict[key].survey.srcList[src_ind]
        mod = modelDict[key]
        ix, iz = casingSim.physics.CasingCurrents(
            f[src, 'j'], cylMeshGen.mesh, 0,
            # mod.casing_a if key == 'baseline' else 0,
            mod.casing_b, mod.casing_z,
        )
        ax[0].plot(cylMeshGen.mesh.vectorNz, -iz, label=key, color="C{}".format(i))
        # ax[2].plot(cylMeshGen.mesh.vectorCCz, ix, label=key, color="C{}".format(i))
        if key != 'baseline':
            ax[1].plot(
                cylMeshGen.mesh.vectorNz,
                np.absolute(iz-iz_baseline)/(np.absolute(iz_baseline)+epsilon)*100,
                label=key, color="C{}".format(i)
            )
    # Reversed x-limits so depth increases to the right.
    # FIX: the original `[a.set_xlabel('depth (m)')]` was missing the
    # `for a in ax` loop, so it referenced a leaked/undefined name instead of
    # labelling each axis. Plain loops also read better than side-effect
    # list comprehensions.
    for a in ax:
        a.set_xlim([0., -casing_l])
        a.legend()
        a.set_xlabel('depth (m)')
    ax[0].set_ylabel('Downward-going Current (A)')
    ax[1].set_ylabel('Percent difference from baseline')
    ax[0].set_title(src_names[src_ind] + ' source \n\n Vertical Current in Casing')
    ax[1].set_title('Difference from baseline (%)')
    plt.tight_layout()
    plt.show()
# Interactive widget wrapper around plot_casing_currents.
ipywidgets.interact(
    plot_casing_currents,
    src_ind = ipywidgets.IntSlider(min=0, max=src_a.shape[0]-1, value=0)
)
# Module-level defaults used when exploring this function interactively.
mesh = cylMeshGen.mesh
j = fieldsDict['baseline'][simDict['baseline'].survey.srcList[0], 'j']
radius = 1
def horizontal_current_flux(mesh, j, radius=1):
    """Sum the radial current flux through the cylinder of faces closest to
    `radius`, returning one value per vertical (z) position.

    NOTE(review): the face selection compares distances computed from
    mesh.gridFx against the minimum distance from mesh.vectorNx — this picks
    the x-face ring nearest `radius`, assuming face x-locations coincide with
    node x-locations; confirm against the discretize cyl-mesh layout.
    """
    Fx_inds = np.absolute(mesh.gridFx[:,0] - radius) == np.min(np.absolute(mesh.vectorNx - radius))
    # Weight current density by face areas to get current per face.
    jA = utils.sdiag(mesh.area) * j
    # Keep only the selected x-faces (mask padded with False for y/z faces),
    # reshape to (theta, z) and sum over theta.
    jA_surface = jA[
        np.hstack([Fx_inds, np.zeros(mesh.nFy, dtype=bool), np.zeros(mesh.nFz, dtype=bool)])
    ].reshape(mesh.vnFx[1], mesh.vnFx[2], order='F')
    ix = jA_surface.sum(0)
    return ix
# plot horizontal currents flowing into the formation at a given radius
epsilon = 1e-16  # guards against divide-by-zero in the percent-difference ratio
def plot_formation_currents(src_ind=0, radius=1, ymax=None):
    """Plot the horizontal current crossing a cylinder of radius `radius` for
    each model (left) and the percent difference from baseline (right).

    ymax: optional upper y-limit for the left panel; ignored when None or 0.
    Relies on notebook globals: simDict, fieldsDict, model_names, cylMeshGen,
    src_names, casing_l, horizontal_current_flux.
    """
    fig, ax = plt.subplots(1, 2, figsize=(10, 4), dpi=400)
    ax = discretize.utils.mkvc(ax)
    baseline_src = simDict['baseline'].survey.srcList[src_ind]
    ix_baseline = horizontal_current_flux(
        cylMeshGen.mesh, fieldsDict['baseline'][baseline_src, 'j'], radius
    )
    for i, key in enumerate(model_names):
        f = fieldsDict[key]
        src = simDict[key].survey.srcList[src_ind]
        ix = horizontal_current_flux(
            cylMeshGen.mesh, fieldsDict[key][src, 'j'], radius
        )
        ax[0].plot(cylMeshGen.mesh.vectorCCz, ix, label=key, color="C{}".format(i))
        if key != 'baseline':
            ax[1].plot(
                cylMeshGen.mesh.vectorCCz,
                np.absolute(ix-ix_baseline)/(np.absolute(ix_baseline)+epsilon)*100,
                label=key, color="C{}".format(i)
            )
    # Reversed x-limits so depth increases to the right.
    # FIX: the original `[a.set_xlabel('depth (m)')]` was missing the
    # `for a in ax` loop; plain loops replace the side-effect comprehensions.
    for a in ax:
        a.set_xlim([0., -1.25*casing_l])
        a.legend()
        a.set_xlabel('depth (m)')
    # FIX: original condition `if not ymax is not None or ymax !=0:` simplified
    # to `ymax is None or ymax != 0`, which fired for ymax=None. The intent is
    # to clamp the axis only when the user supplies a nonzero ymax.
    if ymax is not None and ymax != 0:
        ylim = [0, ymax]
        ax[0].set_ylim(ylim)
    ax[0].set_ylabel('Horizontal currents (A)')
    ax[1].set_ylabel('Percent difference from baseline')
    ax[0].set_title(src_names[src_ind] + ' source \n\n Horizontal Current in Formation, r={}'.format(radius))
    ax[1].set_title('Difference from baseline (%)')
    plt.tight_layout()
    plt.show()
# Interactive widget wrapper around plot_formation_currents.
ipywidgets.interact(
    plot_formation_currents,
    src_ind=ipywidgets.IntSlider(min=0, max=len(src_names)-1, value=0),
    radius=ipywidgets.FloatText(value=10),
    ymax=ipywidgets.FloatText(value=0)
)
```
## Charges
How do the charges change with different casing models?
```
# Plot the charges
# 2-D (r, z) slice mesh used by plotFace2D for the 'j' view.
mesh2D = cylMeshGen.create_2D_mesh().mesh
def plotScalar(
    theta_ind=0, src_ind=0, clim_max=None, clim_min=None,
    max_depth=1.1*model.casing_l,
    max_r=0.1, top=10., view='charge'
):
    """Plot charge, potential ('phi'), or current density ('j') for every
    casing model, one panel per model, sliced at azimuthal index theta_ind.

    clim_max / clim_min: optional symmetric color limits; ignored when None
    or 0 (the widget default).
    """
    fig, ax = plt.subplots(1, len(model_names), figsize=(len(model_names)*5, 6))
    assert view.lower() in ['charge', 'phi', 'j']
    xlim = max_r*np.r_[0, 1] # x-limits in meters
    zlim = np.r_[-max_depth, top] # z-limits in meters. (z-positive up)
    clim = None
    plotopts = {
        'theta_ind': theta_ind,
    }
    # FIX: the original tests were `if not clim_max is not None or clim_max != 0.:`
    # and `if clim_min is not None or clim_min != 0.:`. With the widget defaults
    # (clim_max=0, clim_min=0) the first branch was skipped but the second still
    # ran, raising KeyError on plotopts['clim']. A user-supplied clim_min only
    # makes sense once clim exists, so the check is nested under clim_max.
    if clim_max is not None and clim_max != 0.:
        clim = clim_max * np.r_[-1, 1]
        plotopts['clim'] = clim
        if clim_min is not None and clim_min != 0.:
            plotopts['clim'][0] = clim_min
    for a, title in zip(ax, model_names):
        pp = physpropsDict[title]
        src = simDict[title].survey.srcList[src_ind]
        plotme = simDict[title].fields()[src, view]
        if view in ['charge', 'phi']:
            _, cb = pp.plot_prop(
                plotme,
                ax=a,
                pcolorOpts = {'cmap': 'bwr' if view == 'charge' else 'viridis'},
                **plotopts
            )
        elif view == 'j':
            # Slice the 3-D face vector field at the chosen azimuth and plot
            # it on the 2-D (r, z) mesh.
            jplt = casingSim.face3DthetaSlice(
                cylMeshGen.mesh, plotme, theta_ind=theta_ind
            )
            _, cb = casingSim.plotFace2D(
                mesh2D,
                jplt, real_or_imag='real', ax=a, range_x=xlim,
                range_y=zlim, sample_grid=np.r_[np.diff(xlim)/100., np.diff(zlim)/100.],
                logScale=True, clim=clim
            )
        a.set_title('{} source \n {} model \n\n $\sigma$ = {:1.2e}S/m, \ntheta = {:1.1f} degrees'.format(
            src_names[src_ind], title, pp.modelParameters.sigma_casing, cylMeshGen.mesh.vectorCCy[theta_ind]*180/np.pi
        ), fontsize=13)
        # cylMeshGen.mesh.plotGrid(ax=a, slice='theta') # uncomment to plot the mesh on top of this
        a.set_xlim(xlim)
        a.set_ylim(zlim)
        # cb.set_label(view)
        # plot outline of casing
        m = modelDict[title]
        a.plot(
            np.r_[m.casing_a, m.casing_a, m.casing_b, m.casing_b, m.casing_a],
            np.r_[m.casing_z[1], m.casing_z[0], m.casing_z[0], m.casing_z[1], m.casing_z[1]],
            'k',
            lw = 0.5
        )
    plt.tight_layout()
    plt.show()
```
### Widget for viewing charges
There are 3 panels, the true (left), if we were to replace the well with solid steel (center) and if we choose sigma to preserve the integrated conductance (right). For reference, I have plotted the true casing cross section.
The widgets available to you are
- **theta_ind**: [0, ntheta slices] Which azimuth should we slice through
- **clim_max**: max value on the colorbar so you can saturate it
- **max_depth**: max z-limit for the plot
- **top**: top of the plot (z)
observations:
- (sanity checks): in all cases, charge accumulates at the interface between the casing and the formation, and all charges are positive (the positive electrode is connected to the casing)
- charge distribution more uniform along length of casing for solid steel (also makes sense: better conductor)
```
# fig, ax = plt.subplots(1, 3, figsize=(18, 5), dpi=350)
# Interactive widget wrapper around plotScalar; FloatText value=0 means
# "unset" for the colorbar limits.
ipywidgets.interact(
    plotScalar,
    theta_ind = ipywidgets.IntSlider(min=0, max=len(cylMeshGen.hy)-1, value=1),
    src_ind = ipywidgets.IntSlider(min=0, max=src_a.shape[0]-1, value=0),
    clim_max = ipywidgets.FloatText(value=0),
    clim_min = ipywidgets.FloatText(value=0),
    max_depth = ipywidgets.FloatText(value=np.ceil(1.25*model.casing_l)),
    max_r = ipywidgets.FloatText(value=0.1),
    top = ipywidgets.FloatText(value=10),
    view = ipywidgets.ToggleButtons(
        options=['charge', 'phi', 'j'], value='charge'
    ),
)
# Plot the models
def plotScalarDifference(
    clim_max=None, theta_ind=0, src_ind=0, max_depth=1.1*model.casing_l, max_r=0.1, top=10., view='charge',
):
    """Plot the secondary field (approximation minus baseline) of charge or
    potential for the two approximate casing models.

    NOTE(review): the model list ['solid', 'sigma_A'] is hard-coded here —
    keep it in sync with model_names if more models are added.
    """
    assert view in ['charge', 'phi']
    fig, ax = plt.subplots(1, len(model_names)-1, figsize=(5*(len(model_names)-1), 5))
    xlim = max_r*np.r_[0, 1] # x-limits in meters
    zlim = np.r_[-max_depth, top] # z-limits in meters. (z-positive up)
    src_baseline = simDict['baseline'].survey.srcList[src_ind]
    for a, title in zip(ax, ['solid', 'sigma_A']):
        pp = physpropsDict[title]
        src = simDict[title].survey.srcList[src_ind]
        # Secondary field: approximate model minus baseline.
        plotme = simDict[title].fields()[src, view] - simDict['baseline'].fields()[src_baseline, view]
        # Symmetric color limits; auto-scaled per panel unless clim_max given.
        if clim_max is None or clim_max == 0.:
            clim = np.absolute(plotme).max() * np.r_[-1., 1.]
        else:
            clim = clim_max * np.r_[-1, 1]
        _, cb = pp.plot_prop(
            plotme,
            ax=a,
            pcolorOpts={
                'cmap': 'bwr' if view == 'charge' else 'viridis',
            },
            clim=clim,
            theta_ind=theta_ind
        )
        a.set_title('{} \n\n $\sigma$ = {:1.2e}S/m\ntheta = {:1.2f} degree'.format(
            title, pp.modelParameters.sigma_casing, cylMeshGen.mesh.vectorCCy[theta_ind]*180/np.pi
        ), fontsize=13)
        # cylMeshGen.mesh.plotGrid(ax=a, slice='theta') # uncomment to plot the mesh on top of this
        a.set_xlim(xlim)
        a.set_ylim(zlim)
        cb.set_label('secondary ' + view)
        # plot outline of casing
        m = modelDict[title]
        a.plot(
            np.r_[m.casing_a, m.casing_a, m.casing_b, m.casing_b, m.casing_a],
            np.r_[m.casing_z[1], m.casing_z[0], m.casing_z[0], m.casing_z[1], m.casing_z[1]],
            'k' if view == 'charge' else 'w',
            lw = 0.5
        )
    plt.tight_layout()
    plt.show()
```
### Plot the difference in charge distributions (approximation - true)
observations:
- first: colorbar scales are different between the two!
- solid steel: more negative at top, positive at bottom (consistent with more uniform charge distribution)
```
# Interactive widget wrapper around plotScalarDifference.
ipywidgets.interact(
    plotScalarDifference,
    theta_ind = ipywidgets.IntSlider(min=0, max=len(cylMeshGen.hy)-1, value=1),
    src_ind = ipywidgets.IntSlider(min=0, max=src_a.shape[0]-1, value=0),
    clim_max = ipywidgets.FloatText(value=0),
    max_depth = ipywidgets.FloatText(value=1.25*model.casing_l),
    max_r = ipywidgets.FloatText(value=0.1),
    top = ipywidgets.FloatText(value=10),
    view = ipywidgets.ToggleButtons(options=['charge', 'phi'], value='charge'),
)
```
### Total charge on the casing
- integrate the charge on the casing.
```
# Integrate (sum) the charge over the casing cells, per source and per model.
for src_ind in range(src_a.shape[0]):
    print('\n----- src {} ------'.format(src_a[src_ind,[0,2]]))
    casing_charge = {}
    for title in model_names:
        # Flatten the charge field and sum only over cells inside the casing.
        casing_charge[title] = (
            utils.mkvc(simDict[title].fields()[simDict[title].survey.srcList[src_ind], 'charge'])
        )[modelDict[title].ind_casing(cylMeshGen.mesh)].sum()
        print('{:8s}: {:1.3e}'.format(title, casing_charge[title]))
# Machine epsilon, for scale when judging how close the totals are to zero.
print(np.finfo(float).eps)
```
| github_jupyter |
This notebook contains an example for teaching.
# A Simple Case Study using Wage Data from 2015 - proceeding
So far we considered many machine learning methods, e.g. Lasso and Random Forests, to build a predictive model. In this lab, we extend our toolbox by predicting wages with a neural network.
## Data preparation
Again, we consider data from the U.S. March Supplement of the Current Population Survey (CPS) in 2015.
```
# Sys.setenv(RETICULATE_PYTHON = "C:/Users/MSI-NB/anaconda3/envs/tensorflow_2")
# Load the CPS 2015 wage subsample into the workspace (creates `data`).
load("wage2015_subsample_inference.Rdata")
Z <- subset(data,select=-c(lwage,wage)) # regressors
```
First, we split the data and then normalize it.
```
nrow(data)
# Reproducible 75/25 train/test split by row index.
set.seed(1234)
training <- sample(nrow(data), nrow(data)*(3/4), replace=FALSE)
dim(data)
data_train <- data[training,1:16]
data_test <- data[-training,1:16]
data_train
# normalize the data
# NOTE(review): the test set is standardized with the *training* mean/sd,
# which is the correct way to avoid test-set leakage.
mean <- apply(data_train, 2, mean)
std <- apply(data_train, 2, sd)
data_train <- scale(data_train, center = mean, scale = std)
data_test <- scale(data_test, center = mean, scale = std)
data_test
# scale() returns matrices; convert back to data frames for formula interfaces.
data_train <- as.data.frame(data_train)
data_test <- as.data.frame(data_test)
data_train
```
Then, we construct the inputs for our network.
```
# Basic specification: demographics, experience and education/region dummies.
X_basic <- "sex + exp1 + shs + hsg+ scl + clg + mw + so + we"
formula_basic <- as.formula(paste("lwage", "~", X_basic))
formula_basic
# Design matrices (include an intercept column) and targets for the network.
model_X_basic_train <- model.matrix(formula_basic,data_train)
model_X_basic_test <- model.matrix(formula_basic,data_test)
Y_train <- data_train$lwage
Y_test <- data_test$lwage
model_X_basic_train
```
### Neural Networks
First, we need to determine the structure of our network. We are using the R package *keras* to build a simple sequential neural network with three dense layers.
```
dim(model_X_basic_train)[2]
library(keras)
# Build and compile a small feed-forward net: 20 -> 10 -> 1 dense layers.
# compile() modifies the model in place and returns it, so build_model()
# returns a ready-to-train model.
build_model <- function() {
  model <- keras_model_sequential() %>%
    layer_dense(units = 20, activation = "relu",
                input_shape = dim(model_X_basic_train)[2])%>%
    layer_dense(units = 10, activation = "relu") %>%
    layer_dense(units = 1)
  model %>% compile(
    # NOTE(review): `lr` is the legacy argument name; newer keras versions
    # expect `learning_rate`.
    optimizer = optimizer_adam(lr = 0.005),
    loss = "mse",  # regression loss on log wages
    metrics = c("mae")
  )
}
```
Let us have a look at the structure of our network in detail.
```
# Instantiate the compiled network.
model <- build_model()
```
It is worth noticing that we have in total $441$ trainable parameters.
Now, let us train the network. Note that this takes some computation time. Thus, we are using gpu to speed up. The exact speed-up varies based on a number of factors including model architecture, batch-size, input pipeline complexity, etc.
```
# training the network (silent; 1000 epochs with mini-batches of 100)
num_epochs <- 1000
model %>% fit(model_X_basic_train, Y_train,
              epochs = num_epochs, batch_size = 100, verbose = 0)
```
After training the neural network, we can evaluate the performance of our model on the test sample.
```
# evaluating the performance on the held-out test sample
model %>% evaluate(model_X_basic_test, Y_test, verbose = 0)
# Calculating the performance measures
pred.nn <- model %>% predict(model_X_basic_test)
# Regressing squared errors on a constant gives the MSE and its std. error.
MSE.nn = summary(lm((Y_test-pred.nn)^2~1))$coef[1:2]
R2.nn <- 1-MSE.nn[1]/var(Y_test)
# printing R^2
cat("R^2 of the neural network:",R2.nn)
MSE.nn = summary(lm((Y_test-pred.nn)^2~1))$coef[1:2]
MSE.nn
```
| github_jupyter |
[@LorenaABarba](https://twitter.com/LorenaABarba)
12 steps to Navier–Stokes
=====
***
Did you experiment in Steps [1](./01_Step_1.ipynb) and [2](./02_Step_2.ipynb) using different parameter choices? If you did, you probably ran into some unexpected behavior. Did your solution ever blow up? (In my experience, CFD students *love* to make things blow up.)
You are probably wondering why changing the discretization parameters affects your solution in such a drastic way. This notebook complements our [interactive CFD lessons](https://github.com/barbagroup/CFDPython) by discussing the CFL condition. And learn more by watching Prof. Barba's YouTube lectures (links below).
Convergence and the CFL Condition
----
***
For the first few steps, we've been using the same general initial and boundary conditions. With the parameters we initially suggested, the grid has 41 points and the timestep is 0.25 seconds. Now, we're going to experiment with increasing the size of our grid. The code below is identical to the code we used in [Step 1](./01_Step_1.ipynb), but here it has been bundled up in a function so that we can easily examine what happens as we adjust just one variable: **the grid size**.
```
import numpy #numpy is a library for array operations akin to MATLAB
from matplotlib import pyplot #matplotlib is 2D plotting library
%matplotlib inline
def linearconv(nx):
    """Solve 1-D linear convection on [0, 2] with `nx` grid points and plot
    the result after 20 timesteps.

    The timestep is fixed at 0.025 s regardless of dx, so refining the grid
    eventually violates the CFL condition and the scheme blows up.
    """
    dx = 2 / (nx - 1)          # grid spacing
    nt = 20                    # number of timesteps to advance
    dt = .025                  # timestep size (delta t), NOT tied to dx
    c = 1                      # wave speed

    # Hat-function initial condition: u = 2 on [0.5, 1], u = 1 elsewhere.
    u = numpy.ones(nx)
    u[int(.5 / dx):int(1 / dx + 1)] = 2
    un = numpy.ones(nx)

    # Upwind update; the vectorized slice form is equivalent to looping
    # i = 1..nx-1 since it reads only from the copy `un`.
    for _ in range(nt):
        un = u.copy()
        u[1:] = un[1:] - c * dt / dx * (un[1:] - un[:-1])

    pyplot.plot(numpy.linspace(0, 2, nx), u);
```
Now let's examine the results of our linear convection problem with an increasingly fine mesh.
```
linearconv(41) #convection using 41 grid points (baseline resolution)
```
This is the same result as our Step 1 calculation, reproduced here for reference.
```
linearconv(61)  # finer grid: less numerical diffusion
```
Here, there is still numerical diffusion present, but it is less severe.
```
linearconv(71)  # finer still: the hat function keeps its shape better
```
Here the same pattern is present -- the wave is more square than in the previous runs.
```
linearconv(85)  # dx is now small enough that the fixed dt violates the CFL condition
```
This doesn't look anything like our original hat function.
### What happened?
To answer that question, we have to think a little bit about what we're actually implementing in code.
In each iteration of our time loop, we use the existing data about our wave to estimate the speed of the wave in the subsequent time step. Initially, the increase in the number of grid points returned more accurate answers. There was less numerical diffusion and the square wave looked much more like a square wave than it did in our first example.
Each iteration of our time loop covers a time-step of length $\Delta t$, which we have been defining as 0.025
During this iteration, we evaluate the speed of the wave at each of the $x$ points we've created. In the last plot, something has clearly gone wrong.
What has happened is that over the time period $\Delta t$, the wave is travelling a distance which is greater than `dx`. The length `dx` of each grid box is related to the number of total points `nx`, so stability can be enforced if the $\Delta t$ step size is calculated with respect to the size of `dx`.
$$\sigma = \frac{u \Delta t}{\Delta x} \leq \sigma_{\max}$$
where $u$ is the speed of the wave; $\sigma$ is called the **Courant number** and the value of $\sigma_{\max}$ that will ensure stability depends on the discretization used.
In a new version of our code, we'll use the CFL number to calculate the appropriate time-step `dt` depending on the size of `dx`.
```
import numpy
from matplotlib import pyplot
def linearconv(nx):
    """Solve 1-D linear convection on [0, 2] with `nx` grid points and plot
    the result after 20 timesteps.

    Unlike the earlier version, dt is derived from dx via a Courant number
    of 0.5, so the scheme stays stable as the grid is refined.
    """
    dx = 2 / (nx - 1)          # grid spacing
    nt = 20                    # number of timesteps to advance
    c = 1                      # wave speed
    sigma = .5                 # Courant number (<= 1 for stability)
    dt = sigma * dx            # CFL-scaled timestep

    # Hat-function initial condition: u = 2 on [0.5, 1], u = 1 elsewhere.
    u = numpy.ones(nx)
    u[int(.5 / dx):int(1 / dx + 1)] = 2
    un = numpy.ones(nx)

    # Upwind update; the vectorized slice form is equivalent to looping
    # i = 1..nx-1 since it reads only from the copy `un`.
    for _ in range(nt):
        un = u.copy()
        u[1:] = un[1:] - c * dt / dx * (un[1:] - un[:-1])

    pyplot.plot(numpy.linspace(0, 2, nx), u)
# With the CFL-scaled dt, every resolution stays stable; the finer grids
# simply cover a shorter physical time window in the same 20 steps.
linearconv(41)
linearconv(61)
linearconv(81)
linearconv(101)
linearconv(121)
```
Notice that as the number of points `nx` increases, the wave convects a shorter and shorter distance. The number of time iterations we have advanced the solution at is held constant at `nt = 20`, but depending on the value of `nx` and the corresponding values of `dx` and `dt`, a shorter time window is being examined overall.
Learn More
-----
***
It's possible to do rigorous analysis of the stability of numerical schemes, in some cases. Watch Prof. Barba's presentation of this topic in **Video Lecture 9** on YouTube.
```
from IPython.display import YouTubeVideo
# Prof. Barba's Video Lecture 9 on stability analysis.
YouTubeVideo('Yw1YPBupZxU')
from IPython.core.display import HTML
def css_styling():
    """Load the course's custom stylesheet and return it as rich HTML output.

    Fix: the file handle is now closed via a context manager instead of
    being left open.
    """
    with open("../styles/custom.css", "r") as f:
        styles = f.read()
    return HTML(styles)
css_styling()
```
| github_jupyter |
# Likelihood for Retro
To calculate the likelihood of a hypothesis $H$ given observed data $\boldsymbol{k}$, we construct the extended likelihood given as:
$$\large L(H|\boldsymbol{k}) = \prod_{i\in\text{DOMs}} \frac{\lambda_i^{k_i}} {k_i!} e^{-\lambda_i} \prod_{j\in\text{hits}}p^j(t_j|H)^{k_j}$$
where:
* $\lambda_i$ is the expected total charge in DOM $i$ given the hypothesis $H$
* $k_i$ is the observed total charge in DOM $i$
* $p^j(t_j|H)$ is the probability of observing a hit a time $t_j$ in a given DOM $j$ under the hypothesis $H$, raised to the power of the charge $k_j$ of that observed hit
We can take the logarithm of this to change the products into sums
$$\large \log L(H|\boldsymbol{k}) = \sum_{i\in\text{DOMs}} \left( k_i\log{\lambda_i} - \log{k_i!} - \lambda_i \right) +\sum_{j\in\text{hits}} k_j\log{p^j(t_j|H)} $$
Since we're only interested in finding the maximum likelihood, we can omit the constant terms $\log{k!}$
In retro, the expected charge $\lambda$ as well as the pdfs $p$ are decomposed into the hypothesis dependent part $N_\gamma(t,x)$ that corresponds to the number of of photons generated by a hypothesis at any given point in space-time and the independent part $p_\gamma(t,x)$ -- the probability that a given source photon in space-time is registered at a DOM.
* The probability $p^j(t_j|H)$ is then simply the sum over all space bins $\sum_x{N_\gamma(t_j,x)p^j_\gamma(t_j,x)}/\lambda_j$, where $\lambda_j$ is the normalization to properly normalize the expression to a pdf
* The time-independent $\lambda_i$s can be interpreted as the total expected charge, given by $\sum_x{\sum_t{p^i_\gamma(x,t)}\sum_t{N_\gamma(x,t)}}$
For many DOMs in an event we observe 0 hits, i.e. $k_i = 0$ for many $i$, this means that the sum over $i$ for these spacial cases simplifies to
$$\sum_{i\in\text{DOMs}} -\lambda_i$$
Plugging in the above expression for $\lambda_i$ yields:
$$\sum_{i\in\text{DOMs}}\sum_x{\sum_t{p^i_\gamma(x,t)}\sum_t{N_\gamma(x,t)}}$$
Of course only the probabilities $p^i_\gamma$ are dependent on the DOMs, so we can factorize:
$$\sum_x{\left(\sum_{i\in\text{DOMs}}\sum_t{p^i_\gamma(x,t)}\right)\sum_t{N_\gamma(x,t)}} = \sum_x{p^{TDI}_\gamma(x)\sum_t{N_\gamma(x,t)}}$$
The large sum over the DOMs can therefore be pre-computed, we call this the time-dom-independent (TDI) table, as the time and DOM sums have been evaluated
So we will just need to add the additional terms for DOMs with hits and the total likelihood then can be written as:
$$\large \log L(H|\boldsymbol{k}) = \sum_{i\in\text{hit DOMs}} k_i\log{\sum_x{\sum_t{p^i_\gamma(x,t)}\sum_t{N_\gamma(x,t)}}} -\sum_x{p^{TDI}_\gamma(x)\sum_t{N_\gamma(x,t)}} +\sum_{j\in\text{hits}} k_j\log{\sum_x{N_\gamma(t_j,x)p_\gamma(t_j,x)}/\lambda_j}$$
| github_jupyter |
# Calculadora
The purpose of this project is to create a simple calculator using Python.
```
def adicao(x, y):
    """Return the sum of x and y."""
    return x + y


def subtracao(x, y):
    """Return the difference x - y."""
    return x - y


def multiplicacao(x, y):
    """Return the product of x and y."""
    return x * y


def divisao(x, y):
    """Return the quotient x / y (raises ZeroDivisionError when y == 0)."""
    return x / y
def escolhadaoperacao():
    """Prompt until the user picks a valid operation (1-4) and return it.

    Fixes over the original: the bare `except` is narrowed to ValueError;
    the `finally` block that chained straight into primeironumero() on every
    loop iteration (even on invalid input, since `finally` also runs on
    `continue`) is removed, as are the accidental recursion and the
    unreachable second `break`. The caller is now responsible for asking
    for the operands.
    """
    while True:
        try:
            escolha = int(input('Indique que operação você gostaria de realizar \n'))
        except ValueError:
            # Input was not an integer -- ask again.
            print('Você não digitou um número corretamente')
            continue
        if escolha > 4 or escolha <= 0:
            print('Operação não cadastrada')
            continue
        print('Obrigado por indicar uma operação')
        return escolha
def primeironumero():
    """Prompt until the user enters a valid number; return it as float.

    Fixes over the original: bare `except` narrowed to ValueError, and the
    `finally` block that chained straight into segundonumero() is removed --
    the caller drives the sequence of prompts.
    """
    while True:
        try:
            num1 = float(input('Indique o primeiro número da operação que você gostaria de realizar \n'))
        except ValueError:
            print("Você não digitou um número corretamente")
            continue
        print('Obrigado por digitar um número')
        print("Obrigado!")
        return num1
def segundonumero():
    """Prompt until the user enters a valid number; return it as float.

    Fixes over the original: bare `except` narrowed to ValueError, the
    `finally` block that chained straight into conta() is removed, and the
    user-facing typo 'sgundo' is corrected to 'segundo'.
    """
    while True:
        try:
            num2 = float(input('Indique o segundo número da operação que você gostaria de realizar \n'))
        except ValueError:
            print("Você não digitou um número corretamente")
            continue
        print('Obrigado por digitar um número')
        print("Obrigado!")
        return num2
def conta(escolha=None, num1=None, num2=None):
    """Print the result of the selected operation.

    Backward-compatible fix: the original read `escolha`, `num1` and `num2`
    as module globals that were never assigned (the top-level script called
    the prompt functions without capturing their return values), so conta()
    raised NameError. The values can now be passed explicitly; when omitted,
    they fall back to the module globals for the original calling style.
    """
    if escolha is None:
        escolha = globals().get('escolha')
    if num1 is None:
        num1 = globals().get('num1')
    if num2 is None:
        num2 = globals().get('num2')
    if escolha == 1:
        print("Operação ", num1, " + ", num2, " = ", adicao(num1, num2))
    elif escolha == 2:
        print("Operação ", num1, " - ", num2, " = ", subtracao(num1, num2))
    elif escolha == 3:
        print("Operação ", num1, " / ", num2, " = ", divisao(num1, num2))
    elif escolha == 4:
        print("Operação ", num1, " * ", num2, " = ", multiplicacao(num1, num2))
    else:
        print("Opção incorreta!")
####################### Python Calculator ##########################
print("1 - Soma \n2 - Subtração \n3 - Divisão \n4 - Multiplicação")
# Fix: the original called the prompt functions without capturing their
# return values and then referenced the undefined names escolha/num1/num2
# in a duplicated if/elif chain, raising NameError. Capture the values and
# delegate the dispatch to conta().
escolha = escolhadaoperacao()
num1 = primeironumero()
num2 = segundonumero()
conta()
```
| github_jupyter |
```
print('Materialisation Data Test')
import os
import compas
from compas.datastructures import Mesh, mesh_bounding_box_xy
from compas.geometry import Vector, Frame, Scale
# Input: an RV2 (RhinoVault 2) session; outputs: serialized form diagrams.
HERE = os.getcwd()
FILE_I = os.path.join(HERE, 'blocks and ribs_RHINO', 'sessions', 'bm_vertical_equilibrium', 'simple_tripod.rv2')
FILE_O1 = os.path.join(HERE, 'blocks and ribs_RHINO', 'data', 'form.json')
FILE_O2 = os.path.join(HERE, 'blocks and ribs_RHINO', 'data', 'scaled_form.json')
session = compas.json_load(FILE_I)
# Working copy and an untouched loader copy of the form diagram.
mesh = Mesh.from_data(session['data']['form'])
loader_mesh = Mesh.from_data(session['data']['form'])
```
### to delete extra faces(more than 4 edges) if subdivided with catmulclark or other weird subdivision that connects the mesh with the ground
```
# Collect faces with more than 4 edges (artifacts of certain subdivisions
# that connect the mesh to the ground), then delete them and clean up.
delete_faces =[]
for fkey in mesh.faces():
    if len(mesh.face_vertices(fkey)) > 4:
        delete_faces.append(fkey)
# Deleting in a second pass avoids mutating the mesh while iterating it.
for fkey in delete_faces:
    mesh.delete_face(fkey)
mesh.remove_unused_vertices()
```
### scale up the form if needed
```
# Scale a copy of the form by 100x about the centroid of its XY bounding box.
scaled_mesh = mesh.copy()
box_points = mesh_bounding_box_xy(scaled_mesh)
# NOTE(review): from_points is called on an instance but acts as an
# alternate constructor -- it builds a new mesh from the bounding-box points.
base_mesh = scaled_mesh.from_points(box_points)
centroid = base_mesh.centroid()
#print (centroid)
frame = Frame(centroid,Vector(1,0,0),Vector(0,1,0))
S = Scale.from_factors([100, 100, 100], frame)
scaled_mesh.transform(S)
```
### Visualise and export Initial Mesh
```
# Serialize both meshes, then render the scaled one with pythreejs.
mesh.to_json(FILE_O1)
scaled_mesh.to_json(FILE_O2)
print(mesh)
# NOTE(review): the star import shadows compas' `Mesh` with pythreejs' `Mesh`
# (used below for mesh_3j) -- the compas mesh API is still usable on existing
# instances, but `Mesh.from_data` would now resolve to pythreejs.
from pythreejs import *
import numpy as np
from IPython.display import display
mesh = scaled_mesh.copy()
# Flatten face-vertex coordinates into a triangle-soup vertex list.
vertices = []
for face in mesh.faces():
    for v in mesh.face_vertices(face):
        xyz = mesh.vertex_attributes(v, "xyz")
        vertices.append(xyz)
# print(vertices)
vertices = BufferAttribute(
    array = np.array(vertices,dtype=np.float32),
    normalized = False)
# print(vertices)
geometry = BufferGeometry(
    attributes={'position': vertices})
print(geometry)
geometry.exec_three_obj_method('computeVertexNormals')
mesh_3j = Mesh(geometry=geometry,
               material=MeshLambertMaterial(color='red'),
               position=[0,0,0])
print(geometry)
print(type(mesh_3j))
# Camera with an attached directional light, plus ambient fill light.
c = PerspectiveCamera(position=[0, 5, 5], up=[0, 1, 0],
                      children=[DirectionalLight(color='white', position=[3, 5, 1], intensity=0.5)])
scene=Scene(children=[mesh_3j,c, AmbientLight(color='#777777')])
renderer = Renderer(camera=c,
                    scene=scene,
                    width=800,
                    height=800,
                    controls=[OrbitControls(controlling=c)])
display(renderer)
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from grn_learn.viz import set_plotting_style
import seaborn as sns
import matplotlib.pyplot as plt
from grn_learn import download_and_preprocess_data
from grn_learn import annot_data_trn
from grn_learn import train_keras_multilabel_nn
from sklearn.model_selection import StratifiedKFold
from keras.backend import clear_session
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers
from keras.utils import np_utils
from keras.metrics import categorical_accuracy
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from keras.layers import Dropout
import keras.backend as K
from keras import regularizers
from sklearn.model_selection import train_test_split
# Fix the RNG seed for reproducible sampling/splits throughout the notebook.
seed = 42
np.random.seed(seed)
set_plotting_style()
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
```
### Download, preprocess data for *P. aeru*
```
# Organism key used to locate the COLOMBOS expression data files.
org = 'paeru'
```
g.download_and_preprocess_data('paeru',
data_dir = 'colombos_'+ org + '_exprdata_20151029.txt')
### Annotate dataset using the TRN from the Martinez-Antonio lab
```
# paeru
paeru_path = '~/Documents/uni/bioinfo/data/paeru/'
# Fix: the notebook imports `annot_data_trn` directly
# (`from grn_learn import annot_data_trn`); there is no `g` module alias in
# scope, so the original `g.annot_data_trn(...)` raised NameError.
annot_data_trn(tf_tf_net_path=paeru_path + 'paeru_tf_tf_net.csv',
               trn_path=paeru_path + 'paeru-trn.csv',
               denoised_data_path='~/Downloads/',
               org='paeru',
               output_path='~/Downloads/')
#df_trn = pd.read_csv(path + 'paeru-trn.csv', comment= '#')
#tfs = pd.read_csv(path+'paeru_tfs.csv')
# tf_tf_df = get_gene_data(df_trn, 'Target gene', tf_list)
# Load the denoised, one-hot-annotated expression matrix produced above.
denoised = pd.read_csv('~/Downloads/denoised_hot_paeru.csv')
denoised.head()
# Split genes into regulon members (TG == 1) and the rest, then mix 50
# random non-regulon genes into the positives as label noise.
regulons_p = denoised[denoised['TG'] == 1]
non_regulons_p = denoised[denoised['TG'] == 0]
noise = non_regulons_p.sample(n = 50, replace = False)
regulons_with_noise_p = pd.concat([regulons_p, noise], axis = 0)
non_regulons_wo_noise = non_regulons_p.drop(noise.index.to_list())
#annot = regulons_with_noise.iloc[:, :3]
# Features: expression columns; targets: the 12 one-hot regulon labels
# (columns -13 to -2; the final column is excluded).
data_p = regulons_with_noise_p.iloc[:, 3:-13]
target_p = regulons_with_noise_p.iloc[:, -13:-1]
target_p.head()
# Fix: this section builds `data_p`/`target_p` above, but the original split
# referenced stale `data`/`target` left over from an earlier notebook state.
# Hold out 20% for testing, then carve 15% of the training rows off as a
# validation set for the network.
val_shape = int(data_p.shape[0] * 0.15)
X_train, X_test, y_train, y_test = train_test_split(data_p,
                                                    target_p,
                                                    shuffle=True,
                                                    test_size=0.2,
                                                    random_state=seed)
x_val = X_train[:val_shape]
partial_x_train = X_train[val_shape:]
y_val = y_train[:val_shape]
partial_y_train = y_train[val_shape:]
```
### Run keras net on paeru dataset
```
# Fix: `train_keras_multilabel_nn` is imported directly; there is no `g`
# module alias in scope, so the original `g.train_keras_multilabel_nn`
# raised NameError.
nn, history = train_keras_multilabel_nn(X_train,
                                        y_train,
                                        partial_x_train,
                                        partial_y_train,
                                        x_val,
                                        y_val,
                                        n_units=64,
                                        epochs=20,
                                        n_deep_layers=3,
                                        batch_size=128)
# Held-out test performance.
score, accuracy = nn.evaluate(
    X_test,
    y_test,
    batch_size=64,
    verbose=2
)
accuracy
```
### B. subti data download
```
# bsubti
bsubt_path = '~/Documents/uni/bioinfo/data/bsubti/'
# Fix: `download_and_preprocess_data` is imported directly; there is no `g`
# module alias in scope, so the original `g.download_and_preprocess_data`
# raised NameError.
download_and_preprocess_data('bsubt')
# data_dir = 'colombos_'+ org + '_exprdata_20151029.txt')
```
### B. subti annotate dataset using TRN from the Merino Lab
```
# Fix: call the directly-imported `annot_data_trn` (no `g` alias exists).
annot_data_trn(tf_tf_net_path=bsubt_path + 'bsub-tf-net.csv',
               trn_path=bsubt_path + 'bsubt_trn-l.txt',
               denoised_data_path='~/Downloads/',
               org='bsubt',
               output_path='~/Downloads/')
denoised_b = pd.read_csv('~/Downloads/denoised_hot_bsubt.csv')
#denoised.head()
# Regulon members vs rest, with 50 noise genes mixed into the positives.
regulons_b = denoised_b[denoised_b['TG'] == 1]
non_regulons_b = denoised_b[denoised_b['TG'] == 0]
noise = non_regulons_b.sample(n = 50, replace = False)
regulons_with_noise_b = pd.concat([regulons_b, noise], axis = 0)
non_regulons_wo_noise = non_regulons_b.drop(noise.index.to_list())
#annot = regulons_with_noise.iloc[:, :3]
# Features: expression columns; targets: the 6 one-hot regulon labels.
data_b = regulons_with_noise_b.iloc[:, 3:-7]
target_b = regulons_with_noise_b.iloc[:, -7:-1]
# Fix: `data_b[:5, -1]` is invalid DataFrame indexing (raises); positional
# slicing needs .iloc.
data_b.iloc[:5, -1]
# Fix: this section builds `data_b`/`target_b` above, but the original split
# referenced stale `data`/`target` from an earlier notebook state.
val_shape = int(data_b.shape[0] * 0.15)
X_train, X_test, y_train, y_test = train_test_split(data_b,
                                                    target_b,
                                                    shuffle=True,
                                                    test_size=0.2,
                                                    random_state=seed)
x_val = X_train[:val_shape]
partial_x_train = X_train[val_shape:]
y_val = y_train[:val_shape]
partial_y_train = y_train[val_shape:]
# Fix: use the directly-imported `train_keras_multilabel_nn` (no `g` alias).
nn, history = train_keras_multilabel_nn(X_train,
                                        y_train,
                                        partial_x_train,
                                        partial_y_train,
                                        x_val,
                                        y_val,
                                        n_units=64,
                                        epochs=20,
                                        n_deep_layers=3,
                                        batch_size=128)
# NOTE(review): .head() implies train_keras_multilabel_nn returns the
# training history as a DataFrame -- confirm against grn_learn.
history.head()
# Held-out test performance for the B. subtilis model.
score, accuracy = nn.evaluate(
    X_test,
    y_test,
    batch_size=64,
    verbose=2
)
accuracy
```
## Upload coli data
```
# E. coli: same preparation as above -- regulon members vs rest, 50 noise
# genes mixed into the positives, then feature/target column split
# (9 one-hot regulon labels here).
denoised = pd.read_csv('~/Downloads/denoised_hot_coli.csv')
regulons_e = denoised[denoised['TG'] == 1]
non_regulons_e = denoised[denoised['TG'] == 0]
noise = non_regulons_e.sample(n = 50, replace = False)
regulons_with_noise_e = pd.concat([regulons_e, noise], axis = 0)
non_regulons_wo_noise = non_regulons_e.drop(noise.index.to_list())
#annot = regulons_with_noise.iloc[:, :3]
data_e = regulons_with_noise_e.iloc[:, 3:-10]
target_e = regulons_with_noise_e.iloc[:, -10:-1]
```
### Set up simulations for E. coli, B. subti, and P. aeru
```
# 5-fold cross-validation of the same network architecture on all three
# organisms; accuracies are accumulated into cross_val_df.
organisms = ['ecoli', 'bsubti', 'paeru']
datasets = [(data_e, target_e), (data_b, target_b), (data_p, target_p)]
kfold = KFold(n_splits = 5, shuffle= True, random_state=seed)
cross_val_df = pd.DataFrame()
# Iterate over organisms
for ix in range(3):
    #
    org = organisms[ix]
    data = datasets[ix]
    #Extract datasets
    X = data[0]
    y = data[1]
    # Iterate over folds (KFold ignores y; it only splits by row index)
    for train, test in kfold.split(data[0], data[1]):
        # Train test split
        X_train = X.iloc[train, :]
        y_train = y.iloc[train, :]
        X_test = X.iloc[test, :]
        y_test = y.iloc[test, :]
        #print(type(X_train))
        # Run neural net: 128-unit input layer, two regularized 64-unit
        # hidden layers with dropout, softmax output over the regulon labels.
        nn = Sequential()
        #initial layer
        nn.add(Dense(128, activation='relu', input_shape=(X_train.shape[1],)))
        #extra deep layers
        for i in range(2):
            nn.add(Dense(64, activation='relu',
                         kernel_regularizer=regularizers.l2(0.001))
                   )
            nn.add(Dropout(0.25))
        #add final output layer
        nn.add(Dense(y_train.shape[1], activation='softmax'))
        nn.compile(optimizer='rmsprop',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
        #print neural net architecture
        nn.summary()
        #fit and load history
        history = nn.fit(X_train, y_train, epochs=20,
                         batch_size= 128,
                         verbose = 0)
        # Compute accuracy
        score, acc = nn.evaluate(X_test, y_test)
        # Store acc in dataframe
        sub_df = pd.DataFrame({'accuracy': [acc],
                               'organism': [org]})
        cross_val_df = pd.concat([cross_val_df, sub_df])
#cross_val_df.to_csv('../../data/cv_data.csv', index = False)
#cross_val_df = pd.read_csv('../../data/cv_data.csv')
sns.boxplot?
plt.figure(figsize = (6, 3.2))
sns.boxplot(data = cross_val_df,
y = 'organism',
x = 'accuracy',
color = 'lightgray',
saturation = 1,
whis = 1,
width = 0.7
#alpha = 0.5
)
sns.stripplot(data = cross_val_df,
y = 'organism',
x = 'accuracy',
palette = 'Set2',
#edgecolor = 'gray',
#linewidth = 0.4,
size = 10,
alpha = 0.7)
plt.tight_layout()
plt.xlim(0.5, 1.01)
plt.savefig('cross_val_org.pdf', dpi = 600)
```
| github_jupyter |
## ***Defining the Question***
Provided with the dataset from Nairobi Hospital, you are tasked with building a model that determines whether or not a patient's symptoms indicate that the patient has hypothyroidism.
## ***Metric For Success***
The metric of success will be to build a decision-tree-based model that determines whether or not a patient has hypothyroidism based on the patient's symptoms.
## ***Context***
Hypothyroid is a condition in which your thyroid gland doesn't produce enough of certain crucial hormones. An example of the crucial hormone is thyroxin which plays vital roles in digestion, heart and muscle function, brain development and maintenance of bones.With that stated, Nairobi Hospital conducted a clinical camp to test for hypothyroidism.
## ***Experimental Design***
The experimental design for this project was in adherence to the CRISP-DM methodology.It will follow the CRISP-DM steps which are:
1.Problem Understanding
2.Data Understanding
3.Data Preparation
4.Modelling
5.Evaluation
## **Data Relevance**
The health care dataset is relevant to build a model that determines whether or not the patient's symptoms indicate that the patient has hypothyroid.
***Import the libraries***
```
#ILl first import the necessary libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler
from sklearn.ensemble import RandomForestClassifier, \
GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from IPython.display import Image
import pydotplus
import warnings
warnings.filterwarnings('ignore')
#ILl then load the dataset
df = pd.read_csv('http://bit.ly/hypothyroid_data')
df.head()
#Ill then check the tail of the dataset
df.tail()
#Ill check the the shape of the dataset
df.shape
# The output is 3163 rows and 26 columns
# Ill then use the .info() function which prints information about a DataFrame
df.info()
```
The .info() function shows index dtype and column dtypes, non-null values and memory usage.
## ***Data Cleaning***
```
#ILl then ensure uniformity in my data by changing the column names to lowercase
df.columns = map(str.lower,df.columns)
#ILl then change the column names to small leter
df.columns
#Ill then check for nulls in the dataset
df.isnull().sum()
```
Output shows no null values
```
# Inspect every column's distinct values to spot anomalous entries (e.g. '?')
for column in df.columns:
    print(column)
    print(df[column].unique())
    # FIX: corrected the "Their are" typo in the printed message; f-string
    # produces the same spacing as the original comma-separated print.
    print(f"There are {df[column].nunique()} unique values in this column")
    # visual separator between columns
    print("***************************************************")
```
After checking the unique values, I noticed the character "?" in some of the columns. I'll convert these to nulls so that I can impute them.
```
# The '?' placeholders are really missing values: convert them to NaN
df = df.replace({'?': np.nan})
# re-check missing-value counts now that '?' has become NaN
df.isnull().sum()
# Drop the tbg columns: tbg is dominated by nulls (tbg_measured goes with it)
df = df.drop(columns=['tbg', 'tbg_measured'])
# confirm the two columns are gone
df.head()
# both columns are dropped
# inspect dtypes again (everything is still object/string at this point)
df.dtypes
```
The output shows that all the column names are of object (string). Ill fix that into appropriate data types
```
# Encode categorical string columns as integers (most columns are binary)
# initiating the label encoder object
labelenc = LabelEncoder()
# status: LabelEncoder assigns codes in sorted order of the class labels
df['status'] = labelenc.fit_transform(df['status'])
# replacing string values with numbers 0 and 1
# f/false is transformed to 0 and t/true is transformed to 1,
# y/yes is transformed to 1 and n/no is transformed to 0.
df['sex'].replace({'M':0, 'F':1}, inplace=True)
df.replace({'f':0,'t':1, 'y':1, 'n':0}, inplace=True)
# previewing the first five observations of the dataset
df.head()
```
***Meaning***
In the above output:
1. 0 represents hypothyroidism and 1 represents negative.
2.0 represents male and 1 represents females.
3.false represents 0 and true represents 1,
4.yes represents 1 and no represents 0.
```
# Confirm the encodings took effect: encoded columns should now be numeric
df.dtypes
```
***Imputation***
In order to choose the appropriate imputation procedure, there are things to consider, such as knowing which variables are continuous and which are categorical.
```
# Work on a deep copy so the raw encoded frame stays untouched during imputation
df_copy = df.copy(deep=True)
df_copy.head()
# Coerce every remaining object-typed column to numeric; unparseable cells
# (leftover strings) become NaN via errors='coerce'
object_cols = df_copy.columns[df_copy.dtypes == object]
df_copy[object_cols] = df_copy[object_cols].apply(pd.to_numeric, errors='coerce')
# verify that the conversion took effect
df_copy.dtypes
# class balance of the sex column (1 = female, 0 = male after encoding)
df_copy['sex'].value_counts()
```
**Output reads 2255 females and 908 males**
**For the Categorical variable, imputing with the mode is generally used.**
```
# Impute missing values: numeric columns get the column mean, the categorical
# sex column gets its mode.
numerical_columns = ['age', 'tsh', 't4u', 't3', 'tt4', 'fti']
# FIX: assign the filled Series back instead of calling fillna(inplace=True)
# on a column selection -- chained-assignment inplace fills are deprecated in
# pandas and may silently operate on a copy.
for col in numerical_columns:
    df_copy[col] = df_copy[col].fillna(df_copy[col].mean())
# FIX: impute sex with its computed mode rather than the hard-coded 1.0 so the
# cell stays correct if the data changes (here the mode is 1.0 = female).
df_copy['sex'] = df_copy['sex'].fillna(df_copy['sex'].mode()[0])
# checking for null values after imputing
df_copy.isnull().sum()
```
**I filled missing values in the sex column with 1.0 because it was the most frequent value (the mode) in that column.**
Output reads 2255 females and 908 males
For the Categorical variable, imputing with the mode is generally used.
```
# Final sanity check: no nulls should remain after imputation
df_copy.isnull().sum()
```
***Conclusion***
When imputing, I had to differentiate between categorical and continuous variables. The mean and median are generally used for continuous (numeric) variables, while for categorical variables we use the mode (the most frequent value).
## ***Exploratory Data Analysis***
We can define EDA as the process of performing initial investigation to data so as to unravel patterns, spot anomalies, test hypothesis and to check assumptions
### ***Univariate Analysis***
```
# Boxplots of the six numeric columns to visualise outliers
column_1 = ['age', 't3', 'tsh', 'fti', 't4u', 'tt4']
# one colour per boxplot
colors = ['cyan','magenta','yellow','black','blue','green']
# 2x3 grid, one axis per column
figgg, axes = plt.subplots(2,3, figsize=(18,9))
plt.suptitle('Boxplots Showing Outliers in Numerical Columns', fontsize=18, color='darkblue', y=0.93)
# FIX: the original zip also iterated `df_copy` (yielding column names into an
# unused variable named `data`, shadowing earlier state); iterate only what is used.
for ax, col, color in zip(axes.flatten(), column_1, colors):
    # FIX: pass the vector by keyword -- seaborn >= 0.12 no longer accepts it
    # as the first positional argument; x= preserves the horizontal orientation.
    sns.boxplot(x=df_copy[col], ax=ax, color=color)
    ax.set_facecolor('antiquewhite')
plt.show();
```
**I will not drop the outliers, as they make up a large percentage of the data.**
```
# Bar chart: counts of each sex (1 = female, 0 = male after encoding)
df_copy['sex'].value_counts().plot(kind='bar')
plt.xticks(np.arange(2), ('Female', 'Male'), rotation=360)
plt.title('Number of Men and Women in the dataset', fontsize=15, color='black')
plt.show()
# females dominate the sample
# Histogram of patient ages
df_copy['age'].plot.hist(grid=False, bins=20, rwidth=0.9, color='blue')
plt.title('Age distribution of patients')
plt.xlabel('Age')
plt.ylabel('Number')
plt.grid(axis='y', alpha=0.75)
```
**The output shows that there is a high number of patients between the ages of 50 and 60.**
### ***Bivariate Analysis***
Bivariate analysis involves two different variables. It aims to find out the causes and relationships between those two variables.
```
# Preview the cleaned dataset
df_copy.head()
# Pairwise correlations between all numeric columns
corr = df_copy.corr()
corr
# FIX: reuse the matrix computed above instead of recomputing df_copy.corr()
correlation = corr
plt.figure(figsize = (20,10))
sns.heatmap(correlation, xticklabels=correlation.columns, yticklabels=correlation.columns, annot=True)
plt.title('A Heatmap of Patient Correlation in our Dataset', color='black')
plt.show()
```
***Conclusion***
1. Problem that were encountered was the pairplot was not running
2. From the above EDA, I chose not to drop the outliers for this project cause it will take a high percentage of data.
3. I also found out that most patients were Females.
4. Most patients were from the age 50 and 60.
## ***Modelling***
### ***Random Forest Classifier***
```
# --- Random forest baseline ---
# Separate the target (status) from the predictors.
# FIX: the positional axis argument to drop() was removed in pandas 2.0;
# use the explicit columns= keyword.
independent = df_copy.drop(columns='status')
dependent = df_copy['status']
# 75/25 train/test split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(independent, dependent, test_size=.25, random_state=34)
# report the split shapes
print('X_train shape: ', X_train.shape, '\nY_Train Shape:', y_train.shape)
print('X_test shape: ', X_test.shape, '\nY_Test Shape:', y_test.shape)
# Standardise the features; fit on the train split only to avoid leakage
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# 100 shallow trees, regularised via min_samples_split and max_depth
rfc = RandomForestClassifier(n_estimators=100, random_state=42, min_samples_split=20, max_depth=5)
rfc = rfc.fit(X_train, y_train)
rfc_pred = rfc.predict(X_test)
# side-by-side actual vs predicted labels
greg_Predicted = pd.DataFrame({'Actual': y_test, 'Predicted': rfc_pred})
greg_Predicted.head()
# NOTE(review): MAE/MSE/RMSE/R2 are regression metrics; on 0/1 labels they are
# only loosely informative -- accuracy (and ideally recall/F1) is the one to read.
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, rfc_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, rfc_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, rfc_pred)))
print('R2 squared:', metrics.r2_score(y_test, rfc_pred))
print('Accuracy Score:', metrics.accuracy_score(y_test, rfc_pred))
```
***The accuracy of this model shows a 99% accuracy,It could be overfitting***
```
# Export one tree from the trained forest to Graphviz and render it as a PNG
dot_data = StringIO()
# feature names are all columns except the target
test_features = df_copy.columns.to_list()
test_features.remove('status')
# pick an arbitrary estimator (index 50 of the 100 trees)
tree = rfc.estimators_[50]
export_graphviz(tree, out_file=dot_data, filled=True, rounded=True, \
special_characters=True, feature_names=test_features)
# rendering requires the graphviz binaries to be installed on the system
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('hypothyroidism_forest.png')
Image(graph.create_png())
```
***Conclusion***
The plot shows the most important features in predicting whether a person is hypothyroid or not.
**. the most import feature is fti test which accounts for 76% of the total feature importance**
## ***Gradient Boosting Classifier***
```
# --- Gradient boosting: sweep the learning rate, then evaluate the best one ---
X_train, X_test, y_train, y_test = train_test_split(independent, dependent, test_size=.25, random_state=34)
# Scale features to [0, 1]; fit on the train split only
mscaler = MinMaxScaler()
X_train = mscaler.fit_transform(X_train)
X_test = mscaler.transform(X_test)
# FIX: renamed `list` -> `learning_rates`; the original shadowed the builtin list type.
learning_rates = [.05, .075, .1, .25, .5, .75, 1]
for x in learning_rates:
    gboost = GradientBoostingClassifier(n_estimators=100, learning_rate=x, \
                                        max_features=2, max_depth=5, random_state=42)
    gboost.fit(X_train, y_train)
    print('Learning rate: ', x)
    print('Training set accuracy score: {0:.3f}' .format(gboost.score(X_train, y_train)))
    print('Test set accuracy score: {0:.3f}' .format(gboost.score(X_test, y_test)))
# The sweep shows the best learning rate is 0.25
# Refit with the chosen rate (fewer trees) and report held-out accuracy
gb_clf = GradientBoostingClassifier(n_estimators=20, learning_rate=0.25, \
                                    max_features=2, max_depth=5, random_state=42)
gb_clf.fit(X_train, y_train)
gb_pred = gb_clf.predict(X_test)
print('\nAccuracy Score: ', metrics.accuracy_score(y_test, gb_pred))
```
## ***ADA Boost Classifier***
```
# --- AdaBoost classifier on the unscaled features ---
# fresh split with the same seed as the other models for comparability
X_train, X_test, y_train, y_test = train_test_split(independent, dependent, test_size=.25, random_state=34)
# 50 boosting rounds with the default base learner
ADA = AdaBoostClassifier(n_estimators=50, learning_rate=1)
# fit() returns the estimator itself, so ADA_boost aliases ADA
ADA_boost= ADA.fit(X_train, y_train)
ADApred = ADA_boost.predict(X_test)
# notebook setting: echo every expression in a cell, not just the last one
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
print('Ada Boost Classifier Accuracy: ', round(metrics.accuracy_score(y_test, ADApred) * 100, 2),'%')
```
## ***Support Vector Machines***
```
# Scatter of TSH vs TT4 for a 300-row sample, coloured by hypothyroid status
# NOTE(review): lmplot creates its own figure, so this plt.figure() is unused
plt.figure(figsize=(10,8))
sample_hypo = df_copy.sample(300, random_state=308)
# FIX: seaborn >= 0.12 requires x/y as keyword arguments to lmplot
sns.lmplot(x='tsh', y='tt4', data=sample_hypo, hue='status', legend=False, palette='Set1', fit_reg=False, scatter_kws={'s': 70})
# FIX: corrected the 'Hypothroidism' spelling in the legend label
plt.legend({'Hypothyroidism': 0, 'Negative': 1})
plt.title('TSH and TT4 test as the variable is equal to Status')
plt.show();
```
### ***Fitting the Model***
```
# --- Linear SVM on two features (tsh, tt4) with a decision-surface plot ---
# input features for the model
fitting = sample_hypo[['tsh', 'tt4']].values
# specifying label for the model
label_type = sample_hypo['status'].values
X_train, X_test, y_train, y_test = train_test_split(fitting, label_type, test_size=.25, random_state=34)
# fit a linear-kernel SVM
svmodel = SVC(kernel = 'linear')
svclass = svmodel.fit(X_train, y_train)

def make_meshgrid(x, y, h=.02):
    """Build a dense grid spanning the data range (step h) for contour plotting."""
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    return xx, yy

def plot_contours(ax, clf, xx, yy, **params):
    """Draw clf's decision regions over the (xx, yy) grid."""
    # FIX: predict with the classifier passed in instead of the global
    # `svclass`; the call site passes svclass, so behaviour is unchanged,
    # but the helper no longer silently ignores its argument.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out

fig, ax = plt.subplots()
# title for the plots
title = ('Hypothyroid Classification with linear SVC')
# Set-up grid for plotting.
# NOTE(review): sampling the two feature columns independently (and without a
# seed) decouples x from y, so the scatter does not show real (tsh, tt4) pairs.
t_sam = np.random.choice(fitting[:, 0], 300)
t_sam2 = np.random.choice(fitting[:, 1], 300)
X0, X1 = t_sam,t_sam2
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax, svclass, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=label_type, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
# ax.set_ylabel('TT4')
# ax.set_xlabel('TSH')
ax.set_xticks(())
# ax.legend(['Hypothyroid', 'Negative'])
ax.set_yticks(())
ax.set_title(title)
plt.show()
# --- Polynomial-kernel SVM on the same two features ---
# take a 300-row sample of the dataframe (same seed as the linear SVM section)
sample= df_copy.sample(300, random_state=308)
# input for the model
sample_feat = sample[['tsh', 'tt4']].values
# specifying label for the model
katono = sample['status'].values
sample_feat.shape
X_train, X_test, y_train, y_test = train_test_split(sample_feat, katono, test_size=.25, random_state=34)
# degree-3 polynomial kernel
model3 = SVC(kernel='poly', degree=3, gamma='auto', random_state=308)
# train the model on the training split
model3.fit(X_train, y_train)

# visualizing the decision surface of the polynomial kernel
def make_meshgrid(x, y, h=.02):
    """Build a dense grid spanning the data range (step h) for contour plotting."""
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    return xx, yy

def plot_contours(ax, clf, xx, yy, **params):
    """Draw clf's decision regions over the (xx, yy) grid."""
    # FIX: use the classifier argument instead of the global `model3`;
    # the call site passes model3, so behaviour is unchanged.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out

fig, ax = plt.subplots()
# FIX: the title said 'Flower Classification' (copy-pasted from an iris example)
title = ('Hypothyroid Classification with polynomial SVC')
# Set-up grid for plotting.
X0, X1 = sample_feat[:, 0], sample_feat[:, 1]
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax, model3, xx, yy, cmap=plt.cm.Paired, alpha=0.8)
# FIX: `poltar` was never defined (NameError at runtime); the labels for this
# sample are `katono`.
ax.scatter(X0, X1, c=katono, cmap=plt.cm.Paired, s=20, edgecolors='k')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
plt.show()
```
***Conclusion***
Random Forests, Ada Boosted Trees, Gradient Boosted and SVCs are generally good prediction models for testing for hypothyroidism.
| github_jupyter |
# Batch Normalization – Lesson
1. [What is it?](#theory)
2. [What are it's benefits?](#benefits)
3. [How do we add it to a network?](#implementation_1)
4. [Let's see it work!](#demos)
5. [What are you hiding?](#implementation_2)
# What is Batch Normalization?<a id='theory'></a>
Batch normalization was introduced in Sergey Ioffe's and Christian Szegedy's 2015 paper [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf). The idea is that, instead of just normalizing the inputs to the network, we normalize the inputs to _layers within_ the network. It's called "batch" normalization because during training, we normalize each layer's inputs by using the mean and variance of the values in the current mini-batch.
Why might this help? Well, we know that normalizing the inputs to a _network_ helps the network learn. But a network is a series of layers, where the output of one layer becomes the input to another. That means we can think of any layer in a neural network as the _first_ layer of a smaller network.
For example, imagine a 3 layer network. Instead of just thinking of it as a single network with inputs, layers, and outputs, think of the output of layer 1 as the input to a two layer network. This two layer network would consist of layers 2 and 3 in our original network.
Likewise, the output of layer 2 can be thought of as the input to a single layer network, consisting only of layer 3.
When you think of it like that - as a series of neural networks feeding into each other - then it's easy to imagine how normalizing the inputs to each layer would help. It's just like normalizing the inputs to any other neural network, but you're doing it at every layer (sub-network).
Beyond the intuitive reasons, there are good mathematical reasons why it helps the network learn better, too. It helps combat what the authors call _internal covariate shift_. This discussion is best handled [in the paper](https://arxiv.org/pdf/1502.03167.pdf) and in [Deep Learning](http://www.deeplearningbook.org) a book you can read online written by Ian Goodfellow, Yoshua Bengio, and Aaron Courville. Specifically, check out the batch normalization section of [Chapter 8: Optimization for Training Deep Models](http://www.deeplearningbook.org/contents/optimization.html).
# Benefits of Batch Normalization<a id="benefits"></a>
Batch normalization optimizes network training. It has been shown to have several benefits:
1. **Networks train faster** – Each training _iteration_ will actually be slower because of the extra calculations during the forward pass and the additional hyperparameters to train during back propagation. However, it should converge much more quickly, so training should be faster overall.
2. **Allows higher learning rates** – Gradient descent usually requires small learning rates for the network to converge. And as networks get deeper, their gradients get smaller during back propagation so they require even more iterations. Using batch normalization allows us to use much higher learning rates, which further increases the speed at which networks train.
3. **Makes weights easier to initialize** – Weight initialization can be difficult, and it's even more difficult when creating deeper networks. Batch normalization seems to allow us to be much less careful about choosing our initial starting weights.
4. **Makes more activation functions viable** – Some activation functions do not work well in some situations. Sigmoids lose their gradient pretty quickly, which means they can't be used in deep networks. And ReLUs often die out during training, where they stop learning completely, so we need to be careful about the range of values fed into them. Because batch normalization regulates the values going into each activation function, non-linearlities that don't seem to work well in deep networks actually become viable again.
5. **Simplifies the creation of deeper networks** – Because of the first 4 items listed above, it is easier to build and faster to train deeper neural networks when using batch normalization. And it's been shown that deeper networks generally produce better results, so that's great.
6. **Provides a bit of regularlization** – Batch normalization adds a little noise to your network. In some cases, such as in Inception modules, batch normalization has been shown to work as well as dropout. But in general, consider batch normalization as a bit of extra regularization, possibly allowing you to reduce some of the dropout you might add to a network.
7. **May give better results overall** – Some tests seem to show batch normalization actually improves the training results. However, it's really an optimization to help train faster, so you shouldn't think of it as a way to make your network better. But since it lets you train networks faster, that means you can iterate over more designs more quickly. It also lets you build deeper networks, which are usually better. So when you factor in everything, you're probably going to end up with better results if you build your networks with batch normalization.
# Batch Normalization in TensorFlow<a id="implementation_1"></a>
This section of the notebook shows you one way to add batch normalization to a neural network built in TensorFlow.
The following cell imports the packages we need in the notebook and loads the MNIST dataset to use in our experiments. However, the `tensorflow` package contains all the code you'll actually need for batch normalization.
```
# Import necessary packages
import tensorflow as tf
import tqdm
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Import MNIST data so we have something for our experiments
# NOTE(review): tensorflow.examples.tutorials was removed in TensorFlow 2.x;
# this notebook assumes a TF 1.x environment -- confirm before running.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
```
### Neural network classes for testing
The following class, `NeuralNet`, allows us to create identical neural networks with and without batch normalization. The code is heavily documented, but there is also some additional discussion later. You do not need to read through it all before going through the rest of the notebook, but the comments within the code blocks may answer some of your questions.
*About the code:*
>This class is not meant to represent TensorFlow best practices – the design choices made here are to support the discussion related to batch normalization.
>It's also important to note that we use the well-known MNIST data for these examples, but the networks we create are not meant to be good for performing handwritten character recognition. We chose this network architecture because it is similar to the one used in the original paper, which is complex enough to demonstrate some of the benefits of batch normalization while still being fast to train.
```
class NeuralNet:
def __init__(self, initial_weights, activation_fn, use_batch_norm):
"""
Initializes this object, creating a TensorFlow graph using the given parameters.
:param initial_weights: list of NumPy arrays or Tensors
Initial values for the weights for every layer in the network. We pass these in
so we can create multiple networks with the same starting weights to eliminate
training differences caused by random initialization differences.
The number of items in the list defines the number of layers in the network,
and the shapes of the items in the list define the number of nodes in each layer.
e.g. Passing in 3 matrices of shape (784, 256), (256, 100), and (100, 10) would
create a network with 784 inputs going into a hidden layer with 256 nodes,
followed by a hidden layer with 100 nodes, followed by an output layer with 10 nodes.
:param activation_fn: Callable
The function used for the output of each hidden layer. The network will use the same
activation function on every hidden layer and no activate function on the output layer.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
:param use_batch_norm: bool
Pass True to create a network that uses batch normalization; False otherwise
Note: this network will not use batch normalization on layers that do not have an
activation function.
"""
# Keep track of whether or not this network uses batch normalization.
self.use_batch_norm = use_batch_norm
self.name = "With Batch Norm" if use_batch_norm else "Without Batch Norm"
# Batch normalization needs to do different calculations during training and inference,
# so we use this placeholder to tell the graph which behavior to use.
self.is_training = tf.placeholder(tf.bool, name="is_training")
# This list is just for keeping track of data we want to plot later.
# It doesn't actually have anything to do with neural nets or batch normalization.
self.training_accuracies = []
# Create the network graph, but it will not actually have any real values until after you
# call train or test
self.build_network(initial_weights, activation_fn)
def build_network(self, initial_weights, activation_fn):
"""
Build the graph. The graph still needs to be trained via the `train` method.
:param initial_weights: list of NumPy arrays or Tensors
See __init__ for description.
:param activation_fn: Callable
See __init__ for description.
"""
self.input_layer = tf.placeholder(tf.float32, [None, initial_weights[0].shape[0]])
layer_in = self.input_layer
for weights in initial_weights[:-1]:
layer_in = self.fully_connected(layer_in, weights, activation_fn)
self.output_layer = self.fully_connected(layer_in, initial_weights[-1])
def fully_connected(self, layer_in, initial_weights, activation_fn=None):
"""
Creates a standard, fully connected layer. Its number of inputs and outputs will be
defined by the shape of `initial_weights`, and its starting weight values will be
taken directly from that same parameter. If `self.use_batch_norm` is True, this
layer will include batch normalization, otherwise it will not.
:param layer_in: Tensor
The Tensor that feeds into this layer. It's either the input to the network or the output
of a previous layer.
:param initial_weights: NumPy array or Tensor
Initial values for this layer's weights. The shape defines the number of nodes in the layer.
e.g. Passing in 3 matrix of shape (784, 256) would create a layer with 784 inputs and 256
outputs.
:param activation_fn: Callable or None (default None)
The non-linearity used for the output of the layer. If None, this layer will not include
batch normalization, regardless of the value of `self.use_batch_norm`.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
"""
# Since this class supports both options, only use batch normalization when
# requested. However, do not use it on the final layer, which we identify
# by its lack of an activation function.
if self.use_batch_norm and activation_fn:
# Batch normalization uses weights as usual, but does NOT add a bias term. This is because
# its calculations include gamma and beta variables that make the bias term unnecessary.
# (See later in the notebook for more details.)
weights = tf.Variable(initial_weights)
linear_output = tf.matmul(layer_in, weights)
# Apply batch normalization to the linear combination of the inputs and weights
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
# Now apply the activation function, *after* the normalization.
return activation_fn(batch_normalized_output)
else:
# When not using batch normalization, create a standard layer that multiplies
# the inputs and weights, adds a bias, and optionally passes the result
# through an activation function.
weights = tf.Variable(initial_weights)
biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))
linear_output = tf.add(tf.matmul(layer_in, weights), biases)
return linear_output if not activation_fn else activation_fn(linear_output)
def train(self, session, learning_rate, training_batches, batches_per_sample, save_model_as=None):
"""
Trains the model on the MNIST training dataset.
:param session: Session
Used to run training graph operations.
:param learning_rate: float
Learning rate used during gradient descent.
:param training_batches: int
Number of batches to train.
:param batches_per_sample: int
How many batches to train before sampling the validation accuracy.
:param save_model_as: string or None (default None)
Name to use if you want to save the trained model.
"""
# This placeholder will store the target labels for each mini batch
labels = tf.placeholder(tf.float32, [None, 10])
# Define loss and optimizer
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=self.output_layer))
# Define operations for testing
correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
if self.use_batch_norm:
# If we don't include the update ops as dependencies on the train step, the
# tf.layers.batch_normalization layers won't update their population statistics,
# which will cause the model to fail at inference time
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
else:
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
# Train for the appropriate number of batches. (tqdm is only for a nice timing display)
for i in tqdm.tqdm(range(training_batches)):
# We use batches of 60 just because the original paper did. You can use any size batch you like.
batch_xs, batch_ys = mnist.train.next_batch(60)
session.run(train_step, feed_dict={self.input_layer: batch_xs,
labels: batch_ys,
self.is_training: True})
# Periodically test accuracy against the 5k validation images and store it for plotting later.
if i % batches_per_sample == 0:
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
labels: mnist.validation.labels,
self.is_training: False})
self.training_accuracies.append(test_accuracy)
# After training, report accuracy against test data
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
labels: mnist.validation.labels,
self.is_training: False})
print('{}: After training, final accuracy on validation set = {}'.format(self.name, test_accuracy))
# If you want to use this model later for inference instead of having to retrain it,
# just construct it with the same parameters and then pass this file to the 'test' function
if save_model_as:
tf.train.Saver().save(session, save_model_as)
def test(self, session, test_training_accuracy=False, include_individual_predictions=False, restore_from=None):
    """
    Tests a trained model on the MNIST testing dataset.

    :param session: Session
        Used to run the testing graph operations.
    :param test_training_accuracy: bool (default False)
        If True, perform inference with batch normalization using batch mean and variance;
        if False, perform inference with batch normalization using estimated population mean and variance.
        Note: in real life, *always* perform inference using the population mean and variance.
        This parameter exists just to support demonstrating what happens if you don't.
    :param include_individual_predictions: bool (default False)
        This function always performs an accuracy test against the entire test set. But if this parameter
        is True, it performs an extra test, doing 200 predictions one at a time, and displays the results
        and accuracy.
    :param restore_from: string or None (default None)
        Name of a saved model if you want to test with previously saved weights.
    """
    # This placeholder will store the true labels for each mini batch
    labels = tf.placeholder(tf.float32, [None, 10])

    # Define operations for testing
    correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # If provided, restore from a previously saved model
    if restore_from:
        tf.train.Saver().restore(session, restore_from)

    # Test against all of the MNIST test data
    test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.test.images,
                                                     labels: mnist.test.labels,
                                                     self.is_training: test_training_accuracy})
    print('-'*75)
    print('{}: Accuracy on full test set = {}'.format(self.name, test_accuracy))

    # If requested, perform tests predicting individual values rather than batches
    if include_individual_predictions:
        predictions = []
        correct = 0

        # Build the prediction op ONCE, outside the loop. The original created
        # `tf.arg_max(...)` inside the loop, which adds a brand-new node to the graph on
        # every one of the 200 iterations, slowly bloating the graph. `tf.arg_max` is
        # also deprecated in favor of `tf.argmax`.
        prediction_op = tf.argmax(self.output_layer, 1)

        # Do 200 predictions, 1 at a time
        for i in range(200):
            # This is a normal prediction using an individual test case. However, notice
            # we pass `test_training_accuracy` to `feed_dict` as the value for `self.is_training`.
            # Remember that will tell it whether it should use the batch mean & variance or
            # the population estimates that were calculated while training the model.
            pred, corr = session.run([prediction_op, accuracy],
                                     feed_dict={self.input_layer: [mnist.test.images[i]],
                                                labels: [mnist.test.labels[i]],
                                                self.is_training: test_training_accuracy})
            correct += corr
            predictions.append(pred[0])

        print("200 Predictions:", predictions)
        print("Accuracy on 200 samples:", correct/200)
```
There are quite a few comments in the code, so those should answer most of your questions. However, let's take a look at the most important lines.
We add batch normalization to layers inside the `fully_connected` function. Here are some important points about that code:
1. Layers with batch normalization do not include a bias term.
2. We use TensorFlow's [`tf.layers.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization) function to handle the math. (We show lower-level ways to do this [later in the notebook](#implementation_2).)
3. We tell `tf.layers.batch_normalization` whether or not the network is training. This is an important step we'll talk about later.
4. We add the normalization **before** calling the activation function.
In addition to that code, the training step is wrapped in the following `with` statement:
```python
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
```
This line actually works in conjunction with the `training` parameter we pass to `tf.layers.batch_normalization`. Without it, TensorFlow's batch normalization layer will not operate correctly during inference.
Finally, whenever we train the network or perform inference, we use the `feed_dict` to set `self.is_training` to `True` or `False`, respectively, like in the following line:
```python
session.run(train_step, feed_dict={self.input_layer: batch_xs,
labels: batch_ys,
self.is_training: True})
```
We'll go into more details later, but next we want to show some experiments that use this code and test networks with and without batch normalization.
# Batch Normalization Demos<a id='demos'></a>
This section of the notebook trains various networks with and without batch normalization to demonstrate some of the benefits mentioned earlier.
We'd like to thank the author of this blog post [Implementing Batch Normalization in TensorFlow](http://r2rt.com/implementing-batch-normalization-in-tensorflow.html). That post provided the idea of - and some of the code for - plotting the differences in accuracy during training, along with the idea for comparing multiple networks using the same initial weights.
## Code to support testing
The following two functions support the demos we run in the notebook.
The first function, `plot_training_accuracies`, simply plots the values found in the `training_accuracies` lists of the `NeuralNet` objects passed to it. If you look at the `train` function in `NeuralNet`, you'll see that while it's training the network, it periodically measures validation accuracy and stores the results in that list. It does that just to support these plots.
The second function, `train_and_test`, creates two neural nets - one with and one without batch normalization. It then trains them both and tests them, calling `plot_training_accuracies` to plot how their accuracies changed over the course of training. The really important thing about this function is that it initializes the starting weights for the networks _outside_ of the networks and then passes them in. This lets it train both networks from the exact same starting weights, which eliminates performance differences that might result from (un)lucky initial weights.
```
def plot_training_accuracies(*args, **kwargs):
    """
    Displays a plot of the accuracies calculated during training to demonstrate
    how many iterations it took for the model(s) to converge.

    :param args: One or more NeuralNet objects
        You can supply any number of NeuralNet objects as unnamed arguments
        and this will display their training accuracies. Be sure to call `train`
        on the NeuralNets before calling this function.
    :param kwargs:
        You can supply any named parameters here, but `batches_per_sample` is the only
        one we look for. It should match the `batches_per_sample` value you passed
        to the `train` function. If omitted, it defaults to 500, the same default
        `train` uses.
    """
    fig, ax = plt.subplots()

    # Use .get with train's default rather than kwargs['batches_per_sample'], which
    # would raise KeyError when the caller omits the parameter.
    batches_per_sample = kwargs.get('batches_per_sample', 500)

    for nn in args:
        # x-axis: the training step at which each accuracy sample was taken.
        ax.plot(range(0, len(nn.training_accuracies) * batches_per_sample, batches_per_sample),
                nn.training_accuracies, label=nn.name)
    ax.set_xlabel('Training steps')
    ax.set_ylabel('Accuracy')
    ax.set_title('Validation Accuracy During Training')
    ax.legend(loc=4)
    ax.set_ylim([0, 1])
    plt.yticks(np.arange(0, 1.1, 0.1))
    plt.grid(True)
    plt.show()
def train_and_test(use_bad_weights, learning_rate, activation_fn, training_batches=50000, batches_per_sample=500):
    """
    Creates two networks, one with and one without batch normalization, then trains them
    with identical starting weights, layers, batches, etc. Finally tests and plots their accuracies.

    :param use_bad_weights: bool
        If True, initialize the weights of both networks to wildly inappropriate weights;
        if False, use reasonable starting weights.
    :param learning_rate: float
        Learning rate used during gradient descent.
    :param activation_fn: Callable
        The function used for the output of each hidden layer. The network will use the same
        activation function on every hidden layer and no activation function on the output layer.
        e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
    :param training_batches: (default 50000)
        Number of batches to train.
    :param batches_per_sample: (default 500)
        How many batches to train before sampling the validation accuracy.
    """
    # Both networks start from the exact same weights, so any difference seen during
    # training comes from batch normalization rather than (un)lucky initialization.
    #
    # Note: the weight shapes also define the architecture. The original batch
    # normalization paper used 3 hidden layers with 100 nodes each, followed by a
    # 10-node output layer, and we reproduce that here. Feel free to experiment with
    # other choices, but the input size must stay 784 and the final output 10.
    #
    # "Bad" weights use a huge standard deviation (5.0); "good" ones use a small one (0.05).
    scale = 5.0 if use_bad_weights else 0.05
    layer_shapes = [(784, 100), (100, 100), (100, 100), (100, 10)]
    weights = [np.random.normal(size=shape, scale=scale).astype(np.float32)
               for shape in layer_shapes]

    # Clear TensorFlow's default graph before each run, since this sample code doesn't
    # bother with separate graphs or careful scoping and naming of graph elements.
    tf.reset_default_graph()

    # Build two copies of the same network: one plain, one with batch normalization.
    nn = NeuralNet(weights, activation_fn, False)
    bn = NeuralNet(weights, activation_fn, True)

    # Train and then test both models within a single session.
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        nn.train(sess, learning_rate, training_batches, batches_per_sample)
        bn.train(sess, learning_rate, training_batches, batches_per_sample)

        nn.test(sess)
        bn.test(sess)

    # Plot how validation accuracy evolved during training so we can compare
    # how the models trained and when they converged.
    plot_training_accuracies(nn, bn, batches_per_sample=batches_per_sample)
```
## Comparisons between identical networks, with and without batch normalization
The next series of cells train networks with various settings to show the differences with and without batch normalization. They are meant to clearly demonstrate the effects of batch normalization. We include a deeper discussion of batch normalization later in the notebook.
**The following creates two networks using a ReLU activation function, a learning rate of 0.01, and reasonable starting weights.**
```
train_and_test(False, 0.01, tf.nn.relu)
```
As expected, both networks train well and eventually reach similar test accuracies. However, notice that the model with batch normalization converges slightly faster than the other network, reaching accuracies over 90% almost immediately and nearing its max accuracy in 10 or 15 thousand iterations. The other network takes about 3 thousand iterations to reach 90% and doesn't near its best accuracy until 30 thousand or more iterations.
If you look at the raw speed, you can see that without batch normalization we were computing over 1100 batches per second, whereas with batch normalization that goes down to just over 500. However, batch normalization allows us to perform fewer iterations and converge in less time over all. (We only trained for 50 thousand batches here so we could plot the comparison.)
**The following creates two networks with the same hyperparameters used in the previous example, but only trains for 2000 iterations.**
```
train_and_test(False, 0.01, tf.nn.relu, 2000, 50)
```
As you can see, using batch normalization produces a model with over 95% accuracy in only 2000 batches, and it was above 90% at somewhere around 500 batches. Without batch normalization, the model takes 1750 iterations just to hit 80% – the network with batch normalization hits that mark after around 200 iterations! (Note: if you run the code yourself, you'll see slightly different results each time because the starting weights - while the same for each model - are different for each run.)
In the above example, you should also notice that the networks trained fewer batches per second then what you saw in the previous example. That's because much of the time we're tracking is actually spent periodically performing inference to collect data for the plots. In this example we perform that inference every 50 batches instead of every 500, so generating the plot for this example requires 10 times the overhead for the same 2000 iterations.
**The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and reasonable starting weights.**
```
train_and_test(False, 0.01, tf.nn.sigmoid)
```
With the number of layers we're using and this small learning rate, using a sigmoid activation function takes a long time to start learning. It eventually starts making progress, but it took over 45 thousand batches just to get over 80% accuracy. Using batch normalization gets to 90% in around one thousand batches.
**The following creates two networks using a ReLU activation function, a learning rate of 1, and reasonable starting weights.**
```
train_and_test(False, 1, tf.nn.relu)
```
Now we're using ReLUs again, but with a larger learning rate. The plot shows how training started out pretty normally, with the network with batch normalization starting out faster than the other. But the higher learning rate bounces the accuracy around a bit more, and at some point the accuracy in the network without batch normalization just completely crashes. It's likely that too many ReLUs died off at this point because of the high learning rate.
The next cell shows the same test again. The network with batch normalization performs the same way, and the other suffers from the same problem again, but it manages to train longer before it happens.
```
train_and_test(False, 1, tf.nn.relu)
```
In both of the previous examples, the network with batch normalization manages to get over 98% accuracy, and gets near that result almost immediately. The higher learning rate allows the network to train extremely fast.
**The following creates two networks using a sigmoid activation function, a learning rate of 1, and reasonable starting weights.**
```
train_and_test(False, 1, tf.nn.sigmoid)
```
In this example, we switched to a sigmoid activation function. It appears to handle the higher learning rate well, with both networks achieving high accuracy.
The cell below shows a similar pair of networks trained for only 2000 iterations.
```
train_and_test(False, 1, tf.nn.sigmoid, 2000, 50)
```
As you can see, even though these parameters work well for both networks, the one with batch normalization gets over 90% in 400 or so batches, whereas the other takes over 1700. When training larger networks, these sorts of differences become more pronounced.
**The following creates two networks using a ReLU activation function, a learning rate of 2, and reasonable starting weights.**
```
train_and_test(False, 2, tf.nn.relu)
```
With this very large learning rate, the network with batch normalization trains fine and almost immediately manages 98% accuracy. However, the network without normalization doesn't learn at all.
**The following creates two networks using a sigmoid activation function, a learning rate of 2, and reasonable starting weights.**
```
train_and_test(False, 2, tf.nn.sigmoid)
```
Once again, using a sigmoid activation function with the larger learning rate works well both with and without batch normalization.
However, look at the plot below where we train models with the same parameters but only 2000 iterations. As usual, batch normalization lets it train faster.
```
train_and_test(False, 2, tf.nn.sigmoid, 2000, 50)
```
In the rest of the examples, we use really bad starting weights. That is, normally we would use very small values close to zero. However, in these examples we choose random values with a standard deviation of 5. If you were really training a neural network, you would **not** want to do this. But these examples demonstrate how batch normalization makes your network much more resilient.
**The following creates two networks using a ReLU activation function, a learning rate of 0.01, and bad starting weights.**
```
train_and_test(True, 0.01, tf.nn.relu)
```
As the plot shows, without batch normalization the network never learns anything at all. But with batch normalization, it actually learns pretty well and gets to almost 80% accuracy. The starting weights obviously hurt the network, but you can see how well batch normalization does in overcoming them.
**The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and bad starting weights.**
```
train_and_test(True, 0.01, tf.nn.sigmoid)
```
Using a sigmoid activation function works better than the ReLU in the previous example, but without batch normalization it would take a tremendously long time to train the network, if it ever trained at all.
**The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.**<a id="successful_example_lr_1"></a>
```
train_and_test(True, 1, tf.nn.relu)
```
The higher learning rate used here allows the network with batch normalization to surpass 90% in about 30 thousand batches. The network without it never gets anywhere.
**The following creates two networks using a sigmoid activation function, a learning rate of 1, and bad starting weights.**
```
train_and_test(True, 1, tf.nn.sigmoid)
```
Using sigmoid works better than ReLUs for this higher learning rate. However, you can see that without batch normalization, the network takes a long time to train, bounces around a lot, and spends a long time stuck at 90%. The network with batch normalization trains much more quickly, seems to be more stable, and achieves a higher accuracy.
**The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.**<a id="successful_example_lr_2"></a>
```
train_and_test(True, 2, tf.nn.relu)
```
We've already seen that ReLUs do not do as well as sigmoids with higher learning rates, and here we are using an extremely high rate. As expected, without batch normalization the network doesn't learn at all. But with batch normalization, it eventually achieves 90% accuracy. Notice, though, how its accuracy bounces around wildly during training - that's because the learning rate is really much too high, so the fact that this worked at all is a bit of luck.
**The following creates two networks using a sigmoid activation function, a learning rate of 2, and bad starting weights.**
```
train_and_test(True, 2, tf.nn.sigmoid)
```
In this case, the network with batch normalization trained faster and reached a higher accuracy. Meanwhile, the high learning rate makes the network without normalization bounce around erratically and have trouble getting past 90%.
### Full Disclosure: Batch Normalization Doesn't Fix Everything
Batch normalization isn't magic and it doesn't work every time. Weights are still randomly initialized and batches are chosen at random during training, so you never know exactly how training will go. Even for these tests, where we use the same initial weights for both networks, we still get _different_ weights each time we run.
This section includes two examples that show runs when batch normalization did not help at all.
**The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.**
```
train_and_test(True, 1, tf.nn.relu)
```
When we used these same parameters [earlier](#successful_example_lr_1), we saw the network with batch normalization reach 92% validation accuracy. This time we used different starting weights, initialized using the same standard deviation as before, and the network doesn't learn at all. (Remember, an accuracy around 10% is what the network gets if it just guesses the same value all the time.)
**The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.**
```
train_and_test(True, 2, tf.nn.relu)
```
When we trained with these parameters and batch normalization [earlier](#successful_example_lr_2), we reached 90% validation accuracy. However, this time the network _almost_ starts to make some progress in the beginning, but it quickly breaks down and stops learning.
**Note:** Both of the above examples use *extremely* bad starting weights, along with learning rates that are too high. While we've shown batch normalization _can_ overcome bad values, we don't mean to encourage actually using them. The examples in this notebook are meant to show that batch normalization can help your networks train better. But these last two examples should remind you that you still want to try to use good network design choices and reasonable starting weights. It should also remind you that the results of each attempt to train a network are a bit random, even when using otherwise identical architectures.
# Batch Normalization: A Detailed Look<a id='implementation_2'></a>
The layer created by `tf.layers.batch_normalization` handles all the details of implementing batch normalization. Many students will be fine just using that and won't care about what's happening at the lower levels. However, some students may want to explore the details, so here is a short explanation of what's really happening, starting with the equations you're likely to come across if you ever read about batch normalization.
In order to normalize the values, we first need to find the average value for the batch. If you look at the code, you can see that this is not the average value of the batch _inputs_, but the average value coming _out_ of any particular layer before we pass it through its non-linear activation function and then feed it as an input to the _next_ layer.
We represent the average as $\mu_B$, which is simply the sum of all of the values $x_i$ divided by the number of values, $m$
$$
\mu_B \leftarrow \frac{1}{m}\sum_{i=1}^m x_i
$$
We then need to calculate the variance, or mean squared deviation, represented as $\sigma_{B}^{2}$. If you aren't familiar with statistics, that simply means for each value $x_i$, we subtract the average value (calculated earlier as $\mu_B$), which gives us what's called the "deviation" for that value. We square the result to get the squared deviation. Sum up the results of doing that for each of the values, then divide by the number of values, again $m$, to get the average, or mean, squared deviation.
$$
\sigma_{B}^{2} \leftarrow \frac{1}{m}\sum_{i=1}^m (x_i - \mu_B)^2
$$
Once we have the mean and variance, we can use them to normalize the values with the following equation. For each value, it subtracts the mean and divides by the (almost) standard deviation. (You've probably heard of standard deviation many times, but if you have not studied statistics you might not know that the standard deviation is actually the square root of the mean squared deviation.)
$$
\hat{x_i} \leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_{B}^{2} + \epsilon}}
$$
Above, we said "(almost) standard deviation". That's because the real standard deviation for the batch is calculated by $\sqrt{\sigma_{B}^{2}}$, but the above formula adds the term epsilon, $\epsilon$, before taking the square root. The epsilon can be any small, positive constant - in our code we use the value `0.001`. It is there partially to make sure we don't try to divide by zero, but it also acts to increase the variance slightly for each batch.
Why increase the variance? Statistically, this makes sense because even though we are normalizing one batch at a time, we are also trying to estimate the population distribution – the total training set, which is itself an estimate of the larger population of inputs your network wants to handle. The variance of a population is higher than the variance for any sample taken from that population, so increasing the variance a little bit for each batch helps take that into account.
At this point, we have a normalized value, represented as $\hat{x_i}$. But rather than use it directly, we multiply it by a gamma value, $\gamma$, and then add a beta value, $\beta$. Both $\gamma$ and $\beta$ are learnable parameters of the network and serve to scale and shift the normalized value, respectively. Because they are learnable just like weights, they give your network some extra knobs to tweak during training to help it learn the function it is trying to approximate.
$$
y_i \leftarrow \gamma \hat{x_i} + \beta
$$
We now have the final batch-normalized output of our layer, which we would then pass to a non-linear activation function like sigmoid, tanh, ReLU, Leaky ReLU, etc. In the original batch normalization paper (linked in the beginning of this notebook), they mention that there might be cases when you'd want to perform the batch normalization _after_ the non-linearity instead of before, but it is difficult to find any uses like that in practice.
In `NeuralNet`'s implementation of `fully_connected`, all of this math is hidden inside the following line, where `linear_output` serves as the $x_i$ from the equations:
```python
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
```
The next section shows you how to implement the math directly.
### Batch normalization without the `tf.layers` package
Our implementation of batch normalization in `NeuralNet` uses the high-level abstraction [tf.layers.batch_normalization](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization), found in TensorFlow's [`tf.layers`](https://www.tensorflow.org/api_docs/python/tf/layers) package.
However, if you would like to implement batch normalization at a lower level, the following code shows you how.
It uses [tf.nn.batch_normalization](https://www.tensorflow.org/api_docs/python/tf/nn/batch_normalization) from TensorFlow's [neural net (nn)](https://www.tensorflow.org/api_docs/python/tf/nn) package.
**1)** You can replace the `fully_connected` function in the `NeuralNet` class with the below code and everything in `NeuralNet` will still work like it did before.
```
def fully_connected(self, layer_in, initial_weights, activation_fn=None):
"""
Creates a standard, fully connected layer. Its number of inputs and outputs will be
defined by the shape of `initial_weights`, and its starting weight values will be
taken directly from that same parameter. If `self.use_batch_norm` is True, this
layer will include batch normalization, otherwise it will not.
:param layer_in: Tensor
The Tensor that feeds into this layer. It's either the input to the network or the output
of a previous layer.
:param initial_weights: NumPy array or Tensor
Initial values for this layer's weights. The shape defines the number of nodes in the layer.
e.g. Passing in 3 matrix of shape (784, 256) would create a layer with 784 inputs and 256
outputs.
:param activation_fn: Callable or None (default None)
The non-linearity used for the output of the layer. If None, this layer will not include
batch normalization, regardless of the value of `self.use_batch_norm`.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
"""
if self.use_batch_norm and activation_fn:
# Batch normalization uses weights as usual, but does NOT add a bias term. This is because
# its calculations include gamma and beta variables that make the bias term unnecessary.
weights = tf.Variable(initial_weights)
linear_output = tf.matmul(layer_in, weights)
num_out_nodes = initial_weights.shape[-1]
# Batch normalization adds additional trainable variables:
# gamma (for scaling) and beta (for shifting).
gamma = tf.Variable(tf.ones([num_out_nodes]))
beta = tf.Variable(tf.zeros([num_out_nodes]))
# These variables will store the mean and variance for this layer over the entire training set,
# which we assume represents the general population distribution.
# By setting `trainable=False`, we tell TensorFlow not to modify these variables during
# back propagation. Instead, we will assign values to these variables ourselves.
pop_mean = tf.Variable(tf.zeros([num_out_nodes]), trainable=False)
pop_variance = tf.Variable(tf.ones([num_out_nodes]), trainable=False)
# Batch normalization requires a small constant epsilon, used to ensure we don't divide by zero.
# This is the default value TensorFlow uses.
epsilon = 1e-3
def batch_norm_training():
# Calculate the mean and variance for the data coming out of this layer's linear-combination step.
# The [0] defines an array of axes to calculate over.
batch_mean, batch_variance = tf.nn.moments(linear_output, [0])
# Calculate a moving average of the training data's mean and variance while training.
# These will be used during inference.
# Decay should be some number less than 1. tf.layers.batch_normalization uses the parameter
# "momentum" to accomplish this and defaults it to 0.99
decay = 0.99
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
# The 'tf.control_dependencies' context tells TensorFlow it must calculate 'train_mean'
# and 'train_variance' before it calculates the 'tf.nn.batch_normalization' layer.
# This is necessary because the those two operations are not actually in the graph
# connecting the linear_output and batch_normalization layers,
# so TensorFlow would otherwise just skip them.
with tf.control_dependencies([train_mean, train_variance]):
return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)
def batch_norm_inference():
    # During inference, normalize using our estimated population mean and
    # variance instead of per-batch statistics (a single-input "batch"
    # would otherwise normalize every value to zero).
    return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)
# Use `tf.cond` as a sort of if-check. When self.is_training is True, TensorFlow will execute
# the operation returned from `batch_norm_training`; otherwise it will execute the graph
# operation returned from `batch_norm_inference`.
batch_normalized_output = tf.cond(self.is_training, batch_norm_training, batch_norm_inference)
# Pass the batch-normalized layer output through the activation function.
# The literature states there may be cases where you want to perform the batch normalization *after*
# the activation function, but it is difficult to find any uses of that in practice.
return activation_fn(batch_normalized_output)
else:
# When not using batch normalization, create a standard layer that multiplies
# the inputs and weights, adds a bias, and optionally passes the result
# through an activation function.
weights = tf.Variable(initial_weights)
biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))
linear_output = tf.add(tf.matmul(layer_in, weights), biases)
return linear_output if not activation_fn else activation_fn(linear_output)
```
This version of `fully_connected` is much longer than the original, but once again has extensive comments to help you understand it. Here are some important points:
1. It explicitly creates variables to store gamma, beta, and the population mean and variance. These were all handled for us in the previous version of the function.
2. It initializes gamma to one and beta to zero, so they start out having no effect in this calculation: $y_i \leftarrow \gamma \hat{x_i} + \beta$. However, during training the network learns the best values for these variables using back propagation, just like networks normally do with weights.
3. Unlike gamma and beta, the variables for population mean and variance are marked as untrainable. That tells TensorFlow not to modify them during back propagation. Instead, the lines that call `tf.assign` are used to update these variables directly.
4. TensorFlow won't automatically run the `tf.assign` operations during training because it only evaluates operations that are required based on the connections it finds in the graph. To get around that, we add this line: `with tf.control_dependencies([train_mean, train_variance]):` before we run the normalization operation. This tells TensorFlow it needs to run those operations before running anything inside the `with` block.
5. The actual normalization math is still mostly hidden from us, this time using [`tf.nn.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/nn/batch_normalization).
6. `tf.nn.batch_normalization` does not have a `training` parameter like `tf.layers.batch_normalization` did. However, we still need to handle training and inference differently, so we run different code in each case using the [`tf.cond`](https://www.tensorflow.org/api_docs/python/tf/cond) operation.
7. We use the [`tf.nn.moments`](https://www.tensorflow.org/api_docs/python/tf/nn/moments) function to calculate the batch mean and variance.
**2)** The current version of the `train` function in `NeuralNet` will work fine with this new version of `fully_connected`. However, it uses these lines to ensure population statistics are updated when using batch normalization:
```python
if self.use_batch_norm:
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
else:
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
```
Our new version of `fully_connected` handles updating the population statistics directly. That means you can also simplify your code by replacing the above `if`/`else` condition with just this line:
```python
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
```
**3)** And just in case you want to implement every detail from scratch, you can replace this line in `batch_norm_training`:
```python
return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)
```
with these lines:
```python
normalized_linear_output = (linear_output - batch_mean) / tf.sqrt(batch_variance + epsilon)
return gamma * normalized_linear_output + beta
```
And replace this line in `batch_norm_inference`:
```python
return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)
```
with these lines:
```python
normalized_linear_output = (linear_output - pop_mean) / tf.sqrt(pop_variance + epsilon)
return gamma * normalized_linear_output + beta
```
As you can see in each of the above substitutions, the two lines of replacement code simply implement the following two equations directly. The first line calculates the following equation, with `linear_output` representing $x_i$ and `normalized_linear_output` representing $\hat{x_i}$:
$$
\hat{x_i} \leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_{B}^{2} + \epsilon}}
$$
And the second line is a direct translation of the following equation:
$$
y_i \leftarrow \gamma \hat{x_i} + \beta
$$
We still use the `tf.nn.moments` operation to implement the other two equations from earlier – the ones that calculate the batch mean and variance used in the normalization step. If you really wanted to do everything from scratch, you could replace that line, too, but we'll leave that to you.
## Why the difference between training and inference?
In the original function that uses `tf.layers.batch_normalization`, we tell the layer whether or not the network is training by passing a value for its `training` parameter, like so:
```python
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
```
And that forces us to provide a value for `self.is_training` in our `feed_dict`, like we do in this example from `NeuralNet`'s `train` function:
```python
session.run(train_step, feed_dict={self.input_layer: batch_xs,
labels: batch_ys,
self.is_training: True})
```
If you looked at the [low level implementation](#low_level_code), you probably noticed that, just like with `tf.layers.batch_normalization`, we need to do slightly different things during training and inference. But why is that?
First, let's look at what happens when we don't. The following function is similar to `train_and_test` from earlier, but this time we are only testing one network and instead of plotting its accuracy, we perform 200 predictions on test inputs, 1 input at a time. We can use the `test_training_accuracy` parameter to test the network in training or inference modes (the equivalent of passing `True` or `False` to the `feed_dict` for `is_training`).
```
def batch_norm_test(test_training_accuracy):
    """Train one batch-normalized network, then run individual test predictions.

    :param test_training_accuracy: bool
        If True, perform inference with batch normalization using batch mean
        and variance; if False, perform inference with batch normalization
        using the estimated population mean and variance.
    """
    # Random initial weights for a 784 -> 100 -> 100 -> 100 -> 10 network.
    layer_shapes = [(784, 100), (100, 100), (100, 100), (100, 10)]
    weights = [np.random.normal(size=shape, scale=0.05).astype(np.float32)
               for shape in layer_shapes]

    tf.reset_default_graph()

    # Build a network that uses batch normalization (third argument True).
    bn = NeuralNet(weights, tf.nn.relu, True)

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # Train first, then test with one prediction per input so the effect
        # of batch vs. population statistics is visible.
        bn.train(sess, 0.01, 2000, 2000)
        bn.test(sess, test_training_accuracy=test_training_accuracy,
                include_individual_predictions=True)
```
In the following cell, we pass `True` for `test_training_accuracy`, which performs the same batch normalization that we normally perform **during training**.
```
batch_norm_test(True)
```
As you can see, the network guessed the same value every time! But why? Because during training, a network with batch normalization adjusts the values at each layer based on the mean and variance **of that batch**. The "batches" we are using for these predictions have a single input each time, so their values _are_ the means, and their variances will always be 0. That means the network will normalize the values at any layer to zero. (Review the equations from before to see why a value that is equal to the mean would always normalize to zero.) So we end up with the same result for every input we give the network, because it's the value the network produces when it applies its learned weights to zeros at every layer.
**Note:** If you re-run that cell, you might get a different value from what we showed. That's because the specific weights the network learns will be different every time. But whatever value it is, it should be the same for all 200 predictions.
To overcome this problem, the network does not just normalize the batch at each layer. It also maintains an estimate of the mean and variance for the entire population. So when we perform inference, instead of letting it "normalize" all the values using their own means and variance, it uses the estimates of the population mean and variance that it calculated while training.
So in the following example, we pass `False` for `test_training_accuracy`, which tells the network that we want it to perform inference with the population statistics it calculates during training.
```
batch_norm_test(False)
```
As you can see, now that we're using the estimated population mean and variance, we get a 97% accuracy. That means it guessed correctly on 194 of the 200 samples – not too bad for something that trained in under 4 seconds. :)
# Considerations for other network types
This notebook demonstrates batch normalization in a standard neural network with fully connected layers. You can also use batch normalization in other types of networks, but there are some special considerations.
### ConvNets
Convolution layers consist of multiple feature maps. (Remember, the depth of a convolutional layer refers to its number of feature maps.) And the weights for each feature map are shared across all the inputs that feed into the layer. Because of these differences, batch normalizing convolutional layers requires batch/population mean and variance per feature map rather than per node in the layer.
When using `tf.layers.batch_normalization`, be sure to pay attention to the order of your convolutional dimensions.
Specifically, you may want to set a different value for the `axis` parameter if your layers have their channels first instead of last.
In our low-level implementations, we used the following line to calculate the batch mean and variance:
```python
batch_mean, batch_variance = tf.nn.moments(linear_output, [0])
```
If we were dealing with a convolutional layer, we would calculate the mean and variance with a line like this instead:
```python
batch_mean, batch_variance = tf.nn.moments(conv_layer, [0,1,2], keep_dims=False)
```
The second parameter, `[0,1,2]`, tells TensorFlow to calculate the batch mean and variance over each feature map. (The three axes are the batch, height, and width.) And setting `keep_dims` to `False` tells `tf.nn.moments` not to return values with the same size as the inputs. Specifically, it ensures we get one mean/variance pair per feature map.
### RNNs
Batch normalization can work with recurrent neural networks, too, as shown in the 2016 paper [Recurrent Batch Normalization](https://arxiv.org/abs/1603.09025). It's a bit more work to implement, but basically involves calculating the means and variances per time step instead of per layer. You can find an example where someone extended `tf.nn.rnn_cell.RNNCell` to include batch normalization in [this GitHub repo](https://gist.github.com/spitis/27ab7d2a30bbaf5ef431b4a02194ac60).
| github_jupyter |
```
import logging
import os
import math
from dataclasses import dataclass, field
import copy # for deep copy
import torch
from torch import nn
from transformers import RobertaForMaskedLM, RobertaTokenizerFast, TextDataset, DataCollatorForLanguageModeling, Trainer
from transformers import TrainingArguments, HfArgumentParser
from transformers.modeling_longformer import LongformerSelfAttention
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class RobertaLongSelfAttention(LongformerSelfAttention):
    """Adapter that lets LongformerSelfAttention accept the call signature
    used by RoBERTa encoder layers.

    Extra Longformer-specific arguments are accepted but not forwarded;
    only hidden_states and attention_mask reach the parent implementation.
    """
    def forward(
        self,
        hidden_states, attention_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None
    ):
        # NOTE(review): is_index_masked / is_index_global_attn / is_global_attn
        # are dropped here — assumes the parent derives what it needs from
        # attention_mask; confirm against the installed transformers version.
        return super().forward(hidden_states, attention_mask=attention_mask)
class RobertaLongForMaskedLM(RobertaForMaskedLM):
    """RobertaForMaskedLM whose per-layer self-attention modules are replaced
    with Longformer sliding-window attention at construction time."""

    def __init__(self, config):
        super().__init__(config)
        # Swap each `modeling_bert.BertSelfAttention` for a
        # `LongformerSelfAttention`, keyed by its layer index.
        for layer_id, encoder_layer in enumerate(self.roberta.encoder.layer):
            encoder_layer.attention.self = RobertaLongSelfAttention(config, layer_id=layer_id)
def create_long_model(save_model_to, attention_window, max_pos):
    """Convert roberta-base into a long-sequence model with Longformer attention.

    Extends the position embedding table to `max_pos` positions and replaces
    every self-attention module with a `LongformerSelfAttention` using a
    sliding window of `attention_window`, then saves model and tokenizer to
    `save_model_to`. Returns the (model, tokenizer) pair.
    """
    model = RobertaForMaskedLM.from_pretrained('roberta-base')
    tokenizer = RobertaTokenizerFast.from_pretrained('roberta-base', model_max_length=max_pos)
    config = model.config

    # extend position embeddings
    tokenizer.model_max_length = max_pos
    tokenizer.init_kwargs['model_max_length'] = max_pos
    current_max_pos, embed_size = model.roberta.embeddings.position_embeddings.weight.shape
    max_pos += 2  # NOTE: RoBERTa has positions 0,1 reserved, so embedding size is max position + 2
    config.max_position_embeddings = max_pos
    assert max_pos > current_max_pos

    # allocate a larger position embedding matrix
    new_pos_embed = model.roberta.embeddings.position_embeddings.weight.new_empty(max_pos, embed_size)
    # copy position embeddings over and over to initialize the new position
    # embeddings — tiling the learned table is a better start than random.
    # NOTE(review): assumes (max_pos - 2) is a multiple of `step` (true for
    # the default 4096 with roberta-base's 512 positions); confirm before
    # using other sizes, as a partial final tile would make the slice
    # assignment below fail.
    k = 2
    step = current_max_pos - 2
    while k < max_pos - 1:
        new_pos_embed[k:(k + step)] = model.roberta.embeddings.position_embeddings.weight[2:]
        k += step
    model.roberta.embeddings.position_embeddings.weight.data = new_pos_embed
    # Rebuild the position-id buffer to cover the enlarged range.
    model.roberta.embeddings.position_ids.data = torch.tensor([i for i in range(max_pos)]).reshape(1, max_pos)
    """
    model.roberta.embeddings.position_embeddings.weight.data = new_pos_embed # add after this line
    model.roberta.embeddings.position_embeddings.num_embeddings = len(new_pos_embed.data)
    # first, check that model.roberta.embeddings.position_embeddings.weight.data.shape is correct — has to be 4096 (default) of your desired length
    model.roberta.embeddings.position_ids = torch.arange(0, model.roberta.embeddings.position_embeddings.num_embeddings)[None]
    """

    # replace the `modeling_bert.BertSelfAttention` object with `LongformerSelfAttention`;
    # one window size per hidden layer, and the existing Q/K/V projections are
    # deep-copied into both the local and the global attention projections.
    config.attention_window = [attention_window] * config.num_hidden_layers
    for i, layer in enumerate(model.roberta.encoder.layer):
        longformer_self_attn = LongformerSelfAttention(config, layer_id=i)
        longformer_self_attn.query = copy.deepcopy(layer.attention.self.query)
        longformer_self_attn.key = copy.deepcopy(layer.attention.self.key)
        longformer_self_attn.value = copy.deepcopy(layer.attention.self.value)
        longformer_self_attn.query_global = copy.deepcopy(layer.attention.self.query)
        longformer_self_attn.key_global = copy.deepcopy(layer.attention.self.key)
        longformer_self_attn.value_global = copy.deepcopy(layer.attention.self.value)
        """
        longformer_self_attn = LongformerSelfAttention(config, layer_id=i)
        longformer_self_attn.query = layer.attention.self.query
        longformer_self_attn.key = layer.attention.self.key
        longformer_self_attn.value = layer.attention.self.value
        longformer_self_attn.query_global = layer.attention.self.query
        longformer_self_attn.key_global = layer.attention.self.key
        longformer_self_attn.value_global = layer.attention.self.value
        """
        layer.attention.self = longformer_self_attn

    logger.info(f'saving model to {save_model_to}')
    model.save_pretrained(save_model_to)
    tokenizer.save_pretrained(save_model_to)
    return model, tokenizer
def copy_proj_layers(model):
    """Copy each layer's local Q/K/V projections into its global projections.

    Longformer keeps separate projections for global attention. After
    pretraining with local attention only, the global projections are
    initialized from the trained local ones.

    Returns the same model object (mutated in place).
    """
    # Fix: the original used enumerate() but never used the index.
    for layer in model.roberta.encoder.layer:
        layer.attention.self.query_global = layer.attention.self.query
        layer.attention.self.key_global = layer.attention.self.key
        layer.attention.self.value_global = layer.attention.self.value
    return model
def pretrain_and_evaluate(args, model, tokenizer, eval_only, model_path):
    """Evaluate (and optionally MLM-pretrain) `model`, logging loss as bpc.

    When `eval_only` is True, only an evaluation pass runs; otherwise the
    model is trained with the HuggingFace Trainer, saved, and re-evaluated.
    Eval loss (nats) is converted to bits-per-character via division by ln 2.
    """
    # The validation set is always needed; it doubles as the train set when
    # only evaluating, since Trainer requires a train_dataset.
    val_dataset = TextDataset(tokenizer=tokenizer,
                              file_path=args.val_datapath,
                              block_size=tokenizer.max_len)
    if eval_only:
        train_dataset = val_dataset
    else:
        logger.info(f'Loading and tokenizing training data is usually slow: {args.train_datapath}')
        train_dataset = TextDataset(tokenizer=tokenizer,
                                    file_path=args.train_datapath,
                                    block_size=tokenizer.max_len)

    # Standard masked-language-modeling collator (15% of tokens masked).
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
    trainer = Trainer(model=model, args=args, data_collator=data_collator,
                      train_dataset=train_dataset, eval_dataset=val_dataset, prediction_loss_only=True)

    eval_loss = trainer.evaluate()
    eval_loss = eval_loss['eval_loss']
    logger.info(f'Initial eval bpc: {eval_loss/math.log(2)}')

    if not eval_only:
        # `model_path` lets Trainer resume from a checkpoint directory.
        trainer.train(model_path=model_path)
        trainer.save_model()

        eval_loss = trainer.evaluate()
        eval_loss = eval_loss['eval_loss']
        logger.info(f'Eval bpc after pretraining: {eval_loss/math.log(2)}')
@dataclass
class ModelArgs:
    """Hyperparameters controlling the roberta-base -> long-model conversion."""
    # Sliding-window size used by each LongformerSelfAttention layer.
    attention_window: int = field(default=512, metadata={"help": "Size of attention window"})
    # Target maximum sequence length for the extended position embeddings.
    max_pos: int = field(default=4096, metadata={"help": "Maximum position"})
# Build HuggingFace TrainingArguments plus our ModelArgs from an explicit
# argument list — this runs in a notebook, so there is no real command line.
parser = HfArgumentParser((TrainingArguments, ModelArgs,))
training_args, model_args = parser.parse_args_into_dataclasses(look_for_args_file=False, args=[
    '--output_dir', 'tmp',
    '--warmup_steps', '500',
    '--learning_rate', '0.00003',
    '--weight_decay', '0.01',
    '--adam_epsilon', '1e-6',
    '--max_steps', '3000',
    '--logging_steps', '500',
    '--save_steps', '500',
    '--max_grad_norm', '5.0',
    '--per_gpu_eval_batch_size', '8',
    '--per_gpu_train_batch_size', '2',  # 32GB gpu with fp32
    '--gradient_accumulation_steps', '32',
    '--evaluate_during_training',
    '--do_train',
    '--do_eval',
])
# Raw wikitext-103 text files used for MLM pretraining and evaluation.
training_args.val_datapath = '/workspace/data/wikitext-103-raw/wiki.valid.raw'
training_args.train_datapath = '/workspace/data/wikitext-103-raw/wiki.train.raw'
# Choose GPU: pin all work to device 0.
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Baseline: evaluate the unmodified roberta-base (seqlen 512) so the long
# model's bpc can be compared against it later.
roberta_base = RobertaForMaskedLM.from_pretrained('roberta-base')
roberta_base_tokenizer = RobertaTokenizerFast.from_pretrained('roberta-base')
# Fix: corrected 'refernece' typo in the log message.
logger.info('Evaluating roberta-base (seqlen: 512) for reference ...')
pretrain_and_evaluate(training_args, roberta_base, roberta_base_tokenizer, eval_only=True, model_path=None)

# Convert roberta-base into the long-attention variant and save it to disk.
model_path = f'{training_args.output_dir}/roberta-base-{model_args.max_pos}'
# exist_ok avoids the check-then-create race of the original exists()/makedirs pair.
os.makedirs(model_path, exist_ok=True)

logger.info(f'Converting roberta-base into roberta-base-{model_args.max_pos}')
model, tokenizer = create_long_model(
    save_model_to=model_path, attention_window=model_args.attention_window, max_pos=model_args.max_pos)
"""
Self =
(query): Linear(in_features=768, out_features=768, bias=True)
(key): Linear(in_features=768, out_features=768, bias=True)
(value): Linear(in_features=768, out_features=768, bias=True)
(query_global): Linear(in_features=768, out_features=768, bias=True)
(key_global): Linear(in_features=768, out_features=768, bias=True)
(value_global): Linear(in_features=768, out_features=768, bias=True)
"""
logger.info(f'Loading the model from {model_path}')
tokenizer = RobertaTokenizerFast.from_pretrained(model_path)
model = RobertaLongForMaskedLM.from_pretrained(model_path)
logger.info(f'Pretraining roberta-base-{model_args.max_pos} ... ')
training_args.max_steps = 3 ## <<<<<<<<<<<<<<<<<<<<<<<< REMOVE THIS <<<<<<<<<<<<<<<<<<<<<<<<
%magic
pretrain_and_evaluate(training_args, model, tokenizer, eval_only=False, model_path=training_args.output_dir)
logger.info(f'Copying local projection layers into global projection layers ... ')
model = copy_proj_layers(model)
logger.info(f'Saving model to {model_path}')
model.save_pretrained(model_path)
logger.info(f'Loading the model from {model_path}')
tokenizer = RobertaTokenizerFast.from_pretrained(model_path)
model = RobertaLongForMaskedLM.from_pretrained(model_path)
import transformers
transformers.__version__
model.roberta.embeddings
model.roberta.embeddings.position_embeddings
model.roberta.embeddings.position_embeddings.num_embeddings
model.roberta.embeddings.position_embeddings.num_embeddings
torch.cop
```
| github_jupyter |
# Inequality Data Processing (WIID)
## Data Dictionary
| Variable | Definition |
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| id | Identifier |
| country | Country/area |
| c3 | 3-digit country code in ISO 3166-1 alpha-3 format |
| c2 | 2-digit country code in ISO 3166-1 alpha-2 format |
| year | Year. Note that when a survey continues for more than a year, the year when it is finished is considered |
| gini_reported | Gini coefficient as reported by the source (in most cases based on microdata, in some older observations estimates derive from grouped data) |
| q1-q5 | Quintile group shares of resource |
| d1-d10 | Decile group shares of resource |
| bottom5 and top5 | Bottom five and top five percent group shares of resource |
| resource | Resource concept |
| resource_detailed | Detailed resource concept |
| scale | Equivalence scale |
| scale_detailed | Detailed equivalence scale |
| sharing_unit | Income sharing unit/statistical unit |
| reference_unit | Unit of analysis, indicates whether the data has been weighted with a person or a household weight |
| areacovr | Area coverage. The land area which was included in the original sample surveys etc. |
| areacovr_detailed | Detailed area coverage |
| popcovr | Population coverage. The population covered in the sample surveys in the land area (all, rural, urban etc.) which was included |
| popcovr_detailed | Detailed population coverage, including age coverage information in certain cases |
| region_un | Regional grouping based on United Nations geoscheme |
| region_un_sub | Sub-regional grouping based on United Nations geoscheme |
| region_wb | Regional grouping based on World Bank classification |
| eu | Current EU member state |
| oecd | Current OECD member state |
| incomegroup | World Bank classification by country income |
| mean | Survey mean given with the same underlying definitions as the Gini coefficient and the share data |
| median | Survey median given with the same underlying definitions as the Gini coefficient and the share data |
| currency | Currency for the mean and median values. If the reference is US$2011PPP it means that the currency is in 2011 US dollar per month, with purchasing power parity applied on it. |
| reference_period | Time period for measuring mean and median values |
| exchangerate | Conversion rate from local currency units (LCU) to United States Dollars (USD) |
| mean_usd | Mean measure in United States Dollar (USD) |
| median_usd | Median measure in United States Dollar (USD) |
| gdp_ppp_pc_usd2011 | Gross Domestic Product (GDP) is converted to United States Dollars (USD) using purchasing power parity rates and divided by total population. Data are in constant 2011 United States Dollar (USD) |
| population | Population of countries from the UN population prospects |
| revision | Indicates the time of the revision when the observation was included to the database |
| quality | Quality assessment |
| quality_score | Computed quality score |
| source | Source type |
| source_detailed | Source from which the observation was obtained |
| source_comments | Additional source comments |
| survey | Originating survey information |
```
import re
import numpy as np
import pandas as pd
import pycountry
%matplotlib inline
# Display floats with 3 decimals and show every column when inspecting frames.
pd.set_option('display.float_format', lambda x: '%.3f' % x)
pd.set_option('display.max_columns', None)
```
## Load The File
```
# Load the WIID (World Income Inequality Database), December 2018 release.
df = pd.read_excel('../data/external/Inequality/WIID/WIID_19Dec2018.xlsx')
```
## Standardize Country Codes
```
""" Only Select rows with valid country codes
"""
country_locations = []
for country in df['c3']:
try:
pycountry.countries.lookup(country)
country_locations.append(True)
except LookupError:
country_locations.append(False)
df = df[country_locations]
```
## Standardize Indexes
```
# Align column names with the naming used by the other processed datasets.
df.rename(columns={"c3": "Country Code", "year": "Year"}, inplace=True)
```
## Remove out of scope rows (consumption/gross)
```
df = df[(df.resource != "Consumption")]
```
## Remove out of scope rows by year
```
df = df[df["Year"] > 1994]
df = df[df["Year"] < 2018]
df = df.groupby(["Country Code","Year"]).mean()
```
## Save Data
```
df.to_pickle("../data/processed/Inequality_WIID.pickle")
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Text classification with TensorFlow Lite Model Maker
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/lite/tutorials/model_maker_text_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
The TensorFlow Lite Model Maker library simplifies the process of adapting and converting a TensorFlow model to particular input data when deploying this model for on-device ML applications.
This notebook shows an end-to-end example that utilizes the Model Maker library to illustrate the adaptation and conversion of a commonly-used text classification model to classify movie reviews on a mobile device. The text classification model classifies text into predefined categories. The inputs should be preprocessed text and the outputs are the probabilities of the categories. The dataset used in this tutorial are positive and negative movie reviews.
## Prerequisites
### Install the required packages
To run this example, install the required packages, including the Model Maker package from the [GitHub repo](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker).
**If you run this notebook on Colab, you may see an error message about `tensorflowjs` and `tensorflow-hub` version incompatibility. It is safe to ignore this error as we do not use `tensorflowjs` in this workflow.**
```
!pip install -q tflite-model-maker
```
Import the required packages.
```
import numpy as np
import os
from tflite_model_maker import configs
from tflite_model_maker import ExportFormat
from tflite_model_maker import model_spec
from tflite_model_maker import text_classifier
from tflite_model_maker import TextClassifierDataLoader
import tensorflow as tf
# Model Maker requires TensorFlow 2.x; fail fast in TF1 environments.
assert tf.__version__.startswith('2')
# Reduce TensorFlow log output to errors only.
tf.get_logger().setLevel('ERROR')
```
### Download the sample training data.
In this tutorial, we will use the [SST-2](https://nlp.stanford.edu/sentiment/index.html) (Stanford Sentiment Treebank) which is one of the tasks in the [GLUE](https://gluebenchmark.com/) benchmark. It contains 67,349 movie reviews for training and 872 movie reviews for testing. The dataset has two classes: positive and negative movie reviews.
```
# Download and extract SST-2; get_file caches the archive locally.
data_dir = tf.keras.utils.get_file(
    fname='SST-2.zip',
    origin='https://dl.fbaipublicfiles.com/glue/data/SST-2.zip',
    extract=True)
# Path of the extracted dataset folder next to the downloaded archive.
data_dir = os.path.join(os.path.dirname(data_dir), 'SST-2')
```
The SST-2 dataset is stored in TSV format. The only difference between TSV and CSV is that TSV uses a tab `\t` character as its delimiter instead of a comma `,` in the CSV format.
Here are the first 5 lines of the training dataset. label=0 means negative, label=1 means positive.
| sentence | label | | | |
|-------------------------------------------------------------------------------------------|-------|---|---|---|
| hide new secretions from the parental units | 0 | | | |
| contains no wit , only labored gags | 0 | | | |
| that loves its characters and communicates something rather beautiful about human nature | 1 | | | |
| remains utterly satisfied to remain the same throughout | 0 | | | |
| on the worst revenge-of-the-nerds clichés the filmmakers could dredge up | 0 | | | |
Next, we will load the dataset into a Pandas dataframe and change the current label names (`0` and `1`) to more human-readable ones (`negative` and `positive`) and use them for model training.
```
import pandas as pd
def replace_label(original_file, new_file):
    """Rewrite a TSV label file with human-readable label names.

    Reads *original_file* (tab-separated), maps the numeric ``label``
    column 0 -> 'negative' and 1 -> 'positive', and writes the result
    to *new_file* in CSV format.
    """
    # The SST-2 data is stored in TSV format, so split on tabs.
    frame = pd.read_csv(original_file, sep='\t')
    # Human-readable name for each numeric label.
    name_for_label = {0: 'negative', 1: 'positive'}
    # Execute the label change in place on the 'label' column only.
    frame.replace({'label': name_for_label}, inplace=True)
    # Persist the relabeled dataset as CSV.
    frame.to_csv(new_file)
# Replace the label names for both the training and the test dataset, then
# write the updated CSV files to the current folder.
# (The original code wrapped the path in a redundant nested os.path.join.)
replace_label(os.path.join(data_dir, 'train.tsv'), 'train.csv')
replace_label(os.path.join(data_dir, 'dev.tsv'), 'dev.csv')
```
## Quickstart
There are five steps to train a text classification model:
**Step 1. Choose a text classification model architecture.**
Here we use the average word embedding model architecture, which will produce a small and fast model with decent accuracy.
```
# Step 1: choose the model architecture — average word embedding (small, fast).
spec = model_spec.get('average_word_vec')
```
Model Maker also supports other model architectures such as [BERT](https://arxiv.org/abs/1810.04805). If you are interested to learn about other architecture, see the [Choose a model architecture for Text Classifier](#scrollTo=kJ_B8fMDOhMR) section below.
**Step 2. Load the training and test data, then preprocess them according to a specific `model_spec`.**
Model Maker can take input data in the CSV format. We will load the training and test datasets with the human-readable label names that were created earlier.
Each model architecture requires input data to be processed in a particular way. `TextClassifierDataLoader` reads the requirement from `model_spec` and automatically executes the necessary preprocessing.
```
# Step 2: load the CSV splits; the loader reads preprocessing requirements
# from model_spec. is_training=True applies training-time preprocessing.
train_data = TextClassifierDataLoader.from_csv(
    filename='train.csv',
    text_column='sentence',
    label_column='label',
    model_spec=spec,
    is_training=True)
test_data = TextClassifierDataLoader.from_csv(
    filename='dev.csv',
    text_column='sentence',
    label_column='label',
    model_spec=spec,
    is_training=False)
```
**Step 3. Train the TensorFlow model with the training data.**
The average word embedding model uses `batch_size = 32` by default. Therefore you will see that it takes 2104 steps to go through the 67,349 sentences in the training dataset. We will train the model for 10 epochs, which means going through the training dataset 10 times.
```
# Step 3: train the classifier for 10 passes over the training data.
model = text_classifier.create(train_data, model_spec=spec, epochs=10)
```
**Step 4. Evaluate the model with the test data.**
After training the text classification model using the sentences in the training dataset, we will use the remaining 872 sentences in the test dataset to evaluate how the model performs against new data it has never seen before.
As the default batch size is 32, it will take 28 steps to go through the 872 sentences in the test dataset.
```
# Step 4: measure loss and accuracy on the held-out test split.
loss, acc = model.evaluate(test_data)
```
**Step 5. Export as a TensorFlow Lite model.**
Let's export the text classification model that we have trained in the TensorFlow Lite format. We will specify which folder to export the model to.
You may see a warning that the `vocab.txt` file does not exist in the metadata; it can be safely ignored.
```
# Step 5: export the trained model as a TFLite file (with metadata) to this folder.
model.export(export_dir='average_word_vec')
```
You can download the TensorFlow Lite model file using the left sidebar of Colab. Go into the `average_word_vec` folder as we specified in `export_dir` parameter above, right-click on the `model.tflite` file and choose `Download` to download it to your local computer.
This model can be integrated into an Android or an iOS app using the [NLClassifier API](https://www.tensorflow.org/lite/inference_with_metadata/task_library/nl_classifier) of the [TensorFlow Lite Task Library](https://www.tensorflow.org/lite/inference_with_metadata/task_library/overview).
See the [TFLite Text Classification sample app](https://github.com/tensorflow/examples/blob/master/lite/examples/text_classification/android/lib_task_api/src/main/java/org/tensorflow/lite/examples/textclassification/client/TextClassificationClient.java#L54) for more details on how the model is used in a working app.
*Note 1: Android Studio Model Binding does not support text classification yet so please use the TensorFlow Lite Task Library.*
*Note 2: There is a `model.json` file in the same folder with the TFLite model. It contains the JSON representation of the [metadata](https://www.tensorflow.org/lite/convert/metadata) bundled inside the TensorFlow Lite model. Model metadata helps the TFLite Task Library know what the model does and how to pre-process/post-process data for the model. You don't need to download the `model.json` file as it is only for informational purpose and its content is already inside the TFLite file.*
*Note 3: If you train a text classification model using MobileBERT or BERT-Base architecture, you will need to use [BertNLClassifier API](https://www.tensorflow.org/lite/inference_with_metadata/task_library/bert_nl_classifier) instead to integrate the trained model into a mobile app.*
The following sections walk through the example step by step to show more details.
## Choose a model architecture for Text Classifier
Each `model_spec` object represents a specific model for the text classifier. TensorFlow Lite Model Maker currently supports [MobileBERT](https://arxiv.org/pdf/2004.02984.pdf), averaging word embeddings and [BERT-Base](https://arxiv.org/pdf/1810.04805.pdf) models.
| Supported Model | Name of model_spec | Model Description | Model size |
|--------------------------|-------------------------|-----------------------------------------------------------------------------------------------------------------------|---------------------------------------------|
| Averaging Word Embedding | 'average_word_vec' | Averaging text word embeddings with RELU activation. | <1MB |
| MobileBERT | 'mobilebert_classifier' | 4.3x smaller and 5.5x faster than BERT-Base while achieving competitive results, suitable for on-device applications. | 25MB w/ quantization <br/> 100MB w/o quantization |
| BERT-Base | 'bert_classifier' | Standard BERT model that is widely used in NLP tasks. | 300MB |
In the quick start, we have used the average word embedding model. Let's switch to [MobileBERT](https://arxiv.org/pdf/2004.02984.pdf) to train a model with higher accuracy.
```
# Switch to the MobileBERT architecture for higher accuracy (larger/slower model).
mb_spec = model_spec.get('mobilebert_classifier')
```
## Load training data
You can upload your own dataset to work through this tutorial. Upload your dataset by using the left sidebar in Colab.
<img src="https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_text_classification.png" alt="Upload File" width="800" hspace="100">
If you prefer not to upload your dataset to the cloud, you can also locally run the library by following the [guide](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker).
To keep it simple, we will reuse the SST-2 dataset downloaded earlier. Let's use the `TextClassifierDataLoader.from_csv` method to load the data.
Please note that since we have changed the model architecture, we need to reload the training and test datasets to apply the new preprocessing logic.
```
# Reload both splits: MobileBERT needs different preprocessing than the
# average-word-vec model, and the loader derives it from model_spec.
train_data = TextClassifierDataLoader.from_csv(
    filename='train.csv',
    text_column='sentence',
    label_column='label',
    model_spec=mb_spec,
    is_training=True)
test_data = TextClassifierDataLoader.from_csv(
    filename='dev.csv',
    text_column='sentence',
    label_column='label',
    model_spec=mb_spec,
    is_training=False)
```
The Model Maker library also supports the `from_folder()` method to load data. It assumes that the text data of the same class are in the same subdirectory and that the subfolder name is the class name. Each text file contains one movie review sample. The `class_labels` parameter is used to specify which subfolders to use as classes.
## Train a TensorFlow Model
Train a text classification model using the training data.
*Note: As MobileBERT is a complex model, each training epoch will take about 10 minutes on a Colab GPU. Please make sure that you are using a GPU runtime.*
```
# Train MobileBERT for 3 epochs (each epoch takes ~10 min on a Colab GPU).
model = text_classifier.create(train_data, model_spec=mb_spec, epochs=3)
```
Examine the detailed model structure.
```
# Print the layer-by-layer structure of the trained Keras model.
model.summary()
```
## Evaluate the model
Evaluate the model that we have just trained using the test data and measure the loss and accuracy value.
```
# Evaluate the MobileBERT classifier on the test split.
loss, acc = model.evaluate(test_data)
```
## Quantize the model
In many on-device ML applications, the model size is an important factor. Therefore, it is recommended that you quantize the model to make it smaller and potentially run faster. Model Maker automatically applies the recommended quantization scheme for each model architecture, but you can customize the quantization config as below.
```
# Customize the post-training quantization: dynamic-range quantization with
# the default TFLite optimization set.
config = configs.QuantizationConfig.create_dynamic_range_quantization(optimizations=[tf.lite.Optimize.DEFAULT])
# Opt in to the newer MLIR-based quantizer.
config.experimental_new_quantizer = True
```
## Export as a TensorFlow Lite model
Convert the trained model to TensorFlow Lite model format with [metadata](https://www.tensorflow.org/lite/convert/metadata) so that you can later use in an on-device ML application. The label file and the vocab file are embedded in metadata. The default TFLite filename is `model.tflite`.
```
# Export the quantized TFLite model (plus embedded metadata) to mobilebert/.
model.export(export_dir='mobilebert/', quantization_config=config)
```
The TensorFlow Lite model file can be integrated in a mobile app using the [BertNLClassifier API](https://www.tensorflow.org/lite/inference_with_metadata/task_library/bert_nl_classifier) in [TensorFlow Lite Task Library](https://www.tensorflow.org/lite/inference_with_metadata/task_library/overview). Please note that this is **different** from the `NLClassifier` API used to integrate the text classification trained with the average word vector model architecture.
The export formats can be one or a list of the following:
* `ExportFormat.TFLITE`
* `ExportFormat.LABEL`
* `ExportFormat.VOCAB`
* `ExportFormat.SAVED_MODEL`
By default, it exports only the TensorFlow Lite model file containing the model metadata. You can also choose to export other files related to the model for better examination. For instance, exporting only the label file and vocab file as follows:
```
# Export only the label file and the vocab file (no TFLite model) for inspection.
model.export(export_dir='mobilebert/', export_format=[ExportFormat.LABEL, ExportFormat.VOCAB])
```
You can evaluate the TFLite model with the `evaluate_tflite` method to measure its accuracy. Converting the trained TensorFlow model to TFLite format and applying quantization can affect its accuracy, so it is recommended to evaluate the TFLite model's accuracy before deployment.
```
# Evaluate the exported TFLite model itself — conversion and quantization can
# shift accuracy relative to the TF model, so check before deployment.
accuracy = model.evaluate_tflite('mobilebert/model.tflite', test_data)
print('TFLite model accuracy: ', accuracy)
```
## Advanced Usage
The `create` function is the driver function that the Model Maker library uses to create models. The `model_spec` parameter defines the model specification. The `AverageWordVecModelSpec` and `BertClassifierModelSpec` classes are currently supported. The `create` function comprises the following steps:
1. Creates the model for the text classifier according to `model_spec`.
2. Trains the classifier model. The default epochs and the default batch size are set by the `default_training_epochs` and `default_batch_size` variables in the `model_spec` object.
This section covers advanced usage topics like adjusting the model and the training hyperparameters.
### Customize the MobileBERT model hyperparameters
The model parameters you can adjust are:
* `seq_len`: Length of the sequence to feed into the model.
* `initializer_range`: The standard deviation of the `truncated_normal_initializer` for initializing all weight matrices.
* `trainable`: Boolean that specifies whether the pre-trained layer is trainable.
The training pipeline parameters you can adjust are:
* `model_dir`: The location of the model checkpoint files. If not set, a temporary directory will be used.
* `dropout_rate`: The dropout rate.
* `learning_rate`: The initial learning rate for the Adam optimizer.
* `tpu`: TPU address to connect to.
For instance, you can set the `seq_len=256` (default is 128). This allows the model to classify longer text.
```
# Raise the sequence length from the default 128 to 256 tokens so the model
# can classify longer text.
new_model_spec = model_spec.get('mobilebert_classifier')
new_model_spec.seq_len = 256
```
### Customize the average word embedding model hyperparameters
You can adjust the model infrastructure like the `wordvec_dim` and the `seq_len` variables in the `AverageWordVecModelSpec` class.
For example, you can train the model with a larger value of `wordvec_dim`. Note that you must construct a new `model_spec` if you modify the model.
```
# Build a fresh average-word-vec spec with a larger word-embedding dimension.
new_model_spec = model_spec.AverageWordVecModelSpec(wordvec_dim=32)
```
Get the preprocessed data.
```
# Reload the training data so it is preprocessed for the modified spec.
new_train_data = TextClassifierDataLoader.from_csv(
    filename='train.csv',
    text_column='sentence',
    label_column='label',
    model_spec=new_model_spec,
    is_training=True)
```
Train the new model.
```
# Train with the customized spec (default epochs/batch size from the spec).
model = text_classifier.create(new_train_data, model_spec=new_model_spec)
```
### Tune the training hyperparameters
You can also tune the training hyperparameters like `epochs` and `batch_size` that affect the model accuracy. For instance,
* `epochs`: more epochs could achieve better accuracy, but may lead to overfitting.
* `batch_size`: the number of samples to use in one training step.
For example, you can train with more epochs.
```
# Retrain for 20 epochs; more epochs may improve accuracy but risk overfitting.
model = text_classifier.create(new_train_data, model_spec=new_model_spec, epochs=20)
```
Evaluate the newly retrained model with 20 training epochs.
```
# Reload the test split with the new spec's preprocessing, then evaluate.
new_test_data = TextClassifierDataLoader.from_csv(
    filename='dev.csv',
    text_column='sentence',
    label_column='label',
    model_spec=new_model_spec,
    is_training=False)
loss, accuracy = model.evaluate(new_test_data)
```
### Change the Model Architecture
You can change the model by changing the `model_spec`. The following shows how to change to BERT-Base model.
Change the `model_spec` to BERT-Base model for the text classifier.
```
# Switch the architecture to standard BERT-Base; the rest of the pipeline is unchanged.
spec = model_spec.get('bert_classifier')
```
The remaining steps are the same.
| github_jupyter |
Download the dataset from https://drive.google.com/file/d/1IX9cWMwzc4v8lLivk19k2LV2JrCj0KD1/view?usp=sharing
```
import pandas as pd
import numpy as np
# Load the Amazon unlocked-phones review dataset (CSV in the working directory).
df = pd.read_csv('Amazon_Unlocked_Mobile.csv')
# df = df.sample(frac=0.1, random_state=10)
df.head()
# Drop rows with missing values, then remove neutral (3-star) reviews so the
# task becomes a clean binary classification problem.
df.dropna(inplace=True)
df = df[df['Rating'] != 3]
# Binary target: 1 for ratings above 3 (positive), 0 otherwise.
df['Positively Rated'] = np.where(df['Rating'] > 3, 1, 0)
df.head(10)
# Mean of the binary target = fraction of positive reviews (class balance).
df['Positively Rated'].mean()
from sklearn.model_selection import train_test_split
# Default 75/25 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(df['Reviews'],
                                                    df['Positively Rated'],
                                                    random_state=0)
print('X_train first entry:\n\n', X_train.iloc[0])
print('\n\nX_train shape: ', X_train.shape)
```
# CountVectorizer
```
from sklearn.feature_extraction.text import CountVectorizer
# Learn the vocabulary from the training reviews (bag-of-words counts).
vect = CountVectorizer().fit(X_train)
# Peek at every 2000th vocabulary term.
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2;
# use get_feature_names_out() on modern versions — confirm the installed version.
vect.get_feature_names()[::2000]
len(vect.get_feature_names())
# Transform the training text into a sparse document-term matrix.
X_train_vectorized = vect.transform(X_train)
X_train_vectorized
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train_vectorized, y_train)
from sklearn.metrics import roc_auc_score
# NOTE(review): AUC is computed on hard 0/1 predictions here; passing
# model.predict_proba(...)[:, 1] would give the conventional ROC AUC.
predictions = model.predict(vect.transform(X_test))
print('AUC: ', roc_auc_score(y_test, predictions))
# Sort features by their learned coefficient to inspect the most
# negative/positive sentiment indicators.
feature_names = np.array(vect.get_feature_names())
sorted_coef_index = model.coef_[0].argsort()
print('Smallest Coefs:\n{}\n'.format(feature_names[sorted_coef_index[:10]]))
print('Largest Coefs: \n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))
```
# TfIdf
```
from sklearn.feature_extraction.text import TfidfVectorizer
# Fit the TfidfVectorizer to the training data specifiying a minimum document frequency of 5
vect = TfidfVectorizer(min_df=5).fit(X_train)
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2;
# use get_feature_names_out() on modern versions.
len(vect.get_feature_names())
X_train_vectorized = vect.transform(X_train)
model = LogisticRegression()
model.fit(X_train_vectorized, y_train)
# AUC on hard predictions (see note in the CountVectorizer cell).
predictions = model.predict(vect.transform(X_test))
print('AUC: ', roc_auc_score(y_test, predictions))
feature_names = np.array(vect.get_feature_names())
# Rank terms by their maximum tf-idf value across the training documents.
sorted_tfidf_index = X_train_vectorized.max(0).toarray()[0].argsort()
print('Smallest tfidf:\n{}\n'.format(feature_names[sorted_tfidf_index[:10]]))
print('Largest tfidf: \n{}'.format(feature_names[sorted_tfidf_index[:-11:-1]]))
# Rank terms by the logistic-regression coefficient (sentiment direction).
sorted_coef_index = model.coef_[0].argsort()
print('Smallest Coefs:\n{}\n'.format(feature_names[sorted_coef_index[:10]]))
print('Largest Coefs: \n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))
# Unigram models cannot distinguish these two sentences (word order is lost).
print(model.predict(vect.transform(['not an issue, phone is working',
                                    'an issue, phone is not working'])))
# Add bigrams (ngram_range=(1,2)) so phrases like "not working" become features.
vect = CountVectorizer(min_df=5, ngram_range=(1,2)).fit(X_train)
X_train_vectorized = vect.transform(X_train)
len(vect.get_feature_names())
model = LogisticRegression()
model.fit(X_train_vectorized, y_train)
predictions = model.predict(vect.transform(X_test))
print('AUC: ', roc_auc_score(y_test, predictions))
feature_names = np.array(vect.get_feature_names())
sorted_coef_index = model.coef_[0].argsort()
print('Smallest Coefs:\n{}\n'.format(feature_names[sorted_coef_index[:10]]))
print('Largest Coefs: \n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))
# With bigram features the two sentences can now be told apart.
print(model.predict(vect.transform(['not an issue, phone is working',
                                    'an issue, phone is not working'])))
```
| github_jupyter |
<h1 align="center">Assignment</h1>
<h3 align="center">Faisal Akhtar</h3>
<h3 align="center">Roll No.: 17/1409</h3>
<h3 align="center">Machine Learning - B.Sc. Hons Computer Science - Vth Semester</h3>
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from scipy import stats
# Column names for the Boston housing dataset (whitespace-delimited, no header).
column_names = ["CRIM", "ZN", "INDUS", "CHAS", "NOX", "RM", "AGE", "DIS", "RAD", "TAX", "PTRATIO", "B", "LSTAT", "MEDV"]
data = pd.read_csv("../input/boston-housing.csv", header=None, delimiter=r"\s+", names=column_names)
print("\n\nData loaded\n\n")
data.head()
data.describe()
```
<h4>Preprocessing</h4>
```
# Keep only the three columns used in this assignment: RM and AGE as
# predictors, MEDV (median home value) as the target.
data = pd.DataFrame(np.c_[data['RM'],data['AGE'],data['MEDV']], columns = ['RM', 'AGE', 'MEDV'])
# Check null values
print("\n\nCheck null values\n",data.isnull().sum())
```
No null values found...Moving on
```
# Discovering outliers by Z-Score
ZScore = np.abs(stats.zscore(data))
print("\n\nChecking where outliers are less than the ZScore")
print("ZScore > 1\n",np.where(ZScore > 1)[0],"\n",np.where(ZScore > 1)[1],"\n")
print("ZScore > 2\n",np.where(ZScore > 2)[0],"\n",np.where(ZScore > 2)[1],"\n")
print("ZScore > 3\n",np.where(ZScore > 3)[0],"\n",np.where(ZScore > 3)[1],"\n")
```
Selecting ZScore 3 to remove outliers
```
# Keep only rows where every column's |Z-score| is below 3.
data_o = data[(ZScore<3).all(axis=1)]
print ("Shape before removing outliers : ",np.shape(data),"\nShape after removing outliers : ",np.shape(data_o))
```
<h4>Preparing the data for training</h4>
where X is the input data and Y is the output data
```
# Feature matrix X (RM, AGE) and target Y (MEDV) from the outlier-free data.
X = pd.DataFrame(np.c_[data_o['RM'],data_o['AGE']], columns = ['RM', 'AGE'])
Y = pd.DataFrame(np.c_[data_o['MEDV']], columns = ['MEDV'])
print("\n\nX =\n",X.head(5))
print("\n\nY =\n",Y.head(5))
```
<h4>Splitting dataset in Training sets and Test sets</h4>
Where 75% data is for training and 25% is for testing
```
# Split 75% training / 25% testing (no fixed random_state, so the split
# varies between runs).
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.25)
print("X_train.shape : ", X_train.shape, "\tX_test.shape", X_test.shape)
# Bug fix: the original printed Y_train.shape twice; the second value
# should be the test-set shape.
print("Y_train.shape : ", Y_train.shape, "\tY_test.shape", Y_test.shape)
```
<h4>Linear Regression</h4>
Fitting Linear regression model to training model
```
# Fit an ordinary least-squares linear regression on the training split.
lin_model = LinearRegression()
lin_model = lin_model.fit(X_train, Y_train)
```
<h4>Model Analysis</h4>
```
# Predict on the test split and visualize predicted vs. actual values;
# points close to the diagonal indicate a good fit.
predictions = lin_model.predict(X_test)
# Scatter Plot
plt.scatter(Y_test, predictions)
plt.xlabel("True Values",color='red')
plt.ylabel("Predictions",color='blue')
plt.title("Predicted vs Actual value")
plt.grid(True)
plt.show()
```
<h4>The coefficient of determination R^2 of the prediction</h4>
```
# R^2 coefficient of determination of the predictions on the test split.
print(lin_model.score(X_test,Y_test))
```
| github_jupyter |
# Project: Train a Quadcopter How to Fly
Design an agent to fly a quadcopter, and then train it using a reinforcement learning algorithm of your choice!
Try to apply the techniques you have learnt, but also feel free to come up with innovative ideas and test them.
## Instructions
Take a look at the files in the directory to better understand the structure of the project.
- `task.py`: Define your task (environment) in this file.
- `agents/`: Folder containing reinforcement learning agents.
- `policy_search.py`: A sample agent has been provided here.
- `agent.py`: Develop your agent here.
- `physics_sim.py`: This file contains the simulator for the quadcopter. **DO NOT MODIFY THIS FILE**.
For this project, you will define your own task in `task.py`. Although we have provided a example task to get you started, you are encouraged to change it. Later in this notebook, you will learn more about how to amend this file.
You will also design a reinforcement learning agent in `agent.py` to complete your chosen task.
You are welcome to create any additional files to help you to organize your code. For instance, you may find it useful to define a `model.py` file defining any needed neural network architectures.
## Controlling the Quadcopter
We provide a sample agent in the code cell below to show you how to use the sim to control the quadcopter. This agent is even simpler than the sample agent that you'll examine (in `agents/policy_search.py`) later in this notebook!
The agent controls the quadcopter by setting the revolutions per second on each of its four rotors. The provided agent in the `Basic_Agent` class below always selects a random action for each of the four rotors. These four speeds are returned by the `act` method as a list of four floating-point numbers.
For this project, the agent that you will implement in `agents/agent.py` will have a far more intelligent method for selecting actions!
```
import random
class Basic_Agent():
    """Baseline agent that emits a random thrust on each of the 4 rotors."""

    def __init__(self, task):
        # Keep a handle on the task/environment; this trivial policy never
        # reads it, but the attribute matches the learning agents' interface.
        self.task = task

    def act(self):
        """Return four rotor speeds drawn around one shared random thrust."""
        base_thrust = random.gauss(450., 25.)
        # Perturb the shared thrust independently per rotor.
        return [base_thrust + random.gauss(0., 1.) for _ in range(4)]
```
Run the code cell below to have the agent select actions to control the quadcopter.
Feel free to change the provided values of `runtime`, `init_pose`, `init_velocities`, and `init_angle_velocities` below to change the starting conditions of the quadcopter.
The `labels` list below annotates statistics that are saved while running the simulation. All of this information is saved in a text file `data.txt` and stored in the dictionary `results`.
```
%load_ext autoreload
%autoreload 2
import csv
import numpy as np
from task import Task
# Modify the values below to give the quadcopter a different starting position.
runtime = 5.                                     # time limit of the episode
init_pose = np.array([0., 0., 10., 0., 0., 0.])  # initial pose
init_velocities = np.array([0., 0., 0.])         # initial velocities
init_angle_velocities = np.array([0., 0., 0.])   # initial angle velocities
file_output = 'data.txt'                         # file name for saved results
# Setup
task = Task(init_pose, init_velocities, init_angle_velocities, runtime)
agent = Basic_Agent(task)
done = False
# Column labels for the per-timestep statistics saved below.
labels = ['time', 'x', 'y', 'z', 'phi', 'theta', 'psi', 'x_velocity',
          'y_velocity', 'z_velocity', 'phi_velocity', 'theta_velocity',
          'psi_velocity', 'rotor_speed1', 'rotor_speed2', 'rotor_speed3', 'rotor_speed4']
results = {x : [] for x in labels}
# Run the simulation, and save the results.
with open(file_output, 'w') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(labels)
    while True:
        # Random action from the basic agent; step the sim until 'done'.
        rotor_speeds = agent.act()
        _, _, done = task.step(rotor_speeds)
        # One row per timestep: time, pose (x,y,z + Euler angles),
        # linear velocity, angular velocity, and the 4 rotor speeds.
        to_write = [task.sim.time] + list(task.sim.pose) + list(task.sim.v) + list(task.sim.angular_v) + list(rotor_speeds)
        for ii in range(len(labels)):
            results[labels[ii]].append(to_write[ii])
        writer.writerow(to_write)
        if done:
            break
```
Run the code cell below to visualize how the position of the quadcopter evolved during the simulation.
```
import matplotlib.pyplot as plt
%matplotlib inline
# Plot the quadcopter's (x, y, z) position over the episode.
plt.plot(results['time'], results['x'], label='x')
plt.plot(results['time'], results['y'], label='y')
plt.plot(results['time'], results['z'], label='z')
plt.legend()
_ = plt.ylim()
```
The next code cell visualizes the velocity of the quadcopter.
```
# Plot the linear velocity components over the episode.
plt.plot(results['time'], results['x_velocity'], label='x_hat')
plt.plot(results['time'], results['y_velocity'], label='y_hat')
plt.plot(results['time'], results['z_velocity'], label='z_hat')
plt.legend()
_ = plt.ylim()
```
Next, you can plot the Euler angles (the rotation of the quadcopter over the $x$-, $y$-, and $z$-axes),
```
# Plot the Euler angles (rotation about the x-, y-, and z-axes).
plt.plot(results['time'], results['phi'], label='phi')
plt.plot(results['time'], results['theta'], label='theta')
plt.plot(results['time'], results['psi'], label='psi')
plt.legend()
_ = plt.ylim()
```
before plotting the velocities (in radians per second) corresponding to each of the Euler angles.
```
# Plot the angular velocities (rad/s) of the three Euler angles.
plt.plot(results['time'], results['phi_velocity'], label='phi_velocity')
plt.plot(results['time'], results['theta_velocity'], label='theta_velocity')
plt.plot(results['time'], results['psi_velocity'], label='psi_velocity')
plt.legend()
_ = plt.ylim()
```
Finally, you can use the code cell below to print the agent's choice of actions.
```
# Plot the agent's chosen rotor speeds (actions) over the episode.
plt.plot(results['time'], results['rotor_speed1'], label='Rotor 1 revolutions / second')
plt.plot(results['time'], results['rotor_speed2'], label='Rotor 2 revolutions / second')
plt.plot(results['time'], results['rotor_speed3'], label='Rotor 3 revolutions / second')
plt.plot(results['time'], results['rotor_speed4'], label='Rotor 4 revolutions / second')
plt.legend()
_ = plt.ylim()
```
When specifying a task, you will derive the environment state from the simulator. Run the code cell below to print the values of the following variables at the end of the simulation:
- `task.sim.pose` (the position of the quadcopter in ($x,y,z$) dimensions and the Euler angles),
- `task.sim.v` (the velocity of the quadcopter in ($x,y,z$) dimensions), and
- `task.sim.angular_v` (radians/second for each of the three Euler angles).
```
# the pose, velocity, and angular velocity of the quadcopter at the end of the episode
print(task.sim.pose)
print(task.sim.v)
print(task.sim.angular_v)
```
In the sample task in `task.py`, we use the 6-dimensional pose of the quadcopter to construct the state of the environment at each timestep. However, when amending the task for your purposes, you are welcome to expand the size of the state vector by including the velocity information. You can use any combination of the pose, velocity, and angular velocity - feel free to tinker here, and construct the state to suit your task.
## The Task
A sample task has been provided for you in `task.py`. Open this file in a new window now.
The `__init__()` method is used to initialize several variables that are needed to specify the task.
- The simulator is initialized as an instance of the `PhysicsSim` class (from `physics_sim.py`).
- Inspired by the methodology in the original DDPG paper, we make use of action repeats. For each timestep of the agent, we step the simulation `action_repeats` timesteps. If you are not familiar with action repeats, please read the **Results** section in [the DDPG paper](https://arxiv.org/abs/1509.02971).
- We set the number of elements in the state vector. For the sample task, we only work with the 6-dimensional pose information. To set the size of the state (`state_size`), we must take action repeats into account.
- The environment will always have a 4-dimensional action space, with one entry for each rotor (`action_size=4`). You can set the minimum (`action_low`) and maximum (`action_high`) values of each entry here.
- The sample task in this provided file is for the agent to reach a target position. We specify that target position as a variable.
The `reset()` method resets the simulator. The agent should call this method every time the episode ends. You can see an example of this in the code cell below.
The `step()` method is perhaps the most important. It accepts the agent's choice of action `rotor_speeds`, which is used to prepare the next state to pass on to the agent. Then, the reward is computed from `get_reward()`. The episode is considered done if the time limit has been exceeded, or the quadcopter has travelled outside of the bounds of the simulation.
In the next section, you will learn how to test the performance of an agent on this task.
## The Agent
The sample agent given in `agents/policy_search.py` uses a very simplistic linear policy to directly compute the action vector as a dot product of the state vector and a matrix of weights. Then, it randomly perturbs the parameters by adding some Gaussian noise, to produce a different policy. Based on the average reward obtained in each episode (`score`), it keeps track of the best set of parameters found so far, how the score is changing, and accordingly tweaks a scaling factor to widen or tighten the noise.
Run the code cell below to see how the agent performs on the sample task.
```
import sys
import pandas as pd
from agents.policy_search import PolicySearch_Agent
from task import Task
num_episodes = 1000
# Target hover position for the sample task.
target_pos = np.array([0., 0., 10.])
task = Task(target_pos=target_pos)
agent = PolicySearch_Agent(task)
for i_episode in range(1, num_episodes+1):
    state = agent.reset_episode() # start a new episode
    while True:
        # Act, step the environment, and let the agent learn from the reward.
        action = agent.act(state)
        next_state, reward, done = task.step(action)
        agent.step(reward, done)
        state = next_state
        if done:
            # '\r' + end="" overwrite the same console line each episode.
            print("\rEpisode = {:4d}, score = {:7.3f} (best = {:7.3f}), noise_scale = {}".format(
                i_episode, agent.score, agent.best_score, agent.noise_scale), end="")  # [debug]
            break
    sys.stdout.flush()
```
This agent should perform very poorly on this task. And that's where you come in!
## Define the Task, Design the Agent, and Train Your Agent!
Amend `task.py` to specify a task of your choosing. If you're unsure what kind of task to specify, you may like to teach your quadcopter to takeoff, hover in place, land softly, or reach a target pose.
After specifying your task, use the sample agent in `agents/policy_search.py` as a template to define your own agent in `agents/agent.py`. You can borrow whatever you need from the sample agent, including ideas on how you might modularize your code (using helper methods like `act()`, `learn()`, `reset_episode()`, etc.).
Note that it is **highly unlikely** that the first agent and task that you specify will learn well. You will likely have to tweak various hyperparameters and the reward function for your task until you arrive at reasonably good behavior.
As you develop your agent, it's important to keep an eye on how it's performing. Use the code above as inspiration to build in a mechanism to log/save the total rewards obtained in each episode to file. If the episode rewards are gradually increasing, this is an indication that your agent is learning.
```
%load_ext autoreload
%autoreload 2
import numpy as np
from agents.agent import Agent
from task import Task
import matplotlib.pyplot as plt
%matplotlib notebook
num_episodes = 300
# Start just above the ground; target the origin (a soft-landing style task).
init_pose = np.array([0., 0., 0.1, 0., 0., 0.])
init_velocities = np.array([0., 0., 0.])
init_angle_velocities = np.array([0., 0., 0.])
target_pos = np.array([0.,0.,0.])
task = Task(init_pose=init_pose, init_velocities=init_velocities, init_angle_velocities=init_angle_velocities,target_pos=target_pos)
agent = Agent(task)
# Live-plot settings: draw the figure every `display_freq` episodes.
display_graph = True
display_freq = 20
# generate plot function
def plt_dynamic(x, z, score, color_z='g', color_score='b'):
    """Append the latest z-height and total-reward traces to the shared figure.

    Relies on the module-level `sub1`, `sub2` axes and `fig` created below;
    `color_z` / `color_score` are Matplotlib format strings for each trace.
    """
    for axis, series, fmt in ((sub1, z, color_z), (sub2, score, color_score)):
        axis.plot(x, series, fmt)
    fig.canvas.draw()
# create plots
# One figure with two y-axes: z-height on the left, total reward on the right.
fig, sub1= plt.subplots(1,1)
sub2 = sub1.twinx()
# Fixed axis limits so the live plot doesn't rescale every episode.
time_limit = 5
z_lower = 0
z_upper = 100
score_lower = 0
score_upper = 15
sub1.set_xlim(0, time_limit) # this is typically time
sub1.set_ylim(z_lower, z_upper) # limits to your y1
sub2.set_xlim(0, time_limit) # time, again
sub2.set_ylim(score_lower, score_upper) # limits to your y2
# set labels and colors for the axes
sub1.set_xlabel('time (s)', color='k')
sub1.tick_params(axis='x', colors='k')
sub1.set_ylabel('z-height', color='g')
sub1.tick_params(axis='y', colors="g")
sub2.set_ylabel('total reward', color='b')
sub2.tick_params(axis='y', colors='b')
# Run the training episodes, recording the per-step z-height / cumulative
# reward traces so the best-scoring episode can be re-plotted later.
best_x, best_z, best_score = [], [0], [0]  # seeded with 0 so np.amax works on episode 0
total_rewards = []
for episode in range(num_episodes + 1):
    state = agent.reset_episode()
    done = False
    x, z, score = [], [], []
    # `while not done:` instead of `while done is False:` — the identity
    # comparison silently fails if the task returns a numpy bool.
    while not done:
        x.append(task.sim.time)
        z.append(task.sim.pose[2])
        score.append(agent.total_reward)
        action = agent.act(state)
        next_state, reward, done = task.step(action)
        agent.step(action, reward, next_state, done)
        state = next_state
    # `score` is the cumulative reward trace, so its max is the episode peak.
    total_rewards.append(np.amax(score))
    # store if best reward
    if np.amax(score) > np.amax(best_score):
        best_x, best_z, best_score = x, z, score
        print("Episode {:4d}: Improved reward return {}".format(episode, np.amax(best_score)))
    if episode % display_freq == 0 and display_graph:
        plt_dynamic(x, z, score)
        print("Episode = {:4d}, total reward = {:7.3f}, noise_scale = {}".format(
            episode, agent.total_reward, agent.noise_scale))
```
## Plot the Rewards
Once you are satisfied with your performance, plot the episode rewards, either from a single run, or averaged over multiple runs.
```
# create plots
fig, sub1= plt.subplots(1,1)
sub2 = sub1.twinx()
# set plot boundaries
episode = len(total_rewards)
reward_lower = np.amin(total_rewards)
reward_upper = np.amax(total_rewards)
sub1.set_xlim(0, episode)
sub1.set_ylim(reward_lower, reward_upper)
# set labels and colors for the axes
sub1.set_xlabel('episode', color='k')
sub1.tick_params(axis='x', colors='k')
sub1.set_ylabel('total reward', color='g')
sub1.tick_params(axis='y', colors="g")
sub1.plot(range(len(total_rewards)), total_rewards, 'g')
fig.canvas.draw()
print("Best total reward = {}".format(np.amax(best_score)))
# create plots
fig, sub1= plt.subplots(1,1)
sub2 = sub1.twinx()
time_limit = 5
z_lower = 0
z_upper = np.amax(best_z) + 1.0
score_lower = np.amin(best_score)
score_upper = np.amax(best_score) + 1.0
sub1.set_xlim(0, time_limit) # this is typically time
sub1.set_ylim(z_lower, z_upper) # limits to your y1
sub2.set_xlim(0, time_limit) # time, again
sub2.set_ylim(score_lower, score_upper) # limits to your y2
# set labels and colors for the axes
sub1.set_xlabel('time (s)', color='k')
sub1.tick_params(axis='x', colors='k')
sub1.set_ylabel('z-height', color='g')
sub1.tick_params(axis='y', colors="g")
sub2.set_ylabel('total reward', color='b')
sub2.tick_params(axis='y', colors='b')
plt_dynamic(best_x, best_z, best_score)
```
## Reflections
**Question 1**: Describe the task that you specified in `task.py`. How did you design the reward function?
**Answer**:
The quadcopter starts near the origin (x = 0, y = 0, z = 0.1) and must reach the target at (0, 0, 10).
I tried to design the reward function to fly the quadcopter straight up(z-axis) and penalises it for significant changes to its pose (other than z axis). The sigmoid function is used to provide a distance from target score which would always be between 0 and 1 with values closer to the target closer to 0. Then took this distance score minus 1 to give a score of 1 when the quadcopter was on the target.
**Question 2**: Discuss your agent briefly, using the following questions as a guide:
- What learning algorithm(s) did you try? What worked best for you?
- What was your final choice of hyperparameters (such as $\alpha$, $\gamma$, $\epsilon$, etc.)?
- What neural network architecture did you use (if any)? Specify layers, sizes, activation functions, etc.
**Answer**:
The DDPG algorithm is used in this project. After trialling multiple hyperparameters, I settled on gamma = 0.99 and tau = 0.001.
The actor has 2 hidden layers with 512 and 256 nodes and relu activation layers. The critic has single hidden layers of 128 nodes likewise with relu activation. Adding extra layers had a minimal positive effect on the training, except that they were significantly slower.
I have set the learning rates to 0.001 and 0.001 on the actor and critic respectively.
**Question 3**: Using the episode rewards plot, discuss how the agent learned over time.
- Was it an easy task to learn or hard?
- Was there a gradual learning curve, or an aha moment?
- How good was the final performance of the agent? (e.g. mean rewards over the last 10 episodes)
**Answer**:
Learning was really challenging. The rewards fluctuate at first, but around 150 episodes they settle down.
I couldn't get the network to learn enough to attempt as shown in the results above, and so the final set of rewards are not perfect.
**Question 4**: Briefly summarize your experience working on this project. You can use the following prompts for ideas.
- What was the hardest part of the project? (e.g. getting started, plotting, specifying the task, etc.)
- Did you find anything interesting in how the quadcopter or your agent behaved?
**Answer**:
The project was challenging and a serious leap from the others I've worked on. There are many different components and parameters. I had a really hard time setting the reward function correctly, so I tried to keep it simple. I need some extra work on RL.
| github_jupyter |
# Mask R-CNN - Train on Shapes Dataset
This notebook shows how to train Mask R-CNN on your own dataset. To keep things simple we use a synthetic dataset of shapes (squares, triangles, and circles) which enables fast training. You'd still need a GPU, though, because the network backbone is a Resnet101, which would be too slow to train on a CPU. On a GPU, you can start to get okay-ish results in a few minutes, and good results in less than an hour.
The code of the *Shapes* dataset is included below. It generates images on the fly, so it doesn't require downloading any data. And it can generate images of any size, so we pick a small image size to train faster.
```
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
# Putting the repo root on sys.path makes the local `mrcnn` package importable.
sys.path.append(ROOT_DIR)  # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
%matplotlib inline
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
```
## Configurations
```
class ShapesConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "shapes"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 8
# Number of classes (including background)
NUM_CLASSES = 1 + 3  # background + 3 shapes
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
IMAGE_MIN_DIM = 128
IMAGE_MAX_DIM = 128
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)  # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100
# use small validation steps since the epoch is small
VALIDATION_STEPS = 5
# Instantiate and print every resolved configuration value.
config = ShapesConfig()
config.display()
```
## Notebook Preferences
```
def get_ax(rows=1, cols=1, size=8):
    """Create a grid of Matplotlib axes for the notebook's visualizations.

    Keeping figure creation in one helper gives a single place to control
    graph sizes: `size` is the edge length (in inches) of each subplot.
    Returns the Axes (or array of Axes); the Figure itself is discarded.
    """
    figsize = (size * cols, size * rows)
    _fig, axes = plt.subplots(rows, cols, figsize=figsize)
    return axes
```
## Dataset
Create a synthetic dataset
Extend the Dataset class and add a method to load the shapes dataset, `load_shapes()`, and override the following methods:
* load_image()
* load_mask()
* image_reference()
```
class ShapesDataset(utils.Dataset):
    """Generates the shapes synthetic dataset. The dataset consists of simple
    shapes (triangles, squares, circles) placed randomly on a blank surface.
    The images are generated on the fly. No file access required.
    """

    def load_shapes(self, count, height, width):
        """Generate the requested number of synthetic images.
        count: number of images to generate.
        height, width: the size of the generated images.
        """
        # Add classes
        self.add_class("shapes", 1, "square")
        self.add_class("shapes", 2, "circle")
        self.add_class("shapes", 3, "triangle")
        # Add images
        # Generate random specifications of images (i.e. color and
        # list of shapes sizes and locations). This is more compact than
        # actual images. Images are generated on the fly in load_image().
        for i in range(count):
            bg_color, shapes = self.random_image(height, width)
            self.add_image("shapes", image_id=i, path=None,
                           width=width, height=height,
                           bg_color=bg_color, shapes=shapes)

    def load_image(self, image_id):
        """Generate an image from the specs of the given image ID.
        Typically this function loads the image from a file, but
        in this case it generates the image on the fly from the
        specs in image_info.
        """
        info = self.image_info[image_id]
        bg_color = np.array(info['bg_color']).reshape([1, 1, 3])
        image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)
        image = image * bg_color.astype(np.uint8)
        for shape, color, dims in info['shapes']:
            image = self.draw_shape(image, shape, dims, color)
        return image

    def image_reference(self, image_id):
        """Return the shapes data of the image."""
        info = self.image_info[image_id]
        if info["source"] == "shapes":
            return info["shapes"]
        else:
            # Fixed: the original called `super(self.__class__).image_reference(...)`,
            # which builds an *unbound* super object (AttributeError at runtime)
            # and discarded the result. Delegate properly and return the value.
            return super(ShapesDataset, self).image_reference(image_id)

    def load_mask(self, image_id):
        """Generate instance masks for shapes of the given image ID.
        """
        info = self.image_info[image_id]
        shapes = info['shapes']
        count = len(shapes)
        mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)
        for i, (shape, _, dims) in enumerate(info['shapes']):
            mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),
                                                shape, dims, 1)
        # Handle occlusions: later shapes are drawn on top, so erase the
        # overlapped parts of earlier masks, back to front.
        occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)
        for i in range(count-2, -1, -1):
            mask[:, :, i] = mask[:, :, i] * occlusion
            occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))
        # Map class names to class IDs.
        class_ids = np.array([self.class_names.index(s[0]) for s in shapes])
        # Fixed: `np.bool` was removed in NumPy 1.24; the builtin `bool`
        # is the documented replacement and yields the same boolean dtype.
        return mask.astype(bool), class_ids.astype(np.int32)

    def draw_shape(self, image, shape, dims, color):
        """Draws a shape from the given specs."""
        # Get the center x, y and the size s
        x, y, s = dims
        if shape == 'square':
            cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)
        elif shape == "circle":
            cv2.circle(image, (x, y), s, color, -1)
        elif shape == "triangle":
            points = np.array([[(x, y-s),
                                (x-s/math.sin(math.radians(60)), y+s),
                                (x+s/math.sin(math.radians(60)), y+s),
                                ]], dtype=np.int32)
            cv2.fillPoly(image, points, color)
        return image

    def random_shape(self, height, width):
        """Generates specifications of a random shape that lies within
        the given height and width boundaries.
        Returns a tuple of three values:
        * The shape name (square, circle, ...)
        * Shape color: a tuple of 3 values, RGB.
        * Shape dimensions: A tuple of values that define the shape size
          and location. Differs per shape type.
        """
        # Shape
        shape = random.choice(["square", "circle", "triangle"])
        # Color
        color = tuple([random.randint(0, 255) for _ in range(3)])
        # Center x, y (kept `buffer` pixels away from the borders)
        buffer = 20
        y = random.randint(buffer, height - buffer - 1)
        x = random.randint(buffer, width - buffer - 1)
        # Size
        s = random.randint(buffer, height//4)
        return shape, color, (x, y, s)

    def random_image(self, height, width):
        """Creates random specifications of an image with multiple shapes.
        Returns the background color of the image and a list of shape
        specifications that can be used to draw the image.
        """
        # Pick random background color
        bg_color = np.array([random.randint(0, 255) for _ in range(3)])
        # Generate a few random shapes and record their
        # bounding boxes
        shapes = []
        boxes = []
        N = random.randint(1, 4)
        for _ in range(N):
            shape, color, dims = self.random_shape(height, width)
            shapes.append((shape, color, dims))
            x, y, s = dims
            boxes.append([y-s, x-s, y+s, x+s])
        # Apply non-max suppression with 0.3 threshold to avoid
        # shapes covering each other
        keep_ixs = utils.non_max_suppression(np.array(boxes), np.arange(N), 0.3)
        shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]
        return bg_color, shapes
# Training dataset
# 500 synthetic training images at the configured 128x128 size.
dataset_train = ShapesDataset()
dataset_train.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
# Validation dataset
dataset_val = ShapesDataset()
dataset_val.load_shapes(50, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_val.prepare()
# Load and display random samples
image_ids = np.random.choice(dataset_train.image_ids, 4)
for image_id in image_ids:
image = dataset_train.load_image(image_id)
mask, class_ids = dataset_train.load_mask(image_id)
visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
```
## Create Model
```
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=MODEL_DIR)
# Which weights to start with?
init_with = "coco"  # imagenet, coco, or last
# by_name=True loads weights into layers matched by layer name.
if init_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last model you trained and continue training
model.load_weights(model.find_last(), by_name=True)
```
## Training
Train in two stages:
1. Only the heads. Here we're freezing all the backbone layers and training only the randomly initialized layers (i.e. the ones that we didn't use pre-trained weights from MS COCO). To train only the head layers, pass `layers='heads'` to the `train()` function.
2. Fine-tune all layers. For this simple example it's not necessary, but we're including it to show the process. Simply pass `layers="all"` to train all layers.
```
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=1,
layers='heads')
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
# Note the 10x lower learning rate for full fine-tuning.
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=2,
layers="all")
# Save weights
# Typically not needed because callbacks save after every epoch
# Uncomment to save manually
# model_path = os.path.join(MODEL_DIR, "mask_rcnn_shapes.h5")
# model.keras_model.save_weights(model_path)
```
## Detection
```
class InferenceConfig(ShapesConfig):
# Run inference one image at a time (batch size = GPU_COUNT * IMAGES_PER_GPU = 1).
GPU_COUNT = 1
IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()
# Load trained weights
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# Test on a random image
image_id = random.choice(dataset_val.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
# Log the loaded ground-truth arrays.
log("original_image", original_image)
log("image_meta", image_meta)
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
# Show ground truth first, then the model's detections for comparison.
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
dataset_train.class_names, figsize=(8, 8))
results = model.detect([original_image], verbose=1)
r = results[0]
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
dataset_val.class_names, r['scores'], ax=get_ax())
```
## Evaluation
```
# Compute VOC-Style mAP @ IoU=0.5
# Running on 10 images. Increase for better accuracy.
image_ids = np.random.choice(dataset_val.image_ids, 10)
APs = []
for image_id in image_ids:
    # Load image and ground truth data
    image, image_meta, gt_class_id, gt_bbox, gt_mask =\
        modellib.load_image_gt(dataset_val, inference_config,
                               image_id, use_mini_mask=False)
    # Run object detection.
    # (The original also computed `molded_images` here, but that variable
    # was never read afterwards — the dead work has been removed.)
    results = model.detect([image], verbose=0)
    r = results[0]
    # Compute AP
    AP, precisions, recalls, overlaps =\
        utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                         r["rois"], r["class_ids"], r["scores"], r['masks'])
    APs.append(AP)
print("mAP: ", np.mean(APs))
```
| github_jupyter |
## House Prices: Advanced Regression Techniques : Kaggle Competition
### Import Libraries
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
```
### Import Data
```
# Load the Kaggle training set and take a first look at it.
df=pd.read_csv('train.csv')
df.head()
df.shape
```
### Step1: Check for missing values
```
# Visualize missing values: bright cells in the heatmap mark NaNs.
fig, ax = plt.subplots(figsize=(20,5)) # To change fig shape for better representation
sns.heatmap(df.isnull(),yticklabels=False,cbar=False, ax=ax)
def missing_zero_values_table(dataframe):
    """Summarize zero and missing values for every column of `dataframe`.

    Returns a DataFrame indexed by column name with the columns
    'Zero Values', 'Missing Values', '% of Total Values' and 'Data Type',
    restricted to columns that have at least one missing value and sorted
    by missing percentage (descending, rounded to one decimal).
    Also prints a short summary of the input's shape.
    """
    # (The original computed this whole table twice, back to back; the
    # duplicate pass was dead work and has been removed.)
    zero_val = (dataframe == 0.00).astype(int).sum(axis=0)
    mis_val = dataframe.isnull().sum()
    mis_val_percent = 100 * dataframe.isnull().sum() / len(dataframe)
    mz_table = pd.concat([zero_val, mis_val, mis_val_percent], axis=1)
    mz_table = mz_table.rename(
        columns = {0 : 'Zero Values', 1 : 'Missing Values', 2 : '% of Total Values'})
    mz_table['Data Type'] = dataframe.dtypes
    # Keep only columns that actually have missing values, worst first.
    mz_table = mz_table[
        mz_table.iloc[:,1] != 0].sort_values(
        '% of Total Values', ascending=False).round(1)
    print ("Your selected dataframe has " + str(dataframe.shape[1]) + " columns and " + str(dataframe.shape[0]) + " Rows.\n"
        "There are " + str(mz_table.shape[0]) +
        " columns that have missing values.")
    return mz_table
# Report which columns still contain missing data.
missing_zero_values_table(df)
```
### Step 2: Filling Missing values and droping columns whose missing >70%
```
# Drop the columns with more than 70% missing values.
df.drop(['PoolQC','MiscFeature','Alley','Fence'],axis=1,inplace=True)
```
#### Handling Missing data : categorical data with MODE & numerical data with MEAN
```
# Impute each categorical column with its mode and each numeric column with
# its mean. The .value_counts()/.mean() calls display the statistic the
# hard-coded fill value was read from.
df['FireplaceQu'].value_counts()
df['FireplaceQu'].fillna(value='Gd', inplace=True)
df['LotFrontage'].mean()
# NOTE(review): the numeric fills below hard-code rounded means (70.05,
# 103.6) copied from the cells above; they will silently drift if the data
# changes. Consider fillna(df[col].mean()) instead.
df['LotFrontage'].fillna(value=70.05, inplace=True)
df['GarageType'].value_counts()
df['GarageType'].fillna(value='Attchd', inplace=True)
df['GarageYrBlt'].value_counts()
df['GarageYrBlt'].fillna(value=2005, inplace=True)
df['GarageFinish'].value_counts()
df['GarageFinish'].fillna(value='Unf', inplace=True)
df['GarageQual'].value_counts()
df['GarageQual'].fillna(value='TA', inplace=True)
df['GarageCond'].value_counts()
df['GarageCond'].fillna(value='TA', inplace=True)
df['BsmtExposure'].value_counts()
df['BsmtExposure'].fillna(value='No', inplace=True)
df['BsmtFinType1'].value_counts()
df['BsmtFinType1'].fillna(value='Unf', inplace=True)
df['BsmtFinType2'].value_counts()
df['BsmtFinType2'].fillna(value='Unf', inplace=True)
df['BsmtCond'].value_counts()
df['BsmtCond'].fillna(value='TA', inplace=True)
df['BsmtQual'].value_counts()
df['BsmtQual'].fillna(value='TA', inplace=True)
df['MasVnrArea'].mean()
df['MasVnrArea'].fillna(value=103.6, inplace=True)
df['MasVnrType'].value_counts()
df['MasVnrType'].fillna(value='None', inplace=True)
df['Electrical'].value_counts()
df['Electrical'].fillna(value='SBrkr', inplace=True)
df.shape
#df.drop(['Id'],axis=1,inplace=True)
missing_zero_values_table(df)
fig, ax = plt.subplots(figsize=(20,5))
sns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap='YlGnBu',ax=ax)
# Drop whatever rows still contain missing values.
df.dropna(inplace=True)
df.shape
df.head()
```
### Data Engineering is done !!
### Now we will handle Categorical Data ( to Numerical Data)
```
## Handle categorical features: collect all object-dtype column names.
columns = list(df.select_dtypes(include=['object']).columns)
columns
len(columns)
main_df=df.copy() # saving original data copy
## Test Data
test_df=pd.read_csv('cleaned_test.csv')
test_df.shape
df.shape
```
#### Read!
Train Data has 77 features and Test Data has 76 features.
That extra column (feature) is SalePrice, which will be separated out later, before fitting the model.
Now we will combine both the test and train data and apply get_dummies to categorical data which will convert categorical variable into dummy/indicator variables.
We have combined them both together so that while conversion identity remains same.
```
test_df.head()
# Stack train and test so get_dummies sees every category level once and
# produces identical dummy columns for both partitions.
final_df=pd.concat([df,test_df],axis=0)
final_df['SalePrice'] #SalePrice of test data will get Nan values which needs to be predicted
final_df.shape
# function to apply get_dummies to all categorical data which will convert categorical variable into
# dummy/indicator variables
def category_onehot_multcols(multcolumns):
"""One-hot encode (drop_first) every column named in `multcolumns`.

NOTE(review): this reads and mutates the module-level `final_df` (it drops
each encoded column in place) rather than taking the frame as a parameter —
confirm before reusing outside this notebook.
"""
df_final=final_df
i=0
for fields in multcolumns:
print(fields)
df1=pd.get_dummies(final_df[fields],drop_first=True)
final_df.drop([fields],axis=1,inplace=True)
if i==0:
df_final=df1.copy()
else:
df_final=pd.concat([df_final,df1],axis=1)
i=i+1
# Re-attach the (now numeric-only) remainder of final_df to the dummies.
df_final=pd.concat([final_df,df_final],axis=1)
return df_final
final_df=category_onehot_multcols(columns)
final_df.shape
# removing duplicate columns as they wont help
final_df =final_df.loc[:,~final_df.columns.duplicated()]
final_df.shape
final_df
final_df['SalePrice']
```
### Final dataset of train+test
Now we will seperate test and train
In train we will further seperate features mapping to price
#### X------------->Y
(features) . . . . (price)
```
# Re-split into the original partitions; the first 1460 rows came from train.csv.
df_Train=final_df.iloc[:1460,:] # 1460 is calculated from the previous train data size
df_Test=final_df.iloc[1460:,:]
df_Train.head()
df_Test.head()
df_Train.shape
# NOTE(review): dropping in place on an .iloc slice can raise
# SettingWithCopyWarning; consider df_Test = df_Test.drop(['SalePrice'], axis=1).
df_Test.drop(['SalePrice'],axis=1,inplace=True)
```
Remove Id and separate out SalePrice, as they are not features — just the identifier and the target output.
```
# Separate features (X) from the target (y); Id is an identifier, not a feature.
X_train=df_Train.drop(['SalePrice','Id'],axis=1)
y_train=df_Train['SalePrice']
X_test=df_Test.drop(['Id'],axis=1)
print(X_train.shape)
print(X_test.shape)
```
## Prediction and selecting the Algorithm
I am simply using Linear Regression; we will improve on that later.
```
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import mean_squared_error
# Fit a plain linear-regression baseline and predict the test-set prices.
regressor = LinearRegression()
regressor.fit(X_train, y_train) #training the algorithm
y_pred = regressor.predict(X_test)
y_pred
##Create Sample Submission file and Submit
pred=pd.DataFrame(y_pred)
sub_df=pd.read_csv('sample_submission.csv')
# Pair each Id from the sample submission with its predicted SalePrice.
datasets=pd.concat([sub_df['Id'],pred],axis=1)
datasets.columns=['Id','SalePrice']
datasets.to_csv('trial_1.csv',index=False)
sub_df.shape
```
| github_jupyter |
# 1. データサイエンティストによるノートブックでの試行錯誤
データが蓄積され取得できるようになったら、データサイエンティストはEDA(探索的データ解析)を行い、モデルを構築し、評価します。
本ノートブックでは、データサイエンティストによるモデル構築コードを提示します。
以降のノートブックで、作成されたスクリプトのモジュール化を行なっていきます。
## 実験内容
下記のノートブックと同様の実験を行います。
https://github.com/aws-samples/aws-ml-jp/blob/main/mlops/step-functions-data-science-sdk/model-train-evaluate-compare/step_functions_mlworkflow_scikit_learn_data_processing_and_model_evaluation_with_experiments.ipynb
>このノートブックで使用するデータは Census-Income KDD Dataset です。このデータセットから特徴量を選択し、データクレンジングを実施し、二値分類モデルの利用できる形にデータを変換し、最後にデータを学習用とテスト用に分割します。このノートブックではロジスティック回帰モデルを使って、国勢調査の回答者の収入が 5万ドル以上か 5万ドル未満かを予測します。このデータセットはクラスごとの不均衡が大きく、ほとんどのデータに 5万ドル以下というラベルが付加されています。
## 前提:データは事前に dataset/ に手動で格納しておく
データを以下のサイトから入手し、 dataset ディレクトリに配置してください。
https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29
./dataset/census-income.csv(101.5MB)
```
import shutil
shutil.unpack_archive("./census-income.csv.zip", extract_dir='./dataset')
```
## データサイエンティストによる、モデル構築
データサイエンティストがEDAを行なったあと、ノートブック上でモデルの構築、評価を行なった場合を想定します。
このスクリプトでは、以下の処理が実行されます。
* 重複データやコンフリクトしているデータの削除
* ターゲット変数 income 列をカテゴリ変数から 2つのラベルを持つ列に変換
* age と num persons worked for employer をビニングして数値からカテゴリ変数に変換
* 連続値であるcapital gains, capital losses, dividends from stocks を学習しやすいようスケーリング
* education, major industry code, class of workerを学習しやすいようエンコード
* データを学習用とテスト用に分割し特徴量とラベルの値をそれぞれ保存
## コードの詳細
以下、69行(空行含む)
* ライブラリ読み込み:7行
* 空行:9行
* コメント:11行
* コード実行:19行
* コード実行の改行:23行
```
# Import the latest sagemaker, stepfunctions and boto3 SDKs
import sys
!{sys.executable} -m pip install --upgrade pip
!{sys.executable} -m pip install -qU pandas
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder, KBinsDiscretizer
from sklearn.compose import make_column_transformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, roc_auc_score, accuracy_score
### Load the data
# Keep only the feature columns used by the model, plus the target `income`.
columns = [
"age",
"education",
"major industry code",
"class of worker",
"num persons worked for employer",
"capital gains",
"capital losses",
"dividends from stocks",
"income",
]
# Raw target labels exactly as they appear in the CSV.
class_labels = [" - 50000.", " 50000+."]
df = pd.read_csv("./dataset/census-income.csv")
df = df[columns]
### Preprocessing
# Drop rows with missing values and remove duplicate/conflicting records.
df.dropna(inplace=True)
df.drop_duplicates(inplace=True)
# Convert the target `income` column from its category strings to 0/1 labels.
df.replace(class_labels, [0, 1], inplace=True)
# Count the class balance (0 = under 50k, 1 = 50k and over).
negative_examples, positive_examples = np.bincount(df["income"])
# Split the data into train and test sets.
X_train, X_test, y_train, y_test = train_test_split(df.drop("income", axis=1), df["income"], test_size=0.2)
preprocess = make_column_transformer(
# Bin `age` and `num persons worked for employer` from numeric values
# into one-hot categorical buckets.
(
KBinsDiscretizer(encode="onehot-dense", n_bins=10),
["age", "num persons worked for employer"],
),
# Scale the continuous capital gains / capital losses / dividends columns.
(
StandardScaler(),
["capital gains", "capital losses", "dividends from stocks"],
),
# One-hot encode education, major industry code and class of worker.
# NOTE(review): OneHotEncoder's `sparse=` was renamed `sparse_output=` in
# scikit-learn 1.2 and removed in 1.4 — confirm the pinned version.
(
OneHotEncoder(sparse=False, handle_unknown='ignore'),
["education", "major industry code", "class of worker"],
),
)
X_train = preprocess.fit_transform(X_train)
X_test = preprocess.transform(X_test)
### Training
model = LogisticRegression(class_weight="balanced", solver="lbfgs", C=float(1.0), verbose=1)
model.fit(X_train, y_train)
### Inference
predictions = model.predict(X_test)
### Evaluation
# Collect precision/recall/F1 plus overall accuracy and ROC-AUC in one dict.
report_dict = classification_report(y_test, predictions, output_dict=True)
report_dict["accuracy"] = accuracy_score(y_test, predictions)
report_dict["roc_auc"] = roc_auc_score(y_test, predictions)
print(report_dict)
```
ノートブックのインタラクティブ性は、EDAやモデルプロトタイプなどの初期の試行錯誤には大変便利です。
一方で、モジュール化されていないコードや記録されていないコードや、本番運用を見据えると、後のコード本番化、リファクタリングなどの工数を増加や、テストの難しさによる品質確保が難しいといった懸念もあります。
試行錯誤の柔軟性を確保しつつ、モジュール化されたコードをきちんと記録していくことが、コードの品質向上と、本番導入の迅速化には重要になります。
以降のノートブックでは、実験を支援するパイプラインを準備し、ノートブックをモジュール化していく例をみていきます。
## [参考] 詰め込んだ場合、以下の23行で完了
```
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder, KBinsDiscretizer
from sklearn.compose import make_column_transformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, roc_auc_score, accuracy_score
# Condensed version of the cells above: load, clean, encode, train, evaluate.
df = pd.read_csv("./dataset/census-income.csv")
df = df[["age","education","major industry code","class of worker","num persons worked for employer","capital gains","capital losses","dividends from stocks","income",]]
df.dropna(inplace=True)
df.drop_duplicates(inplace=True)
df.replace([" - 50000.", " 50000+."], [0, 1], inplace=True)
X_train, X_test, y_train, y_test = train_test_split(df.drop("income", axis=1), df["income"], test_size=0.2)
preprocess = make_column_transformer((KBinsDiscretizer(encode="onehot-dense", n_bins=10),["age", "num persons worked for employer"],),(StandardScaler(),["capital gains", "capital losses", "dividends from stocks"],),(OneHotEncoder(sparse=False, handle_unknown='ignore'),["education", "major industry code", "class of worker"],),)
X_train = preprocess.fit_transform(X_train)
X_test = preprocess.transform(X_test)
model = LogisticRegression(class_weight="balanced", solver="lbfgs", C=float(1.0), verbose=1)
model.fit(X_train, y_train)
predictions = model.predict(X_test)
report_dict = classification_report(y_test, predictions, output_dict=True)
report_dict["accuracy"] = accuracy_score(y_test, predictions)
report_dict["roc_auc"] = roc_auc_score(y_test, predictions)
print(report_dict)
```
| github_jupyter |
**Deep MNIST for Experts**
This is an example taken from one of the TensorFlow tutorials: https://www.tensorflow.org/versions/r1.1/get_started/mnist/pros
To run this as a docker container on my HP200, I used:
```
nvidia-docker run -it -p 8888:8888 tensorflow/tensorflow:1.5.0-gpu-py3
```
If you have a newer CPU, you can use the image tensorflow/tensorflow:latest-gpu-py3.
```
# Load the data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
import tensorflow as tf
# Create our session. We enable the logging of which devices (GPU or CPU)
# TensorFlow is using. This gets log to the console running the Notebook,
# not in the notebook
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
# Placeholders
# x: flattened 784-pixel images; y_: one-hot labels over 10 digit classes.
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
# Variables (zero-initialized weights and biases for softmax regression)
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
# The regression model
y = tf.matmul(x,W) + b
# Loss function
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
# Train the model - define a training step
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Run the step 1000 times, each on a fresh mini-batch of 100 examples
for _ in range(1000):
batch = mnist.train.next_batch(100)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
# Evaluate the model: fraction of test examples whose argmax matches the label
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
```
**Bulding a multilayer convolutional Network**
```
# Weight initialization
def weight_variable(shape):
    """Create a weight Variable initialized from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable filled with the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
# Convolution and pooling
def conv2d(x, W):
    """2-D convolution of x with kernel W, stride 1, SAME (zero) padding."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding (halves each spatial dimension)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# First convolutional layer: 32 filters of size 5x5 over the 1-channel input.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# Reshape the flat 784-vector back into a 28x28 grayscale image (NHWC layout).
x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)  # 28x28 -> 14x14
# Second convolutional layer: 64 filters of size 5x5 over 32 input channels.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)  # 14x14 -> 7x7
# Densely connected layer: flatten the 7x7x64 feature maps into 1024 hidden units.
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout: keep_prob is fed at run time (0.5 while training, 1.0 while evaluating).
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Readout layer: logits for the 10 digit classes.
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
import time
# Loss, optimizer (Adam, lr 1e-4) and accuracy metric.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
t1 = time.time()
# Train for 20000 minibatches of 50, reporting training accuracy every 100 steps.
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i%100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x:batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g"%(i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print("test accuracy %g"%accuracy.eval(feed_dict={
    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
t2 = time.time()
print("Total time was %.0f seconds" % (t2-t1))
# The time with an NVIDIA GTX1060 was 136 seconds
# The time on a macbook pro laptop (CPU-only, no GPU) was 1683 seconds.
```
| github_jupyter |
# Numerical norm bounds for quadrotor
For a quadrotor system with state $x = \begin{bmatrix}p_x & p_z & \phi & v_x & v_z & \dot{\phi} \end{bmatrix}^T$ we have
\begin{equation}
\dot{x} = \begin{bmatrix}
v_x \cos\phi - v_z\sin\phi \\
v_x \sin\phi + v_z\cos\phi \\
\dot{\phi} \\
v_z\dot{\phi} - g\sin{\phi} \\
-v_x\dot{\phi} - g\cos{\phi} + g \\
0
\end{bmatrix}.
\end{equation}
Evaluating the corresponding Jacobian at 0 yields:
\begin{equation}
\nabla f(0)x = \begin{bmatrix} v_x & v_z & \dot{\phi} & -g\phi & 0 & 0 \end{bmatrix}^T
\end{equation}
We want to find an NLDI of the form
\begin{equation}
\dot{x} = \nabla f(0) x + I p, \;\; \|p\| \leq \|Cx\|
\end{equation}
To find $C$, we determine an entry-wise norm bound. That is, for $i=1,\ldots,6$, we want to find $C_i$ such that for all $x$ such that $x_{\text{min}} \leq x \leq x_{\text{max}}$:
\begin{equation}
(\nabla f_i(0)x - \dot{x}_i)^2 \leq x^T C_i x
\end{equation}
and then write
\begin{equation}
\|\dot{x} - \nabla f(0)x\|_2 \leq \|\begin{bmatrix} C_1^{1/2} \\ C_2^{1/2} \\ C_3^{1/2} \\ C_4^{1/2} \\ C_5^{1/2} \\ C_6^{1/2} \end{bmatrix} x \|
\end{equation}
```
import numpy as np
import cvxpy as cp
import scipy.linalg as sla
g = 9.81
```
## Define max and min values
```
# State is: x = [px, pz, phi, vx, vz, phidot]^T
x_max = np.array([1.1, 1.1, 0.06, 0.5, 1.0, 0.8])
x_min = np.array([-1.1, -1.1, -0.06, -0.5, -1.0, -0.8])
px_max, pz_max, phi_max, vx_max, vz_max, phidot_max = x_max
px_min, pz_min, phi_min, vx_min, vz_min, phidot_min = x_min
n = 6
px_idx, pz_idx, phi_idx, vx_idx, vz_idx, phidot_idx = range(n)
```
## Find element-wise bounds
### $f_1$
```
gridnum = 50
vx = np.linspace(vx_min, vx_max, gridnum)
vz = np.linspace(vz_min, vz_max, gridnum)
phi = np.linspace(phi_min, phi_max, gridnum)
Vx, Vz, Phi = np.meshgrid(vx, vz, phi)
v1 = np.ravel(( Vx - (Vx*np.cos(Phi) - Vz*np.sin(Phi)) )**2)
U1 = np.array([np.ravel(Vx*Vx),
np.ravel(Vz*Vz),
np.ravel(Phi*Phi),
2*np.ravel(Vx*Vz),
2*np.ravel(Vx*Phi),
2*np.ravel(Vz*Phi)]).T
c1 = cp.Variable(6)
cp.Problem(cp.Minimize(cp.max(U1@c1 - v1)), [U1@c1 >= v1, c1[:3]>=0]).solve(verbose=True, solver=cp.MOSEK)
c1 = c1.value
c1
C1 = np.zeros((n,n))
C1[vx_idx, vx_idx] = c1[0]/2
C1[vz_idx, vz_idx] = c1[1]/2
C1[phi_idx, phi_idx] = c1[2]/2
C1[vx_idx, vz_idx] = c1[3]
C1[vx_idx, phi_idx] = c1[4]
C1[vz_idx, phi_idx] = c1[5]
C1 += C1.T
gam1 = np.real(sla.sqrtm(C1))
gam1
```
### $f_2$
```
gridnum = 50
vx = np.linspace(vx_min, vx_max, gridnum)
vz = np.linspace(vz_min, vz_max, gridnum)
phi = np.linspace(phi_min, phi_max, gridnum)
Vx, Vz, Phi = np.meshgrid(vx, vz, phi)
v2 = np.ravel(( Vz - (Vx*np.sin(Phi) + Vz*np.cos(Phi)) )**2)
U2 = np.array([np.ravel(Vx*Vx),
np.ravel(Vz*Vz),
np.ravel(Phi*Phi),
2*np.ravel(Vx*Vz),
2*np.ravel(Vx*Phi),
2*np.ravel(Vz*Phi)]).T
c2 = cp.Variable(6)
cp.Problem(cp.Minimize(cp.max(U2@c2 - v2)), [U2@c2 >= v2, c2[:3]>=0]).solve(verbose=True, solver=cp.MOSEK)
c2 = c2.value
c2
C2 = np.zeros((n,n))
C2[vx_idx, vx_idx] = c2[0]/2
C2[vz_idx, vz_idx] = c2[1]/2
C2[phi_idx, phi_idx] = c2[2]/2
C2[vx_idx, vz_idx] = c2[3]
C2[vx_idx, phi_idx] = c2[4]
C2[vz_idx, phi_idx] = c2[5]
C2 += C2.T
gam2 = np.real(sla.sqrtm(C2))
gam2
```
### $f_3$
No error -- linearization is the same as original
### $f_4$
```
gridnum = 50
vz = np.linspace(vz_min, vz_max, gridnum)
phi = np.linspace(phi_min, phi_max, gridnum)
phidot = np.linspace(phidot_min, phidot_max, gridnum)
Vz, Phi, Phidot = np.meshgrid(vz, phi, phidot)
v4 = np.ravel(( -g*Phi - (Vz*Phidot - g*np.sin(Phi)) )**2)
U4 = np.array([np.ravel(Vz*Vz),
np.ravel(Phi*Phi),
np.ravel(Phidot*Phidot),
2*np.ravel(Vz*Phi),
2*np.ravel(Vz*Phidot),
2*np.ravel(Phi*Phidot)]).T
c4 = cp.Variable(6)
cp.Problem(cp.Minimize(cp.max(U4@c4 - v4)), [U4@c4 >= v4, c4[:3]>=0]).solve(verbose=True, solver=cp.MOSEK)
c4 = c4.value
c4
C4 = np.zeros((n,n))
C4[vz_idx, vz_idx] = c4[0]/2
C4[phi_idx, phi_idx] = c4[1]/2
C4[phidot_idx, phidot_idx] = c4[2]/2
C4[vz_idx, phi_idx] = c4[3]
C4[vz_idx, phidot_idx] = c4[4]
C4[phi_idx, phidot_idx] = c4[5]
C4 += C4.T
gam4 = np.real(sla.sqrtm(C4))
gam4
```
### $f_5$
```
gridnum = 50
vx = np.linspace(vx_min, vx_max, gridnum)
phi = np.linspace(phi_min, phi_max, gridnum)
phidot = np.linspace(phidot_min, phidot_max, gridnum)
Vx, Phi, Phidot = np.meshgrid(vx, phi, phidot)
v5 = np.ravel(( 0 - (-Vx*Phidot - g*np.cos(Phi) + g) )**2)
U5 = np.array([np.ravel(Vx*Vx),
np.ravel(Phi*Phi),
np.ravel(Phidot*Phidot),
2*np.ravel(Vx*Phi),
2*np.ravel(Vx*Phidot),
2*np.ravel(Phi*Phidot)]).T
c5 = cp.Variable(6)
cp.Problem(cp.Minimize(cp.max(U5@c5 - v5)), [U5@c5 >= v5, c5[:3]>=0]).solve(verbose=True, solver=cp.MOSEK)
c5 = c5.value
c5
C5 = np.zeros((n,n))
C5[vx_idx, vx_idx] = c5[0]/2
C5[phi_idx, phi_idx] = c5[1]/2
C5[phidot_idx, phidot_idx] = c5[2]/2
C5[vx_idx, phi_idx] = c5[3]
C5[vx_idx, phidot_idx] = c5[4]
C5[phi_idx, phidot_idx] = c5[5]
C5 += C5.T
np.linalg.eig(C5)[0]
gam5 = np.real(sla.sqrtm(C5))
gam5
```
### $f_6$
No error -- linearization is the same as original
## Final system
```
from linearize_dynamics import *
A = quadrotor_jacobian(np.zeros(n))
G = np.eye(n)
C = np.vstack([gam1, gam2, gam4, gam5])
```
### Check correctness
```
prop = np.random.random((1000000, n))
rand_xs = x_max*prop + x_min*(1-prop)
fx = xdot_uncontrolled(torch.Tensor(rand_xs))
# print(np.linalg.norm((fx - rand_xs@A.T)@np.linalg.inv(G).T, axis=1) <= np.linalg.norm(rand_xs@C.T, axis=1))
print((np.linalg.norm((fx - rand_xs@A.T)@np.linalg.inv(G).T, axis=1) <= np.linalg.norm(rand_xs@C.T, axis=1)).all())
ratio = np.linalg.norm(rand_xs@C.T, axis=1)/np.linalg.norm((fx - rand_xs@A.T)@np.linalg.inv(G).T, axis=1)
print(ratio.max())
print(ratio.mean())
print(np.median(ratio))
```
### Save
```
np.save('A.npy', A)
np.save('G.npy', G)
np.save('C.npy', C)
```
## Check if robust LQR solves
```
import scipy.linalg as la
mass = 1
moment_arm = 0.01
inertia_roll = 15.67e-3
B = np.array([
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[1/mass, 1/mass],
[moment_arm/inertia_roll, -moment_arm/inertia_roll]
])
m = B.shape[1]
D = np.zeros((C.shape[0], m))
Q = np.random.randn(n, n)
Q = Q.T @ Q
# Q = np.eye(n)
R = np.random.randn(m, m)
R = R.T @ R
# R = np.eye(m)
alpha = 0.0001
n, m = B.shape
wq = C.shape[0]
S = cp.Variable((n, n), symmetric=True)
Y = cp.Variable((m, n))
mu = cp.Variable()
R_sqrt = la.sqrtm(R)
f = cp.trace(S @ Q) + cp.matrix_frac(Y.T @ R_sqrt, S)
cons_mat = cp.bmat((
(A @ S + S @ A.T + cp.multiply(mu, G @ G.T) + B @ Y + Y.T @ B.T + alpha * S, S @ C.T + Y.T @ D.T),
(C @ S + D @ Y, -cp.multiply(mu, np.eye(wq)))
))
cons = [S >> 0, mu >= 1e-2] + [cons_mat << 0]
cp.Problem(cp.Minimize(f), cons).solve(solver=cp.MOSEK, verbose=True)
K = np.linalg.solve(S.value, Y.value.T).T
```
| github_jupyter |
# Predicting sentiment from product reviews
# Fire up GraphLab Create
```
import graphlab
```
# Read some product review data
Loading reviews for a set of baby products.
```
products = graphlab.SFrame('amazon_baby.gl/')
```
# Let's explore this data together
Data includes the product name, the review text and the rating of the review.
```
products.head()
```
# Build the word count vector for each review
```
products['word_count'] = graphlab.text_analytics.count_words(products['review'])
products.head()
graphlab.canvas.set_target('ipynb')
products['name'].show()
```
# Examining the reviews for most-sold product: 'Vulli Sophie the Giraffe Teether'
```
giraffe_reviews = products[products['name'] == 'Vulli Sophie the Giraffe Teether']
len(giraffe_reviews)
giraffe_reviews['rating'].show(view='Categorical')
```
# Build a sentiment classifier
```
products['rating'].show(view='Categorical')
```
## Define what's a positive and a negative sentiment
We will ignore all reviews with rating = 3, since they tend to have a neutral sentiment. Reviews with a rating of 4 or higher will be considered positive, while the ones with rating of 2 or lower will have a negative sentiment.
```
#ignore all 3* reviews
products = products[products['rating'] != 3]
#positive sentiment = 4* or 5* reviews
products['sentiment'] = products['rating'] >=4
products.head()
```
## Let's train the sentiment classifier
```
train_data,test_data = products.random_split(.8, seed=0)
sentiment_model = graphlab.logistic_classifier.create(train_data,
target='sentiment',
features=['word_count'],
validation_set=test_data)
```
# Evaluate the sentiment model
```
sentiment_model.evaluate(test_data, metric='roc_curve')
sentiment_model.show(view='Evaluation')
```
# Applying the learned model to understand sentiment for Giraffe
```
giraffe_reviews['predicted_sentiment'] = sentiment_model.predict(giraffe_reviews, output_type='probability')
giraffe_reviews.head()
```
## Sort the reviews based on the predicted sentiment and explore
```
giraffe_reviews = giraffe_reviews.sort('predicted_sentiment', ascending=False)
giraffe_reviews.head()
```
## Most positive reviews for the giraffe
```
giraffe_reviews[0]['review']
giraffe_reviews[1]['review']
```
## Show most negative reviews for giraffe
```
giraffe_reviews[-1]['review']
giraffe_reviews[-2]['review']
```
| github_jupyter |
# Expectiminimax
Der Vollständigkeit halber der ganze Expectiminimax Algorithmus. <br>
Während 1-ply, 2-ply und 3-ply nur den ersten, die ersten beiden, bzw. ersten drei Schritte von Expectiminimax ausgeführt haben, kann man alle mit dem Expectiminimax Algorithmus zusammenfassen. Das erlaubt einem eine saubere Notation und kann (mit einem ausreichend starken Rechner und genug Geduld) eventuell noch tiefer suchen!
```
from Player import ValuePlayer
class ExpectiminimaxValuePlayer(ValuePlayer):
    """Backgammon player that chooses moves via expectiminimax search over dice rolls.

    Depth convention: max_depth 0 = 1-ply, 1 = 2-ply, 2 = 3-ply, and so on.
    """

    # Constructor takes a parameter for the maximum search depth
    # 0 = 1-ply, 1 = 2-ply, 2 = 3-ply, etc.
    def __init__(self, player, valuefunction, max_depth):
        ValuePlayer.__init__(self, player, valuefunction)
        self.max_depth = max_depth

    def get_action(self, actions, game):
        """Return the candidate action with the highest expectiminimax value."""
        # Save the game state so each candidate move is tried from the same position
        old_state = game.get_state()
        # Initialize search variables
        best_value = -1
        best_action = None
        # Examine all candidate moves
        for a in actions:
            # Execute the move
            game.execute_moves(a, self.player)
            # Evaluate the resulting state with expectiminimax
            value = self.expectiminimax(game, 0)
            # Remember the best one
            if value > best_value:
                best_value = value
                best_action = a
            # Restore the game state
            game.reset_to_state(old_state)
        return best_action

    def expectiminimax(self, game, depth):
        """Expected value of the current position, alternating min/max nodes per ply."""
        # Leaf of our search tree: evaluate with the value function
        if depth == self.max_depth:
            return self.value(game, self.player)
        else:
            # Consider all possible dice rolls (unordered pairs a <= b)
            all_rolls = [(a,b) for a in range(1,7) for b in range(a,7)]
            value = 0
            for roll in all_rolls:
                # Probability of each roll (a double is half as likely as a mixed roll)
                probability = 1/18 if roll[0] != roll[1] else 1/36
                state = game.get_state()
                # Min node: opponent to move, assume it picks the outcome worst for us
                if depth % 2 == 0:
                    moves = game.get_moves(roll, game.get_opponent(self.player))
                    temp_val = 1
                    for move in moves:
                        game.execute_moves(move, game.get_opponent(self.player))
                        # Evaluation is always from our own perspective
                        v = self.expectiminimax(game, depth + 1)
                        if v < temp_val:
                            temp_val = v
                # Max node: our turn, pick the best outcome
                else:
                    moves = game.get_moves(roll, self.player)
                    temp_val = 0
                    for move in moves:
                        game.execute_moves(move, self.player)
                        # Evaluation is always from our own perspective
                        v = self.expectiminimax(game, depth + 1)
                        if v > temp_val:
                            temp_val = v
                # Restore the game state
                # NOTE(review): the state is restored once per roll, after the move
                # loop; successive execute_moves within one roll appear to compound —
                # confirm against the original notebook's indentation.
                game.reset_to_state(state)
                # Add the node value weighted by the roll probability
                value += probability * temp_val
            return value

    def get_name(self):
        return "ExpectiminimaxValuePlayer [" + self.value.__name__ + "]"
class ExpectiminimaxModelPlayer(ExpectiminimaxValuePlayer):
    """Expectiminimax player whose value function is a trained neural-net model."""

    def __init__(self, player, model, depth):
        ExpectiminimaxValuePlayer.__init__(self, player, self.get_value, depth)
        self.model = model

    def get_value(self, game, player):
        # Run the board features for *player* through the model
        features = game.extractFeatures(player)
        v = self.model.get_output(features)
        # Flip the value so it is always from this player's perspective
        # (presumably the model outputs are relative to one fixed side — confirm)
        v = 1 - v if self.player == game.players[0] else v
        return v

    def get_name(self):
        return "EMinMaxModelPlayer [" + self.model.get_name() +"]"
import Player
from NeuralNetModel import TDGammonModel
import tensorflow as tf
graph = tf.Graph()
sess = tf.Session(graph=graph)
with sess.as_default(), graph.as_default():
model = TDGammonModel(sess, restore=True)
model.test(games = 100, enemyPlayer = ExpectiminimaxModelPlayer('white', model, 1))
import Player
import PlayerTest
players = [Player.ValuePlayer('black', Player.blocker), ExpectiminimaxValuePlayer('white', Player.blocker, 1)]
PlayerTest.test(players, 100)
import Player
import PlayerTest
from NeuralNetModel import TDGammonModel
import tensorflow as tf
graph = tf.Graph()
sess = tf.Session(graph=graph)
with sess.as_default(), graph.as_default():
model = TDGammonModel(sess, restore=True)
players = [Player.ModelPlayer('black', model), Player.ExpectiminimaxModelPlayer('white', model, 2)]
PlayerTest.test(players, 10)
```
Diese 3 Spiele haben 24 Stunden gedauert....
| github_jupyter |
### Honor Track: experience replay
There's a powerful technique that you can use to improve sample efficiency for off-policy algorithms: [spoiler] Experience replay :)
The catch is that you can train Q-learning and EV-SARSA on `<s,a,r,s'>` tuples even if they aren't sampled under current agent's policy. So here's what we're gonna do:
<img src=https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/exp_replay.png width=480>
#### Training with experience replay
1. Play game, sample `<s,a,r,s'>`.
2. Update q-values based on `<s,a,r,s'>`.
3. Store `<s,a,r,s'>` transition in a buffer.
3. If buffer is full, delete earliest data.
4. Sample K such transitions from that buffer and update q-values based on them.
To enable such training, first we must implement a memory structure that would act like such a buffer.
```
import sys, os
if 'google.colab' in sys.modules and not os.path.exists('.setup_complete'):
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/spring20/setup_colab.sh -O- | bash
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/grading.py -O ../grading.py
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week3_model_free/submit.py
!touch .setup_complete
# This code creates a virtual display to draw game images on.
# It will have no effect if your machine has a monitor.
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import clear_output
import random
class ReplayBuffer(object):
    """Fixed-size FIFO buffer of <s, a, r, s', done> transitions for experience replay."""

    def __init__(self, size):
        """
        Create Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        """
        # Transitions are stored oldest-first as plain tuples.
        self._storage = []
        self._maxsize = size

    def __len__(self):
        return len(self._storage)

    def add(self, obs_t, action, reward, obs_tp1, done):
        """Append one transition, evicting the oldest entries (FIFO) beyond _maxsize."""
        data = (obs_t, action, reward, obs_tp1, done)
        self._storage.append(data)
        # FIFO: drop the oldest transitions first once capacity is exceeded.
        while len(self._storage) > self._maxsize:
            self._storage.pop(0)

    def sample(self, batch_size):
        """Sample a batch of experiences uniformly at random, with replacement.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        obs_batch: np.array
            batch of observations
        act_batch: np.array
            batch of actions executed given obs_batch
        rew_batch: np.array
            rewards received as results of executing act_batch
        next_obs_batch: np.array
            next set of observations seen after executing act_batch
        done_mask: np.array
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        """
        # Draw indices WITH replacement so the returned batch always contains
        # exactly batch_size rows, even when the buffer holds fewer transitions.
        # (The previous without-replacement draw silently returned a smaller batch.)
        idxes = [random.randrange(len(self._storage)) for _ in range(batch_size)]
        # Transpose the selected tuples column-wise instead of building one 2-D
        # object ndarray, which fails when observations are themselves arrays.
        obs_t, actions, rewards, obs_tp1, dones = zip(*(self._storage[i] for i in idxes))
        return (np.array(obs_t), np.array(actions), np.array(rewards),
                np.array(obs_tp1), np.array(dones))
```
Some tests to make sure your buffer works right
```
def obj2arrays(obj):
    """Yield each element of *obj* wrapped in a length-1 numpy array."""
    return (np.array([element]) for element in obj)
def obj2sampled(obj):
    """Return *obj* converted to a tuple of length-1 numpy arrays (one per element)."""
    wrapped = obj2arrays(obj)
    return tuple(wrapped)
replay = ReplayBuffer(2)
obj1 = (0, 1, 2, 3, True)
obj2 = (4, 5, 6, 7, False)
replay.add(*obj1)
assert replay.sample(1) == obj2sampled(obj1), \
"If there's just one object in buffer, it must be retrieved by buf.sample(1)"
replay.add(*obj2)
assert len(replay) == 2, "Please make sure __len__ methods works as intended."
replay.add(*obj2)
assert len(replay) == 2, "When buffer is at max capacity, replace objects instead of adding new ones."
assert tuple(np.unique(a) for a in replay.sample(100)) == obj2sampled(obj2)
replay.add(*obj1)
assert max(len(np.unique(a)) for a in replay.sample(100)) == 2
replay.add(*obj1)
assert tuple(np.unique(a) for a in replay.sample(100)) == obj2sampled(obj1)
print("Success!")
```
Now let's use this buffer to improve training:
```
import gym
from qlearning import QLearningAgent
try:
env = gym.make('Taxi-v3')
except gym.error.DeprecatedEnv:
# Taxi-v2 was replaced with Taxi-v3 in gym 0.15.0
env = gym.make('Taxi-v2')
n_actions = env.action_space.n
def play_and_train_with_replay(env, agent, replay=None,
                               t_max=10**4, replay_batch_size=32):
    """
    Run one full episode, training the agent on every observed transition.

    - actions are chosen via agent.get_action(s)
    - the agent is trained with agent.update(...) after every step
    - returns the total (undiscounted) episode reward

    :param replay: ReplayBuffer where agent can store and sample (s,a,r,s',done)
        tuples. If None, experience replay is disabled.
    """
    episode_reward = 0.0
    state = env.reset()
    for _ in range(t_max):
        action = agent.get_action(state)
        next_state, reward, done, _ = env.step(action)
        # Learn from the transition that was just observed.
        agent.update(state, action, reward, next_state)
        if replay is not None:
            # Remember the transition, then rehearse a random minibatch from memory.
            replay.add(state, action, reward, next_state, done)
            batch = replay.sample(replay_batch_size)
            for s_b, a_b, r_b, ns_b in zip(batch[0], batch[1], batch[2], batch[3]):
                agent.update(s_b, a_b, r_b, ns_b)
        episode_reward += reward
        state = next_state
        if done:
            break
    return episode_reward
# Create two agents: first will use experience replay, second will not.
agent_baseline = QLearningAgent(
alpha=0.5, epsilon=0.25, discount=0.99,
get_legal_actions=lambda s: range(n_actions))
agent_replay = QLearningAgent(
alpha=0.5, epsilon=0.25, discount=0.99,
get_legal_actions=lambda s: range(n_actions))
replay = ReplayBuffer(1000)
from IPython.display import clear_output
import pandas as pd
def moving_average(x, span=100):
    """Exponentially weighted moving average of *x* (pandas ``ewm`` with the given span)."""
    series = pd.DataFrame({'x': np.asarray(x)}).x
    return series.ewm(span=span).mean().values
rewards_replay, rewards_baseline = [], []
for i in range(1000):
rewards_replay.append(
play_and_train_with_replay(env, agent_replay, replay))
rewards_baseline.append(
play_and_train_with_replay(env, agent_baseline, replay=None))
agent_replay.epsilon *= 0.99
agent_baseline.epsilon *= 0.99
if i % 100 == 0:
clear_output(True)
print('Baseline : eps =', agent_replay.epsilon,
'mean reward =', np.mean(rewards_baseline[-10:]))
print('ExpReplay: eps =', agent_baseline.epsilon,
'mean reward =', np.mean(rewards_replay[-10:]))
plt.plot(moving_average(rewards_replay), label='exp. replay')
plt.plot(moving_average(rewards_baseline), label='baseline')
plt.grid()
plt.legend()
plt.show()
```
### Submit to Coursera
```
from submit import submit_experience_replay
submit_experience_replay(rewards_replay, rewards_baseline, '', '')
```
#### What to expect:
Experience replay, if implemented correctly, will improve algorithm's initial convergence a lot, but it shouldn't affect the final performance.
### Outro
We will use the code you just wrote extensively in the next week of our course. If you're feeling that you need more examples to understand how experience replay works, try using it for binarized state spaces (CartPole or other __[classic control envs](https://gym.openai.com/envs/#classic_control)__).
__Next week__ we're gonna explore how q-learning and similar algorithms can be applied for large state spaces, with deep learning models to approximate the Q function.
However, __the code you've written__ for this week is already capable of solving many RL problems, and as an added benefit - it is very easy to detach. You can use Q-learning, SARSA and Experience Replay for any RL problems you want to solve - just throw 'em into a file and import the stuff you need.
| github_jupyter |
```
import os, json
from pathlib import Path
from pandas import DataFrame
from mpcontribs.client import Client
from unflatten import unflatten
client = Client()
```
**Load raw data**
```
name = "screening_inorganic_pv"
indir = Path("/Users/patrick/gitrepos/mp/mpcontribs-data/ThinFilmPV")
files = {
"summary": "SUMMARY.json",
"absorption": "ABSORPTION-CLIPPED.json",
"dos": "DOS.json",
"formulae": "FORMATTED-FORMULAE.json"
}
data = {}
for k, v in files.items():
path = indir / v
with path.open(mode="r") as f:
data[k] = json.load(f)
for k, v in data.items():
print(k, len(v))
```
**Prepare contributions**
```
config = {
"SLME_500_nm": {"path": "SLME.500nm", "unit": "%"},
"SLME_1000_nm": {"path": "SLME.1000nm", "unit": "%"},
"E_g": {"path": "ΔE.corrected", "unit": "eV"},
"E_g_d": {"path": "ΔE.direct", "unit": "eV"},
"E_g_da": {"path": "ΔE.dipole", "unit": "eV"},
"m_e": {"path": "mᵉ", "unit": "mₑ"},
"m_h": {"path": "mʰ", "unit": "mₑ"}
}
columns = {c["path"]: c["unit"] for c in config.values()}
contributions = []
for mp_id, d in data["summary"].items():
formula = data["formulae"][mp_id].replace("<sub>", "").replace("</sub>", "")
contrib = {"project": name, "identifier": mp_id, "data": {"formula": formula}}
cdata = {v["path"]: f'{d[k]} {v["unit"]}' for k, v in config.items()}
contrib["data"] = unflatten(cdata)
df_abs = DataFrame(data=data["absorption"][mp_id])
df_abs.columns = ["hν [eV]", "α [cm⁻¹]"]
df_abs.set_index("hν [eV]", inplace=True)
df_abs.columns.name = "" # legend name
df_abs.attrs["name"] = "absorption"
df_abs.attrs["title"] = "optical absorption spectrum"
df_abs.attrs["labels"] = {"variable": "", "value": "α [cm⁻¹]"}
df_dos = DataFrame(data=data["dos"][mp_id])
df_dos.columns = ['E [eV]', 'DOS [eV⁻¹]']
df_dos.set_index("E [eV]", inplace=True)
df_dos.columns.name = "" # legend name
df_dos.attrs["name"] = "DOS"
df_dos.attrs["title"] = "electronic density of states"
df_dos.attrs["labels"] = {"variable": "", "value": "DOS [eV⁻¹]"}
contrib["tables"] = [df_abs, df_dos]
contributions.append(contrib)
len(contributions)
```
**Submit contributions**
```
client.delete_contributions(name)
client.init_columns(name, columns)
client.submit_contributions(contributions[:5])
```
**Retrieve and plot tables**
```
all_ids = client.get_all_ids(
{"project": "screening_inorganic_pv"}, include=["tables"]
).get(name, {})
cids = list(all_ids["ids"])
tids = list(all_ids["tables"]["ids"])
len(cids), len(tids)
client.get_contribution(cids[0])
t = client.get_table(tids[0]) # pandas DataFrame
t.display()
```
| github_jupyter |
# Make data and load data explained
Global Forest Change dataset https://earthenginepartners.appspot.com/science-2013-global-forest/download_v1.6.html is divided into 10x10 degree tiles, each of which comes with six raster files per tile: treecover, gain, data mask, loss year, first and last. All files contain unsigned 8-bit values and have a spatial resolution of 1 arc-second per pixel, which correspond to approximately 30 meters per pixel around the equator. After 2013 loss year and last files were updated annually. The last 2018 loss year file assign an integer value 0-18 to each pixel. 1-18 corresponds to the year (2001-2018) at which deforestation event was observed at this location or 0 if the deforestation was not detected there in the period 2001-2018. The dataset is such that once a pixel is assigned as deforestrated, it does not go back to forested at any time in the future. We collected the following ten tif files: treecover, gain, datamask, all "last" files from 2014 to 2018 and the most recent, 2018, loss year file. As our target area Madre de Dios is located at the intersection of three of the Hansen dataset tiles, for each of the ten files we collected the the corresponding tiles, merged them together in one raster file and cropped it with the Madre de Dios shape file. R file can be fond in r_code/R.ipynb. The resulting raster file was of spatial size 14646 x 15723 pixels and those pixels that were outside Madre de Dios boundaries were masked as NA values. Since we wish our models to be able to predict the label of each pixel of Madre de Dios area by analyzing an image, or time series of images, that captures its local region, we also included pixels lying in a buffer area of 0.09 degree (or approximately 10km) in our dataset. This allowed us to extract features from images that cover area up to 10km away from any Madre de Dios pixel.
<br> **from R:**
Junin area corresponds to two tiles.
An R script has been run to collect this tiles, merge them, and mask them
with Junin area + buffer shape file. See R notebook for more details.
The following tiff files were then created and saved in sourcepath = '/home/ubuntu/Madre':
datamask_2018.tif
gain_2018.tif
treecover2000_2018.tif
if_in_buffer.tif
lossyear_2018.tif
last_2018_1.tif
last_2018_2.tif
last_2018_3.tif
last_2018_4.tif
last_2017_1.tif
last_2017_2.tif
last_2017_3.tif
last_2017_4.tif
last_2016_1.tif
last_2016_2.tif
last_2016_3.tif
last_2016_4.tif
last_2015_1.tif
last_2015_2.tif
last_2015_3.tif
last_2015_4.tif
last_2014_1.tif
last_2014_2.tif
last_2014_3.tif
last_2014_4.tif
Each file is one layer raster file of shape 14646, 15723. NA values are encoded as -1.
NA values are pixels that were masked as they are not in the Madre de Dios area and its buffer
**datamask_2018.tif**:
is a layer that has values 0,1,2,or -1.
0 for no data (Madre de Dios does not have pixels with no data)
1 for mapped land surface
2 for permanent water bodies
-1 for masked pixel
**gain_2018.tif**
is a layer that has values 0,1,or -1.
0 for no gain experienced between 2001 and 2012
1 for gain experienced between 2001 and 2012
-1. for masked pixel
**treecover2000_2018.tif**
is a layer that has values between 0,100,or -1.
value between 0 and 100 indicates percentage tree cover observed in 2000.
-1 for masked pixel
**if_in_buffer.tif**
is a layer that has values 0,1,or -1.
0 if a pixel lies in Madre de Dios area
1 if a pixel lies in the buffer area
-1 for masked pixel
The first three files are produced form the files:
datamask
gain
treecover2000
Downloaded from:
https://earthenginepartners.appspot.com/science-2013-global-forest/download_v1.6.html
For how if_in_buffer.tif was created see R notebook.
**lossyear_2018.tif**
Downloaded from:
https://earthenginepartners.appspot.com/science-2013-global-forest/download_v1.6.html
contains the most recent lossyear file.
layer with values 0,1,2,3,..18 or -1
0 for no loss experienced between 2001 and 2018
1,2,3..18 for loss experienced in 2001, 2002, 2003,..2018
-1 for masked pixel
last_2018_1.tif
last_2018_2.tif
last_2018_3.tif
last_2018_4.tif
The "last" file 4 bands corresponding to year 2018
each has values between 0:255 or -1
masked pixels have value -1
# Input layers:
<img src="images/table.png">
The only feature we constructed from Hansen data is one hot encoded cathegorical variable **if loss when** that for each neighbouring pixel encodes if it has experienced deforestation and how far in the past this was obsered. If pixel did not experienced deforestation, all cathegorical layers have 0 at that location. Our motivation to have it is because we had the hypothesis that deforestation event cluster in certain areas. Therefore, we wanted to have feature that summarize the information of neighbouring pixels' deforestation state.
One can set different enocding through function:
def if_def_when(lossyear,year,cutoffs = [2,5,8]):
"""
Creates categorical variables for deforestration event given cutoffs.
Values in cutoffs define the time bins
Returns len(cutoffs) + 1 cathegorical layers:
Example: cutoffs = [2,5,8], num of layers = 4 , considered year = year
Cathegories 0,1,2,3 take values 1:
for layer 0 : if year - lossyear is in [0,2)
for layer 1 : if year - lossyear is in [2,5)
for layer 2 : if year - lossyear is in [5,8)
for layer 3 : if deforestation 8 years ago or more
For a given considered year we have no prior knowledge for future deforestations:
if loss event is in year > considered year or pixel is non deforested up to 2018+,
all categories have value 0
"""
<br>Currently the cut offs are as follows:
<br> For considered year t and loss taking values 1,2,3,4,...18:
<br> if loss > t all cathegorical levels take 0. Otherwise:
<br>Layer 1) deforestation in the same or last year = 1 if t - loss in [0,1]
<br>Layer 2) deforestation in the past 2 to 4 years = 1 if t - loss in [2,4]
<br>Layer 3) deforestation in the past 5 to 7 years = 1 if t - loss in [5,7]
<br>Layer 4) deforestation more than 7 years = 1 if t - loss in [8,t-1]
All our models were build so that they can take two or more tensors with the same spatial dimensions, which we define below, and forecast if deforestation is observed in the following year at the locating corresponding to the spatially-central pixel of these tensors.
The first 3D tensor that any of our models receives, which we named **static**, is a tensor of shape $\mathbf{S} \in \mathbb{R}^{2 \times (2r+1) \times (2r+1)}$, where $(2r+1) \times (2r+1)$ is its spatial dimension and $r$ is a predefined hyperparameter that defines the number of pixels the input tensor has in each spatial direction from the target central pixel. In the data classes implementation the parameter **size** corresponds to **r**. The two channels of this tensor are treecover2000 and datamask.
Our second set of tensors is a time series of 3D tensors $\mathbf{X}_{t-3}$,$\mathbf{X}_{t-2}$,$\mathbf{X}_{t}$ $\in \mathbf{R}^{d \times (2r+1) \times (2r+1)}$, where again each tensor has spatial dimensions $(2r+1) \times (2r+1)$ but depth **d = 4 + the number of layers of the if_loss_when** encoded variable. The channels of a tensor with time index $t$ are **if_loss_when(i,t), last_b30(t), last_b40(t), last_b50(t) and last_b70(t)**
This time series is stored in 4D tensor of shape (c x t x 2r+1 x 2r+1) and here c = 4+4 and t = 3
Finally each tensor with time index $t$ comes with a label $Y_{t+1} \in \{0,1\}$ which takes value 1 only if the target central pixel (at spatial location $r+1 \times r+1$) is marked as deforested exactly in year $t+1$. To clarify this, here we note that if this pixel was labeled as deforested in any other year $t_j \neq t+1$ ,lossyear($t_j$) = 1, or was never labeled as deforested in the study period 2001-2018 , lossyear($t_j$) = 0 $\forall$ $t_j$ in ${1,2,..18}$, then $Y_{t+1} = 0$
## Set of valid pixels in each year
Due to the characteristics of Hansen dataset, we know that if a pixel is labeled as deforested in year $t_{j}$ then the pixel never returns to the state of being forested. Additionally, if its the percentage of tree cover observed in 2000 was below 30\%, than this location is not considered as forest. Only if a pixel with treecover2000<30% experience "gain" in the study period 2001-2012 we may assume it corresponds to a forested area from 2013 onward. Finally, if it has $datamask=1$ then we know it is a permanent water body. Having stated this facts, we note that if our models aim to forecast the label of a pixel with index $j$ , $Y^{j}_{t+1} \equiv \mathbb{I}\{lossyear_j = t+1\}$, they would not be of any use if we know that this pixel j is not a forested area in year t. It will never be reverted to forest and therefore detecting deforestation at this location in year $t+1$ doesn't make sense. Therefore, when predicting the labels of pixels $Y{^j}_{t+1}$ in year $t+1$, we restricted these set of pixels to be:
$$\mathbf{J}_t =: \{j \in \mathbb{M} : ( lossyear_j > t \,\ \cup \,\ lossyear_j = 0 )$$
$$\cap (datamask_j = 1) \cap (treecover_j > 30\% \,\ \cup \,\ gain_j = 1) \}$$
where $\mathbb{M}$ is the index set of pixels lying within the Madre de Dios boundaries.
Since channel **treecover2000** has range 0:100 we rescaled it to be in the range 0:1.
<br>Each of the bands of Landset image composite is also separately normalized, where normalization is taken with respect to the spatial domain at a single time image. Reason for that is that different channels have different means and std. Mean and std are also computed per year because images might be taken at different seasons and therefore the channels values distribution might be different.
<br>Since for Madre de Dios the datamask has no entries equal to 0, we re-encoded the values as 0 - land, 1 - water bodies.
<br>For our last 3 models that utilize a time series of tensors we worked with the following dataset: $[\mathbf{S^j},\textbf{X}^j_{2014},\textbf{X}^j_{2015},\textbf{X}^j
_{2016}]$ as set of input tensors and $Y^j_{2017}$ as the set of labels to be predicted where $j \in \mathbf{J}_{2016}$.
<br>We split the data into train and validation data sets with ratio 8:2. From the 80% trainig data, a 5 fold cross validation training with early stopping was used. We used the validation data to select the best model of each class. We evaluated their performance on $[\mathbf{S^j},\textbf{X}^j_{2015},\textbf{X}^j_{2016},\textbf{X}^j_{2017}]$ as the set of input tensors and $Y^j_{2018}$ as the set of labels to be predicted where $j \in \mathbf{J}_{2017}$.
<br>Our Model 1, 2D CNN model, is able to analyze only mono-temporal tensors and from them to extract features forecasting the central pixel deforestation label in the following year. We used the union of the following data pairs of tensors and labels as dataset:
<br>$[\mathbf{S^j},\textbf{X}^j_{2014}]$ as an input tensors and $Y^j_{2015}$ as the set of labels to be predicted where $j \in \mathbf{J}_{2014}$.
<br>$[\mathbf{S^j},\textbf{X}^j_{2015}]$ as an input tensors and $Y^j_{2016}$ as the set of labels to be predicted where $j \in \mathbf{J}_{2015}$.
<br>$[\mathbf{S^j},\textbf{X}^j_{2016}]$ as an input tensors and $Y^j_{2017}$ as the set of labels to be predicted where $j \in \mathbf{J}_{2016}$.
<br>We evaluated its performance on :
<br>$[\mathbf{S^j},\textbf{X}^j_{2017}]$ as an input tensors and $Y^j_{2018}$ as the set of labels to be predicted where $j \in \mathbf{J}_{2017}$.
<br> **WHERE IS THIS SAVED? PIXEL FILE?**
## The rest of this notebook explains how to use the tiff files to produce the above defined datasets.
Load needed packages.
sourcepath is the path to folder that has all tiff files.
wherepath is the path to folder where the tensors to be saved and loaded later
```
# --- Imports and path configuration ------------------------------------------
import torch
from torchvision.transforms import ToTensor
from PIL import Image
# Hansen rasters are far larger than PIL's default decompression-bomb limit.
Image.MAX_IMAGE_PIXELS = None
from torch.utils.data import Dataset
import os.path
import numpy as np
# sourcepath is path to folder that has all tiff files.
# they must have value 111 for NA, and last tif file should come as last_year_1 for band 1 in year = year
# NOTE(review): other cells in this notebook treat NA as -1 — confirm whether the NA code here is 111 or -1.
# wherepath is the path to folder where the tensors are saved and loaded later
# server = '/home/ubuntu/satellite' # for Amazon server
server = '/rds/general/project/aandedemand/live/satellite/junin' # For Imperial HPC
sourcepath = '/data_reduced'
wherepath = '/data_reduced/tensors'
sourcepath = server + sourcepath
wherepath = server + wherepath
print(sourcepath, wherepath)
# for visualising and time measuring
import matplotlib
import matplotlib.pyplot as plt
import time
print('done')
```
## **to_Tensor**
The following function open a raster (with one layer) as a tensor of size same as the tiff file - (14646, 15723)
<br>**path** is the sourcepath: '/home/ubuntu/Madre'
<br>**name** is one from above, eg: datamask_2018.tif, gain_2018.tif, treecover2000_2018.tif, if_in_buffer.tif
```
def to_Tensor(path,name):
    """Open the single-band raster `path`/`name` and return it as a 2D tensor.

    The channel dimension added by ToTensor is squeezed away, so the result
    keeps the (height, width) shape of the tiff file.
    """
    raster = Image.open(path + "/" + name)
    tensor = ToTensor()(raster).squeeze(dim=0)
    return tensor
```
## datamask
```
"""
datamask:
-1 for NA value
0 for no data pixel in Madre de Dios
1 for land pixel in Madre de Dios
2 for water body pixel in Madre de Dios
"""
#Run this:
###############
datamask = to_Tensor(sourcepath,'Hansen/datamask_2018.tif')
##############
#visualise:
colors = ['grey','white','green','blue']
plt.fig = plt.figure(figsize=(14,14))
plt.matshow(datamask,cmap=matplotlib.colors.ListedColormap(colors))
plt.show()
print("Number of pixels in AOI and buffer with no data (0 entries): ", len((datamask == 0).nonzero()))
```
Since for AOI area all pixels have value $\neq 0$, to rescale this layer to be in range 0,1 we change encoding as follows:
datamask:
-1 for NA value
0 for land pixel in AOI
1 for water body pixel in AOI
by running this line:
datamask[datamask != -1] = datamask[datamask != -1] - 1
```
# Re-encode in place: 1 (land) -> 0, 2 (water) -> 1; NA stays -1.
datamask[datamask != -1] = datamask[datamask != -1] - 1
```
## if_in_buffer
```
"""
Buffer:
-1 for NA value
0 for pixel in Madre de Dios
1 for pixel in buffer
"""
buffer = to_Tensor(sourcepath,'buffer/if_in_buffer.tif')
#to visualise
colors = ['grey','green','red']
plt.fig = plt.figure(figsize=(8,8))
plt.matshow(buffer,cmap=matplotlib.colors.ListedColormap(colors))
plt.show()
```
## DSM - new layer
Since no mask is applied, to mask the NA values with -1 do:
```
# Load the digital surface model (elevation) layer.
DSM = to_Tensor(sourcepath,'DSM/DSM_resample_clip_REDUCED.tif')
# Uncomment to mask everything outside the AOI + buffer with -1:
#DSM[datamask == -1 ] = -1
plt.fig = plt.figure(figsize=(8,8))
plt.matshow(DSM, norm = plt.Normalize(0, 6000))
plt.show()
```
Explore its values ditribution:
```
# Histogram of elevation values over non-masked pixels.
values = (DSM != -1).nonzero()
print(values)
values = DSM[ values[:,0] , values[:,1]].view(-1)
print(values)
plt.fig = plt.figure()
plt.hist(values.numpy())
plt.show()
# Minimum elevation; must be > 0 for a shift-free log transform.
(values).min()
```
Since values have positively skewed distribution take log transform. Since minimum value is 107, log transform without shift is possible.
```
# Log-transform the positively skewed elevation values (masked -1 entries untouched).
DSM[DSM != -1] = np.log(DSM[DSM != -1])
```
After log transform:
```
# Re-plot the value distribution after the log transform.
values = (DSM != -1).nonzero()
values = DSM[ values[:,0] , values[:,1]].view(-1)
plt.fig = plt.figure()
plt.hist(values.numpy())
plt.show()
```
To transform in order to have normal distribution:
<br> rescale_image function defined bellow
```
# rescale_image (defined in Data_maker_loader, same as the function below)
# standardises the layer to zero mean / unit std over non-masked pixels.
%cd ../
from Data_maker_loader import rescale_image
DSM ,mean, std = rescale_image(DSM)
del rescale_image
print("Mean after log transform: ", mean)
print("Std after log transform: ", std)
values = (DSM[:,:] != -1).nonzero()
values = DSM[ values[:,0] , values[:,1]].view(-1)
plt.fig = plt.figure()
plt.hist(values.numpy())
plt.show()
# Persist the transformed layer for later use as an extra static channel.
torch.save(DSM,wherepath+"/"+'DSM.pt')
```
### In summary, to include this layer do:
```
# Recipe for adding the DSM layer from the raw tiff:
DSM = to_Tensor(sourcepath,'DSM.tif')
# Mask area out of Madre de Dios and buffer
DSM[datamask == -1 ] = -1
# If positive skewed distribution of the values:
min_val = DSM[DSM != -1 ].min().numpy()
print("Min value of the elevation: ",min_val)
# if there are negative or zero values, the log transform must be applied after a shift to positive values only.
# log(0) = -Inf
if min_val > 0:
    DSM[DSM != -1] = np.log(DSM[DSM != -1])
else:
    # NOTE(review): to guarantee positivity this should shift by -min_val + 1;
    # adding min_val + 1 as written does not make all values positive when min_val < -1 — confirm.
    DSM[DSM != -1] = np.log(DSM[DSM != -1] + min_val + 1)
#Normalise:
DSM, DSMmean, DSMstd = rescale_image(DSM)
print("Extracted mean: ",DSMmean)
print("Devided std: ",DSMstd)
```
## **last_to_image**
The following function open the 4 rasters (one for each band) of the last files (eg last_18_1,..last_18_4) and returns them as a tensors of size (4, 14646, 15723)
<br>**path** is the sourcepath
<br>**year** any of 13,14,15,16,17,18
```
def last_to_image(path,year):
    """Load the four Landsat composite bands for year 20<year> as one tensor.

    Reads Hansen/last_20{year}_{b}.tif for bands b = 1..4 from `path` and
    concatenates them along the channel dimension, returning a float tensor
    of shape (4, height, width).
    """
    bands = []
    for b in range(1, 5):
        raster = Image.open(path + "/" + 'Hansen/last_20%d_%d.tif' % (year, b))
        bands.append(ToTensor()(raster))
    return torch.cat(bands, dim=0).float()
# Time loading one year of the Landsat composite.
start = time.time()
#Run this:###########
last_16 = last_to_image(sourcepath,16)
####################
print(last_16.shape)
print("Time needed to load one last file as tensor (in seconds): ",time.time() - start)
```
## Rescale Image
<br> **rescale_image**
Get satelite image of Madre de Dios area at a given year, compute the mean and std of all
non-masked pixels (those covering Madre de Dios area) and returns a normalized satellite image.
Normalization is done per channel. Reason for that is that different channels have different means and
std. Mean and std are also conputed per year because images might be taken at different seasons and therefore the channels values distribution might be different.
### Here we illustrate the 4 channels values distributions of year 2016 satellite image
```
# Histograms of the 4 band-value distributions over non-masked pixels (2016 image).
pixels = (last_16[1,:,:] != -1).nonzero()
values1 = last_16[0, pixels[:,0] , pixels[:,1]].view(-1)
values2 = last_16[1, pixels[:,0] , pixels[:,1]].view(-1)
values3 = last_16[2, pixels[:,0] , pixels[:,1]].view(-1)
values4 = last_16[3, pixels[:,0] , pixels[:,1]].view(-1)
plt.fig = plt.figure()
plt.subplot(2, 2, 1)
plt.hist(values1.numpy())
plt.title("band 1")
plt.subplot(2, 2, 2)
plt.hist(values2.numpy())
plt.title("band 2")
plt.subplot(2, 2, 3)
plt.hist(values3.numpy())
plt.title("band 3")
plt.subplot(2, 2, 4)
plt.hist(values4.numpy())
plt.title("band 4")
plt.show()
def rescale_image(image):
    """Normalise a satellite image per channel over its non-masked pixels.

    Masked locations (value -1 in channel 0) are left untouched; every other
    pixel is standardised to zero mean / unit std, computed per channel.
    Accepts a 2D (h, w) or 3D (c, h, w) tensor and returns
    (rescaled_image, mean, std) with the same dimensionality as the input.
    The input tensor itself is never modified.
    """
    was_2d = (len(image.shape) == 2)
    # Work on a detached copy so the caller's tensor is preserved.
    out = image.data.clone()
    if was_2d:
        out = out.unsqueeze(dim=0)
    # Valid pixels are taken from channel 0 — assumes all channels share the mask.
    rows, cols = (out[0, :, :] != -1).nonzero().unbind(dim=1)
    valid = out[:, rows, cols]
    mean = valid.mean(1, keepdim=True)
    std = valid.std(1, keepdim=True)
    out[:, rows, cols] = (valid - mean) / std
    if was_2d:
        out = out.squeeze(dim=0)
        mean = mean.squeeze(dim=0)
        std = std.squeeze(dim=0)
    return out, mean, std
# Time the per-channel normalisation and check the input tensor is preserved.
st = time.time()
rescaled_image, mean, std = rescale_image(last_16)
print("Time to rescale one year image:",time.time() - st)
print("\nMean of channles : \n",mean)
print("\nStd of channles : \n",std)
print("\nOriginal image preserved: ",last_16[:,555,7777])
print("Rescaled values at this location: ",rescaled_image[:,555,7777])
```
#### Rescale_image can also be applied to any tensor of 2 dimensions.
In this case rescaling is done with respect to the whole area in Made de Dios (that does not have -1 values) Note: if a new tensor has negative values by characteristics, one musth change the encoding.
## Create Categorical Layers Indicationg Deforestation Times Periods **if_def_when**
Takes as input:
the lossyear tensor;
the year to be considered as current t; t is one of 1,2,3,..18
the cutoffs - list of cut of values (of lenght n) that determine the one hot encoding
Returns:
"One hot" encded 3D tensor with shpe ([n+1, 14646, 15723]) for loss event.
Example:
Takes: 2D lossyear tensor, year = 14, cutoffs = [2,5,8] (n=3).
Returns: 3D Cathegorical tensor with num of layers = 4 (n+1)
Each Cathegorical layer gets value 1 if:
for layer 0: if year - lossyear is in [0,2) i.e: 14-14, 14-13 (lossyear = 14 , 13)
for layer 1: if year - lossyear is in [2,5) i.e: 14-12, 14-11, 14-10 (lossyear = 12 , 11, 10)
for layer 2: if year - lossyear is in [5,8) i.e: 14-9, 14-8, 14-7 (lossyear = 9 , 8, 7)
for layer 3: 8 years ago or more i.e: 14-6, 14-5,...14-1 (lossyear = 6, 5, 4, 3, 2, 1)
If lossyear value of a pixel is 0 or greater than t ( lossyear > 14 ), all cathegorical layers have value 0, which indicates that at year t=14, we know this pixel is not deforested yet. We consider all future deforestations event for year t = 14 as not observed yet ,"unknown". If pixel is masked, all cathegorical values take the NA value -1
```
def if_def_when(lossyear,year,cutoffs = [2,5,8]):
    """
    Creates categorical variables for a deforestation event given cutoffs.
    Values in cutoffs define the time bins.
    Returns len(cutoffs) + 1 categorical layers stacked in one float tensor.
    Example: cutoffs = [2,5,8], num of layers = 4, considered year = year
    Categories:
    0) if year - lossyear is in [0,2)
    1) if year - lossyear is in [2,5)
    2) if year - lossyear is in [5,8)
    3) 8 years ago or more
    No prior knowledge:
    if the loss event is in year > considered year, or the pixel is not deforested
    up to 2018+, all categories have value 0.
    Masked pixels (lossyear == -1) have value -1 in every layer.

    Note: `lossyear` is modified IN PLACE — future loss years (> year) are set
    to 0 (the notebook demo relies on observing this). The `cutoffs` list is
    NOT modified.
    """
    # Build the bin edges on a copy: the previous implementation appended to
    # `cutoffs` directly, which corrupted the (shared, mutable) default list on
    # every call and mutated any caller-supplied list.
    bins = [0] + list(cutoffs) + [year]
    # Future deforestation is unknown at the considered year: treat as "no loss".
    lossyear[lossyear > year] = 0
    layers = []
    for lo, hi in zip(bins[:-1], bins[1:]):
        layer = torch.zeros(lossyear.size())
        layer[(lo <= (year - lossyear)) & ((year - lossyear) < hi)] = 1
        layers.append(layer.float())
    layers = torch.stack(layers)
    # Propagate the NA encoding: masked pixels are -1 in every layer.
    layers[:, (lossyear == -1)] = -1
    return layers
# Demo of if_def_when on a small synthetic lossyear tensor covering every case:
# losses in 1..18, a masked pixel (-1) and a never-deforested pixel (0).
lossyear = torch.tensor([[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15],[16,17,18,-1,0]])
print("loss year cases:")
print(lossyear)
currentyear = 14
print("\nCurrent year: t=", currentyear)
when = if_def_when(lossyear, currentyear, cutoffs = [2,5,8])
# if_def_when zeroes future loss years in `lossyear` in place.
print("\nNo prior knowledge for future deforestration. Lossyear modified to: ")
print(lossyear)
print("\nCorresponding categorical layers")
for i in range(0, len(when)):
    print("\nTime bin category %d\n"%i,when[i])
```
# Create Tensors of Features to be used from models and save them for later usage. *create_tnsors_pixels*
Given year t, and cutoffs as defined above returns (and save them if wherepath!= None):
<br>Static tensor $S$,
<br>Non static tensor $X_t$,
<br>list of valid pixels coordinates $\mathbf{J}_{t}$
<br>list of labels corresponding to this valid cordinates: $Y^j_{t}$ where $j \in \mathbf{J}_{t}$
<br>Other two inputs are:
<br>sourcepath = path to tiff files
<br>wherepath = if not None, path to the folder where to save these tensors
<br>Static tensor is identical for any year, hence save only once. Static tensor has datamask layer and treecover
<br>Nonstatic tensor has if_deff_when cathegorical layers and the four bands of the landsat image stacked
Valid pixels are these that meet all the following conditions :
1. datamask == 1 , eg land not water body
2. tree_cover > tree_p or gain == 1 if threecanpy in 2000 > tree_p or became forest up to 2012
3. lossyear > end_year or lossyear == 0 experienced loss only after that year (or not at all in the study period)
4. buffer == 0 is in AOI area
$$\mathbf{J}_t =: \{j \in \mathbb{M} : ( lossyear_j > t \,\ \cup \,\ lossyear_j = 0 )$$
$$\cap (datamask_j = 1) \cap (treecover_j > 30\% \,\ \cup \,\ gain_j = 1) \}$$
Where $\mathbb{M}$ is the index set of pixels lyng within the ROI boundries. For each valid pixel j assign label $Y^j_{t} = 1$ if it is deforested in exactly in year = t+1 or zero otherwise.
<br>All pixels in the rasters and produced tensors have value -1 in the locations outside ROI area and its buffer
```
def create_tnsors_pixels(end_year, tree_p = 30, cutoffs = [2,5,8] , sourcepath = sourcepath ,rescale = True, wherepath = None):
    """
    Given a year and cutoffs as defined above, returns (and saves if wherepath != None):
    - static tensor: datamask + treecover channels; identical for every year,
      hence written to disk only once,
    - non-static tensor: if_def_when categorical layers stacked with the
      Landsat composite bands,
    - list of valid pixel coordinates,
    - list of labels corresponding to these valid coordinates.

    sourcepath = path to tiff files
    wherepath = if not None, path to where to save the tensors

    Valid pixels are those that meet all the following conditions:
    1. datamask == 1, i.e. land (not water body)
    2. tree_cover > tree_p or gain == 1: tree canopy in 2000 > tree_p, or became forest up to 2012
    3. lossyear > end_year or lossyear == 0: experienced loss only after that year (or not at all in the study period)
    4. buffer == 0: lies in the Madre de Dios area

    Each valid pixel gets label 1 iff it is deforested exactly in end_year + 1.
    All pixels in the rasters and produced tensors have value -1 in the
    locations outside the area of interest and its buffer.
    """
    # Load all rasters; every layer uses -1 outside the AOI + buffer.
    buffer = to_Tensor(sourcepath,'buffer/if_in_buffer.tif')
    gain = to_Tensor(sourcepath,'Hansen/gain_2018.tif')
    lossyear = to_Tensor(sourcepath,'Hansen/lossyear_2018.tif')
    datamask = to_Tensor(sourcepath,'Hansen/datamask_2018.tif')
    tree_cover = to_Tensor(sourcepath,'Hansen/treecover2000_2018.tif')
    tree_cover = tree_cover.float()
    datamask = datamask.float()
    #Create list of valid pixels coordinates
    pixels = ( (datamask == 1) & #land (not water body)
        ((tree_cover > tree_p ) | (gain == 1)) & #if forest in 2000 or became forest up to 2012
        ((lossyear > end_year) | (lossyear == 0))& #experienced loss only after that year (or not at all in the study period)
        (buffer == 0)).nonzero() #In area of interest
    #Create list of valid pixels labels in year + 1
    # NOTE: labels must be computed BEFORE if_def_when is called, because
    # if_def_when zeroes future loss years in `lossyear` in place.
    labels = lossyear[pixels[:,0],pixels[:,1]] == (end_year+1) #can be change to >= (end_year+1) & <111
    when = if_def_when(lossyear,end_year, cutoffs = cutoffs)
    image = last_to_image(sourcepath,end_year)
    if rescale:
        #Rescale datamask to have values -1 for nan, 0 for land, 1 for water
        datamask[datamask != -1] = datamask[datamask != -1] - 1
        #Rescale tree_cover to have values in [0, 1] and -1 for nan
        tree_cover[tree_cover != -1] = tree_cover[tree_cover != -1]*0.01
        #Normalize image by channel with -1 values for nan
        image, _, _ = rescale_image(image)
    #Create non static tensor: categorical deforestation layers + image bands
    image = torch.cat((when,image),dim=0)
    #Creates static tensor
    static = torch.stack((datamask,tree_cover))
    if wherepath:
        # The static tensor is year-independent, so write it only once.
        if not os.path.isfile(wherepath+"/"+"static.pt"):
            torch.save(static, wherepath+"/"+"static.pt")
        torch.save(image, wherepath+"/"+"tensor_%d.pt"%(end_year))
        torch.save(pixels,wherepath+"/"+"pixels_cord_%d.pt"%(end_year))
        torch.save(labels,wherepath+"/"+"labels_%d.pt"%(end_year))
    return static, image, pixels, labels
# Build (and time) the 2018 tensors; wherepath=None skips saving inside the
# call, so the image and pixel tensors are saved explicitly below.
start = time.time()
static, image, pixels, labels = create_tnsors_pixels(18, tree_p = 30, cutoffs = [2,5,8] , sourcepath = sourcepath ,rescale = True, wherepath = None)
print("Total time (in seconds) needed to create tensors: ",time.time() - start)
torch.save(image, wherepath+"/"+"tensor_%d.pt"%(18))
torch.save(pixels,wherepath+"/"+"pixels_cord_%d.pt"%(18))
```
# Data Classes
In PyTorch one needs a data class to load the data. This data class must have a \__getitem\__(self, idx) function that returns the input of the model and the output to which the model's prediction is compared. Here \__getitem\__ returns the model input image/time series of images, the label of the central pixel and its coordinates. Each class must also have a function that returns the dataset length: \__len\__(self)
## CNN data class
As mentioned above, the data set for our CNN model is trained on all pairs of images labels for years 2014, 2015, 2016, 2017. To make this union of data pairs, the class is given lists.
For example, to create CNN data class that has data points from Images in 2015 and 2016 and their corresponding labels in 2016 and 2017 respectively, image must be the list $\mathbf{X_{2015}}$, $\mathbf{X_{2016}}$, pixels must be the list of valid pixels coordinates in the corresponding year $\mathbf{J_{2015}}$, $\mathbf{J_{2016}}$, and labels must be $\mathbf{Y^{j_{2015}}_{2016}}$, $\mathbf{Y^{j_{2016}}_{2017}}$. The static tensor $\mathbf{S}$ is also gven to the data class.
The data set lenght is then the lenght of the set $\mathbf{J_{2015}} \cup \mathbf{J_{2016}}$ and when the function \_getitem\_(self, idx) is given a index it returns the coresonding pixel static tensor $\mathbf{S}$ , image and label. The function idx_to_image(self,idx) maps each index to the correct pair of image,label with respect to wich year pair we refer to by this index.
The data class also have function change_size(self, new_size) that allows the spatial size of the tensor to be changed withouth re-initializing the class.
```
class DatasetCNN(Dataset):
    """
    CNN data class.

    May be given lists of yearly images, pixel-coordinate tensors and label
    tensors; the data points are then concatenated so the dataset contains
    every (image patch, next-year label) pair, ordered as in the flattened
    lists. With length-1 lists the dataset covers a single year.

    size is the radius of the returned patch (spatial side = 2*size + 1);
    it can be changed later with change_size(new_size).
    """
    def __init__(self, size, static, image, pixels, labels):
        self.size = size
        self.lenghts = None  # original (misspelled) name kept for backward compatibility
        if len(image) == 1:
            image = torch.cat(image, dim=0)
            self.image = torch.cat((static, image), dim=0)
        else:
            # Concatenate static onto every yearly image once, at init time,
            # so __getitem__ only has to slice a ready tensor.
            self.lenghts = [len(i) for i in pixels]
            # Cumulative offsets, computed once (previously np.cumsum was
            # recomputed on every __getitem__ call).
            self._offsets = [0] + list(np.cumsum(self.lenghts))
            self.image = [torch.cat((static, im), dim=0) for im in image]
        self.pixels = torch.cat(pixels, dim=0)
        self.labels = torch.cat(labels, dim=0)

    def idx_to_image(self, idx):
        """Return the (static + yearly) image tensor that flat index idx falls in."""
        if self.lenghts is None:  # fixed: was `== None` (identity check is the idiom)
            return self.image
        if 0 <= idx < self._offsets[-1]:
            for i in range(1, len(self._offsets)):
                if idx < self._offsets[i]:
                    return self.image[i - 1]
        # Previously an out-of-range idx raised an opaque NameError here.
        raise IndexError("index %d out of range for dataset of length %d"
                         % (idx, len(self)))

    def change_size(self, new_size):
        """Set a new patch radius without re-initializing the dataset."""
        self.size = new_size

    def __getitem__(self, idx):
        image = self.idx_to_image(idx)
        patch = image[:,
                      (self.pixels[idx, 0] - self.size):(self.pixels[idx, 0] + self.size + 1),
                      (self.pixels[idx, 1] - self.size):(self.pixels[idx, 1] + self.size + 1)]
        return patch, self.labels[idx], self.pixels[idx]

    def __len__(self):
        return len(self.pixels)
```
## load_CNNdata
This function takes as input the size one wish the CNNdata object to have and the start and end year of the pairs considered. It then loads the lists needed to initialize the data class and returns the corresponding CNNdata object.
If one wish to add extra static layers, than add_static must be a list of this tensors (2D or 3D for multi-channels)
If one wish to add extra time layers, than add_time must be a list of lists of this tensors (2D or 3D for multi-channels) where the lists are sorted in time and are of lenght end_year - start_year + 1
Eg:add_time = [[layer_1_2014, layer_2_2014],[layer_1_2015, layer_2_2015],[layer_1_2016, layer_2_2016]] where
layer_1 and layer_2 can be 2D or 3D tensors.
```
def load_CNNdata(size, start_year, end_year, add_static = None, add_time = None, path = wherepath):
    """
    Initialise a DatasetCNN for the given year range and patch radius.

    start_year / end_year define how many (image, next-year label) pairs the
    dataset contains; size defines the returned patch radius.
    add_static / add_time optionally append extra 2D or 3D channels to the
    static tensor / each yearly tensor.
    path = path to the saved tensors.
    """
    path = path + "/"
    static = torch.load(path + "static.pt")
    if add_static:
        for extra in add_static:
            # 2D layers become single-channel 3D before concatenation.
            if len(extra.shape) == 2:
                extra = extra.unsqueeze(dim=0)
            static = torch.cat([static, extra], dim=0)
    images_ls, pixels_ls, labels_ls = [], [], []
    for i, year in enumerate(range(start_year, end_year + 1)):
        image = torch.load(path + "tensor_%d.pt" % (year))
        if add_time:
            for extra in add_time[i]:
                if len(extra.shape) == 2:
                    extra = extra.unsqueeze(dim=0)
                image = torch.cat([image, extra], dim=0)
        images_ls.append(image)
        pixels_ls.append(torch.load(path + "pixels_cord_%d.pt" % (year)))
        labels_ls.append(torch.load(path + "labels_%d.pt" % (year)))
    return DatasetCNN(size, static=static, image=images_ls, pixels=pixels_ls, labels=labels_ls)
```
# DatasetRNN
This class is similar to the CNN class. To intialize it one needs to give it the size (= r) of the tensors, the static tensor, a 4D tensor coresponidng to the time series of images, organised as follows (channels,time,height,width), the set of valid pixels in the considered year and the coresponding next year labels for this set of pixels.
For example, to costruct the data set $\mathbf{S,X_{2014},X_{2015},X_{2016}}$ , $\mathbf{Y^{j_{2016}}_{2017}}$ , **static** must be $\mathbf{S}$, **image** bust be the 4D tensor, stack of $\mathbf{X_{2014},X_{2015},X_{2016}}$, **pixels** must be $\mathbf{J_{2016}}$ and **labels** must be $\mathbf{Y^{j_{2016}}_{2017}}$.
\__getitem\__(self, idx) returns the static image, the 4D tensor of time series of nonstatic images, the next year label and the coordinates of the pixel under that index.
```
class DatasetRNN(Dataset):
    """
    Data class for Models 2:4.

    __getitem__ returns ((static patch, time-series patch), label), where the
    non-static patch is 4D with shape (c, t, h, w) and h = w = 2*size + 1.
    change_size(new_size) updates the patch radius in place.
    """
    def __init__(self, size, static, images, pixels, labels ):
        self.size = size
        self.static = static
        self.images = images
        self.pixels = pixels
        self.labels = labels

    def change_size(self, new_size):
        """Set a new patch radius: h and w become 2*new_size + 1."""
        self.size = new_size

    def __getitem__(self, idx):
        row = self.pixels[idx, 0]
        col = self.pixels[idx, 1]
        rows = slice(row - self.size, row + self.size + 1)
        cols = slice(col - self.size, col + self.size + 1)
        static_patch = self.static[:, rows, cols]
        # (c, t, h, w)
        series_patch = self.images[:, :, rows, cols]
        return (static_patch, series_patch), self.labels[idx]

    def __len__(self):
        return len(self.pixels)
```
**load_RNNdata**
<br>Function that takes size, start_year, end_year and returns DataRNN object corresponding to this time period and size
If one wish to add extra static layers, than add_static must be a list of this tensors (2D or 3D for multi-channels)
If one wish to add extra time layers, than add_time must be a list of lists of this tensors (2D or 3D for multi-channels) where the lists are sorted in time and are of lenght end_year - start_year + 1
Eg:add_time = [[layer_1_2014, layer_2_2014],[layer_1_2015, layer_2_2015],[layer_1_2016, layer_2_2016]] where layer_1 and layer_2 can be 2D or 3D tensors.
```
def load_RNNdata(size, start_year, end_year, add_static = None, add_time = None, path = wherepath):
    """
    Initialise a DatasetRNN for the given year range and patch radius.

    start_year / end_year define the number of elements in the image time
    series; size defines the returned patch radius.
    add_static / add_time optionally append extra 2D or 3D channels to the
    static tensor / each yearly tensor.
    path = path to the saved tensors.
    """
    path = path + "/"
    yearly = []
    for i, year in enumerate(range(start_year, end_year + 1)):
        image = torch.load(path + "tensor_%d.pt" % (year))
        if add_time:
            for extra in add_time[i]:
                # 2D layers become single-channel 3D before concatenation.
                if len(extra.shape) == 2:
                    extra = extra.unsqueeze(dim=0)
                image = torch.cat([image, extra], dim=0)
        # Insert the time axis so the stack below yields (c, t, h, w).
        yearly.append(image.unsqueeze(dim=1))
    images = torch.cat(yearly, dim=1)
    static = torch.load(path + "static.pt")
    if add_static:
        for extra in add_static:
            if len(extra.shape) == 2:
                extra = extra.unsqueeze(dim=0)
            static = torch.cat([static, extra], dim=0)
    # Valid pixels / labels are those of the final year in the series.
    pixels = torch.load(path + "pixels_cord_%d.pt" % (end_year))
    labels = torch.load(path + "labels_%d.pt" % (end_year))
    return DatasetRNN(size=size, images=images, static=static, pixels=pixels, labels=labels)
```
# Load Data
## Load CNN data with a two-year pair, 16 & 17
<br> Data_16_17 = load_CNNdata(4, start_year = 16, end_year = 17, path = wherepath) initializes CNN data for two years, 16 & 17, with image size 9
<br> Data_16_17.change_size(2) to change the image size from $2 \times 4+1 = 9$ to $2 \times 2+1 = 5$
```
# Demo: two-year CNN dataset (2016 & 2017), then shrink the crop from 9x9 to 5x5.
Data_16_17 = load_CNNdata(4, start_year = 16, end_year = 17, path = wherepath)
print("Data length of 2 years:", len(Data_16_17))
# Indices of positive (deforested) labels
loss = (Data_16_17.labels == 1).nonzero()
print("% deforested valid pixels in 2017 and 2018:")
print(len(loss)/len(Data_16_17.labels))
# Typos fixed in the user-facing messages ("lenght", "statelite", "Chnage")
print("\nFirst band of satellite image layer: \n")
print(Data_16_17[8498][0][-4,:,:])
print("\nChange size from 9 to 5:\n")
Data_16_17.change_size(2)
print("\n")
print(Data_16_17[8498][0][-4,:,:])
del Data_16_17
```
## Load 1 year data CNN
Data_17 = load_CNNdata(4, start_year = 17, end_year = 17, path = wherepath)
```
# Demo: single-year CNN dataset (2017 images, 2018 labels).
Data_17 = load_CNNdata(4, start_year = 17, end_year = 17, path = wherepath)
print("% valid pixels deforested in 2018: ", len((Data_17.labels == 1).nonzero())/len(Data_17))
# Typos fixed in the user-facing messages ("statelite", "Chnage")
print("\nFirst band of satellite image layer: \n")
print(Data_17[55555][0][-4,:,:])
print("\nChange size from 9 to 5:\n")
Data_17.change_size(2)
print(Data_17[55555][0][-4,:,:])
del Data_17
```
# Add DSM static tensor after it being transformed and rescaled
```
# Load the digital surface model tensor and append it as an extra static channel
DSM = torch.load(wherepath+"/DSM.pt")
Data_17 = load_CNNdata(4, start_year = 17, end_year = 17, path = wherepath, add_static = [DSM])
```
### data with updated layers:
```
Data_17[99][0].shape
```
# Load RNN data:
RNNData = load_RNNdata(size = 2 , start_year = 14 , end_year = 16)
```
# Demo: RNN dataset over 2014-2016; each sample is a (static, time-series) pair.
RNNData = load_RNNdata(size = 2 , start_year = 14 , end_year = 16)
# Typos fixed in the user-facing messages ("statich", "chanels"); the second
# tensor's leading axis holds per-year channels, not static ones.
print("shape of the static tensor: (static channels, h, w) = ", RNNData[22][0][0].shape)  # static
print("shape of the non-static tensor: (channels, time, h, w) = ", RNNData[22][0][1].shape)  # images
loss = (RNNData.labels == 1).nonzero()
print("% deforested valid pixels in 2017:")
print(len(loss)/len(RNNData.labels))
del RNNData
```
# Summary:
<br> 1) Save tensors with :
```
# Build and save the static / yearly tensors once per year (2014-2017).
# create_tnsors_pixels writes its outputs to `wherepath` as a side effect.
for year in range(14,18):
    print(year)
    static, image, pixels,labels = create_tnsors_pixels(year, tree_p = 0.3,
                                                        cutoffs = [2,5,8],
                                                        sourcepath = sourcepath,
                                                        wherepath = wherepath)
    print("Files saved!")
    print("\n")
```
2) Load data with:
CNNData_16_17 = load_CNNdata(4, start_year = 16, end_year = 17, path = wherepath)
CNNData_17 = load_CNNdata(4, start_year = 17, end_year = 17, path = wherepath)
RNNData = load_RNNdata(size = 4 , start_year = 14 , end_year = 16, path = wherepath)
To add an extra static layer:
server = '/rdsgpfs/general/user/kpp15/home/Hansen'
wherepath = server + '/data/raster/tensors'
%cd {server}/deforestation_forecasting/python_code
from Data_maker_loader import *
DSM = torch.load(wherepath+"/DSM.pt")
CNNdata = load_CNNdata(size = 45, start_year = 16, end_year = 16, path = wherepath, add_static = [DSM])
# or
CNNdata = load_CNNdata(size = 45, start_year = 14, end_year = 16, path = wherepath, add_static = [DSM])
# or
RNNdata = load_RNNdata(size = 45, start_year = 14, end_year = 16, path = wherepath, add_static = [DSM])
# Appendix
## idx - image map for CNN data with several {image,next_year_label} pairs.
<br>Pixel coordinates from different years and labels from different years are mixed, more specifically concatenated sequentially. Get the correct image for a given index, and therefore pixel[index], label[index].
<br> Example: 4 years, where year 1 has 3 pixels, year 2 - 6, year 3 - 1, year 4 - 5.
<br> Pixels and labels are concatenated and make 15 data points in total.
<br> The four different images correspond to index ranges 0:3, 3:9, 9:10, 10:15
<br> Note 0:3 returns 0,1,2 and 0:15 returns 0,1,2,..14. Python indexing starts from 0
```
# Toy demonstration of the idx -> image-tensor lookup used by the CNN dataset:
# four "years" holding 3, 6, 1 and 5 pixels respectively.
v = [[1, 1, 1], [1, 1, 1, 1, 1, 1], [1], [1, 1, 1, 1, 1]]
lenghts = [len(year_pixels) for year_pixels in v]
print("num of pixels in each year: ", lenghts)
# Cumulative boundaries; prepending 0 turns them into half-open ranges.
csum = list(np.cumsum(lenghts))
print("Culminative sum of number of pixels in each year: ", csum)
csum.insert(0, 0)
# Each idx belongs to the tensor whose [csum[i], csum[i+1]) range contains it.
for idx in range(csum[-1]):
    for tensor_id, (lo, hi) in enumerate(zip(csum[:-1], csum[1:])):
        if lo <= idx < hi:
            print("idx : ", idx, "in tensor: ", tensor_id)
            break
```
| github_jupyter |
```
import torch
from torch import nn
from torch import optim
from torchvision.datasets import MNIST
from torch.utils.data import TensorDataset, Dataset, DataLoader
from tqdm.notebook import tqdm
import numpy as np
from aijack.defense import VIB, KL_between_normals, mib_loss
# --- Variational Information Bottleneck (VIB) hyper-parameters ---
dim_z = 256          # dimensionality of the latent code z
beta = 1e-3          # weight of the I(Z;X) compression term in the MIB loss
batch_size = 100
samples_amount = 15  # Monte-Carlo samples of z per input
num_epochs = 1

# MNIST, flattened to 784-dim vectors scaled to [0, 1]
train_data = MNIST("MNIST/.", download=True, train=True)
train_dataset = TensorDataset(
    train_data.train_data.view(-1, 28 * 28).float() / 255, train_data.train_labels
)
train_loader = DataLoader(train_dataset, batch_size=batch_size)
test_data = MNIST("MNIST/.", download=True, train=False)
test_dataset = TensorDataset(
    test_data.test_data.view(-1, 28 * 28).float() / 255, test_data.test_labels
)
test_loader = DataLoader(test_dataset, batch_size=batch_size)

# Encoder emits 2*dim_z values per input — presumably split by VIB into the
# mean and scale of q(z|x); TODO confirm against the aijack VIB implementation.
encoder = nn.Sequential(
    nn.Linear(in_features=784, out_features=1024),
    nn.ReLU(),
    nn.Linear(in_features=1024, out_features=1024),
    nn.ReLU(),
    nn.Linear(in_features=1024, out_features=2 * dim_z),
)
decoder = nn.Linear(in_features=dim_z, out_features=10)  # linear classifier head

net = VIB(encoder, decoder, dim_z, num_samples=samples_amount)
opt = torch.optim.Adam(net.parameters(), lr=1e-4)
scheduler = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.97)
import time  # NOTE(review): imported but not used below

# Train / evaluate the VIB network.  Each epoch: one gradient-update pass over
# the training set, then a full pass over the test set for metrics only.
for epoch in range(num_epochs):
    # Per-batch metric accumulators (train)
    loss_by_epoch = []
    accuracy_by_epoch = []
    I_ZX_bound_by_epoch = []
    I_ZY_bound_by_epoch = []
    # Per-batch metric accumulators (test)
    loss_by_epoch_test = []
    accuracy_by_epoch_test = []
    I_ZX_bound_by_epoch_test = []
    I_ZY_bound_by_epoch_test = []

    # Decay the learning rate every second epoch, skipping epoch 0
    if epoch % 2 == 0 and epoch > 0:
        scheduler.step()

    # ---- training pass ----
    for x_batch, y_batch in tqdm(train_loader):
        x_batch = x_batch  # no-op; placeholder for a .to(device) move — TODO confirm
        y_batch = y_batch

        y_pred, result_dict = net(x_batch)
        sampled_y_pred = result_dict["sampled_decoded_outputs"]
        p_z_given_x_mu = result_dict["p_z_given_x_mu"]
        p_z_given_x_sigma = result_dict["p_z_given_x_sigma"]

        # Standard-normal prior N(0, I) for the compression term
        approximated_z_mean = torch.zeros_like(p_z_given_x_mu)
        approximated_z_sigma = torch.ones_like(p_z_given_x_sigma)

        loss, I_ZY_bound, I_ZX_bound = mib_loss(
            y_batch,
            sampled_y_pred,
            p_z_given_x_mu,
            p_z_given_x_sigma,
            approximated_z_mean,
            approximated_z_sigma,
            beta=beta,
        )

        prediction = torch.max(y_pred, dim=1)[1]
        accuracy = torch.mean((prediction == y_batch).float())

        loss.backward()
        opt.step()
        opt.zero_grad()

        I_ZX_bound_by_epoch.append(I_ZX_bound.item())
        I_ZY_bound_by_epoch.append(I_ZY_bound.item())
        loss_by_epoch.append(loss.item())
        accuracy_by_epoch.append(accuracy.item())

    # ---- evaluation pass (no parameter updates) ----
    for x_batch, y_batch in tqdm(test_loader):
        x_batch = x_batch
        y_batch = y_batch

        y_pred, result_dict = net(x_batch)
        sampled_y_pred = result_dict["sampled_decoded_outputs"]
        p_z_given_x_mu = result_dict["p_z_given_x_mu"]
        p_z_given_x_sigma = result_dict["p_z_given_x_sigma"]

        approximated_z_mean = torch.zeros_like(p_z_given_x_mu)
        approximated_z_sigma = torch.ones_like(p_z_given_x_sigma)

        loss, I_ZY_bound, I_ZX_bound = mib_loss(
            y_batch,
            sampled_y_pred,
            p_z_given_x_mu,
            p_z_given_x_sigma,
            approximated_z_mean,
            approximated_z_sigma,
            beta=beta,
        )

        prediction = torch.max(y_pred, dim=1)[1]
        accuracy = torch.mean((prediction == y_batch).float())

        I_ZX_bound_by_epoch_test.append(I_ZX_bound.item())
        I_ZY_bound_by_epoch_test.append(I_ZY_bound.item())
        loss_by_epoch_test.append(loss.item())
        accuracy_by_epoch_test.append(accuracy.item())

    # Epoch summary over the test set
    print(
        "epoch",
        epoch,
        "loss",
        np.mean(loss_by_epoch_test),
        "prediction",
        np.mean(accuracy_by_epoch_test),
    )
    print(
        "I_ZX_bound",
        np.mean(I_ZX_bound_by_epoch_test),
        "I_ZY_bound",
        np.mean(I_ZY_bound_by_epoch_test),
    )
from aijack.attack import GradientInversion_Attack

# Recompute the VIB loss for a single example and capture the gradients that a
# federated-learning server would observe for it — the input to the inversion
# attack below.  (x_batch / y_batch are leftovers from the last test batch.)
y_pred, result_dict = net(x_batch[:1])
sampled_y_pred = result_dict["sampled_decoded_outputs"]
p_z_given_x_mu = result_dict["p_z_given_x_mu"]
p_z_given_x_sigma = result_dict["p_z_given_x_sigma"]
# Standard-normal prior over the latent code, as in training
approximated_z_mean = torch.zeros_like(p_z_given_x_mu)
approximated_z_sigma = torch.ones_like(p_z_given_x_sigma)
loss, I_ZY_bound, I_ZX_bound = mib_loss(
    y_batch[:1],
    sampled_y_pred,
    p_z_given_x_mu,
    p_z_given_x_sigma,
    approximated_z_mean,
    approximated_z_sigma,
    beta=beta,
)
# Detach so the attack treats the gradients as fixed observed data.
# (A redundant no-op shallow copy of the list was removed here.)
received_gradients = torch.autograd.grad(loss, net.parameters())
received_gradients = [cg.detach() for cg in received_gradients]
from matplotlib import pyplot as plt
import cv2

# Run the CPL gradient-inversion attack from several random seeds and plot the
# reconstructed inputs (top: raw, bottom: median-blurred to suppress noise).
net.eval()
cpl_attacker = GradientInversion_Attack(
    net,
    (784,),
    lr=0.3,
    log_interval=50,
    optimizer_class=torch.optim.LBFGS,
    distancename="l2",
    optimize_label=False,
    num_iteration=200,
)

num_seeds = 5
fig = plt.figure(figsize=(6, 2))
for s in tqdm(range(num_seeds)):
    cpl_attacker.reset_seed(s)
    try:
        result = cpl_attacker.attack(received_gradients)
        # Top row: raw reconstruction; title shows the recovered label
        ax1 = fig.add_subplot(2, num_seeds, s + 1)
        ax1.imshow(result[0].cpu().detach().numpy()[0].reshape(28, 28), cmap="gray")
        ax1.axis("off")
        ax1.set_title(torch.argmax(result[1]).cpu().item())
        # Bottom row: median-blurred version
        ax2 = fig.add_subplot(2, num_seeds, num_seeds + s + 1)
        ax2.imshow(
            cv2.medianBlur(result[0].cpu().detach().numpy()[0].reshape(28, 28), 5),
            cmap="gray",
        )
        ax2.axis("off")
    except Exception:
        # The attack may diverge for some seeds; skip those panels.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        pass
plt.suptitle("Result of CPL")
plt.tight_layout()
plt.show()
```
| github_jupyter |
## 1-2. 量子ビットに対する基本演算
量子ビットについて理解が深まったところで、次に量子ビットに対する演算がどのように表されるかについて見ていこう。
これには、量子力学の性質が深く関わっている。
1. 線型性:
詳しくは第4章で学ぶのだが、量子力学では状態(量子ビット)の時間変化はつねに(状態の重ね合わせに対して)線型になっている。つまり、**量子コンピュータ上で許された操作は状態ベクトルに対する線型変換**ということになる
。1つの量子ビットの量子状態は規格化された2次元複素ベクトルとして表現されるのだったから、
1つの量子ビットに対する操作=線型演算は$2 \times 2$の**複素行列**によって表現される。
2. ユニタリ性:
さらに、確率の合計は常に1であるという規格化条件から、量子操作を表す線形演算(量子演算)に対してさらなる制限を導くことができる。まず、各測定結果を得る確率は複素確率振幅の絶対値の2乗で与えられるので、その合計は状態ベクトルの(自分自身との)内積と一致することに注目する:
$$
|\alpha|^2 + |\beta|^2 =
(\alpha^*, \beta^*)
\left(
\begin{array}{c}
\alpha
\\
\beta
\end{array}
\right) = 1.
$$
(アスタリスク $^*$ は複素共役を表す)
量子コンピュータで操作した後の状態は、量子演算に対応する線形変換(行列)を$U$とすると、
$$
U
\left(
\begin{array}{c}
\alpha
\\
\beta
\end{array}
\right)
$$
と書ける。この状態についても上記の規格化条件が成り立つ必要があるので、
$$
(\alpha^*, \beta^*)
U^\dagger U
\left(
\begin{array}{c}
\alpha
\\
\beta
\end{array}
\right) = 1
$$
が要請される。(ダガー $^\dagger$ は行列の転置と複素共役を両方適用したものを表し、エルミート共役という)
この関係式が任意の$\alpha$, $\beta$について成り立つ必要があるので、量子演算$U$は以下の条件を満たす**ユニタリー行列**に対応する:
$$
U^{\dagger} U = U U^{\dagger} = I.
$$
すなわち、**量子ビットに対する操作は、ユニタリー行列で表される**のである。
ここで、用語を整理しておく。量子力学では、状態ベクトルに対する線形変換のことを**演算子** (operator) と呼ぶ。単に演算子という場合は、ユニタリーとは限らない任意の線形変換を指す。それに対して、上記のユニタリー性を満たす線形変換のことを**量子演算** (quantum gate) と呼ぶ。量子演算は、量子状態に対する演算子のうち、(少なくとも理論的には)**物理的に実現可能なもの**と考えることができる。
### 1量子ビット演算の例:パウリ演算子
1つの量子ビットに作用する基本的な量子演算として**パウリ演算子**を導入する。
これは量子コンピュータを学んでいく上で最も重要な演算子であるので、定義を体に染み込ませておこう。
$$
\begin{eqnarray}
I&=&
\left(\begin{array}{cc}
1 & 0
\\
0 & 1
\end{array}
\right),\;\;\;
X=
\left(\begin{array}{cc}
0 & 1
\\
1 & 0
\end{array}
\right),\;\;\;
Y &=&
\left(\begin{array}{cc}
0 & -i
\\
i & 0
\end{array}
\right),\;\;\;
Z=
\left(\begin{array}{cc}
1 & 0
\\
0 & -1
\end{array}
\right).
\end{eqnarray}
$$
各演算子のイメージを説明する。
まず、$I$は恒等演算子で、要するに「何もしない」ことを表す。
$X$は古典ビットの反転(NOT)に対応し
$$X|0\rangle = |1\rangle, \;\;
X|1\rangle = |0\rangle
$$
のように作用する。(※ブラケット表記を用いた。下記コラムも参照。)
$Z$演算子は$|0\rangle$と$|1\rangle$の位相を反転させる操作で、
$$
Z|0\rangle = |0\rangle, \;\;
Z|1\rangle = -|1\rangle
$$
と作用する。
これは$|0\rangle$と$|1\rangle$の重ね合わせの「位相」という情報を保持できる量子コンピュータ特有の演算である。
例えば、
$$
Z \frac{1}{\sqrt{2}} ( |0\rangle + |1\rangle ) = \frac{1}{\sqrt{2}} ( |0\rangle - |1\rangle )
$$
となる。
$Y$演算子は$Y=iXZ$と書け、
位相の反転とビットの反転を組み合わせたもの(全体にかかる複素数$i$を除いて)であると考えることができる。
(詳細は Nielsen-Chuang の `1.3.1 Single qubit gates` を参照)
### SymPyを用いた一量子ビット演算
SymPyではよく使う基本演算はあらかじめ定義されている。
```
from IPython.display import Image, display_png
from sympy import *
from sympy.physics.quantum import *
from sympy.physics.quantum.qubit import Qubit,QubitBra
init_printing() # pretty-print vectors and matrices

# Run this cell only on Google Colaboratory
from IPython.display import HTML
def setup_mathjax():
    """Inject MathJax into Colab's output frame so SymPy's LaTeX output renders."""
    display(HTML('''
<script>
if (!window.MathJax && window.google && window.google.colab) {
window.MathJax = {
'tex2jax': {
'inlineMath': [['$', '$'], ['\\(', '\\)']],
'displayMath': [['$$', '$$'], ['\\[', '\\]']],
'processEscapes': true,
'processEnvironments': true,
'skipTags': ['script', 'noscript', 'style', 'textarea', 'code'],
'displayAlign': 'center',
},
'HTML-CSS': {
'styles': {'.MathJax_Display': {'margin': 0}},
'linebreaks': {'automatic': true},
// Disable to prevent OTF font loading, which aren't part of our
// distribution.
'imageFont': null,
},
'messageStyle': 'none'
};
var script = document.createElement("script");
script.src = "https://colab.research.google.com/static/mathjax/MathJax.js?config=TeX-AMS_HTML-full,Safe";
document.head.appendChild(script);
}
</script>
'''))
# Re-inject MathJax before every cell runs
get_ipython().events.register('pre_run_cell', setup_mathjax)
from sympy.physics.quantum.gate import X,Y,Z,H,S,T,CNOT,SWAP, CPHASE
```
演算子は何番目の量子ビットに作用するか、
というのを指定して `X(0)` のように定義する。
また、これを行列表示するときには、いくつの量子ビットの空間で表現するか
`nqubits`というのを指定する必要がある。
まだ、量子ビットは1つしかいないので、
`X(0)`、`nqubits=1`としておこう。
```
X(0)
represent(X(0),nqubits=1) # Pauli X as a 2x2 matrix in a 1-qubit space
```
同様に、`Y`, `Z`なども利用することができる。それに加え、アダマール演算 `H` や、位相演算 `S`、そして$\pi/4$の位相演算 `T` も利用することができる(これらもよく出てくる演算で、定義は各行列を見てほしい):
```
# Matrix forms of the Hadamard, phase (S), and pi/4-phase (T) gates
represent(H(0),nqubits=1)
represent(S(0),nqubits=1)
represent(T(0),nqubits=1)
```
これらの演算を状態に作用させるのは、
```
# Build |0> and compose a gate sequence with * (applied right-to-left)
ket0 = Qubit('0')
S(0)*Y(0)*X(0)*H(0)*ket0
```
のように `*`で書くことができる。実際に計算をする場合は `qapply()`を利用する。
```
qapply(S(0)*Y(0)*X(0)*H(0)*ket0)
```
この列ベクトル表示が必要な場合は、
```
represent(qapply(S(0)*Y(0)*X(0)*H(0)*ket0))
```
のような感じで、SymPyは簡単な行列の計算はすべて自動的にやってくれる。
---
### コラム:ブラケット記法
ここで一旦、量子力学でよく用いられるブラケット記法というものについて整理しておく。ブラケット記法に慣れると非常に簡単に見通しよく計算を行うことができる。
列ベクトルは
$$
|\psi \rangle = \left(
\begin{array}{c}
\alpha
\\
\beta
\end{array}
\right)
$$
とかくのであった。これを**ケット**と呼ぶ。同様に、行ベクトルは
$$
\langle \psi | = ( |\psi \rangle ) ^{\dagger} = ( \alpha ^* , \beta ^*)
$$
とかき、これを**ブラ**と呼ぶ。${\dagger}$マークは転置と複素共役を取る操作で、列ベクトルを行ベクトルへと移す。
2つのベクトル、
$$
|\psi \rangle = \left(
\begin{array}{c}
\alpha
\\
\beta
\end{array}
\right), \;\;\;
|\phi \rangle = \left(
\begin{array}{c}
\gamma
\\
\delta
\end{array}
\right)
$$
があったとする。ブラとケットを抱き合わせると
$$
\langle \phi | \psi \rangle = (\gamma ^* , \delta ^* ) \left(
\begin{array}{c}
\alpha
\\
\beta
\end{array}
\right) = \gamma ^* \alpha + \delta ^* \beta
$$
となり、**内積**に対応する。行ベクトルと列ベクトルをそれぞれブラ・ケットと呼ぶのは、このように並べて内積を取ると「ブラケット」になるからである。
逆に、背中合わせにすると
$$
|\phi \rangle \langle \psi | = \left(
\begin{array}{c}
\gamma
\\
\delta
\end{array}
\right) (\alpha ^* , \beta ^*) = \left(
\begin{array}{cc}
\gamma \alpha ^* & \gamma \beta ^*
\\
\delta \alpha ^* & \delta \beta ^*
\end{array}
\right)
$$
となり、演算子となる。例えば、$X$演算子は
$$
X= \left(
\begin{array}{cc}
0 & 1
\\
1 & 0
\end{array}
\right)
=
|0\rangle \langle 1 | + |1\rangle \langle 0|
$$
のように書ける。このことを覚えておけば
$$
\langle 0| 0\rangle = \langle 1 | 1\rangle = 1, \;\;\; \langle 0 | 1 \rangle = \langle 1 | 0 \rangle = 0
$$
から
$$
X |0\rangle = |1\rangle
$$
を行列を書かずに計算できるようになる。
**量子情報の解析計算においては、実際にベクトルの要素を書き下して計算をすることはほとんどなく、このようにブラケットを使って形式的に書いて計算してしまう場合が多い**(古典計算機上で量子コンピュータをシミュレーションする場合はベクトルをすべて書き下すことになる)。
同様に、
$$
I = |0\rangle \langle 0 | + |1\rangle \langle 1| , \;\;\; Z = |0\rangle \langle 0| - |1\rangle \langle 1|
$$
も覚えておくと便利である。
| github_jupyter |
```
%matplotlib inline
from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(1337) # for reproducibility
from theano import function
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape, Layer
from keras.layers.convolutional import Convolution2D, MaxPooling2D, UpSampling2D
from keras.utils import np_utils
from keras import backend as K
from keras.callbacks import ModelCheckpoint
from keras.regularizers import l2
from seya.layers.variational import VariationalDense as VAE
from seya.layers.convolutional import GlobalPooling2D
from seya.utils import apply_model
from agnez import grid2d
batch_size = 100
nb_epoch = 100
code_size = 200  # dimensionality of the VAE latent code

# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 7

nb_classes = 10

# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# reshape to (N, 1, 28, 28) and scale pixels to [0, 1]
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

# hold out the last 10k training images for validation
X_valid = X_train[50000:]
Y_valid = Y_train[50000:]
X_train = X_train[:50000]
Y_train = Y_train[:50000]
# --- Convolutional VAE encoder: image -> variational latent code ---
enc = Sequential()
enc.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                      W_regularizer=l2(.0005),
                      border_mode='same',
                      input_shape=(1, img_rows, img_cols)))
enc.add(Dropout(.5))
enc.add(Activation('relu'))
enc.add(Convolution2D(nb_filters, 3, 3,
                      border_mode='same',
                      input_shape=(1, img_rows, img_cols)))
enc.add(Activation('tanh'))
enc.add(MaxPooling2D())
enc.add(Flatten())
pool_shape = enc.output_shape  # flattened feature size feeding the variational layer
enc.add(VAE(code_size, batch_size=batch_size, activation='tanh',
            prior_logsigma=1.7))
# enc.add(Activation(soft_threshold))

# --- Decoder: code -> feature map -> image ---
dec = Sequential()
dec.add(Dense(np.prod(pool_shape[1:]), input_dim=code_size))
# Integer division (//): Reshape needs integer dimensions, and img_rows / 2
# yields a float under Python 3's true division.
dec.add(Reshape((nb_filters, img_rows // 2, img_cols // 2)))
dec.add(Activation('relu'))
dec.add(Convolution2D(nb_filters, 3, 3,
                      border_mode='same'))
dec.add(Activation('relu'))
dec.add(Convolution2D(784, 3, 3,
                      border_mode='same'))
dec.add(GlobalPooling2D())
dec.add(Activation('sigmoid'))
dec.add(Flatten())

# End-to-end autoencoder trained to reconstruct the input pixels
model = Sequential()
model.add(enc)
model.add(dec)
model.compile(loss='binary_crossentropy', optimizer='adam')

cbk = ModelCheckpoint('vae/vae.hdf5', save_best_only=True)
try:
    model.fit(X_train, X_train.reshape((-1, 784)), batch_size=batch_size, nb_epoch=nb_epoch, verbose=1,
              validation_data=(X_valid, X_valid.reshape((-1, 784))), callbacks=[cbk])
except:
    # Bare except is deliberate here: it lets a keyboard interrupt stop
    # training early while keeping the checkpointed model usable below.
    pass
```
# Sample
```
# Compile a function mapping latent codes through the trained decoder
X = K.placeholder(ndim=2)
Y = dec(X)
F = function([X], Y, allow_input_downcast=True)

# Decode 100 random Laplace-distributed codes and display them as a grid
x = np.random.laplace(0, 1, size=(100, code_size))
y = F(x)
I = grid2d(y.reshape((100, -1)))
plt.imshow(I)
```
# Visualize first layers
```
# Visualize the 32 first-layer convolution filters as a grid of images
W = np.asarray(K.eval(enc.layers[0].W))
W = W.reshape((32, -1))
I = grid2d(W)
plt.imshow(I)
```
| github_jupyter |
<table align="center">
<td align="center"><a target="_blank" href="http://introtodeeplearning.com">
<img src="http://introtodeeplearning.com/images/colab/mit.png" style="padding-bottom:5px;" />
Visit MIT Deep Learning</a></td>
<td align="center"><a target="_blank" href="https://colab.research.google.com/github/aamini/introtodeeplearning/blob/master/lab2/Part1_MNIST.ipynb">
<img src="http://introtodeeplearning.com/images/colab/colab.png?v2.0" style="padding-bottom:5px;" />Run in Google Colab</a></td>
<td align="center"><a target="_blank" href="https://github.com/aamini/introtodeeplearning/blob/master/lab2/Part1_MNIST.ipynb">
<img src="http://introtodeeplearning.com/images/colab/github.png" height="70px" style="padding-bottom:5px;" />View Source on GitHub</a></td>
</table>
# Copyright Information
```
# Copyright 2020 MIT 6.S191 Introduction to Deep Learning. All Rights Reserved.
#
# Licensed under the MIT License. You may not use this file except in compliance
# with the License. Use and/or modification of this code outside of 6.S191 must
# reference:
#
# © MIT 6.S191: Introduction to Deep Learning
# http://introtodeeplearning.com
#
```
# Laboratory 2: Computer Vision
# Part 1: MNIST Digit Classification
In the first portion of this lab, we will build and train a convolutional neural network (CNN) for classification of handwritten digits from the famous [MNIST](http://yann.lecun.com/exdb/mnist/) dataset. The MNIST dataset consists of 60,000 training images and 10,000 test images. Our classes are the digits 0-9.
First, let's download the course repository, install dependencies, and import the relevant packages we'll need for this lab.
```
# Import Tensorflow 2.0
#%tensorflow_version 2.x
import tensorflow as tf

#!pip install mitdeeplearning
import mitdeeplearning as mdl

import matplotlib.pyplot as plt
import numpy as np
import random
from tqdm import tqdm

# Check that we are using a GPU; if not, switch runtimes
# using Runtime > Change Runtime Type > GPU
assert len(tf.config.list_physical_devices('GPU')) > 0
```
## 1.1 MNIST dataset
Let's download and load the dataset and display a few random samples from it:
```
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Scale pixels to [0, 1] and append a channel axis -> (N, 28, 28, 1) float32;
# labels become int64 class indices.
train_images = (np.expand_dims(train_images, axis=-1)/255.).astype(np.float32)
train_labels = (train_labels).astype(np.int64)
test_images = (np.expand_dims(test_images, axis=-1)/255.).astype(np.float32)
test_labels = (test_labels).astype(np.int64)
```
Our training set is made up of 28x28 grayscale images of handwritten digits.
Let's visualize what some of these images and their corresponding training labels look like.
```
# Show 36 random training digits in a 6x6 grid with their labels
plt.figure(figsize=(10,10))
random_inds = np.random.choice(60000,36)
for i in range(36):
    plt.subplot(6,6,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    image_ind = random_inds[i]
    plt.imshow(np.squeeze(train_images[image_ind]), cmap=plt.cm.binary)
    plt.xlabel(train_labels[image_ind])
```
## 1.2 Neural Network for Handwritten Digit Classification
We'll first build a simple neural network consisting of two fully connected layers and apply this to the digit classification task. Our network will ultimately output a probability distribution over the 10 digit classes (0-9). This first architecture we will be building is depicted below:

### Fully connected neural network architecture
To define the architecture of this first fully connected neural network, we'll once again use the Keras API and define the model using the [`Sequential`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential) class. Note how we first use a [`Flatten`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Flatten) layer, which flattens the input so that it can be fed into the model.
In this next block, you'll define the fully connected layers of this simple network.
```
def build_fc_model():
    """Build a two-layer fully connected classifier over flattened 28x28 MNIST images."""
    fc_model = tf.keras.Sequential([
        # First define a Flatten layer: (28, 28, 1) -> 784
        tf.keras.layers.Flatten(),
        # '''TODO: Define the activation function for the first fully connected (Dense) layer.'''
        tf.keras.layers.Dense(128, activation=tf.nn.relu),
        # '''TODO: Dense layer to output classification probabilities'''
        tf.keras.layers.Dense(10, activation=tf.nn.softmax)
    ])
    return fc_model

model = build_fc_model()
```
As we progress through this next portion, you may find that you'll want to make changes to the architecture defined above. **Note that in order to update the model later on, you'll need to re-run the above cell to re-initialize the model.**
Let's take a step back and think about the network we've just created. The first layer in this network, `tf.keras.layers.Flatten`, transforms the format of the images from a 2d-array (28 x 28 pixels), to a 1d-array of 28 * 28 = 784 pixels. You can think of this layer as unstacking rows of pixels in the image and lining them up. There are no learned parameters in this layer; it only reformats the data.
After the pixels are flattened, the network consists of a sequence of two `tf.keras.layers.Dense` layers. These are fully-connected neural layers. The first `Dense` layer has 128 nodes (or neurons). The second (and last) layer (which you've defined!) should return an array of probability scores that sum to 1. Each node contains a score that indicates the probability that the current image belongs to one of the handwritten digit classes.
That defines our fully connected model!
### Compile the model
Before training the model, we need to define a few more settings. These are added during the model's [`compile`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#compile) step:
* *Loss function* — This defines how we measure how accurate the model is during training. As was covered in lecture, during training we want to minimize this function, which will "steer" the model in the right direction.
* *Optimizer* — This defines how the model is updated based on the data it sees and its loss function.
* *Metrics* — Here we can define metrics used to monitor the training and testing steps. In this example, we'll look at the *accuracy*, the fraction of the images that are correctly classified.
We'll start out by using a stochastic gradient descent (SGD) optimizer initialized with a learning rate of 0.1. Since we are performing a categorical classification task, we'll want to use the [cross entropy loss](https://www.tensorflow.org/api_docs/python/tf/keras/metrics/sparse_categorical_crossentropy).
You'll want to experiment with both the choice of optimizer and learning rate and evaluate how these affect the accuracy of the trained model.
```
'''TODO: Experiment with different optimizers and learning rates. How do these affect
the accuracy of the trained model? Which optimizers and/or learning rates yield
the best performance?'''
# SGD with lr = 0.1; sparse categorical cross-entropy matches the integer
# (non-one-hot) labels prepared above.
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=1e-1),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
```
### Train the model
We're now ready to train our model, which will involve feeding the training data (`train_images` and `train_labels`) into the model, and then asking it to learn the associations between images and labels. We'll also need to define the batch size and the number of epochs, or iterations over the MNIST dataset, to use during training.
In Lab 1, we saw how we can use `GradientTape` to optimize losses and train models with stochastic gradient descent. After defining the model settings in the `compile` step, we can also accomplish training by calling the [`fit`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#fit) method on an instance of the `Model` class. We will use this to train our fully connected model
```
# Define the batch size and the number of epochs to use during training
BATCH_SIZE = 64
EPOCHS = 5

# Train the fully connected model on the MNIST training split
model.fit(train_images, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS)
```
As the model trains, the loss and accuracy metrics are displayed. With five epochs and a learning rate of 0.1, this fully connected model should achieve an accuracy of approximately 0.97 (or 97%) on the training data.
### Evaluate accuracy on the test dataset
Now that we've trained the model, we can ask it to make predictions about a test set that it hasn't seen before. In this example, the `test_images` array comprises our test dataset. To evaluate accuracy, we can check to see if the model's predictions match the labels from the `test_labels` array.
Use the [`evaluate`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#evaluate) method to evaluate the model on the test dataset!
```
'''TODO: Use the evaluate method to test the model!'''
# Held-out test accuracy; compare against training accuracy to gauge overfitting
test_loss, test_acc = model.evaluate(test_images, test_labels) # TODO
print('Test accuracy:', test_acc)
```
You may observe that the accuracy on the test dataset is a little lower than the accuracy on the training dataset. This gap between training accuracy and test accuracy is an example of *overfitting*, when a machine learning model performs worse on new data than on its training data.
What is the highest accuracy you can achieve with this first fully connected model? Since the handwritten digit classification task is pretty straightforward, you may be wondering how we can do better...

## 1.3 Convolutional Neural Network (CNN) for handwritten digit classification
As we saw in lecture, convolutional neural networks (CNNs) are particularly well-suited for a variety of tasks in computer vision, and have achieved near-perfect accuracies on the MNIST dataset. We will now build a CNN composed of two convolutional layers and pooling layers, followed by two fully connected layers, and ultimately output a probability distribution over the 10 digit classes (0-9). The CNN we will be building is depicted below:

### Define the CNN model
We'll use the same training and test datasets as before, and proceed similarly as our fully connected network to define and train our new CNN model. To do this we will explore two layers we have not encountered before: you can use [`keras.layers.Conv2D` ](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D) to define convolutional layers and [`keras.layers.MaxPool2D`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D) to define the pooling layers. Use the parameters shown in the network architecture above to define these layers and build the CNN model.
```
def build_cnn_model():
    """Build the MNIST CNN: two conv+pool stages followed by a dense softmax head.

    Returns:
        An uncompiled `tf.keras.Sequential` model producing a probability
        distribution over the 10 digit classes.
    """
    cnn_model = tf.keras.Sequential()

    # First convolutional stage: 24 filters of size 3x3 with ReLU, then 2x2 max-pooling.
    cnn_model.add(tf.keras.layers.Conv2D(filters=24, kernel_size=(3, 3), activation=tf.nn.relu))
    cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))

    # Second convolutional stage: 36 filters of size 3x3 with ReLU, then 2x2 max-pooling.
    cnn_model.add(tf.keras.layers.Conv2D(filters=36, kernel_size=(3, 3), activation=tf.nn.relu))
    cnn_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))

    # Classification head: flatten the feature maps, one hidden dense layer,
    # then a softmax output so the 10 class scores form a probability distribution.
    cnn_model.add(tf.keras.layers.Flatten())
    cnn_model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
    cnn_model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))

    return cnn_model
cnn_model = build_cnn_model()
# Initialize the model by passing some data through
cnn_model.predict(train_images[[0]])
# Print the summary of the layers in the model.
print(cnn_model.summary())
```
### Train and test the CNN model
Now, as before, we can define the loss function, optimizer, and metrics through the `compile` method. Compile the CNN model with an optimizer and learning rate of choice:
```
'''TODO: Define the compile operation with your optimizer and learning rate of choice'''
cnn_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy']) # TODO
```
As was the case with the fully connected model, we can train our CNN using the `fit` method via the Keras API.
```
'''TODO: Use model.fit to train the CNN model, with the same batch_size and number of epochs previously used.'''
cnn_model.fit(train_images, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS)
```
Great! Now that we've trained the model, let's evaluate it on the test dataset using the [`evaluate`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#evaluate) method:
```
'''TODO: Use the evaluate method to test the model!'''
test_loss, test_acc = cnn_model.evaluate(test_images, test_labels) # TODO
print('Test accuracy:', test_acc)
```
What is the highest accuracy you're able to achieve using the CNN model, and how does the accuracy of the CNN model compare to the accuracy of the simple fully connected network? What optimizers and learning rates seem to be optimal for training the CNN model?
### Make predictions with the CNN model
With the model trained, we can use it to make predictions about some images. The [`predict`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#predict) function call generates the output predictions given a set of input samples.
```
predictions = cnn_model.predict(test_images)
```
With this function call, the model has predicted the label for each image in the testing set. Let's take a look at the prediction for the first image in the test dataset:
```
predictions[0]
```
As you can see, a prediction is an array of 10 numbers. Recall that the output of our model is a probability distribution over the 10 digit classes. Thus, these numbers describe the model's "confidence" that the image corresponds to each of the 10 different digits.
Let's look at the digit that has the highest confidence for the first image in the test dataset:
```
'''TODO: identify the digit with the highest confidence prediction for the first
image in the test dataset. '''
prediction = np.argmax(predictions[0]) # TODO
print(prediction)
```
So, the model is most confident that this image is a "???". We can check the test label (remember, this is the true identity of the digit) to see if this prediction is correct:
```
print("Label of this digit is:", test_labels[0])
plt.imshow(test_images[0,:,:,0], cmap=plt.cm.binary)
```
It is! Let's visualize the classification results on the MNIST dataset. We will plot images from the test dataset along with their predicted label, as well as a histogram that provides the prediction probabilities for each of the digits:
```
#@title Change the slider to look at the model's predictions! { run: "auto" }
image_index = 79 #@param {type:"slider", min:0, max:100, step:1}
plt.subplot(1,2,1)
mdl.lab2.plot_image_prediction(image_index, predictions, test_labels, test_images)
plt.subplot(1,2,2)
mdl.lab2.plot_value_prediction(image_index, predictions, test_labels)
```
We can also plot several images along with their predictions, where correct prediction labels are blue and incorrect prediction labels are red. The number gives the percent confidence (out of 100) for the predicted label. Note the model can be very confident in an incorrect prediction!
```
# Plots the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 4
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
mdl.lab2.plot_image_prediction(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
mdl.lab2.plot_value_prediction(i, predictions, test_labels)
```
## 1.4 Training the model 2.0
Earlier in the lab, we used the [`fit`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#fit) function call to train the model. This function is quite high-level and intuitive, which is really useful for simpler models. As you may be able to tell, this function abstracts away many details in the training call, and we have less control over how the model is trained, which could be useful in other contexts.
As an alternative to this, we can use the [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape) class to record differentiation operations during training, and then call the [`tf.GradientTape.gradient`](https://www.tensorflow.org/api_docs/python/tf/GradientTape#gradient) function to actually compute the gradients. You may recall seeing this in Lab 1 Part 1, but let's take another look at this here.
We'll use this framework to train our `cnn_model` using stochastic gradient descent.
```
# Rebuild the CNN model
cnn_model = build_cnn_model()
batch_size = 12
loss_history = mdl.util.LossHistory(smoothing_factor=0.95) # to record the evolution of the loss
plotter = mdl.util.PeriodicPlotter(sec=2, xlabel='Iterations', ylabel='Loss', scale='semilogy')
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-2) # define our optimizer
if hasattr(tqdm, '_instances'): tqdm._instances.clear() # clear if it exists
for idx in tqdm(range(0, train_images.shape[0], batch_size)):
# First grab a batch of training data and convert the input images to tensors
(images, labels) = (train_images[idx:idx+batch_size], train_labels[idx:idx+batch_size])
images = tf.convert_to_tensor(images, dtype=tf.float32)
# GradientTape to record differentiation operations
with tf.GradientTape() as tape:
#'''TODO: feed the images into the model and obtain the predictions'''
logits = cnn_model(images) # TODO
#'''TODO: compute the categorical cross entropy loss
loss_value = tf.keras.backend.sparse_categorical_crossentropy(labels, logits) # TODO
loss_history.append(loss_value.numpy().mean()) # append the loss to the loss_history record
plotter.plot(loss_history.get())
# Backpropagation
'''TODO: Use the tape to compute the gradient against all parameters in the CNN model.
Use cnn_model.trainable_variables to access these parameters.'''
grads = tape.gradient(loss_value, cnn_model.trainable_variables) # TODO
optimizer.apply_gradients(zip(grads, cnn_model.trainable_variables))
```
## 1.5 Conclusion
In this part of the lab, you had the chance to play with different MNIST classifiers with different architectures (fully-connected layers only, CNN), and experiment with how different hyperparameters affect accuracy (learning rate, etc.). The next part of the lab explores another application of CNNs, facial detection, and some drawbacks of AI systems in real world applications, like issues of bias.
| github_jupyter |
<a href="https://colab.research.google.com/github/carlomigs/tensortrade/blob/master/examples/migs_TensorTrade_Tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import os
import sys
import warnings
import numpy
def warn(*args, **kwargs):
pass
warnings.warn = warn
warnings.simplefilter(action='ignore', category=FutureWarning)
numpy.seterr(divide = 'ignore')
sys.path.append(os.path.dirname(os.path.abspath('')))
# copy in datasets from btgym
!git clone https://github.com/Kismuz/btgym.git
!git clone https://github.com/notadamking/tensortrade.git
!pip install -e /content/tensortrade
import os
os.getcwd()
#%cd /content/tensortrade
%ls
!pip install -e tensortrade[tf,tensorforce,baselines,ccxt,fbm] -U
!pip install talib-binary
%ls /content/btgym/examples/data/
import numpy
import pandas as pd
from tensortrade.exchanges.simulated import SimulatedExchange
headers=['datetime', 'open', 'high', 'low', 'close', 'volume']
df = pd.read_csv('/content/btgym/examples/data/DAT_ASCII_EURUSD_M1_2017.csv',
delimiter=';',
names=headers)
exchange = SimulatedExchange(data_frame=df, base_instrument='USD', pretransform=True)
exchange.data_frame.tail()
from tensortrade.features import FeaturePipeline
from tensortrade.features.scalers import MinMaxNormalizer
from tensortrade.features.stationarity import FractionalDifference
from tensortrade.features.indicators import SimpleMovingAverage
price_columns = ["open", "high", "low", "close"]
normalize_price = MinMaxNormalizer(price_columns, inplace=True)
moving_averages = SimpleMovingAverage(price_columns)
difference_all = FractionalDifference(difference_order=1, inplace=True)
feature_pipeline = FeaturePipeline(steps=[normalize_price,
moving_averages,
difference_all])
exchange.feature_pipeline = feature_pipeline
exchange.data_frame.tail()
from tensortrade.actions import DiscreteActionStrategy
#action_strategy = DiscreteActionStrategy(n_actions=20, instrument_symbol='BTC')
action_strategy = DiscreteActionStrategy(n_actions=20)
from tensortrade.rewards import SimpleProfitStrategy
from tensortrade.rewards import RiskAdjustedReturnStrategy
reward_strategy = SimpleProfitStrategy()
#reward_strategy = RiskAdjustedReturnStrategy()
from tensortrade.environments import TradingEnvironment
environment = TradingEnvironment(exchange=exchange,
feature_pipeline=feature_pipeline,
action_strategy=action_strategy,
reward_strategy=reward_strategy)
environment.exchange.transform_data_frame()
from stable_baselines.common.policies import MlpLnLstmPolicy
from stable_baselines import PPO2
model = PPO2
policy = MlpLnLstmPolicy
params = { "learning_rate": 1e-5, 'nminibatches': 1 }
```
## Training a Strategy
Creating our trading strategy is as simple as plugging in our agent and the environment.
MigsStableBaselinesTradingStrategy
```
#@title
import os
import gym
import json
import pandas as pd
import numpy as np
from abc import ABCMeta, abstractmethod
from typing import Union, Callable, List, Dict
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.policies import BasePolicy
from stable_baselines.common.base_class import BaseRLModel
from stable_baselines import DQN
from tensortrade.environments.trading_environment import TradingEnvironment
from tensortrade.strategies import TradingStrategy
class MigsStableBaselinesTradingStrategy(TradingStrategy):
    """A trading strategy capable of self tuning, training, and evaluating with stable-baselines.

    Wraps a stable-baselines RL model together with a TensorTrade
    `TradingEnvironment` (vectorized via `DummyVecEnv`) behind the generic
    `TradingStrategy` interface.
    """

    def __init__(self,
                 environment: TradingEnvironment,
                 model: BaseRLModel = DQN,
                 policy: Union[str, BasePolicy] = 'MlpPolicy',
                 model_kwargs: dict = None,
                 **kwargs):
        """
        Arguments:
            environment: A `TradingEnvironment` instance for the agent to trade within.
            model (optional): The RL model to create the agent with. Defaults to DQN.
            policy (optional): The RL policy to train the agent's model with. Defaults to 'MlpPolicy'.
            model_kwargs (optional): Any additional keyword arguments to adjust the model.
            kwargs (optional): Optional keyword arguments to adjust the strategy.
        """
        self._model = model
        # FIX: avoid the shared mutable-default-argument pitfall (`model_kwargs={}`)
        # by creating a fresh dict per instance.
        self._model_kwargs = dict(model_kwargs) if model_kwargs else {}
        self.environment = environment  # property setter wraps it in a DummyVecEnv
        self._agent = self._model(policy, self._environment, **self._model_kwargs)

    @property
    def environment(self) -> 'TradingEnvironment':
        """A `TradingEnvironment` instance for the agent to trade within."""
        return self._environment

    @environment.setter
    def environment(self, environment: 'TradingEnvironment'):
        # stable-baselines agents expect a vectorized environment.
        self._environment = DummyVecEnv([lambda: environment])

    def restore_agent(self, path: str):
        """Deserialize the strategy's learning agent from a file.

        Arguments:
            path: The `str` path of the file the agent specification is stored in.
        """
        # BUG FIX: the model kwargs must be splatted as keyword arguments.
        # Passed positionally, the dict would land in the `custom_objects`
        # parameter of `BaseRLModel.load(load_path, env=None, custom_objects=None, **kwargs)`.
        self._agent = self._model.load(path, self._environment, **self._model_kwargs)

    def save_agent(self, path: str):
        """Serialize the learning agent to a file for restoring later.

        Arguments:
            path: The `str` path of the file to store the agent specification in.
        """
        self._agent.save(path)

    def tune(self, steps: int = None, episodes: int = None, callback: Callable[[pd.DataFrame], bool] = None) -> pd.DataFrame:
        # Hyper-parameter tuning is not implemented for this strategy.
        raise NotImplementedError

    def run(self, steps: int = None, episodes: int = None, episode_callback: Callable[[pd.DataFrame], bool] = None) -> pd.DataFrame:
        """Run the agent for a number of timesteps or episodes and return its performance.

        Arguments:
            steps (optional): Total timesteps to run; 0 means run until stopped.
            episodes (optional): Total episodes to run.
            episode_callback (optional): Currently disabled (see commented block below).

        Raises:
            ValueError: If neither `steps` nor `episodes` is provided.
        """
        if steps is None and episodes is None:
            raise ValueError(
                'You must set the number of `steps` or `episodes` to run the strategy.')

        steps_completed = 0
        episodes_completed = 0
        average_reward = 0

        obs, state, dones = self._environment.reset(), None, [False]
        performance = {}

        while (steps is not None and (steps == 0 or steps_completed < steps)) or (episodes is not None and episodes_completed < episodes):
            actions, state = self._agent.predict(obs, state=state, mask=dones)
            obs, rewards, dones, info = self._environment.step(actions)

            steps_completed += 1
            # BUG FIX: correct incremental mean. The original mixed denominators
            # (`steps_completed` vs `steps_completed + 1`), biasing the average.
            average_reward += (rewards[0] - average_reward) / steps_completed

            # Keep the last non-empty performance frame reported by the exchange.
            exchange_performance = info[0].get('exchange').performance
            performance = exchange_performance if len(exchange_performance) > 0 else performance

            if dones[0]:
                # if episode_callback is not None and episode_callback(performance):
                #     break
                episodes_completed += 1
                print("episode #: {}".format(episodes_completed))
                print(performance[-2:])
                obs = self._environment.reset()

        print("Finished running strategy.")
        print("Total episodes: {} ({} timesteps).".format(episodes_completed, steps_completed))
        print("Average reward: {}.".format(average_reward))

        return performance
#from tensortrade.strategies import StableBaselinesTradingStrategy
strategy = MigsStableBaselinesTradingStrategy(environment=environment,
model=model,
policy=policy,
model_kwargs=params)
```
Then to train the strategy (i.e. train the agent on the current environment), all we need to do is call `strategy.run()` with the total number of steps or episodes you’d like to run.
If this feedback loop is a bit slow for you, you can pass a callback function to `run`, which will be called at the end of each episode. The callback function will pass in a `data_frame` containing the agent's performance that episode, and expects a `bool` in return. If `True`, the agent will continue training, otherwise, the agent will stop and return its overall performance.
```
#performance = strategy.run(steps=100000)
performance = strategy.run(episodes=3, episode_callback=True)
%matplotlib inline
performance.net_worth.plot()
```
## BTC
MigsStableBaselinesTradingStrategy
```
#@title
import os
import gym
import json
import pandas as pd
import numpy as np
from abc import ABCMeta, abstractmethod
from typing import Union, Callable, List, Dict
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.policies import BasePolicy
from stable_baselines.common.base_class import BaseRLModel
from stable_baselines import DQN
from tensortrade.environments.trading_environment import TradingEnvironment
from tensortrade.strategies import TradingStrategy
class MigsStableBaselinesTradingStrategy(TradingStrategy):
    """A trading strategy capable of self tuning, training, and evaluating with stable-baselines.

    Wraps a stable-baselines RL model together with a TensorTrade
    `TradingEnvironment` (vectorized via `DummyVecEnv`) behind the generic
    `TradingStrategy` interface.
    """

    def __init__(self,
                 environment: TradingEnvironment,
                 model: BaseRLModel = DQN,
                 policy: Union[str, BasePolicy] = 'MlpPolicy',
                 model_kwargs: dict = None,
                 **kwargs):
        """
        Arguments:
            environment: A `TradingEnvironment` instance for the agent to trade within.
            model (optional): The RL model to create the agent with. Defaults to DQN.
            policy (optional): The RL policy to train the agent's model with. Defaults to 'MlpPolicy'.
            model_kwargs (optional): Any additional keyword arguments to adjust the model.
            kwargs (optional): Optional keyword arguments to adjust the strategy.
        """
        self._model = model
        # FIX: avoid the shared mutable-default-argument pitfall (`model_kwargs={}`)
        # by creating a fresh dict per instance.
        self._model_kwargs = dict(model_kwargs) if model_kwargs else {}
        self.environment = environment  # property setter wraps it in a DummyVecEnv
        self._agent = self._model(policy, self._environment, **self._model_kwargs)

    @property
    def environment(self) -> 'TradingEnvironment':
        """A `TradingEnvironment` instance for the agent to trade within."""
        return self._environment

    @environment.setter
    def environment(self, environment: 'TradingEnvironment'):
        # stable-baselines agents expect a vectorized environment.
        self._environment = DummyVecEnv([lambda: environment])

    def restore_agent(self, path: str):
        """Deserialize the strategy's learning agent from a file.

        Arguments:
            path: The `str` path of the file the agent specification is stored in.
        """
        # BUG FIX: the model kwargs must be splatted as keyword arguments.
        # Passed positionally, the dict would land in the `custom_objects`
        # parameter of `BaseRLModel.load(load_path, env=None, custom_objects=None, **kwargs)`.
        self._agent = self._model.load(path, self._environment, **self._model_kwargs)

    def save_agent(self, path: str):
        """Serialize the learning agent to a file for restoring later.

        Arguments:
            path: The `str` path of the file to store the agent specification in.
        """
        self._agent.save(path)

    def tune(self, steps: int = None, episodes: int = None, callback: Callable[[pd.DataFrame], bool] = None) -> pd.DataFrame:
        # Hyper-parameter tuning is not implemented for this strategy.
        raise NotImplementedError

    def run(self, steps: int = None, episodes: int = None, episode_callback: Callable[[pd.DataFrame], bool] = None) -> pd.DataFrame:
        """Run the agent for a number of timesteps or episodes and return its performance.

        Arguments:
            steps (optional): Total timesteps to run; 0 means run until stopped.
            episodes (optional): Total episodes to run.
            episode_callback (optional): Currently disabled (see commented block below).

        Raises:
            ValueError: If neither `steps` nor `episodes` is provided.
        """
        if steps is None and episodes is None:
            raise ValueError(
                'You must set the number of `steps` or `episodes` to run the strategy.')

        steps_completed = 0
        episodes_completed = 0
        average_reward = 0

        obs, state, dones = self._environment.reset(), None, [False]
        performance = {}

        while (steps is not None and (steps == 0 or steps_completed < steps)) or (episodes is not None and episodes_completed < episodes):
            actions, state = self._agent.predict(obs, state=state, mask=dones)
            obs, rewards, dones, info = self._environment.step(actions)

            steps_completed += 1
            # BUG FIX: correct incremental mean. The original mixed denominators
            # (`steps_completed` vs `steps_completed + 1`), biasing the average.
            average_reward += (rewards[0] - average_reward) / steps_completed

            # Keep the last non-empty performance frame reported by the exchange.
            exchange_performance = info[0].get('exchange').performance
            performance = exchange_performance if len(exchange_performance) > 0 else performance

            if dones[0]:
                # if episode_callback is not None and episode_callback(performance):
                #     break
                episodes_completed += 1
                print("episode #: {}".format(episodes_completed))
                print(performance[-2:])
                obs = self._environment.reset()

        print("Finished running strategy.")
        print("Total episodes: {} ({} timesteps).".format(episodes_completed, steps_completed))
        print("Average reward: {}.".format(average_reward))

        return performance
import talib
talib.get_functions()
from tensortrade.environments import TradingEnvironment
from tensortrade.exchanges.simulated import FBMExchange
from tensortrade.features.scalers import MinMaxNormalizer
from tensortrade.features.stationarity import FractionalDifference
from tensortrade.features import FeaturePipeline
from tensortrade.rewards import SimpleProfitStrategy
from tensortrade.actions import DiscreteActionStrategy
#from tensortrade.strategies import StableBaselinesTradingStrategy
from tensortrade.features.indicators import TAlibIndicator
from stable_baselines.common.policies import MlpLnLstmPolicy
from stable_baselines import PPO2
price_columns = ["open", "high", "low", "close"]
normalize = MinMaxNormalizer(price_columns, inplace=True)
#moving_averages = TAlibIndicator(["RSI", "BBANDS", "STOCH", "MACD", "CCI", "WILLR"])
difference = FractionalDifference(difference_order=1,
inplace=True)
feature_pipeline = FeaturePipeline(steps=[normalize,
# moving_averages,
difference])
reward_strategy = SimpleProfitStrategy()
action_strategy = DiscreteActionStrategy(n_actions=40)
exchange = FBMExchange(base_instrument='BTC',
timeframe='1h',
pretransform=True)
environment = TradingEnvironment(exchange=exchange,
action_strategy=action_strategy,
reward_strategy=reward_strategy,
feature_pipeline=feature_pipeline)
model = PPO2
policy = MlpLnLstmPolicy
params = { "learning_rate": 1e-5, 'nminibatches': 1 }
strategy = MigsStableBaselinesTradingStrategy(environment=environment,
model=model,
policy=policy,
model_kwargs=params)
performance = strategy.run(episodes=100)
strategy.save_agent(path="tensortrade/ppo_btc_1h")
%matplotlib inline
performance.net_worth.plot()
%ls /content/tensortrade/examples/data/
import pandas as pd
from tensortrade.environments import TradingEnvironment
from tensortrade.exchanges.simulated import SimulatedExchange
df = pd.read_csv('/content/tensortrade/examples/data/Coinbase_BTCUSD_1h.csv',skiprows=1)
exchange = SimulatedExchange(data_frame=df,
base_instrument='BTC',
feature_pipeline=feature_pipeline)
environment = TradingEnvironment(exchange=exchange,
action_strategy=action_strategy,
reward_strategy=reward_strategy)
model = PPO2
policy = MlpLnLstmPolicy
params = { "learning_rate": 1e-5, 'nminibatches': 1 }
new_strategy = MigsStableBaselinesTradingStrategy(environment=environment,
model=model,
policy=policy,
model_kwargs=params)
new_strategy.restore_agent(path="tensortrade/ppo_btc_1h")
test_performance = new_strategy.run(steps=2000)
test_performance.net_worth.plot()
```
## Saving and Restoring
All trading strategies are capable of saving their agent to a file, for later restoring. The environment is not saved, as it does not have state that we care about preserving. To save our `TensorflowTradingStrategy` to a file, we just need to provide the path of the file to our strategy.
```
print(os.getcwd())
%ls
strategy.save_agent(path="ppo_btc_1h")
```
_This specific strategy saves multiple files, including a directory of models to the path provided._
To restore the agent from the file, we first need to instantiate our strategy, before calling restore_agent.
```
# CONSISTENCY FIX: the `StableBaselinesTradingStrategy` import is commented out
# earlier in this notebook, so that name is undefined here. Use the
# `MigsStableBaselinesTradingStrategy` class defined above instead.
new_strategy = MigsStableBaselinesTradingStrategy(environment=environment,
                                                  model=model,
                                                  policy=policy,
                                                  model_kwargs=params)
new_strategy.restore_agent(path="ppo_btc_1h")
```
Our strategy is now restored back to its previous state, and ready to be used again. Let's see how it does.
## Tuning Your Strategy
Sometimes a trading strategy will require tuning a set of hyper-parameters, or features, on an environment to achieve maximum performance. In this case, each `TradingStrategy` provides an optionally implementable tune method.
Tuning a model is similar to training a model, however in addition to adjusting and saving the weights and biases of the best performing model, the strategy also adjusts and persists the hyper-parameters that produced that model.
```
from tensortrade.environments import TradingEnvironment
from tensortrade.exchanges.simulated import FBMExchange
exchange = FBMExchange(timeframe='1h', base_instrument='BTC', feature_pipeline=feature_pipeline)
environment = TradingEnvironment(exchange=exchange,
action_strategy=action_strategy,
reward_strategy=reward_strategy)
new_strategy.environment = environment
tuned_performance = new_strategy.tune(episodes=10)
```
In this case, the agent will be trained for 10 episodes, with a different set of hyper-parameters each episode. The best set will be saved within the strategy, and used any time strategy.run() is called thereafter.
## Strategy Evaluation
Now that we've tuned and trained our agent, it's time to see how well it performs. To evaluate our strategy's performance on unseen data, we will need to run it on a new environment backed by such data.
```
import pandas as pd
from tensortrade.environments import TradingEnvironment
from tensortrade.exchanges.simulated import SimulatedExchange
df = pd.read_csv('/content/tensortrade/examples/data/Coinbase_BTCUSD_d.csv', skiprows=1)
exchange = SimulatedExchange(data_frame=df,
feature_pipeline=feature_pipeline,
base_instrument='USD',
should_pretransform_obs=True)
environment = TradingEnvironment(exchange=exchange,
action_strategy=action_strategy,
reward_strategy=reward_strategy)
new_strategy.environment = environment
test_performance = new_strategy.run(steps=2000)
df.tail()
%matplotlib inline
test_performance.net_worth.plot()
```
When complete, strategy.run returns a `Pandas.data_frame` of the agent's performance, including the net worth and balance of the agent at each time step.
## Live Trading
Once you've built a profitable trading strategy, trained an agent to trade it properly, and ensured its "generalize-ability" to new data sets, all there is left to do is profit. Using a live exchange such as `CCXTExchange`, you can plug your strategy in and let it run!
While the gambler in you may enjoy starting a strategy and letting it run without bounds, the more risk averse of you can use a `trade_callback`, which will be called each time the strategy makes a trade. This callback function, similar to the episode callback, will pass in a data frame containing the agent's overall performance, and expects a `bool` in return. If `True`, the agent will continue trading, otherwise, the agent will stop and return its performance over the session.
```
import ccxt
from tensortrade.environments import TradingEnvironment
from tensortrade.exchanges.live import CCXTExchange
# SECURITY FIX: never commit live exchange API credentials to a notebook.
# The key/secret previously hard-coded here are compromised and must be
# revoked and rotated on Binance. Read credentials from the environment.
binance = ccxt.binance({
    'apiKey': os.environ.get('BINANCE_API_KEY', ''),
    'secret': os.environ.get('BINANCE_API_SECRET', ''),
    'enableRateLimit': True,
})
exchange = CCXTExchange(exchange=binance,
base_instrument='BNB',
observation_type='ohlcv',
timeframe='1h')
btcusd_actions = DiscreteActionStrategy(n_actions=20, instrument_symbol='BNB/BTC')
environment = TradingEnvironment(exchange=exchange,
feature_pipeline=feature_pipeline,
action_strategy=btcusd_actions,
reward_strategy=reward_strategy)
strategy.environment = environment
live_performance = strategy.run(steps=1)
live_performance
```
_Passing `steps=0` instructs the strategy to run until otherwise stopped._
```
```
https://colab.research.google.com/drive/1r9I-DJjrT-0JHbrB10NLFudZ7hQdOcdq
| github_jupyter |
<a href="https://www.bigdatauniversity.com"><img src = "https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png" width = 300, align = "center"></a>
<h1 align=center><font size = 5>Lab: Connect to Db2 database on Cloud using Python</font></h1>
# Introduction
This notebook illustrates how to access a DB2 database on Cloud using Python by following the steps below:
1. Import the `ibm_db` Python library
1. Enter the database connection credentials
1. Create the database connection
1. Close the database connection
__Note:__ Please follow the instructions given in the first Lab of this course to Create a database service instance of Db2 on Cloud and retrieve your database Service Credentials.
## Import the `ibm_db` Python library
The `ibm_db` [API ](https://pypi.python.org/pypi/ibm_db/) provides a variety of useful Python functions for accessing and manipulating data in an IBM® data server database, including functions for connecting to a database, preparing and issuing SQL statements, fetching rows from result sets, calling stored procedures, committing and rolling back transactions, handling errors, and retrieving metadata.
We first import the ibm_db library into our Python Application
Execute the following cell by clicking within it and then
press `Shift` and `Enter` keys simultaneously
```
import ibm_db
```
When the command above completes, the `ibm_db` library is loaded in your notebook.
## Identify the database connection credentials
Connecting to dashDB or DB2 database requires the following information:
* Driver Name
* Database name
* Host DNS name or IP address
* Host port
* Connection protocol
* User ID (or username)
* User Password
__Notice:__ To obtain credentials please refer to the instructions given in the first Lab of this course
Now enter your database credentials below and execute the cell with `Shift` + `Enter`
```
#Replace the placeholder values with your actual Db2 hostname, username, and password.
# SECURITY FIX: real credentials were previously committed in this cell and
# must be rotated. Prefer reading them from environment variables.
import os
dsn_hostname = os.environ.get("DB2_HOSTNAME", "YOUR_DB2_HOSTNAME")  # e.g.: "dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net"
dsn_uid = os.environ.get("DB2_UID", "YOUR_USERNAME")  # e.g. "abc12345"
dsn_pwd = os.environ.get("DB2_PWD", "YOUR_PASSWORD")  # e.g. "7dBZ3wWt9XN6$o0J"
dsn_driver = "{IBM DB2 ODBC DRIVER}"
dsn_database = "BLUDB"  # e.g. "BLUDB"
dsn_port = "50000"  # e.g. "50000"
dsn_protocol = "TCPIP"  # i.e. "TCPIP"
```
## Create the DB2 database connection
Ibm_db API uses the IBM Data Server Driver for ODBC and CLI APIs to connect to IBM DB2 and Informix.
Let's build the dsn connection string using the credentials you entered above.
```
#DO NOT MODIFY THIS CELL. Just RUN it with Shift + Enter
#Create the dsn connection string
dsn = (
"DRIVER={0};"
"DATABASE={1};"
"HOSTNAME={2};"
"PORT={3};"
"PROTOCOL={4};"
"UID={5};"
"PWD={6};").format(dsn_driver, dsn_database, dsn_hostname, dsn_port, dsn_protocol, dsn_uid, dsn_pwd)
#print the connection string to check correct values are specified
print(dsn)
```
Now establish the connection to the database
```
#DO NOT MODIFY THIS CELL. Just RUN it with Shift + Enter
#Create database connection
try:
conn = ibm_db.connect(dsn, "", "")
print ("Connected to database: ", dsn_database, "as user: ", dsn_uid, "on host: ", dsn_hostname)
except:
print ("Unable to connect: ", ibm_db.conn_errormsg() )
```
Congratulations if you were able to connect successfully. Otherwise, check the error and try again.
```
#Retrieve Metadata for the Database Server
server = ibm_db.server_info(conn)
print ("DBMS_NAME: ", server.DBMS_NAME)
print ("DBMS_VER: ", server.DBMS_VER)
print ("DB_NAME: ", server.DB_NAME)
#Retrieve Metadata for the Database Client / Driver
client = ibm_db.client_info(conn)
print ("DRIVER_NAME: ", client.DRIVER_NAME)
print ("DRIVER_VER: ", client.DRIVER_VER)
print ("DATA_SOURCE_NAME: ", client.DATA_SOURCE_NAME)
print ("DRIVER_ODBC_VER: ", client.DRIVER_ODBC_VER)
print ("ODBC_VER: ", client.ODBC_VER)
print ("ODBC_SQL_CONFORMANCE: ", client.ODBC_SQL_CONFORMANCE)
print ("APPL_CODEPAGE: ", client.APPL_CODEPAGE)
print ("CONN_CODEPAGE: ", client.CONN_CODEPAGE)
```
## Close the Connection
We free all resources by closing the connection. Remember that it is always important to close connections so that we can avoid unused connections taking up resources.
```
ibm_db.close(conn)
```
## Summary
In this tutorial you established a connection to a DB2 database on Cloud database from a Python notebook using ibm_db API.
Copyright © 2017 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
| github_jupyter |
<a href="https://colab.research.google.com/github/RajamannarAanjaram/TSAI-Assignment/blob/master/13%20ViT/Cat%20Dogs/CatDogs_TransferLearning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
! pip install timm
! pip install -q kaggle
import timm
from pprint import pprint
import os
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, Dataset
#to unzip the datasets
import zipfile
import glob
from PIL import Image
from itertools import chain
from tqdm import tqdm
from __future__ import print_function
from google.colab import files
files.upload()
! mkdir ~/.kaggle
! cp kaggle.json ~/.kaggle/
! chmod 600 ~/.kaggle/kaggle.json
! kaggle competitions download -c 'dogs-vs-cats-redux-kernels-edition'
from sklearn.model_selection import train_test_split
# defining batch size, epochs, learning rate and gamma for training
seed= 40  # NOTE(review): only used for the train/val split below; torch/numpy RNGs are not seeded
batch_size = 64
epochs = 20
lr = 3e-5
gamma = 0.7 #for learning rate scheduler
use_cuda = torch.cuda.is_available()
device= 'cuda:0' if use_cuda else 'cpu'
#Load data
os.makedirs('data', exist_ok=True)
train_dir = 'data/train'
test_dir = 'data/test'
# Extract the Kaggle archives into data/train and data/test.
with zipfile.ZipFile('train.zip') as train_zip:
    train_zip.extractall('data')
with zipfile.ZipFile('test.zip') as test_zip:
    test_zip.extractall('data')
train_list = glob.glob(os.path.join(train_dir,'*.jpg'))
test_list = glob.glob(os.path.join(test_dir, '*.jpg'))
print(f"Train Data: {len(train_list)}")
print(f"Test Data: {len(test_list)}")
# Label is the filename prefix, e.g. data/train/dog.123.jpg -> "dog".
labels = [path.split('/')[-1].split('.')[0] for path in train_list]
# Preview 9 randomly chosen training images with their labels.
# FIX: random_idx was computed but never used — the loop displayed the first
# nine files instead of random ones; sampling also starts at 0, not 1.
random_idx = np.random.randint(0, len(train_list), size=9)
fig, axes = plt.subplots(3, 3, figsize=(16, 12))
for idx, ax in enumerate(axes.ravel()):
    img = Image.open(train_list[random_idx[idx]])
    ax.set_title(labels[random_idx[idx]])
    ax.imshow(img)
# Stratified 80/20 train/validation split so the cat/dog balance is preserved.
train_list, valid_list = train_test_split(train_list,
                                          test_size=0.2,
                                          stratify=labels,
                                          random_state=seed)
print(f"Train Data: {len(train_list)}")
print(f"Validation Data: {len(valid_list)}")
print(f"Test Data: {len(test_list)}")
# Training augmentation: resize, random crop back to 224, random flip.
train_transforms = transforms.Compose(
    [
        transforms.Resize((224, 224)),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ]
)
# NOTE(review): the validation and test pipelines below apply the same
# *random* augmentations as training (RandomResizedCrop/RandomHorizontalFlip).
# Deterministic Resize + CenterCrop is the usual choice for evaluation —
# confirm whether randomness here is intended.
val_transforms = transforms.Compose(
    [
        transforms.Resize((224, 224)),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ]
)
test_transforms = transforms.Compose(
    [
        transforms.Resize((224, 224)),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ]
)
class CatsDogsDataset(Dataset):
    """Cats-vs-dogs dataset over a list of image file paths.

    Labels are parsed from the filename prefix ("dog.N.jpg" -> 1,
    anything else, i.e. "cat.N.jpg" -> 0).
    """

    def __init__(self, file_list, transform=None):
        # file_list: paths to .jpg files; transform: optional torchvision pipeline.
        self.file_list = file_list
        self.transform = transform

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, idx):
        img_path = self.file_list[idx]
        img = Image.open(img_path)
        # FIX: the original called self.transform unconditionally, so the
        # documented default transform=None crashed with a TypeError.
        img_transformed = self.transform(img) if self.transform is not None else img
        # Filename pattern "<label>.<id>.jpg": dog -> 1, cat -> 0.
        label = img_path.split("/")[-1].split(".")[0]
        label = 1 if label == "dog" else 0
        return img_transformed, label
train_data = CatsDogsDataset(train_list, transform=train_transforms)
# NOTE(review): valid_data uses test_transforms rather than val_transforms —
# the two pipelines are identical here, but confirm which was intended.
valid_data = CatsDogsDataset(valid_list, transform=test_transforms)
test_data = CatsDogsDataset(test_list, transform=test_transforms)
train_loader = DataLoader(dataset = train_data, batch_size=batch_size, shuffle=True )
valid_loader = DataLoader(dataset = valid_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset = test_data, batch_size=batch_size, shuffle=True)  # shuffling test data is harmless but unnecessary
# List all ViT variants bundled with timm.
model_names = timm.list_models('*vit*')
pprint(model_names)
import timm
# Pretrained ViT-Base/16 at 224x224 resolution.
model = timm.create_model('vit_base_patch16_224', pretrained=True).to(device)
print(model)
# Replace the 1000-class ImageNet head with a 2-class (cat/dog) head;
# 768 is the ViT-Base embedding dimension.
model.head = nn.Linear(768, 2).to(device)
print(model)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)
# NOTE(review): this scheduler is created but scheduler.step() is never
# called in the training loop below, so the LR never decays.
scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
# Fine-tune the ViT head: one training pass + one validation pass per epoch.
for epoch in range(epochs):
    epoch_loss = 0.0
    epoch_accuracy = 0.0
    model.train()  # ensure dropout/norm layers are in training mode
    for data, label in tqdm(train_loader):
        data = data.to(device)
        label = label.to(device)

        output = model(data)
        loss = criterion(output, label)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # FIX: .item() detaches the scalar from the autograd graph; the
        # original accumulated the raw loss tensor, keeping every batch's
        # graph alive and steadily leaking GPU memory.
        acc = (output.argmax(dim=1) == label).float().mean().item()
        epoch_accuracy += acc / len(train_loader)
        epoch_loss += loss.item() / len(train_loader)

    model.eval()  # deterministic evaluation (disable dropout etc.)
    with torch.no_grad():
        epoch_val_accuracy = 0.0
        epoch_val_loss = 0.0
        for data, label in valid_loader:
            data = data.to(device)
            label = label.to(device)

            val_output = model(data)
            val_loss = criterion(val_output, label)

            acc = (val_output.argmax(dim=1) == label).float().mean().item()
            epoch_val_accuracy += acc / len(valid_loader)
            epoch_val_loss += val_loss.item() / len(valid_loader)

    # FIX: apply the LR decay — the StepLR scheduler was created with
    # gamma "for learning rate scheduler" but was never stepped.
    scheduler.step()

    print(
        f"Epoch : {epoch+1} - loss : {epoch_loss:.4f} - acc: {epoch_accuracy:.4f} - val_loss : {epoch_val_loss:.4f} - val_acc: {epoch_val_accuracy:.4f}\n"
    )
```
| github_jupyter |
```
import os
import numpy as np
import pandas as pd
import jinja2 as jj
def mklbl(prefix, n):
    """Return n labels made by appending 0..n-1 to *prefix* (e.g. A0, A1, ...)."""
    return [f"{prefix}{i}" for i in range(n)]
# 4-level row MultiIndex: A0-3 x B0-1 x C0-3 x D0-1 (64 rows).
miindex = pd.MultiIndex.from_product([mklbl('A', 4),
                                      mklbl('B', 2),
                                      mklbl('C', 4),
                                      mklbl('D', 2)],
                                     names=['RowIdx-1', 'RowIdx-2', 'RowIdx-3', 'RowIdx-4'])
# Flattened string labels for the same rows, e.g. "A0-B0-C0-D0".
index =['-'.join(col).strip() for col in miindex.values]
# 3-level column MultiIndex (9 columns).
micolumns = pd.MultiIndex.from_tuples([('a', 'foo', 'zap'),
                                       ('a', 'foo', 'zip'),
                                       ('a', 'bar', 'zap'),
                                       ('a', 'bar', 'zip'),
                                       ('b', 'foo', 'zap'),
                                       ('b', 'foo', 'zep'),
                                       ('b', 'bah', 'zep'),
                                       ('b', 'bah', 'zyp'),
                                       ('b', 'bah', 'zap'),
                                       ],
                                      names=['ColIdx-{}'.format(i) for i in range(1, 4)])
cols =['-'.join(col).strip() for col in micolumns.values]
# FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin float is the documented replacement (same dtype, float64).
data = np.arange(len(miindex) * len(micolumns), dtype=float).reshape((len(miindex), len(micolumns)))
data = data.tolist()
# The same data under the four combinations of hierarchical/flat axes.
dfrc = pd.DataFrame(data, index=miindex, columns=micolumns).sort_index().sort_index(axis=1)
dfr = pd.DataFrame(data, index=miindex, columns=cols).sort_index().sort_index(axis=1)
dfr.columns.name = 'UniqueCol'
dfc = pd.DataFrame(data, index=index, columns=micolumns).sort_index().sort_index(axis=1)
dfc.index.name = 'UniqueRow'
df = pd.DataFrame(data, index=index, columns=cols).sort_index()
df.index.name = 'UniqueRow'
df.columns.name = 'UniqueCol'
dfrc.info()
dfr.info()
dfc.info()
df.info()
dfrc.head()
```
## Save df html
Must:
+ use notebook.css
+ wrap dataframe.html in specific classes - like in notebook
Result can be iframed in any doc
```
%%writefile templates/index.tpl.html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>dataframe</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://cdn.jupyter.org/notebook/5.6.0/style/style.min.css">
</head>
<body>
<!-- FIX: class was "output_are"; the notebook stylesheet targets
     "output_area", so the typo left the dataframe unstyled. -->
<div class="output_area">
<div class="output_subarea output_html rendered_html output_result">
__$data.df_html$__
</div>
</div>
</body>
</html>
# Render the dataframe HTML into the template and serve the result.
dir_template = 'templates'
dir_dump = 'dump'
loader = jj.FileSystemLoader(dir_template)
# Custom delimiters (__$ ... $__) so Jinja does not clash with any braces
# in pandas' generated HTML.
env = jj.Environment(loader=loader,
                     variable_start_string='__$',
                     variable_end_string='$__',
                     block_start_string='{-%',
                     block_end_string='%-}'
                     )
template = env.get_template('index.tpl.html')
# data = {'df_html': dfrc.to_html()}
data = {'df_html': dfrc.head(10).to_html()}
content = template.render(data=data)
if not os.path.exists(dir_dump):
    os.makedirs(dir_dump)
path = os.path.join(dir_dump, 'index.html')
with open(path, 'w') as f:
    f.write(content)
print('file {} saved to disk'.format(path))
# Serve the dump directory at http://localhost:8080 (blocks the kernel).
!cd dump && python -m http.server 8080
```
| github_jupyter |
$\newcommand{\tensor}[1]{\boldsymbol{#1}}$
$\newcommand{\tensorthree}[1]{\mathbfcal{#1}}$
$\newcommand{\tensorfour}[1]{\mathbb{#1}}$
$\newcommand{\dar}{\, \text{d} a^r}$
$\newcommand{\dvr}{\, \text{d} v^r}$
$\newcommand{\dv}{\, \text{d} v}$
$\newcommand{\dt}{\, \text{d} t}$
$\newcommand{\dthe}{\, \text{d} \theta}$
$\newcommand{\tr}{\operatorname{tr}}$
## The nonlinear heat equation solver in FEniCS
```
# IPython rich-display helpers; the Image call embeds the remote GIF in the
# notebook output as a quick display smoke test.
from IPython.display import display_pretty, display_html, display_jpeg, display_png, display_json, display_latex, display_svg
from IPython.display import Image
Image(url='http://python.org/images/python-logo.gif')
```
### Functionals
The total **deformation gradient** can be can be decomposed as:
\begin{equation}
\tensor{F} = \tensor{F}_e ~ \tensor{F}_{\theta}
\end{equation}
The analysis restricted to isotropic materials, for which the thermal part of the deformation gradient is:
\begin{equation}
\tensor{F}_{\theta} = \vartheta \left( \theta \right) ~ \tensor{I}
\end{equation}
The scalar $\vartheta = \vartheta \left( \theta \right)$ is the **thermal stretch ratio** in any material direction. In this case, the elastic and thermal Green strains become:
\begin{equation}
\tensor{E}_{e} = \frac{1}{\vartheta^2} \left( \tensor{E} - \tensor{E}_{\theta} \right), \qquad \tensor{E}_{\theta} = \frac{1}{2} \left( \vartheta^2 - 1 \right) \tensor{I}
\end{equation}
The relationship holds:
\begin{equation}
\tensor{I} + 2 \tensor{E} = \vartheta^2 \left( \tensor{I} + 2 \tensor{E}_{e} \right)
\end{equation}
Since the thermal stretch ratio $\vartheta$ and the coefficient of thermal expansion $\alpha$ are related by
\begin{equation}
\alpha \left( \theta \right) = \frac{1}{\vartheta} \frac{\text{d} \vartheta}{\dthe}
\end{equation}
the rate of elastic strain can be written as
\begin{equation}
\dot{\tensor{E}}_{e} = \frac{1}{\vartheta^2 \left( \theta \right)} \left[ \dot{\tensor{E}} - \alpha \left( \theta \right) \left( \tensor{I} + 2 \tensor{E} \right) \dot{\theta} \right]
\end{equation}
### Stress Response
Within the model of the multiplicative decomposition, the Helmholtz free energy can be conveniently split into two
parts:
\begin{equation}
{\varphi} \left( \tensor{u}, \theta \right) = {\varphi}_{e} \left( \tensor{E}_{e}, \theta \right) + {\varphi}_{\theta} \left( \theta \right)
\end{equation}
where ${\varphi}_{e}$ is an isotropic function of the elastic strain $\tensor{E}_{e}$ and the temperature $\theta$. This decomposition is physically appealing because the function ${\varphi}_{e}$ can be taken as one of the
well-known strain energy functions of the isothermal finite-strain elasticity, except that the coefficients of the strain-dependent terms are the functions of temperature, while the function ${\varphi}_{\theta}$ can be separately adjusted in accord with experimental data for the specific heat.
The time-rate of the free energy
\begin{equation}
\dot{\varphi} = \frac{\text{d} \varphi \left( \tensor{u}, \theta \right)}{\dt} = \frac{\partial {\varphi}_{e}}{\partial \tensor{E}_{e}} : \dot{\tensor{E}_{e}} + \frac{\partial {\varphi}_{e}}{\partial \theta} ~ \dot{\theta} + \frac{\text{d} \varphi_{\theta}}{\dthe} ~ \dot{\theta}
\end{equation}
there follows
\begin{equation}
\dot{\varphi} = \frac{1}{\vartheta^2} \frac{\partial {\varphi}_{e}}{\partial \tensor{E}_{e}} : \dot{\tensor{E}_{e}} - \left[ \frac{\alpha}{\vartheta^2} \frac{\partial {\varphi}_{e}}{\partial \tensor{E}_{e}}: \left( \tensor{I} + 2 \tensor{E} \right) - \frac{\partial {\varphi}_{e}}{\partial \theta} - \frac{\text{d} \varphi_{\theta}}{\dthe} \right] ~ \dot{\theta}
\end{equation}
The comparison with the energy equation:
\begin{equation}
\dot{\varphi} = \frac{1}{\varrho^r} \tensor{S} : \dot{\tensor{E}} - \eta \dot{\theta}
\end{equation}
establishes the constitutive relations for the symmetric second Piola–Kirchhoff stress tensor $\tensor{S}$ and the specific entropy $\eta$. These
are:
\begin{equation}
\tensor{S} = \frac{\varrho^r}{\vartheta^2} \frac{\partial {\varphi}_{e}}{\partial \tensor{E}_{e}}
\end{equation}
\begin{equation}
\eta = \alpha \frac{\partial {\varphi}_{e}}{\partial \tensor{E}_{e}} : \left( \tensor{I} + 2 \tensor{E} \right) - \frac{\partial {\varphi}_{e}}{\partial \theta} - \frac{\text{d} \varphi_{\theta}}{\dthe}
\end{equation}
\begin{equation}
\varrho^r = \vartheta^3 \varrho^{\theta}
\end{equation}
\begin{equation}
\tensor{S} = \vartheta ~ \tensor{S}_{e}, \qquad \tensor{S}_{e} = \varrho^{\theta} \frac{\partial {\varphi}_{e}}{\partial \tensor{E}_{e}}
\end{equation}
\begin{equation}
\varrho^{\theta} \frac{\partial {\varphi}_{e}}{\partial \tensor{E}_{e}} = \frac{1}{2} \lambda \left( \theta \right) \left( \tr{\tensor{E}_{e}} \right)^{2} + \mu \left( \theta \right) ~ \tensor{E}_{e} : \tensor{E}_{e}
\end{equation}
where $\lambda \left( \theta \right)$ and $\mu \left( \theta \right)$ are the temperature-dependent Lamé moduli. It follows that
\begin{equation}
\tensor{S}_{e} = \tensorfour{C} \left( \tensor{u}, \theta \right) : \tensor{E}_{e}
\end{equation}
into $\tensor{S} = \vartheta \tensor{S}_{e}$, the stress response becomes
\begin{equation}
\tensor{S} = \frac{1}{\vartheta} \left[ \lambda \left( \tr{\tensor{E}} \right) \tensor{I} + 2 \mu \tensor{E} \right] - \frac{3}{2} \left[ \vartheta - \frac{1}{\vartheta} \right] \kappa \tensor{I},
\end{equation}
where $\kappa$ refers to the temperature-dependent bulk modulus. This is an exact expression for the thermoelastic stress response in the case of quadratic representation of $\varphi_e$ in terms of the finite elastic strain $\tensor{E}_{e}$. If the Lamé moduli are taken to be temperature-independent, and if the approximation $\vartheta \approx 1 + \alpha^{r} \left( \theta - \theta^{r} \right)$ is used ($\alpha^{r}$ being the coefficient of linear thermal expansion at $\theta = \theta^{r}$), this reduces to
\begin{equation}
\tensor{S} = \lambda^{r} \left( \tr{\tensor{E}} \right) \tensor{I} + 2 \mu^{r} \tensor{E} - 3 \alpha^{r} \left( \theta - \theta^{r} \right) \kappa^{r} \tensor{I}
\end{equation}
### Entropy expression
In the case of quadratic strain energy representation, there is a relationship $\varrho^r {\varphi}_{e} = \vartheta ^ 3 ~ \tensor{S}_{e} : \tensor{E}_{e}/2 $, so that
\begin{equation}
\varrho^r \frac{\partial {\varphi}_{e}}{\partial \theta} = \frac{3}{2} \vartheta^2 \frac{\text{d} \varphi_{\theta}}{\dthe} \tensor{S}_{e} : \tensor{E}_{e} + \frac{1}{2} \vartheta^3 \frac{\partial \tensor{S}_{e}}{\partial \theta} : \tensor{E}_{e}
\end{equation}
\begin{equation}
\varrho^r \frac{\partial {\varphi}_{e}}{\partial \theta} = \frac{3}{2} \alpha \left[ \tensor{S} : \tensor{E} - \frac{1}{2} \left( \vartheta^2 - 1 \right) ~ \tr{\tensor{S}} \right] + \frac{1}{2} \vartheta^3 \frac{\partial {\tensor{S}}_{e}}{\partial \theta} : \tensor{E}_{e}
\end{equation}
The coefficient of thermal expansion $\alpha$ can be readily verified that
\begin{equation}
\vartheta \frac{\partial {\tensor{S}}_{e}}{\partial \theta} = \frac{\partial {\tensor{S}}}{\partial \theta} + \alpha \left( \tensor{S} + 3 \vartheta \kappa \tensor{I} \right)
\end{equation}
\begin{equation}
\vartheta^{3} \frac{\partial {\tensor{S}}_{e}}{\partial \theta} : \tensor{E}_{e} = \frac{\partial {\tensor{S}}}{\partial \theta} : \left[ \tensor{E} - \frac{\left( \vartheta^{2} - 1 \right)}{2} ~ \tensor{I} \right] + \alpha \left[ \tensor{S} : \tensor{E} + \frac{\left( 1 + \vartheta^{2} \right)}{2} \tr{\tensor{S}} \right]
\end{equation}
\begin{equation}
\varrho^r \frac{\partial {\varphi}_{e}}{\partial \theta} = 2 \alpha ~ \tensor{S} : \tensor{E} + \frac{\alpha \left( 2 - \vartheta^{2} \right)}{2} \tr{\tensor{S}} + \frac{1}{2} \frac{\partial {\tensor{S}}}{\partial \theta} : \left[ \tensor{E} - \frac{\left( \vartheta^{2} - 1 \right)}{2} ~ \tensor{I} \right]
\end{equation}
\begin{equation}
\eta = \frac{1}{2 \varrho^r} \left[ 3 \vartheta \alpha \kappa \tensor{I} - \frac{\partial {\tensor{S}}}{\partial \theta} \right] : \left[ \tensor{E} - \frac{\left( \vartheta^{2} - 1 \right)}{2} ~ \tensor{I} \right] - \frac{\text{d} \varphi_{\theta}}{\dthe}
\end{equation}
Recalling the standard expression for the latent heat $\tensor{\varepsilon}$, we finally have
\begin{equation}
\eta = \frac{1}{2} \left( \frac{\tensor{\varepsilon}}{\theta} + \frac{\vartheta \alpha \kappa}{\varrho^r} \tensor{I} \right) : \left( \tensor{E} - \frac{\left( \vartheta^{2} - 1 \right)}{2} ~ \tensor{I} \right) - \frac{\text{d} \varphi_{\theta}}{\dthe}
\end{equation}
This is an exact expression for the entropy $\eta$ within the approximation used for the elastic strain energy function. The second-order tensor of the latent heat $\tensor{\varepsilon}$ can be calculated as
\begin{equation}
\tensor{\varepsilon} = - \frac{\theta}{\varrho^r} \frac{\partial {\tensor{S}}}{\partial \theta} = - \frac{\theta}{\varrho^r} \left( \vartheta \frac{\partial {\tensor{S}}}{\partial \theta} -\alpha \left( \tensor{S} + 3 \vartheta \kappa \tensor{I} \right) \right)
\end{equation}
which gives
\begin{equation}
\tensor{\varepsilon} = \frac{\theta}{\varrho^r} \left( \alpha \left( \tensor{S} + 3 \vartheta \kappa \tensor{I} \right) - \frac{1}{\vartheta} \frac{\text{d} \tensorfour{C}}{\dthe} : \left( \tensor{E} - \frac{\left( \vartheta^{2} - 1 \right)}{2} ~ \tensor{I} \right) \right)
\end{equation}
If the elastic moduli are independent of the temperature, and if the stress components are much smaller than the elastic bulk modulus, then the latent heat becomes $\tensor{\varepsilon} = 3 \vartheta \alpha \theta \kappa \tensor{I} / {\varrho^r}$, while the entropy expression reduces to
\begin{equation}
\eta = \frac{3}{\varrho^r} \vartheta \alpha \kappa \left( \tr{\tensor{E}} - \frac{3}{2} \left( \vartheta^2 - 1 \right) \right) - \frac{\text{d} \varphi_{\theta}}{\dthe}
\end{equation}
The function $\varphi_{\theta}$ can be selected according to experimental data for the specific heat $c_{E} = \theta \partial \eta / \partial \theta$. For example, if we take
\begin{equation}
\varphi_{\theta} = -\frac{1}{2} \left( \frac{c_{E}}{\theta^r} + \frac{9 \left( \alpha^r \right)^2 \kappa^r}{\varrho^r} \right) \left( \theta - \theta^r \right)^2
\end{equation}
then becomes
\begin{equation}
\eta = \frac{3}{\varrho^r} \alpha^r \kappa^r \tr{\tensor{E}} + \frac{c_{E}}{\theta^r} \left( \theta - \theta^r \right)
\end{equation}
which is in agreement with the classical result from the linearized theory of thermoelasticity. The approximations $\alpha \approx \alpha^r$ and $\vartheta \approx 1 + \alpha^r \left( \theta - \theta^r \right)$ are used in the above derivation.
| github_jupyter |
```
import pandas as pd
import numpy as np
import pickle
from joblib import dump, load
import matplotlib.pyplot as plt
def read_data_small():
    """Load the small data set: train features, test features, train labels."""
    features_train = pd.read_csv("data_small/X_train_small.csv")
    features_test = pd.read_csv("data_small/X_test_small.csv")
    # The labels file has no header row; column 0 holds the class labels.
    labels_train = np.asarray(pd.read_csv("data_small/y_train_small.csv", header=None)[0])
    return features_train, features_test, labels_train
def read_data_big():
    """Load the big data set: train features, test features, train labels."""
    features_train = pd.read_csv("data_big/X_train_big.csv")
    features_test = pd.read_csv("data_big/X_test_big.csv")
    # The labels file has no header row; column 0 holds the class labels.
    labels_train = np.asarray(pd.read_csv("data_big/y_train_big.csv", header=None)[0])
    return features_train, features_test, labels_train
def read_data():
    """Load the default data set: train features, test features, train labels."""
    features_train = pd.read_csv("data/X_train.csv")
    features_test = pd.read_csv("data/X_test.csv")
    # The labels file has no header row; column 0 holds the class labels.
    labels_train = np.asarray(pd.read_csv("data/y_train.csv", header=None)[0])
    return features_train, features_test, labels_train
```
# Visualization
```
# Load the small data set and inspect the label distribution over time.
X_train, X_test, y_train = read_data_small()
X_train['y_label'] = y_train
# Time elapsed since the previous event on the same order id.
X_train["timeSinceLastTradeSameOrder"] = X_train[["timestamp","orderId"]].groupby("orderId").diff()
# Rapid (<= 45) non-SYSTEM re-edits of the same order are relabelled class 3.
X_train.loc[(X_train['timeSinceLastTradeSameOrder']<=45) & (X_train['source']!='SYSTEM'),'y_label']= 3
plot1 = X_train[['timestamp','y_label']]
plot1 = plot1[plot1.y_label!=0]  # drop the benign class for readability
plot1a = plot1[plot1.y_label==1]
plot1b = plot1[plot1.y_label==2]
plot1c = plot1[plot1.y_label==3]
plt.scatter(plot1a['timestamp'],plot1a['y_label'],color='red')
plt.scatter(plot1b['timestamp'],plot1b['y_label'],color='blue')
plt.scatter(plot1c['timestamp'],plot1c['y_label'],color='green')
plt.show()
# NOTE(review): format_data, scale and LogisticRegression are defined /
# imported further down in the notebook — this cell only works after those
# later cells have been executed at least once.
X_clean = format_data(pd.concat([X_train, X_test]))
X_train_clean = X_clean.iloc[:X_train.shape[0],:]
X_test_clean = X_clean.iloc[X_train.shape[0]:,:]
X_train_clean_scaled = scale(X_train_clean)
X_test_clean_scaled = scale(X_test_clean)
# fit classifier
clf = LogisticRegression(random_state=0, class_weight='balanced').fit(X_train_clean_scaled, y_train)
y_train_prob_pred = clf.predict_proba(X_train_clean_scaled)
y_test_prob_pred = clf.predict_proba(X_test_clean_scaled)
# Attach per-class probabilities used by the pie-chart cells below.
X_train['y_pred_0'] = y_train_prob_pred[:,0]
X_train['y_pred_1'] = y_train_prob_pred[:,1]
X_train['y_pred_2'] = y_train_prob_pred[:,2]
```
Example for user 'KYPPWBZJQ'
```
def _plot_class_pie(ex):
    """Pie chart of the four class probabilities for one prediction row."""
    labels = 'class0', 'class1', 'class2', 'class3'
    # Class-3 probability is the remainder: the classifier only outputs 0-2.
    sizes = [ex.y_pred_0, ex.y_pred_1, ex.y_pred_2,
             1 - ex.y_pred_0 - ex.y_pred_1 - ex.y_pred_2]
    explode = (0, 0.1, 0, 0)  # only "explode" the 2nd slice (class1)
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
            shadow=True, startangle=90)
    ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    plt.show()

# Rows 3-6 for user 'KYPPWBZJQ'; the 4x duplicated plotting code is now
# factored into _plot_class_pie.
ex1 = X_train[X_train['endUserRef']=='KYPPWBZJQ'].iloc[3]
_plot_class_pie(ex1)
ex2 = X_train[X_train['endUserRef']=='KYPPWBZJQ'].iloc[4]
_plot_class_pie(ex2)
ex3 = X_train[X_train['endUserRef']=='KYPPWBZJQ'].iloc[5]
_plot_class_pie(ex3)
ex4 = X_train[X_train['endUserRef']=='KYPPWBZJQ'].iloc[6]
_plot_class_pie(ex4)
```
Example for user 'AIWZOZZIY'
```
def _plot_class_pie(ex):
    """Pie chart of the four class probabilities for one prediction row."""
    labels = 'class0', 'class1', 'class2', 'class3'
    # Class-3 probability is the remainder: the classifier only outputs 0-2.
    sizes = [ex.y_pred_0, ex.y_pred_1, ex.y_pred_2,
             1 - ex.y_pred_0 - ex.y_pred_1 - ex.y_pred_2]
    explode = (0, 0.1, 0, 0)  # only "explode" the 2nd slice (class1)
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
            shadow=True, startangle=90)
    ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    plt.show()

# Rows 5 and 6 for user 'AIWZOZZIY'; duplicated plotting code factored into
# _plot_class_pie (redefining it here keeps this cell self-contained).
ex1 = X_train[X_train['endUserRef']=='AIWZOZZIY'].iloc[5]
_plot_class_pie(ex1)
ex2 = X_train[X_train['endUserRef']=='AIWZOZZIY'].iloc[6]
_plot_class_pie(ex2)
# Per-user share of each operation type; keep only the CANCEL share per user.
cancel_ratio = X_train[['endUserRef','operation','price']].groupby(['endUserRef','operation']).count()
cancel_ratio = cancel_ratio/cancel_ratio.groupby(['endUserRef']).sum()
cancel_ratio = cancel_ratio.reset_index()
cancel_ratio = cancel_ratio[cancel_ratio.operation == 'CANCEL'].rename(columns={'price':'cancel_ratio'}).reset_index().drop(['index','operation'], axis=1)
cancel_ratio = cancel_ratio.sort_values(by='cancel_ratio',ascending=False)
cancel_ratio
np.arange(0,1,0.2)  # NOTE(review): result is unused — leftover scratch work?
plt.rcdefaults()
fig, ax = plt.subplots()
# Users ranked 9-18 by cancel ratio, shown as percentages.
ax.barh(cancel_ratio[8:18]['endUserRef'],cancel_ratio[8:18]['cancel_ratio']*100)
ax.set_xlabel('Cancel Percentage')
ax.set_title('User vs. Cancel Ratio')
plt.show()
# Split labels back out and add per-user inter-event timing features.
y_train = X_train['y_label']
X_train = X_train.drop(['y_label'],axis=1)
X_train["timeSinceLastTrade"] = X_train[["timestamp","endUserRef"]].groupby("endUserRef").diff()
X_test["timeSinceLastTrade"] = X_test[["timestamp","endUserRef"]].groupby("endUserRef").diff()
# Re-run the same relabelling on the full data set and persist it to disk.
X_train, X_test, y_train = read_data()
X_train['y_label'] = y_train
X_train["timeSinceLastTradeSameOrder"] = X_train[["timestamp","orderId"]].groupby("orderId").diff()
X_train.loc[(X_train['timeSinceLastTradeSameOrder']<=45) & (X_train['source']!='SYSTEM'),'y_label']= 3
y_train = X_train['y_label']
X_train = X_train.drop(['y_label'],axis=1)
X_train["timeSinceLastTrade"] = X_train[["timestamp","endUserRef"]].groupby("endUserRef").diff()
X_test["timeSinceLastTrade"] = X_test[["timestamp","endUserRef"]].groupby("endUserRef").diff()
pd.DataFrame(X_train).to_csv("bigLabeledData.csv")
def preprocess_label(X_train, y_train):
    """Relabel rapid same-order edits as class 3 and add a timing feature.

    A non-SYSTEM event arriving within 45 time units of the previous event
    on the same orderId is relabelled class 3 (suspected spoofing).
    Returns the augmented (X_train, y_train).

    NOTE(review): the last line reads and mutates the *global* X_test — it
    is not a parameter of this function. Confirm this side effect is
    intended before refactoring.
    """
    X_train['y_label'] = y_train
    # Gap since the previous event on the same order.
    X_train["timeSinceLastTradeSameOrder"] = X_train[["timestamp","orderId"]].groupby("orderId").diff()
    X_train.loc[(X_train['timeSinceLastTradeSameOrder']<=45) & (X_train['source']!='SYSTEM'),'y_label']= 3
    y_train = X_train['y_label']
    X_train = X_train.drop(['y_label'],axis=1)
    # Gap since the previous event by the same end user.
    X_train["timeSinceLastTrade"] = X_train[["timestamp","endUserRef"]].groupby("endUserRef").diff()
    X_test["timeSinceLastTrade"] = X_test[["timestamp","endUserRef"]].groupby("endUserRef").diff()
    # X_train = X_train.sample(n=200000)
    return X_train,y_train
def balance_data(X_train, X_test, y_train):
    """Relabel the training data, then oversample each class to 100k rows.

    Sampling is with replacement, so minority classes are duplicated;
    the balanced frame is shuffled before the labels are split back out.
    Returns (X_train, X_test, y_train) with a balanced training set.
    """
    X_train, y_train = preprocess_label(X_train, y_train)
    X_train['y_label'] = y_train
    # One 100k resample per class, in class order 0..3.
    per_class = [
        X_train[X_train['y_label'] == cls].sample(n=100000, replace=True)
        for cls in range(4)
    ]
    X_train = pd.concat(per_class)
    X_train = X_train.sample(frac=1).reset_index(drop=True)  # shuffle rows
    y_train = X_train['y_label']
    X_train = X_train.drop(['y_label'], axis=1)
    return X_train, X_test, y_train
# Build a class-balanced training sample from the small data set.
X_train, X_test, y_train = read_data_small()
X_train, X_test, y_train = balance_data(X_train, X_test, y_train)
def format_data(df):
    """Turn raw order/trade events into a purely numeric feature matrix.

    Expects the concatenation of train+test rows so the one-hot column sets
    stay identical for both halves. Encodes binary flags as 0/1, one-hot
    encodes categorical columns, derives per-user lag features, fills all
    remaining NaNs with -1, and drops the raw categorical columns.
    """
    # encode the binaries
    df["isBid"] = df.isBid*1
    df["isBuyer"] = df.isBuyer*1
    df["isAggressor"] = df.isAggressor*1
    df["type"] = (df.type == "ORDER")*1
    df["source"] = (df.source=="USER")*1
    # Keep only the numeric tail of the composite ids ("XX-123" -> "123").
    df["orderId"] = df.orderId.str.split('-').str[-1]
    df["tradeId"] = df.tradeId.str.split('-').str[-1]
    df["bidOrderId"] = df.bidOrderId.str.split('-').str[-1]
    df["askOrderId"] = df.askOrderId.str.split('-').str[-1]
    # encode the multi-label categorical data
    # A missing operation means the event succeeded.
    df['operation'] = df['operation'].fillna('SUCCESS')
    tmp_operation = pd.DataFrame(pd.get_dummies(df.operation), columns=df.operation.unique()[:-1])
    df = pd.concat([df, tmp_operation], axis=1)
    # Previous operation by the same end user (lag-1), one-hot encoded too.
    df['op_before'] = df.groupby(['endUserRef'])['operation'].transform(lambda x:x.shift(1))
    tmp_op_before = pd.DataFrame(pd.get_dummies(df.op_before), columns=df.op_before.unique()[:-1])
    df = pd.concat([df, tmp_op_before], axis=1)
    # Two consecutive CANCELs by the same user.
    df['multiple_cancel'] = ((df['op_before'] == 'CANCEL') & (df['operation'] == 'CANCEL'))*1.0
    df['vol_before'] = df.groupby(['endUserRef'])['volume'].transform(lambda x:x.shift(1))
    df['price_before'] = df.groupby(['endUserRef'])['price'].transform(lambda x:x.shift(1))
    df['bestBid'] = df['bestBid'].fillna(1)
    # NOTE(review): named "midpoint" but computes a volume imbalance scaled
    # by bestBid, not a price midpoint — confirm the intended formula.
    df['midpoint'] = (df.bestBidVolume - df.bestAskVolume)/df.bestBid
    # cancel_ratio = df[['endUserRef','operation','price']].groupby(['endUserRef','operation']).count()
    # cancel_ratio = cancel_ratio/cancel_ratio.groupby(['endUserRef']).sum()
    # cancel_ratio = cancel_ratio.reset_index()
    # cancel_ratio = cancel_ratio[cancel_ratio.operation == 'CANCEL'].rename(columns={'price':'cancel_ratio'}).reset_index().drop(['index','operation'], axis=1)
    # df = df.merge(cancel_ratio, how='outer', on='endUserRef')
    # 1 when the user switched sides (bid <-> ask) vs. their previous event.
    df['isBidBefore'] = df.groupby(['endUserRef'])['isBid'].transform(lambda x:x.shift(1))
    df.loc[df['isBid']!=df['isBidBefore'],'flip'] = 1
    df.loc[df['isBid']==df['isBidBefore'],'flip'] = 0
    df = df.drop(['isBidBefore'],axis=1)
    # categorical data
    tmp_endUserRef = pd.DataFrame(pd.get_dummies(df.endUserRef), columns=df.endUserRef.unique()[:-1])
    df = pd.concat([df, tmp_endUserRef], axis=1)
    tmp_ob = pd.DataFrame(pd.get_dummies(df.obId), columns=df.obId.unique()[:-1])
    df = pd.concat([df, tmp_ob], axis=1)
    # # smartly engineered features can be very useful to improve the classification resutls
    # df["timeSinceLastTrade"] = df[["timestamp","endUserRef"]].groupby("endUserRef").diff()
    # NOTE(review): groupby(...).mean() is indexed by endUserRef, so assigning
    # it to a column of df (integer index) aligns on index and likely yields
    # NaN (later filled with -1); .transform('mean') was probably intended.
    df["averageTradeSize"] = df[["volume","endUserRef"]].groupby("endUserRef").mean()
    # cancel, volume>0, after successful trade
    # NOTE(review): '*' binds tighter than '&', so the *1.0 applies only to
    # the last comparison — parenthesize the whole conjunction if a 0/1
    # float column was intended.
    df['cancelAfterSuccessfulTrade'] = (df['operation'] == 'CANCEL') & (df['volume']>0) & (df['op_before'] == 'SUCCESS')*1.0
    df['isVolumeTooBig'] = (df['volume'] > 3*df['vol_before'])*1.0
    # NOTE(review): same index-alignment issue as averageTradeSize above.
    df["numberEditsOnOrders"] = df[["timestamp", "orderId"]].groupby("orderId").count()
    df = df.fillna(-1)
    df = df.drop(['operation', 'op_before', 'obId', 'member','user','endUserRef'], axis=1)
    return(df)
```
# MODEL
```
# import libraries
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import scale
from sklearn.model_selection import cross_validate
from sklearn.naive_bayes import BernoulliNB
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
def detect_spoofying_log_reg(X_train, X_test, y_train):
    """Balanced logistic regression over the engineered features.

    Returns (train_probabilities, test_probabilities); the fitted model is
    persisted to 'logreg_model.joblib'.
    """
    # Engineer features on train+test together so encodings stay consistent,
    # then split and standardize each half separately.
    combined = format_data(pd.concat([X_train, X_test]))
    n_train = X_train.shape[0]
    train_scaled = scale(combined.iloc[:n_train, :])
    test_scaled = scale(combined.iloc[n_train:, :])

    clf = LogisticRegression(random_state=0, class_weight='balanced').fit(train_scaled, y_train)
    dump(clf, 'logreg_model.joblib')
    return clf.predict_proba(train_scaled), clf.predict_proba(test_scaled)
def detect_spoofying_Bernoulli_NB(X_train, X_test, y_train):
    """Bernoulli naive-Bayes classifier over the engineered features.

    Returns (train_probabilities, test_probabilities); the fitted model is
    persisted to 'bernoulli_nb_model.joblib'.
    """
    # Engineer features on train+test together so encodings stay consistent,
    # then split and standardize each half separately.
    combined = format_data(pd.concat([X_train, X_test]))
    n_train = X_train.shape[0]
    train_scaled = scale(combined.iloc[:n_train, :])
    test_scaled = scale(combined.iloc[n_train:, :])

    nb = BernoulliNB(alpha=1.0, binarize=0.0, fit_prior=True, class_prior=None)
    nb.fit(train_scaled, y_train)
    train_probs = nb.predict_proba(train_scaled)
    test_probs = nb.predict_proba(test_scaled)
    dump(nb, 'bernoulli_nb_model.joblib')
    return train_probs, test_probs
def detect_spoofying_GBC(X_train, X_test, y_train):
    """Gradient-boosting classifier over the engineered features.

    Returns (train_probabilities, test_probabilities); the fitted model is
    persisted to 'gbc.joblib'.
    """
    # Engineer features on train+test together so encodings stay consistent,
    # then split and standardize each half separately.
    combined = format_data(pd.concat([X_train, X_test]))
    n_train = X_train.shape[0]
    train_scaled = scale(combined.iloc[:n_train, :])
    test_scaled = scale(combined.iloc[n_train:, :])

    booster = GradientBoostingClassifier(n_estimators=10, learning_rate=0.5,
                                         max_depth=2, random_state=3)
    booster.fit(train_scaled, y_train)
    train_probs = booster.predict_proba(train_scaled)
    test_probs = booster.predict_proba(test_scaled)
    dump(booster, 'gbc.joblib')
    return train_probs, test_probs
def detect_spoofying_random_forest(X_train, X_test, y_train):
    """Random-forest classifier over the engineered features.

    Returns (train_probabilities, test_probabilities); the fitted model is
    persisted to 'rf_model.joblib'.
    """
    # Engineer features on train+test together so encodings stay consistent,
    # then split and standardize each half separately.
    combined = format_data(pd.concat([X_train, X_test]))
    n_train = X_train.shape[0]
    train_scaled = scale(combined.iloc[:n_train, :])
    test_scaled = scale(combined.iloc[n_train:, :])

    forest = RandomForestClassifier(n_estimators=50, max_depth=10,
                                    min_samples_split=2, random_state=0)
    forest.fit(train_scaled, y_train)
    dump(forest, 'rf_model.joblib')
    return forest.predict_proba(train_scaled), forest.predict_proba(test_scaled)
```
# SCORING
```
from sklearn.metrics import cohen_kappa_score
def score(y_pred, y_true):
    """
    Cohen's kappa between predicted and true class labels.

    y_pred: 2-D array of shape (n_samples, n_classes) with per-class
            probabilities — argmax over axis=1 yields the predicted label
            (the original docstring's "4d array" was inaccurate)
    y_true: a numpy array of true labels
    """
    y_pred_label = np.argmax(y_pred, axis=1)
    return cohen_kappa_score(y_pred_label, y_true)
def wrapper(detect_spoofying):
    """Run an end-to-end spoofing-detection experiment.

    Reads the small data set (swap in read_data_big if you have the
    compute for the full set), balances it, runs the supplied
    detect_spoofying function, and scores the training predictions.
    Returns (train_score, train_probability_matrix, test_probability_matrix).
    """
    X_train, X_test, y_train = read_data_small()
    X_train, X_test, y_train = balance_data(X_train, X_test, y_train)
    # Train the classifier and obtain per-class probability predictions.
    train_probs, test_probs = detect_spoofying(X_train, X_test, y_train)
    # Only the training predictions can be scored here (no test labels).
    train_score = score(train_probs, y_train)
    return train_score, train_probs, test_probs
```
# k-fold cross validation
```
### optional: examples of k-fold cross validation ###
# k-fold cross validation can help you compare the classification models
from sklearn.model_selection import KFold
n = 5 # number of cross-validation folds (original comment said "10 fold", but n is 5)
kf = KFold(n_splits = n)
# X_train, X_test, y_train = read_data_small()
# NOTE(review): X_train / y_train must already be defined by an earlier cell.
kf.get_n_splits(X_train)
print(kf)
kf_scores = pd.DataFrame(np.zeros([n,2]), columns=["train score", "test score"])
rowindex = 0
for train_index, test_index in kf.split(X_train):
    print("TRAIN:", train_index, "TEST:", test_index)
    print(X_train.index)
    print(y_train)
    # Partition the labelled data into this fold's train/test split.
    X_train_kf, X_test_kf = X_train.iloc[train_index], X_train.iloc[test_index]
    y_train_kf, y_test_kf = y_train[train_index], y_train[test_index]
    # Fit a random forest on the fold and get probability predictions.
    y_train_prob_pred_kf, y_test_prob_pred_kf = detect_spoofying_random_forest(X_train_kf, X_test_kf, y_train_kf)
    # Cohen's kappa on the fold's train and held-out partitions.
    score_train_kf = score(y_train_prob_pred_kf, y_train_kf)
    score_test_kf = score(y_test_prob_pred_kf, y_test_kf)
    kf_scores.iloc[rowindex, 0] = score_train_kf
    kf_scores.iloc[rowindex, 1] = score_test_kf
    print(score_train_kf, score_test_kf)
    rowindex += 1
kf_scores
# Full run (read, balance, fit, score) via the wrapper helper.
score_train, y_train_prob_pred, y_test_prob_pred = wrapper(detect_spoofying_random_forest)
score_train
### optional: examples of k-fold cross validation ###
# k-fold cross validation can help you compare the classification models
from sklearn.model_selection import KFold
n = 5 # number of cross-validation folds (original comment said "10 fold", but n is 5)
kf = KFold(n_splits = n)
# X_train, X_test, y_train = read_data_small()
# X_train, X_test, y_train = balance_data(X_train, X_test, y_train)
# NOTE(review): X_train / y_train must already be defined by an earlier cell.
kf.get_n_splits(X_train)
# print(kf)
kf_scores = pd.DataFrame(np.zeros([n,2]), columns=["train score", "test score"])
rowindex = 0
for train_index, test_index in kf.split(X_train):
    print("TRAIN:", train_index, "TEST:", test_index)
    # print(X_train.index)
    # print(y_train)
    X_train_kf, X_test_kf = X_train.iloc[train_index], X_train.iloc[test_index]
    y_train_kf, y_test_kf = y_train[train_index], y_train[test_index]
    # Gradient boosting this time (the previous cell used a random forest).
    y_train_prob_pred_kf, y_test_prob_pred_kf = detect_spoofying_GBC(X_train_kf, X_test_kf, y_train_kf)
    score_train_kf = score(y_train_prob_pred_kf, y_train_kf)
    score_test_kf = score(y_test_prob_pred_kf, y_test_kf)
    kf_scores.iloc[rowindex, 0] = score_train_kf
    kf_scores.iloc[rowindex, 1] = score_test_kf
    print(score_train_kf, score_test_kf)
    rowindex += 1
kf_scores
# Full run via the wrapper, then save the probability matrices to CSV.
score_train, y_train_prob_pred, y_test_prob_pred = wrapper(detect_spoofying_GBC)
score_train
pd.DataFrame(y_train_prob_pred).to_csv("y_train_prob_pred.csv")
pd.DataFrame(y_test_prob_pred).to_csv("y_test_prob_pred.csv")
# NOTE(review): the four lines above are repeated verbatim below (duplicate run).
score_train, y_train_prob_pred, y_test_prob_pred = wrapper(detect_spoofying_GBC)
score_train
pd.DataFrame(y_train_prob_pred).to_csv("y_train_prob_pred.csv")
pd.DataFrame(y_test_prob_pred).to_csv("y_test_prob_pred.csv")
```
# BATCH
```
# NOTE(review): this cell assumes X_test and y_train already exist from
# earlier cells; only X_train is (re)loaded here from the big data set.
X_train = pd.read_csv("bigLabeledData.csv")
# Format train+test together so both share one feature encoding, then split.
X_clean = format_data(pd.concat([X_train, X_test]))
X_train_clean = X_clean.iloc[:X_train.shape[0],:]
X_test_clean = X_clean.iloc[X_train.shape[0]:,:]
X_train_clean_scaled = scale(X_train_clean)
X_test_clean_scaled = scale(X_test_clean)
# fit classifier
clf = GradientBoostingClassifier(n_estimators=35, learning_rate=0.5,
    max_depth=3, random_state=3).fit(X_train_clean_scaled, y_train)
# clf = LogisticRegression(random_state=0, class_weight='balanced').fit(X_train_clean_scaled, y_train)
dump(clf, 'gbc.joblib')  # persist the fitted model
y_train_prob_pred = clf.predict_proba(X_train_clean_scaled)
y_test_prob_pred = clf.predict_proba(X_test_clean_scaled)
# Save the probability matrices for submission/inspection.
pd.DataFrame(y_train_prob_pred).to_csv("y_train_prob_pred.csv")
pd.DataFrame(y_test_prob_pred).to_csv("y_test_prob_pred.csv")
```
# LSTM
```
X_train_clean_scaled.shape
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# NOTE(review): this scratch cell mixes PyTorch and Keras APIs and will not
# run as written — torch's nn.Sequential has no .add()/.compile() methods,
# and LSTM, Dense, Sequential, n_steps and n_features are Keras names /
# variables that are not defined anywhere in this section.
model = nn.Sequential()
model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# define model (Keras-style duplicate of the above, equally undefined here)
model = Sequential()
model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
```
| github_jupyter |
```
# Imports for the Titanic exploratory analysis (modules used unaliased below).
import numpy
import pandas
import matplotlib.pyplot
import matplotlib.style
import copy
matplotlib.style.use('ggplot')
%matplotlib inline
# Load the passenger table; later cells use its 'Survived', 'Sex',
# 'PClass' and 'Age' columns.
titanic_data = pandas.read_csv('Titanic.csv', index_col=None)
```
How many passengers do we know of?
```
passengers_num = titanic_data.shape[0] # number of rows = number of known passengers
print(passengers_num)
```
How many of them survived?
```
# Passengers whose 'Survived' flag is truthy.
survived = titanic_data[titanic_data.loc[:, 'Survived'] == True] #rows from Titanic.csv, column 'Survived'
total_survived_num = survived.shape[0] #number of survivors
survived_percent = total_survived_num / (passengers_num / 100) # percentage of all passengers
print('{} out of {} (~{:.5f} %)'.format(total_survived_num, passengers_num, survived_percent))
```
How many men survived? How many women?
```
# Split passengers by sex; women are counted as everyone who is not male.
men = titanic_data[titanic_data.loc[:, 'Sex'] == 'male']
men_num = men.shape[0]
women_num = passengers_num - men_num
men_survived = men[men.loc[:, 'Survived'] == True]
men_survived_num = men_survived.shape[0]
# total_survived_num comes from the previous cell.
women_survived_num = total_survived_num - men_survived_num
men_survived_percent = men_survived_num / (men_num / 100)
women_survived_percent = women_survived_num / (women_num / 100)
print('Men survived: {} out of {} (~{:.5f} %)'.format(men_survived_num, men_num, men_survived_percent))
print('Women survived: {} out of {} (~{:.5f} %)'.format(women_survived_num, women_num, women_survived_percent))
```
Stats for each class:
```
# Distinct ticket classes; the data contains a stray '*' value inspected below.
classes = titanic_data.PClass.unique()
print(classes)
titanic_data[titanic_data.loc[:, 'PClass'] == '*']
```
Now it seems that "*" class is only one person (that did not survive), so we should probably filter it out.
```
# Drop the spurious '*' class (a single passenger, inspected above).
classes = numpy.delete(classes, numpy.where(classes=='*'))
print(classes)
cols = ['class', 'survived', 'deceased', 'total', 'percent of class', 'percent of all survivors']
# Per-class survival summary, one row per passenger class.
# DataFrame.append was removed in pandas 2.0, so collect row dicts
# and build the frame once at the end.
class_rows = []
for cls in classes:
    total = titanic_data[titanic_data.loc[:, 'PClass'] == cls]
    total_num = total.shape[0]
    survived = total[total.loc[:, 'Survived'] == True]
    survived_num = survived.shape[0]
    deceased_num = total_num - survived_num
    percent_class = survived_num / (total_num / 100)           # % of this class that survived
    percent_total = survived_num / (total_survived_num / 100)  # % of all survivors in this class
    class_rows.append({'class' : cls, 'survived' : survived_num,
                       'deceased' : deceased_num, 'total' : total_num,
                       'percent of class' : percent_class,
                       'percent of all survivors' : percent_total})
classes_data = pandas.DataFrame(class_rows, columns=cols)
classes_data
```
Survival rate based on age:
```
max_age = titanic_data.loc[:, 'Age'].max()
# One row per decade of age plus a final row for passengers with no age data.
# DataFrame.append was removed in pandas 2.0, so collect dicts and build once.
age_rows = []
for decade in range(0, int(max_age), 10):
    total_decade = titanic_data[(titanic_data.loc[:, 'Age'] >= decade) &
                                (titanic_data.loc[:, 'Age'] < decade + 10)]
    survived_decade = total_decade[total_decade.loc[:, 'Survived'] == True]
    percent = survived_decade.shape[0] / (total_survived_num / 100)
    age_rows.append({'decade' : '{}-{}'.format(decade, decade + 9),
                     'survived' : survived_decade.shape[0],
                     'total' : total_decade.shape[0],
                     'percent of all survivors' : percent})
# Passengers with unknown age get their own bucket.
total_na = titanic_data[titanic_data.loc[:, 'Age'].isnull()]
survived_na = total_na[total_na.loc[:, 'Survived'] == True]
percent_na = survived_na.shape[0] / (total_survived_num / 100)
age_rows.append({'decade' : 'No age data',
                 'survived' : survived_na.shape[0],
                 'total' : total_na.shape[0],
                 'percent of all survivors' : percent_na})
survived_ages = pandas.DataFrame(age_rows, columns=['decade', 'survived', 'total', 'percent of all survivors'])
# Overlay total (red) and survived (blue) counts per age group.
survived_ages['total'].plot.bar(color='r')
survived_ages['survived'].plot.bar(color='b')
matplotlib.pyplot.legend(loc='upper left')
matplotlib.pyplot.title('Survivors based on age group')
matplotlib.pyplot.xlabel('Age groups')
matplotlib.pyplot.ylabel('Number of passengers')
matplotlib.pyplot.xticks(list(range(len(survived_ages))), survived_ages['decade'].values, rotation = 'vertical')
matplotlib.pyplot.show()
survived_ages['percent of all survivors'].plot.bar()
matplotlib.pyplot.title('% of all survivors ({}) based on age group'.format(total_survived_num))
matplotlib.pyplot.xlabel('Age groups')
matplotlib.pyplot.ylabel('% of all survivors')
matplotlib.pyplot.xticks(list(range(len(survived_ages))), survived_ages['decade'].values, rotation = 'vertical')
matplotlib.pyplot.show()
```
Relationship between passenger's age and class:
```
# Cross-tabulate passenger counts (and within-decade percentages) by age
# decade and class.  classes_nums / classes_percent are reused as scratch
# row dicts; copies are collected and the frames built once at the end,
# since DataFrame.append was removed in pandas 2.0.
classes_nums = dict((elem, 0) for elem in classes)
classes_percent = copy.deepcopy(classes_nums)
num_rows = []
percent_rows = []
for decade in range(0, int(max_age), 10):
    total_decade = titanic_data[(titanic_data.loc[:, 'Age'] >= decade) &
                                (titanic_data.loc[:, 'Age'] < decade + 10)]
    classes_nums['decade'] = '{}-{}'.format(decade, decade + 9)
    classes_percent['decade'] = '{}-{}'.format(decade, decade + 9)
    for cls in classes:
        total_cls = total_decade[total_decade.loc[:, 'PClass'] == cls]
        classes_nums[cls] = total_cls.shape[0]
        classes_percent[cls] = total_cls.shape[0] / (total_decade.shape[0] / 100)
    # Snapshot the scratch dicts; they are mutated again next iteration.
    num_rows.append(dict(classes_nums))
    percent_rows.append(dict(classes_percent))
age_class = pandas.DataFrame(num_rows, columns=list(classes) + ['decade'])
age_class_percent = pandas.DataFrame(percent_rows, columns=list(classes) + ['decade'])
```
Number of passengers in each class by age:
```
# Grouped bar chart: passenger counts per class for each age decade.
age_class.plot.bar()
matplotlib.pyplot.xticks(list(range(len(age_class))), age_class['decade'].values, rotation = 'vertical')
matplotlib.pyplot.title('Number of passengers in each class by age')
matplotlib.pyplot.xlabel('Age groups')
matplotlib.pyplot.ylabel('Number of passengers')
matplotlib.pyplot.show()
```
Distribution of passengers in each age group between classes (meaning in class *C* was *y %* of passengers from age group *x*):
```
# Grouped bar chart: within each age decade, % of passengers in each class.
age_class_percent.plot.bar()
matplotlib.pyplot.xticks(list(range(len(age_class_percent))), age_class_percent['decade'].values, rotation = 'vertical')
matplotlib.pyplot.title('Distribution of passengers in each age group between classes')
matplotlib.pyplot.xlabel('Age groups')
matplotlib.pyplot.ylabel('% of passengers')
matplotlib.pyplot.show()
```
If we want to plot data for passangers without known age:
```
# Add a 'No age data' row for passengers with unknown age, then re-plot.
# pandas.concat replaces DataFrame.append (removed in pandas 2.0).
classes_nums['decade'] = 'No age data'
classes_percent['decade'] = 'No age data'
for cls in classes:
    total_cls = total_na[total_na.loc[:, 'PClass'] == cls]
    classes_nums[cls] = total_cls.shape[0]
    classes_percent[cls] = total_cls.shape[0] / (total_na.shape[0] / 100)
age_class_na = pandas.concat([age_class, pandas.DataFrame([classes_nums])], ignore_index=True)
age_class_percent_na = pandas.concat([age_class_percent, pandas.DataFrame([classes_percent])], ignore_index=True)
age_class_na.plot.bar()
matplotlib.pyplot.xticks(list(range(len(age_class_na))), age_class_na['decade'].values, rotation = 'vertical')
matplotlib.pyplot.title('Number of passengers in each class by age\n (including passengers with unknown age)')
matplotlib.pyplot.xlabel('Age groups')
matplotlib.pyplot.ylabel('Number of passengers')
matplotlib.pyplot.show()
age_class_percent_na.plot.bar()
matplotlib.pyplot.xticks(list(range(len(age_class_percent_na))), age_class_percent_na['decade'].values, rotation = 'vertical')
matplotlib.pyplot.title('Distribution of passengers in each age group between classes\n (including passengers with unknown age)')
matplotlib.pyplot.xlabel('Age groups')
matplotlib.pyplot.ylabel('% of passengers')
matplotlib.pyplot.show()
```
| github_jupyter |
# TAP Affect
This notebook was used as part of the [HETA project](http://heta.io) to experiment with [TAP](https://github.com/heta-io/tap) affect thresholds. It makes use of the [TapCliPy](https://github.com/heta-io/tapclipy) python client for TAP to call the `affectExpressions` query.
To use this notebook for your own tests, you will need:
1. The URL of your TAP server
2. Save the text files that you want to work with into the same directory as this notebook
```
# Install the TAP Python Client (notebook shell magic)
!pip install 'tapclipy>=0.1.4'
# Import the client library used to talk to the TAP GraphQL API
from tapclipy import tap_connect
```
### Connect to TAP and retrieve the current schema
As TAP exposes a GraphQL API, there can be changes in the schema over time. After connecting to TAP, this schema needs to be loaded into the client.
```
# Set the url for your TAP server
tapURL = 'http://localhost:9000'
# Create TAP Connection
tap = tap_connect.Connect(tapURL)
# Load the current GraphQL schema from the server
tap.fetch_schema()
# Print out schema fields; 'qtype' avoids shadowing the builtin 'type'
for query, qtype in tap.schema_query_name_types().items():
    print("{} >> {}".format(query, qtype))
```
### Setup Query
The client includes built in queries. We can either use the client query for `affectExpressions` or we can create our own. If creating from scratch, it wise to use the client query as a template to ensure the query is properly formed.
```
# Get the built-in 'affectExpressions' query template from the client
query = tap.query('affectExpressions')
print(query)
```
### Helper functions
To make it easier to run repeated tests on different files, we can setup some helper functions.
```
# Open a text file and return it as a string
def readFile(filename):
    """Read the whole of *filename* and return its contents as a string.

    Uses a context manager so the file handle is closed even if reading
    raises (the original left the handle open on error).
    """
    with open(filename) as file:
        return file.read()
# Smoke test: read a sample file (must exist next to this notebook)
myText = readFile('dummy-affect.txt')
myText
# Get Affect Analytics from TAP and format the results
def textAffect(text, arousal=0.0, valence=0.0, dominance=0.0):
    """Query TAP's affectExpressions endpoint for *text* and print matches.

    arousal / valence / dominance are threshold parameters sent to TAP.
    Prints the match ratio, then each matched word with its three affect
    scores.  Relies on the module-level `tap` connection and `query`.
    """
    # Thresholds are sent as a JSON-formatted parameter string.
    parameters = '{"valence":'+str(valence)+',"arousal":'+str(arousal)+',"dominance":'+str(dominance)+'}'
    json = tap.analyse_text(query, text, parameters)
    analytics = json['data']['affectExpressions']['analytics']
    # Keep only entries that matched at least one affect term, then flatten
    # the per-entry lists into one list of matches.
    filtered = [x['affect'] for x in analytics if x['affect']]
    flattened = [item for sublist in filtered for item in sublist]
    numFiltered = len(flattened)
    # Naive whitespace tokenisation; good enough for a rough percentage.
    words = text.split(' ')
    numWords = len(words)
    percentAffect = numFiltered/numWords*100
    print("{0} words matched out of {1} total words in the text - {2} percent".format(numFiltered,numWords,percentAffect))
    for t in flattened:
        print(t['text'],'\t[a] ',t['arousal'],' [v] ',t['valence'],' [d] ',t['dominance'])
# Test the function on our text (arousal threshold only)
textAffect(myText,arousal=4.95)
```
### Do analysis
Load the file, then check the results with different values of `arousal`, `valence`, and `dominance`.
```
# Read the sample file once, then compare threshold settings
text1 = readFile('dummy-affect.txt')
# Moderate thresholds
textAffect(text1,arousal=4.0,valence=5.0,dominance=0.0)
# Stricter thresholds: fewer words should match
textAffect(text1,arousal=5.0,valence=7.0,dominance=5.0)
```
| github_jupyter |
# Figure 3: Cluster-level consumptions
This notebook generates individual panels of Figure 3 in "Combining satellite imagery and machine learning to predict poverty".
```
# fig_utils is a project-local module providing predict_consumption.
from fig_utils import *
import matplotlib.pyplot as plt
import time
%matplotlib inline
```
## Predicting consumption expeditures
The parameters needed to produce the plots are as follows:
- country: Name of country being evaluated as a lower-case string
- country_path: Path of directory containing LSMS data corresponding to the specified country
- dimension: Number of dimensions to reduce image features to using PCA. Defaults to None, which represents no dimensionality reduction.
- k: Number of cross validation folds
- k_inner: Number of inner cross validation folds for selection of regularization parameter
- points: Number of regularization parameters to try
- alpha_low: Log of smallest regularization parameter to try
- alpha_high: Log of largest regularization parameter to try
- margin: Adjusts margins of output plot
The data directory should contain the following 5 files for each country:
- conv_features.npy: (n, 4096) array containing image features corresponding to n clusters
- consumptions.npy: (n,) vector containing average cluster consumption expenditures
- nightlights.npy: (n,) vector containing the average nightlights value for each cluster
- households.npy: (n,) vector containing the number of households for each cluster
- image_counts.npy: (n,) vector containing the number of images available for each cluster
Exact results may differ slightly with each run due to randomly splitting data into training and test sets.
#### Panel A
```
# Plot parameters
country = 'nigeria'
country_path = '../data/LSMS/nigeria/'
dimension = None   # no PCA dimensionality reduction
k = 5              # outer cross-validation folds
k_inner = 5        # inner folds for regularization selection
points = 10        # number of regularization parameters to try
alpha_low = 1      # log of smallest regularization parameter
alpha_high = 5     # log of largest regularization parameter
margin = 0.25
# Plot single panel
t0 = time.time()
X, y, y_hat, r_squareds_test = predict_consumption(country, country_path,
                                                   dimension, k, k_inner, points, alpha_low,
                                                   alpha_high, margin)
t1 = time.time()
# Call form of print works on both Python 2 and 3 (the original used the
# Python-2-only print statement, a SyntaxError on Python 3).
print('Finished in {} seconds'.format(t1 - t0))
```
#### Panel B
```
# Plot parameters
country = 'tanzania'
country_path = '../data/LSMS/tanzania/'
dimension = None   # no PCA dimensionality reduction
k = 5              # outer cross-validation folds
k_inner = 5        # inner folds for regularization selection
points = 10        # number of regularization parameters to try
alpha_low = 1      # log of smallest regularization parameter
alpha_high = 5     # log of largest regularization parameter
margin = 0.25
# Plot single panel
t0 = time.time()
X, y, y_hat, r_squareds_test = predict_consumption(country, country_path,
                                                   dimension, k, k_inner, points, alpha_low,
                                                   alpha_high, margin)
t1 = time.time()
# print() call form is valid on both Python 2 and 3.
print('Finished in {} seconds'.format(t1 - t0))
```
#### Panel C
```
# Plot parameters
country = 'uganda'
country_path = '../data/LSMS/uganda/'
dimension = None   # no PCA dimensionality reduction
k = 5              # outer cross-validation folds
k_inner = 5        # inner folds for regularization selection
points = 10        # number of regularization parameters to try
alpha_low = 1      # log of smallest regularization parameter
alpha_high = 5     # log of largest regularization parameter
margin = 0.25
# Plot single panel
t0 = time.time()
X, y, y_hat, r_squareds_test = predict_consumption(country, country_path,
                                                   dimension, k, k_inner, points, alpha_low,
                                                   alpha_high, margin)
t1 = time.time()
# print() call form is valid on both Python 2 and 3.
print('Finished in {} seconds'.format(t1 - t0))
```
#### Panel D
```
# Plot parameters
country = 'malawi'
country_path = '../data/LSMS/malawi/'
dimension = None   # no PCA dimensionality reduction
k = 5              # outer cross-validation folds
k_inner = 5        # inner folds for regularization selection
points = 10        # number of regularization parameters to try
alpha_low = 1      # log of smallest regularization parameter
alpha_high = 5     # log of largest regularization parameter
margin = 0.25
# Plot single panel
t0 = time.time()
X, y, y_hat, r_squareds_test = predict_consumption(country, country_path,
                                                   dimension, k, k_inner, points, alpha_low,
                                                   alpha_high, margin)
t1 = time.time()
# print() call form is valid on both Python 2 and 3.
print('Finished in {} seconds'.format(t1 - t0))
```
| github_jupyter |
<div align="center">
<h1><strong>Herencia</strong></h1>
<strong>Hecho por:</strong> Juan David Argüello Plata
</div>
## __Introducción__
<div align="justify">
La relación de herencia facilita la reutilización de código brindando una base de programación para el desarrollo de nuevas clases.
</div>
## __1. Superclase y subclases__
En la relación de herencia entre dos clases, se cataloga a las clases como _padre_ e _hija_. La clase hija (subclase) _hereda_ los __métodos__ y __atributos__ de la clase padre. Las subclases (clases hijas) emplean el siguiente formato:
```
class clase_hija (clase_padre):
//Atributos
...
//Métodos
...
```
La clase padre suele usarse como un formato para la construcción de clases hijas. Un ejemplo de ello es la _calculadora científica_, que se puede catalogar como una subclase de la calculadora convencional.
```
# Conventional calculator
class Calculadora:
    """Four-function calculator: add, subtract, multiply, divide."""

    def suma(self, x, y):
        """Return x + y."""
        return x + y

    def resta(self, x, y):
        """Return x - y."""
        return x - y

    def mult(self, x, y):
        """Return x * y."""
        return x * y

    def div(self, x, y):
        """Return x / y (true division; ZeroDivisionError if y == 0)."""
        return x / y
```
Además de las operaciones básicas, la clase de `Calculadora_cientifica` debería poder calcular el promedio de una lista numérica y la desviación estándar.
---
<div align="center">
<strong>Promedio</strong>
$$
\begin{equation}
\bar{x} = \frac{\sum _{i=0} ^n x_i}{n}
\end{equation}
$$
<strong>Desviación estándar</strong>
$$
\begin{equation}
s = \sqrt{ \frac{\sum _{i=0} ^n \left( x_i - \bar{x} \right)}{n-1} }
\end{equation}
$$
</div>
```
# Scientific calculator
class Calculadora_cientifica(Calculadora):
    """Extends the basic calculator with mean and sample standard deviation."""

    def promedio(self, numeros):
        """Return the arithmetic mean of the list."""
        return sum(numeros) / len(numeros)

    def desvest(self, numeros):
        """Return the sample standard deviation (n - 1 denominator)."""
        media = self.promedio(numeros)
        suma_cuadrados = sum((x - media) ** 2 for x in numeros)
        return (suma_cuadrados / (len(numeros) - 1)) ** 0.5
```
__Observa__ que al momento de crear un objeto del tipo `Calculadora_cientifica` es posible utilizar los métodos heredados de la clase `Calculadora`.
```
# Create a scientific calculator and exercise both inherited and new methods.
calc1 = Calculadora_cientifica();
print("2+3 = " + str(calc1.suma(2,3)));
print("Promedio de: [2,3,10] = " + str(calc1.promedio([2,3,10])));
print("Desviación estándar de: [2,3,10] = " + str(calc1.desvest([2,3,10])));
```
En Python, durante la relación de herencia puede haber múltiples clases padre por cada hija.
## 1.1. Operadores __`self`__ y __`super()`__
<div align="justify">
El operador `self` se refiere a la clase _per se_. Se emplea dentro de la clase para especificar el uso de sus métodos y atributos. El operador `super()` se emplea en una relación de herencia para referirse explícitamente a los métodos y atributos de la clase padre. Es decir: en una relación de herencia, se emplea `self` para referirse a los métodos y atributos de la subclase (clase _hija_) y `super()` para los métodos y atributos de la superclase (clase _padre_).
</div>
### 1.1.1 Constructores
Tanto la superclase como las subclases pueden tener sus propios constructores. Si la superclase tiene un constructor, la subclase debe emplear el operador `super().__init__(entradas)` para ejecutarlo.
Por ejemplo, supongamos la situación de un estudiante. Se puede asumir que la clase de `Student` deriva de la clase `Person`, como se aprecia en el diagrama UML.
<div align="center">
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAKEAAAE/CAYAAAAuSqMxAAAGnnRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDctMjRUMjAlM0EwOCUzQTUyLjE5M1olMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChXaW5kb3dzKSUyMiUyMGV0YWclM0QlMjJ5M09uZVJTR3RFdURUblR0NmRNUCUyMiUyMHZlcnNpb24lM0QlMjIxNC45LjIlMjIlMjB0eXBlJTNEJTIyZGV2aWNlJTIyJTNFJTNDZGlhZ3JhbSUyMGlkJTNEJTIyQzVSQnM0M29EYS1LZHpaZU50dXklMjIlMjBuYW1lJTNEJTIyUGFnZS0xJTIyJTNFN1ZscmM5bzRGUDAxekhRJTJGcE9NSEVQb1JTS0R0Wmp1MGRKdjlLdXlMclVhV3ZMSUlrRiUyRmZLMXQlMkJZWjV0R0RwcFpoakdPcnE2c3U0NUVnZTc1UTZqMVZpU09QeEglMkJNQmFqdVd2V3U1TnkzR3VldzUlMkJhMkNkQVczSHlvQkFVaiUyQkQ3QktZMGljd1lCNjJvRDRrdFVBbEJGTTByb09lNEJ3OFZjT0lsR0paRDVzTFZwODFKZ0UwZ0tsSFdCTzlwNzRLTTdUblhKZjRlNkJCbU05c2Q5OWxQUkhKZzgxS2twRDRZbG1CM051V081UkNxT3dxV2cyQjZkcmxkYm4lMkZzTDVuZHclMkZkOGNmUHlmJTJGazM4SGZYejk5dThxU2pVNFpVaXhCQWxjJTJGbmZycFlUNTYlMkY2MzklMkZiOTRNdnE4dE1iOTJjY3JNOFI2Skd4aDZqVUJtUWh1VnF6V2VSbVRKWTBZNGRnYXpBVlhVOU5qWVpzd0duQzg5dkR1UUNMd0NGSlJaS0J2T3BTSUVmVkN5dnc3c2hZTHZZWkVFZThoYncxQ0lla1RwaVVNdTJ3RXNGc3FJeWFuVzR1WTZwRm1hZ2tKeGt6eXd0Z0ZkRWNTWldJOHdSaUpFenBMYjFpSFJFUUdsQSUyQkVVaUxLRTRrRjk4RTNyWUxwdEtHa2VDaTBvOGNmU1llaFRWY0RWaFV4R25yR0lDSlFjbzBocHJkUW10bHFkdDVlbHNLMXV3WUxxNkoxZTJiRG1NMFNGTG1MNmI3ZzVpSTh3Q0tVODdrYjg3V1BuQThacVUxSEdCTFBpWUtCTG1OU2xTRmVWSlphUXFrNFR4Q3EzUkJxeTBFaUxFNGlhSWdWNjYwcXdtUXdWenRsbWNURW96eTRTMk51MmlYeXhheFhRd0xIemxrcWlaRDZQdkJVTW9vb01pdTJRU3dvVjJsQk9nUDhZTm1HMXR0T3E0TTNOTVMyWGJieG84T2xHZ3FPNmlJMGxSR2daSmVnWmJ0RllIczM3MkdCcmV1OG5jcHZWVTQxWWs5bDBkbkJZaHdLRHA4VzBRelBqNWRHNXA2akpWUVJNNWZub3J6alhKaHlkd2ZsRUJIS1hzbCUyQlZyS3ZleGNtdTcyRjdBMkdHVTFOaEttR3ZmVUg5Z0Q5RVJLcDAlMkJWOGY5Vnl1TG15RzVwd201cHd0JTJGRFB5QXpZUkNSVVVhSHp5eXgyUXhlWE9yUnRxMzBjcTNzOHdDJTJCUjJ0bXhnN05DTER3bFh0NmhmVFl5N1V1Zng5MEdtMU8xOFBVeVh5MyUyRjVTMiUyRmE1M1g4dHNibHQlMkZ0JTJGYjZXJTJGN3FoMUt0VW9LbGFQJTJCQU83dXNNVzRUN2h4dyUyQjNhUEY5cnZZJTJGOTVXUm1PSjZYMHF4c0JCNnVOQTg0cmxKaSUyQlAyV2UxaHlmemYlMkZIJTJGQXU5ZTdlR3prM3FzNXolMkJYTzdSM1BabXAyVU5jbHJ2cXAy
VkluOXBnaVVlVUUlMkZibXIyeTdQd3FjcEJyMXdqYiUyQnVkanZIWG1rdDl2bm9uJTJGYkk1ME44b0Q3ZmYwa0gxc3pKclNUR3lCa3JKNXRaYzBSWmZuNWQ4Q0wxUTlSOEFQSXJTbXdtVmplbHNBZ0JiQWpGOHJKVGk0UkMlMkJuQlljYlF1d1p3eEQ4RmZiZDdlZDFHcEFSR0ZIMnN2OURZNCUyRnNtSW5WRmhjZTA2cDdQc2R4NmlteVJabFQxRGNLcGliSXFOQklkTm9UWUxOJTJCalpPSGx5eWozOWdjJTNEJTNDJTJGZGlhZ3JhbSUzRSUzQyUyRm14ZmlsZSUzRYlScd8AAA6lSURBVHic7Z3Bjds4FEBZQDaXvS8C6LClqJFc3MVcVUBqEOaeDtxDbga2hUwD3EPyna9vkqJE2n88eg8QMDOSKIp+/hI14mcIIUQWFuclRAAvkBDcQUJwBwnBHSQEd5AQ3EFCcAcJwZ3DSph6YHo6nbyrdUgOLeH5fF78bRiGOM+zU42OCxIqTqdTnKYpxhjj5XJZREnhfD7HEEIcx/H6d/k5FU3XyjmdTtd1tj5HAQkzfwshXKPi6XSK4zjGGP/II+vmeb6ui/FXNNXlDsNwFTtVTmrd0Ti0hHYRIc7ncxyG4bqtRLPL5XKV53K5xBh/Sai31ci2reV8dA4tYe7yN89zUtKUPDHGOE1T8nIs5QhImAYJE9hIaNdZCTX6ckwkrAMJC+v1fZ+0kZVnmqZF9Fu7JxTRkPAPSJjB9mpFllQkHIbh5r4yV46AhH84rITwfkBCcAcJwR0kBHeQENxBQnAHCcEdJAR3QggxfPnyZctIeRaWrstv/4iE4MdvGZEQ/EBCcAcJwR0kBHeQENxBQnAHCcEdJAR3kBDcqZYwhPwIs7VsAnYIpQwg0gPAZZ0ek1GbzQCem64SprIJ6GGOMf4anWZHnMl+kk4jN75XC3zkjAUfjaKEuUHgdkTZlpFjep3dz4qlI2opKwI8N10jYU5CfZkOIeySsJQVAZ6bu0tosxD0ioTwcejSOy5JaPOxDMOwS0L5PZUVAZ6bu0sY4zJDgd52q4S5rAjw3PCcENxBQnAHCcEdJAR3kBDcQUJwBwnBHSQEd5AQ3EFCcAcJwR0kBHeQENxBQnAHCcEdJAR3kBDcubuEa7Ni3oPUmOVpmroPEfU4t4/Ih5XQHhMJ3y9dR9vZTAp6XS47w9osmFv3k7qO47iQTktYMzqwJitEqo66jdbOTQb7H52uEsoHVMqyYAcz2fmArSi5/fTIu9wAqWEYrtvskTDGclaI0nmX6ij7ybqjU5RwbwaG2hnO986MvpaNQSTU5W+VsHYsdOm8c3XkMr6kayS0eWO0CDVjkmslXMvGoEU5nU5xmqaHSKjXl+qIhEvuIuGjI2GqriKKlFlK0vSISFhqr6PTpXds76NS94SlgfH6vq92P31PZSOq7cRIJ8NKKNuM49gkYeq8S3VEwiVdJZSb+FRPMCfTWg+ydr/UJVEzDMNNb1n3eFsk1L1jTa6OSLikq4Q0KuwBCcGdLhICtICE4A4SgjtICO4gIbiDhOAOEoI7SAjuICG4g4TgDhKCO0gI7iAhuIOE4A4SgjtPI6Eej2LHpsBz8zQSapDwY9FltF2M62NFajIaxHg7nFMGChEJPy7dJNTClDIplDIa2KGYqVF79md4fooSbsnAUJttYG1OY3t8JPz4dImEW7INrEmoh0+GEJDwAHSRcEu2gZpxvAKR8Bh06x3re8JStoGShDaTwjAMSHgAuklYm21g7XI8DMO1DL0vEn5cnvI5IXwskBDcQUJwBwnBHSQEd5AQ3EFCcAcJwR0kBHeQENxBQnAHCcGdq4QsLM4LkRD8QEJwBwnBHSQEd5AQ3EFCcAcJwR0kBHeQENxBQnAHCcEdJAR3ukoomRFksbkH70EI+axe76E8WKebhJJHRidNGsfxJo1cb5Dw+amW0ApmGYbhmhBJkPw09ndZ
BImgOi2cFkESa9roqvPWvL6+xhDCdVubA0dn+DInf62LLu9yuazWV44FbXSR0GZYzTEMwzUylrK56nXzPC8SKA3DsBBUhJMybHrhnIT6SzNN01Vu/QVYq6/90sE+ihLWZmqtkdBm0iplc9XCpCKYPQEtYS4bWC7Xoa5Pqrya+kIbD4uENvdgrYQxxkXSddvZ6SWhLW9LfaGNh9wT2vyCuXWl+zd9nNLlOJeEfauERMLHcffese1I6HssK0VKGH2/JmVskVC2HcdxIbZep2cJKN0T5uoLbXR9TmjvIe2lc623WepE5O5HpYf67du3GzHsZTw1w4Cti+5d19YX2ugqIcAekBDcQUJwBwnBHSQEd5AQ3EFCcAcJO/D29uZdhafmKiELi+vi/U14Zt7e3uJff/1FNGwECRt4eXmJ//77b3x5efGuylODhDt5e3uLnz9/jt+/f4+fP38mGjaAhDt5eXmJX79+jTHG+PXrV6JhA0i4A4mCP378iDHG+OPHD6JhA0i4Ax0FBaLhfpBwIzYKCkTD/SDhRlJRUCAa7gMJN5CLggLRcB9IuIFSFBSIhttBwkrWoqBANNwOElby8vISP336FP/555/F8vfff9/87dOnT0TDDSBhBT9//oz//fdfcgkhZNf9/PnTu+pPARI2EgJN2Aot2AgStkMLNoKE7dCCjSBhO7RgI0jYDi3YCBK2Qws2goTt0IKNIGE7tGAjSNgOLdgIErZDCzaChO3Qgo0gYTu0YCNI2A4t2AgStkMLNoKE7dCCjSBhO7RgI0jYDi3YCBK2Qws2goTt0IKNIGE7tGAjSNgOLdgIErZDCzaChO3Qgo0gYTu0YCNI2A4t2AgStkMLNoKE7dCCjSBhO8F9Nh8WlhD4JoMfSAjuICG4g4TgDhKCO0gI7iAhuIOE4M5hJbQPS+268/m8qbzz+XxTzt56bT32s3NICYdhiNM0XX+fpmkhEBI+lsNJeLlcYgghXi6Xxd9DCHGe5zgMwzU6vr6+LraV9YKIF0KIp9NpIaEcx0Za2Ue219LpY9v6fWQOJ2GMMY7jeJUuhYghwuQk1GVYCe26cRxjjH8klEis1+ljH4lDShjjL6F0pNJC1khoL7/69/P5vJBVR98asZHwgIgY8uHXSCgSCyKaXmcXJExzOAnneV5c/oRhGK7RUESw9497I6EGCW85nIQxxpvLr0QuEcNKKFKM47gQRveyS/eEOmoi4S2HlDDG2+eEujcqHZfz+Xx9fCM92tS9Xk3vWMpfk1Af+ygcVkJ4PyAhuIOE4A4SgjtICO4gIbiDhOAOEoI7IYQYvnz5kvxfJwvLI5bf/hEJwY/fMiIh+IGE4A4SgjtICO4gIbiDhOAOEoI7d5MwhPcxgNz+bJfcsM+99dpz3kfncBLqOskr+DUi7jk21NEkoYyHCOHXGAvhPWUxKEkY468UIHbwuc1+YEfS1R4b6tgtoR06OQzD4gOWD8c7i8GahDXRNyUhGRT60SRhbmytFPweshjcS0KGbPajKKG+5OjLrWCHQ9qC30MWAyR8/3TrmOQux95ZDLbeE5aOt/XYUMduCadpuumMlCT0ymJQ0ztekwYJ70tTJNS9QZ10Msb3k8XASmgXK0xt75gMCv3odjkG2AsSgjtICO4gIbiDhOAOEoI7SAjuICG4g4TgDhKCO0gI7iAhuIOE4A4SgjtICO4gIbiDhODOh5Uw9TZ0LfaN7tTb1l7UnosepNbj873n2+KHkHDPfrrB7SygntSel4z5uUdWi97sllAPAs+NAZHxFjGuZ1Ow5ehxJ5pcObosO17FfhClMvTAK804jtfz25MZomY/3V65mentuaS+HHrsT21Wi9Ix751ZollCkWSappuhnDrHix1Vl8umIB9EanyxVDiVlSG1Lidhri52eGqOXB1qsjOU9pN1th6ptl1DJhHfe+76mFL3dxsJbXRIyZOLRKltS2k1arIy2PqVfrZl2A/CRo3T6bQ7M8SW/Sylsdo5UhKW
zr10zBgdJSxlYMhFqVzWhR4SlrIylLI56MYv1aX0oYzjGE+n0+7MEFv2s20fQugiYencS8e0n0NvnjoSpuqT+n1LNCjdE6YiYalNSpGwtJ+t4yMiYemYMb5zCeUDS9236G+2vRfJbbuW5UrfO9lvdi6bw9p9USpVnb6flePI1SBXh5rsDLX72fre654wlxdIH1Pq/m4l1DkK7Tot4VrPsFbCXFYGu25v79g0zOpxUhEsJ2HtfjEue6R6fU3vWPZPSbj2ZCB1zBjvm1mi2+UYYC9ICO7slhCgF0gI7iAhuIOE4A4SgjtICO4gIbiDhOAOEoI7SAjuXCVkYXFeiITgBxKCO0gI7iAhuIOE4A4SgjtICO4gIbiDhOAOEoI7SAjuICG401VCnR8whNskSvcghL5ZAXqXB+t0kzCVzXQcx2RyoZ4g4fNTLaEVzCK5TzQ6PZv+XRZhLcOpznejo6vOnfL6+hpDCIucKbrONi+MOvlrXWxG0i2ZVWE/XSSszXBam61Vr5vneZEgSXIx63rpnIg2rW5OQv2lmaZpkXFLyl+rr/3SwT6KEuYSO9pLbI2EW3IU2uSSubx+cgKpxJxruQLtOct2trya+kIbD4uEW7K1WvF0UnXb2ekloS1vS32hjYfcE6by6m2R0B6ndDlOZY61ZdZISCR8HHfvHduORE22Vi2Mvl+TMrZIKNuO45jNPKqzzJbuCUuZaGE/XZ8T2nvI2jlIajoRuftR6aF++/btRgx7GS9lTbXlnc/n6vpCG10lBNgDEoI7SAjuICG4g4TgjruE+pHI2nZ2GcexenKZEqkypmmK0zTF8/m8+LdhL0r17nFOz8RTSXivt1tSD6a1eCLko0DCwoa552Jrb8HknrfZt1Zy8+3K8VMSpuZuy9Wjdg5hmUjRnn+pLexra/LCw9qzRvu7rv9R6Cphbp7f3NsoUq5+MCzHqJ1vNyXh1vl8U1N1WeTSnyP1to88dC+9iaOPmZqr+QgUJax9i2bLywJWhpxcqckJ7WL/J73lf9C5OuZm41y7JKemrK2dVbM0Q+kR6BoJS/P8CiUJ98y3u0XCmjmESxKuDVcYxzHO8xzneV7MjZw6d33M0lzNR+DuEtZGwr3z7dZKWCpfr7tcLrsioWwjl9zSDJtEwiVdesc1LyDk5hcWufbOt1sr4ZY5hPfcE+py7HFKb+LktkPCjaxJWJpjV7+1Uppvt8fluHYO4T29Y30Mfdmu7R3n5mo+Al0k/Gh4Pyc8GkiYQf/HpDTGBdpBQnAHCcEdJAR3kBDcQUJwBwnBHSQEd5AQ3EFCcAcJwR0kBHeQENxBQnAHCcEdJAR3kBDcQUJwBwnBnauELCyey//fX7r5PGEejgAAAABJRU5ErkJggg==">
</div>
```
class Person:
    """A person with basic contact details."""
    # Class-level defaults, overwritten per instance by __init__.
    name = ""
    phoneNumber = ""
    email = ""

    def __init__(self, nombre, numero, mail):
        """Store the given name, phone number and e-mail on the instance."""
        self.name = nombre
        self.phoneNumber = numero
        self.email = mail
class Student(Person):
    """A person enrolled as a student, identified by a student ID."""
    # Class-level defaults, as in the UML diagram above.
    studentID = 0
    promedioGeneral = 0

    def __init__(self, nombre, telefono, email, studentID):
        """Initialise the inherited Person fields, then set the student ID."""
        super().__init__(nombre, telefono, email)
        self.studentID = studentID
#--------------------------------- Create people and students ------------------
# 'name' is inherited from Person via super().__init__ in Student.
john = Student("John Pérez", 302010, "john@gmail.com", 10010)
print(john.name)
```
En ese contexto, el operador `super` se refiere explícitamente a la superclase, mientras que el operador `self` se emplea para trabajar con los atributos y métodos de la subclase.
### Ejemplo:
Elabora una clase _padre_ `Animal` para construir clases hijas: `Oso`, `Tigre` y `Perro`. Puedes emplear el diagram UML como sugerencia.
<div align="center">
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAjEAAAE6CAYAAADnQAOqAAAHtnRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDctMjVUMjAlM0E0NCUzQTI3LjI3OVolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChXaW5kb3dzKSUyMiUyMGV0YWclM0QlMjJ1Y2J3aUxiZm1fTzQ0TGw5ajFhZiUyMiUyMHZlcnNpb24lM0QlMjIxNC45LjIlMjIlMjB0eXBlJTNEJTIyZGV2aWNlJTIyJTNFJTNDZGlhZ3JhbSUyMGlkJTNEJTIyd3BPaG93WlZLZVpOTTBYX0dLNlolMjIlMjBuYW1lJTNEJTIyUGFnZS0xJTIyJTNFN1ZwZGIlMkJJNEZQMDFrWFlmV3VVTENJOThUR2RINm1pclphU1ozWmVWU1F4WTQ4VElNU1gwMSUyQjkxN0JBU1F4c1lzdkNRcXFyaWsydkh2dWRlN3NtbGxqZUpzODhjclZkZldZU3A1ZHBSWm5sVHkzVUhnUXQlMkZKYkJUZ08lMkZhQ2xoeUVpbklLWUVaZWNNYUxNdzJKTUpweFZBd1JnVlpWOEdRSlFrT1JRVkRuTE50MVd6QmFQV3BhN1RFQmpBTEVUWFI3eVFTSzRVRzdxREUlMkY4Qmt1U3FlN1BTSDZrNk1DbU45a25TRklyWTlnTHhQbGpmaGpBbDFGV2NUVEtYdkNyJTJCb2VVOG43dTQzeG5FaW1reklrSXVmQmc5Zmhvc3Y2VHB6WXUlMkJmNzU4ZjlDcXZpRzcwZ1VjSmlSSFZXeGE3d2clMkZwbHNRVUpUQWFMMWdpWnZxT0EyTkV5VEtCNnhBMmdqa0FyNWdMQWk0YzZSdUNyUUVOVjRSR3oyakhObks3cVVEaHoySTBYakZPM21CWmVMQmFFMjV6b2FQQjdWY3Nabkltd0RhZ0hLZGc4MUw0d0tsQlgxRldNWHhHcWRCQXlDaEY2NVRNOThlSUVWJTJCU1pNeUVZTEUyMHM2QjQlMkJEc3BOZWRQWmVRQTVqRldQQWRtT2dKWGtHJTJGanYlMkJCSG03TFlITDZHbHRWQXNuWFFhd0RlTGxmdXVRWUxqVE5aMUR1R3BSYnJnZEF3dUk1QjJRRTF6UEJTYkswd1BPdWw0MXlUMGdUSEtGSUdjeDNBaHUzQllxUk5mR3NzY09VVmNRMjByJTJCSGRyV3dBc2VLbkc3T2Z1SUpvd3ppWjVvd0ZXZUUwaHBVaEJyRkMzRXkwTkkxQ21IM3o3bk4xQyUyQlJ2N1I3SmNSZzdvTG0lMkJiZ2lVWVFUR1NSTUlJRlVSRWo2MTR3a0luZCUyRmJ3eSUyRndOTEVmdXhaUGRqNEJNWk9PWVpmYWM3RmhDVndGa1R5NE1BUWJsc3NRNjVaSkozT1VETzhkRGhKMXpZSnAxNWIwZVFkaWFZYXg1VGszQ21PaXc5UjV5S0NZNkNLNHBMUmI1THc2WU5qc082WnJIdEhHS1pvanVrTFM0a2dUSzdQbFcyTiUyQlZ1Ukd6VDhyQWhhNHRZJTJGd2kwYzFsWiUyQjJJUUNtT3NTJTJCc3FjQjgwNEx4TCUyRjZxVDNETkwlMkZURmtuQjY0a0J4eSUyRktnY2E1N2hqMnkwUjNqY0lmNmpXOG45WlNNSmFSZSUyQnklMkZzeXM3MTJsakxlVzlZT3VqTGRHYnMlMkI5YlJrUG1wVHh2VVNYeVE5djNDRW42eEM4JTJCZHZ2a1BpdkRCN1FxZmpXSXFUZnNBajRiZFdBSXQzZnkzJTJCY1JDUFpVSUhSbkRKWmM4Y0E2YUxzOU5Yd2ljZ0g1MzVmaWJnbzN6Z2o0b2VFd2M5cTlMYzJrdGZUN0hDd0t3WUpIRXhQOG9JQ2tQ
T2NSN3NFeXNuNXFETDdCWE1DJTJGcEVpNUtCJTJCNDhqbyUyQk5TcU54eWJiWGlJUDFaSklFdVclMkJEM2UlMkZlTzhIeU9hWTRvRWVhM3U3UmpUZXJrWEdkZW5HdzM3dmxPeGhEcVRublhZSjZvdDVBY2ZMS1FPYlN3RXdZRjJCMlk2NzA1dTJLMXYyTFBmM1pkWGwwNGYyUHQyJTJGejE3dUZBN0xyTm1UOUF2SkpMNVB2eU5MTG1wbURvQmZXRSUyRnpmUHZUVUM3NW51eUxLS2hLbnFIN2JTdWJKNVpObFU2M2E5c2RzMjM1VTQzWDQzZVd3dG4xM3cxN3BUem5YMEUzRnc2ZThOMnBYTXBnJTJGMmhYNVhCanROclV3YiUyRkw3TFZVR25CcGJMVjdzTjd3YkQ4cWE3ckJQN2pjTmhJeUY1TEN4WWhkeEFaUUFYdm1xblgwb0s5JTJCMnVtJTJCdVlYNnJJc2NQU0dvQnAwT3ZEQ0l1RGJ4d1BoWG5TZzM2QiUyRjB1bkFTJTJCbTl0UTcwajMzSjNlbkF1JTJGb0l1TGtPOUk5OVc5NjFVTThudEFXUmFiUWFmMGxrRGc1RXBxa3hnOERlJTJGOVRLelpVYXAlMkYwekc2ZFhib1RDc1B5blJXVmUlMkZ1ZW45JTJCayUyRiUzQyUyRmRpYWdyYW0lM0UlM0MlMkZteGZpbGUlM0VXDPYSAAAgAElEQVR4nO3dTa8cR9XA8bKdbB7Yxg47S20gYPM52p8Ag8QaqaVI7FBYATEC9S4S8pplyxISxr5WbCfAoj8AW2PSC0R4VxRFQoiA4TwLU+Oauv02Uz31+v9JrTh3ZmrO9D1n6tzqnmklQEaUUmxsQTcA/lBxyAqTCEIi/wC/qDhkhUkEIZF/gF9UHLLCJIKQyD/ALyoOWWESQUjkH+AXFYesMIkgJPIP8IuKQ1aYRBAS+Qf4RcUhK0wiCIn8A/yi4pAVJhGERP4BflFxyErJk0jbtqKUkrquVz+maRpRSskwDJvFcYoxU1Fy/gEhUHHISsmTSF3Xu2+NDdlA0MQA8IWKQ1ZKnUSGYRCl1K6BaNt2d5v+mf6vUkr6vt+7bRgGqapKqqraa4a6rtv9217hMb9q37yNJgaAL1QcslLqJKIPJZnNiKabiq7rRET2mg67idENjm6K9P7U4+vmp65raZpGRGTX6Iw1RqUpNf+AUKg4ZKXUScRsXOyGw24q9GqLfZs5hrmyI/KyUdGNkGauxujbaGIA+ELFISslTiJ93+8dQtINiP7/sSZGNyvHNjH6kFPXdeduo4kB4AsVh6yUOImY57qY21ijIuLexCw1ODQxAHyh4pCVEicRs2HRzENKp1iJMc+rMVdl7DFLU2L+ASFRcchKaZOIbi7MTyOJvDzE1DTNSZoY3STpZmbu8FVJSss/IDQqDllhEkFI5B/gFxWHrDCJICTyD/CLikNWmEQQEvkH+EXFIStMIgiJ/AP8ouKQFSYRhET+AX5RccgKkwhCIv8Av6g4ZIVJBCGRf4BfVByywiSCkMg/wC8qDlm5evXq6Ffws7H52K5evRq6BICi0MQgK0qR0giH/AP8ouKQFSYRhET+AX5RccgKkwhCIv8Av6g4ZIVJBCGRf4BfVByywiSCkMg/wC8qDllhEkFI5B/gFxWHrDCJICTyD/CLikNWmEQQEvkH+EXFIStLk0hd19L3vQzDIFVVeYpqX9/3opSSYRhO+jz2F7HZt/V9PxkbjsO+A/yi4pCVpUlENy5930td1z5COsdHE1NVlbRtu/v/tm339s1UEwM3NDGAX1QcsjI1iUx9Tbw50Yu8bDCaptndx5zsh2EYXd3Qj9PNgm5S9L+bppl9Dt3Q6Nvqut6NP/Wc+nXZzZC+v/1zpZR0XSdVVe3Gunv37t7zmSsxS/ui67q91xdqZSsmNDGAX1QcsjI3iXRdt2sm9GElm9mMiIg0TbO3YmOucJi32Y/TTcEwDOdWXszm
QOTFKom5QqSbDfM16f+345min98cx6QbEvv5xpqYqX1hNjVVVdHECE0M4BsVh6zMTSJt2+4m5KqqRg/n2A2HXrkwb9PMFQ/7cXMTvn3fuXH6vt9rDqZWWcaYKyVjjZHZxNgN1tK+MP899v+lookB/KLikJWtDidNTdynaGLM28eefyzuQ8+n0ePqGGhiToMmBvCLikNW5iaRNSf1xr4Ss0bXdaOvr6qq3WoMTcxp0MQAflFxyMrUJGJ+pNo8rGSbm7hFzp8TYzc4hzQxepyxc2LsVRrdfNirQXP7wTx8pB+nx3VtYuzXVNc1TYzQxAC+UXHIytQksuakXpHliXvp00mHNDHmp36mxhl7TrvBmTq0NHcISp/4e+fOnaObGD6ddB5NDOAXFYesMImE0TTN3sfIS0X+AX5RccgKk4gfepVGb6zCvED+AX5RccgKkwhCIv8Av6g4ZIVJBCGRf4BfVByywiSCkMg/wC8qDllZmkS2vIr1sVd8XvO4Yy4SaX4CCmHQxAB+UXHIytIksuVVrGliYKOJAfyi4pAV18sOiMxfNdr8VI7+nhdt7lpFc48ba1bmrnRd1/Ve3OaVs/X95l4DTod9DfhFxSErc5PI2i+8MxuQsS+tM2/Tz2dfmNH8Ft65x02Zu9K1fVkBfTFL+9pIh175Gu5oYgC/qDhkZW4SWXsV66mrRtuHgeYOC81dc+mYw0l2k2SuuOgGxbyUwLFXvoYbmhjALyoOWXE9nDR31eipq1hr5qEf8wvglh43Zu5K1/q5uq7ba8z07Vtd+RqHo4kB/KLikJW5SWTtVaynPrU0t6Ji33bqlZi+73eHicYu6sg36IZBEwP4RcUhK1OTyNqrWOsxpq4abV/FWt82dr+5q1+vbWLGrnRtxjl1Vem514DTYT8DflFxyMrcOSprTuoVmb9qtHmb3YxUVbW7bWolxeXTSbamafaaMX1lav09OBxK8o8mBvCLikNWSppEaE7iU1L+ATGg4pCVEiYR++PXiEcJ+QfEhIpDVphEEBL5B/hFxSErTCIIifwD/KLikJUUJxF92QD9sWmuf5SuFPMPSBkVh6wsTSJrrmJtflTZBx2H/nTTlo69SOXYGFPfcMzE/RL7AvCLikNWliaRNV9457uJOSWaGL/YF4BfVByy4nrZAfO7XsxLDehNf4Gc+WV05v3N74IxLY1jfh+M2UDNXY16qbEY+06aqfHsRmXsG4fHrqhtNzGlXz27xNcMhETFIStzk8ghV7E2vzBu7MrU9jfq6i+aMy8UOXXJgLlx7CtO29/0u+Zq1GaTNPaFfGPjrW1ipuKfeu7Srp5NEwP4RcUhK3OTyJqrWOsxxhqcsYldj2FP2HOHpObGmbvm0pqrUR9yfaexK3TPxTHWlNnjl371bJoYwC8qDllxPZyk72teLdq8/7FNzNpxzOZh6urXc03B3BWz58Y7pIkxX5/ZxHD1bJoYwDcqDlmZm0TWnNSrx7AnaJHjV2IOGSfUSow99hYrMSWiiQH8ouKQlalJ5NCrWPd9P3tl6kOamEPGMZsHfV/zHJY1TcLcFbOnxtONiY65ruvJ83bWnhNT4tWzS3u9QGhUHLIyNYkcchVr82rQU1emPvRw0tpx7CbmmE8nzV0xe24885NWZoMzd0XtpU8nlXQoSYQmBvCNikNWmEQQEvkH+EXFIStMIgiJ/AP8ouKQFSYRhET+AX5RccgKkwhCIv8Av6g4ZIVJBCGRf4BfVByywiSCkMg/wC8qDllhEkFI5B/gFxWHrDCJICTyD/CLikNWmEQQEvkH+EXFIStMIgiJ/AP8ouKQFSYRhET+AX5RccgKkwhCIv8Av6g4ZMW8+CAbW4gNgD9UHLLCJIKQyD/ALyoOWWESQUjkH+AXFYesMIkgJPIP8IuKQ1aYRBAS+Qf4RcUhK0wiCIn8A/yi4pAVJhGERP4BflFxyAqTCEIi/wC/qDhkhUkEIZF/gF9UHLLCJIKQyD/ALyoOWWESQUjkH+AX
FYesMIkgJPIP8IuKQ1aYRBAS+Qf4RcUhK1tNIn3f713Ur2maTcado5SSvu+jHQ/LaGIAv6g4ZGWLSaTrOlFKyTAMu5/VdS1t2zqPPYcmJn00MYBfVByysmYSsRsUW1VV0nXd3s+GYdgbW/+/3jS9gtM0ze42s5Go63p0daeqqt3P7969K0qp3X31mDrmruukqqpzr8mMxRxvGIbFePVzwQ37EPCLikNWXJsYPdnPNTkiL5oEvTLTNI3UdS0iL5uCsdu6rtv9W49hNji6YdFj6EZqqYkxm662bXfNkdlALcVrN204Dk0M4BcVh6xMTSL6EJG92YeI1jQxeuIfe8xcwzG2gmLHbjYxeoy5Me1YdDxj462JF25oYgC/qDhkxcdKjG6Ixh6ztGrStu3kycJbNTH2eIfECzc0MYBfVByycupzYsyJf+62ufNXzOeZO5ykx7Abq0ObGFZi/KGJAfyi4pCVU346yT4R1zzHxG4qxhoO83wVPcYhTYy+b13Xe42ReVvbtrvb5s6JmYoXbmhiAL+oOGRlq0nEPofGPvSz9GmfuZNwp87H0Z8QunPnzrnGwj4MZY45FYv56aa18cINTQzgFxWHrDCJICTyD/CLikNWmEQQEvkH+EXFIStMIgiJ/AP8ouKQFSYRhET+AX5RccgKkwhCIv8Av6g4ZIVJBCGRf4BfVByywiSCkMg/wC8qDllhEkFI5B/gFxWHrIxd5JGNzecGwB8qDsCi69evhw4BAM6hiQEw69NPPxWllNy7dy90KACwhyYGwKzr16/LhQsX5LXXXgsdCgDsoYkBMEmvwnzjG98QpZTcv38/dEgAsEMTA2DS9evX5f/+7/9EROT1119nNQZAVGhiAIzSqzDf/e53RUTk8ePHopSSBw8ehA0MAP6HJgbAqC9/+cu7VRjtypUrcvny5UARAcA+mhgA5/zzn/8UpZR873vf2/v5u+++K0opefjwYaDIAOAlmhgA53zpS1+Sz3zmM6O3XblyRa5cueI5IgA4jyYGwJ5//OMfopSS73//+6O3P3z4UJRS8u6773qODAD20cQA2PPGG29MrsJoly9fZjUGQHA0MQB2/v73v4tSSm7fvj17v7OzM1FKyaNHjzxFBgDn0cQA2HnjjTfks5/97Kr7Xr58WV5//fUTRwQA02hiAIiIyCeffCJKKfnBD36w6v73798XpZQ8efLkxJEBwDiaGAAiIvLFL35x9SqM9tprr7EaAyAYmhgA8vHHH4tSSn74wx8e9Lh79+6JUkref//9E0UGANNoYgDIF77whYNXYTRWYwCEQhMDFO6jjz4SpZT86Ec/OurxP/vZz0QpJb/85S83jgwA5tHEAIX7/Oc/L0op5+1zn/tc6JcCoDA0MUDBPvzwQ/nVr34lv/jFL+T999+X9957T548eSJPnjyRx48fy6NHj+TRo0eilJKf/OQn8vDhQzk7O5MHDx7IgwcP5P79+/Lzn/9c7t27J/fu3ZP33nsv9EsCUBCaGACLlFLyu9/9LnQYALCHJgbAIpoYADGiiQGwSCklv//970OHAQB7aGIALKKJARAjmhgAi5RS8uGHH4YOAwD20MQAWKSUkj/84Q+hwwCAPTQxABYppeSPf/xj6DAAYA9NDIBFSin505/+FDoMANhDEwNgkVJK/vznP4cOAwD20MQAWKSUkr/85S+hwwCAPTQxABYppeSvf/1r6DAAYA9NDIBFSin529/+FjoMANhDEwNgkVJKPvroo9BhAMAemhgAi2hiAMSIJgbAIqWUfPzxx6HDAIA9NDEAFiml5JNPPgkdBgDsoYkBMOuDDz4QpZT8+te/Dh0KAOyhiQEw69GjR6KUkp/+9KehQwGAPTQxAGb9+Mc/lgsXLkjbtqFDAYA9NDEAZn3rW9+Sixcvyje/+c3QoQDAHpoYAJM++OADuXjxoly6dEkuXrwov/nNb0KHBAA7NDEAJn3961+Xt99+W1599VV5++235atf/WrokABghyYG
wKjvfOc7cv36dRERefXVV+XTTz+Vr3zlK/Ltb387cGQA8AJNDIA9v/3tb+VrX/uaXL9+XR4/fiwiL5qYf/3rX/LkyRO5ceOG3Lp1S549exY4UgClo4kBCvb8+XN5+vSpnJ2dyTvvvCNvvvmmKKXk9u3be/fTTYx2+/ZtUUrJm2++Ke+8846cnZ3J06dP5fnz575fAoCC0cScwLNnz3YTws2bN+XatWuilGJji3K7du2a3Lx5c9eQjK2w2E0Mec7GNr+tqSu4o4nZGH+hIkevvPLKuSYGwLi1K5xwRxOzkcePH3OuALJFEwO4efbsmdy6dUtu3LixO9cM7mhiNnLjxg156623QocBnARNDLCNt956S27cuBE6jGzQxGzg9u3bcuvWrdBhACdDEwNs59atWxxa2ghNjKNnz56JUopDSMgaTQywHeaN7dDEONInbQE5o4kBtqU//AE3NDGOSESU4JVXXpF///vfocMAssEfwNugiXF08+ZNOTs7Cx0GcFI0McC2zs7O5ObNm6HDSB5NjKNr167J06dPQ4cBnBRNDLCtp0+fyrVr10KHkTyaGEdKKb7IDtmjiQG29fz5c1GKKdgVe9ARSYgS0MQA22P+cMcedEQSogSXLl2iiQE2xvzhjj3oiCRECWhigO0xf7hjDzoiCVECmhhge8wf7tiDjkhClIAmBtge84c79qAjkhAloIkBtsf84Y496IgkRAkuXbrEVwkAG2P+cMcedEQSogQ0McD2mD/csQcdkYQoAU0MsD3mD3fsQUckIUpAEwNsj/nDHXvQEUmIEtDEANtj/nDHHnREEqIENDHA9pg/3LEHHZGEKMHFixdpYoCNMX+4Yw86IglRApoYYHvMH+7Yg45IQpSAJgbYHvOHO/agI5IQJbh48aL85z//CR0GkBXmD3fsQUckIUpAEwNsj/nDHXvQEUmIEtDEANtj/nDHHnREEqIENDHA9pg/3LEHHZGEKAFNDLA95g937EFHJCFKQBMDbI/5wx170BFJiBLQxADbY/5wxx50RBKiBDQxwPaYP9yxBx2RhCjBhQsXaGKAjTF/uGMPOiIJUQKaGGB7zB/u2IOOSEKU4MKFC/Lf//43dBhAVpg/3LEHHZGEKAFNDLA95g937EFHJCFKQBMDbI/5wx170BFJiBLQxADbY/5wxx50RBKiBDQxwPaYP9yxBx2RhCgBTQywPeYPd+xBRyQhSkATA2yP+cMde9ARSYgS0MQA22P+cMcedEQSogQ0McD2mD/csQcdkYQoAXkObI+6cscedEQSogTkObA96sode9ARSYgSkOfA9qgrd+xBRyQhSkCeA9ujrtyxBx2RhCgBeQ5sj7pyp5RSwsYWcotB6H3Axpai0PuMjU0plWbxIA+x5F8scaBMqeZfqnEjDzQxCC6W/IslDpQp1fxLNW7kgSYGwcWSf7HEgTKlmn+pxo080MQguFjyL5Y4UKZU8y/VuJEHmhgEF0v+xRIHypRq/qUaN/JAE4PgYsm/WOJAmVLNv1TjRh5oYhBcLPkXSxwoU6r5l2rcyANNDIKLJf9iiQNlSjX/Uo0beaCJWWB/qU7f96FDyk4s+RdLHCF1XTf5hVJt20rTNKKUkmEYQoeanVTzL9W4tzRVLzg9mpgJwzCIUkrqut79TL/Bd10XMLL8xJJ/scQRi7qu2ScepbqvU417S/ZcoZt9/ug9PZqYCToJl35udt5N0+zdt23b3W1VVZ085lTFkn+xxBGLsSbGXompqmqX33ozH6v/q9/M9f35Y+C8VPMv1bi3ZDcxfd/vrcaYK5zmXKDvZ9bL2M/M56F+9tHETKiqai95NJ2Mfd/vNTS6YdFv1vb/m2/w2BdL/sUSRyyWmhjz3/qN125izDdaczyzjvBCqvmXatxbmmti7IbGnFvsupn6mf0cHBV4iSZmgp2Umvnmq9+Ux84RsJsg3rSnxZJ/scQRi6Umxs7xsZUYsy7s1cqx1cuSpZp/qca9pbnDSfoPWl0L
5v/rhsWsg7Gfjc0fU39ol4YmZsJUgpgrLDrZzE3jfJr1Ysm/WOKIxVITY+f4WBOj6fvbG03MS6nmX6pxb2kst/V7va4ZezObGPMk4LGf2Sv7IqzuazQxE6bOiZk62VEf69dvyqzErBdL/sUSRyy2WIkx0bTMSzX/Uo17S1Mr9yJybiXGtLaJYSVmGk3MhLlPJ5nHNvWbtr6/vo1zYtaLJf9iiSMWW5wTMzWevj8rky+lmn+pxr2luSbGbkrqut7Vydomxn4OVvZfoolZYC8B2isp5m12EpvLiDQw02LJv1jiiMWaTyeZuW/+Zbi0Yjn2Jl26VPMv1bi3NNfEiJz//iXtkCbGPiRLA/MCTQyCiyX/YokjFXajwuEiN6nmX6pxIw80MQgulvyLJY5UjJ2si+Oluv9SjRt5oIlBcLHkXyxxoEyp5l+qcSMPNDEILpb8iyUOlCnV/Es1buSBJgbBxZJ/scSBMqWaf6nGjTzQxCC4WPIvljhQplTzL9W4kQeaGAQXS/7FEgfKlGr+pRo38kATg+Biyb9Y4kCZUs2/VONGHpRSoq5evXruo5JsbL62q1evhq4DERGhDthCbrHUwaGoG7aQ2//yj04a4cSSf7HEgTKlmn+pxo08/K+ZIQkRTiz5F0scKFOq+Zdq3MgDTQyCiyX/YokDZUo1/1KNG3mgiUFwseRfLHGgTKnmX6pxIw80MQgulvyLJQ6UKdX8SzVu5IEmBsHFkn+xxIEypZp/qcaNPNDEILhY8i+WOFCmVPMv1biRh6SamLZtRSklfd9L0zTS933okLCBWPIvljhQplTzL9W4kQenJkY3FL5UVbX7b6jC6fvey3Nv8Tx6jGEYTjL+VkqL41T73v4SKPu2sVqNKQ9Kl+rvIYW4qY18JdXElIQmxr+Um5iqqqRt293/61VLjVqNXyx1cKjY46Y28nZ0E6NXQ/Qk2XXdXqfbdZ2IvHzD1omj76//3TTN3rhL4zRNs7vNTDxzzENez9Lj7NvGJqCxx8+Nu9RYmK9zaTy7Uem6brdiNbbP9P3s13Hs/ttCLG+CLnEckgNr972+X13Xu5+P5Y5+vP1zXT9mrd69e3dvTDOWpRoza7Npml2eYRux1MGhjol7LLen3vvNuUNvbdtO3l/HNAwDtVGATVZi7ERp2/bcRKo7YZ0gwzCcm4APGadpGqnreheL2W3bt82Ze1xVVXuFpM/DMffX2H2OjccsRLuJmRpvbROj47Nvm3rutftvC7G8eR8bx6E5MJZDc/cz35yn6Lqauq+uVXvMsTfqqbw137irquKNemOx1MGhXJoYnYdz7/1jz6ebhjX3pzbytkkTYxubSHWyzf3yDxln7DZtqvu2zT1ubMXF/vma+6yNx37M3POMxTm3X8aaRHt8s+jW7r+txPLm7fJmbFrKgWN/t0uW/jI136jHVuTmcsn899j/w10sdXAol7qZyu2p/JpqRpbykdrI12wTYy6d2Yd99IN1A2LeVyl1dBOzdhw7iY5pYuYeN3XewlITc2w8U49ZG+eaJkbkfMGa49sbTcwLc3VwTA6M7fu5+x3ze9CP1bXFG3X8YqmDQ7nUjZnbU+/9mrniueb+U6iNvGyyEmO/kR+7EnPIOCFWYuzHrLnP2niOXYmxx95iJca3WN68t1qJGfv5KVdiuq4bPfxnvunzRh2/WOrgUFusxMy994u8OCRknpy7dH/z59RG3jZpYuy/Js1jgoc0MYeMYyeKfV7B2iSae5wZmz7mOnYuiX2fY+OxH2Pvi7Hx9KSnY6jrevI8orXnxNi/h1OL5c3btQ5E1uXA0jkxcytpczGYf6Xa5wy4vlHbr9PMM2wjljo41BZNzNx7f9M0kx8AGbv/WHzURr6cmhjzLG7zLG/zF37o4aS149hJdIpPJ43dNvWXs/34Yz6dZH9qa+145tn7Y5OgueyqLb0OX4eSROJ58z42jkNzYG0OTR0OnPrdmGPY99O1eufOnaPfqPkExmnFUgeH2qKJ
EZl+7zd/buff2P11THbdUBt5cmpigC3Ekn+xxJGCsb+O4SbV/Es17lOhNvzKuomx/8I1NzrleMSSf7HEESP9lyj1czqp5l+qcW+F2ggr6yYGaYgl/2KJA2VKNf9SjRt5oIlBcLHkXyxxoEyp5l+qcSMPNDEILpb8iyUOlCnV/Es1buSBJgbBxZJ/scSBMqWaf6nGjTzQxCC4WPIvljhQplTzL9W4kQeaGAQXS/7FEgfKlGr+pRo38kATg+Biyb9Y4kCZUs2/VONGHmhiEFws+RdLHChTqvmXatzIA00Mgosl/2KJA2VKNf9SjRt5oIlBcLHkXyxxoEyp5l+qcSMPuyaGjS3kFoPQ+4CNLUWh9xkbm1IqzeJBHmLJv1jiQJlSzb9U40YeaGIQXCz5F0scKFOq+Zdq3MgDTQyCiyX/YokDZUo1/1KNG3mgiUFwseRfLHGgTKnmX6pxIw80MQgulvyLJQ6UKdX8SzVu5IEmBsHFkn+xxIEypZp/qcaNPNDEILhY8i+WOFCmVPMv1biRB5oYBBdL/sUSB8qUav6lGjfyQBOD4GLJv1jiQJlSzb9U40YeaGIQXCz5F0scKFOq+Zdq3MgDTQyCiyX/YokDZUo1/1KNG3mgiUFwseRfLHGgTKnmX6pxIw+bNjF93+9dlKlpmk3GnaOUkr7vox0Py2J5E6QOTjcelsVSB4eibk43HpZt1sR0XSdKKRmGYfezuq6lbVvnseeQhOmL5c2bOjjdeFgWSx0ciro53XhYtrqJsRPMVlWVdF2397NhGPYSXP+/3jTdgTdNs7vNTIS6rke786qqdj+/e/euKKV299Vj6pi7rpOqqsZe/C4Wc7xhGBbj1c8FN7HsQ+qAOggp1X1I3VA3IW3SxOhf1lySirz4JevOumkaqetaRF7+Usdu67pu9289hpmgOuH0GLoQlpLQLJq2bXfJbRbAUrx20eE4sRQydUAdhBRLHRyKuqFuQpptYvQSn73ZS3xrklD/4sYeM5cwYx2w/QLMJNRjzI1px6LjGRtvTbxwE8ubN3VAHYQUSx0cirqhbkLythKjE3rsMUtdb9u2o8uBOq4tktAe75B44SaWN2/q4Px41IE/sdTBoaib8+NRN/5s0sSIzB/TNH9xc7fNHX80n2duOVCPYRfGoUlIJ+1PLG/e1IFMjrcmXriJpQ4ORd3I5Hhr4oWb1U3Mkqmzy+0TqcxjhHZSjCWMebxRj3FIEur71nW9l9jmbW3b7m4zf742XriJ5c2bOqAOQoqlDg5F3VA3IW3WxIicPwZqL93pxNCbtuYkKv0Y+3iqPsP7zp075xLDXkY0x5yKxTw7fW28cBPLmzd1QB2EFEsdHIq6oW5C2rSJAY4RS/7FEgfKlGr+pRo38kATg+Biyb9Y4kCZUs2/VONGHmhiEFws+RdLHChTqvmXatzIA00Mgosl/2KJA2VKNf9SjRt5oIlBcLHkXyxxoEyp5l+qcSMPmzUxc5+d35rrc20Za9u20rat9H2/9/XWLubi87mffYnl9VAHx6MO3KX6eqib41E37pJsYmIxDMNe4umEPKUc93Msr4c6OA51sI1UXw91cxzqZhtOTYzeIfpz9OY4U5+RF5m+qqgV1O5x+nnsK4yat5mf6bc/fz813lKs+n5TV0dtmubct/O4gj4AAAKRSURBVEyaj9ex2JeU1xcDW3reNfs5B7G8HuqAOggp1ddD3VA3ITk1MUq9vBKnvXPs29ZeVXTs6qD6l2BfYdT8t05m89sTl8Yz73Po1VH1a7TVdb33eqZes/5CprnnXbOfcxDL66EOqIOQUn091A11E9LRTcxUt6f/PfbthsMwzF7XYmypa+xaE2NJOHbNi6Xxxp5z7TU57NeojS0JmmOaX2e99LxL+zkXsbwe6oA6CCnV10PdUDchzTYx5jKYvWw3dZVO8zZ707/MqauKTu3gQ5JQv6i+P38RrrHnOfbqqHNJOLbEWde1dF0nXdftlhHXJOHcfs5FLK+H
OqAOQkr19VA31E1Is03MnEM66TnmcuAWSbjUSU89XlubhMMwrO6k9c/1kl/XdXTShlheD3VAHYSU6uuhbqibkI5uYkTOHws0x1Hq5XE4sxtce1VRfV99+1IS6jjsY5pz4029jjVXR9Vj28aOaZpjHfK8a/ZzDmJ5PdQBdRBSqq+HuqFuQnJqYsyzo5fOLjeX6+auKjp2xvWaJDTPWF873tx9xp7XTsK1Z5ebr9sswLVnl8/t5xzE8nqoA+ogpFRfD3VD3YTk1MTEYOyYpi/D4P9z/jmKJf9iieMY1EH6Us2/VOMWoW5yQBPjyPzGxbXHcbEvlvyLJY5jUAfpSzX/Uo1bhLrJQfJNDNIXS/7FEgfKlGr+pRo38kATg+Biyb9Y4kCZUs2/VONGHmhiEFws+RdLHChTqvmXatzIA00Mgosl/2KJA2VKNf9SjRt5oIlBcLHkXyxxoEyp5l+qcSMPNDEILpb8iyUOlCnV/Es1buRh18SwsYXcYhB6H7CxpSj0PmNjS7NyAABA8WhiAABAkmhiAABAkv4frFRxkImXgLsAAAAASUVORK5CYII=">
</div>
```
# --------------------- Parent class ---------------------------
class Animal:
    """Base class for animals: holds a name, an age and a size.

    Subclasses (`Oso`, `Tigre`) extend it with one extra attribute each.
    """

    def __init__(self, nombre, edad, tamaño):
        # Plain data holder; removed the redundant class-level defaults
        # (they were immediately shadowed by these instance attributes)
        # and the un-Pythonic trailing semicolons.
        self.nombre = nombre
        self.edad = edad
        self.tamaño = tamaño
# ------------------ Child classes ----------------------------
class Oso(Animal):
    """Bear: an Animal with an extra snout-size attribute.

    As in the original example, the description is printed on creation.
    """

    def __init__(self, nombre, edad, tamaño, tamaño_ocico):
        super().__init__(nombre, edad, tamaño)
        self.tamaño_ocico = tamaño_ocico
        # The example prints its own description as a side effect of creation.
        self.descripcion()

    def descripcion(self):
        # f-string produces byte-identical output to the original concatenation.
        print(f"{self.nombre} es un oso de {self.edad} años y tiene un ocico de {self.tamaño_ocico} metros.")
class Tigre(Animal):
    """Tiger: an Animal with an extra coat-colour attribute.

    Prints its description on creation, mirroring `Oso`.
    """

    def __init__(self, nombre, edad, tamaño, color):
        super().__init__(nombre, edad, tamaño)
        self.color = color
        # Side-effect print kept to preserve the example's behaviour.
        self.descripcion()

    def descripcion(self):
        # Same output text as the original string concatenation.
        print(f"{self.nombre} es un tigre de {self.edad} años y tiene un color {self.color}")
# ----------------------------- Examples ----------------------------------------
# Bug fix: the original bound the instance to the name `Tigre`, shadowing the
# class itself and making any further `Tigre(...)` instantiation impossible.
tigre_malo = Tigre("Malo", 3, 2.2, "Amarillo")
```
## __2. Polimorfismo__
_Polimorfismo_ se deriva de las palabras griegas "poli", que significa muchos, y "morphe", que significa formas. En programación, se emplea este concepto para cambiar el contenido de un método heredado para que se ajuste a las necesidades principales de las subclases.
Existen dos tipos de polimorfismo: _dinámico_ y _estático_.
### 2.1. Polimorfismo dinámico
Se conoce también en la literatura como _polimorfismo en tiempo real_, _vinculación dinámica_ o _anulación de método_ ("overriding", por su traducción al inglés). La multiplicidad de formas ocurre en diferentes clases.
Supongamos que en la relación de herencia entre una clase padre e hija existen métodos con el mismo nombre, pero en diferentes formas. Cuando un objeto es asignado a una _referencia de clase_ y el método del objeto es llamado, el método del objeto de la clase se ejecuta; mas no el de la clase referencia.
Dado que la creación del objeto ocurre en tiempo real, la forma en como se ejecuta el método sólo puede ser decidido cuando se ejecuta el método.
Por ejemplo: una figura geométrica tiene un área y un perímetro; pero la forma de la figura define la manera en cómo se calcula.
```
%matplotlib inline
from math import pi
import matplotlib.pyplot as plt
# PARENT CLASS
class FiguraGeometrica:
    # WARNING(review): class-level *mutable* list — it is shared by every
    # instance of every subclass; subclasses append to it instead of
    # assigning a per-instance list, so figures corrupt each other's data.
    dimensiones = []
    # Default [x, y] location; subclasses reassign it per instance.
    ubicacionEspacial = [0,0]
    # One shared matplotlib figure/axes, created once at class-definition
    # time; all subclass instances draw onto these same axes.
    fig, ax = plt.subplots()
    def Area(self):
        # Abstract-by-convention: base returns None, subclasses override.
        return None
    def Perimetro(self):
        # Abstract-by-convention: base returns None, subclasses override.
        return None
#CLASES HIJAS
class Circulo(FiguraGeometrica):
    """Circle defined by its radius and an optional [x, y] centre."""

    def __init__(self, radio, ubicacion=None):
        # Bug fix: assign a per-instance list instead of appending to the
        # shared class-level `dimensiones`, which mixed every figure's data.
        self.dimensiones = [radio]
        # Bug fix: replaced the mutable default argument `[0,0]` with a
        # None sentinel (same default behaviour, no shared-state hazard).
        self.ubicacionEspacial = [0, 0] if ubicacion is None else ubicacion
        # Draw the circle on the shared class-level axes.
        self.ax.add_patch(plt.Circle(tuple(self.ubicacionEspacial), radio))

    def Area(self):
        return pi * self.dimensiones[0] ** 2

    def Perimetro(self):
        return 2 * pi * self.dimensiones[0]
class Rectangulo(FiguraGeometrica):
    """Rectangle defined by base `b`, height `h` and an optional centre."""

    def __init__(self, b, h, ubicacion=None):
        # Bug fix: per-instance list; appending to the shared class-level
        # `dimensiones` corrupted the dimensions of other instances.
        self.dimensiones = [b, h]
        # Bug fix: mutable default argument `[0,0]` replaced by a sentinel.
        self.ubicacionEspacial = [0, 0] if ubicacion is None else ubicacion

    def Area(self):
        return self.dimensiones[0] * self.dimensiones[1]

    def Perimetro(self):
        return 2 * (self.dimensiones[0] + self.dimensiones[1])
# --------------------- FIGURE CREATION -----------------------
cir1 = Circulo(0.2)
cir2 = Circulo(0.4, [0.6,0])
rec1 = Rectangulo(3,4)
# NOTE(review): because `dimensiones` is a shared class-level list that the
# constructors append to, rec1.dimensiones[0] is likely 0.2 (cir1's radius)
# rather than 3 here, so the printed rectangle values look wrong — confirm.
print("Círculo de radio " + str(cir1.dimensiones[0]) + " tiene un área de " + str(cir1.Area()))
print("Rectángulo de " + str(rec1.dimensiones[0]) + " de base por " + str(rec1.dimensiones[1]) + " de altura tiene un área de " + str(rec1.Area()))
```
### 2.2. Polimorfismo estático
Conocido también como _polimorfismo en tiempo de compilación_, _vinculación estática_ o _sobrecarga de métodos_. Consiste en tener múltiples métodos con el mismo nombre pero diferentes argumentos de entrada. Se escoge el método dependiendo de cuántas entradas pase el usuario. Por ejemplo: en una calculadora, es posible sumar dos o tres números.
```
class Calculadora:
    """Tiny calculator illustrating static polymorphism via default values.

    Python has no true method overloading; unsupplied operands simply
    default to 0, so both two- and three-argument calls work.
    """

    def suma(self, x=0, y=0, z=0):
        # Missing operands are 0, which is the additive identity.
        return x + y + z


calc = Calculadora()
print(f"Suma de dos números: {calc.suma(5, 10)}")
print(f"Suma de tres números: {calc.suma(5, 10, 4)}")
```
El constructor es un método que permite realizar un desarrollo particular en el momento en que se crea un objeto. También es posible sobrecargar el constructor de una clase dependiendo de las entradas que ingresa un usuario.
```
class Persona:
    """Person identified by an optional name and an optional ID number.

    The constructor reports which pieces of information were supplied,
    emulating constructor overloading through default argument values.
    """

    nombre = ""
    cedula = 0

    def __init__(self, nombre: str = "", cedula: int = 0):
        tiene_nombre = nombre != ""
        tiene_cedula = cedula != 0
        if not tiene_nombre and not tiene_cedula:
            print("Se creó una persona desconocida")
        elif not tiene_nombre:
            self.cedula = cedula
            print(f"La persona está identificada por C.C.{self.cedula}")
        elif not tiene_cedula:
            self.nombre = nombre
            print(f"Se creó a {self.nombre}")
        else:
            self.nombre = nombre
            self.cedula = cedula
            print(f"Se creó a {nombre} con C.C.{cedula}")


# No arguments at all...
juan = Persona()
# Name only
juan = Persona("Juan")
# ID number only
juan = Persona(cedula=1098)
# Every requested field, passed as keyword arguments
args = {"nombre": "Juan", "cedula": 1098}
juan = Persona(**args)
```
## __3. Ejercicios__
### 3.1. _Toppings_
Eres el dueño de una franquicia de Zirus Pizza. Establece una relación de herencia en donde puedas definir tipos de pizza con base en el sabor de la masa base (integral, harina, etc). Puedes guiarte del diagrama UML.
<div align="center">
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAa8AAAE6CAYAAAChwN3xAAAH8nRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDctMjhUMDIlM0E1OSUzQTI5LjA2MlolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChXaW5kb3dzKSUyMiUyMGV0YWclM0QlMjJKaWliTW5yTWoyYkhRTlRNWFI5dSUyMiUyMHZlcnNpb24lM0QlMjIxNC45LjIlMjIlMjB0eXBlJTNEJTIyZGV2aWNlJTIyJTNFJTNDZGlhZ3JhbSUyMGlkJTNEJTIyZU9sX2c3MS1US2FNdlFYZ0pKbWslMjIlMjBuYW1lJTNEJTIyUGFnZS0xJTIyJTNFN1ZwdGIlMkJvMkdQMDFrYlpKSUp5RUFCOTU2ZDJWMWtwc1ZOdnVweXVUdU1TN1RwdzVwa0IlMkYlMkZSNG5ObmtER2pxaWZxRkNWWHo4RXZzNXg0OVBBcFl6aiUyRmElMkZDcHlFVHp3Z3pMSUh3ZDV5RnBadGo4WTIlMkZGZkFJUWRjZTVBREcwR0RIRUlGc0tKdlJJT20yWllHSkswMGxKd3pTWk1xNlBNNEpyNnNZRmdJdnFzMmUlMkJHc2V0Y0ViMGdEV1BtWU5kRyUyRmFDRERIQjNib3dMJTJGU3VnbU5IZEczaVN2aWJCcHJGZVNoampndXhMa1BGak9YSEF1ODZ0b1B5ZE14YzdFSmUlMkYzNVV6dGNXS0N4TEpOQiUyRm43Yk9zdXhyJTJCSjFUJTJGdXYlMkZIWDRYYzdTWHA2bEZmTXRuckJUempGZ0t4eFN2Uzg1Y0VFSTkzUmlPRVlTck1YSHN1VnJrRlF4b3h1WXJqMllUWkVBUEJLaEtRUXg2bXVrRHdCMUE4cEN4N3hnVyUyRlZuRk9KJTJGUiUyQm1OQXU1b0c4d0xHWjZUS2dXVWt2Qzlpb3RWcW9ud0FOQUJVbWh6ZElFQXRXZ0o3eXZOSHpFcWRTQXp4bkRTVXJYeDJWRVdHeG9QT05TOGtnMzBoR0M1WkQ5MmRDakk2R3dFUWlQaUJRSGFLSTcyQ090QWIwSnhycTRLeFNGUEkyRlpUVVo3V0N0NHMxeDZJSm91TkJjWDhHNzNlRGRzaDBBbm5HRXJibGp6UkMzbk9rTDQxaGFFSHJiMlUlMkJ6VURqWlZrdWxxcVZ4c3k3RmF3N3NUMWRTMEhoVHFTNWQxbVFGZ1pVWjNZTCUyRklIUE8xQWlMbU9jNm80elZJQ00xUmw3a1dhR2xDZlpoQm85Wm00VmJJSCUyRm84Q3FJUTE5WW85cVVJUTBDRWl1UmNJa2x6aFdoNkU4NHJETUwlMkYzQUdIeUJrUHVnUHJTRk1mQTVsVkpUaG81b0xPZWN4ckFYVFRCd0U1TFlqU25MdGxIUiUyQm16YmxwZVdrUXR0R1RxN2JrWnFjRTJxcWNjeG94bDNPc2NtazZFTUVSMEFWSXdXano0cndSUTgxV0hlYXJEc25HR1o0VGRpU3AxUlNyc1lYZWRzYTg1OUY3cWhscmhoM3hLMTdnbHRZN0NDUHc5YVh3Rng1YjZzNk9MRjlRUk1mNHZuVHo1QU5Yam5jNHA0QXV0UEl1R1VDc0RzU3liQWhraVY5ZThOM0E5R1JnYkRkdGc1aTJKV0Q4QnFVOTZBWTgyZ3R5Q2tEME11Y2U1SUFDbE9aUHVlWE9yVldteVdDUU9yNEUyS0RHemFqZDhHQTNMUEtsVmxsZUJOYjRYVmxLMFozVzlFWnVaTlB0aFhqRDlpS3JhU01CamlZaWEzRW1iR29aNEM3JTJCZmdFSmJVMEg4T3V6QWRxOGZoQjRtQ3EzZ1pCYWMyNE92bG5BR2xyZ0x5OCUyQklXcUcyZHhEMlZr
VEFUWlUlMkZtM2dpSE9lZW1iYnFTdUYlMkZ0eTRXQUtNU3hNZHhwNEJzajY5U2VlYllDaWMxYXE5RjRTUVNFJTJCeWdxVlhBUUpHcSUyQnJhaDRDbHMyM3dpZnY4UW1uTVZnVmNvbDM5elR2cDRnV2hHRkpYNnR6TzhXMEhtN0pzNjFyJTJGSTFiOXplb1psdnlOZWxlNVpkY3RZRTg1NTJCOGtVM0JzcUVkMXpqJTJGOUJpODNISk9KMjdGNzZORng2Nk5ZcTlsZ2tJMWJWd3V3elVmUDY1Ykliemt5cjN1V2VPc2J2SnZkV2o4NWswZHFYTEhYYmxjbEh6U2VwdWMyOUdiOXRYN1YzNVhIVHFJZWFTMGIxdjllNjAwUEtnTUJ2dzltSm9QdldnQnMwbHE4cVRMTlpsTTFxWVZ0c2M2cVoxUUhIRTQlMkJBNXBISHR2RWV1QVVvV1YxbEpZenFBalpCdmVJelpRNEclMkJwNkMxUHRpUERobjFCOGdwZSUyQlMlMkJBJTJGdnFvayUyQkd3a2VON2xtRFlMN1UxS3c3dDNHb2p0MGZqQ2JGWDlXRFRKeiUyQlpJS0tXdTlqOXJYaGJTYTFVNmRyJTJCenBwQ3JUZiUyRiUyQlZjS2lvSjg5cXNkTmJhSHMlMkJuWFVnbFdVRTJVdmZjQ1p4a1dXY2JCeVM0blpsMDYwU09VQ05IdUtkU3hQVldFb3JGbCUyRjA1WWNVdkpweUglMkZ3QSUzRCUzQyUyRmRpYWdyYW0lM0UlM0MlMkZteGZpbGUlM0UN2GSpAAAgAElEQVR4nO3dW6gkRx3H8d4Iq3gJEgIRZPGE9kGFGBGCYgxKkAxRwaCyBjTCsvEywrpKCELEGHFROlEMwtHgDeKtPUIiEoOIQZTxwQjx8pANxIwPycOaByXoKgiJfx92a7ampqq6urqna6r7+4GBs2emu6una/t3qi//LoqiEF68Ur4AoK2CnQdSov8BiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYowivxWKxuuG1qqrV76uqWv2+ruvB2qG3AX5j6H8Ahje68JrNZqvfz2YzwmvHjaH/ARjeqMKrLEspikKWy6WIyNrvVHjpo7GiKGSxWKzmY76n5tM0ndmO+XxuDdPYZdd1vfp9WZb9fXE7YAz9D8DwRhVeKjTquvb+TgVDWZarcFkul2ujJj14fNPZ2qECRh+JdV22es+17FyNof8BGN6owquqKinLUubz+Woko0Yt+mFDfZRjBo0KPBvbdK52KGbYtF22+rwKPfPfuRtD/wMwvNGF13w+XwXGbDZbCy9zZFSW5VoI6efI9BBqms7WDkV9NnbZ+iFI12HFnI2h/wEY3ujCSz8/pP+7rutVEOiH7mwhpM9juVwGT+cbecUue2wjLdMY+h+A4Y0uvNT5I3VBhB5eKggWi8XGSMg8vKjCRkS809naoabT2xW7bDMQZ7PZqC7aGEP/AzC80YWXiKyuMBTZDAb90Js6VKf4rgb0TWe2w3W1Yeyy9dHYGLaXbmzrA2AYowgv5Iv+ByAG4YWk6H8AYhBeSIr+ByAG4YWk6H8AYhBeSIr+ByAG4YWk6H8AYhBeSIr+ByAG4YWk6H8AYhR7e3vW2nm8eA3x2tvbS/1/AECGiqLgL1+kQ/8DEIPwQlL0PwAxCC8kRf8DEIPwQlL0PwAxCC8kRf8DEIPwQlL0PwAxCC8kRf8DECP78NKfUDyfz9ce4ojdl3v/A5BGY3jNZjNZLBayXC6tj59XTwS2vYag2qQ/Pbkt/cnHfbRbhSmaEV4AYjSGlwqHxWKx9kh7x8yy3GmXZSmLxUIWiwXhNTDCC0AMZ3i5RlNVVflmtrbTrut6bdq6rkVEViGhDvkVRSHL5XL183w+X5tv03z0kZO+fH2e5nqqZaoRmz6fkOldbdLnt1wund8VziG8AMTwjrzqul4FiTp8
2DCz1WfUjl/twKuqWhvF6UGoDj0ul8vVe2q6NvOZz+dro8OyLJ3v6cqylLquN0Zerul9bTK/B/gRXgBieMOrqqrVzrssy8aRhG+nXdf1Ruio+ZnBEjsf23uKGTg6W3i1mV5fblP7sY7wAhBjq4cNzQshYsMrdD56iKjDekrb8Gqa3tUmW/vhRngBiOEdebW5WOP8zFY7bXPkEjvyajOfoUZevjaZ7Ycf4QUghjO89Evj9cOHDTNb7bTNkUtZllHh1WY+ZoiY56xsl/qrz4Wc83KN6vQ2me2HH+EFIIYzvNperHF+Zmuf06+804Om7WHD0PmY4RVytaGavy28fNO72qS+LwIsDOEFIIb3sCGwbfQ/ADEILyRF/wMQg/BCUvQ/ADEILyRF/wMQg/BCUvQ/ADEaw6upqvyWGtXqSr02BXW5CnC3EF4AYjSGV9sblfuwzYAhvHYL4QUgRm/loWz3Q/nuk9KfA6ZXkTersofcq2WrjGGrNE/F991DeAGI4R15hd6orG7yFTlXjUNN46rKXtf1RvV3fd5m4DRVhreFl2saRl67hfACEMMbXiFV5W3nm2z1//TagGYlDEujrA+HdNUntIWXq+oG4bVbCC8AMTofNnRdLNFUlV1/EKX58EkVMKGV4QmvfBFeAGJ4R14hF2u4wqttVXfbYUNGXuNHeAGI4QyvNlXl9UDQnyrsqsqunxdTnws952U73Eh45YvwAhDDGV5tqsq7rggMrcpuBqNelT32akNXeFHxfbcQXgBiUGEDSdH/AMQgvJAU/Q9ADMILSdH/AMQgvJAU/Q9ADMILSdH/AMRoDK9tVZU3rwrsk3ljtfme7UrDNpXp0R++cwAxGsNrW1XltxVe+n1hIhcqeShcJr9bCC8AMTqXhxJxV4gXuVAmSr1UAV9b9Xc9yFz3d6np1DJVW/UK9GYgquXq95YdHByszSe0Mr25Tq4bpxGG8AIQwzvyCrlR2Vch3lbPUB/J6WGnv6fmY6sMr6ZTIWhSYeR63yw9ZYap/nNIZfqyLAmvDggvADG84RVSVb6pQrzrs+ZhQz3ofDUNQw43ukZ751d4LbzUfEKrdJjr22b9sYnwAhCjl8OGvgrx+qG3oiic4aWW2VRNvu25MvV5NVIivHYL4QUghnfkFXOxhn7Y0BxBbXvkZR7C1NukRl+E124hvADEcIZXaFV5X4V4cwSlnx8yzys1nfPyjdiMFVo7TKjaoD7fNbz0eYicO8dGeMUjvADEcIZXm6ryvgrx+nt6KNiu6NM1XW1oHm40/62/9PfUBR37+/vR4cXVhv0hvADE8B42RLP5fL5xng/h6H8AYhBeLalRmXkBCuLQ/wDEKFxXFfLiNdQLANoq2HkgJfofgBiEF5Ki/wGIQXghKfofgBiEF5Ki/wGIQXghKfofgBiEF5Ki/wGIQXghKfofgBiEF5Ki/wGIQXghKfofgBiEF5Ki/wGIQXghKfofgBiEF5Ki/wGI0Vt4mdXWh3hMSFEU3ueMpZ4fmhFeAGL0El7m04pFzj300fX05b4QXvkjvADECAovM5hMZVlKXddrv1NPQjb/bT4Gw/ZEZT1A1JOPzdGc/oTmg4MDKYpi9dmmJyGrddLbos9vuVw2tlctC93wHQKI0Tm81E7eF24i58JBjcTm87nMZjMRuRAGtvfqul79rOahB5sKKjUPFaBN4aWHbVVVq1DUg7OpvWZYIw7hBSCGM7zUoUDzZR4KDAkvtcO3TeMLGtuIyWj8WnipefjmabZFtcc2v5D2ohvCC0CMQUZeKght0zSNkqqqcl4E0ld4mfNr0150Q3gBiLH1c176Dt/3nu/8lL4c32FDNQ8zUNuGFyOv4RBeAGJs9WpD8wIL/RySGSa2oNHPR6l5tAkv9dnZbLYWiPp7VVWt3vOd83K1F90QXgBi9Hafl3mOzDzE13T1nu/iCtf5NnXF3/7+/kagmIcb9Xm62qJfrRjaXnRDeAGI0Vt4ATHofwBiEF5Iiv4HIAbh
haTofwBiEF5Iiv4HIAbhhaTofwBi9B5evvuo+tZ1WX22taoqqapKFovFWkmrLnztG/J73qYxrAOA4WUdXrtiuVyuBZYKsm0ay/c8hnUAMLxeKmzoz/JS1eEV1/1SIu6K8fpy9enMiu76Dlwv8KumMdvsml9TW5sq38/n840KI/r0qi3mY2JUwd+m5YZ8z7kawzoAGF4v4VUUF6qsmztV873QivG2yu+u6vH6zyoE9coZTfPTP9O28r1aR9NsNtt4NphtndVN2b7lhnzPuRrDOgAYnjO8QqvKu0YH6mdbZYvlcumtYeiq/O4qwKv/bKtv2DQ/2zJD6y+a66jYDh3q89RLWDUtt+l7ztkY1gHA8DqPvFwV2PX3zJeal6tivGvH3Ca8VLtthXZty4mtfO8LL9uh0NlsJnVdS13Xq8ONIeHl+55zNoZ1ADC8zuHVZuTlox827CO8mkZerumV0PBaLpfBIy/1e3VosK5rRl4jWAcAwwsKrybmORvXeRp99BBaMV59Vr3fFF6qHeY5L9/8XOsRUvlezdtkO+elz6vNckO+51yNYR0ADK+X8NKvlmu62lAfwfkqxtuuwAsJL/0KxtD5+T5jW64ZXqFXG+rrrQd36NWGvu85V2NYBwDD6yW8dkHKR5WkuM9rLMbS/wAMi/DqiV5hI/Q8HwgvAHFGE17IE/0PQAzCC0nR/wDEILyQFP0PQAzCC0nR/wDEILyQFP0PQAzCC0nR/wDEILx68OCDD6ZuQrbofwBiEF49uP7661M3IVv0PwAxCr0sES9eKV4A0BZ7jo4uv/xyOXHihBw9ejR1UwBgMgivDu666y45evSoPPfcc3LZZZfJH//4x9RNAoBJILwi/fvf/5ZLL71UHnnkEREROXXqlBw/fjxxqwBgGgivSLfffvtaWD3zzDPywhe+UJ544omErQKAaSC8Ivztb3+T5z//+fL444+v/f5Tn/qUfOITn0jUKgCYDsIrwic/+UlrSD311FPyvOc9T55++ukErQKA6SC8WvrLX/4ihw8fljNnzljf/9jHPiaf+cxnBm4VAEwL4dXSzTff7A2n06dPy0tf+lL5z3/+M2CrAGBaCK8W/vCHP8ill14qZ8+e9X7upptukjvvvHOgVgHA9BBeLbzvfe+Tu+66q/FzDz/8sBw5cmSAFgHANBFegX7zm9/I5ZdfHvz5G264Qb7+9a9vsUUAMF2EV6Drr79e7rnnnuDPP/TQQ/Ka17xmiy0CgOkivAL87Gc/k9e+9rWtp7v22mvl+9///hZaBADTRngFePOb3yw/+MEPWk/3k5/8RN7whjdsoUUAMG2EV4Mf/vCHcvXVV0dPf9VVV8lPf/rTHlsEACC8Glx55ZVyzTXXyE033STHjx+X+XwuJ0+elFtvvVVuu+02ueOOO+QLX/iCfOlLX5KvfvWrcs8998h3vvMd+d73vicHBwdy8uRJed3rXpd6NQBgVAgvjwcffFDuu+8++dGPfiTf/e535Vvf+pZ87Wtfk7vvvlvuvPNOOXXqlLztbW+Ta665Rm655RY5ceKEfOQjH5Fjx47J+9//fjl69KjccMMN8va3v11+9atfpV4dABgNwqujz33uc3L77benbgYATArh1dGnP/1pOXXqVOpmAMCkEF4d3XrrrZSCAoCBEV4dnTx5Ur7yla+kbgYATArh1dF8Ppf9/f3UzQCASSG8Ojp+/Lh885vfTN0MAJgUwqujD37wg3LvvfembgYATArh1dGNN94odV2nbgYATArh1dG73/1uue+++1I3AwAmhfDq6J3vfKc88MADqZsBAJNCeHV03XXXyS9+8YvUzQCASSG8OnrrW99K3UIAGBjh1dHVV18tv/3tb1M3AwAmhfDq6KqrrpLf//73qZsBAJNCeHV05ZVXyp/+9KfUzQCASSG8Onr1q18tp0+fTt0MAJgUwqujsizliSeeSN0MAJgUwqujI0eOyJNPPpm6GQAwKYRXR5dddpmcOXMmdTMAYFII
r44uueQS+fvf/566GQAwKYRXRy9+8YvlX//6V+pmAMCkEF4dHT58WP773/+mbgYATEpRFIXw4pXyBQBtFew8kBL9D0AMwgtJ0f8AxCC8kBT9D0AMwgtJ0f8AxCC8kBT9D0AMwgtJ0f8AxCC8kBT9D0AMwgtJ0f8AxCC8zrPdPFtVlYiILJdLKYpC5vN54laOD/0PQAzC67yiKGQ2m63+PZ/PpSgKWSwWCVs1fvQ/ADEIr/PM8FosFqvRlz7yUr83R2iu34uIVFW19nsC8QL6H4AYhNd5oeFlTmMLI/33aj7L5VJEzj15WV/O1NH/AMQgvM7zHTa0hZcaTZmB1vT7oiikLMvtrkxG6H8AYhBe59ku2KjrWkQ2L9hQ/9ZHVK7fq5GXCqyyLAkvDf0PQAzC6zxz5KUzw0uNytQ5LcX2e/U7/bAh4XUB/Q9ADMLrvNDw0i/M0Ll+rw4X6ue/CK8L6H8AYhBe54WGl3nloJrO9Xs1b/13fOcX8F0AiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYhBeSov8BiFHs7e1Z6/rx4jXEa29vL/X/AQAZKoqCv3yRDv0PQAzCC0nR//KUesTOi1dRFOw8kA79L09sN6REeCE5+l+e2G5IifBCcvS/PLHdkBLhheTof3liuyElwgvJ0f/yxHZDSoQXkqP/5YnthpR2OrwWi4UURSHL5bL3eZuXXJrvLRYLZ3vQL77TPLHdkNIkw6ssS6mqavXvqqrW/iO6wgvbsav9D35sN6QUHV4qWObz+Wr0ou/wl8uldWSjplOBocJJ/Tyfz73LUEGm3pvNZqv5u5apVnS5XK4+YwZiURRS17WUZbma/uDgYG0Z+siraf3rul5bp7IsW3/HU8FOsD9VVclsNgv+fJvPmqay3fR9gu+ITR/U/knfD8Kuc3ipEcx8Pl/7j6CPbvT3zOlUMCyXy42Rlh4QIuf+Y6oQUO/Vdb22MurfZnt0apn6tOaXslgsNpZhCy/X+uthVpYl4eXh639lWTIKDqT+IAwJJBVy+h9mbU0lvBTz/zzS6hxeKmjUqEV/T9FHO+Z0vp2++VnffBaLxVpAuEZYij4ysoWgHl5mmDatv/6z7d9Y5+p/thEt7FQQ+f5oM3U9h0t4nWPbj9iOzJh/wNve00de+s/m50TW92FNf5CPkTe89C/NHMY27by3EV76+7bl24b1IefL1LzUcgmvYdn6X1mWq35DeIULDa/lcrn6jmP7JuEla4Gi9kF1Xa8+a/5BX1WV9z1beKnPqZH1YrHYOLxIeLWw6yMvl7qurf+5y7JcbXjCa1i+/kd4tdNm5NXV1MNLhZXeP8uylNlsZg26kPdcIy9zeeay9eCciq2El8jmOS8z2NqEl5qP7ZyXOSpTG88c/ZkrrW9k9Vk1r67hZa7HbDYjvDwIr/4QXttj2x/Zwkudp7UFVNN7IeFV1/XGsgmvFpp23k1XG7YJL/3wpWs+tmWawWb+2/VZNQTf39+PDi+uNgxHePWH8NqeXRl51XXNyEt2/D6vsZjP51z66kF49Yfw2p6Yc17mH7v6OS/be6HhxTkvwmsr9M6pn3CFHeHVH1t4mYfE+7rpf2r7DVt4mUd7tnW1ocjm6Eq/V1bNa0r/VwgvJEf/yxPbzc13T1gf94u5zr9to5TeriK8kBz9L71//vOfradhu7ltO7xE1m9l6mN+uSG8kBz9L627775bXvKSl8gdd9zRKsTYbkipU3ht8xjrkBXc1clO26tvfayXfk5NtT3nC0JCv4+HH35Yfv7zn2+5NdP02GOPyYc+9KFWIUZ4IaWdDa9Uclgn84bsqqpGHV7PPPOMfPzjH5dLLrlEXvnKV8oHPvAB+etf/zpQ66ZFhdjFF1/cGGKEF1KKDi+90rJerd0csbiqyIe+p/8cU8FdH1WZO3jbCU5z3ru4XlMKr/39fbn00kvlxIkT8o9//ENERD77
2c/KRRddJJ///OeHauLkhIQY4YWUeht5NVWR91WG972n/9y2grtZCiqkQrkZILu4XlMIr4ceekje9KY3yXXXXSe/+93vNt5//PHH5cYbb5RXvepV8uMf/3iIZk7SY489JjfffLM1xAgvpNRLeLWpZdj2PZH4OoIxNQX1wNjV9RpzeD311FNy7NgxecUrXiH33ntv47QPPPCAvP71r5d3vetd8uc//1meffbZ6Ndzzz03+Ot///tfkldbthAjvJCSN7x8VeXVxHqRSMW3I9enC3lPpFsRXPNGvpAvxCy5smvrNdbw+vKXvywvetGL5I1vfKOcPXu21Tze8Y53SFEUcujQIbnooot6fx06dCi7l+sipK6vI0eOyKFDh+QFL3jB1pbBi1fTa29vT4qiGOfIy9T2sOGurtdYw0tE5MyZM/LhD39YXv7yl8u3v/3txmnvv/9+ueKKK+Q973mPPProo9ts5qSdPn2akRd2yvkQ6/+cl+1ZNSL28z++9/Sf21ZwN3fqfZzz2oX1GnN4Kb/+9a/lLW95i1x77bXWbfboo4/Ke9/7Xrniiivk/vvvH6KZk2QLLYXwQkqdwkt/hHjTVXn6VX9t3tN/jqngrl8VaXsCaterDVOs1xTCS/nGN74hL3vZy+SjH/2oPP300/Lss8/KbbfdJocPH5YvfvGLA7ZyWnyhpRBeSKlTeIWwnf8JeS/WUBXcU67XlMJLROTs2bNyyy23yMUXXyx7e3ty7NgxefLJJwdq3bSEhJZCeCGl7MMrVQX3lOs11QobjzzyiPzyl7/ccmum6fTp01TYQFa2Hl5AE/pfWtQ2RI4ILyRH/0uPqvLIDeGF5Oh/eWK7IaWdDa/Q6uvm1YVKTHUNxbzisEmbSvFt5z0Fu9j/0GxXt9s2LphS9PPQ5vq7/m8P+YSMKdnZ8AqlOobZaWazWfTD2bYZMITXppz735Tt6nbbVnjp93yKXKjeo/B/e1jR4RVSOV1dCSfivl/KaMjGPVCKb3qzU6nPmlfsmW23VXM3q+Xr91oVRSF1Xa+12VU1I2TeOGdXd4Lw67LfcD1Joe1THPT7IH3LiN03qWn1Kjnm++ooj5r+4OBgbRl9PEkCmzqHl69yurmjV/82q6eXZbl6T92zZIaXq7q7msb2b9cyQ6u5mx1WX0fbd9Fm3riA8MpT1z96Rez7At9THNR76v+3rRxbn/smnblPsX0ferk89bnY/YP+JAls6hxeIbX9zJtqXbX+9PdtG9w2va0tZVlu/NXia5+vLJPOdR4ttuQTziG88tTHfsOs2en6f25O59vp97Vvsmk6EmMrzL2NOq3oUFU+pHK6voH0Da4P5V0nM/Xf+6q7K2r0pv/V07TMkIDRv4OisN8ETXh1Q3jlqY/9hrnz3kZ46e+32Tc1Mc+3E17DSjLyss3H9/umkZfIhc6syiiFLLMpYMzlMvLaDsIrT2McebmYD7ZV9FMehNewOoeXr3K6+ZeP2sjmX1h6x1PzaTrnZW5U/eIM/S8h2zJDA8Zsp+sYNOHVDeGVp77DS6T5KQ5twquPfZO5vvphQvVZNa+u4WWuh/4kCWzqHF6+yul6BzGv6PG9p8/D9xnbytiOX5vLbOpA+lVC+lVE5nTmX3Nt541zCK88bSO8mq42bBNe+uFL13xsyzSDzfy367Pq//b+/n50eHG1YbjeDhsCsQivPLHdtmuoJ2TkivBCcuwE88R265d+6sM8ioRN0eEF9IX+lye2G1IivJAc/S9PbDekRHghOfpfnthuSGmnwqvtVXiue8T6mDeGsyv9D+3s+nZrs3+ImYZ9SlpZh9euzBvd7Er/QztT327sU9LqHF62ezJ892Tp94Xpl4GaFddDKj3b7p+gmnt+pr4TzFXIdvP9n9NvJnbdiznUkynYp+SnU3jZqsGr39uqJpslVlQlDb0xeudwVV5WbB2Nau75Ibzy1Fd47cKTKcw26T+zT9lNne/z0tkK7ep1xZpqdanOEFLL0GwDZZny
RXjlybXdXMVuzYfDdqlB2PeTKcw22drHPmW3eMMrpDq0qakCvH6IwJyn6gwhVeTNNtDR8kV45amvkdeuPJnCnIZ9ym7rdeRl+73v+Tiuw4b8lTQthFee+g6v1E+mcE3DPmU3dTrnpW88/Xi1qzK0fl5MfS70nBfV3MeL8MpT1+22a0+m0JdrawP7lN3SKbxcV+/4rurRr9Ixj4HrFddjrwyimnt+CK889RVeu/BkCvYp+ekUXkAf6H956iu8uNQcMQgvJEf/yxPhhZQILyRH/8sT2w0pEV5Ijv6XJ7YbUiK8kBz9L09sN6REeCE5+l+ednG7tb36jyry+SK8kBz9L0+7uN22GTCE124hvJAc/S9PXSts6J9pc68oT6aAiBZevHilfCE/IdutKPw7e55MgVjn9x3sPJAO/S9Pru3Wtqq8jidTIBThheTof3nqOvLiyRTogvBCcvS/PG0rvHgyBUIQXkiO/penPrabHgg8mQJtEF5Ijv6Xpz62G0+mQCzCC8nR//LEdkNKhBeSo//lie2GlAgvJEf/yxPbDSkRXkiO/pcnthtSIryQHP0vT2w3pER4ITn6X57YbkiJ8EJy9L88sd2QUq/hpe6TcJVu2Ya+77vgPo7hsRPME9sNKfUWXqqWmF5+ZTabbdxE2DfCK3/sBPPEdkNKweHlq1Emsv5oA0Xd4W7+27zj3fccHZGw5/ccHBxIURRrd8H77pbXVn7VltDnAal5q2WhG77DPLHdkFIv4eUrnKlzPV/H9xyd0Of3qHmoAG0KL9dzhHy10cz2mmGNOOwE88R2Q0re8Ap9Lk9IePmqPPuCpu3ze9Q8fPN0PUfINr+Q9qIbdoJ5YrshpcFGXr7n6zSNkkKe39M1vMz5tWkvumEnmCfbH7a8eA382u45r6ano4acn9KX4ztsqOZhBmrb8GLkNZyQ/ofdw3ZDSsHh1cR1taF5gYXt+Tq+8Ap9fo8rvNRnZ7OZ89k8+nOE9N+HthfdsBPME9sNKfUWXiKb58jMQ3xNV+/5Lq5Q07ie37O/v78RKObhRn2erraEPA+I8OoXO8E8sd2QUq/hBcSg/+WJ7YaUCC8kR//LE9sNKRFeSI7+lye2G1IivJAc/S9PbDekRHghOfpfnthuSGmQ8LLdO+X62eR7z8ecznaDW+zVgvrl9E2qqpKqqmSxWKyVufK1ta9l54KdYJ7Ybkhp0PDShe6w+wwvvQ36vV1thQbIcrlcCywVZF0QXtgVbDek1EuFDd99WrbK7/o05s/6v9X9Wfp75r1kelUP33TmTt+2/JCK9GbleV975vP5RtURs01m9RFfhX1z2WPBTjBPbDektPXwUtO2OWyoh4AeQmbJJ3P05JpOb4NSVZWzSnzo+oS0xzSbzbyjUF+Ffdt6jAE7wTyx3ZCSN7xCq8r3GV6uUZirfa56hW3OebUp6quvT1N7bIclbYcObevfdtk5YyeYp762G09gR4ydG3m5qrkr+uG0oijWHp3im8512NBWaLdNgLja4wsv8z8n4UV45aiP7cYT2BGrl/DyVXBX0/Yx8jLfazvyMjuXqoTfpiK9a33Mzy2XS0ZegQivPHXdb4jwBHbECw4vn9AK7qGBZVZzd43KyrLcKOBrm05vg+IbeYWuT1N7bN9r6Dkvwgu7ru8/el14AjtsegkvEX8Fd7Pyu4g/vPS/XMwQ0v/KcYWObTr9LyH1cnXY0PVRhwZd7elytaHrP4++7LEgvPLk2qi4k+IAAAJzSURBVG48gZ3nAA6ht/DCpm3c5zVG9L88DTHy4gnscCG8tkyvsBF7U/TY0f/y1DW8RHgCO+IRXkiO/penPrYbT2BHLMILydH/8tTXduMJ7IhBeCE5+l+e2G5IaSvh5Ts23Leuy+qzrSEV5NvytW/I73mbxrAOU8R2Q0rZh9euSHFl4Vi+5zGswxSx3ZBScHiF3o+hjhPbrrgxjwGLuO+CNxq4cR+UeUe8/p5+zNpss2t+TW1tups/9J4us/SN
uomxabkh33OuxrAOU8R2Q0q9hVdR+Cu66++F3gVvu5vddUe8/rMKQbPKu29++mfa3s2v1tFkq6ZhW2d1otm33JDvOVdjWIcp0v/Y4sUr0aufqvK2f5v3N+n3Pfjuy3Ddze66qVD/2XbPRtP8bMsMvafEdQ+X7dChPk/9stym5TZ9zzkbwzoAGJ4zvIwPOUdevorurgDUn4GlfqcfNnTtmNuEl2q37eZB23Ji7+b3hZftUOhsNpO6rqWu69XhxpDwaqqcn6sxrAOA4XUOrzYjLx/9sGEf4dU08nJNr4SGV5sK8ur36tCgXtXet1xb+xh5AZiyoPBq0lTRXZ2n0UcPoXfBq8+q95vCS7XD9mRj1/xc6xFyN7+at8l2zkufV5vlhnzPuRrDOgAYXi/h5avobl5Jp4/gfHfB267ACwkv/QrG0Pn5PmNbrhleoVcb6uutB3fo1Ya+7zlXY1gHAMPrJbx2QcryK1SQjzeW/gdgWIRXT6ggH2cs/Q/AsEYTXsgT/Q9ADMILSdH/AMQgvJAU/Q9ADMILSdH/AMQobBUwePEa8gUAbbHnAABkh/ACAGSH8AIAZOf/uJjejJki6DQAAAAASUVORK5CYII=">
</div>
```
```
### 3.2. Usuario
Con base en la información de una persona, construye una clase de `Usuario` que herede de la clase `Persona`. Puedes guiarte del diagrama UML.
<div align="center">

</div>
```
```
### 3.3. Empleado - Trabajo - Empresa
Construye la relación de clases entre los trabajadores y la empresa para la que trabajan. Puedes guiarte del diagrama UML.
<div align="center">
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAu8AAAG3CAYAAAAEtrtWAAAM+nRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDctMjhUMDElM0EzOSUzQTMyLjEyOFolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChXaW5kb3dzKSUyMiUyMGV0YWclM0QlMjJBbmJmdmM3WGdQVjZ1TmNLdHhrNCUyMiUyMHZlcnNpb24lM0QlMjIxNC45LjIlMjIlMjB0eXBlJTNEJTIyZGV2aWNlJTIyJTNFJTNDZGlhZ3JhbSUyMGlkJTNEJTIyTVZJdFVNT3o5YnQ3cmJmZEc4aVYlMjIlMjBuYW1lJTNEJTIyUGFnZS0xJTIyJTNFN1Z4ZGI2TTRGUDAxa2JvcnRjSkF2aDdidE4yWjBjeXFtbzVtZDU5R0xyaUpad0JuRFdtYiUyRnZxOUJqc0JiRXFTNG5SVklWVlZmREVHZkklMkJQanc5T0J0NHNmdnFENCUyQlhpQ3d0Sk5IQ2Q4R25nWFE1Y0YlMkZrSSUyRm92QXVnajRybE1FNXB5R3NzNDJjRXVmaVF5cWFpc2FrclJTTVdNc3l1aXlHZ3hZa3BBZ3E4UXc1JTJCeXhXdTJlUmRXckx2R2NhSUhiQUVkNjlDOGFaZ3NaUmFQcDlzQUhRdWNMZGVuUjBDJTJCT3hGalZsbyUyQlNMbkRJSGtzaDcycmd6VGhqV2ZFcGZwcVJTSFNlNnBqaXZPdUdvNXM3NHlUSmRqbmg1ODN6MGd0JTJGckQ3OXk3MDVIVjUlMkYlMkYlMkZRMVBaV3RQT0JvSlolMkY0aHZDVUpWamVjN1pXUFpFJTJCMGpqQ0NaUXU3bG1TM2NvakNNbzRvdk1FUGdkd0o0UkQ0SUh3akVJbm5zc0RHVnRDTkZqUUtQeU0xMndsN2pmTmNQQkxsUzRXak5ObmFCWkhzazA0ekRPSkIzZFVxWEVyem9Td0ExRk9VcWh6b3pvQjFVSmY4Rk9sNG1lY1pqSVFzQ2pDeTVUZWJSNGp4bnhPa3d1V1pTeVdsV1R2d09PUXA4WnVSNXRrd2lnZ0xDWVpYME1WZWNKSXBuJTJCdGdDM0xqeVU0VFdSc1VVYlNVQWF4aFBCODAlMkZRMnlmQkI1bm1Qbkx0YXpnZXVCNEdFeFhjY0l1ZTNHYWZKZkFEZDducFA1M2szaU9Na3hORHMlMkJkMDZJOXF4WkJYRHBWY1JodlNmVSUyQmdaN2VRWTA4amNkZzFvME5WWkRnRE9mcEVaaXhnMGVabXdBbmswaW1vaEJiNkkzR2VOMEV1WE9JRExmczdyWFByYnlGZlo0U0xFNE56N0tCJTJCaUN4cUdKQkd3WVJuT2NJRVJBWWdsZzRmTEV6SzhnRDlJMGN3NUd3NkdjT016S0tOdEdmNUVkWjdOV0FMUGdta09Gd0lBZkNRQ2hMdGhxM25RNm9DVENCTmR1d3ZBUnI0bGZIa0dmTlZ5SE5FOGQwV09GYkdpZ3hJY1E2b2lzczNvTjVId3kxT2taZDNUcyUyQjRaTWh6aE94TGRzSlJtbEluMmVWRzNsdm0zU3U1MFIlMkZhWVdNcXRiOGd0UEt4VDlNTXF5Q0J6NWJFdGp1RWdXMEVTbnpHWEZIRUNiUEViY01FRGcydjFUR0FQTEpQZHdESjBMYUZscUtIbEtsNUdCSFJRTHk4NmtoZkRxcnp3a0U0UXJtT1NGNTR0ZVRIU2tuNmF6JTJGJTJCUWVBYUQlMkZodkhkJTJGZ25LNDE3Y2ZqanBZclg1Y09wRWc4enhtRVE0WXclMkJzRjVIZE1JZXczMTFoQkZKUTFzNll0enJDR3ZKbmV4SUU3WjB4T1JWT2lLZlJSaklDUExBZmpOeVNzOEdYUU5H
MXhKR3dQaTJ0TVMwblExSUVwNEx6d2RLZHhFVGslMkZnRmhPUXNqMFpGOFpxSzYlMkJiZHZzaGlwUWZJRTgzJTJCRm1IbzVxTDBqNndrUGw4JTJCbFF0clZVamd1VW9uaWFJNEM1MDUza1FGdHFmbXBjcTVONFJUNkJ5aGFVcHlnSVNhSlZVVEElMkZEUWJNVUQwcTY4UU9YTXlVdEo5ODFKTHl0R1pXQndFb25KdDNwdnBqVEw1bTVZUHBFcm9ZSnFSc2htS2FPYUtKNUpubFgyc1dvTmVXNUxROFZEYXczbHFOczg0JTJCRkFWQXFycW1wQkFQYWVXV2VpZGpTc3BIaHNXUFZPVGFLMkRvWE8yQWZwUm1tYmFTYUVhNENYTk0lMkZQZWNoV290Y0VQR0FVQlpvJTJGbHJLQU1yanJjJTJCVyUyRkZ1cWdYbTNKV1NobVIxRVRpa0IzZVAyWnBrSXdSd0lUTiUyRkk0bE9mRmROQlBpWWRQaWNneHclMkZRRnA4MEV5N0V0aFl4TVZtNHZrVHRLcjlHcU4lMkJYWGxraEdKaWUxVFNYVFpFMEM0SkprVm5EUFNRZ0E0TXpvdDEza3ZUUm5sJTJGU0J3akFNV2RwV215UVBWT2h2V0k4ckt5YzlhYXdka2pUZ2RDbnV4bHlwSnllTDZEVzRmMGIwMmxMc1NEZUxiMWZCaXFjd0lmWktxU09sNU8lMkZnJTJGNDFNVW1sc1RTcnBycTlaQ3RVc1ByR2tGN0xtQTA0WFglMkZCU0tack4yaDd1eHRuWXgwb2Q5U0xuOVR6U3NBWjh3UVkwQWNxM0puSjBRN2tYT1oybDE3QzBNcWJYbXNZeHVieXRUbUFZU2d2d1JWbnpKNHRwZ3B2clBFRFBZYlZpT2xFZjl0TSUyQmNUcXZVbFAlMkZZdk1ZcURXNGtTYlVUcTFwRzkzQTNreFV2YlRwUk5yVWZUNlR0RUZHYVdQdDFTYlNUZWpUalFmMEk0QnVNTDZhRkhWQTlHSk96YTgzSVVQeW1PR3RaczhpcjJDUnliN1N4Z2dvYTI4NFhkMUs3cVZOWiUyQmsxdk9NMHB0ZVd0SEZObm5HcnRQbTVTak55bTdNRmprNXkybGozbXVJWWNESDRKY2FGamkxTm9RWiUyRmVUUDJ4czd2UlVVbm9tSkRDc29sY3czckg5Y2tLa2EyUklXcnU3eGJVZEVrSjVhY0JKUjlGd3NZbzZTZ1NicUtUWGJLeCUyRnhBc1dUNUNBQ1pFOTdiS0Yyd1N6RjY5N0pSVERpenB6Vk1XM2Q3cmRGUmVrMzdxVXpwdGFZMWROTjFSNjJCJTJCVTNPSlNjbFNtbTBRT1FaMTR6SHF3aWZTSlk1JTJGUDFQenpJZHc5RGdpNWhnT0xhbFlid2RWalJIM3FaVlhMJTJCVFhWVk5ycFNGN1ZMMUZ6dHUlMkZYM05ydHVsUnVOYVF5T3YycERsN1ZMJTJCWVlqSUZhY0JKaElYRzFGYUtodHdnc29vMldDbWJUdmZvTFNacjdTM3IyRXJYNGY0a2hxaGRkZGU0VU1kQllkdURUNyUyQm9UajB2WmFHYk9OUVg0MmpsNERJbHZrc1VBYlZscVRjT2tKRGltT1doTjhXTktrdGw1QmZnMnFPSUVDTFdyUEJQTEZnYzViZzZHb2JiWnZiN3VTNmFJUDBVJTJCZk1jU1lWdUklMkJkRnNCRG9iNEZ0YnluRlpYSEFXb1pCYlkzdEtKZGQ3UTJMUElsQ3FHYnhoTlUzVjNvZGpOU2ZLY0c4RU0zdUE1SExRMDFqSlI4TTJDcG1oUXN6Vk5NN1lhOTBmamwlMkIycDR3S2I2U0gydnpGd2ZQaFIzWER0YjNUNjd2MCUyQkpIU3JRalJaMGR2YTd4Z1pTanBZb1lGOWwydWpCYkpaSGp3dWFrVnRRcE9LYWp4d3ZjJTJCVzVTa0lTRGpwelBWQVRpWmNtRE44Z0Ux
MWJub2V2ZXg3R0RCeURqNUVWUG5ZcVZPeE12Y09wR0owNU1PRlhOSWszZm1zNjlsOHBWYlo4N0xuRGNRV2Q2a3ZHcjFYUWRjRnhLQiUyQlBwb2Z4c1Q1REdCJTJGekNIeW5XeiUyRnZtJTJCJTJGOEd0OTUzbHZ6blc3UHZEZnhXU3pjdDR3M251N05lRjN5VThQbTJDM3RUTVpLbnJ5U1o3ejZGNXZxS05xVloxeTFSRkh2SnRDJTJCUEhNRUt0RTM0NzF2S3FuJTJGZm8lMkZoQnppT3l5VDZmcm0zWVJJN3NpbG5rcXByTSUyRlpmcDUyY2luYUN3TmglMkZhJTJGR2tmc21zVFR3VnJ3WWJzUWw5NWJ2VEttZE11dUcwJTJCbmNzNjF1cWRuWjlKaTBOJTJGUjg0VGQlMkZMOTc0NXJiNlFSNGEzVngyUkdoUzN2M0JYSkd6N080SGUxWDglM0QlM0MlMkZkaWFncmFtJTNFJTNDJTJGbXhmaWxlJTNFzMKrkwAAIABJREFUeJztnX3sZUlZ50/PMG/OdA9vglky2tnbEbPpQdRIVpMNO9lsbgf+EKM4ZsT1ZWcWrzaYGA2zykJrG4ebbEiP2Oyyxn/Q9aYTxRdaDaNG3MuqMYpEYRharsHRiECcGXt2gwsNz/7RPr9+fvWrqlPn5Z5Tdc7nk5z0/fU5p+qpuvXU+Z7n1kslAAAAAABQBNXYBgAAAMC4VFXFwTHLo0TKtBoAAAB6o1QRA9CFUtt9mVYDAADMmMViIdvttrf0ShUxAF0otd2XaTUAAMBM0Z/7Ee8A3Si13ZdpNQAAwAxZLBay2+0Q7wA9UGq7L9NqAACAGYN4B+hOqe2+TKsBAABmDOIdoDultvsyrQYAAJgxiHeA7pTa7su0GgAAYMYg3gG6U2q7L9NqAACAGYN4H4b1eu3d2Ge5XI5tGvRAqe2+TKsBAAAgyPvf/375oz/6o+TrSxUx+0bFe58vSpAPpbb7Mq0GAACAI3zyk5+U//Sf/pO85CUvkRe84AXyxje+UZ5++una+0oVMfsmJt4Xi4UsFgtZLpcHEfnNZuONzus1q9XqyDr9ek7/1f9fLBaH0rXYXwFWq1XwHL8QxCm13ZdpNQAAABziv/7X/yp33nmn/MiP/Ih89rOflaeeekre+MY3ygte8AL5mZ/5mei9pYqYfVMn3vWcrr2v9ejep8JcRbj+7TvnntcXAk1LXwBC+aiYd++Do5Ta7su0GgAAAERE5L3vfa987dd+rXzTN32TfOhDHzpy/o/+6I9kuVzKN37jN8pv//Zve9MoVcTsm9CY981mcxB5F5ED8e4KZ59Yt+e32+3Bud1ud3Dejajbv22k395jcW0FP6W2+zKtBgAAmDlXrlyRb//2b5ev+qqvkkuXLtVe/+53v1tOnjwp3/3d3y1PPvnkoXOliph9kzJsRqS9eN9sNkfO2Si+b3jMdrs9ck6xUXzXBjhKqe2+TKsBACbMlStX5MKFC3L27Fk5c+aMnDp1yvswH+M4duwYx7FjctNNN416HDt2TKqqkn//7/99o7Z17do1+Tf/5t/IrbfeKo888sjB/5cqYvbNvsW7jbxbfGPZfejQndVqVWsDHKXUdl+m1QAAE+X8+fNSVZWcPXtWLly4IJcvX5YnnnhCrl27trc8v/jFLxZ1fOELXyjquHbt2l6Oxx9/XL71W79V7r33XnnPe95T+z3/4i/+opw6dUpe97rXyV/91V8dOleqiNk3fYv39Xp96G/3s2L/TyPtmpYvX03XTlL1jaWHw5Ta7su0GgBgYrzvfe+T06dPy/333y9XrlwZ2xwoiF/5lV+Re++9V77lW75FPvKRjxw5/6d/+qfy6le/Wr7+679efuu3fsubRqkiZt+ExryrgG4q3mOrzbjY1WZUnCvWFruijLXXfWGAo5Ta7su0GgBgYpw+fVoefvjhsc2Agnnb294mt912m/zoj/6oXLt2TZ599ln5oR/6Ibn7
7rvlwoUL0XtLFTGlEBLoMC6lfidlWg0AMCHOnz8v999//9hmwAT4m7/5G/me7/ke+Yqv+Ap58YtfLKvVSj7zmc/U3leqiCkFxHuelPqdlGk1AMBEuHLlilRVxVAZ6JXf+Z3fkQ984APJ15cqYgC6UGq7L9NqAICJoKvKAIxJqSIGoAultvsyrQYAmAi6qgzAmJQqYgC6UGq7L9NqAICJcObMGbl8+fLYZsDMKVXEAHSh1HZfptUAABPh1KlT8sQTT4xtBsycUkUMQBdKbfdlWg0AMBGqqtrrBkwAKZQqYgC6UGq7L9NqgImwXq8PbbBRR5NroQxKfXjAtDh58qR3MyIOjikfJ0+eHNv1WsFTA2AkdCe8FEGuIl/XCvZt1Q1lUlV0wzA+tEOYI6W2+zKtBigcFeKr1So5mr7dbovtaCAM3ynkAO0Q5kip7b5MqwEmQqp43+12slgsDv6F6VDqwwOmBe0Q5kip7T7Z6rHHJXFw9H3kQJPIO0yTXNoizBvaIcyRUtt9I/EOMBVyac+Id8ilLcK8oR3CHCm13SPeYZbk0p4R75BLW4R5QzuEOVJqu0e8wyxp256vXr3aqx2I93nz5JNPypd92ZeNbQYAz3iYJaW2e8Q7zJKm7fnq1aty7tw5OX78uFy4cKE3O3zivaoq2e12Rz7D9Hjve9/LyxtkAc94mCOltnvEO8yS1PZsRftDDz0kjz/++J4tgzlx/vx5edOb3jS2GQC1feJyuZTtdjvqile6XO4+AhqxBQ2qyr+3Bsv3lk+p3x/iHWZJXXtW0X7ixAl58MEHEe3QO08//bR81Vd9lfzyL//y2KYA1PaJKti32+1ovxbtS7wvFgtZr9cHf+sGekpIvEP5lKptEe8wS0LtGdEOQ/Ga17yGqDtkQ6hPDC21a8WuyA1hvVqtDq6xgne323kj23qfCmYV5/p5tVpF81Ahr+d0F+pYnlqu3W53cI37QlBVlWw2G1ksFgf3X7p06VAeNvJeV/7NZnOoTOzXkQelalvEO8wStz0j2mEoHnvsMfnmb/5mec1rXjO2KQAHxJ7xm83mQETr8BkXK8JFjs7nsdFte869T4Xxbrc7Emm3AlnkeoTc/iKggtuWSf+OLQ6gedp73brZbrdH8vCJ91D5rZhfLBaI90woVdsi3luQEoWAvNH2jGiHffGFL3xBnnzySfnDP/xD+aVf+iV59NFH5Ru/8RvlZS97mfy3//bfxjYP4BCxZ/x6vT54xulOzy6u0NaotT2n2Gi3e19M9LrXxtLZbreHBHIowq7YyLjvJcCKd/dloq789rPvbxiPUrUt4r0F+rOZoj+TMSauHKqqOpiIes899wR/Gubg6HK85CUvkVe84hXyzd/8zXL27Fl5z3veM3bTB/BSVf0MmwmJV5t+X+Ldnvfl77M7Zby8pqX5hvJAvJdPqN3nDuK9BVV1WLy7P5fZTsM6qDsmT9OwY/18nYs9p9EA7fzs+Dprk5smLxaH0fbsRt4/+tGPjmwZAMDwxJ7xKZNVc4+8h9hsNt4yLRaLg+ct4n26lKptEe8tiIl3V8gvFosjY/t8P+Xp9W7a9m99KdhsNgf3aVoq1n0djLUBruO2Z0Q8AMyZ0DPeLg1ph8+4xMSryNEx766wbyLeNR3fmHc3Kq8C3I3+u2W3w2T0WrvfRhfx7pZjuVwi3jOhVG2LeG+BK7DtsBkV0erA9m93so3IDYd3/1/kRgdio+YqxG3kPXStjb7TURwm1J6tiH/ooYcQ8QAwC0J9YspkVZF68Vq32kwT8W5/cQ6l48vTFfbu36Fr9dfyixcvthbvrDaTJ6VqW8R7C3zj6OyM9tA4OzdqoGjHoIeNKvjEu04Y8on3zWZzJMLPzPaj1LVnRDwAzAme8cOxWq2OBOtgHEpt94j3FriRd4sbebeExLti38x3u12ryLtGSdxhM4j3w6S2Z3eHVUQ8AEwRnvH7w/7Czi/heVFqu0e8tyAm3n1r1rpj8qx4t6Jb5Ebk
3peXb8y7T7z7xr/TWRymaXu2Iv7ChQt7sgoAYBx4xsMcKbXdI95bEBPvIkeXqFJCkffYyjDumD3fajM2Tzs7Xg+74xxcp219XL16tWdLAADGh2cEzJFS2z3iHWYJ7RkA4Ab0iTBHSm33iHeYJbRnAIAb5NAnuuvBu79Ep9zT9bqUNGJz2vokpfz2un3YMHVKrS/EO8yS3Nuzb8UiDg6O/R1zJ8c6qCrEexPxDs3Jsd2ngHiHWZJ7e87dPoApgb91qwPfS5A790vnY9m5X3rOt276YrE4dL4uPWuLm15VVUcWg0hdd963U6xdEtpneyx9F7tUtF0+MrX89rpLly4dyYsX1Dil1gviHWZJ7u05d/sApgT+1r4OFovFgZBcr9eyWq0OhKvdrNAngEPnrE3b7TYpvVCZQiu5uTu+ujuh14n3FNtt3r5FLjabzaH/XywWhyLoKeW317k2+L4bOEypvo94h1mSe3vO3T6AKYG/tasDn3D2DSnxCWC9zgrT1DHvvvTqbPPthmptdm2Iifc627fbrXd3Wbdu3F1YXVLKb6+LlVHtgMOU6vuId5glubfn3O0DmBL4W7gO7BARN3Ibi3q7u42HBLDm7YscW/Fal56LDjNRVEDHzjUV7yHb3SEu7hAbix0+5NZvSvlDNjB5NY1S6wjxDrMk9/acu30AUwJ/6y/y7vv/rpH3lPTqbEiNvLtR8j4i76mEhs3Eyh+6DvGeRql1lGz1yZMnvW+SHBwlHidPntynX3WmqsrsUABKBH9rXwdVdSM6rGOx3cj2YrEI7jSeMuY9JT0f7rh2Nw17TtNTQa5liu2Snjrm3bVfccehh8R7rPz2utgvF+44ebhOqb5P5B1mSe7tOXf7AKYE/ta+DkKrqthVUGzEWj/bVVYUV3zqNRrJjqVny+FGxqsqfbUZkaNDWWKrzYRsd9MPjTe35XJ3X08pv73u4sWLyWWE65RaL4h3mCW5t+fc7QOYEvjbcHUQWysdYGhK9X3EO8yS3Ntz7vaViG/4lBvpaov+rK0/k7dlt9t5l5SD/YK/Id5hnpTq+4h3mCW5t+fc7SsR/ale0Z+++9iZsC/x7toIw4C/UQcwT0pt94h3mCW5t+fc7SsRVxjbyWfuOFx7nY3UW3HujovV8+6kNlfY210f7cuDHdPqTkbz5Q/9gb9RBzBPSm33iHeYJbm359ztK5EU8e6uxmDvsSJcJ4LpShEq+uvEu3ufin4dQuB7cfDlD/2Cv1EHME9KbfeId5glubfn3O0rkdiwGXfbc5EbYtkOq1ksFrJcLo+cs8I6Jt59aYZsjOUP/YK/1dfBcrmU7XYru91ukCUH97FOuW/ei7sxUo7YX+s0UFCC3SVQqu8j3mGW5N6ec7evRHwPbncoi53AqsNiXPGs60O3Ee++NF0bVZzH8od+wd/q68Aul1jqC6TP93Rd+pxxN31y14eH9pTq+72J96Hfyn3scxa7+9B3z/kexuxwli+5fy+521ciscmgPvHeNvKuQ2OIvJcD/hauA99Lr+sril273RWXdWuOu+fq1k1Pzdcdlub63mq1OihLKA93TkzbsvrWibf2qM/7gguI9/1Qqu/3Jt5zeCvfl3i3O7GJ3IiIKbGHMeRJ7g6bu30l0lS8u/ekjnlvcs6Nrrs2hvKHfsHf4nWw2WwOtWff826z2Rxqu+5uoe6Opu612q5VmLriPXR/Xb5uGd1zrv+pHTYPtcXumNqmrG4/Y89p36D6xd29FfG+H0r1/c7iPfWtvO6Ns+5N1a7s4O6aFstDHcH35hx7k9d7XYey5zebzaEVIi5dunQoD9v5NHnjtju6wX7I3WFzt69E2oh3t4+wwtn1WXs+tBKNzcuXprtzYyx/6A/8LV4H6/X6wDcWi4U3QKbPQx+hKLpvh1Q973t++u6P5esrY0iruALZZ6OWu2tZU9Kx5xDv+6NU3+8l8p7yVh574xRJf1NVYexzBHfSmfvm6j78Qm/ZLjZyFqobO+nNfUin
lN+Keca17p/cHTZ3+wCmBP7Wz7AZ96VV0RddpU68ixx+fsbuj+XrK0soKu8OWbEBQN+v+l3KGhLv9uW9qirE+wCU6vu9iPeUt/JYo23yphoTve61sXRib9k+QmPRrA2hl4m68rsO3CSSAO3I3WFztw9gSuBv8TpoMyzWDiVpGnl374ndH8vXJSbeXU3gsyWkD5qWNUUHEXkfhlJ9f/BhMyHxug/xbs/78g+9Zdehadlxcoj3ssjdYXO3D2BK4G/hOrCLUNhAnYsrKOvGgbsbkem1+ot53Zh3a1MsX7eMsflpNjBndYn7/G5b1iY6yP4Cj3jfH6X6fi+R95S38twj7yHciSm2zOrkiPfyyN1hc7cPYErgb+E6SBkWq9g5YHXzR+rONVltJpavfe7WiXc3D/dZboN7bcpaN2zGpmmvRbzvj1J9v7N4T30rT2m0KW+qKeJd0/GNeXej8r63bF/ZfRPV3A6hrXh3y7FcLhHveyZ3h83dPoApgb9RBzmDeN8fpbb7zuI99a28TrymvqmmiHd3xQZfOr48XWHv/h26Vie0Xrx4sbV4Z7WZYcndYXO3D2BK4G/UQc7YFarYYbVfSm337LCaIavVCsfcM7m359ztA5gS+Bt1APOk1HaPeM8Ad91nou77J/f2nLt9AFMCf6MOYJ6U2u4R7zBLcm/PJ0+ePDJUi4ODYz/HyZMnx3b5vfCbv/mb8sd//MdJ11ZV3n0iwD4otd0j3mGW5N6ec7cPYEpMzd92u5289rWvlec+97ly2223yQ/+4A/KM888E72nrg50TptdpGIIQuvAN6Gq4qvM9MEQC02EytGkjlLrwveS28dw3ty+z1J9H/EOsyT39py7fQBTYkr+9qM/+qNSVZXcfvvtB6Lrtttuk+PHj8vFixeD99XVQZuNmvogN7HnQ19oYmvM90Ef5Wgi3t3r7BLZbcnt+yzV93sT732+lbf9cut2akvZgEkZ4k0dxiN3h83dPoApMQV/u3Tpktxzzz3yvOc9Lzg86Pjx4/LVX/3V8ju/8ztH7g/VQSit0EaM6/X64Bp35TVdKUUkvm67nQemq8fZ/w+t2ubaK3J47fTdbhfNV+3zRZljGkKXybZLOLpl9tVBaOf25XJ5qH417dTIe6gcbl1YtF7c/Wssq9VK1ut1Ed+nu6R3iFJ9vzfx3udbOeId9k3uDpu7fQBTogR/++IXv3hwfOELX5AvfOELcu3aNfmzP/szeeUrXynPfe5zg0LbPU6cOCGvfe1r5cknnzxIP1YHKUtC26WaRfz7rNiorbu3i7sEtF7bROzZyLAV0vZ5HsrX3ZCxSRR9sVgcEpK+Mrt/200k3fry2aLX14n3unK4aagY9olmNy/9vxK+z7ryWdtKpLN4T30rF2n3ZiYSfjutu08bhW/9d20svjdcm1fdmzqUSe7fY+72AUyJ3PztIx/5iPzUT/2UvPa1rz0UaTx27NjBcdNNN8mxY8ekqiq55ZZbkoW7HjfffLMcP35cLly4ICLxOrAbMKqQdGm6w7nNz3etm64vj9hO7Zquls2KTl++bXc2dzdQWi6XstlsgmUOBRF9Gzeq/lBBmireY+XQNOoi0zFdV8L36aLnPvWpTx0pZ4n0EnlP3ajJfftKeTOLvZ3G7rPEIgIpb7gxu6FMcnfY3O0DmBI5+dvly5flec97nvzwD/+w/M//+T/lox/9aPT6D3/4w/Lv/t2/iw6XcY9bb71VvuM7vkM+/elPH6TTNUAX2sXct/u4u6O5K6J953x51Ik9145YviKHg3epkzNtUFAP1UF14t2912qb1Wolm83m0ItTinivK4ebhr4curomlJevLLl+nyI3hhARefeQ+lbu21G17s3MJfblpg6bcb/g2BtuzG4ol9wdNnf7AKZELv72jne8Q5773OfKe9/73sb3/vIv/7K85CUvkTvvvDMouO+66y45ffq0/K//9b+O3B+rg5RhsUNE3t3nb1Ox1yRSmzpsxhW5dux4TLy7
trgR8+12exAsdCPOLrGy1w2bcdOIjXl3r835+9T6r/sOc/H9pgw2bMYd+qJH3ZuZSPjttO4+JRYR0PRDb7gxu6FccnfY3O0DmBI5+NsXv/hFeeELXyi/9mu/1imdRx55RG6++eZDz6vbbrtNTpw4IY8++mjwvlAd2EUo7DPSxU5Y1WtdIWafm+5YZXessz3nij19drvLM9pzNn/7/6F87Zhqva5O+LkaRFkulwcR35B4d+/V1WosbsQ4RbzXlSNF0NZdV8L3mUoOvt+GXiLvqW/loUpNGROl7CPyHnvDjdkN5ZK7w+ZqX924zS64L8juuaYRJ4BUcmhD//2//3d5zWte00taTz75pDzwwANy1113yS233CIPPfSQ/N//+3+j98R+7U4ZFqu+aFc6cc/ZfiM2l8yec4fDukNCfL+Mu2naFV9i+dr5Be5Liq/fWy6X3uE1sRV3XMGr14WG1Vg7fIFEO74+pRy2LmI0Fe85fp8p5OD7begs3lPfyjUN/UnG99bpezOrezsN3WeJRQSsbaE3vpjdUCa5f4e52rcv8W79WORGh66kRosA2pCDv33d132d/OZv/mavaf72b/+2fPCDH0y6tmsd7PPFfq5Qn/snB99vQ2fxnvpWLnL0LSr01uaK8Njbaew+vc5OWA29nblvuLE3O5ypfHJ32Db2+dq5byxmKCrmRousX8XycCNLqWv/6r2hcaf60mz9/9KlS4fy8P1KFyq/HQLX5udVmC5j9wef/OQn5UUvetGoNiDe88FdZAP2x9i+35Zehs1MATqdeZF7e+4i3kNr3obWw3Xvs2M1QxOTUtdytn/HVmrSPEO796kQd/PwiffYWsN2nCTiHZSx+4M//dM/lZe//OWj2jB2HQCMQantfvbinTfceZJ7e+4i3lNm7sdWgoiJ3qYrSjRZqSm2n4Mr3n2rHMTK767k0HZNZ5gmY/cH733ve+VVr3rVqDaMXQcAY1Bqu5+9eId5knt7Dtlnh4S4L5x14nUf4t2eD62m4B4pv3BpWnbeCeId9sXY/cG73vUueeihh0a1Yew6ABiDUts94h1mSe7teYqR9xDuRmmK3Rob8Q77ZOz+4Ny5c/LKV75yVBvGrgOAMSi13SPeYZbk3p77Fu8i4fVw24j31LWc7fCX2EpN7jAZd4OTruLdLYe7njDMm7H7g3Pnzsm5c+dGtWHsOgAYg1LbfW/iXVeasUtHjoU+xEPDC+ru6+u6vtJwV82A7uTusPsQ73WrzTQR776Vm1LW/nWFvft36Fqd0Hrx4sXW4p3VZiDE2P0B4h1gHEpt972J95SNmobAjdiJXH/wx9afV4YU701AvPdP7g6bu32ls1qtmKQOB4ztb4h3gHEotd13Fu++CWn2Z3UltgZzyljdlDWoRQ6Pk1X0evfvUOQw1SYltEqGb91rd3xwXd3YNa5ZyrI/cnfY3O0rDffXOKLuYBnb3xDvAONQarvvJfKeslFTbA3mVPGu6YcmyNUtRWfL4lt7uo14d/OsW/farY+6ulF7ibz3S+4Om7t9AFNibH9DvAOMQ6ntvhfxvl6vD4TnYrHwiucUMZxyLiZsU8R7bO3ptpF3S8x2X33U5eWWEfohd4fN3T6AKTG2v+1bvC8Wi9pnyNh1ADAGpbb7wYfNjC3eY2tPtxXvdriL/Uke8Z4vuTts7vYBTImx/W2f4t0dihm7DmBulNrue4m8p0xWjQlUV3S3Fe9qS2jMuwr00HjX2HCYkHj3jX9HvOdP7g6bo32pE7VDK7l0WVu9qQ+wmhM0YWx/25d411/CEe8Afkpt953Fu10a0g6fcUkR7741mJuK99BqM3Ziqx2Hbtee9on3mE3u/SLXO0vEe/7k7rC52xfD3R1VSV31ycc+fQD/grH9bd/DZhDvAH5KbfedxXvKZFWReoFqV5Npu4GMtckOY3FXpAmtPe1G6+psUuyKMNbe0KY1TYboaL0iMPold4dtY19oZSZ7zq58FFp1ydpgz7ntPna/
3RDKXuuu+uLanrLiUmh1J7WZ1ZygKWP3B4h3gHEotd2zwyrMktzbcxfxri+rdSsf2b/dl2I7/Gy9XstqtfK+tIZWSFqv196/Q3mmrrgUW93JVxdN0ob5MnZ/gHgHGIdS2z3iHWZJ7u25i3j3LZ3qnktddcmej831cEW1m99isTgyFj5mX+rQsdA4eoalQRPG7g8Q7wDjUGq7R7zDLMm9PYfss0M/3OFgoWFa2+3WK2Dt0BM7dCQ02TM218O30pNG723Uvy7PFIEdWt0pZCviHeoYuz/IRbxzcMzxKBHEO8yS3NtzG/u6RN596cT+vy7yLnJjeMpqtToYdlOXZ53Ajq3uVGcr4h1CjN0fsEkTwDiU2u4R7zBLcm/PXcS7ju32jXl3o/K+VZf0nApaTaduzLsrou3kVE0rlGeqwI6t7uTLu0naMF/G7g8Q7wDjUGq7702860ozdunIoYlFBl1iP5uEHuZN1o6GvMn9e+wi3nViqE3DJ95Dqy75ztk0Ytf4yuEb5+7m2WTFpdDqTpofqzlBU8buDxDvAONQarvvTbynbNQ0JDGh7S5jp0vrKTzIp0/uDtvHsBkASGPs/gDxDjAOpbb7zuI9NAHAtxlLKFIXWp9aP7sT80LrPMci75qmb2yunteInKZ76dKlQ5HM1LWjXRtDO07CeOTusIh3gOEYuz9AvAOMQ6ntvpfIe+pGTVZox9Z4VrHsm2gXW+c5ddiMu960z047OS72cpCydnRoXC6MR+4Om7t9AFNibH9DvAOMQ6ntvhfxvl6vD0TsYrHwRv5S1pVusouqYsevNhnzXrdLo295vdRxtO6Y2tCKGDAeuTts7vYBTImx/Q3xDjAOpbb7wYbNNFnjuU68h9Z5biLeLXqtXRED8T5tcnfY3O0DmBJj+xviHWAcSm33vUTeUyarNlnjOSbeY+s8p4j3zWbjtdFuB494nz65O2yXMe9Twverm9s/tCE2P0DP+XxW58Tsa15B7FfGEki1P7dyju03iHeAcSi13XcW73ZpSDt8JpRGyhrPMfEeW+c5NfLuDpPRNO1yc13Eu2vzcrlEvGdG7g6bu31DMbZ4d5fPRLzHQby3A/EOMA6ltvvO4j11sqpI+hrPdcNmQus8p6w2Y//22aLlqKpKLl682Fq8s9pM3uTusF0j73WrIdm14O1qTrF23XRVqNg68KH8tezui7TF7R9i81fqyumrH1tOd0lZvX6329WueuXWkw+to9Cvfrl+fyFS7Lf9t10BzJdPrB+tW73MLkxQ98I1dn+AeAcYh1LbPTusDoBuDw/5kHt77ku8+1ZDcoeO6e6p9r468ScSXxVK022av68eYuI9tvpUSjl99tk5MO79KgTf//73B/O1wtU9Z9PS6L6vvDl/fzFS7LfX6WffKmTude6qXaH73FXC6upc02rDer1u9CtQ6FrEO8A4lNruEe97QB8eehAh6reWAAAgAElEQVR1z4/c23Nf4j1lToYvjZj4azM3xYrsJnNArB/ZIySC+iyn5q/RYR1G44vixurJLXssCuyK3xy/vxgp9rvXhVYhi80darJ6mctut5Pf+73fO2J3U/QXjBTxriI/tJvvlMS7+/wbInDlq9Oc0oN8yV0LhEC8wyzJvT2H7LPDENyHYpNhXXboRNNhF23mpvgi5CkP97rIu1sn7styl3Jq2pvN5mDIjC1H3apX7jA939A/V/ilit+xv78QTcV7bBWymHhvsnqZosK5j8i7CvEm8y9i87CmIt59L6fL5TI6F64PEO/Qlty1QAjEO8yS3NvzPiPvLnbYhSvShojcdhk24+aTWs5U8a5DVHQ+j5ZDd1/25RuLvLu4QzxSxW+oXGN8f5aukXdLk8i7xZd3nRhr2x+kinf7y43P7lLEe10bsKu2Kdp23L/1UNrO80jZFT3mR64tqXMy3LkVUCalfneId5glubfnfYp3jSIrPvGnf9uVkpqIP03XjnlOyd9XDzHxHlt9KpZPqni3D27796OP
PhrM1x3v7RvzHiJF/I79/XW1383Lvry432eoLLH76obNhOxuQx8rH4lMQ7ynvuCF5lK0neehdtl2504eD7VD+7Jh/cq2uzp7Qzu1QxnkrgVCIN5hluTenvcdebfRJfcnbXdIRlvxF1tFJJa/zaNOvLtpuTaG8kkV7yI3hkjYMtllI9183Yhck+8yVfyO/f2FRFqq/TYy6uZj022y2ozeh3hvR6gOQkOU3HaXIt5jv+i0neehtvvaXRM/V3t86aXYC2WSuxYIgXiHWZJ7e87dPggzh4f6er0evHz7XLUL8T5M5D02l6LtPA+1qw/x7qbXxF4ok1KftYh3mCW5t+fc7YMwc3io9yFY63BXLdnnql2I9/2PebeCN3au6TwPtcsn3lPmgITKSeR9HpT6rG0k3jk4pnTkTO72AUyJtv7mE+9VdXjYV4q4e8tb3lKEeK8jtNqMO0fDN5ei7TwPtT0m3mPzJvScnZti/z/VXiiTUp+1RN5hluTennO3D2BKjO1vDzzwgLz73e8e1Ya+6sAdI5+6c2+XeR6hXdFFwnNAYrbE5mSE7IUyGdv324J4h1mSe3vO3T6AKTG2v73iFa+QP/iDPxjVhrHrAGAMSm33iHeYJbm359ztA5gSXfzt6aef7pz/85//fPn0pz/dOZ0u0OfAHCm13SPeYZbk3p5ztw9gSrTxt6efflr+y3/5L3LnnXfKO9/5ztZ5f/zjH5fnP//5re/vC/ocmCOltnvEO8yS3Ntz7vYBTIkm/mZF++tf/3r52Mc+1invN7zhDfKGN7yhUxp9QJ8Dc6TUdo94h1mSe3vO3T6AKZHib32LdhGRD3zgA/KiF71Innnmmc5pdYU+B+ZIqe0e8Q6zJPf2nLt9AFMi5m/7EO0iIs8884zcd9998q53vauX9LrStc+JrZs+JH3a4e410McmYa59dlnK1HtErq+i466641479Hey3W572wMiZnuf5cqhzbYB8Q6zJPf2nLt9AFPC52/7Eu0iIu9617vkRS96URbDZRT6nMOE1qz3CeYutBHvu90uKJLHfokKvVT0CeId8Q4zJff2nLt9AFPC+lvfov3Tn/60/MEf/IG8+93vlre85S1y3333yX333Scf+MAHuprdKyl9Tmxdc1/Ed7VaHUSt7UZJVtyp2Autp27z9p1z/z+0K2ponXafjSLx3WIVd017vV7TtmvMa71Z++za9bvdrjY9ZbVaHbLN/kKg5WnynaTUk659H7vWfif2s75s2O9dNziry7eujF0p9VmLeIdZknt7tp0ZBwfH/o9PfvKT8spXvlKqqpIv//Iv7y3d5z//+fKKV7xCHnjgAXnLW94iv/ALvzB29+KlqvoX73ZnUo0UbzabQ1HjxWJxIOBUkLo711ohbXdb9f2/K/rcHVI13ZiNak9sAyb3GrtDqxXKoXO2Tu1mULH07D0Wt+7c76GuvL403HqyLwuhOlWWy+WRXxN837tuxBX7furK2JW+0hkaxDvMktzbc+72AUwJ62/7HC6TM6E+x40G6+EOjfAJxdBuqXpOI7Lb7da78+lut/MOkUj9/1AU3l7nszFFvPvqyRXben+sPCreU9LTz7auQhHq0GdfeVPq3/fLQaiufENn7HX6OeX7qStjV0p91iLeYZbk3p5ztw9gSvj8bW4ifh+R95B412EfKvJCLwghke7mF/p/TVfpW7zbIShVVQXFu9bddruNive69PSzrctQGX33hcqbUv/2vhTx7pvcu1wuZbPZyGazOWgDKeI9VsaulPqsHVy872P2dh2hN9tc0oPhyd1hc7cPYErE/G0uIn5I8b7dbg+NeXYFaSjdpv/fNvIuEh/z7nup6Bp5T0lP09p35N1Xn10j7/r/+r1vNhsi7x0YVLwPNXvbBfEOLrk7bO72AUyJFH+buojv2uc0Ee+anzuURsWyG2m1z1w7Ftz3/3Vj3kMC27UxpFc04OjauFgsjqSt+aaMeU9Jz95jccvYVLzH6t/3K0KoTm09
+TSSDd7WpVX3PSLeUy/s+FYukjZ7u+3McJ0J7Ubz7WzuS5cuSVXdmDWd2sFYW9zZ4U1maEM+5P6d5G4fwJRo4m9TFfFDi/fVanUocOc+S62WCD1nff/fdLWZmI3ucBJ3pIDVAzY99/kfixpbPVKXnq07q6VsGduK91D9+8R7m9VmbJ3ZekxdbSZUxq6U+qwdTLynjiFrMzPcN4vZCnt1BHfWdJOfzewYLvviUGev+7ICeZC7w+ZuH8CUaONvVsS/853v3INVwzJ0n5OiB0rFJ3j7JLbO+9gMsc57n5T6rO0s3lNnoqeI97bj03xvzK7tVryH3ihDY8ysPb70UuyFvMjdYXO3D2BKdPG3p59+ukdLxmOoPsf+ij5Vhnj+5yiSY2Pnc6XUZ21Wkfe2M8NF5NBmCG6n0Jd4d9NrYi/kRe4Om7t9AFMCf6MOYJ6U2u6zGfPuG9fVRLy7+cSGzfhmgLtppoh3Iu/lkrvD5m4fwJTA36gDmCeltvssVptxJ5g2nRnurinaVLzrtcvl8sjs67oZ7qn2Ql7k7rC52wcwJfA36gDmSantfvB13utmb7edGW5nabvjwHQ298WLF48Iane4TWz2tZvedrtNthfyIneHzd0+gCmBv5VZB775dvbX/br1wGPn+1xLfB9phsa8x9ZH3zfb7ba3ibRDfTcltnsRdliFmZJ7e87dPoApgb+VWQf2V3CRGwG3PlZ5y1m8x1abGVqwuwwxkRbxjniHmZJ7e+7LPnY0Bqgn9/5gCLrOa7PLOWt/Y691f3W3AtueS/0FXO1x+4b1en1kyWaRoxtC2j1j3DKoHb45baF7QnvQtElT7Q312+4676E8fFH4kJ1N9qxps867r/51h91Yvin12IVSfR/xDrMk9/bch33saAyQRu79wRD0Jd5VaNp5Yu7iEPacpmvnkblDYn17qbj3uXa4n337wbgi0L5U1J1L2YOmLs1Q2VL2rrGE8vDVRaqdsT1rYt+JiH+HVV+ZdPhzbK+clHrsQqm+j3iHWZJ7e+76IBVhR2OAVGgz3fdyqVvNzU0ztE9LbNU3N80m4t3eaxer8F3rS8f3a0DdSnh1aYbKFltNz7UlNY86O1PLV/ediPiHztTVfyjfujJ2pVTfR7zDLMm9PXcV7+xoDJBO7v3BEPQVebfnre/aQEBVVUniPbaXipu+a4f7WeR60GGz2chms5HVanUkMu/Ly55zj5QNJOvSDJUttHeNK7RjeaSK96blSxHvviGabv2n7JVTV8aulOr72Yt3n3M2Yd8zr9uOKU61pavNQ+7CVtJM89wdtmsUjB2NAdLJvT8Ygr7Fe2wPl31G3kNj3vWcvvRvNpvWkfdYufuIvLvYAMlut9t75D1WvraRd/3/UP27aRF5jzMr8d43XcYUDyHeYzPS90UpM81zd9ghIu/saAxwndz7gyHoWgfur3V2XLvru+64dvucd/dbCe2l4t4ncnTvltikR995Ny97rqpu/LJny5Pyi2IozVDZUvausYTySBXvTcoXs1vxjXm3aaXUQZPvpgul+n6v4j1VLOjh/sRtz4n4x8+GGl4o7dDbmxUfNnpZl44dU9vHmGL7OUXUNLHPzkiPjWFOyd9dQSAk3PR7tJ9znGmeu8P24W/saAyQRu79wRD0Jd7tfBiLfZ6HhmJo/95ktRn3sH2Rr1/Riaq+8+6zLfYsTwlKNE0zVF9uQMxdbSaURxPxnlq+Orv1e7GfXeFvdUOqBojVYxdK9f3BxLtvPFeT8a6xhhdLO3XYjArhunTUxtQxxfae0EzqVPGeUk7r0D5HiI0XrhPvIjcmKsYEUwkzzXN32D7sY0djgDRy7w+GoC/x3tUHV6vVIEvalswYv6qnMuRQ3T4o1fc7i/fUMbi++9qMd02JBIbSDol3V/TG0glFEX2448jqxnM1jUjG7HPzTskj5VzKUl0lzDTP3WH7so8djQHqyb0/GIKxxLv9VbWqquBzHQ6To0iOjZ3PlVJ9f9BhM6HZ
5k1+Mg+JiZS0Qz+hWeFel04T8Z4yg9uXdky8p9o3tnjPfaZ57g6bu30AUwJ/ow5gnpTa7gcT776x203Ee5PxsqmRd3ccdko67titujHFdTO43Xua2F/3y0CqeE/Jv2vkXf8/l5nmuTts7vYBTAn8jTqAeVJqu+9VvMdoMtvcN941Nl42lnZIBPrG1aWkY8V7ypjiqorP4BaJjwWORaDr7EsdBpGSfxPxXsJM89wdNnf7AKYE/kYdwDwptd0PJt5F4rPNU8a7xsbLhtIOiXd7vZtmLB33l4WmY4rdGdxKqGw+EZtqn2+1mdCwo7r8m4h3a6/v14ocZprn7rC52wcwJfC3vOqgy6+r7nPdPi/3YZ9vcYUh888xTf31fbvNf++XnNp9EwYV7zAsY8xIz3ESjY/c23Pu9gFMCfxtOnXQt1Cuwyfe951/zuLd1R257/1SartHvE+coXdYLWWmee7teZ/29bUyS9uHlC8q1efScE078qF8JNUu36YnIvU728ZI+a5KiJLti9zsGYOUOqjrN+p+SfXtM+LbA8S9v8n+JrG2rten7lvSdP+YPvMPXevLM7buemx/HTuUtcka9HYtf7fO3DXotU7sZxX4Oez9UqrvI95hluTennO3T6SbeHfv803+HoIc10vWh5JvLknbl4zU7yr3KNm+yM2eMehDvFtx6Nu3w7enh28PECvG2uxvkiKeReL7lrTdP6av/O0LTyz/WL3X7a/j3mfTDO3B4vu+bHl97SjnvV9K9X3EO8yS3NtzHw/SuqiR74HhRndDERY792K323WOjK1Wq4Mxku71sQiQpmfPNYlK+eaFxCJdQ9llH2D2Whtt8kWhfJHN1O/K2m0/5xQl2xe52TMGoTpI3cvF/eXVt3pY3WpnoT1AXHvqFmpwjy5zuGL5up/7zD/2MuHmH6r31DLUlcmmGfsFMPTre857v5Tq+4h3mCW5t+eu4j0lalT3UE2JsGy3R5dxbRMZ0/8LXe+LKqlN7s7MdSsVuQ9LJSXSNZRd6/Xa+3coz1hk09ZvXSROJO8o2b7IzZ4x6NrnpOxrEgsS2D1A3PaTur+J2lgX+U4V7232j+kr/1jZbJ6xeo+VIbZ/SmwPFpGjC1zY8oXEe657v5Tq+4h3mCW5t+euUTDffW3Ee2yMdegh1SYypvb7fjpO2aXYnk+NILlpp0S6hrDLZ4sOJWgT2Uz9rpSco2T7Ijd7xqCreA8JNz0Xa5+xPUDc9pLSv/QpnmP5+q7bl3hPjbz78qsrQ12ZYtF8G9TZ7XZB8Z7r3i+l+j7iHWZJ7u2564NUJC1alfJQ9UVYNH/ttPcVGUvdpdiXhr1faSLera1D2qVodNtG/evyTBHvoe9KyTlKti9ys2cM+qgDbaci/n1NQu3TDquyf7vpiKTtb9KHeG6zf0yf+bu/pqWOebd21+2vE9s/JbQHi9s/tB3zbsuRkm/dL5eI99CFhRYQwEfu7bmPKFhKtMoVjbFoe2jYzD4jY02iSr7/j0Vy3ChRl8h7n3Yp+mDXDeW6RDZTvisl5yjZvsjNnjHoow7cORChX5J87c7uAeITappmLPig5fAdts2mDptpun9Mn/nrtXbekRLyPbfeY2Vw72uy2oxN0+0rUlebsWmNufdLqb6PeIdZknt77mpfarTKDoEQOby7bkqEZbvd7jUyptf7okpuWhqZqovQuKLWzTsW6RrKLpunTSuUZ6p4r4vEieQdJdsXudkzBtRBXvj6nBLY7YZf570LpbZ7xDvMktzbcx/2pUarUncudjtgu/tx08hYE/Eeiyr5ojR1USmLb7WZWKRrKLtsXfnGubt51on3lO/K5mk/5xQl2xe52TMG1EFelCreRQ7vsNp2b4qhKLXdI95hluTenvdpX47DFsbCRolKflj2Re5Rsn2BP1AHME9KbfeId5glubfnfdmnUfY5CrQQNko0Z/FeQpRsX+TeHwwBdQBzpNR2j3iHWZJ7e87dPoApgb9RBzBPSm33iHeY
Jbm359ztA5gS+Fv/dTDk8LyuefVpq/0lz07c7ELMPoZBdqPUukO8wyzJvT3nbh/AlMDfyhbvuTDGSitzrOc+KbXuEO8wS3Jvz0NMWO06tju2akzdfe7RV4SqKU0ffO74+JIhMniD3O0bgpQ6qOs37HKiTdYNtys8+TYIC63cZFdRcs/ZVbRcm9uuBKXX2Y3ObB+Yusb5crk8JOp1vffU1Zti9QzNKLXuEO8wS3Jvz7nbJ9JNvLe5bx80EZZuVG0KEBm8Tu72DUEf4r2qbuxD4IpK95z60mazOeRX7n4SutOwyI29J7RNucu82s/6EmD3a6hLz15j9yVwV6TyndMyuvj2TfCVWZd3jeWbUs/QjFLrDvEOsyT39tzHg1Q35NHDfdD51mEPrQ/uRsTsWuG73a42L01H7Q6J97rIkxtpc6NrPltjZfTtOOorh4h/TXj7ORSNC9Vhil0p5YvVWyx/hchg/v3BEITqwPUJPdyXvthOuu5KRnY33tiuzr4Xv9COx64/xnZKDqXnyzO207Lrs75y+F6Q3Z2ebZ8Wy7eunqE5pdYd4h1mSe7tuat4tx2/SHi30NjDqC4ipg+clLysCI6J97rIkyue9VoVqLEHe514j5VD7VZCP9O7dsfqMFW815UvVm9136GmPffIYO72DUHXPsfdvVf9yZ5zj9BGcUpImDYR72r3drutTS9Wji7i3ffSvFwuZbPZyGazOQgMpIj3WD1Dc0qtO8Q7zJLc23PXKJjvvjbiPbbud0iEh/Ky9/ke4k0iXu7fvp+vm4jklDqzdRETC6l1mGJXk/L56q3uOxQhMiiSf38wBF3Fe5PIewzfC24orz4i76H7lVTxvtvtkiPv+v/q15vNZtL+lTOl1l0j8c7BMaUjZ1Lsq6r4sBk7jKOqqsbiXSQcEdP89SGbkpfvPkuTiNe+xHusHG3Ee6wO+xLvsXqL5W/tm3tkMHf7hqCPOnB/mbFpVtWNX2NsG3HbX+gXPr1Wz/v6A/tZ7fD9ghZKL1SOkF+6vu6rQ98vWzatJvmm1DM0o9S6K9PqCfPUU0+NbQJkQFfx7hvL7XsAuUIvFqkNPVRT83Lvq7O5L/EeK2MsohWLqjUR76E6TLGra+Q9lr9CZLDcB3if9FEH7rwMX3vRwx32pf8f+hXICt0U8W7neqSmF7vGl6/r66mrzdhy2xeX1DklsXqGZpRad2VaPWHe/OY3j20CZEDXDsWNfi4Wi6h4V0G3XC4PrkuNiKXm5d7nIzXi1Ua8+8oYixjbcmiaSqp4j9Vhil0p5YvVW913qPnOPTKYu31DMJU68PU5Q7HbDb/OO3Sj1HZfptUT5amnnpIv+ZIvIfoOvf2EreIrNlTGHVbhLqsWiojZlV9S89KyhcR7asSrqbgNldEnOn3l0Dxiq82EonGxOqyzK7V8oXqry1/Tsp/nGBnM3b4hmEodjCneRQ7vBZE6zh/Go9R2X6bVE+XNb36z/Kt/9a+IvsNeO5QShjHkiBtVmwJEBq+DP1AHME9KbfdlWj1BNOr+2GOPEX2HvXUoGulFsLVjSmKXyOANSn2A9wl1AHOk1HZfptUT5M1vfrN83/d9n4iIfN/3fR/R95lTaocCUCL4G3UA86TUdl+m1RNDo+5XrlwREZErV64QfZ85pXYoACWCv1EHME9KbfdlWj0xbNRdIfo+b0rtUABKBH+jDmCelNruy7R6QrhRd4Xo+7zpu0MZcpJq17z6tNWu/NDXZNOYfUwGLhO+M+oA5kmp7b5MqyeEL+quEH2fLyWL91wYY83lOdbzFOA7Yxd1jvkeJVKm1RMhFHVXiL7Pl5QOpariaxnbTXbqdju02J0J7RrfNl97n7ujYWircr3HtTmUXp2tep2WTfNWUnc7XC6Xh0S9rqmeuqZ5rJ6hDPjOAKAk6LFGJBZ1V4i+z5M+xHtVVQfi1RWV7jmNUG82m0PRandHzsVicXCf7t6p4jW2gZG+BKzX6yMbGIXSs9fY3TrVPvti4J7T
Mrr4dhP1lVk3Worlm1LPUAZ8Z1ACn/nMZ8Y2ATKBHmsk6qLuCtH3eRISE5vNxvuznzscJBQd1s9WQGuEebfbHdkdNJam3hva/dR+1nM2r7r0fHn67vftbBpaw9w3dMamqZ9T8q2rZygHvjMogfvuu29sEyAT6LFGIiXqrhB9nx9dI+8q8hUVn/ace2hadoiLHTYTEqZNxLvavd1ua9OLlaOLePcNBVoul7LZbGSz2RwMt0kR77F6hnLgO4PcuXr1qhw7dkyuXr06timQAfRYI5AadVeIvs+PruK9SeQ9hh0204d4r4u8h+5XUsX7brdLjrzr/+vQmM1mQ+R9ZvCdQe6cO3dOXvrSl8q5c+fGNgUygB5rBJpE3RWi7/OiDzHhjtkOjdO20WM3Mu2OebeTQnX8eop4VzvcMe+x9ELlsNH1kHjXtF18Y95tWk3yTalnKAO+M8iZq1evyokTJ+Ty5cty4sQJou+AeB+aplF3hej7vOhDTNjVUupWm7ER/MViERxL71uBJUW82xVsUtOLXePL1xXvqavN2HLbF5fU1WZi9QxlwHcGOXPu3Dl58MEHRUTkwQcfJPoOiPehaRN1V4i+z4epiAnfmPehGGOddyiTqfgbTA+Nuj/++OMiIvL4448TfQfE+5C0jborRN/nw1TExJjiXeTwDqup4/xhfkzF32B62Ki7QvQd6LEG5M1vfrPccccd8uIXvzh4HD9+PHr+jjvuIPo+AxATAMOBv0GOuFF3heg70GMNxFNPPSV///d/X3tUVZV0HdH3aYOYABgO/A1yxBd1V4i+zxt6rMzgIQIitAOAIcHfIDdCUXeF6Pu8ocfKDB4iIEI7ABgS/A1yIxZ1V4i+zxd6rMzgIQIitAOAIcHfICfqou4K0ff5Qo+VGTxEQIR2ADAk+BvkRErUXSH6Pk/osTKDhwiI0A4AhgR/g1xIjborRN/nCT1WZvAQARHaAcCQ4G+QC02i7grR9/lBj5UZPERAhHYAMCT4G+RA06i7QvR9ftBjZQYPERChHQAMCf4GOdAm6q4QfZ8X9FiZwUMERK63Aw4OjuEOgDFpG3VXiL7PC3qszOAhAgAAMC/e+ta3yl133SVf/uVfHjxe8IIXRM/fdddd8ta3vnXsosAAoBQzA/EOAAAwH5555hn5xCc+UXtUVZV03TPPPDN2kWDPoBQzA/EOAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAAOCCPgCFlpAZOCcAAAC4oA9AoSVkBs4JAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAAOCCPgCFlpAZOCcAAAC4oA9AoSVkBs4JAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAAOCCPgCFlpAZOCcAAAC4oA9AoSVkBs4JAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAAOCCPgCFlpAZOCcAAAC4oA9AoSVkBs4JAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAAOCCPgCFlpAZOCcAAAC4oA9AoSVkBs4JAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAZbNYLGS73dZet9vtZLVaDWARTAH0ASi0hMzAOQEAyqWqKqmqqla8r9drWS6XslqtZLFYDGQdlAz6ABRaQmbgnAAAZbJYLGS32yWJdxGRzWZzcA9AHegDUGgJmYFzAgCUTYp43+12slwuZbPZMHQGkkAfgEJLyAycEwCgbFIj7wBNQB+AUun4PA4ODv8BRxn7O+HgyJmq6le8j13XHBwc2R15d4IAY4J/+KFeYExyb39V1b94BwAQEcQ7QB34hx/qBcYk9/bnindrbxvbcy8vAAwH4h2gBvzDD/UCYzK39je38gJAGMQ7QA34hx/qBcZkbu1vbuUFgDCId4Aa8A8/1AuMydza39zKCwBhEO8ANeAffqgXGJO5tb+5lRcAwiDeAWrAP/xQLzAmc2t/cysvAIRBvAPUgH/4oV5gTObW/uZWXgAI
g3hPJLRIfl/r+O52O6mqqtctsjebjVRVJZvNprc05wj+4WdO9bJYLIJ9QBO2261UVSXr9XqQ+6bMnNqfyPzKWzK2v1gul2Obk8xqtZKqqmS3241tCtSAeE9k306IeM8X/MPPHOulq4hGhPfH3Nrf3MpbKuv1+uC7Ks3fEe/lgHhPJCTeVSBro9fPy+XySHReHdleq2m64l3TrapKFovFoTy1c/BF/+05zceKd3sfoj4N/MPPHOvFfRjr3+rv6s8hH431ASn3WREwd1+eUvv72Z/9WXnnO98ZvWZK5Z0y2hf4fpXXc8pisTj0fLe6wfYLsfts32Pztf2DGxS052w+iPdyQLwnUife9Zw2fn3I2nP6AFansw9kK97dB/VisTiShjqXPee+AOLXSkUAACAASURBVKgz64Pd2kJUPh38w88c6yUk3u0DOOajsT4g5T5fvzJXX55C+/v93/99efnLXy4vfOEL5fjx4/Lyl79cfvd3f9d77RTKOwfsi7kbcY+JcCucQ89y333uc96mJXIjIKCifrlcHgkS6jnEezkg3hPxjXVdLBZHHpxu47dO5oue6QPaOqs6m6bh/m3/zwoB1xGtbe45mzfEwT/8zLFeQuLdN9zN56OxPiD1Pnz5OiW3v7/7u7+T7/qu75K77rpLvuRLvuTQc+X48ePyute9Tv72b//20D0ll3duuFpBiYnwmA+niHerD2wEPyTEfb/cId7LAfGeSF3kvYt4XywWh8S7fXO3x263OxK5s+m7b9jWNvecey+EwT/8zLFeQuLd+nTMR2N9QOp9+PJ1Sm1/58+fl1tvvVVuu+02bz+vx3Oe85wjw6SgHHxD42IiPKQx6u5zz4nc6C9iLxA2qId4Lw/EeyL7FO91kXdLLH0i7/sB//Azx3pJEe9t+4DU+/Dl6+TW/j7/+c/Ln/zJnwSPX/u1X5NTp07J3XffHRXt9rj77rvlnnvukV/91V/NrrxQjz7X3aExSmrkPXafT7xbdOWb1WoVnFuHeC8PxHsifYp3re/UMe/L5dIbXXcjdYx53w/4h5851kuKeI/5aKwPSLmPMe83yKH9PfXUU/L6179evv7rv15uuukm+Zqv+Rr5uq/7uiPHYrGQe+65pzbaHorAnzx5MovyQj3Wb/VvfSZbfeD6uKsdrI/H7vOJd6s7VBf4+g5XIyDeywHxnkioY33DG97QWLw3XW3G/X7s/7uOa+9zV5vRPPSY28O+LfiHnznWS4p4Fwn7aN1qM3X3aT748vjt7y/+4i/k3nvvlYcfflj+9//+3/JP//RPtfc88sgjcvPNNyeJ9ttvv12OHTsmP/ZjPyYi45cX0nB90x3OZv07ttpM6n2hyLvblyh2To3eq/0K4r0cEO8DUtqar3Ad/MMP9TIc2nfMUaSHGLP9ffCDH5TnPe958jM/8zON7/3rv/5reeCBB+Suu+4KCvc77rhDXv3qV8tf/dVfHdyHvwGAgngfEMR7meAffqiXYbCRMrjBmPXxAz/wA/LWt761UxqPPfaYvOxlL5M77rjj4Ps9ceKEnDp1Sn7rt37ryPV8/wCgIN4BasA//FAvMCZjtb9nn31W7rjjDnnyySd7Se8d73iH3HHHHXL77bfLI488ErwOfwMABfEOUAP+4Yd6gTEZq/09+uij8sADD/Sa5j/8wz/IZz7zmeg1+BsAKIh3gBrwDz/UC4zJWO3vu7/7u+Xnfu7nBs8XfwMABfEOUAP+4Yd6gTEZq/0tl0vvmPR9g78BgIJ4B6gB//BDvcCYjNX+Xvayl8mHPvShwfPF3wBAQbwD1IB/+KFeYEzGan8vfOEL5VOf+tTg+eJvAKBUVSWV7t7GwcFx9Dh58uTYfpol9BscYx6uX67X6+D28j6aXGupqnFENP7GwcGhxz/3B7zRA4TAP/xQLzAmtv3pWvgpglxFvu4uud1uW+c7JPgbACj/LOLpFABC4B9+qBcYE21/KsRXq1VyNF03zOuS79DgbwCgIN4BasA/
/FAvMCZu+0sV77vdThaLxcG/XfMdCvwNABTEO0AN+Icf6gXGpK147zvfocDfAEBBvAPUgH/4oV5gTBDvADBXEO8ANeAffqgXGBPEOwDMFcQ7QA34hx/qBcYE8Q4AcwXxDlAD/uGHeoExSRHvVVXJbrc78rnPfIcCfwMAJWvx3mU5Lx+6rq/vaGNX0wdB2/tgXHL1j7HJtV7m6Gex/iy0lnnf/evQzE1E5/BduW0mZZ381HbWR3uM+X7p7R3AMivxbmmzOYcyR3EwZ3L1j7HJtV7m5p+LxULW6/XB37phkdKlr8uZMdrftWvXZi3eXRDvAOPQSbzvdjtvtEedRB8i6kz6ebVa1V5rz9XlV3fO58xup6N5aXReRGSz2RxKc7PZHLp2tVodsTnlPr02ZjPkA9+Nnzb14vMd64d99ik+//T5eRc/9N1X5/+hPma1Wh1adzylT9NrfP3bZrORxWJxcP+lS5cO5W/717rvJWbnWIzhl0888YScOnVq8HxFupW3TTuteybbtrXb7WrTs7a46dm2p9T1BZqGtnN7Lub7dekDlEAn8W4d1I43tM4vcmO4ym63O+J41tlErkeNXEesy0/kcPQpZeJSSLxr+u5D0WeXz+aU+/RcU5thHOjY/XQR76F2H/KJvvsU9XMtR6hfibFYLA7uW6/XslqtkvzfzVv7ocVicUgUp/YPWhc2XYvm4ebvE++h/GJ2jsUYfnn58mU5c+bM4PmKtC9v23aa+kzebrdJ6YXKZH3PXlfXF9SJ9xTb2/g9QA60Fu/b7dYbJfI9TGMPglA02qaTmp/vXKzgPvEeusfXQfhsTr2vjc0wDoh3P13Ee+zBq+yzT7FCP9SvpJTD0sT/3XNN6sJHKOpp6yb0klNnW8zOMenql88++2zjey5cuCBnz57tlG9buvibpWk7jT1r3edoLL0623zt0doc8uE2trf1e4BciIp3+9OT/Vla5OjDwv5E1eVBa89bZ4vlp+eUvsS7LX9VVcEOwk0v5b42NsM4IN79tOk36kTiPsS7Pe/LP9SvxIhFE1P7jZgo7tI/aD5aH4j3wzz77LPy4z/+43L33XfLT/zETzS69+zZs3LhwoVW+Xali7/V3Zf6fIuJ97r0XEJtPHauqXgP2d7W7wFyISreY7hvru65tg/a1DdlX35KH+LdTbNtZILIe/kg3v207TfqHrzKGJH3puWo+/99R943m4335347VALxfh0r2r/3e79XPvzhDze6/8qVK1JVlVy5cqXRfX3Rxd/q/r9r5D0lvTobUiPvri/0EXkHKI3W4l1v1geEfVNu86DVsW2pY9TcN3N3fFydY9aJd1/6rl0+m1PuC415pzPJE8S7n77Fu0jYJ/ruU9zIXKhfqSu/5ql5NPF/N43lcplUFz477DAZtUHz6Sre6+wci9TvqatoV+6//345f/58q3v7oMtzuk07TX0mb7fbpPR8uG085ZmugtzXHvvSEwC500m8qxO5Pzm1edDaNdiV0Nu37ycu95xbSDdiVSfeRQ7PpvdFze3PhJa6+9yogC8NyAe+Gz/7EO8hn2jTp/j80+fnsX4lRsjWVP/X8uu1bVabsX+HyqB968WLF1uL9xJXm7l69aqcO3dOTpw4If/xP/5H+chHPtI6rze96U1y+vTp1vf3QV/PaaWunaY8k+3KRXXp2XL4noGpq82I3FgO1W2PKb4fSp9fvaEkOon3PvA90KZKLAIB+cJ35od66ZfVanVkzHKO5GJnqP31Kdo/9rGPybd927fJ6dOn5bHHHmudTh8M5W9zeiYDlArifSA0UmA3U4EyQKT6mXK9uFE5e/QVdda+r+90+yZXO932Z0X7937v98qf//mfy+c+9zn5/Oc/HzyuXbt2cHzuc5+Txx9/XH79139d3v72t8v3f//3y7Fjx+Qnf/InRyrhYRDvAKCMLt4Bcgf/8EO9wJho+/v85z8v6/VaXvziF8s999wjN998s9xyyy1yyy23yHOe85xDx803
33zkuOmmm+Smm24SEZGv/MqvlFe96lXyxje+UX76p39aPv7xj49ZxEPgbwCgIN4BasA//FAvMCZu+/vc5z4nb3vb2+RFL3qRvO51r5MPfvCDI1m2H/A3AFAQ7wA1pPjHM888M4Alw/GP//iPtdfQb8CYhNqfK+L/7M/+bGDL9gP+BgBKJ/FuV3jwjQ21y5fZmevu0YS24/Ha3NdkgqmtC5gWsTZw9epVeetb3yqf+MQnBrRo/3ziE5+Qc+fOydWrV4PXDCkm2k72bnMffl8Gdd/R//t//08eeeQR+dIv/VL5zu/8zuJFfK7ifeiFGHw+564+1YaYRtBzvvkeusrOPuYIlKibXEL299lv7qMNpqZp5wTp9zXEhP5exbv9MnTClxXwvvuakutkGh7i08XnH1evXpWHH35Ybr/9drn11lsnKd5vvfVWueOOO4IivgTxvm/w+/FIbQ+uiP/Qhz60Z8v2Q47tX2R+4t1dZnaf4t1Sqm4aoo8cW7zbl7r1el22eBe5XgifQ7nXuuvKihzdvlhfAnzruIbWTfbdl7LGul7nWwfZ5qtlsOvb5vZSAd2x7cOK9uPHj0tVVXLnnXdOUrzfeeedUlWVHD9+3Cvi2/YbNprkdnJ1/pt6nV1/2t6H30+Hpu3vn/7pn+Snfuqn5IUvfKH8h//wH4oT8W39rW7vBLtmurvuet219lxdfnXnYv2CzS9FvIf6h1g+MV+35bcrxqlQU/vq+iVf/YW+LzfwmbtuChET72O0wVjbsFH01H0HQuLd3Syvb/7Zjv2I99CbS6gRaiW62x/7dknTL9WeS7kvtLup7yXD9xAP3RNroFA2VVV5RbseUxfvergivk2/sdlsDvnNYrE48JsU/029LiT68fvp0Pa5ZUX82MtA/u7v/q78yZ/8SdK1XZ7T6g+2/bptW0Wgb0Ox2HM3tnOp6y8h/4v1C77yxMR7rH+I5RPzdT233W6P3K8i7f3vf38r3WLTqqrwUqyl6KY6uy1Dt8FYWX33pTw76iLvdd9rW7IS76E3FLvbX+hN0Hdv6L7QW5qbhu8hnrJ9OEwHFanHjx+XW2655ZCY1eO2227z/n/pR6hct91228ELTGxMvA/Xb1Kvjf2EGesffD6s4Pdl0/a5pXz2s5/tyZLm/M3f/I18z/d8j3zFV3yF3H333fJDP/RD8n/+z/+J3tOmvK6wsG3ebdspuxb7nruuv9Tl5zvXpF8I9VchIWnTjuUT83Vru16jQ2ZC/UiqbkmN0Jaim3x2u0fIrn23wZSyKqnPjtRhM7Gh5G3457r0dwr2JxafMbZifQ+xLuLd5h37st30Uu5TZ1F4iEMMFannzp2T22+//UhHNJfI+6233iq33367PPzww9HIe12/4W5tHrrX57+p1/nEO34/LbqK97F429veJrfeeqv85//8n+Xzn/+8fOYzn5HVaiVf9mVfJv/jf/yP4H1t/M0dIqBHV/Fuz/t8zJdfnf/F+gW3HuqGzYT6h1g+qeJdh0SoSLPlaKtbRG5EaEMvIaXopjq7LUO3wVhZQ+WLnUsR7/prQlGR96Zj3kPjl1LfIFPvIwIHTbBtxSfipy7eXdGu9CGefD9bK6GHZ5P+gcj7dClNvL/nPe+Re++9V77lW75FPvzhDx85v91u5b777pN/+2//rfz+7//+kfNtyusKC/dcW+GUGvX05afExF+XYTOx/iGWT6p416E3y+VSttvtQTkuXbrUSreE6il1zHtuusllX+K9jza4z8i7Cv59PR/2Jt61cD7D6xqh+5azWCyOVLCOPXLHs9XdFxq75fuyeYiDSHi1GRXxz3nOcyYp3m+55RavaFfa9Bu+8YDqNyn+m3pd6IGG30+HUsT7Rz7yEfnWb/1Wuffee+U973lP7fU/+7M/K//iX/wLef3rXy+f+tSnDv6/y3NahaD1nTbCyffcdX0slJ9I2P9i/YKvPDHxHusfYvmkinfVNu7fjz76aCvdkkopuqnObsvQbTBWVl/5
UtpuXeR9X/Qq3t0j1fl8P4fYlRx8b3/2pw9L3X3uW5svjdDbXOwhble3gGkR8w/WeW+O9VG7coN7Lhb1SfXzUMQEvy+fIcT7xz/+cfnpn/5peeMb3yivetWr5Cu/8ivlpptuSj6OHTsmVVXJq1/96kb5Pvvss/Kv//W/luPHj8vb3/52EelvtRnXN5oIJ7tSi1LnY/a5HvO/WL9g06kT725aPgHqyydVvIvIQeTdlknHwMf6JV/9pVKKbvLZ7Ts2m80obTDWNux9XVeb2TedxHtJuF8uQCop7YYdVgGGZd/t7yd/8ifl2LFj8v3f//3y9re/XX79139dHn/8cfnsZz8r165dSz4+9KEPyTd90zfJ137t18rly5dr8/35n/95+Zf/8l/Kd33Xdx0KCozpbz6hCOmUWn9NdNN6vd5r+XKtQ8T7HtEJKu7bPEAKU/ePtlAvMCb7an+PPfaYnD59Wr7t275NPvaxj/WW7qVLl+SlL32pPPDAA/KXf/mXR87/8R//sZw5c0a+4Ru+QR577LEj5xHv5VJi/TXVTV03yaoj1zpUu+yvAoh3gAzAP/xQLzAm+2p/p0+flje96U17SVtE5Cd+4iekqir58R//cRG5/qvdD/7gD8rzn/98ecc73hG8D38DAAXxDlAD/uGHeoEx2Uf7O3/+vNx///29p+vy8Y9/XB544AF56UtfKl/6pV8qZ8+elX/4h3+I3oO/AYCCeAeoAf/wQ73AmPTd/q5cuSJVVcmVK1d6TTfGb/zGb8gf/uEfJl2LvwGAkrV4j62zaf/Pzg7OGZ3d3PfkWXcWty9PFybwpkM9+WlTL2OMW+zS1vGtfOm7ji9cuCBnz57tNc0+oU0BgJK1eLf4Ftd3RcByucx6UmroYd8Fu/aoyI1JJvvMc26U4B9jUIp4bwu+lTd9++XZs2flwoULvabZJ/RDAKB0Eu+xdS91lrJdZzMUIXe3s9XF9WOR98VicWQHMrudbYp9do1mn71N04mVxa4tandic184dJ3TlPVUQzucab6+PN0y23LYNWCtKLFlSt2YYUrw0PTTRbyH2lofvhby77o8tEz4Vhn07ZdnzpxJWspxLOiHAEDpJN7tA9O3PbGKUn2oxRb9179Du2b5djiri965O2K59rkPe32pcHc/S00nVhatL3fYjG61bPNSceDL00Xr1n2RCeUZezEK5WcFh7sj2Rzgoemni3gPtbU+fC3W1mN5uOBbedO3X546dUqeeOKJXtPsE/ohAFBai3d3YXr7MG2ya5ZLaFezpuI9FG2L7cLoe6Fokk6sLLbcobLo51ieoXx8EUhfnr6dJ2M7y7llcP+eAzw0/XQR7ym7GLb1tZS27ssjlC6+lSd9+2VVVXLt2rVe03RZLBath1rRDwGAEhXv9qfeuqEuerQR7zafqqp6Ee9qn9JEvFsbm6QTK4tN07fV8mazkc1mI6vVKppnHZq21i8Cozs8NP206Tfq2lofvhZr6/jWdNiHeN8n2k4R7wDQld4i7+65VPHum4haJ95F4mPem0bM+4q8x8piy+1ep9vpqohPjQ66Q258dYPA6A4PTT9jRt5T+o0ukXd8K39KEu+LxeLQL6ttoB8CAKW1eNeb9UFmI1pNxLsbCbPjPmMP3dBqMzbS545trXu467V1Y95D6cTKYsvtlkX/du/15elivwNfvXQVGDYNreO5CQwemn76Fu8i/fhandDHt6ZBSeLd5oF4B4CudBLv7qoNoUh23bAZu3KDvTcm3kWODt1xf6KvW7nCtVcnqLn5pKYTK4vIjQlwFy9ePJLHYrE4ZH/Kihj2b9/34MuzjcCY+4oYPDT97EO89+FrdeId35oGiHcAmCudxPtUqJsMBzdYrVbZb4bVN3P3jxDUS7/M0be6gHgHgLmCeBfEeww7pKeqqllGBufuHyGol27gW91AvAPAXEG8A9SAf/ihXmBM5ijeOTg4OMzBQxggBP7hh3qBMem7/eXennO3DwCGA/EOUAP+
4Yd6gTFBvAPAXEG8A9SAf/ihXmBMEO8AMFcQ7wA14B9+qBcYE8Q7AMwVxDtADfiHH+oFxgTxDgBzBfEOUAP+4Yd6gTFBvAPAXEG8A9SAf/ihXmBMEO8AMFcQ7wA14B9+qBcYE8Q7AMwVxDtADfiHH+oFxgTxDgBz5UC8c3BwhA84ytjfCQdH3+05Z3K3DwCG45/7QDoFgBD4hx/qBcYE8Q4AcwXxDlAD/uGHeoExQbwDwFxBvAPUgH/4oV5gTBDv7dhut4eGHq1Wq17SjVFVlWy322zTAygNxDtADfiHH+oFxgTx3pzNZiNVVclutzv4v+VyKev1unPaMRDvAP2CeAeoAf/wQ73AmCDe/ddYYe6yWCxks9kc+r/dbncobf3bnRSsEfvVanVwzgro5XLpjeYvFouD/7906ZJUVXVwraapNm82G1ksFkfKZG2x6e12u1p7NS+AKYF4B6gB//BDvcCYIN7914TEu4rcmLgXuS6ONRK/Wq1kuVyKyA0x7Du32WwOPmsaVtirUNc09AWiTrzbl431en3wUmBfHOrsdV9WAKYA4h2gBvzDD/UCY4J4v44OhXEPdyhMinhXweu7Jya0fRFz13Yr3jWNWJquLWqPL70UewGmBOIdoAb8ww/1AmOCePdf0yXyri8CvnvqouTr9To4CbYv8e6m18RegCmBeAeoAf/wQ73AmCDe/de0HfNuBW/sXGx8us0nNmxG03BfKJqKdyLvMFcQ7wA14B9+qBcYE8R7c0KrzbgTTO0YcldM+4S2HY+uaTQR73rtcrk89EJgz63X64NzsTHvIXsBpgTiHaAG/MMP9QJjgnhvhztG3h3iUrd6S2xyaWi8va74cvHixSOC2h1uY9MM2WJXq0m1F2BKIN4BasA//FAvMCaIdwCYK72Kd3ZugynCQ9MP/cb+0oN6EO8AMFd6E+/s3AZThYemH/qN/aUH9SDeAWCuJIv3LrPY3b9DY9PYuQ1yhO/YD/0G/caYIN4BYK70It7ZuQ2mDA9NP/Qb9BtjgngHgLkSFe/s3Mb6scBDMwT9Bv3GmCDe90dsjXX3nM/X3V/B9mlPU9brtazXa9lut4de8LvQpL4A+mCwyDs7t0Gp0PH6od84mh79xnAg3vdHU/G+7/kefQng3W53SLCrkN8niHfYB72IdxF2boPpQsfrh35Dguml2AvdQLz7r0l5GdbD+p5d9UnnkaSci4l3O7RN77dzQuxLdehae04JzSupO7darY70N25ZVOBbUa/D3+rms6TUF0AfJIv3Oti5DaYKHa8f+g36jTFBvPuvSf2ly7ZfvVeFrU+gx86liHeRG5PIfS+zdgK6a58v2m/tcee3+Oac6H0uy+XyiP2+OTPav8Tms6TUF0Af9CbeRdi5DaYJHa8f+g36jTFBvF8ndY6J776QOLZ/x86pXe4RemH1iWkVzrFfwVx7fD6Z8kud75c539AZe599oY+lnVpfAH3Qq3gHmCL4hx/qBcYE8e6/JvZyaJdVtSI7NFej7pzmWRd5byPe7XkrgEMvK7vdrnbOSUi8+zaGWy6XstlsZLPZHAy3SRHvdfUF0AeId4Aa8A8/1AuMCeLdf01IvLsR4D4j7/sQ76mR91gZ3Qh6auRd/1/t3Ww2RN4hKxDvADXgH36oFxgTxHsz3IjwYrE4MszMjud2rw2d61u8az6pY9595fLNOdH7XHxj3m2eKWm79sXqC6APEO8ANeAffqgXGBPEe3PsPJBQpLuqjq6QEjvnG8Ki4rqNeLc7Iyt1q83YXxv6WG3G1pcdUpO62kysvgD6APEOUAP+4Yd6gTFBvE+LISZ0j7HOO8A+GFW8x35uSyE2zqwroWhCn5tR9GlzTp3Qdhveuc5GVdyjaR5tOvo29039odmWfdZLXw/yJj7bpj229eHU+3zXdfX1PvudmK+3SStk13a7ld/7vd879H+I92kx1GpMdofV2C7MADkzGfHeN30LdR992e9GE3IgRWB0qeMhl92b+kOz
LSXUS2obs2NURW4sF1nHviej+X6O7+rrfds81C6ViPe87QOA4UgW73VCKbZzm8nooAOyY+8uXbp0KH13veZQ2qHIu12nWY/1el2bjkaF1d4Sd42zG124vxbUrYsdW8O6yW5zSmwsoa+Ofd9D3Xdmy2nTr7vPN84z5Ac8NP3ss9+w31Ndu7W/5rgbM9m2Efqu3c1rrO3upLhYH5RynV0Pvsl9ijtmN1T2fdjs9gNWtPt8ve9dKhHvedsHAMPRi3iv27nNboFu11TVh1js4RxLO3XYjD6Q6tKxD8VU8S6S165xrm2+XeBCIiiUf9Pd5pTQLH6bn0+8qw0p372vXlPu03N1ZXDrF26wz34jVbz72qZtU7aNxfxLfdgNOqTamXqdT0Cnpq9lUGJl34fNvvz0viF2qUS8520fAAxHVLzvY+c2ETno8FPEe2raIfEeeiD70nE3h3CPEnaNi9Vl3blQ/k13m1Pqfk4PifeQ2IuVJRQ9jd0XKsNnP/vZI3bCUYboN1LEe2zcqu1jQu07ZLev3wjZ2aQ8be7Tz+4vkqljdvuwWUQO9QN1E/+a9hux/pxhM/nb58MGWJT1et37MM99zB8bcj5Hid8tjEtUvLsXxn7+Du3cVhcRTxHvKWn78rER/5R03IdUXeS9jXj3lVvL7BM8u12zXePaivdY/iLNdptTQjvXherYV1dtv7OU+0JlQLynMUS/kTJsxg5Jc9ubtom69u2ieaa2pyblaXOffnbFeqzsfdus1242myNifYhdKhHvedvnw+dn+xDvfTHGajSId2hDL+LdN4ayiXh3hV/s/tTIuzveMiWdIcR7auTdl5/iRrRSxXtdPceieE12m7P3dIm8N/nOYr9UEHnfD0P0G+4vP+51LqFhM7H27Q7vsGn52npqH5QaeU9N3/X1WNn3YbP+nx27rgyxSyXivR9/882/Eon/8mTPuRsf6Xeoh2vPcrk8Iojt36H7Q/b6XlZ9bcc376uunKnrwPc9nyP3tgf5kSzeY9Tt3Gadx46pdMW7XrNcLo9EhH1phzr71WoVnLwVS2ef4n2oXeNSxLuvnuvyt51Nik2afqgO3XoK2V73nfnqtcl3XVcGtROOss9+wyfefe3Wjfimjnl38w6JFd8vNKE+qEm7a3qftVOJlX0fNlsbUn29Sb/hltW9DvHej3hvOk9I09Xv132+x+YN6X32V3BXvNfNgWgyt8x3n00zpZwuQ8znyL3tQX70It5F0ndus3nZlRfcn3/dzsGXdki82+vdNGPp1I15V4drI96H2jWu6RCDWATFfQgsFmm7zdl6sJ99IqBu2Ezdd2ajK66tKd91w0K2kQAADD1JREFUXRnccsAN9tlvuN9Tav/gRn9tH1PXvl1fb9IOm5an6X2KGxmMlb1vm60Nbl4xX0/tN2L9IeK9+xyT2K+VvjRD80piv4y7aWqbstdZ8Z7yS0yT56yvnE3m0PmuG2I+R+5tD/Ljn/2chrMvQg/APnHH6eVATptGWdp0lPiHn33WCw80Pzn4utufjeXriHf/NXWR96bzhGLivW7uk5u+thW7ktnQ4j02xyMk3vc9nyP3tgf5gXjfM0OId5G8xHJsfPGYaPS2aT3hH372VS9tv6e5MJavu8Mu9P/G8nXEu/+aVPGeOk+oj8i7PRcbTrJv8R4rZ2hOyRDzOXJve5AfiHeAGvAPP9QLjAnivRlt5wlp3qG5UnXzsezQSH0xj415D8256EO8p5TTZYj5HLm3PcgPxDtADfiHH+oFxgTx3ozY/CuR+Ly1LqvNuMJ3sVg0Wm2m72EzsXKmrjZj0+pjPkfubQ/yA/EOUAP+4Yd6gTFBvDejryGcvtXcpoI7pySn4agAFsQ7QA34hx/qBcYE8d6MtuLdDg+xEzynit1hdeplhXJBvAPUgH/4oV5gTBDvADBXEO8ANeAffqgXGBPEOwDMFcQ7QA34hx/qBcYE8Q4AcwXxnjF///d/P7YJIDw0Q1AvMCaI93HxrSQT
us49+pjw2sf66KllEDk8ebX0Mm232942e4vZzBr2++NAvHNwcIQPOMrY3wkHR9/tOWdys6+q0sW7b7lId0nGpgwp3t1VaKZQpiFW0kG874+qqoSazZCrV6/KsWPH5OrVq2ObAgAweXIXGSn2VVV4NZnlcnlIrOm66XVrk+s65FYU2rXSd7vdoXXgq6o6JGJ9YnK1Wh2s6FJVN9aeF4mvG29XvrHro9et7a522DTdMsTyddd/n0KZ9H77WV9S2rSTuvJAvyDeM+XcuXNyzz33yLlz58Y2BQBg8uQuMrqK981mc2RnUxXdKkztRkjujqyhTZJU1Gm+dudWe51r53a7PcjDCmN391E3T2trqtC1UfH1en0wxMV9IYnl67O/5DKJ+HePDbWTUFqueA+VB/oF8Z4hV69elRMnTkhVVXLixAmi7wAAeyZ3kRGyz4166+EOibAiWz+7a5nba+rEY2h4hu+6kG1uHqGdSK09Suquqr6hG3qdK7hD+cYi3iWWSfENnQm1k5QyxMoD/YJ4z5Bz587Jgw8+KFVVyYMPPkj0HQBgz+QuMrpG3kWuR1o3m41sNpuDoSA+IdpUvNuhNVVVJYl8Eb9ADYnE0LlQOjGh69oWyzck3ksuk2Ij9pZQOwnVj56LlQf6BfGeGRp1/+hHPypVVcnjjz9O9B0AYM/kLjL6EO8q1lScxXYRTRXvrpBMjdD78mgbpXaFaVOhG8t3t9t1Eu85lkkJTVr1tRMi73mBeM8MjbqL3Oisib4DAOyX3EVGH/bZyYQ2XR2jbCOnqeLdjbYuFovW4l3vt2Or3XHe9pwrdDWf5XIZtMGOybf/H8vXrfsplEnT9JXD105CabkCPVQe6BfEe0bYqLvIjQ6D6DsAwH7JXWT0Zd9isTg0VMJdRcSNGIfEu66motF7vd+9r6nQja2QYs+5wnC9Xh865xvL76Zpy9B1tZnSyqTlsJ/dF45YO3HLm1Ie6A/Ee0acO3dOHnrooYO/baMn+g4AsD9yFxm52zdl3HXep8AQ67zD/kC8Z4IbdRc53FkTfQcA2B+5i+Pc7Zs6UxK7sbkOUAaI90xwo+4iRztrou8AAPshd3Gcu30AMByI9wy4evWqHD9+/FDUXeRoZ/3444/L8ePHib4DAPRM7uI4d/sAYDgQ7xngi7qL+Dvrhx56iOg7AEDP5C6Oc7cPAIYD8T4yoai7iL+zJvoOANA/uYvjnOzrsn63XbHEHqHVW7ra59o6dP45ppnL+P2pfxfb7ba3ic7PPPPMob8R7xlw4cIF7/+HGkfoegAAaEdO4thH7val0rc4q8Mn3vedf87iPaeVc+bwXezrRQnxnjFT6awBAHIn9/42xT53rW6XurW6dV1uK6qWy+Uh8WGvUXSjJj10TXRNV9ceVxvr1ki365u764Zb6vJ1P/eZf+haX56x9dZDZbDp+NZMj6Wp9e2rM7tmfdP6jqVry+HbEMrHHL4LLaf9rC9Qrl8tl8tgWkTeCyL3hwkAwFTIvb/tQ7xbQaJiQeSw+HHPbTabQ5Fa3bxH7VGxofna3T413aYbHKkdKhZ3u92RzY9S8nU/95m/feGJ5R+r91gZfPfZNN2dTGPfly2v76Utpbwp6dqdXfsU73W25fpdKL6dbH31qRuh+dJCvBdE7g8TAICpkHt/G7LPjRbq4f5U767tbcVKbDdVe51+jg0bsPf6dhv12Rq63hVCMbHny9f93Gf+sZcJN/9QvaeWoa5MNk13J1yLa0uT8sbSdc/FrrVM/btQfENnUvzKXoN4L4jcHyYAAFMh9/62a+Q9JPLrxLvI9cjhZrORzWYjq9XqiMiwQ2liAkxtrIu2por3unzdz33mHyubzTNW77Ey6H2KCrnYORs11vTc4SVtxXss3S7iferfhdabO8xI5KhfxdJCvBdE7g8TAICpkHt/21W8x3bVrBPvKj5UbMTEcUrkvU/BFsvXd92+BGNqtNeXX10Z6soUiyDb4S273a6TeA+lm5t4z+27
CE1ajfmVmxbivSByf5gAAEyF3PvbPuyrqhvjdm2Ur06828l69m83HZHD4533Kd5T8nU/95m/OyY7dZy1tTtWBv3bjn92r7Xn9D43ypsy5j2lvCnp6t/L5XIU8Z7bd6H4xrxb21LSQrwXRO4PEwCAqZB7f9uHfe5KFu6Ev5B4F7kxUdVeb89pmjat1DHvKqiaRoLr8vXZ2lf+eq1dgUUJRVDdeo+Vwb2vyQonNk034utbbaZNfbvpxlabCf0qMIfvQm0N1YX1q1haiPeCyP1hAgAwFXLvb3O3b274XkxKQJcp3Cer1eqQIF2v13utp5y/C9Z5nyF01gAAw5B7f5u7fXMjZ8FYR9+C0g7/qKrKO2xkn+T6XcTG2HcF8Z4xdNYAAMOQe3+bu30AMByI94yhswYAGIbc+9vc7QOA4UC8ZwydNQDAMOTe3+ZuHwAMB+I9Y+isAQCGIff+Nnf7AGA4EO8ZQ2cNADAMufe3udsHAMOBeM8YOmsAgGHIvb/N3T4AGA7Ee8bQWQMADEPu/W3u9gHAcCDeM4bOGgBgGHLvb3O3DwCGA/GeMXTWAADDkHt/m7t9ADAciPeMobMGABiG3Pvb3O0DgOFAvGcMnTUAwDDk3t/mbh8ADAfiPWPorAEAhiH3/jZ3+wBgOBDvGTN2Z71er2W5XCZf3+RaAICcGLu/rSN3+wBgOBDvGTNmZ71er6WqqiRBriJ/uVxKVVWy3W4HsBAAoD9yF8e52wcAw4F4z5ixOmsV4qvVKjmavt1uebgAQLHk3n/lbh8ADAfiPWPG7qxTxftut5PFYnHwLwBAaYzd39aRu30AMByI94ypqkquXbs2Wv5NIu8AAKVy7dq17MVxVVUcHBwcN46xOyXwc+rUKXniiSdGyx/xDgBz4IknnpBTp06NbQYAQDKI90w5c+aMXL58ebT8Ee8AMAcuX74sZ86cGdsMAIBkEO+ZcvbsWblw4cJo+SPeAWAOXLhwQc6ePTu2GQAAySDeM2XsB4pPvFdVJbvd7shnAIBSGTtQAgDQFMR7ply5ckWqqpIrV66MbQoAwCShnwWAEkG8Z8z58+fl/vvvH9sMAIBJcv/998v58+fHNgMAoBGI98w5ffq0PPzww2ObAQAwKR5++GE5ffr02GYAADQG8Z4573vf++T06dNy//3389MuAEBHrly5Ivfff7+cPn1a3ve+941tDgBAYxDvhXD+/HmpqupgctXly5fliSeeGHUjJwCAnLl27Zo88cQTcvny5YNFAKqqYqgMABQN4r0grly5cvAAOnPmjJw6dWr8Xb44ODg4Mj5OnTolZ86cOQh88AsmAJQO4h0AAAAAoBAQ7wAAAAAAhYB4BwAAAAAoBMQ7AAAAAEAhIN4BAAAAAAoB8Q4AAAAAUAiIdwAAAACAQkC8AwAAAAAUAuIdAAAAAKAQEO8AAAAAAIWAeAcAAAAAKATEOwAAAABAISDeAQAAAAAKAfEOAAAAAFAIiHcAAAAAgEJAvAMAAAAAFALiHQAAAACgEP4/8tkJqlOyQtcAAAAASUVORK5CYII=">
</div>
```
```
| github_jupyter |
# Face Recognition for the Happy House
Welcome to the first assignment of week 4! Here you will build a face recognition system. Many of the ideas presented here are from [FaceNet](https://arxiv.org/pdf/1503.03832.pdf). In lecture, we also talked about [DeepFace](https://research.fb.com/wp-content/uploads/2016/11/deepface-closing-the-gap-to-human-level-performance-in-face-verification.pdf).
Face recognition problems commonly fall into two categories:
- **Face Verification** - "is this the claimed person?". For example, at some airports, you can pass through customs by letting a system scan your passport and then verifying that you (the person carrying the passport) are the correct person. A mobile phone that unlocks using your face is also using face verification. This is a 1:1 matching problem.
- **Face Recognition** - "who is this person?". For example, the video lecture showed a face recognition video (https://www.youtube.com/watch?v=wr4rx0Spihs) of Baidu employees entering the office without needing to otherwise identify themselves. This is a 1:K matching problem.
FaceNet learns a neural network that encodes a face image into a vector of 128 numbers. By comparing two such vectors, you can then determine if two pictures are of the same person.
**In this assignment, you will:**
- Implement the triplet loss function
- Use a pretrained model to map face images into 128-dimensional encodings
- Use these encodings to perform face verification and face recognition
In this exercise, we will be using a pre-trained model which represents ConvNet activations using a "channels first" convention, as opposed to the "channels last" convention used in lecture and previous programming assignments. In other words, a batch of images will be of shape $(m, n_C, n_H, n_W)$ instead of $(m, n_H, n_W, n_C)$. Both of these conventions have a reasonable amount of traction among open-source implementations; there isn't a uniform standard yet within the deep learning community.
Let's load the required packages.
```
from keras.models import Sequential
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
from keras.layers.merge import Concatenate
from keras.layers.core import Lambda, Flatten, Dense
from keras.initializers import glorot_uniform
from keras.engine.topology import Layer
from keras import backend as K
# The pretrained FaceNet model used below represents activations with a
# "channels first" layout, i.e. batches of shape (m, n_C, n_H, n_W), so
# switch Keras away from its channels-last default before building it.
K.set_image_data_format('channels_first')
import cv2
import os
import numpy as np
from numpy import genfromtxt
import pandas as pd
import tensorflow as tf
from fr_utils import *
from inception_blocks_v2 import *
%matplotlib inline
%load_ext autoreload
%autoreload 2
import sys

# `threshold=np.nan` is no longer accepted by NumPy (TypeError since 1.22;
# deprecated earlier) — `threshold` must be an int. `sys.maxsize` preserves
# the original intent: never summarize, always print arrays in full.
np.set_printoptions(threshold=sys.maxsize)
```
## 0 - Naive Face Verification
In Face Verification, you're given two images and you have to tell if they are of the same person. The simplest way to do this is to compare the two images pixel-by-pixel. If the distance between the raw images is less than a chosen threshold, it may be the same person!
<img src="images/pixel_comparison.png" style="width:380px;height:150px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u></center></caption>
Of course, this algorithm performs really poorly, since the pixel values change dramatically due to variations in lighting, orientation of the person's face, even minor changes in head position, and so on.
You'll see that rather than using the raw image, you can learn an encoding $f(img)$ so that element-wise comparisons of this encoding gives more accurate judgements as to whether two pictures are of the same person.
## 1 - Encoding face images into a 128-dimensional vector
### 1.1 - Using an ConvNet to compute encodings
The FaceNet model takes a lot of data and a long time to train. So following common practice in applied deep learning settings, let's just load weights that someone else has already trained. The network architecture follows the Inception model from [Szegedy *et al.*](https://arxiv.org/abs/1409.4842). We have provided an inception network implementation. You can look in the file `inception_blocks.py` to see how it is implemented (do so by going to "File->Open..." at the top of the Jupyter notebook).
The key things you need to know are:
- This network uses 96x96 dimensional RGB images as its input. Specifically, inputs a face image (or batch of $m$ face images) as a tensor of shape $(m, n_C, n_H, n_W) = (m, 3, 96, 96)$
- It outputs a matrix of shape $(m, 128)$ that encodes each input face image into a 128-dimensional vector
Run the cell below to create the model for face images.
```
# Build the Inception-based FaceNet model: 96x96 RGB inputs in channels-first
# layout (3, 96, 96); its output is a 128-dimensional encoding per face image.
FRmodel = faceRecoModel(input_shape=(3, 96, 96))
print("Total Params:", FRmodel.count_params())
```
** Expected Output **
<table>
<center>
Total Params: 3743280
</center>
</table>
By using a 128-neuron fully connected layer as its last layer, the model ensures that the output is an encoding vector of size 128. You then use the encodings to compare two face images as follows:
<img src="images/distance_kiank.png" style="width:680px;height:250px;">
<caption><center> <u> <font color='purple'> **Figure 2**: <br> </u> <font color='purple'> By computing a distance between two encodings and thresholding, you can determine if the two pictures represent the same person</center></caption>
So, an encoding is a good one if:
- The encodings of two images of the same person are quite similar to each other
- The encodings of two images of different persons are very different
The triplet loss function formalizes this, and tries to "push" the encodings of two images of the same person (Anchor and Positive) closer together, while "pulling" the encodings of two images of different persons (Anchor, Negative) further apart.
<img src="images/triplet_comparison.png" style="width:280px;height:150px;">
<br>
<caption><center> <u> <font color='purple'> **Figure 3**: <br> </u> <font color='purple'> In the next part, we will call the pictures from left to right: Anchor (A), Positive (P), Negative (N) </center></caption>
### 1.2 - The Triplet Loss
For an image $x$, we denote its encoding $f(x)$, where $f$ is the function computed by the neural network.
<img src="images/f_x.png" style="width:380px;height:150px;">
<!--
We will also add a normalization step at the end of our model so that $\mid \mid f(x) \mid \mid_2 = 1$ (means the vector of encoding should be of norm 1).
-->
Training will use triplets of images $(A, P, N)$:
- A is an "Anchor" image--a picture of a person.
- P is a "Positive" image--a picture of the same person as the Anchor image.
- N is a "Negative" image--a picture of a different person than the Anchor image.
These triplets are picked from our training dataset. We will write $(A^{(i)}, P^{(i)}, N^{(i)})$ to denote the $i$-th training example.
You'd like to make sure that an image $A^{(i)}$ of an individual is closer to the Positive $P^{(i)}$ than to the Negative image $N^{(i)}$ by at least a margin $\alpha$:
$$\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 + \alpha < \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$$
You would thus like to minimize the following "triplet cost":
$$\mathcal{J} = \sum^{m}_{i=1} \large[ \small \underbrace{\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2}_\text{(1)} - \underbrace{\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2}_\text{(2)} + \alpha \large ] \small_+ \tag{3}$$
Here, we are using the notation "$[z]_+$" to denote $max(z,0)$.
Notes:
- The term (1) is the squared distance between the anchor "A" and the positive "P" for a given triplet; you want this to be small.
- The term (2) is the squared distance between the anchor "A" and the negative "N" for a given triplet, you want this to be relatively large, so it thus makes sense to have a minus sign preceding it.
- $\alpha$ is called the margin. It is a hyperparameter that you should pick manually. We will use $\alpha = 0.2$.
Most implementations also normalize the encoding vectors to have norm equal one (i.e., $\mid \mid f(img)\mid \mid_2$=1); you won't have to worry about that here.
**Exercise**: Implement the triplet loss as defined by formula (3). Here are the 4 steps:
1. Compute the distance between the encodings of "anchor" and "positive": $\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2$
2. Compute the distance between the encodings of "anchor" and "negative": $\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$
3. Compute the formula per training example: $\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 - \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2 + \alpha$
4. Compute the full formula by taking the max with zero and summing over the training examples:
$$\mathcal{J} = \sum^{m}_{i=1} \large[ \small \mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 - \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2+ \alpha \large ] \small_+ \tag{3}$$
Useful functions: `tf.reduce_sum()`, `tf.square()`, `tf.subtract()`, `tf.add()`, `tf.maximum()`.
For steps 1 and 2, you will need to sum over the entries of $\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2$ and $\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$ while for step 4 you will need to sum over the training examples.
```
# GRADED FUNCTION: triplet_loss
def triplet_loss(y_true, y_pred, alpha = 0.2):
"""
Implementation of the triplet loss as defined by formula (3)
Arguments:
y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.
y_pred -- python list containing three objects:
anchor -- the encodings for the anchor images, of shape (None, 128)
positive -- the encodings for the positive images, of shape (None, 128)
negative -- the encodings for the negative images, of shape (None, 128)
Returns:
loss -- real number, value of the loss
"""
anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
### START CODE HERE ### (≈ 4 lines)
# Step 1: Compute the (encoding) distance between the anchor and the positive, you will need to sum over axis=-1
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor,positive)),axis=-1)
# Step 2: Compute the (encoding) distance between the anchor and the negative, you will need to sum over axis=-1
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor,negative)),axis=-1)
# Step 3: subtract the two previous distances and add alpha.
basic_loss = tf.add(tf.subtract(pos_dist,neg_dist),alpha)
# Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.
loss = tf.reduce_sum(tf.maximum(basic_loss,0))
### END CODE HERE ###
return loss
# Smoke-test triplet_loss on three fixed random encodings.
# NOTE(review): tf.Session, tf.set_random_seed and tf.random_normal are
# TensorFlow 1.x APIs; under TF 2.x this cell would need tf.compat.v1 (or
# an eager rewrite) — confirm which TF version the course environment uses.
with tf.Session() as test:
tf.set_random_seed(1)
# y_true is unused by triplet_loss, so a placeholder tuple suffices.
y_true = (None, None, None)
y_pred = (tf.random_normal([3, 128], mean=6, stddev=0.1, seed = 1),
tf.random_normal([3, 128], mean=1, stddev=1, seed = 1),
tf.random_normal([3, 128], mean=3, stddev=4, seed = 1))
loss = triplet_loss(y_true, y_pred)
print("loss = " + str(loss.eval()))
```
**Expected Output**:
<table>
<tr>
<td>
**loss**
</td>
<td>
528.143
</td>
</tr>
</table>
## 2 - Loading the trained model
FaceNet is trained by minimizing the triplet loss. But since training requires a lot of data and a lot of computation, we won't train it from scratch here. Instead, we load a previously trained model. Load a model using the following cell; this might take a couple of minutes to run.
```
FRmodel.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])
load_weights_from_FaceNet(FRmodel)
```
Here're some examples of distances between the encodings between three individuals:
<img src="images/distance_matrix.png" style="width:380px;height:200px;">
<br>
<caption><center> <u> <font color='purple'> **Figure 4**:</u> <br> <font color='purple'> Example of distance outputs between three individuals' encodings</center></caption>
Let's now use this model to perform face verification and face recognition!
## 3 - Applying the model
Back to the Happy House! Residents are living blissfully since you implemented happiness recognition for the house in an earlier assignment.
However, several issues keep coming up: The Happy House became so happy that every happy person in the neighborhood is coming to hang out in your living room. It is getting really crowded, which is having a negative impact on the residents of the house. All these random happy people are also eating all your food.
So, you decide to change the door entry policy, and not just let random happy people enter anymore, even if they are happy! Instead, you'd like to build a **Face verification** system so as to only let people from a specified list come in. To get admitted, each person has to swipe an ID card (identification card) to identify themselves at the door. The face recognition system then checks that they are who they claim to be.
### 3.1 - Face Verification
Let's build a database containing one encoding vector for each person allowed to enter the happy house. To generate the encoding we use `img_to_encoding(image_path, model)` which basically runs the forward propagation of the model on the specified image.
Run the following code to build the database (represented as a python dictionary). This database maps each person's name to a 128-dimensional encoding of their face.
```
database = {}
database["danielle"] = img_to_encoding("images/danielle.png", FRmodel)
database["younes"] = img_to_encoding("images/younes.jpg", FRmodel)
database["tian"] = img_to_encoding("images/tian.jpg", FRmodel)
database["andrew"] = img_to_encoding("images/andrew.jpg", FRmodel)
database["kian"] = img_to_encoding("images/kian.jpg", FRmodel)
database["dan"] = img_to_encoding("images/dan.jpg", FRmodel)
database["sebastiano"] = img_to_encoding("images/sebastiano.jpg", FRmodel)
database["bertrand"] = img_to_encoding("images/bertrand.jpg", FRmodel)
database["kevin"] = img_to_encoding("images/kevin.jpg", FRmodel)
database["felix"] = img_to_encoding("images/felix.jpg", FRmodel)
database["benoit"] = img_to_encoding("images/benoit.jpg", FRmodel)
database["arnaud"] = img_to_encoding("images/arnaud.jpg", FRmodel)
```
Now, when someone shows up at your front door and swipes their ID card (thus giving you their name), you can look up their encoding in the database, and use it to check if the person standing at the front door matches the name on the ID.
**Exercise**: Implement the verify() function which checks if the front-door camera picture (`image_path`) is actually the person called "identity". You will have to go through the following steps:
1. Compute the encoding of the image from image_path
2. Compute the distance between this encoding and the encoding of the identity image stored in the database
3. Open the door if the distance is less than 0.7, else do not open.
As presented above, you should use the L2 distance (np.linalg.norm). (Note: In this implementation, compare the L2 distance, not the square of the L2 distance, to the threshold 0.7.)
```
# GRADED FUNCTION: verify
def verify(image_path, identity, database, model):
"""
Function that verifies if the person on the "image_path" image is "identity".
Arguments:
image_path -- path to an image
identity -- string, name of the person you'd like to verify the identity. Has to be a resident of the Happy house.
database -- python dictionary mapping names of allowed people's names (strings) to their encodings (vectors).
model -- your Inception model instance in Keras
Returns:
dist -- distance between the image_path and the image of "identity" in the database.
door_open -- True, if the door should open. False otherwise.
"""
### START CODE HERE ###
# Step 1: Compute the encoding for the image. Use img_to_encoding() see example above. (≈ 1 line)
encoding = img_to_encoding(image_path, model)
# Step 2: Compute distance with identity's image (≈ 1 line)
dist = np.linalg.norm(encoding-database[identity])
# Step 3: Open the door if dist < 0.7, else don't open (≈ 3 lines)
if None:
print("It's " + str(identity) + ", welcome home!")
door_open = None
else:
print("It's not " + str(identity) + ", please go away")
door_open = None
### END CODE HERE ###
return dist, door_open
```
Younes is trying to enter the Happy House and the camera takes a picture of him ("images/camera_0.jpg"). Let's run your verification algorithm on this picture:
<img src="images/camera_0.jpg" style="width:100px;height:100px;">
```
verify("images/camera_0.jpg", "younes", database, FRmodel)
```
**Expected Output**:
<table>
<tr>
<td>
**It's younes, welcome home!**
</td>
<td>
(0.65939283, True)
</td>
</tr>
</table>
Benoit, who broke the aquarium last weekend, has been banned from the house and removed from the database. He stole Kian's ID card and came back to the house to try to present himself as Kian. The front-door camera took a picture of Benoit ("images/camera_2.jpg"). Let's run the verification algorithm to check if Benoit can enter.
<img src="images/camera_2.jpg" style="width:100px;height:100px;">
```
verify("images/camera_2.jpg", "kian", database, FRmodel)
```
**Expected Output**:
<table>
<tr>
<td>
**It's not kian, please go away**
</td>
<td>
(0.86224014, False)
</td>
</tr>
</table>
### 3.2 - Face Recognition
Your face verification system is mostly working well. But since Kian got his ID card stolen, when he came back to the house that evening he couldn't get in!
To reduce such shenanigans, you'd like to change your face verification system to a face recognition system. This way, no one has to carry an ID card anymore. An authorized person can just walk up to the house, and the front door will unlock for them!
You'll implement a face recognition system that takes as input an image, and figures out if it is one of the authorized persons (and if so, who). Unlike the previous face verification system, we will no longer get a person's name as another input.
**Exercise**: Implement `who_is_it()`. You will have to go through the following steps:
1. Compute the target encoding of the image from image_path
2. Find the encoding from the database that has smallest distance with the target encoding.
- Initialize the `min_dist` variable to a large enough number (100). It will help you keep track of what is the closest encoding to the input's encoding.
- Loop over the database dictionary's names and encodings. To loop use `for (name, db_enc) in database.items()`.
- Compute L2 distance between the target "encoding" and the current "encoding" from the database.
- If this distance is less than the min_dist, then set min_dist to dist, and identity to name.
```
# GRADED FUNCTION: who_is_it
def who_is_it(image_path, database, model):
    """
    Implements face recognition for the happy house by finding who is the person on the image_path image.

    Arguments:
    image_path -- path to an image
    database -- database containing image encodings along with the name of the person on the image
    model -- your Inception model instance in Keras

    Returns:
    min_dist -- the minimum distance between image_path encoding and the encodings from the database
    identity -- string, the name prediction for the person on image_path
    """
    ### START CODE HERE ###
    ## Step 1: encode the probe image with the same network that produced the database. ## (≈ 1 line)
    probe = img_to_encoding(image_path, model)
    ## Step 2: linear scan for the closest stored encoding ##
    # Start from a distance larger than any plausible match (100).
    min_dist = 100
    # Compare the probe against every enrolled person, keeping the best.
    for (person, stored_enc) in database.items():
        gap = np.linalg.norm(probe - stored_enc)
        if gap < min_dist:
            min_dist = gap
            identity = person
    ### END CODE HERE ###
    if min_dist > 0.7:
        print("Not in the database.")
    else:
        print ("it's " + str(identity) + ", the distance is " + str(min_dist))
    return min_dist, identity
```
Younes is at the front-door and the camera takes a picture of him ("images/camera_0.jpg"). Let's see if your who_is_it() algorithm identifies Younes.
```
who_is_it("images/camera_0.jpg", database, FRmodel)
```
**Expected Output**:
<table>
<tr>
<td>
**it's younes, the distance is 0.659393**
</td>
<td>
(0.65939283, 'younes')
</td>
</tr>
</table>
You can change "`camera_0.jpg`" (picture of younes) to "`camera_1.jpg`" (picture of bertrand) and see the result.
Your Happy House is running well. It only lets in authorized persons, and people don't need to carry an ID card around anymore!
You've now seen how a state-of-the-art face recognition system works.
Although we won't implement it here, here're some ways to further improve the algorithm:
- Put more images of each person (under different lighting conditions, taken on different days, etc.) into the database. Then given a new image, compare the new face to multiple pictures of the person. This would increase accuracy.
- Crop the images to just contain the face, and less of the "border" region around the face. This preprocessing removes some of the irrelevant pixels around the face, and also makes the algorithm more robust.
<font color='blue'>
**What you should remember**:
- Face verification solves an easier 1:1 matching problem; face recognition addresses a harder 1:K matching problem.
- The triplet loss is an effective loss function for training a neural network to learn an encoding of a face image.
- The same encoding can be used for verification and recognition. Measuring distances between two images' encodings allows you to determine whether they are pictures of the same person.
Congrats on finishing this assignment!
### References:
- Florian Schroff, Dmitry Kalenichenko, James Philbin (2015). [FaceNet: A Unified Embedding for Face Recognition and Clustering](https://arxiv.org/pdf/1503.03832.pdf)
- Yaniv Taigman, Ming Yang, Marc'Aurelio Ranzato, Lior Wolf (2014). [DeepFace: Closing the gap to human-level performance in face verification](https://research.fb.com/wp-content/uploads/2016/11/deepface-closing-the-gap-to-human-level-performance-in-face-verification.pdf)
- The pretrained model we use is inspired by Victor Sy Wang's implementation and was loaded using his code: https://github.com/iwantooxxoox/Keras-OpenFace.
- Our implementation also took a lot of inspiration from the official FaceNet github repository: https://github.com/davidsandberg/facenet
| github_jupyter |
# Introduction to pytorch tensors
---
PyTorch tensors work very similarly to numpy arrays, and you can always convert a tensor to a numpy array or make a numpy array into a torch tensor. The primary difference is that a tensor can live either on your CPU or your GPU and that it works with the automatic differentiation machinery of pytorch.
```
!pip install torch
import torch
x_tensor = torch.tensor([[1., -1.], [1., -1.]])
print(type(x_tensor))
# tensor to numpy
x_array = x_tensor.numpy()
print(type(x_array))
# numpy to tensor
x_tensor2 =torch.tensor(x_array)
print(type(x_tensor2))
print(x_tensor2 == x_tensor)
# Location of tensor
x_tensor.device
```
### Example of finding gradient
```
x = torch.tensor([[1., -1.], [1., 1.]], requires_grad=True)
# sum(x_i ^ 2)
out = (x**2).sum()
# calculate the gradient
out.backward()
# What is the gradient for x
print(x.grad)
```
# Optimizing a polynomial
---
```
x = torch.tensor([3.], requires_grad=True)
y = 2. + x**2 -3 *x
print(y)
# create sgd optimizer
optimizer = torch.optim.SGD([x], lr=0.01) # lr = learning rate, SGD = stochastic gradient descent
# backward pass / calcuate the gradient on the thing we want to optimize
y.backward()
print(x.grad) # examine - the gradient at a specific point
# step in the direction to minimize y
optimizer.step()
# set the gradient to zero. (This is a bit weird but required)
optimizer.zero_grad()
# we see that x have improved (minimum is 1.5 so moving in the right direction)
print(x)
# we see that the gradient is set to zero
print(x.grad)
```
We can now do this multiple times to obtain the desired results: (i.e. to find the minimum)
- we are stepping down the curve - one step at the time
```
for i in range(1000):
# forward pass / or just calculate the outcome
y = 2. + x**2 -3 *x
# backward pass on the thing we want to minimize
y.backward() # calculate gradient?
# take a step in the "minimize direction"
optimizer.step()
# zero the gradient
optimizer.zero_grad()
# now we have found the minimum
print(x)
```
# Fitting a Linear regression
---
Here we will fit a linear regression using pytorch, using the same approach as above.
## 0) Prepare the data
First let us create some data. We will do this using `sklearn`'s `make_regression`, which just make some sample data for regression.
```
!pip install sklearn
from sklearn import datasets
import matplotlib.pyplot as plt
# 0) Prepare data
X_numpy, y_numpy = datasets.make_regression(n_samples=100, n_features=1, noise=20, random_state=4)
# plot the sample
plt.plot(X_numpy, y_numpy, 'ro')
plt.show()
# cast to float Tensor
X = torch.tensor(X_numpy, dtype=torch.float)
y = torch.tensor(y_numpy, dtype=torch.float)
y = y.view(y.shape[0], 1) # view is similar to reshape it simply sets the desired shape to (100, 1)
print(y.shape)
print(y.dtype)
print(x.dtype)
n_samples, n_features = X.shape
```
## 1) Creating the linear model
You can do this using a the `nn.Linear`. This corresponds to multiplying with a matrix of beta coefficients (or weights in a neural network sense) and adding a bias.
> Actually if you are very pedantic it is not really a linear transformation but an *affine* transformation, but it corresponds to what we think of as linear (regression).
```
from torch import nn
# 1) Model
# Linear model f = wx + b
input_size = n_features
output_size = 1
model = nn.Linear(input_size, output_size) # a linear layer
```
# 2) Loss and optimizer
Here we will create the optimizer, note we use `model.parameters` to get a list to get all of the parameters of the model.
We also use the `MSELoss()` as our criterion to minimize. It is simply the mean squared error (MSE) which you are used to from regression.
```
learning_rate = 0.01 # feel free to change this
print(list(model.parameters())) # only two parameters a beta and an intercept
criterion = nn.MSELoss() # mean squared error - sum(diff between predicted values and the actual values)^2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
```
## 3) Training loop
Well this is essentially the same as before:
```
epochs = 500 # how many times we want to repeat it
for epoch in range(epochs):
# Forward pass / calc predicted y
y_predicted = model(X)
# calucate loss / MSE # optimise by minimising MSE
loss = criterion(y_predicted, y)
# Backward pass / gradient and update
loss.backward()
optimizer.step()
# zero grad before new step
optimizer.zero_grad()
# some print to see that it is running
if (epoch+1) % 100 == 0:
print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')
# Plot
predicted = model(X).detach().numpy()
plt.plot(X_numpy, y_numpy, 'ro')
plt.plot(X_numpy, predicted, 'b')
plt.show()
```
<details>
<summary> Hmm this seems like something we should do in a class? (or why didn't you introduce nn.modules?!) </summary>
You are completely right it does work much better as a class, but maybe slightly overdoing it for the task at hand, however if you are familiar with pytorch or deep learning you might find it more convenient to create a model class. We will look at this next time as well, but in pytorch this looks something like this for the linear regression:
```py
class LinearModel(nn.Module):
    """Single linear (affine) layer: y = Wx + b, i.e. a linear regression."""

    def __init__(self, n_input_features):
        # BUG FIX: the original called super(Model, self).__init__() but the
        # class is named LinearModel, so construction raised a NameError.
        # The zero-argument super() form is the idiomatic Python 3 spelling.
        super().__init__()
        # one linear layer mapping n_input_features -> 1 output
        self.linear = nn.Linear(n_input_features, 1)

    def forward(self, x):
        """Apply the affine transform to x and return the prediction."""
        return self.linear(x)
```
You can the use this model the same way as before:
```
model = LinearModel(n_input_features = 1) # create model
y = model.forward(x)
# same af before you can now calculate the loss and optimize in a loop
```
It is a little overkill for the exercise, but it might be nice getting used to the abstraction.
</details>
<br />
# Task: Logistic Regression
---
create a logistic regression, fitted either to your text features or (maybe easier to start with) the following test data.
Things which might be useful to know:
Torch do have a binary cross entropy loss function (`torch.nn.BCELoss`) and a sigmoid function (`torch.sigmoid`).
If you find it hard to start it might be worth debating with your studygroup
- what the difference is between linear regression and logistic regression
- How many input features how many output?
- Try to draw it as a 1-layer neural network
> **Bonus**: The end of the chapter introduced the multinomial logistic regression, it is actually surprisingly easy to do in pytorch, can you implement it? (*Hint*: pytorch does have a softmax function) The chapter also introduces (L1 and) L2 regularization for logistic regression, can you add that to your model as well?
10 input features, 1 outputs (either 1 or 0)
```
# 0) prepare data
#!pip install sklearn
from sklearn import datasets
import matplotlib.pyplot as plt

# 0) Prepare data
X_numpy, y_numpy = datasets.make_classification(n_samples=1000, n_features=10, random_state=7)

# plot the sample
plt.plot(X_numpy, y_numpy, 'ro')
plt.show()

# cast to float Tensor
X = torch.tensor(X_numpy, dtype=torch.float)
y = torch.tensor(y_numpy, dtype=torch.float)
y = y.view(y.shape[0], 1) # reshape y to (1000, 1) so it can be used with nn.Linear
print(y.shape)
print(y.dtype)
# BUG FIX: the original printed x.dtype (an unrelated variable from an
# earlier cell); inspect the design matrix X instead.
print(X.dtype)
n_samples, n_features = X.shape
print(X_numpy.shape)

# only plotting 2 out of 10 features
plt.scatter(X_numpy[:, 0], X_numpy[:, 1], marker='o', c=y_numpy,
            s=25, edgecolor='k')

# 1) create model
from torch import nn
# 1) Linear model
input_size = n_features  # 10 features
output_size = 1  # one output which tells us the prob
model = nn.Linear(input_size, output_size)  # a linear layer

# 2) optimisation and loss
learning_rate = 0.05  # feel free to change this
print(list(model.parameters()))  # the weight vector (10 betas) and an intercept
criterion = torch.nn.BCELoss()  # binary cross entropy
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# 3) training loop
epochs = 500  # how many times we want to repeat it
for epoch in range(epochs):
    # Forward pass: sigmoid squashes the linear output into a probability
    y_predicted = torch.sigmoid(model(X))
    # calculate loss / binary cross entropy
    loss = criterion(y_predicted, y)
    # Backward pass / gradient and update
    loss.backward()
    optimizer.step()
    # zero grad before new step
    optimizer.zero_grad()
    # some print to see that it is running
    if (epoch + 1) % 100 == 0:
        print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')

# BUG FIX: `predicted` was never defined in this cell, so the final scatter
# raised a NameError; derive hard class labels from the fitted model first.
predicted = (torch.sigmoid(model(X)) > 0.5).float().detach().numpy().squeeze()

# only plotting 2 out of 10 features, coloured by the model's predictions
plt.scatter(X_numpy[:, 0], X_numpy[:, 1], marker='o', c=predicted,
            s=25, edgecolor='k')
```
| github_jupyter |
# 0.0 Notebook Template
--*Set the notebook number, describe the background of the project, the nature of the data, and what analyses will be performed.*--
## Jupyter Extensions
Load [watermark](https://github.com/rasbt/watermark) to see the state of the machine and environment that's running the notebook. To make sense of the options, take a look at the [usage](https://github.com/rasbt/watermark#usage) section of the readme.
```
# Load `watermark` extension
%load_ext watermark
# Display the status of the machine and packages. Add more as necessary.
%watermark -v -n -m -g -b -t -p numpy,pandas,matplotlib,seaborn
```
Load [autoreload](https://ipython.org/ipython-doc/3/config/extensions/autoreload.html) which will always reload modules marked with `%aimport`.
This behavior can be inverted by running `autoreload 2` which will set everything to be auto-reloaded *except* for modules marked with `%aimport`.
```
# Load `autoreload` extension
%load_ext autoreload
# Set autoreload behavior
%autoreload 1
```
Load `matplotlib` in one of the more `jupyter`-friendly [rich-output modes](https://ipython.readthedocs.io/en/stable/interactive/plotting.html). Some options (that may or may not have worked) are `inline`, `notebook`, and `gtk`.
```
# Set the matplotlib mode
%matplotlib inline
```
## Imports
Static imports that shouldn't necessarily change throughout the notebook.
```
# Standard library imports
import logging
# Third party
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# tqdm.tqdm wraps generators and displays a progress bar:
# `for i in tqdm(range(10)): ...`
from tqdm import tqdm
```
Local imports that may or may not be autoreloaded. This section contains things that will likely have to be re-imported multiple times, and have additions or subtractions made throughout the project.
```
# Constants to be used throughout the package
%aimport {{ cookiecutter.import_name }}.constants
# Import the data subdirectories
from {{ cookiecutter.import_name }}.constants import DIR_DATA_EXT, DIR_DATA_INT, DIR_DATA_PROC, DIR_DATA_RAW
# Utility functions
%aimport {{ cookiecutter.import_name }}.utils
from {{ cookiecutter.import_name }}.utils import setup_logging
```
## Initial Setup
Set [seaborn defaults](https://seaborn.pydata.org/generated/seaborn.set.html) for matplotlib.
```
sns.set()
```
Set up the logger configuration to something more useful than baseline. Creates log files for the different log levels in the `logs` directory.
See `logging.yml` for the exact logging configuration.
```
# Run base logger setup
setup_logging()
# Define a logger object
logger = logging.getLogger("{{ cookiecutter.import_name }}")
```
## Global Definitions
```
# data_str = "" # Data filename
# data_path = DIR_DATA_RAW / data_str # Full path to the data
```
## Get the Data
```
# data = pd.read_csv(str(data_path), delim_whitespace=False, index_col=0)
# logger.info("Loaded dataset '{0}' from '{1}'".format(data_path.name, data_path.parent.name))
```
## Preprocessing
```
# data_norm = (data - data.mean()) / data.std()
# logger.info("Processed data '{0}'".format(data_path.stem))
```
## Plotting
```
# [plt.plot(data_norm[i,:]) for i in range(len(data_norm))]
# plt.show()
# ...
# ...
# ...
```
## Hints
Various hints for working on `jupyter notebooks`. Should probably be removed when a notebook is completed.
General stuff:
- To make logging even lazier, set `print = logger.info`, and then `print` away!
- The `!` can be used to run shell commands from within the notebook (ex. `!which conda`)
- Use `assert` liberally - this isn't a script and it's very readable.
Cheatsheets:
- [Markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)
| github_jupyter |
## In the product wheel, we are trying to use the transition sheets loss as our standard to reduce the cost
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
```
## Importing the first dataframe: ordercolor transition report
```
df1 = pd.read_excel('../data/OrderColorTransitionsReport 1_1_17 - 4_22_19.xlsx',
header=205, usecols=[0,1,4,5,7,8,9,11,12])
df1
## take a look at the datatype
df1.dtypes
## delete , and convert the object to int64
df1['AGrade Sheets'] = pd.to_numeric(df1['AGrade Sheets'].str.replace(',', ''))
df1['Transition Sheets'] = pd.to_numeric(df1['Transition Sheets'].str.replace(',', ''))
df1['Total sheets'] = pd.to_numeric(df1['Total Sheets'].str.replace(',', ''))
## convert the NaN to 0
df1 = df1.fillna(0)
df1
```
## Transition loss for df1
```
ndf1 = df1.groupby(['Color','Next Color Code']).agg({'Transition Sheets':
['mean', 'std', 'min', 'max', 'count'],'AGrade Sheets':['sum']})
ndf1.columns = ['ts_mean', 'ts_std', 'ts_min', 'ts_max', 't_count','AGS_sum']
ndf1 = ndf1.reset_index()
ndf1
for i in range(len(ndf1)):
for n in range(len(ndf1)):
if ndf1.iloc[i,0] == ndf1.iloc[n,1] and ndf1.iloc[i,1] == ndf1.iloc[n,0]:
print(i,n)
```
# Data cleaning
1. For the unusual color code
```
## To see how many unique color in the color and next color code.
print(len(df1['Color'].unique()))
print(len(df1['Next Color Code'].unique()))
## to see the unusaul color code in the data
print(list(set(df1['Color'].unique()) - set(df1['Next Color Code'].unique())))
print(list(set(df1['Next Color Code'].unique()) - set(df1['Color'].unique())))
uc = ['DP', 'dv']
df1.loc[df1['Next Color Code'].isin(['DP', 'dv'])]
## Find the unsual color code in the data frame and delete it
len(df1)
newdf1 = df1.set_index('Next Color Code')
newdf1 = newdf1.drop(['DP', 'dv'], axis=0)
newdf2 = df1.set_index('Next Color Code')
newdf2 = newdf2.drop(list(set(df1['Next Color Code'].unique()) - set(df1['Color'].unique())), axis=0)
len(newdf1)
```
Calculating the # of different cast orders in unique color
```
## Creating a new dataframe without the unusual color code named newdf1
## define a function to delete the unusual data
def delete_unusualcolor(path):
    """Load the transition report at ``path`` and drop rows whose
    'Next Color Code' never appears in the 'Color' column.

    Prints a short summary of the raw data and returns the cleaned frame.

    BUG FIX: the original built the cleaned frame but never returned it,
    so every caller received None.
    """
    df1 = pd.read_excel(path,
                        header=205, usecols=[0, 1, 4, 5, 7, 8, 9, 11, 12])
    print('the length of df1 is', len(df1))
    print('the number of unique color', len(df1['Color'].unique()))
    print('the number of unique next color', len(df1['Next Color Code'].unique()))
    print('the unique color exists in the color',
          (set(df1['Color'].unique()) - set(df1['Next Color Code'].unique())))
    print('the unique color exists in the next color',
          list(set(df1['Next Color Code'].unique()) - set(df1['Color'].unique())))
    # Index by 'Next Color Code' so the unusual codes can be dropped by label.
    newdf1 = df1.set_index('Next Color Code')
    newdf1 = newdf1.drop(list(set(df1['Next Color Code'].unique()) - set(df1['Color'].unique())), axis=0)
    print('the length of newdf1 is', len(newdf1))
    return newdf1
## nosetest
import pandas as pd
#import delete_unusualcolor as du
def test_delete_unusualcolor():
    """Smoke-test delete_unusualcolor against the raw transition report.

    BUG FIX: the original called the alias ``du`` although the import that
    would have defined it is commented out, raising a NameError; call the
    function by its real name instead.
    """
    path = '../data/OrderColorTransitionsReport 1_1_17 - 4_22_19.xlsx'
    delete_unusualcolor(path)
    df1 = pd.read_excel(path)
    assert df1.empty == False,'you cannot put in an empty datafrmae'
```
## Function for the average transition loss
```
for z in range(len(df1['Color'].unique())):
list1 = list(df1['Color'].unique())
color1 = list1[z]
list2 = list(df1['Next Color Code'].unique())
for z2 in range(len(df1['Next Color Code'].unique())):
color2 = list2[z2]
Select_3(color1, color2)
colors = df1['Color'].unique().tolist()
colors.sort()
colors
shape = (len(colors), len(colors))
matrix = np.zeros(shape)
for i in range(0, len(colors)):
for j in range(0, len(colors)):
value = df2['ts_mean'].loc[(df2['Color'] == colors[i]) &
(df2['Next Color Code'] == colors[j])].tolist()
matrix[i, j] = value[0]
def Select_2(color1, color2):
# NOTE(review): this definition is immediately shadowed by the second
# Select_2 defined below, so only the later one takes effect at runtime.
# It filters the module-level df1 down to color1 -> color2 transitions,
# counts distinct cast-order "cycles" (rows where column 0 changes), and
# prints the average transition-sheet loss per cycle (column index 8).
color = df1['Color'] == color1
nextcolorcode = df1 ['Next Color Code'] == color2
ndf = df1[color & nextcolorcode]
if len(ndf) == 0:
# no such transition in the data: nothing to report
return
else:
n = 0
# count boundaries between consecutive rows with different cast orders
for i in range(len(ndf)-1):
if ndf.iloc[i,0] != ndf.iloc[i+1,0]:
n = n + 1
print (color1, color2, n + 1)
tl = 0
# accumulate transition sheets (column index 8 of the filtered frame)
for i in range(len(ndf)):
tl = tl + int(ndf.iloc[i,8])
# average loss per cycle
tl = tl/(n+1)
print (color1, color2, 'cycles:', n + 1, 'average transition sheet loss:', tl)
def Select_2(color1, color2):
# Effective definition of Select_2 (replaces the one above).
# Reports, for the color1 -> color2 transition in the module-level df1,
# the number of distinct cast-order cycles and the average
# transition-sheet loss per cycle.
# NOTE(review): relies on the global df1 and on positional columns
# (iloc column 0 = cast order, column 8 = transition sheets) — confirm
# those positions match the usecols selection at load time.
color = df1['Color'] == color1
nextcolorcode = df1 ['Next Color Code'] == color2
ndf = df1[color & nextcolorcode]
if len(ndf) == 0:
# transition never occurs: print nothing
return
else:
n = 0
# each change in column 0 between consecutive rows starts a new cycle
for i in range(len(ndf)-1):
if ndf.iloc[i,0] != ndf.iloc[i+1,0]:
n = n + 1
tl = 0
# total transition sheets across all matching rows
for i in range(len(ndf)):
tl = tl + int(ndf.iloc[i,8])
# average sheets lost per cycle
tl = tl/(n+1)
print (color1, color2, 'cycles:', n + 1, 'average transition sheet loss:', tl)
```
Telling how many cast order do you have for each color code
```
for z in range(len(df1['Color'].unique())):
list1 = list(df1['Color'].unique())
color1 = list1[z]
list2 = list(df1['Next Color Code'].unique())
for z2 in range(len(df1['Next Color Code'].unique())):
color2 = list2[z2]
Select_2(color1, color2)
Select('1M' , 'CW')
```
### 2. For the fail rate
```
print (df1['AGrade Sheets'].isnull())
```
The total number of number of failing sheets
```
print ('total number of our data is', len(df1))
print ('the fail sheets number is', (df1['AGrade Sheets'].isnull().sum()))
```
## data collecting
### take a look at the df2 and pick up the most frequently used color code
### Sorting the dataframe
1. 10 most demanding color
2. 10 highest transition cost
3. 10 highest sales rank
4. 10 most frequent sequence
5. 10 highest sub family number
6. 10 biggest mean transition loss
```
##Importing the second dataframe: variable cost per sheet
df2 = df_desc = pd.read_excel('../data/Variable cost per sheet.xlsx',
header=3)
df2.dtypes
## we need to deal with the NaN and , in the dataframe
df2 = df2.fillna(0)
df2['CC DIM'] = [x.replace('1/2', '').replace('1/4', '').replace('3/4', '') for x in df_desc['CC DIM']]
df2.drop_duplicates(inplace=True)
df2
## define a function for sorting the data by colums and rank from high to low.
##and you can adjust the n to see the number of rows you want
def sort_hightolow(df2, colums, n):
    """Sort ``df2`` by column *colums* in descending order and return the first *n* rows."""
    ranked = df2.sort_values(by=[colums], ascending=False)
    return ranked.head(n=n)
## define a function for sorting the data by colums and rank from low to high
def sort_lowtohigh(df2, colums, n=5):
    """Return the n rows of df2 with the smallest values in column `colums`.

    Bug fix: `n` used to be read from an undefined global (raising
    NameError unless a global `n` happened to exist); it is now an
    explicit parameter with a default, mirroring sort_hightolow.
    """
    df2_demanding = df2.sort_values(by=[colums], ascending=True)
    return df2_demanding.head(n=n)
```
Questions:
#### 1. How to find the relationship between the color and the family subfamily?
2. what are VCOM & TCOM
3. which parameters are more important??
4. How many color codes should we look at? Answer: roughly 20 - 30
5. how should we collect if the number is close to each other?
## Joining the two datasets to obtain the average sheet loss for each family
1. combining two dataframes
```
df2.loc[df2['CC DIM'] == 'AN1/2']
len(df1)
print(len(df1['Color'].unique()))
family_df
len(df1)
# remove gauge strings from data file and set series values
products = [x.replace('1/2', '').replace('1/4', '').replace('3/4', '') for x in df_desc['CC DIM']]
families = df2['Family']
sub_families = df2['Sub Family']
colors = df2['Color']
# create new dataframe and drop duplicates (repeat values for lines 1 and 2)
family_df = pd.DataFrame([products, families, sub_families, colors]).T
family_df.columns = ['Color','family','sub_family','pigment']
family_df.drop_duplicates(inplace=True)
# inner merge with main dataframe
dff = pd.merge(df1, family_df, how='inner', on='Color')
# Rename the key column so the same lookup table can be merged a second
# time against 'Next Color Code', then restore the original names.
family_df.columns = ['Next Color Code','family','sub_family','pigment']
dff = pd.merge(dff, family_df, how='inner', on='Next Color Code')
family_df.columns = ['Color','family','sub_family','pigment']
dff
print(len(dff['family_x'].unique()))
print(len(dff['sub_family_x'].unique()))
# Aggregate transition stats per (Color, Next Color Code) pair.
dff2 = dff.groupby(['Color','Next Color Code']).agg({'Transition Sheets':
    ['mean', 'std', 'min', 'max', 'count'],'AGrade Sheets':['sum']})
dff2
# Flatten the MultiIndex columns into plain names.
dff2.columns = ['ts_mean', 'ts_std', 'ts_min', 'ts_max', 't_count','AGS_sum']
dff2 = dff2.reset_index()
dff2
```
Obtaining the 30 most-produced products
```
# Top 30 transitions by total A-grade sheet output.
ndf = sort_hightolow(dff2, 'AGS_sum', 30)
ndf
print('Number of unique Colors:', len(ndf['Color'].unique()))
print('Number of unique Next Colors:', len(ndf['Next Color Code'].unique()))
# Codes appearing only as a source, or only as a destination, in the top 30.
print(list(set(ndf['Color'].unique()) - set(ndf['Next Color Code'].unique())))
print(list(set(ndf['Next Color Code'].unique()) - set(ndf['Color'].unique())))
len(dff2)
```
Searching for the relationship between the color code and the transition sheets.
```
# Look for symmetric transitions (A->B and B->A) whose column-7 values
# match; each matching pair is printed twice (once from each direction).
# NOTE(review): after reset_index, column 7 of dff2 is AGS_sum -- confirm
# that is the intended comparison column rather than t_count (column 6).
for i in range(len(dff2)):
    for n in range(len(dff2)):
        if dff2.iloc[i,0] == dff2.iloc[n,1] and dff2.iloc[i,1] == dff2.iloc[n,0]:
            if dff2.iloc[i,7] == dff2.iloc[n,7]:
                print(dff2.iloc[i,0],dff2.iloc[i,1], dff2.iloc[i,7])
                print(dff2.iloc[n,0], dff2.iloc[n,1],dff2.iloc[n,7])
# Colors that only ever appear on one side of a transition.
print(list(set(dff['Color'].unique()) - set(dff['Next Color Code'].unique())))
print(list(set(dff['Next Color Code'].unique()) - set(dff['Color'].unique())))
```
### The mean value of transition sheet loss for different family
```
dff.groupby(['family_x', 'sub_family_x']).agg({'Transition Sheets':['mean','std','min','max','count']})
```
## creating dataframe
| github_jupyter |
# Developing an AI application
Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications.
In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below.
<img src='assets/Flowers.png' width=500px>
The project is broken down into multiple steps:
* Load and preprocess the image dataset
* Train the image classifier on your dataset
* Use the trained classifier to predict image content
We'll lead you through each part which you'll implement in Python.
When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.
First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.
```
# Imports here
import torch
import json
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import transforms, datasets
```
## Load the data
Here you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.
The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.
The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.
```
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# TODO: Define your transforms for the training, validation, and testing sets
# Training: random augmentation plus ImageNet channel normalization.
train_transform = transforms.Compose([transforms.RandomRotation(30),
                                      transforms.RandomResizedCrop(224),
                                      transforms.RandomHorizontalFlip(),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])
# Validation/testing: deterministic resize + center crop, same normalization.
test_transform = transforms.Compose([transforms.Resize(224),
                                     transforms.CenterCrop(224),
                                     transforms.ToTensor(),
                                     transforms.Normalize([0.485, 0.456, 0.406],
                                                          [0.229, 0.224, 0.225])])
# TODO: Load the datasets with ImageFolder
train_dataset = datasets.ImageFolder(train_dir, transform = train_transform)
valid_dataset = datasets.ImageFolder(valid_dir, transform = test_transform)
test_dataset = datasets.ImageFolder(test_dir, transform = test_transform)
# TODO: Using the image datasets and the trainforms, define the dataloaders
# Only the training loader shuffles; evaluation order does not matter.
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size = 64, shuffle = True)
valid_dataloader = torch.utils.data.DataLoader(valid_dataset, batch_size = 64)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size = 64)
```
### Label mapping
You'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.
```
# Map the dataset's integer-encoded categories to flower names.
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
```
# Building and training the classifier
Now that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.
We're going to leave this part up to you. Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:
* Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)
* Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
* Train the classifier layers using backpropagation using the pre-trained network to get the features
* Track the loss and accuracy on the validation set to determine the best hyperparameters
We've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!
When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.
One last important tip if you're using the workspace to run your code: To avoid having your workspace disconnect during the long-running tasks in this notebook, please read in the earlier page in this lesson called Intro to
GPU Workspaces about Keeping Your Session Active. You'll want to include code from the workspace_utils.py module.
**Note for Workspace users:** If your network is over 1 GB when saved as a checkpoint, there might be issues with saving backups in your workspace. Typically this happens with wide dense layers after the convolutional layers. If your saved checkpoint is larger than 1 GB (you can open a terminal and check with `ls -lh`), you should reduce the size of your hidden layers and train again.
```
# TODO: Build and train your network
# Decision made was to use a pre-trained network
# Import first
from torchvision import models
# Load the VGG architecture (weights are loaded manually below)
model = models.vgg11()
model
# Load the pre-trained state, downloaded from the internet
state_dict = torch.load('vgg11-bbd30ac9.pth')
# Load to the model created
model.load_state_dict(state_dict)
# Freeze the feature-extractor parameters so only the new classifier trains
for param in model.parameters():
    param.requires_grad = False
# The stock classifier is a 25088-in, 1000-out fully connected network.
# Our application needs 25088 in and 102 out, so replace it.
ClassifierNew = nn.Sequential(nn.Linear(25088, 2048),
                              nn.ReLU(),
                              nn.Dropout(0.2),
                              nn.Linear(2048, 1024),
                              nn.ReLU(),
                              nn.Dropout(0.2),
                              nn.Linear(1024, 512),
                              nn.ReLU(),
                              nn.Dropout(0.2),
                              nn.Linear(512, 102),
                              nn.LogSoftmax(dim = 1))
# And replace the original Classifier
model.classifier = ClassifierNew
# Now define the loss function and optimizer (NLLLoss pairs with LogSoftmax)
criterion = nn.NLLLoss()
# Keep in mind that we only tune the classifier's parameters
optimizer = optim.Adam(model.classifier.parameters(), lr = 0.003)
# Now some prepare work before training. Define GPU usage
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("The training will be run in: ", device)
# Move model to device
model.to(device)
# Start training
epoch = 10
running_loss = 0
for e in range(epoch):
    for images, labels in train_dataloader:
        # Send data to device
        images, labels = images.to(device), labels.to(device)
        # Flatten the image
        # images = images.view(images.shape[0], -1)
        optimizer.zero_grad()
        logps = model.forward(images)
        loss = criterion(logps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        # For each epoch, print the result
        # (for/else with no break: this branch always runs once per epoch)
        accuracy = 0
        valid_loss = 0
        # Update to evaluation mode (disables dropout for validation)
        model.eval()
        with torch.no_grad():
            for images, labels in valid_dataloader:
                # Send data to device
                images, labels = images.to(device), labels.to(device)
                # Flatten the image
                # images = images.view(images.shape[0], -1)
                logps = model.forward(images)
                loss = criterion(logps, labels)
                valid_loss += loss.item()
                # Calculate the accuracy: count how often the top class matches
                ps = torch.exp(logps)
                top_p, top_class = ps.topk(1, dim = 1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
            else:
                print("Epoch: ", e + 1, "/", epoch)
                print(" Loss for train: ", running_loss/len(train_dataloader))
                print(" Loss for valid: ", valid_loss/len(valid_dataloader))
                print(" Accuracy for valid: ", (accuracy/len(valid_dataloader))* 100, "%")
        # Reset statistic for next epoch
        running_loss = 0
        # Turn model back to train
        model.train()
```
## Testing your network
It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well.
```
# TODO: Do validation on the test set
# Same procedure as the per-epoch validation pass, but over test_dataloader.
accuracy = 0
test_loss = 0
# Update to evaluation mode (disables dropout)
model.eval()
with torch.no_grad():
    for images, labels in test_dataloader:
        # Send data to device
        images, labels = images.to(device), labels.to(device)
        # Flatten the image
        # images = images.view(images.shape[0], -1)
        logps = model.forward(images)
        loss = criterion(logps, labels)
        test_loss += loss.item()
        # Calculate the accuracy: count how often the top class matches
        ps = torch.exp(logps)
        top_p, top_class = ps.topk(1, dim = 1)
        equals = top_class == labels.view(*top_class.shape)
        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
    else:
        print(" Loss for test: ", test_loss/len(test_dataloader))
        print(" Accuracy for test: ", (accuracy/len(test_dataloader))* 100, "%")
```
## Save the checkpoint
Now that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.
```model.class_to_idx = image_datasets['train'].class_to_idx```
Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.
```
# TODO: Save the checkpoint
# Preserve the class-to-index mapping on the model for later inference
model.class_to_idx = train_dataset.class_to_idx
# Construct a dict with everything needed to rebuild the model and
# resume training (weights, classifier architecture, optimizer state).
checkpoint = {'state_dict': model.state_dict(),
              'classifier': model.classifier,
              'classifier.state_dict': model.classifier.state_dict(),
              'class_to_idx': model.class_to_idx,
              'optimizer_state_dict': optimizer.state_dict()}
# Save
torch.save(checkpoint, 'checkpoint.pth')
```
## Loading the checkpoint
At this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.
```
# TODO: Write a function that loads a checkpoint and rebuilds the model
def rebuild_model(pth_path):
model_reload = models.vgg11()
checkpoint_load = torch.load(pth_path)
model_reload.classifier = checkpoint_load['classifier']
model_reload.load_state_dict(checkpoint_load['state_dict'])
model_reload.classifier.load_state_dict(checkpoint_load['classifier.state_dict'])
model_reload.class_to_idx = checkpoint_load['class_to_idx']
optimizer_reload = optim.Adam(model_reload.classifier.parameters(), lr = 0.003)
optimizer_reload.load_state_dict(checkpoint_load['optimizer_state_dict'])
return model_reload, optimizer_reload
model_reload, optimizer_reload = rebuild_model('checkpoint.pth')
model_reload
```
# Inference for classification
Now you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like
```python
probs, classes = predict(image_path, model)
print(probs)
print(classes)
> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
> ['70', '3', '45', '62', '55']
```
First you'll need to handle processing the input image such that it can be used in your network.
## Image Preprocessing
You'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training.
First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.
Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.
As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation.
And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.
```
# Import required tools
import PIL
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import numpy
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
    returns a FloatTensor in CxHxW layout.
    '''
    pil_img = Image.open(image)
    # Resize so the shorter side becomes 256, preserving the aspect ratio.
    if pil_img.width > pil_img.height:
        new_h = 256
        new_w = int(256 * pil_img.width / pil_img.height)
    else:
        new_w = 256
        new_h = int(256 * pil_img.height / pil_img.width)
    resized = pil_img.resize((new_w, new_h))
    # Crop the central 224x224 region.
    crop_box = ((new_w - 224)/2, (new_h - 224)/2,
                (new_w + 224)/2, (new_h + 224)/2)
    resized = resized.crop(crop_box)
    # Scale channel values from 0-255 ints down to 0-1 floats.
    arr = np.array(resized) / 255
    # Normalize with the ImageNet channel means and standard deviations.
    arr = (arr - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
    # Move the color channel first (HxWxC -> CxHxW) as PyTorch expects.
    arr = arr.transpose(2,0,1)
    # Hand back a float tensor ready for the network.
    torch_img = torch.from_numpy(arr)
    return torch_img.type(torch.FloatTensor)
```
To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).
```
def imshow(image, ax=None, title=None):
    """Imshow for Tensor: undo the ImageNet normalization and display."""
    if ax is None:
        fig, ax = plt.subplots()
    # PyTorch stores channels first; matplotlib wants them last.
    data = image.numpy().transpose((1, 2, 0))
    # Reverse the normalization applied during preprocessing.
    data = np.array([0.229, 0.224, 0.225]) * data + np.array([0.485, 0.456, 0.406])
    # Clip to the displayable [0, 1] range so it doesn't render as noise.
    ax.imshow(np.clip(data, 0, 1))
    return ax
%matplotlib inline
# Round-trip check: preprocessing then un-normalizing should reproduce the
# (cropped) original image.
test_img_path = "flowers/test/1/image_06754.jpg"
img = process_image(test_img_path)
imshow(img)
```
## Class Prediction
Once you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.
To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.
Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.
```python
probs, classes = predict(image_path, model)
print(probs)
print(classes)
> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
> ['70', '3', '45', '62', '55']
```
```
from torch.autograd import Variable
def predict(image_path, model, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Args:
        image_path: path to the image file to classify.
        model: trained network, already moved to the global `device`.
        topk: number of top classes to return.

    Returns:
        (top_p, top_class): tensors with the topk probabilities and the
        corresponding class indices (model index space, not folder labels).
    '''
    # load and preprocess the image
    img = process_image(image_path)
    # add a fake batch dimension and send to the same device as the model;
    # torch.autograd.Variable is deprecated since torch 0.4 -- plain
    # tensors work directly, so the Variable wrapper was removed.
    img = img.unsqueeze(0)
    img = img.to(device)
    # forward pass without tracking gradients
    # NOTE(review): caller is expected to have put the model in eval()
    # mode beforehand; confirm, otherwise dropout stays active here.
    with torch.no_grad():
        logps = model(img)
        ps = torch.exp(logps)
        top_p, top_class = ps.topk(topk, dim = 1)
    return top_p, top_class
```
## Sanity Checking
Now that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:
<img src='assets/inference_example.png' width=300px>
You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.
```
# TODO: Display an image along with the top 5 classes
model_reload.to(device)
real_img_path = './real_test.jpg'
probs, classes = predict(real_img_path, model_reload)
# reverse the class-to-index map (model index -> dataset folder label)
inv_map = {v: k for k, v in model_reload.class_to_idx.items()}
# Move results back to cpu before converting to numpy
probs_cpu = probs.cpu()
classes_cpu = classes.cpu()
# Show the image
real_img = process_image(real_img_path)
imshow(real_img)
# Translate each predicted index into a human-readable flower name.
class_name = []
for i in np.nditer(classes_cpu.numpy()):
    class_name.append(cat_to_name[inv_map.get(int(i))])
plt.bar(class_name, probs_cpu.view(5))
plt.show()
```
| github_jupyter |
```
############## PLEASE RUN THIS CELL FIRST! ###################
# import everything and define a test runner function
from importlib import reload
from helper import run
import bloomfilter, network
# Example Bloom Filter: one item, one hash function over a 10-bit field.
from helper import hash256
bit_field_size = 10
bit_field = [0] * bit_field_size
h256 = hash256(b'hello world')
# Reduce the 256-bit hash to one of the bit_field_size positions.
bit = int.from_bytes(h256, 'big') % bit_field_size
bit_field[bit] = 1
print(bit_field)
# Example Bloom Filter 2: two items, one hash function each.
from helper import hash256
bit_field_size = 10
bit_field = [0] * bit_field_size
h = hash256(b'hello world')
bit = int.from_bytes(h, 'big') % bit_field_size
bit_field[bit] = 1
h = hash256(b'goodbye')
bit = int.from_bytes(h, 'big') % bit_field_size
bit_field[bit] = 1
print(bit_field)
# Example Bloom Filter 3: two items, two hash functions each (hash256 and
# hash160), which lowers the false-positive rate.
from helper import hash160, hash256
bit_field_size = 10
bit_field = [0] * bit_field_size
phrase1 = b'hello world'
h1 = hash256(phrase1)
bit1 = int.from_bytes(h1, 'big') % bit_field_size
bit_field[bit1] = 1
h2 = hash160(phrase1)
bit2 = int.from_bytes(h2, 'big') % bit_field_size
bit_field[bit2] = 1
phrase2 = b'goodbye'
h1 = hash256(phrase2)
bit1 = int.from_bytes(h1, 'big') % bit_field_size
bit_field[bit1] = 1
h2 = hash160(phrase2)
bit2 = int.from_bytes(h2, 'big') % bit_field_size
bit_field[bit2] = 1
print(bit_field)
# Example BIP0037 Bloom Filter: murmur3 with per-function seeds.
from helper import murmur3
from bloomfilter import BIP37_CONSTANT
field_size = 2
num_functions = 2
tweak = 42
# The filter is field_size bytes, i.e. 8 bits per byte.
bit_field_size = field_size * 8
bit_field = [0] * bit_field_size
for phrase in (b'hello world', b'goodbye'):
    for i in range(num_functions):
        # BIP0037: the seed for hash function i is i*BIP37_CONSTANT + tweak
        seed = i * BIP37_CONSTANT + tweak
        h = murmur3(phrase, seed=seed)
        bit = h % bit_field_size
        bit_field[bit] = 1
print(bit_field)
```
### Exercise 1
Given a Bloom Filter with these parameters: size=10, function count=5, tweak=99, which bits are set after adding these items?
* `b'Hello World'`
* `b'Goodbye!'`
```
# Exercise 1
# Build a BIP0037 bloom filter by hand and print its byte serialization.
from bloomfilter import BIP37_CONSTANT
from helper import murmur3, bit_field_to_bytes
field_size = 10
function_count = 5
tweak = 99
items = (b'Hello World', b'Goodbye!')
# bit_field_size is 8 * field_size
bit_field_size = field_size * 8
# create a bit field with the appropriate size
bit_field = [0] * bit_field_size
# for each item you want to add to the filter
for item in items:
    # iterate function_count number of times
    for i in range(function_count):
        # BIP0037 spec seed is i*BIP37_CONSTANT + tweak
        seed = i * BIP37_CONSTANT + tweak
        # get the murmur3 hash given that seed
        h = murmur3(item, seed=seed)
        # set the bit to be h mod the bit_field_size
        bit = h % bit_field_size
        # set the bit_field at the index bit to be 1
        bit_field[bit] = 1
# print the bit field converted to bytes using bit_field_to_bytes in hex
print(bit_field_to_bytes(bit_field).hex())
```
### Exercise 2
#### Make [this test](/edit/session8/bloomfilter.py) pass: `bloomfilter.py:BloomFilterTest:test_add`
```
# Exercise 2
# Re-import the module so edits to bloomfilter.py are picked up, then run the test.
reload(bloomfilter)
run(bloomfilter.BloomFilterTest('test_add'))
```
### Exercise 3
#### Make [this test](/edit/session8/bloomfilter.py) pass: `bloomfilter.py:BloomFilterTest:test_filterload`
```
# Exercise 3
# Re-import the module so edits to bloomfilter.py are picked up, then run the test.
reload(bloomfilter)
run(bloomfilter.BloomFilterTest('test_filterload'))
```
### Exercise 4
Do the following:
* Connect to a testnet node
* Load a filter for your testnet address
* Send a request for transactions from the block which had your previous testnet transaction
* Receive the merkleblock and tx messages.
```
# Exercise 4
# Connect to a testnet node, load a bloom filter for our address, request a
# filtered block, and verify the merkle proof plus each matched transaction.
from bloomfilter import BloomFilter
from ecc import PrivateKey
from helper import decode_base58, hash256, little_endian_to_int
from merkleblock import MerkleBlock
from network import SimpleNode, GetDataMessage, FILTERED_BLOCK_DATA_TYPE
from tx import Tx
# Block known to contain a transaction paying our testnet address.
block_hash = bytes.fromhex('00000000000129fc37fde810db09f033014e501595f8560dcdb2e86756986ee3')
passphrase = b'Jimmy Song'
# Deterministic private key derived from the passphrase.
secret = little_endian_to_int(hash256(passphrase))
private_key = PrivateKey(secret=secret)
addr = private_key.point.address(testnet=True)
print(addr)
filter_size = 30
filter_num_functions = 5
filter_tweak = 90210
# get the hash160 of the address using decode_base58
h160 = decode_base58(addr)
# create a bloom filter using the filter_size, filter_num_functions and filter_tweak above
bf = BloomFilter(filter_size, filter_num_functions, filter_tweak)
# add the h160 to the bloom filter
bf.add(h160)
# connect to testnet.programmingbitcoin.com in testnet mode
node = SimpleNode('testnet.programmingbitcoin.com', testnet=True)
# complete the handshake
node.handshake()
# send the filterload message
node.send(bf.filterload())
# create a getdata message
getdata = GetDataMessage()
# add_data (FILTERED_BLOCK_DATA_TYPE, block_hash) to request the block
getdata.add_data(FILTERED_BLOCK_DATA_TYPE, block_hash)
# send the getdata message
node.send(getdata)
# wait for the merkleblock command
mb = node.wait_for(MerkleBlock)
# check that the merkle block's hash is the same as the block hash
if mb.hash() != block_hash:
    raise RuntimeError('Wrong Merkle Block')
# check that the merkle block is valid
if not mb.is_valid():
    raise RuntimeError('Invalid Merkle Block')
# loop through the tx hashes we are expecting using proved_txs
for tx_hash in mb.proved_txs():
    # wait for the tx command
    tx_obj = node.wait_for(Tx)
    # check that the tx hash is the same
    if tx_obj.hash() != tx_hash:
        raise RuntimeError('Wrong transaction')
    # print the transaction serialization in hex
    print(tx_obj.serialize().hex())
```
### Exercise 5
#### Make [this test](/edit/session8/network.py) pass: `network.py:SimpleNodeTest:test_get_filtered_txs`
```
# Exercise 5
# Re-import the module so edits to network.py are picked up, then run the test.
reload(network)
run(network.SimpleNodeTest('test_get_filtered_txs'))
```
### Exercise 6
You have been sent some unknown amount of testnet bitcoins to your address.
Send all of it back (minus fees) to `mqYz6JpuKukHzPg94y4XNDdPCEJrNkLQcv` using only the networking protocol.
This should be a 1 input, 1 output transaction.
Remember to turn on logging in `SimpleNode` if you need to debug
```
# Exercise 6
# Find a UTXO paying our address via filtered blocks, then build, sign and
# broadcast a 1-in/1-out transaction sending everything (minus fee) back.
from time import sleep
from block import Block
from bloomfilter import BloomFilter
from ecc import PrivateKey
from helper import decode_base58, hash160, hash256, little_endian_to_int
from merkleblock import MerkleBlock
from network import GetHeadersMessage, HeadersMessage, SimpleNode
from script import p2pkh_script
from tx import Tx, TxIn, TxOut
# Header range to scan for our incoming transaction.
start_block_hex = '000000000000011f34db8b77b66d78abcf2e242299c8aed30dd915911c4fa97f'
start_block = bytes.fromhex(start_block_hex)
end_block_hex = '000000000000000bf70f0f61df923b0ac97cc578240490dea5e9c35382f9eef0'
end_block = bytes.fromhex(end_block_hex)
passphrase = b'Jimmy Song'
# Deterministic private key derived from the passphrase.
secret = little_endian_to_int(hash256(passphrase))
private_key = PrivateKey(secret=secret)
addr = private_key.point.address(testnet=True)
h160 = decode_base58(addr)
target_address = 'mqYz6JpuKukHzPg94y4XNDdPCEJrNkLQcv'
target_h160 = decode_base58(target_address)
target_script = p2pkh_script(target_h160)
bloom_filter = BloomFilter(30, 5, 90210)
fee = 5000  # fee in satoshis
# connect to testnet.programmingbitcoin.com in testnet mode
node = SimpleNode('testnet.programmingbitcoin.com', testnet=True)
# add the h160 to the bloom filter
bloom_filter.add(h160)
# complete the handshake
node.handshake()
# send the 'filterload' message from the bloom filter
node.send(bloom_filter.filterload())
# create GetHeadersMessage with the start_block as the start_block and end_block as the end block
getheaders = GetHeadersMessage(start_block=start_block, end_block=end_block)
# send a getheaders message
node.send(getheaders)
# wait for the headers message
headers = node.wait_for(HeadersMessage)
# check that the headers are valid
if not headers.is_valid():
    raise RuntimeError
# get all the block hashes from the headers.headers array
block_hashes = [h.hash() for h in headers.headers]
# get the filtered transactions from these blocks
filtered_txs = node.get_filtered_txs(block_hashes)
# loop through each filtered transaction
for tx_obj in filtered_txs:
    # use find_utxos to get utxos that belong to our address
    utxos = tx_obj.find_utxos(addr)
    # if we have any utxos, break
    if len(utxos) > 0:
        break
# prev_tx, prev_index, prev_amount are what we get in each utxo
# NOTE(review): if no filtered tx contained a UTXO for our address, the
# next line raises (empty or unbound utxos) -- confirm that's acceptable.
prev_tx, prev_index, prev_amount = utxos[0]
# create tx_in
tx_in = TxIn(prev_tx, prev_index)
# calculate the output amount (prev_amount - fee)
output_amount = prev_amount - fee
# create tx_out
tx_out = TxOut(output_amount, target_script)
# create transaction on testnet
tx_obj = Tx(1, [tx_in], [tx_out], 0, testnet=True)
# sign the one input we have
tx_obj.sign_input(0, private_key)
# serialize and hex to see what it looks like
print(tx_obj.serialize().hex())
# send this signed transaction on the network
node.send(tx_obj)
# wait a sec so this message goes through to the other node
sleep(1)
# now check to see if the tx has been accepted using is_tx_accepted()
if node.is_tx_accepted(tx_obj):
    print('success!')
    print(tx_obj.id())
```
| github_jupyter |
```
# Update sklearn to prevent version mismatches
!pip install sklearn --upgrade
# install joblib. This will be used to save your model.
# Restart your kernel after installing
!pip install joblib
import pandas as pd
```
# Read the CSV and Perform Basic Data Cleaning
```
# Load the Kepler exoplanet dataset into a DataFrame.
df = pd.read_csv("exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
# preview the cleaned data
df.head()
```
# Select your features (columns)
```
# Set features. This will also be used as your x values.
selected_features = df[['koi_period', 'koi_time0bk', 'koi_slogg', 'koi_srad', 'koi_kepmag']]
df.head()
```
# Create a Train Test Split
Use koi_disposition for the y values
```
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from tensorflow.keras.utils import to_categorical
# X: every column except the target; y: the koi_disposition class label
X = df.drop("koi_disposition", axis=1)
y = df["koi_disposition"]
# split training and testing data (default 75/25; fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
X_train.head()
```
# Pre-processing
Scale the data using the MinMaxScaler and perform some feature selection
```
# Scale your data
from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training data ONLY, so no test-set statistics leak in.
X_scaler = MinMaxScaler().fit(X_train)
# X_scaler = StandardScaler().fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# label-encode the string class labels into integers (sorted alphabetically)
label_encoder = LabelEncoder()
label_encoder.fit(y_train)
encoded_y_train = label_encoder.transform(y_train)
encoded_y_test = label_encoder.transform(y_test)
```
# Train the Model
```
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
# Sweep odd values of k and record train/test accuracy for each.
train_scores = []
test_scores = []
for k in range(1, 20, 2):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train_scaled, encoded_y_train)
    train_score = knn.score(X_train_scaled, encoded_y_train)
    test_score = knn.score(X_test_scaled, encoded_y_test)
    train_scores.append(train_score)
    test_scores.append(test_score)
    print(f"k: {k}, Train/Test Score: {train_score:.3f}/{test_score:.3f}")
plt.plot(range(1, 20, 2), train_scores, marker='o')
plt.plot(range(1, 20, 2), test_scores, marker="x")
plt.xlabel("k neighbors")
# Both training and testing accuracies are plotted, so label it generically.
plt.ylabel("Accuracy Score")
plt.show()
# Refit with the chosen k and report final scores.
knn = KNeighborsClassifier(n_neighbors=19)
knn.fit(X_train_scaled, encoded_y_train)
predictions = knn.predict(X_test_scaled)
print(predictions)
print(f"k = 19: Training Data Score: {knn.score(X_train_scaled, encoded_y_train):.5f}")
print(f"k = 19: Testing Data Score: {knn.score(X_test_scaled, encoded_y_test):.5f}")
# Classification report
from sklearn.metrics import classification_report
# BUG FIX: LabelEncoder assigns integer labels in sorted (alphabetical) order,
# so 0 = Candidate, 1 = Confirmed, 2 = False Positive. The previous
# target_names order ("Confirmed", "False Positive", "Candidate") mislabeled
# every row of the report.
print(classification_report(encoded_y_test, predictions,
                            target_names=["Candidate", "Confirmed", "False Positive"]))
```
# Save the Model
```
import joblib
# Persist the trained KNN model so it can be reloaded without retraining.
filename = 'KNN_model.sav'
joblib.dump(knn, filename)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/lmoroney/dlaicourse/blob/master/Course%201%20-%20Part%204%20-%20Lesson%202%20-%20Notebook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Beyond Hello World, A Computer Vision Example
In the previous exercise you saw how to create a neural network that figured out the problem you were trying to solve. This gave an explicit example of learned behavior. Of course, in that instance, it was a bit of overkill because it would have been easier to write the function Y=2x-1 directly, instead of bothering with using Machine Learning to learn the relationship between X and Y for a fixed set of values, and extending that for all values.
But what about a scenario where writing rules like that is much more difficult -- for example a computer vision problem? Let's take a look at a scenario where we can recognize different items of clothing, trained from a dataset containing 10 different types.
## Start Coding
Let's start with our import of TensorFlow
```
import tensorflow as tf
# Print the TF version so results can be matched against a specific release.
print(tf.__version__)
```
The Fashion MNIST data is available directly in the tf.keras datasets API. You load it like this:
```
# Handle to the Fashion-MNIST dataset loader exposed by tf.keras.
mnist = tf.keras.datasets.fashion_mnist
```
Calling load_data on this object will give you two sets of two lists, these will be the training and testing values for the graphics that contain the clothing items and their labels.
```
# Unpack the training and test image/label arrays.
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
```
What do these values look like? Let's print a training image and a training label to see... Experiment with different indices in the array. For example, also take a look at index 42... that's a different boot than the one at index 0.
```
import numpy as np
# widen numpy's print width so a whole image row fits on one line
np.set_printoptions(linewidth=200)
import matplotlib.pyplot as plt
# Show the first training image alongside its label and raw pixel values.
plt.imshow(training_images[0])
print(training_labels[0])
print(training_images[0])
```
You'll notice that all of the values in the number are between 0 and 255. If we are training a neural network, for various reasons it's easier if we treat all values as between 0 and 1, a process called '**normalizing**'...and fortunately in Python it's easy to normalize a list like this without looping. You do it like this:
```
# Normalize pixel values from the 0-255 byte range into 0-1 floats.
training_images = training_images / 255.0
test_images = test_images / 255.0
```
Now you might be wondering why there are 2 sets...training and testing -- remember we spoke about this in the intro? The idea is to have 1 set of data for training, and then another set of data...that the model hasn't yet seen...to see how good it would be at classifying values. After all, when you're done, you're going to want to try it out with data that it hadn't previously seen!
Let's now design the model. There's quite a few new concepts here, but don't worry, you'll get the hang of them.
```
# Flatten the 2-D image, one relu hidden layer, softmax over the 10 classes.
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(128, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
```
**Sequential**: That defines a SEQUENCE of layers in the neural network
**Flatten**: Remember earlier where our images were a square, when you printed them out? Flatten just takes that square and turns it into a 1 dimensional set.
**Dense**: Adds a layer of neurons
Each layer of neurons need an **activation function** to tell them what to do. There's lots of options, but just use these for now.
**Relu** effectively means "If X>0 return X, else return 0" -- so it only passes values 0 or greater to the next layer in the network.
**Softmax** takes a set of values, and effectively picks the biggest one, so, for example, if the output of the last layer looks like [0.1, 0.1, 0.05, 0.1, 9.5, 0.1, 0.05, 0.05, 0.05], it saves you from fishing through it looking for the biggest value, and turns it into [0,0,0,0,1,0,0,0,0] -- The goal is to save a lot of coding!
The next thing to do, now the model is defined, is to actually build it. You do this by compiling it with an optimizer and loss function as before -- and then you train it by calling **model.fit**, asking it to fit your training data to your training labels -- i.e. have it figure out the relationship between the training data and its actual labels, so in future if you have data that looks like the training data, then it can make a prediction for what that data would look like.
```
# Adam optimizer; sparse cross-entropy because labels are integers, not one-hot.
model.compile(optimizer = tf.optimizers.Adam(),
              loss = 'sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5)
```
Once it's done training -- you should see an accuracy value at the end of the final epoch. It might look something like 0.9098. This tells you that your neural network is about 91% accurate in classifying the training data. I.E., it figured out a pattern match between the image and the labels that worked 91% of the time. Not great, but not bad considering it was only trained for 5 epochs and done quite quickly.
But how would it work with unseen data? That's why we have the test images. We can call model.evaluate, and pass in the two sets, and it will report back the loss for each. Let's give it a try:
```
# Report loss/accuracy on the held-out test set.
model.evaluate(test_images, test_labels)
```
For me, that returned a accuracy of about .8838, which means it was about 88% accurate. As expected it probably would not do as well with *unseen* data as it did with data it was trained on! As you go through this course, you'll look at ways to improve this.
To explore further, try the below exercises:
# Exploration Exercises
###Exercise 1:
For this first exercise run the below code: It creates a set of classifications for each of the test images, and then prints the first entry in the classifications. The output, after you run it is a list of numbers. Why do you think this is, and what do those numbers represent?
```
# Per-class probabilities for every test image; print the first one.
classifications = model.predict(test_images)
print(classifications[0])
```
Hint: try running print(test_labels[0]) -- and you'll get a 9. Does that help you understand why this list looks the way it does?
```
# The true label of the first test image, for comparison with the prediction.
print(test_labels[0])
```
### What does this list represent?
1. It's 10 random meaningless values
2. It's the first 10 classifications that the computer made
3. It's the probability that this item is each of the 10 classes
####Answer:
The correct answer is (3)
The output of the model is a list of 10 numbers. These numbers are a probability that the value being classified is the corresponding value (https://github.com/zalandoresearch/fashion-mnist#labels), i.e. the first value in the list is the probability that the image is of a '0' (T-shirt/top), the next is a '1' (Trouser) etc. Notice that they are all VERY LOW probabilities.
For the 9 (Ankle boot), the probability was in the 90's, i.e. the neural network is telling us that it's almost certainly an ankle boot.
### How do you know that this list tells you that the item is an ankle boot?
1. There's not enough information to answer that question
2. The 10th element on the list is the biggest, and the ankle boot is labelled 9
3. The ankle boot is label 9, and there are 0->9 elements in the list
####Answer
The correct answer is (2). Both the list and the labels are 0 based, so the ankle boot having label 9 means that it is the 10th of the 10 classes. The list having the 10th element being the highest value means that the Neural Network has predicted that the item it is classifying is most likely an ankle boot
##Exercise 2:
Let's now look at the layers in your model. Experiment with different values for the dense layer with 512 neurons. What different results do you get for loss, training time etc? Why do you think that's the case?
```
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
# normalize pixels to 0-1
training_images = training_images/255.0
test_images = test_images/255.0
# Exercise 2: a much wider hidden layer (1024 units) to observe the
# accuracy/training-time trade-off.
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(1024, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
###Question 1. Increase to 1024 Neurons -- What's the impact?
1. Training takes longer, but is more accurate
2. Training takes longer, but no impact on accuracy
3. Training takes the same time, but is more accurate
####Answer
The correct answer is (1) by adding more Neurons we have to do more calculations, slowing down the process, but in this case they have a good impact -- we do get more accurate. That doesn't mean it's always a case of 'more is better', you can hit the law of diminishing returns very quickly!
##Exercise 3:
What would happen if you remove the Flatten() layer. Why do you think that's the case?
You get an error about the shape of the data. It may seem vague right now, but it reinforces the rule of thumb that the first layer in your network should be the same shape as your data. Right now our data is 28x28 images, and 28 layers of 28 neurons would be infeasible, so it makes more sense to 'flatten' that 28,28 into a 784x1. Instead of writing all the code to handle that ourselves, we add the Flatten() layer at the beginning, and when the arrays are loaded into the model later, they'll automatically be flattened for us.
```
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
# Exercise 3: Flatten is DELIBERATELY commented out so fitting fails with a
# shape error — the Dense layer cannot take the 28x28 images directly.
model = tf.keras.models.Sequential([#tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(64, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
##Exercise 4:
Consider the final (output) layers. Why are there 10 of them? What would happen if you had a different amount than 10? For example, try training the network with 5
You get an error as soon as it finds an unexpected value. Another rule of thumb -- the number of neurons in the last layer should match the number of classes you are classifying for. In this case it's the digits 0-9, so there are 10 of them, hence you should have 10 neurons in your final layer.
```
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
# Exercise 4: the output layer DELIBERATELY has 5 neurons for a 10-class
# problem, so training errors out when it sees a label >= 5.
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(64, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(5, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
##Exercise 5:
Consider the effects of additional layers in the network. What will happen if you add another layer between the one with 512 and the final layer with 10.
Ans: There isn't a significant impact -- because this is relatively simple data. For far more complex data (including color images to be classified as flowers that you'll see in the next lesson), extra layers are often necessary.
```
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
# Exercise 5: an extra 256-unit hidden layer between the 512 layer and the
# output, to observe the effect of additional depth.
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(512, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(256, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
#Exercise 6:
Consider the impact of training for more or less epochs. Why do you think that would be the case?
Try 15 epochs -- you'll probably get a model with a much better loss than the one with 5
Try 30 epochs -- you might see the loss value stops decreasing, and sometimes increases. This is a side effect of something called 'overfitting' which you can learn about [somewhere] and it's something you need to keep an eye out for when training neural networks. There's no point in wasting your time training if you aren't improving your loss, right! :)
```
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(128, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy')
# Exercise 6: train for 30 epochs to observe the loss plateauing/overfitting.
model.fit(training_images, training_labels, epochs=30)
model.evaluate(test_images, test_labels)
# inspect a different sample (index 34) this time
classifications = model.predict(test_images)
print(classifications[34])
print(test_labels[34])
```
#Exercise 7:
Before you trained, you normalized the data, going from values that were 0-255 to values that were 0-1. What would be the impact of removing that? Here's the complete code to give it a try. Why do you think you get different results?
```
import tensorflow as tf
print(tf.__version__)
mnist = tf.keras.datasets.mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
# Exercise 7: normalization is DELIBERATELY disabled (divisions commented out)
# to show the effect of feeding raw 0-255 pixel values to the network.
training_images=training_images#/255.0
test_images=test_images#/255.0
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
#Exercise 8:
Earlier when you trained for extra epochs you had an issue where your loss might change. It might have taken a bit of time for you to wait for the training to do that, and you might have thought 'wouldn't it be nice if I could stop the training when I reach a desired value?' -- i.e. 95% accuracy might be enough for you, and if you reach that after 3 epochs, why sit around waiting for it to finish a lot more epochs....So how would you fix that? Like any other program...you have callbacks! Let's see them in action...
```
import tensorflow as tf
# confirm which TF release the callback API below targets
print(tf.__version__)
class myCallback(tf.keras.callbacks.Callback):
    """Early-stopping callback: halts training once the epoch-end loss drops below 0.4."""

    def on_epoch_end(self, epoch, logs=None):
        # Avoid the mutable default argument; Keras passes `logs` anyway.
        logs = logs or {}
        # BUG FIX: the condition tests the LOSS value, but the old message
        # claimed "Reached 60% accuracy", which was misleading.
        if(logs.get('loss')<0.4):
            print("\nLoss is below 0.4 so cancelling training!")
            self.model.stop_training = True
# instantiate the callback and train Fashion-MNIST with it attached
callbacks = myCallback()
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
# normalize pixels to 0-1
training_images=training_images/255.0
test_images=test_images/255.0
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
# the callback may stop training before all 5 epochs complete
model.fit(training_images, training_labels, epochs=5, callbacks=[callbacks])
```
| github_jupyter |
```
# import lib
# ===========================================================
import csv
import pandas as pd
from datascience import *
import numpy as np
import random
import time
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('fivethirtyeight')
import collections
import math
import sys
from tqdm import tqdm
from time import sleep
# Initialize useful data
# with open('clinvar_conflicting_clean.csv', 'r') as f:
#     reader = csv.reader(f)
#     temp_rows = list(reader)
df = pd.read_csv('clinvar_conflicting_clean.csv', low_memory=False)
# columns_to_change = ['ORIGIN', 'EXON', 'INTRON', 'STRAND', 'LoFtool', 'CADD_PHRED', 'CADD_RAW', 'BLOSUM62']
# df[['CLNVI', 'MC', 'SYMBOL', 'Feature_type', 'Feature', 'BIOTYPE',
#     'cDNA_position', 'CDS_position', 'Protein_position', 'Amino_acids', 'Codons',
#     'BAM_EDIT', 'SIFT', 'PolyPhen']] = df[['CLNVI', 'MC', 'SYMBOL', 'Feature_type', 'Feature', 'BIOTYPE',
#     'cDNA_position', 'CDS_position', 'Protein_position', 'Amino_acids', 'Codons',
#     'BAM_EDIT', 'SIFT', 'PolyPhen']].fillna(value="null")
# replace remaining NaNs with 0 so downstream comparisons never see NaN
df = df.fillna(value=0)
# Balance the classes: 10k random rows from each CLASS value...
df_zero = df.loc[df['CLASS'] == 0]
df_zero = df_zero.sample(n=10000)
df_one = df.loc[df['CLASS'] == 1]
df_one = df_one.sample(n=10000)
df = pd.concat([df_zero, df_one])
# ...then shuffle by sampling the whole frame without replacement
df = df.sample(n = df.shape[0])
all_rows = df.values.tolist()
row_num = len(all_rows)
df.head()
# Decision stump part for Adaboost
# ===========================================================
def is_numeric(value):
    """Return True if *value* is an int or a float.

    Note: bools count as numeric (bool subclasses int), matching the
    original `isinstance(value, int) or isinstance(value, float)` behavior.
    """
    # Idiomatic: isinstance accepts a tuple of types in a single call.
    return isinstance(value, (int, float))
# === LeafNode is the prediction result of this branch ===
class LeafNode:
    """Terminal node of a decision stump.

    Holds `prediction`, a Counter mapping each class label (the last element
    of every row) to its frequency among the rows that reached this leaf.
    """

    def __init__(self, rows):
        # Count label occurrences directly from a generator over the rows.
        self.prediction = collections.Counter(row[-1] for row in rows)
# === DecisionNode is an attribute / question used to partition the data ===
class DecisionNode:
    """Internal stump node: a question plus the two subtrees it splits into.

    `left_branch` receives rows for which the question answers True,
    `right_branch` the rows answering False.
    """

    def __init__(self, question=None, left_branch=None, right_branch=None):
        self.question, self.left_branch, self.right_branch = (
            question,
            left_branch,
            right_branch,
        )
class DecisionStump:
    """Depth-limited decision tree used as the weak learner for AdaBoost.

    `height` caps the tree depth; the splitting criterion is selected by
    `method`: "C4.5" (gain ratio / entropy), "CART" (gini), or "HYBRID"
    (declared but not implemented). The tree is built eagerly in __init__.
    """

    def __init__(self, training_attribute, training_data, height, method = "CART"):
        self.attribute = training_attribute # takein attribute and data separately
        self.train = training_data
        self.height = height
        self.row_num = len(self.train)
        self.column_num = len(self.attribute)
        self.method = method.upper() # convert to upper case for general use
        # AdaBoost weight of this learner; updated externally by the boosting loop
        self.significance = 0
        if self.method not in ["C4.5", "CART", "HYBRID"]:
            print("Error: Please choose a valid method! from: [C4.5, CART, HYBRID]")
            return None
        # train decision stump
        self.root = self.build_stump(self.train, 1)
        # count ACC classifications and mis classifications to update weights
        # (filled in by the boosting loop after classification)
        self.accclassify_idx = []
        self.misclassify_idx = []
        # Only after DecisionStump trained, can we know which rows are misclassified
        # Walk down the decision stump to collect all misclassification indices
        # if self.root.left_branch.prediction.get(1, 0) > self.root.left_branch.prediction.get(0, 0):
        #     # then consider the prediction of this leaf node as 1: 1 -> correct, 0 -> misclassify
        #     self.accclassify_idx += self.root.left_branch.one_idx
        #     self.misclassify_idx += self.root.left_branch.zero_idx
        # else:
        #     # then consider the prediction of this leaf node as 0: 0 -> correct, 1 -> misclassify
        #     self.accclassify_idx += self.root.left_branch.zero_idx
        #     self.misclassify_idx += self.root.left_branch.one_idx
        # if self.root.right_branch.prediction.get(1, 0) > self.root.right_branch.prediction.get(0, 0):
        #     # then consider the prediction of this leaf node as 1: 1 -> correct, 0 -> misclassify
        #     self.accclassify_idx += self.root.right_branch.one_idx
        #     self.misclassify_idx += self.root.right_branch.zero_idx
        # else:
        #     # then consider the prediction of this leaf node as 0: 0 -> correct, 1 -> misclassify
        #     self.accclassify_idx += self.root.right_branch.zero_idx
        #     self.misclassify_idx += self.root.right_branch.one_idx

    def uniq_val(self, column):
        """Return the set of distinct values found in `column` of the training data."""
        return set([self.train[i][column] for i in range(len(self.train))])

    # when raising a question.
    # if it's a categorical attribute, we simply iterate all categories
    # if it's a numeric attribute, we iterate the set of possible numeric values
    class Question:
        """A binary split test: compare one column of a row against a reference value."""

        def __init__(self, column, ref_value, attribute):
            self.column = column
            # NOTE(review): any falsy ref value (0, 0.0, "") becomes the string
            # "None", which silently turns a numeric split categorical — confirm intended.
            self.ref_value = ref_value if ref_value else "None"
            self.attri = attribute

        def match(self, row):
            # numeric references split on >=; categorical ones on equality
            if is_numeric(self.ref_value):
                try:
                    return row[self.column] >= self.ref_value
                except:
                    # mixed-type comparison failed; treat as a match and log the row
                    print("Error occured in ", row)
                    return True
            else:
                return row[self.column] == self.ref_value

        def __repr__(self):
            operand = ">=" if is_numeric(self.ref_value) else "=="
            return "Is %s %s %s?" % (self.attri[self.column], operand, str(self.ref_value))

    # === Method 1 - C4.5 ===
    def entropy(self, rows):
        """Shannon entropy (bits) of the label distribution of `rows`."""
        # === Bits used to store the information ===
        labels = [row[-1] for row in rows]
        frequency = collections.Counter(labels).values()
        pop = sum(frequency)
        H = 0
        for f in frequency:
            p = f / pop
            H -= p * math.log(p, 2)
        return H

    # === Method 2 - CART ===
    def gini(self, rows):
        """Gini impurity of the label distribution of `rows`."""
        # === Probability of misclassifying any of your label, which is impurity ===
        labels = [row[-1] for row in rows]
        frequency = collections.Counter(labels).values()
        pop = sum(frequency)
        gini = 1
        for f in frequency:
            p = f / pop
            gini -= p ** 2
        return gini

    # === Calculate Gain Info ===
    def info(self, branches, root):
        """Information gained by splitting `root` into `branches`, per self.method.

        Returns gain ratio for C4.5, gini gain for CART, and None for HYBRID
        (unimplemented — falls through both `pass` statements).
        """
        # === Objective: to find the best question which can maximize info ===
        root_size = float(len(root))
        if self.method == "C4.5": # Here I pick the GainRatio Approach
            root_uncertainty = self.entropy(root)
            gain_info = root_uncertainty
            split_info = 0
            for branch in branches:
                if not branch: continue
                gain_info -= len(branch) / root_size * self.entropy(branch)
                split_info -= float(len(branch)) / root_size * math.log(float(len(branch)) / root_size)
            # print(gain_info, split_info)
            return gain_info / split_info
        elif self.method == "CART":
            root_uncertainty = self.gini(root)
            gain_info = root_uncertainty
            for branch in branches:
                if not branch: continue
                gain_info -= len(branch) / root_size * self.gini(branch)
            return gain_info
        elif self.method == "HYBRID":
            pass
        pass

    # === Here I only do Binary Partitions ===
    def partition(self, rows, question):
        """Split `rows` into (true_rows, false_rows) according to `question`."""
        true_rows = []
        false_rows = []
        for row in rows:
            if question.match(row):
                true_rows.append(row)
            else:
                false_rows.append(row)
        return true_rows, false_rows

    # the question that achieves the max infomation attenuation is the best question
    def find_best_question(self, rows):
        """Exhaustively search (column, value) pairs for the split with maximal gain.

        Returns (best_gain, best_question); ties go to the last candidate tried.
        """
        max_info_attenuation = 0
        best_question = self.Question(0, self.train[0][0], self.attribute)
        # === Iterate through all question candidates ===
        # === TODO: Maybe Iteration here can be optimized ===
        for col in range(self.column_num - 1): # minus 1 to avoid using the label as attribute
            ref_candidates = self.uniq_val(col)
            for ref_value in ref_candidates:
                if ref_value == "null": continue # avoid using null values to generate a question
                q = self.Question(col, ref_value, self.attribute)
                temp_true_rows, temp_false_rows = self.partition(rows, q)
                temp_info_attenuation = self.info([temp_true_rows, temp_false_rows], rows)
                if temp_info_attenuation >= max_info_attenuation:
                    max_info_attenuation = temp_info_attenuation
                    best_question = q
        return max_info_attenuation, best_question

    # === Input rows of data with attributes and labels ===
    def build_stump(self, rows, height):
        """Recursively grow the tree, stopping one level short of self.height."""
        # === Assign all rows as root of the whole decision tree ===
        # === We have met the leaf node if gini(rows) is 0 or no question candidates left ===
        gain_reduction, q = self.find_best_question(rows)
        true_rows, false_rows = self.partition(rows, q)
        if height + 1 >= self.height:
            # next level would exceed the cap: both children become leaves
            return DecisionNode(q, LeafNode(true_rows), LeafNode(false_rows))
        else:
            return DecisionNode(q, self.build_stump(true_rows, height + 1), self.build_stump(false_rows, height + 1))

    # === Input a row of data with attributes (and no label), predict its label with our decision tree ===
    # === Actually it can contain a label, we just don't use it ===
    # === walk down the decision tree until we reach the leaf node ===
    def classify(self, row, node):
        """Descend from `node` following question answers; return the leaf's label Counter."""
        if isinstance(node, LeafNode):
            # do a mapping from label[1, 0] to label[1, -1]
            return node.prediction
            # return 1 if node.prediction.get(1, 0) / (node.prediction.get(1, 0) + node.prediction.get(0, 0)) > cutoff else -1
        if node.question.match(row):
            return self.classify(row, node.left_branch)
        else:
            return self.classify(row, node.right_branch)

    # function to print the tree out
    def print_tree(self, node, spacing=""):
        """Pretty-print the subtree rooted at `node`, indenting children by `spacing`."""
        # Base case: we've reached a leaf
        if isinstance(node, LeafNode):
            print (spacing + "Predict", node.prediction)
            return
        # Print the question at this node
        print (spacing + str(node.question))
        # Call this function recursively on the true branch
        print (spacing + '--> True:')
        self.print_tree(node.left_branch, spacing + "  ")
        # Call this function recursively on the false branch
        print (spacing + '--> False:')
        self.print_tree(node.right_branch, spacing + "  ")

    def test(self):
        """Smoke test: build and print one Question per column from training row 1."""
        for i in range(self.column_num):
            q = self.Question(i, self.train[1][i], self.attribute)
            print(q)
            print(q.match(1))
def normalized_weight(weight):
    """Rescale *weight* so its entries sum to 1; returns a numpy array."""
    arr = np.asarray(weight, dtype=float)
    return arr / arr.sum()
def rev_logit(val):
    """Reversed logistic function: 1 / (1 + e^val).

    Large positive ``val`` maps toward 0, large negative toward 1
    (i.e. the standard sigmoid evaluated at -val).
    """
    denom = 1.0 + np.exp(val)
    return 1.0 / denom
# Divide whole dataset into training set and testing set
# ===========================================================
# NOTE(review): only 20% of the rows are used for training and the remaining
# 80% for testing -- unusual ratio; confirm this is intentional.
training_percentage = 0.2 # percent of partition of training dataset
training_size = int(row_num * training_percentage)
testing_size = row_num - training_size
training_attribute = list(df.columns)
training_data = all_rows[: training_size] # training data should include header row
testing_data = all_rows[training_size: ] # testing data don't need to include header row
# Recursively Training base learners
# ===========================================================
# let's train T base learners
T = 20
weakleaner_height = 3
stump_forest = []
# AdaBoost sample weights: start uniform over the training rows.
weight = [1 / training_size for _ in range(training_size)]
# AdaBoost (boosting-by-resampling variant): each round trains a CART stump,
# scores it on the current training sample, computes its significance (alpha),
# then reweights and resamples the training data for the next round.
start = time.time()
for i in range(T):
# train a decision stump
stump = DecisionStump(training_attribute, training_data, weakleaner_height, "CART")
# calculate the total error of the stump after it's trained
for j in range(training_size):
row = training_data[j]
pred_counter = stump.classify(row, stump.root)
# Convert the leaf's label counter to a hard 0/1 prediction at cutoff 0.5;
# the small epsilon guards against an empty counter (divide-by-zero).
pred_label = 1 if pred_counter.get(1, 0) / (pred_counter.get(1, 0) + pred_counter.get(0, 0) + 0.00000001) > 0.5 else 0
if pred_label == row[-1]:
stump.accclassify_idx.append(j)
else:
stump.misclassify_idx.append(j)
accuracy = len(stump.accclassify_idx) / training_size
total_err_rate = 1 - accuracy
# update the significance level of this stump, remember not to divide by zero
stump.significance = 0.5 * math.log((1 - total_err_rate + 0.0001) / (total_err_rate + 0.0001))
# append stump into the forest
stump_forest.append(stump)
# if len(stump_forest) == T: break # early break
# update training_data weight, resample the training data with the updated weight distribution
# Misclassified rows are up-weighted by e^alpha, correct ones down-weighted.
true_scale = np.e ** stump.significance
for idx in stump.misclassify_idx:
weight[idx] = weight[idx] * true_scale
for idx in stump.accclassify_idx:
weight[idx] = weight[idx] * (1 / true_scale)
distrib = normalized_weight(weight)
# interactive printing
# sys.stdout.write('\r')
# # the exact output you're looking for:
# sys.stdout.write("Training Random Forest: [%-10s] %d%% alpha = %.02f" % ('='*int((i + 1) / T * 10), int((i + 1) / T * 100), stump.significance))
# sys.stdout.flush()
# stump.print_tree(stump.root)
# print(i, stump.significance)
# Draw a new training sample (with replacement) from the weight distribution.
resampled_idx = np.random.choice(training_size, training_size, p = distrib)
training_data = [training_data[idx] for idx in resampled_idx]
# Stop early if the resampled set has collapsed to too few distinct values.
# NOTE(review): row[1] inspects attribute column 1, while labels live at
# row[-1] elsewhere in this script -- confirm this index is intentional.
if len(set([row[1] for row in training_data])) < 0.04 * training_size: break
print(i, len(set([row[1] for row in training_data])), stump.significance, end='\n')
# NOTE(review): weights are reset to uniform after every resampling round;
# this is the resampling flavour of AdaBoost rather than pure reweighting.
weight = [1 / training_size for _ in range(training_size)]
end = time.time()
print("\nTime: %.02fs" % (end - start))
# New Testing Adaboost
# ===========================================================
# Compute TN, TP, FN, FP, etc. together with testing
# ===========================================================
# Sweep the per-stump decision cutoff over [0, 1] and record a confusion
# matrix (and accuracy) at each cutoff to build an ROC table.
ROC = Table(make_array('CUTOFF', 'TN', 'FN', 'FP', 'TP', 'ACC'))
step_size = 0.05
forest_size = len(stump_forest)
# Confusion-matrix cell is encoded as a 2-bit index: (prediction << 1) | label,
# so 0b00=TN, 0b01=FN, 0b10=FP, 0b11=TP.
CMap = {0: 'TN', 1: 'FN', 2: 'FP', 3: 'TP'}
for cutoff in np.arange(0, 1 + step_size, step_size):
# Progress bar on a single line.
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Testing: [%-20s] %d%%" % ('='*int(cutoff * 100 / 5), int(cutoff * 100)))
sys.stdout.flush()
'''
# calculate the total error of each stump
for stump in stump_forest:
stump.accclassify_idx = []
stump.misclassify_idx = []
# walk down the stump for each training data, see if its prediction makes sense
for j in range(training_size):
row = training_data[j]
pred_counter = stump.classify(row, stump.root)
pred_label = 1 if pred_counter.get(1, 0) / (pred_counter.get(1, 0) + pred_counter.get(0, 0) + 0.00000001) > cutoff else 0
if pred_label == row[-1]:
stump.accclassify_idx.append(j)
else:
stump.misclassify_idx.append(j)
accuracy = len(stump.accclassify_idx) / training_size
total_err_rate = 1 - accuracy
# update the significance level of this stump, remember not to divide by zero
stump.significance = 0.5 * math.log((1 - total_err_rate + 0.0001) / (total_err_rate + 0.0001))
'''
Confusion = {'TN': 0, 'FN': 0, 'FP': 0, 'TP': 0}
for row in testing_data:
true_rate_forest = 0
for tree_i in stump_forest:
# prediction is a counter of label 1 and 0
pred_counter = tree_i.classify(row, tree_i.root)
# do a mapping from label[1, 0] to label[1, -1]
true_rate_tree = 1 if pred_counter.get(1, 0) / (pred_counter.get(1, 0) + pred_counter.get(0, 0) + 0.00000001) > cutoff else -1
# Weighted vote: each stump contributes +/-1 scaled by its significance.
true_rate_forest += true_rate_tree * tree_i.significance
# true_rate_forest = rev_logit(true_rate_forest)
# true_pred = 1 if true_rate_forest >= cutoff else 0
true_pred = 0 if np.sign(true_rate_forest) <= 0 else 1
indicator = (true_pred << 1) + row[-1]
# accordingly update confusion matrix
Confusion[CMap[indicator]] += 1
# concatenate the confusion matrix values into the overall ROC Table
thisline = [cutoff] + list(Confusion.values()) + [(Confusion['TP'] + Confusion['TN']) / sum(Confusion.values())]
ROC = ROC.with_row(thisline)
# Derived columns; epsilons guard against empty denominators.
ROC = ROC.with_columns('SENSITIVITY', ROC.apply(lambda TP, FN: TP / (TP + FN + 0.00000001), 'TP', 'FN'))
ROC = ROC.with_columns('FPR', ROC.apply(lambda TN, FP: FP / (TN + FP + 0.00000001), 'TN', 'FP'))
ROC = ROC.with_column('FMEAS', ROC.apply(lambda TP, FP, FN: 2 * (TP / (TP + FN)) * (TP / (TP + FP)) / (TP / (TP + FN) + TP / (TP + FP)), 'TP', 'FP', 'FN'))
ROC.show()
# Acc Curve by cutoff
# ===========================================================
# matplotlib.use('TkAgg')
# Accuracy vs. cutoff, with a flat 0.5 line as the null (coin-flip) baseline.
fig = plt.figure()
plt.xlabel('Cutoff')
plt.ylabel('Accuracy')
plt.title('Accuracy - Cutoff of Adaboost')
plt.plot(np.arange(0, 1.1, 0.1), [0.5 for i in np.arange(0, 1.1, 0.1)], color='black')
plt.plot(ROC.column('CUTOFF'), ROC.column('ACC'), color='orange')
plt.legend(['Adaboost', 'Null'])
plt.axis([0, 1, 0, 1.1])
plt.show()
fig.savefig('Adaboost ACC.png', bbox_inches='tight')
# ROC_CURVE
# ===========================================================
# ROC curve, with the diagonal as the no-skill baseline.
fig = plt.figure()
plt.xlabel('False Positive Rate')
plt.ylabel('Sensitivity')
plt.title('ROC - Curve of Adaboost')
plt.plot(np.arange(0, 1.1, 0.1), np.arange(0, 1.1, 0.1), color='black')
plt.plot(ROC.column('FPR'), ROC.column('SENSITIVITY'), color='orange')
plt.legend(['Adaboost', 'Null'])
plt.axis([0, 1, 0, 1.1])
plt.show()
fig.savefig('Adaboost ROC.png', bbox_inches='tight')
# Compute AUC
# ===========================================================
# Trapezoidal rule over the (FPR, SENSITIVITY) points; abs() handles the
# cutoff sweep traversing the curve from right to left.
length = len(ROC.column('FPR'))
auc = 0
for i in range(length - 1):
auc += 0.5 * abs(ROC.column('FPR')[i + 1] - ROC.column('FPR')[i]) * (ROC.column('SENSITIVITY')[i] + ROC.column('SENSITIVITY')[i + 1])
print("auc = %.03f" %auc)
# Original Testing
# ===========================================================
# Score the boosted ensemble at the default cutoff of 0.5.
# BUG FIX: stump.classify() returns a label counter (dict mapping label ->
# count, see classify()/the ROC loop above), which cannot be multiplied by
# the stump's significance directly. Convert the counter to a {+1, -1} vote
# first, using the same cutoff-0.5 mapping as the ROC testing section.
accuracy = 0
for row in testing_data:
    overall_classification = 0
    for stump in stump_forest:
        pred_counter = stump.classify(row, stump.root)
        # Epsilon guards against an empty counter (divide-by-zero).
        positive_rate = pred_counter.get(1, 0) / (pred_counter.get(1, 0) + pred_counter.get(0, 0) + 0.00000001)
        classification = 1 if positive_rate > 0.5 else -1
        vote = stump.significance
        overall_classification += classification * vote
    # reverse mapping from label[1, -1] to label[1, 0]
    predicted_label = 0 if np.sign(overall_classification) <= 0 else 1
    if predicted_label == row[-1]: accuracy += 1
accuracy = accuracy / testing_size
print("%.03f%%" % (accuracy * 100))
# Testing with a toy dataset
# ===========================================================
# Tiny fruit dataset: [color, diameter, label] rows for quick sanity checks.
# NOTE(review): this reassigns training_data/testing_data AFTER the real run
# above -- any cell executed later will use the toy data instead.
training_data = [
['Green', 3, 1],
['Yellow', 3, 1],
['Red', 1, 0],
['Red', 1, 0],
['Yellow', 3, 1],
['Red', 3, 1]
]
testing_data = [
['Red', 2, 0],
['Yellow', 3.5, 1],
['Green', 3, 1]
]
training_attribute = ['Color', 'Diameter', 'Label']
training_size = len(training_data)
testing_size = len(testing_data)
# pf = df[]
# Leftover notebook scratch cells: bare expressions whose values were only
# displayed interactively (no effect when run as a script).
len(set(pd.Index(df)))
np.bincount([row[-1] for row in all_rows])
fpr, sen, acc = ROC.column('FPR'), ROC.column('SENSITIVITY'), ROC.column('ACC')
fpr
sen
acc
```
| github_jupyter |
<a href="https://colab.research.google.com/github/DataScienceUB/DeepLearningMaster2019/blob/master/6.%20Recurrent%20Neural%20Networks%20I.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Recurrent Neural Networks I
Classical neural networks, including convolutional ones, suffer from two severe limitations:
+ They only accept a fixed-sized vector as input and produce a fixed-sized vector as output.
+ They do not consider the sequential nature of some data (language, video frames, time series, etc.)
Recurrent neural networks overcome these limitations by allowing to operate over sequences of vectors (in the input, in the output, or both).
## Vanilla Recurrent Neural Network
The basic formulas of a simple RNN are:
$$ \mathbf s_t = \mbox{tanh }(U \mathbf x_t + W \mathbf s_{t-1}) $$
$$ \mathbf y_t = V \mathbf s_t $$
where the hyperbolic tangent function ``tanh`` is:
<img src="https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/TanhReal.gif?raw=1" alt="" style="width: 300px;"/>
These equations basically say that the current network state $s_t$, commonly known as hidden state, is a function of the previous hidden state $\mathbf s_{t-1}$ and the current input $\mathbf x_t$. $U, V, W$ matrices are the parameters of the RNN and $\mathbf y_t$ is its output at time $t$.
<img src="https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/vanilla.png?raw=1" alt="" style="width: 400px;"/>
## Unrolling in time of a RNN
Given an input sequence, we apply RNN formulas in a recurrent way until we process all input elements. **The RNN shares the parameters $U,V,W$ across all recurrent steps**.
Some important observations:
+ We can think of the hidden state as a memory of the network that captures information about the previous steps. **It embeds the representation of the sequence.**
+ The output of the network can be considered at every stage or only at the final one (see bellow).
+ When starting to train a RNN we must provide initial values for $U,V,W$ as well as for $\mathbf s$.
By unrolling we mean that we write out the network for the complete sequence:
<img src="https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/unrolling.png?raw=1" alt="" style="width: 600px;"/>
Training a RNN involves the following steps:
+ Providing a training set composed of several input ($n$-dimensional) sequences $\{\mathbf{X}_i \}$ and their expected outcomes. Each element of a sequence $\mathbf{x}_j \in \mathbf{X}_i$ is also a vector.
+ Defining a loss function to measure the fitting of the output of the network to the expected outcome.
+ Applying SGD (or variants) to optimize the loss function.
## Vanilla Recurrent Neural Network (minibatch version)
Observe that the number of parameters of a RNN can be very high:
<img src="https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/minibatch.png?raw=1" alt="" style="width: 400px;"/>
It is not necessary to have outputs $y_t$ at each time step. Depending on the problem we are solving, we can have the following RNN architectures:
<img src="https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/kar.png?raw=1" alt="" style="width: 600px;"/>
Source: http://karpathy.github.io/2015/05/21/rnn-effectiveness/
RNN have shown success in:
+ Language modeling and generation.
+ Machine Translation.
+ Speech Recognition.
+ Image Description.
+ Question Answering.
+ Etc.
## RNN Computation
Defining a simple RNN with code is simple:
```python
class RNN:
#...
def step(self,x):
self.h = np.tanh(np.dot(self.W_ss, self.h) +
np.dot(self.W_xs, self.x))
y = np.dot(self.W_sy, self.h)
return y
#...
```
We can go deep by stacking RNNs:
```python
y1 = rnn1.step(x)
y2 = rnn2.step(y1)
```
### RNN and information representation
The inputs of a recurrent network must be always **sequences of vectors**, but we can process sequences of symbols/words by representing these symbols by numerical vectors.
Let's suppose we are classifying a series of words $\mathbf x_1, ..., \mathbf x_{t-1}, \mathbf x_t, \mathbf x_{t+1}, ... \mathbf x_{n}$, where $\mathbf x_i$ are word vectors corresponding to a document with $n$ words that belong to a corpus with $|V|$ symbols.
Then, the relationship to compute the hidden layer features at each time-step $t$ is $\mathbf s_t = \sigma(W^{(ss)} \mathbf s_{t-1} + W^{(sx)} \mathbf x_{t})$, where:
+ $\mathbf x_{t} \in \mathbb{R}^{d}$ is input word vector at time $t$.
+ $W^{(sx)} \in \mathbb{R}^{D_s \times d}$ is the weights matrix used to condition the input word vector, $x_t$.
+ $W^{(ss)} \in \mathbb{R}^{D_s \times D_s}$ is the weights matrix used to condition the state of the previous time-step state, $s_{t-1}$.
+ $\mathbf s_{t-1} \in \mathbb{R}^{D_s}$ is the state at the previous time-step, $t-1$.
+ $\mathbf s_0 \in \mathbb{R}^{D_s}$ is an initialization vector for the hidden layer at time-step $t = 0$.
+ $\sigma ()$ is the non-linearity function (normally, ``tanh``).
In this case, the output of the network can be defined to be $\hat{\mathbf y}_t = softmax (W^{(sy)}\mathbf s_t)$ and the loss function cross-entropy, because we are dealing with a multiclass classification problem.
$\hat{\mathbf y}$ is the output probability distribution over the vocabulary at each time-step $t$ (i.e. it is a high dimensional vector!).
Essentially, $\hat{\mathbf y}_t$ is the next predicted word given the document context score so far (i.e. $\mathbf s_{t-1}$) and the last observed word vector $\mathbf x^{(t)}$. Here, $W^{(sy)} \in \mathbb{R}^{|V|\times D_s}$ and $\hat{\mathbf y} \in \mathbb{R}^{|V|}$ where $|V|$ is the cardinality of the vocabulary.
The loss function used in RNNs is often the cross entropy error:
$$
L^{(t)} = - \sum_{j=1}^{|V|} y_{t,j} \times log (\hat{y}_{t,j})
$$
The cross entropy error over a document of size $n$ is:
$$
L = \dfrac{1}{n} \sum_{t=1}^n L^{(t)}(W) = - \dfrac{1}{n} \sum_{t=1}^{n} \sum_{j=1}^{|V|} y_{t,j} \times log (\hat{y}_{t,j})
$$
In the case of classifying a series of symbols/words, the *perplexity* measure can be used to assess the goodness of our model. It is basically 2 to the power of the negative log probability of the cross entropy error function:
$$
Perplexity = 2^{L}
$$
Perplexity is a measure of confusion where lower values imply more confidence in predicting the next word in the sequence (compared to the ground truth outcome).
## RNN Training
> Training a RNN is similar to training a traditional NN, but some modifications. The main reason is that parameters are shared by all time steps: in order to compute the gradient at $t=4$, we need to propagate 3 steps and sum up the gradients. This is called **Backpropagation through time (BPTT)**.
Recurrent neural networks propagate weight matrices from one time-step to the next. Recall the goal of a RNN implementation is to enable propagating context information through faraway time-steps. When these propagation results in a long series of matrix multiplications, weights can **vanish or explode**.
+ Once the gradient value grows extremely large, it causes an overflow (i.e. ``NaN``) which is easily detectable at runtime; this issue is called the *Gradient Explosion Problem*.
+ When the gradient value goes to zero, however, it can go undetected while drastically reducing the learning quality of the model for far-away words in the corpus; this issue is called the *Vanishing Gradient Problem*.
There are several tricks to mitigate these problems:
### Gradient Clipping
To solve the problem of exploding gradients, Thomas Mikolov first introduced a simple heuristic solution that *clips* gradients to a small number whenever they explode. That is, whenever they reach a certain threshold, they are set back to a small number.
<img src="https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/exploding.png?raw=1" alt="" style="width: 400px;"/>
### Better initialization
To solve the problem of vanishing gradients, instead of initializing $W^{ss}$ randomly, starting off from **random orthogonal matrices** works better, i.e., a square matrix $W$ for which $W^T W=I$.
There are two properties of orthogonal matrices that are useful for training deep neural networks:
+ they are norm-preserving, i.e., $ ||W \mathbf x||^2=||\mathbf x||^2$, and
+ their columns (and rows) are all orthonormal to one another.
At least at the start of training, the first of these should help to keep the norm of the input constant throughout the network, which can help with the problem of exploding/vanishing gradients.
Similarly, an intuitive understanding of the second is that having orthonormal weight vectors encourages the weights to learn different input features.
You can obtain a random $n \times n$ orthogonal matrix $W$, (uniformly distributed) by performing a QR factorization of an $n \times n$ matrix with elements i.i.d. Gaussian random variables of mean $0$ and variance $1$. Here is an example:
```
import numpy as np
from scipy.linalg import qr
n = 3
H = np.random.randn(n, n)
print(H)
print('\n')
Q, R = qr(H)
print (Q.dot(Q.T))
```
### Steeper Gates
We can make the "gates steeper" so they change more rapidly from 0 to 1 and the model is learnt quicker.
<img src="https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/steeper.png?raw=1" alt="" style="width: 600px;"/>
## Gated RNN
Recurrent Neural Networks work just fine when we are dealing with short-term dependencies.
However, vanilla RNNs fail to understand the long-term context dependencies (when relevant information may be separated from the point where it is needed by a huge load of irrelevant data).
Gated RNNs (with units that are designed to forget and to update relevant information) are a solution to this problem.
The most important types of gated RNNs are:
+ **Long Short Term Memories** (LSTM). It was introduced by S.Hochreiter and J.Schmidhuber in 1997 and is widely used. LSTM is very good in the long run due to its high complexity.
+ **Gated Recurrent Units** (GRU). It was recently introduced by K.Cho. It is simpler than LSTM, faster and optimizes quicker.
#### LSTM
The key idea of LSTMs is to have two state representations: the hidden state $\mathbf h$ and the cell state $\mathbf C$ (instead of $\mathbf s$).
The cell state $\mathbf C$ is like a conveyor belt. It runs straight down the entire chain, with only some minor linear interactions. It’s very easy for information to just flow along it unchanged.
<img src="https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/lstm.png?raw=1" alt="Source: http://colah.github.io/posts/2015-08-Understanding-LSTMs/" style="width: 600px;"/>
LSTM has the ability to remove or add information to the cell state, carefully regulated by structures called **gates**.
Gates are a way to optionally let information through. They are composed out of a *sigmoid* neural net layer and a pointwise multiplication operation.
Let us see how a LSTM uses $\mathbf h_{t-1}, \mathbf C_{t-1}$ and $\mathbf x_{t}$ to generate the next states $\mathbf C_t, \mathbf h_{t}$:
$$ \mathbf f_t = \sigma(W^f \cdot [\mathbf h_{t-1}, \mathbf x_t]) \mbox{ (Forget gate)} $$
$$ \mathbf i_t = \sigma(W^i \cdot [\mathbf h_{t-1}, \mathbf x_t]) \mbox{ (Input gate)} $$
$$ \tilde {\mathbf C_t} = \operatorname{tanh}(W^C \cdot [\mathbf h_{t-1}, \mathbf x_t]) $$
$$ \mathbf C_t = \mathbf f_t \cdot \mathbf C_{t-1} + \mathbf i_t \cdot \tilde {\mathbf C_t} \mbox{ (Update gate)} $$
$$ \mathbf o_t = \sigma(W^o \cdot [\mathbf h_{t-1}, \mathbf x_t]) $$
$$ \mathbf h_t = \mathbf o_t \cdot \operatorname{tanh}(\mathbf C_t) \mbox{ (Output gate)} $$
where $[\mathbf h_{t-1}, \mathbf x_t]$ represents the concatenation of two vectors.
There are other variants of LSTM (f.e. LSTM with peephole connections of Gers & Schmidhuber (2000))
#### GRU
The transition from hidden state $\mathbf h_{t-1}$ to $\mathbf h_{t}$ in vanilla RNN is defined by using an affine transformation and a point-wise nonlinearity.
What motivates the use of gated units? Although RNNs can theoretically capture long-term dependencies, they are very hard to actually train to do this. Gated recurrent units are designed in a manner to have more persistent memory thereby making it easier for RNNs to capture long-term dependencies.
<img src="https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/gru.png?raw=1" alt="Source: http://colah.github.io/posts/2015-08-Understanding-LSTMs/" style="width: 300px;"/>
Let us see how a GRU uses $\mathbf h_{t-1}$ and $\mathbf x_{t}$ to generate the next hidden state $\mathbf h_{t}$.
$$ \mathbf z_{t} = \sigma(W_z \cdot [\mathbf x_{t}, \mathbf h_{t-1}]) \mbox{ (Update gate)}$$
$$ \mathbf r_{t} = \sigma(W_r \cdot [\mathbf x_{t}, \mathbf h_{t-1}]) \mbox{ (Reset gate)}$$
$$ \tilde{\mathbf h}_{t} = \operatorname{tanh}(W \cdot [\mathbf x_{t}, \mathbf r_t \cdot \mathbf h_{t-1}] ) \mbox{ (New memory)}$$
$$ \mathbf h_{t} = (1 - \mathbf z_{t}) \cdot \mathbf h_{t-1} + \mathbf z_{t} \cdot \tilde{\mathbf h}_{t} \mbox{(Hidden state)}$$
It combines the forget and input gates into a single “update gate.” It also merges the cell state and hidden state, and makes some other changes. The resulting model is simpler than standard LSTM models.
### RNN in Keras
Whenever you train or test your LSTM/GRU, you first have to build your input matrix $\mathbf X$ of shape ``[nb_samples,timesteps,input_dim]`` where your batch size divides ``nb_samples``.
For instance, if ``nb_samples=1024`` and ``batch_size=64``, it means that your model will receive blocks of 64 samples, compute each output (whatever the number of timesteps is for every sample), average the gradients and propagate it to update the parameters vector.
> By default, **Keras shuffles (permutes) the samples in $\mathbf X$** and consequently the dependencies between $\mathbf X_i$ and $\mathbf X_{i+1}$ are not considered. If
$\mathbf X_i$ and $\mathbf X_{i+1}$ represent independent sequences (f.e. different instances of a time series), this is not a problem. But if they represent parts of a bigger sequence (f.e. words in a text) it is!
> By using the **stateful model** all the states are propagated to the next batch. We must also prevent shuffling in the ``fit`` method.
```
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM, GRU
%matplotlib inline
tsteps = 1
# number of elements ahead that are used to make the prediction
lahead = 1
def gen_cosine_amp(amp=100, period=10, x0=0, xn=50000, step=1, k=0.0001):
"""
Generates an absolute cosine time series with the amplitude
exponentially decreasing
Arguments:
amp: amplitude of the cosine function
period: period of the cosine function
x0: initial x of the time series
xn: final x of the time series
step: step of the time series discretization
k: exponential rate
"""
cos = np.zeros((xn - x0) * step)
for i in range(len(cos)):
idx = x0 + i * step
cos[i] = amp * np.cos(idx / (2 * np.pi * period))
cos[i] = cos[i] * np.exp(-k * idx)
cos = cos / np.max(cos)
return cos
cos = gen_cosine_amp()
plt.figure(figsize=(15,7))
plt.plot(cos)
# print('Generating Input Data')
lenght_series = 400
cos = gen_cosine_amp()
cos_series = np.zeros((50000-lenght_series, lenght_series, 1))
for i in range(50000-lenght_series):
cos_series[i,:,:] = cos[i:i+lenght_series, np.newaxis]
print('Input shape:', cos_series.shape)
expected_output = np.zeros((len(cos)-lenght_series, 1))
for i in range(len(cos_series)-1):
expected_output[i, 0] = cos[i+lenght_series]
print('Output shape', expected_output.shape)
print("Sample: ", expected_output[0])
plt.figure(figsize=(20,5))
plt.plot(expected_output,'-r')
plt.title('Expected')
plt.show()
print('Creating Model')
model = Sequential()
model.add(LSTM(50,
batch_input_shape=(None, lenght_series, 1),
return_sequences=True, # This param indicates whether to return
# the last output in the output
# sequence, or the full sequence.
stateful=False))
model.add(LSTM(50,
batch_input_shape=(None, lenght_series, 1),
return_sequences=False,
stateful=False))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.summary()
print('Training')
epochs = 1
batch_size = 100
model.fit(cos_series,
expected_output,
batch_size=batch_size,
verbose=1,
epochs=epochs,
shuffle=True)
print('Predicting...')
predicted_output = model.predict(cos_series, batch_size=batch_size)
print('Ploting Results')
plt.figure(figsize=(20,5))
plt.plot(predicted_output,'-g')
plt.plot(expected_output - predicted_output,'-r')
plt.title('Predicted Output and Error')
plt.show()
future = np.zeros((1,400,1))
future[0,:,:] = cos[10000:10400, np.newaxis]
print(future)
predicted_future = model.predict(future)
print(predicted_future)
future = np.zeros((1,lenght_series,1))
future[0,:,:] = cos[:lenght_series, np.newaxis]
predicted_future = np.zeros((1000,))
for i in range(1000):
predicted_future[i] = model.predict(future)
future[0,0:lenght_series-1,0] = future[0,1:lenght_series,0]
future[0,lenght_series-1,0] = predicted_future[i]
print(predicted_future[0])
plt.plot(predicted_future,'-g')
plt.plot(cos[:1000],'-r')
```
## Exercise
Read and execute carefully this notebook: [Chollet-advanced-usage-of-recurrent-neural-networks.ipynb](Chollet-advanced-usage-of-recurrent-neural-networks.ipynb)
| github_jupyter |
# Assignment 2 | Programming Logic
Reminder: in all of the assignments this semester, the answer is not the only consideration, but also how you get to it. It's OK (suggested even!) to use the internet for help. But you _should_ be able to answer all of these questions using only the programming techniques you have learned in class and from the readings.
A few keys for success:
- Avoid manual data entry
- Emphasize logic and clarity
- Use comments, docstrings, and descriptive variable names
- In general, less code is better. But if more lines of code makes your program easier to read or understand what its doing, then go for it.
## Problem 1
Write a Python program to count the number of even and odd numbers from a list of numbers. Test your code by running it on a list of integers from 1 to 9. No need to make this a function unless you want to.
```
def count_even_and_odd(list):
number_even, number_odd= 0,0
for x in list:
if x%2 == 0:
number_even += 1
else:
number_odd +=1
print ("evens", number_even, "odds", number_odd)
my_list = [1,2,3,4,5,6,7,8,9]
count_even_and_odd(my_list)
```
## Problem 2
Write a Python function that takes a list of numbers and returns a list containing only the even numbers from the original list. Test your function by running it on a list of integers from 1 to 9.
```
def only_returns_evens(list):
list_of_evens = []
for x in list:
if x%2 == 0:
list_of_evens.append(x)
else:
pass
print(list_of_evens)
only_returns_evens(my_list)
```
## Problem 3
1. Create a function that accepts a list of integers as an argument and returns a list of floats which equals each number as a fraction of the sum of all the items in the original list.
2. Next, create a second function which is the same as the first, but limit each number in the output list to two decimals.
3. Create another function which builds on the previous one by allowing a "user" pass in an argument that defines the number of decimal places to use in the output list.
4. Test each of these functions with a list of integers
```
#Number 1
def floats_as_fraction_of_list_sum(list):
print([x/sum(my_list) for x in my_list])
floats_as_fraction_of_list_sum(my_list)
#Number 2
def floats_as_fraction_of_list_sum_2_points(list):
new_list = ([x/sum(my_list) for x in my_list])
new_list = [round(x, 2) for x in new_list]
print(new_list)
floats_as_fraction_of_list_sum_2_points(my_list)
#Number 3
def floats_as_fraction_of_list_sum_b_points(list,b):
new_list = ([x/sum(my_list) for x in my_list])
new_list = [round(x, b) for x in new_list]
print(new_list)
floats_as_fraction_of_list_sum_b_points(my_list,2)
```
## Problem 4
A prime number is any whole number greater than 1 that has no positive divisors besides 1 and itself. In other words, a prime number must be:
1. an integer
2. greater than 1
3. divisible only by 1 and itself.
Write a function is_prime(n) that accepts an argument `n` and returns `True` (boolean) if `n` is a prime number and `False` if n is not prime. For example, `is_prime(11)` should return `True` and `is_prime(12)` should return `False`.
```
def is_prime(n):
if n > 1:
for x in range(2,n): ##ensures its an int, and dividing numbers above 1
if (x % n) == 0:
return False
return True
else:
return False
is_prime(5)
```
## Problem 5
1. Create a class called `Housing`, and add the following attributes to it:
- type
- area
- number of bedrooms
- value (price)
- year built.
2. Create two instances of your class and populate their attributes (make 'em up)
3. Create a method called `rent()` that calculates the estimated monthly rent for each house (assume that monthly rent is 0.4% of the value of the house)
4. Print the rent for both instances.
```
class Housing:
pass
house_1 = Housing()
house_2 = Housing()
class Housing:
def __init__(self, type, area, number_of_bedrooms, value, year_built):
self.type = type
self.area = area
self.number_of_bedrooms = number_of_bedrooms
self.value = value
self.value = year_built
def rent(self):
return self.value * .04
house_1 = Housing("big", "3000 sq ft", 5, 150000, 1954)
house_2 = Housing("small", "1000 sq ft", 1, 500000, 2000)
"""house_1.type = "big"
house_1.area = "3000 sq ft"
house_1.number_of_bedrooms = 5
house_1.value = 1500000
house_1.year_built = 1954
house_2.type = "small"
house_2.area = "1000 sq ft"
house_2.number_of_bedrooms = 1
house_2.value = 500000
house_2.year_built = 2000
"""
"""class Housing:
def __init__(self, type, area, number_of_bedrooms, value, year_built):
self.type = type
self.area = area
self.number_of_bedrooms = number_of_bedrooms
self.value = value
self.value = year_built
def rent(self):
return self.value * .04"""
house_1.rent()
```
| github_jupyter |
# Direction of the Gradient
When you play around with the thresholding for the gradient magnitude in the previous exercise, you find what you might expect, namely, that it picks up the lane lines well, but with a lot of other stuff detected too. Gradient magnitude is at the heart of Canny edge detection, and is why Canny works well for picking up all edges.
In the case of lane lines, we're interested only in edges of a particular orientation. So now we will explore the direction, or orientation, of the gradient.
The direction of the gradient is simply the inverse tangent (arctangent) of the y gradient divided by the x gradient:
**arctan(sobely/sobelx)**
Each pixel of the resulting image contains a value for the angle of the gradient away from horizontal in units of radians, covering a range of **-π/2 to π/2**. An orientation of 0 implies a vertical line and orientations of **+/−π/2** imply horizontal lines. (Note that in the quiz below, we actually utilize np.arctan2, which can return values between **+/−π**; however, as we'll take the absolute value of sobelx, this restricts the values to **+/−π/2**, as shown here.)
In this next exercise, you'll write a function to compute the direction of the gradient and apply a threshold. The direction of the gradient is much noisier than the gradient magnitude, but you should find that you can pick out particular features by orientation.
Steps to take in this exercise:
1. Fill out the function in the editor below to return a thresholded absolute value of the gradient direction. Use Boolean operators, again with exclusive (**<, >**) or inclusive (**<=, >=**) thresholds.
2. Test that your function returns output similar to the example below for **sobel_kernel=15, thresh=(0.7, 1.3)**.
```
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
# Read in an image
image = mpimg.imread('img/signs_vehicles_xygrad.png')
# Define a function that applies Sobel x and y,
# then computes the direction of the gradient
# and applies a threshold.
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Return a binary mask selecting pixels by gradient direction.

    Steps applied to img:
      1) Convert to grayscale
      2) Take the gradient in x and y separately
      3) Take the absolute value of the x and y gradients
      4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction
      5) Create a binary mask where the (inclusive) direction thresholds are met
      6) Return this mask as the binary_output image

    Parameters:
        img: RGB image array.
        sobel_kernel: odd Sobel aperture size; larger values smooth the gradient.
        thresh: (low, high) direction bounds in radians, within [0, pi/2].
    """
    # Grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Calculate the x and y gradients
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Taking absolute values of both gradients restricts the direction
    # to the first quadrant [0, pi/2].
    absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
    # Binary mask: 1 where the direction falls inside the inclusive thresholds.
    binary_output = np.zeros_like(absgraddir)
    binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
    return binary_output
# Run the function: a large kernel (15) smooths the noisy direction field;
# thresh=(0.7, 1.3) radians keeps steeply-sloped edges (per the exercise spec above).
dir_binary = dir_threshold(image, sobel_kernel=15, thresh=(0.7, 1.3))
# Plot the result: original image next to the thresholded direction mask.
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(dir_binary, cmap='gray')
ax2.set_title('Thresholded Grad. Dir.', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.show()
```
| github_jupyter |
<center><h1>Improved Graph Laplacian via Geometric Self-Consistency</h1></center>
<center>Yu-Chia Chen, Dominique Perrault-Joncas, Marina Meilă, James McQueen. University of Washington</center> <br>
<center>Original paper: <a href=https://nips.cc/Conferences/2017/Schedule?showEvent=9223>Improved Graph Laplacian via Geometric Self-Consistency, NIPS 2017</a></center>
## The Task
1. Problem: Estimate the ``radius`` of heat kernel in manifold embedding
1. Formally: Optimize Laplacian w.r.t. parameters (e.g. ``radius``)
1. Previous work:
1. asymptotic rates depending on the (unknown) manifold [4]
1. Embedding dependent neighborhood reconstruction [6]
1. Challenge: it’s an unsupervised problem! What “target” to choose?
## The ``radius`` affects…
1. Quality of manifold embedding via neighborhood selection
1. Laplacian-based embedding and clustering via the kernel for computing similarities
1. Estimation of other geometric quantities that depend on the Laplacian (e.g Riemannian metric) or not (e.g intrinsic dimension).
1. Regression on manifolds via Gaussian Processes or Laplacian regularization.
All references are the same as in the poster.
## Radius Estimation on hourglass dataset
In this tutorial, we are going to estimate the radius of a noisy hourglass dataset. The method we use is based on our NIPS 2017 paper "[Improved Graph Laplacian via Geometric Self-Consistency](https://nips.cc/Conferences/2017/Schedule?showEvent=9223)" (Perrault-Joncas et al.). The main idea is to find an estimated radius $\hat{r}_d$, given dimension $d$, that minimizes the distortion. The distortion is evaluated via the Riemannian metric of the local tangent space.
Below are some configurations that enable plotly to render LaTeX properly.
```
!yes | conda install --channel=conda-forge pip nose coverage gcc cython numpy scipy scikit-learn pyflann pyamg h5py plotly
!rm -rf megaman
!git clone https://github.com/mmp2/megaman.git
!cd megaman
import plotly
# Render figures inline in the notebook without hitting the plotly servers.
plotly.offline.init_notebook_mode(connected=True)
from IPython.core.display import display, HTML
# Inject a small script that polls until Plotly's JS bundle is loaded, then
# configures MathJax to use the SVG renderer so LaTeX in figure titles and
# annotations displays correctly alongside Plotly's SVG output.
display(HTML(
'<script>'
'var waitForPlotly = setInterval( function() {'
'if( typeof(window.Plotly) !== "undefined" ){'
'MathJax.Hub.Config({ SVG: { font: "STIX-Web" }, displayAlign: "center" });'
'MathJax.Hub.Queue(["setRenderer", MathJax.Hub, "SVG"]);'
'clearInterval(waitForPlotly);'
'}}, 250 );'
'</script>'
))
```
## Generate data
The dataset used in this tutorial has the shape of an hourglass, with ``size = 10000`` and dimension 13. The first three dimensions are generated by adding Gaussian noise, with variance ``sigma_primary = 0.1``, onto noise-free hourglass data. We set ``addition_dims = 10`` additional pure-noise dimensions, each with variance ``sigma_additional = 0.1``, so that the whole dataset has dimension 13.
```
from plotly.offline import iplot
#import megaman
from megaman.datasets import *
# 10000 points: a 3-D noisy hourglass (noise variance sigma_primary) padded
# with 10 extra pure-noise dimensions (sigma_additional) -> ambient dim 13.
data = generate_noisy_hourglass(size=10000, sigma_primary=0.1,
addition_dims=10, sigma_additional=0.1)
```
We can visualize dataset with the following plots:
```
from megaman.plotter.scatter_3d import scatter_plot3d_plotly
import plotly.graph_objs as go
# 3-D scatter of the first three coordinates (the hourglass itself); the
# remaining 10 noise dimensions are not shown.
t_data = scatter_plot3d_plotly(data,marker=dict(color='rgb(0, 102, 0)',opacity=0.5))
l_data = go.Layout(title='Noisy hourglass scatter plot for first 3 axis.')
f_data = go.Figure(data=t_data,layout=l_data)
iplot(f_data)
```
## Radius estimation
To estimate the ``radius``, we need to find the pairwise distance first.
To do so, we compute the adjacency matrix using the Geometry modules in megaman.
```
# Bounds of the radius search. rmax also caps the neighborhood radius used to
# build the adjacency matrix: a larger rmax yields a denser distance matrix.
rmax=5
rmin=0.1
from megaman.geometry import Geometry
# Brute-force adjacency: compute all pairwise distances up to radius rmax.
geom = Geometry(adjacency_method='brute',adjacency_kwds=dict(radius=rmax))
geom.set_data_matrix(data)
# Pairwise-distance matrix, reused by the radius and dimension estimators below.
dist = geom.compute_adjacency_matrix()
```
For each data point, the distortion will be estimated. If the size $N$ used in estimating the distortion is large, it will be computationally expensive. We want to choose a sample of size $N'$ such that the average distortion is well estimated. In our case, we choose $N'=1000$. The error will be around $\frac{1}{\sqrt{1000}} \approx 0.03$.
In this example, we searched radius from the minimum pairwise distance ``rmin`` to the maximum distance between points ``rmax``. By doing so, the distance matrix will be dense. If the matrix is too large to fit in the memory, smaller maximum radius ``rmax`` can be chosen to make the distance matrix sparse.
Based on the discussion above, we run radius estimation with
1. sample size=1000 (created by choosing one data point out of every 10 of the original data.)
1. radius search from ``rmin=0.1`` to ``rmax=5``, with 50 points in logspace.
1. dimension ``d=1``
Specify run_parallel=True for searching the radius in parallel.
```
%%capture
# Using magic command %%capture for supressing the std out.
from megaman.utils.estimate_radius import run_estimate_radius
import numpy as np
# Subsample by 10: 10000 points -> N' = 1000 evaluation points, enough to
# estimate the mean distortion to about 1/sqrt(1000) ~ 0.03 (see discussion above).
sample = np.arange(0,data.shape[0],10)
# Search 50 log-spaced radii in [rmin, rmax] assuming intrinsic dimension d=1;
# each result row holds (radius, distortion) — see the column slicing below.
distorion_vs_rad_dim1 = run_estimate_radius(
data, dist, sample=sample, d=1, rmin=rmin, rmax=rmax,
ntry=50, run_parallel=True, search_space='logspace')
```
Run radius estimation same configurations as above except
1. dimension ``d=2``
```
%%capture
# Same search as for d=1, but assuming intrinsic dimension d=2.
# (rmin/rmax are written out literally here: 0.1 and 5.)
distorion_vs_rad_dim2 = run_estimate_radius(
data, dist, sample=sample, d=2, rmin=0.1, rmax=5,
ntry=50, run_parallel=True, search_space='logspace')
```
### Radius estimation result
The estimated radius is the minimizer of the distortion, denoted as $\hat{r}_{d=1}$ and $\hat{r}_{d=2}$. (In the code, it's ``est_rad_dim1`` and ``est_rad_dim2``.)
```
# Column 1 of each result array is the distortion; column 0 is the radius.
distorsion_dim1 = distorion_vs_rad_dim1[:,1].astype('float64')
distorsion_dim2 = distorion_vs_rad_dim2[:,1].astype('float64')
# Both searches used the same radius grid, so take it from the d=1 run.
rad_search_space = distorion_vs_rad_dim1[:,0].astype('float64')
# The estimated radius is the distortion-minimizing radius for each dimension.
argmin_d1 = np.argmin(distorsion_dim1)
argmin_d2 = np.argmin(distorsion_dim2)
est_rad_dim1 = rad_search_space[argmin_d1]
est_rad_dim2 = rad_search_space[argmin_d2]
print ('Estimated radius with d=1 is: {:.4f}'.format(est_rad_dim1))
print ('Estimated radius with d=2 is: {:.4f}'.format(est_rad_dim2))
```
### Plot distortions with different radii
```
# One curve per assumed intrinsic dimension.
t_distorsion = [go.Scatter(x=rad_search_space, y=distorsion_dim1, name='Dimension = 1'),
go.Scatter(x=rad_search_space, y=distorsion_dim2, name='Dimension = 2')]
# Log-log layout with one annotation per curve marking its minimizer.
l_distorsion = go.Layout(
title='Distorsions versus radii',
xaxis=dict(
title='$\\text{Radius } r$',
type='log',
autorange=True
),
yaxis=dict(
title='Distorsion',
type='log',
autorange=True
),
# Annotation coordinates are given as log10 values because both axes
# are log-scaled; each arrow points at the distortion minimum.
annotations=[
dict(
x=np.log10(est_rad_dim1),
y=np.log10(distorsion_dim1[argmin_d1]),
xref='x',
yref='y',
text='$\\hat{r}_{d=1}$',
font = dict(size = 30),
showarrow=True,
arrowhead=7,
ax=0,
ay=-30
),
dict(
x=np.log10(est_rad_dim2),
y=np.log10(distorsion_dim2[argmin_d2]),
xref='x',
yref='y',
text='$\\hat{r}_{d=2}$',
font = dict(size = 30),
showarrow=True,
arrowhead=7,
ax=0,
ay=-30
)
]
)
f_distorsion = go.Figure(data=t_distorsion,layout=l_distorsion)
iplot(f_distorsion)
```
## Application to dimension estimation
We follow the method proposed by [Chen et al. (2011)](http://lcsl.mit.edu/papers/che_lit_mag_ros_2011.pdf) [5] to verify that the estimated radius reflects the true intrinsic dimension of the data. The basic idea is to find the largest gap in the singular values of local PCA, which corresponds to the dimension of the local structure.
We first plot the average singular values versus radii.
```
%%capture
from rad_est_utils import find_argmax_dimension, estimate_dimension
rad_search_space, singular_values = estimate_dimension(data, dist)
```
The singular gap is the difference between two consecutive singular values. Since the intrinsic dimension is 2, we are interested in the region where the second singular gap is the largest. The region is:
```
# np.diff along axis=1 gives s[i+1]-s[i]; negating yields positive gaps
# (assumes each row of singular_values is sorted in decreasing order —
# TODO confirm against estimate_dimension).
singular_gap = -1*np.diff(singular_values,axis=1)
# Radii at which gap index 1 (the SECOND gap) is the largest, i.e. where the
# local structure looks 2-dimensional.
second_gap_is_max_range = (np.argmax(singular_gap,axis=1) == 1).nonzero()[0]
# NOTE(review): treats the matching indices as one contiguous run by taking
# first and last+1 — verify there are no holes in second_gap_is_max_range.
start_idx, end_idx = second_gap_is_max_range[0], second_gap_is_max_range[-1]+1
print ('The index which maximize the second singular gap is: {}'.format(second_gap_is_max_range))
print ('The start and end index of largest continuous range is {} and {}, respectively'.format(start_idx, end_idx))
```
### Averaged singular values with different radii
Plot the averaged singular values with different radii. The gray shaded area is the continuous range in which the second singular gap is the largest (the local structure has dimension 2). The purple shaded area denotes the second singular gap.
By hovering over the lines in this plot, you can see the value of the singular gap.
```
from rad_est_utils import plot_singular_values_versus_radius, generate_layouts
# Traces: averaged singular values vs radius, with the [start_idx, end_idx)
# range shaded; layout also marks the two estimated radii from above.
t_avg_singular = plot_singular_values_versus_radius(singular_values, rad_search_space, start_idx, end_idx)
l_avg_singular = generate_layouts(start_idx, end_idx, est_rad_dim1, est_rad_dim2, rad_search_space)
f_avg_singular = go.Figure(data=t_avg_singular,layout=l_avg_singular)
iplot(f_avg_singular)
```
### Histogram of estimated dimensions with estimated radius.
We first find out the estimated dimensions of each points in the data using the estimated radius $\hat{r}_{d=1}$ and $\hat{r}_{d=2}$.
```
# Per-point dimension estimates (largest-singular-gap criterion) computed at
# each of the two estimated radii.
dimension_freq_d1 = find_argmax_dimension(data,dist, est_rad_dim1)
dimension_freq_d2 = find_argmax_dimension(data,dist, est_rad_dim2)
```
The histogram of estimated dimensions with different optimal radius is shown as below:
```
# Overlaid histograms of the per-point dimension estimates, one per radius.
t_hist_dim = [go.Histogram(x=dimension_freq_d1,name='d=1'),
go.Histogram(x=dimension_freq_d2,name='d=2')]
l_hist_dim = go.Layout(
title='Dimension histogram',
xaxis=dict(
title='Estimated dimension'
),
yaxis=dict(
title='Counts'
),
bargap=0.2,
bargroupgap=0.1
)
f_hist_dim = go.Figure(data=t_hist_dim,layout=l_hist_dim)
iplot(f_hist_dim)
```
## Conclusion
1. Choosing the correct radius/bound/scale is important in any non-linear dimension reduction task
1. The __Geometry Consistency (GC) Algorithm__ requires minimal knowledge: maximum radius, minimum radius, (optionally: dimension $d$ of the manifold.)
1. The chosen radius can be used in
1. any embedding algorithm
1. semi-supervised learning with Laplacian Regularizer (see our NIPS 2017 paper)
1. estimating dimension $d$ (as shown here)
1. The megaman python package is __scalable__, and __efficient__
<img src=https://raw.githubusercontent.com/mmp2/megaman/master/doc/images/spectra_Halpha.png width=600 />
## __Try it:__
<div style="float:left;">All the functions are implemented by the manifold learning package <a href=https://github.com/mmp2/megaman>megaman.</a> </div><a style="float:left;" href="https://anaconda.org/conda-forge/megaman"><img src="https://anaconda.org/conda-forge/megaman/badges/downloads.svg" /></a>
## Reference
[1] R. R. Coifman, S. Lafon. Diffusion maps. Applied and Computational Harmonic Analysis, 2006. <br>
[2] D. Perrault-Joncas, M. Meila, Metric learning and manifolds: Preserving the intrinsic geometry , arXiv1305.7255 <br>
[3] X. Zhou, M. Belkin. Semi-supervised learning by higher order regularization. AISTAT, 2011 <br>
[4] A. Singer. From graph to manifold laplacian: the convergence rate. Applied and Computational Harmonic Analysis, 2006. <br>
[5] G. Chen, A. Little, M. Maggioni, L. Rosasco. Some recent advances in multiscale geometric analysis of point clouds. Wavelets and multiscale analysis. Springer, 2011. <br>
[6] L. Chen, A. Buja. Local Multidimensional Scaling for nonlinear dimension reduction, graph drawing and proximity analysis, JASA,2009. <br>
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.